Example usage for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest

List of usage examples for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest

Introduction

On this page you can find example usage for the com.amazonaws.services.s3.model PutObjectRequest constructor PutObjectRequest(String, String, InputStream, ObjectMetadata).

Prototype

public PutObjectRequest(String bucketName, String key, InputStream input, ObjectMetadata metadata) 

Document

Constructs a new PutObjectRequest object to upload a stream of data to the specified bucket and key.
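
The snippet below is a minimal sketch of this constructor used on its own. The bucket name, key, and payload are placeholders, and the client is created with AmazonS3ClientBuilder, which may differ from the setup in the examples that follow. Setting the content length on the ObjectMetadata up front is recommended so the SDK does not have to buffer the stream to compute it.

public void putExample() {
    // Placeholder bucket and key; credentials and region are resolved from the default provider chain.
    AmazonS3 s3 = AmazonS3ClientBuilder.standard().build();

    byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);

    ObjectMetadata metadata = new ObjectMetadata();
    // Content length is known up front, so the SDK does not need to buffer the stream in memory.
    metadata.setContentLength(payload.length);

    PutObjectRequest request = new PutObjectRequest(
            "example-bucket", "example/key.txt", new ByteArrayInputStream(payload), metadata);

    s3.putObject(request);
}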

Usage

From source file:dataMappers.PictureDataMapper.java

public static void addPictureToReport(DBConnector dbconnector, HttpServletRequest request)
        throws FileUploadException, IOException, SQLException {

    if (!ServletFileUpload.isMultipartContent(request)) {
        System.out.println("Invalid upload request");
        return;
    }

    // Define limits for disk item
    DiskFileItemFactory factory = new DiskFileItemFactory();
    factory.setSizeThreshold(THRESHOLD_SIZE);

    // Define limit for servlet upload
    ServletFileUpload upload = new ServletFileUpload(factory);
    upload.setFileSizeMax(MAX_FILE_SIZE);
    upload.setSizeMax(MAX_REQUEST_SIZE);

    FileItem itemFile = null;
    int reportID = 0;

    // Get list of items in request (parameters, files etc.)
    List formItems = upload.parseRequest(request);
    Iterator iter = formItems.iterator();

    // Loop items
    while (iter.hasNext()) {
        FileItem item = (FileItem) iter.next();

        if (!item.isFormField()) {
            itemFile = item; // If not form field, must be item
        } else if (item.getFieldName().equalsIgnoreCase("reportID")) { // else it is a form field
            try {
                System.out.println(item.getString());
                reportID = Integer.parseInt(item.getString());
            } catch (NumberFormatException e) {
                reportID = 0;
            }
        }
    }

    // itemFile will be null if no field was submitted as a file upload,
    // and reportID must be > 0 before we attempt the upload.
    if (itemFile != null && reportID > 0) {

        try {

            // Create credentials from final vars
            BasicAWSCredentials awsCredentials = new BasicAWSCredentials(AMAZON_ACCESS_KEY, AMAZON_SECRET_KEY);

            // Create client with credentials
            AmazonS3 s3client = new AmazonS3Client(awsCredentials);
            // Set region
            s3client.setRegion(Region.getRegion(Regions.EU_WEST_1));

            // Set content length (size) of file
            ObjectMetadata om = new ObjectMetadata();
            om.setContentLength(itemFile.getSize());

            // Get extension for file
            String ext = FilenameUtils.getExtension(itemFile.getName());
            // Generate random filename
            String keyName = UUID.randomUUID().toString() + '.' + ext;

            // This is the actual upload command
            s3client.putObject(new PutObjectRequest(S3_BUCKET_NAME, keyName, itemFile.getInputStream(), om));

            // Picture was uploaded to S3 if we made it this far. Now we insert the row into the database for the report.
            PreparedStatement stmt = dbconnector.getCon()
                    .prepareStatement("INSERT INTO reports_pictures" + "(REPORTID, PICTURE) VALUES (?,?)");

            stmt.setInt(1, reportID);
            stmt.setString(2, keyName);

            stmt.executeUpdate();

            stmt.close();

        } catch (AmazonServiceException ase) {

            System.out.println("Caught an AmazonServiceException, which " + "means your request made it "
                    + "to Amazon S3, but was rejected with an error response" + " for some reason.");
            System.out.println("Error Message:    " + ase.getMessage());
            System.out.println("HTTP Status Code: " + ase.getStatusCode());
            System.out.println("AWS Error Code:   " + ase.getErrorCode());
            System.out.println("Error Type:       " + ase.getErrorType());
            System.out.println("Request ID:       " + ase.getRequestId());

        } catch (AmazonClientException ace) {

            System.out.println("Caught an AmazonClientException, which " + "means the client encountered "
                    + "an internal error while trying to " + "communicate with S3, "
                    + "such as not being able to access the network.");
            System.out.println("Error Message: " + ace.getMessage());

        }

    }
}

From source file:eu.stratosphere.nephele.fs.s3.S3DataOutputStream.java

License:Apache License

/**
 * {@inheritDoc}
 */
@Override
public void close() throws IOException {

    if (this.uploadId == null) {
        // This is not a multipart upload

        // No data has been written
        if (this.bytesWritten == 0) {
            return;
        }

        final InputStream is = new InternalUploadInputStream(this.buf, this.bytesWritten);
        final ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(this.bytesWritten);

        final PutObjectRequest por = new PutObjectRequest(this.bucket, this.object, is, om);
        if (this.useRRS) {
            por.setStorageClass(StorageClass.ReducedRedundancy);
        } else {
            por.setStorageClass(StorageClass.Standard);
        }

        try {
            this.s3Client.putObject(por);
        } catch (AmazonServiceException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }

        this.bytesWritten = 0;

    } else {

        if (this.bytesWritten > 0) {
            uploadPartAndFlushBuffer();
        }

        boolean operationSuccessful = false;
        try {
            final CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(this.bucket,
                    this.object, this.uploadId, this.partETags);
            this.s3Client.completeMultipartUpload(request);

            operationSuccessful = true;

        } catch (AmazonServiceException e) {
            throw new IOException(StringUtils.stringifyException(e));
        } finally {
            if (!operationSuccessful) {
                abortUpload();
            }
        }
    }
}

From source file:eu.stratosphere.runtime.fs.s3.S3DataOutputStream.java

License:Apache License

@Override
public void close() throws IOException {

    if (this.uploadId == null) {
        // This is not a multipart upload

        // No data has been written
        if (this.bytesWritten == 0) {
            return;
        }

        final InputStream is = new InternalUploadInputStream(this.buf, this.bytesWritten);
        final ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(this.bytesWritten);

        final PutObjectRequest por = new PutObjectRequest(this.bucket, this.object, is, om);
        if (this.useRRS) {
            por.setStorageClass(StorageClass.ReducedRedundancy);
        } else {
            por.setStorageClass(StorageClass.Standard);
        }

        try {
            this.s3Client.putObject(por);
        } catch (AmazonServiceException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }

        this.bytesWritten = 0;

    } else {

        if (this.bytesWritten > 0) {
            uploadPartAndFlushBuffer();
        }

        boolean operationSuccessful = false;
        try {
            final CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(this.bucket,
                    this.object, this.uploadId, this.partETags);
            this.s3Client.completeMultipartUpload(request);

            operationSuccessful = true;

        } catch (AmazonServiceException e) {
            throw new IOException(StringUtils.stringifyException(e));
        } finally {
            if (!operationSuccessful) {
                abortUpload();
            }
        }
    }
}

From source file:fi.yle.tools.aws.maven.SimpleStorageServiceWagon.java

License:Apache License

@Override
protected void putResource(File source, String destination, TransferProgress transferProgress)
        throws TransferFailedException, ResourceDoesNotExistException {
    String key = getKey(destination);

    mkdirs(key, 0);

    InputStream in = null;
    try {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(source.length());
        objectMetadata.setContentType(Mimetypes.getInstance().getMimetype(source));

        in = new TransferProgressFileInputStream(source, transferProgress);

        this.amazonS3.putObject(new PutObjectRequest(this.bucketName, key, in, objectMetadata)
                .withCannedAcl(CannedAccessControlList.BucketOwnerFullControl));
    } catch (AmazonServiceException e) {
        throw new TransferFailedException(String.format("Cannot write file to '%s'", destination), e);
    } catch (FileNotFoundException e) {
        throw new ResourceDoesNotExistException(String.format("Cannot read file from '%s'", source), e);
    } finally {
        IoUtils.closeQuietly(in);
    }
}

From source file:fi.yle.tools.aws.maven.SimpleStorageServiceWagon.java

License:Apache License

private PutObjectRequest createDirectoryPutObjectRequest(String key) {
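    // S3 has no real directories, so this request stores a zero-byte object under the given key
    // as a directory placeholder, with the content length set explicitly to 0 on the metadata.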
    ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[0]);

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(0);

    return new PutObjectRequest(this.bucketName, key, inputStream, objectMetadata)
            .withCannedAcl(CannedAccessControlList.BucketOwnerFullControl);
}

From source file:fr.eurecom.hybris.kvs.drivers.AmazonKvs.java

License:Apache License

public void put(String key, byte[] value) throws IOException {
    try {
        ByteArrayInputStream bais = new ByteArrayInputStream(value);

        ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(value.length);

        PutObjectRequest request = new PutObjectRequest(this.rootContainer, key, bais, om);
        request.setStorageClass(StorageClass.ReducedRedundancy);

        Upload upload = this.tm.upload(request); // NB: asynchronous, returns immediately
        upload.waitForCompletion();
    } catch (AmazonClientException | InterruptedException e) {
        throw new IOException(e);
    }
}

From source file:fsi_admin.JAwsS3Conn.java

License:Open Source License

@SuppressWarnings("rawtypes")
private boolean subirArchivo(StringBuffer msj, AmazonS3 s3, String S3BUKT, String nombre, Vector archivos) {
    //System.out.println("AwsConn SubirArchivo:" + nombre + ":nombre");

    if (!archivos.isEmpty()) {
        FileItem actual = null;

        try {
            for (int i = 0; i < archivos.size(); i++) {
                InputStream inputStream = null;
                try {
                    actual = (FileItem) archivos.elementAt(i);
                    /////////////////////////////////////////////////////////
                    //Obtain the Content length of the Input stream for S3 header
                    InputStream is = actual.getInputStream();
                    byte[] contentBytes = IOUtils.toByteArray(is);

                    Long contentLength = Long.valueOf(contentBytes.length);

                    ObjectMetadata metadata = new ObjectMetadata();
                    metadata.setContentLength(contentLength);

                    //Reobtain the tmp uploaded file as input stream
                    inputStream = actual.getInputStream();

                    //Put the object in S3
                    //System.out.println("BUCKET: " + S3BUKT + " OBJETO: " + nombre.replace('_', '-'));
                    //System.out.println("BUCKET: " + S3BUKT + " OBJETO: " + nombre.replace('_', '-'));
                    s3.putObject(new PutObjectRequest(S3BUKT, nombre, inputStream, metadata));
                } finally {
                    if (inputStream != null)
                        try {
                            inputStream.close();
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                }
                ////////////////////////////////////////////////////////////
            }
            return true;
        } catch (AmazonServiceException ase) {
            ase.printStackTrace();
            msj.append("Error de AmazonServiceException al subir archivo a S3.<br>");
            msj.append("Mensaje: " + ase.getMessage() + "<br>");
            msj.append("Cdigo de Estatus HTTP: " + ase.getStatusCode() + "<br>");
            msj.append("Cdigo de Error AWS:   " + ase.getErrorCode() + "<br>");
            msj.append("Tipo de Error:       " + ase.getErrorType() + "<br>");
            msj.append("Request ID:       " + ase.getRequestId());
            return false;
        } catch (AmazonClientException ace) {
            ace.printStackTrace();
            msj.append("Error de AmazonClientException al subir archivo a S3.<br>");
            msj.append("Mensaje: " + ace.getMessage());
            return false;
        } catch (IOException e) {
            e.printStackTrace();
            msj.append("Error de Entrada/Salida al subir archivo a S3: " + e.getMessage());
            return false;
        }

    } else {
        msj.append("Error al subir archivo a la nube: No se envi ningun archivo");
        return false;
    }
}

From source file:gov.cdc.sdp.cbr.aphl.AphlS3Producer.java

License:Apache License

public void processSingleOp(final Exchange exchange) throws Exception {

    ObjectMetadata objectMetadata = determineMetadata(exchange);

    File filePayload = null;
    InputStream is = null;
    Object obj = exchange.getIn().getMandatoryBody();
    PutObjectRequest putObjectRequest = null;
    // Need to check if the message body is WrappedFile
    if (obj instanceof WrappedFile) {
        obj = ((WrappedFile<?>) obj).getFile();
    }
    if (obj instanceof File) {
        filePayload = (File) obj;
        is = new FileInputStream(filePayload);
    } else {
        is = exchange.getIn().getMandatoryBody(InputStream.class);
    }

    putObjectRequest = new PutObjectRequest(getConfiguration().getBucketName(), determineKey(exchange), is,
            objectMetadata);

    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        putObjectRequest.setStorageClass(storageClass);
    }

    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        putObjectRequest.setCannedAcl(objectAcl);
    }

    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if cannedacl and acl are both specified the last one will
        // be used. refer to
        // PutObjectRequest#setAccessControlList for more details
        putObjectRequest.setAccessControlList(acl);
    }

    PutObjectResult putObjectResult = getEndpoint().getS3Client().putObject(putObjectRequest);

    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, putObjectResult.getETag());
    if (putObjectResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, putObjectResult.getVersionId());
    }

    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        // close streams
        IOHelper.close(putObjectRequest.getInputStream());
        IOHelper.close(is);
        FileUtil.deleteFile(filePayload);
    }
}

From source file:gr.abiss.calipso.fs.S3FilePersistenceServiceImpl.java

License:Open Source License

/**
 * Save file in S3
 * @see gr.abiss.calipso.fs.FilePersistenceService#saveFile(java.io.InputStream, long, java.lang.String, java.lang.String)
 */
@Override
public String saveFile(InputStream in, long contentLength, String contentType, String path) {
    String url;
    // create metadata
    ObjectMetadata meta = new ObjectMetadata();
    meta.setContentLength(contentLength);
    meta.setContentType(contentType);

    // save to bucket
    s3Client.putObject(new PutObjectRequest(nameCardBucket, path, in, meta)
            .withCannedAcl(CannedAccessControlList.PublicRead));
    // set the URL to return
    url = s3Client.getUrl(nameCardBucket, path).toString();
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("File saved: " + path + ", size: " + contentLength + ", contentType: " + contentType);
    }
    return url;
}

From source file:io.druid.storage.s3.ServerSideEncryptingAmazonS3.java

License:Apache License

public PutObjectResult putObject(String bucket, String key, String content) {
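    // Wrap the UTF-8 bytes of the string in an in-memory stream. The ObjectMetadata is left empty,
    // so the SDK buffers the stream to determine the content length before the upload.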
    final InputStream in = new ByteArrayInputStream(StringUtils.toUtf8(content));
    return putObject(new PutObjectRequest(bucket, key, in, new ObjectMetadata()));
}