Example usage for com.amazonaws.services.s3.model InitiateMultipartUploadRequest InitiateMultipartUploadRequest

Introduction

On this page you can find example usage for the com.amazonaws.services.s3.model InitiateMultipartUploadRequest constructor.

Prototype

public InitiateMultipartUploadRequest(String bucketName, String key, ObjectMetadata objectMetadata) 

Document

Constructs a request to initiate a new multipart upload in the specified bucket, stored by the specified key, and with the additional specified object metadata.
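
A minimal sketch of how this constructor is typically used, assuming an existing AmazonS3 client named s3client and placeholder bucketName and objectKey values (both hypothetical here):

ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentType("application/octet-stream");

// Construct the request with the target bucket, key, and metadata to apply to the final object
InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, objectKey, metadata);

// The returned upload ID links the subsequent UploadPartRequest and CompleteMultipartUploadRequest calls
InitiateMultipartUploadResult result = s3client.initiateMultipartUpload(request);
String uploadId = result.getUploadId();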

Usage

From source file:com.altoukhov.svsync.fileviews.S3FileSpace.java

License:Apache License

public boolean writeLargeFile(InputStream fileStream, FileSnapshot file) {
    if (fileStream == null)
        return false;

    try {
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(file.getFileSize());
        meta.getUserMetadata().put("lmd", file.getModifiedTimestamp().toDate().getTime() + "");
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);

        List<PartETag> partTags = new ArrayList<>();
        String fileKey = toAbsoluteFilePath(file.getRelativePath());

        InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, fileKey, meta);
        InitiateMultipartUploadResult result = s3.initiateMultipartUpload(request);

        long contentLength = file.getFileSize();
        long partSize = 256 * 1024 * 1024;

        try {
            // Uploading the file, part by part.
            long filePosition = 0;

            for (int i = 1; filePosition < contentLength; i++) {

                partSize = Math.min(partSize, (contentLength - filePosition));

                // Creating the request for a part upload
                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName)
                        .withKey(fileKey).withUploadId(result.getUploadId()).withPartNumber(i)
                        .withInputStream(fileStream).withPartSize(partSize);

                // Upload part and add response to the result list.
                partTags.add(s3.uploadPart(uploadRequest).getPartETag());
                filePosition += partSize;

                System.out.println("Uploaded " + Utils.readableFileSize(filePosition) + " out of "
                        + Utils.readableFileSize(contentLength));
            }
        } catch (Exception e) {
            System.out.println("UploadPartRequest failed: " + e.getMessage());
            s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, fileKey, result.getUploadId()));
            return false;
        }

        s3.completeMultipartUpload(
                new CompleteMultipartUploadRequest(bucketName, fileKey, result.getUploadId(), partTags));
    } catch (AmazonClientException ex) {
        System.out.println("Upload failed: " + ex.getMessage());
        return false;

    }
    return true;
}

From source file:com.handywedge.binarystore.store.aws.BinaryStoreManagerImpl.java

License:MIT License

@Override
public BinaryInfo upload(StorageInfo storage, BinaryInfo binary, InputStream inStream) throws StoreException {
    logger.debug("={}", storage);
    logger.debug("?={}", binary);

    AmazonS3 s3client = getS3Client(binary.getBucketName());

    ObjectMetadata oMetadata = new ObjectMetadata();
    oMetadata.setContentType(binary.getContentType());

    // Initiate the multipart upload
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(binary.getBucketName(),
            binary.getFileName(), oMetadata);
    InitiateMultipartUploadResult initResponse = s3client.initiateMultipartUpload(initRequest);

    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        long written = IOUtils.copyLarge(inStream, baos, 0, BINARY_PART_SIZE_5MB);

        byte[] data = baos.toByteArray();
        InputStream awsInputStream = new ByteArrayInputStream(data);

        if (written < BINARY_PART_SIZE_5MB) {
            oMetadata.setContentLength(written);
            s3client.putObject(binary.getBucketName(), binary.getFileName(), awsInputStream, oMetadata);
        } else {
            int firstByte = 0;
            int partNumber = 1;
            boolean isFirstChunck = true;
            boolean overSizeLimit = false;
            List<PartETag> partETags = new ArrayList<PartETag>();
            InputStream firstChunck = new ByteArrayInputStream(data);
            PushbackInputStream chunckableInputStream = new PushbackInputStream(inStream, 1);

            long maxSize = BINARY_PART_SIZE_5MB * 1024;
            String maxSizeStr = "5GB";
            String prefix = MDC.get("requestId");
            while (-1 != (firstByte = chunckableInputStream.read())) {
                long partSize = 0;
                chunckableInputStream.unread(firstByte);
                File tempFile = File.createTempFile(prefix.concat("-part").concat(String.valueOf(partNumber)),
                        null);
                tempFile.deleteOnExit();
                try (OutputStream os = new BufferedOutputStream(
                        new FileOutputStream(tempFile.getAbsolutePath()))) {

                    if (isFirstChunck) {
                        partSize = IOUtils.copyLarge(firstChunck, os, 0, (BINARY_PART_SIZE_5MB));
                        isFirstChunck = false;
                    } else {
                        partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (BINARY_PART_SIZE_5MB));
                    }
                    written += partSize;

                    if (written > maxSize) { // 5GB
                        overSizeLimit = true;
                        logger.warn("OVERSIZED FILE ({}). STARTING ABORT", written);
                        break;
                    }
                }

                FileInputStream chunk = new FileInputStream(tempFile);
                Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
                if (!isLastPart) {
                    chunckableInputStream.unread(firstByte);
                }

                oMetadata.setContentLength(partSize);

                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(binary.getBucketName())
                        .withKey(binary.getFileName()).withUploadId(initResponse.getUploadId())
                        .withObjectMetadata(oMetadata).withInputStream(chunk).withPartSize(partSize)
                        .withPartNumber(partNumber).withLastPart(isLastPart);
                UploadPartResult result = s3client.uploadPart(uploadRequest);
                partETags.add(result.getPartETag());
                partNumber++;
            }

            if (overSizeLimit) {
                ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(
                        binary.getBucketName());
                MultipartUploadListing listResult = s3client.listMultipartUploads(listRequest);

                int timesIterated = 20;
                // loop and abort all the multipart uploads
                while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {
                    s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                            binary.getFileName(), initResponse.getUploadId()));
                    Thread.sleep(1000);
                    timesIterated--;
                    listResult = s3client.listMultipartUploads(listRequest);
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }
                if (timesIterated == 0) {
                    logger.warn("Files parts that couldn't be aborted in 20 seconds are:");
                    Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads()
                            .iterator();
                    while (multipartUploadIterator.hasNext()) {
                        logger.warn(multipartUploadIterator.next().getKey());
                    }
                }
                throw new StoreException(HttpStatus.SC_REQUEST_TOO_LONG, ErrorClassification.UPLOAD_TOO_LARGE,
                        maxSizeStr);
            } else {
                CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                        binary.getBucketName(), binary.getFileName(), initResponse.getUploadId(), partETags);

                CompleteMultipartUploadResult comMPUResult = s3client.completeMultipartUpload(compRequest);
                logger.debug("CompleteMultipartUploadResult={}", comMPUResult);
            }
        }
    } catch (AmazonServiceException ase) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ase,
                binary.toString());
    } catch (AmazonClientException ace) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ace,
                binary.toString());
    } catch (IOException ioe) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, ioe,
                binary.toString());
    } catch (InterruptedException itre) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, itre,
                binary.toString());
    } finally {
        if (inStream != null) {
            try {
                inStream.close();
            } catch (Exception e) {
            }
        }
    }

    return getBinaryInfo(s3client, binary.getBucketName(), binary.getFileName());
}

From source file:com.upplication.s3fs.S3OutputStream.java

License:Open Source License

/**
 * Starts the multipart upload process.
 *
 * @return An instance of {@link InitiateMultipartUploadResult}
 * @throws IOException
 */
private InitiateMultipartUploadResult initiateMultipartUpload() throws IOException {
    final InitiateMultipartUploadRequest request = //
            new InitiateMultipartUploadRequest(objectId.getBucket(), objectId.getKey(), metadata);

    if (storageClass != null) {
        request.setStorageClass(storageClass);
    }

    try {
        return s3.initiateMultipartUpload(request);
    } catch (final AmazonClientException e) {
        throw new IOException("Failed to initiate Amazon S3 multipart upload", e);
    }
}

From source file:gov.cdc.sdp.cbr.aphl.AphlS3Producer.java

License:Apache License

public void processMultiPart(final Exchange exchange) throws Exception {
    File filePayload = null;
    Object obj = exchange.getIn().getMandatoryBody();
    // Need to check if the message body is WrappedFile
    if (obj instanceof WrappedFile) {
        obj = ((WrappedFile<?>) obj).getFile();
    }
    if (obj instanceof File) {
        filePayload = (File) obj;
    } else {
        LOG.error("aphl-s3: MultiPart upload requires a File input.");
        throw new InvalidArgumentException("aphl-s3: MultiPart upload requires a File input.");
    }

    ObjectMetadata objectMetadata = determineMetadata(exchange);
    if (objectMetadata.getContentLength() == 0) {
        objectMetadata.setContentLength(filePayload.length());
    }

    final String keyName = determineKey(exchange);
    final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(
            getConfiguration().getBucketName(), keyName, objectMetadata);

    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        initRequest.setStorageClass(StorageClass.fromValue(storageClass));
    }

    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        initRequest.setCannedACL(objectAcl);
    }

    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if cannedacl and acl are both specified the last one will
        // be used. refer to
        // PutObjectRequest#setAccessControlList for more details
        initRequest.setAccessControlList(acl);
    }

    LOG.trace("Initiating multipart upload ...");

    final InitiateMultipartUploadResult initResponse = getEndpoint().getS3Client()
            .initiateMultipartUpload(initRequest);
    final long contentLength = objectMetadata.getContentLength();
    final List<PartETag> partETags = new ArrayList<PartETag>();
    long partSize = getConfiguration().getPartSize();
    CompleteMultipartUploadResult uploadResult = null;

    long filePosition = 0;

    try {
        for (int part = 1; filePosition < contentLength; part++) {
            partSize = Math.min(partSize, contentLength - filePosition);

            UploadPartRequest uploadRequest = new UploadPartRequest()
                    .withBucketName(getConfiguration().getBucketName()).withKey(keyName)
                    .withUploadId(initResponse.getUploadId()).withPartNumber(part).withFileOffset(filePosition)
                    .withFile(filePayload).withPartSize(partSize);

            partETags.add(getEndpoint().getS3Client().uploadPart(uploadRequest).getPartETag());

            filePosition += partSize;
        }
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                getConfiguration().getBucketName(), keyName, initResponse.getUploadId(), partETags);

        uploadResult = getEndpoint().getS3Client().completeMultipartUpload(compRequest);

    } catch (Exception exception) {
        LOG.error("Multi-part upload failed, aborting", exception);
        getEndpoint().getS3Client().abortMultipartUpload(new AbortMultipartUploadRequest(
                getConfiguration().getBucketName(), keyName, initResponse.getUploadId()));
        throw exception;
    }

    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, uploadResult.getETag());
    if (uploadResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, uploadResult.getVersionId());
    }

    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        FileUtil.deleteFile(filePayload);
    }
}

From source file:io.confluent.connect.s3.storage.S3OutputStream.java

License:Open Source License

private MultipartUpload newMultipartUpload() throws IOException {
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, key,
            newObjectMetadata()).withCannedACL(cannedAcl);

    if (SSEAlgorithm.KMS.toString().equalsIgnoreCase(ssea) && StringUtils.isNotBlank(sseKmsKeyId)) {
        initRequest.setSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(sseKmsKeyId));
    } else if (sseCustomerKey != null) {
        initRequest.setSSECustomerKey(sseCustomerKey);
    }

    try {
        return new MultipartUpload(s3.initiateMultipartUpload(initRequest).getUploadId());
    } catch (AmazonClientException e) {
        // TODO: elaborate on the exception interpretation. If this is an AmazonServiceException,
        // there's more info to be extracted.
        throw new IOException("Unable to initiate MultipartUpload: " + e, e);
    }
}

From source file:io.konig.camel.aws.s3.DeleteObjectProducer.java

License:Apache License

public void processMultiPart(final Exchange exchange) throws Exception {
    File filePayload = null;
    Object obj = exchange.getIn().getMandatoryBody();
    // Need to check if the message body is WrappedFile
    if (obj instanceof WrappedFile) {
        obj = ((WrappedFile<?>) obj).getFile();
    }
    if (obj instanceof File) {
        filePayload = (File) obj;
    } else {
        throw new InvalidArgumentException("aws-s3: MultiPart upload requires a File input.");
    }

    ObjectMetadata objectMetadata = determineMetadata(exchange);
    if (objectMetadata.getContentLength() == 0) {
        objectMetadata.setContentLength(filePayload.length());
    }

    final String keyName = determineKey(exchange);
    final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(
            getConfiguration().getBucketName(), keyName, objectMetadata);

    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        initRequest.setStorageClass(StorageClass.fromValue(storageClass));
    }

    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        initRequest.setCannedACL(objectAcl);
    }

    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if cannedacl and acl are both specified the last one will
        // be used. refer to
        // PutObjectRequest#setAccessControlList for more details
        initRequest.setAccessControlList(acl);
    }

    if (getConfiguration().isUseAwsKMS()) {
        SSEAwsKeyManagementParams keyManagementParams;
        if (ObjectHelper.isNotEmpty(getConfiguration().getAwsKMSKeyId())) {
            keyManagementParams = new SSEAwsKeyManagementParams(getConfiguration().getAwsKMSKeyId());
        } else {
            keyManagementParams = new SSEAwsKeyManagementParams();
        }
        initRequest.setSSEAwsKeyManagementParams(keyManagementParams);
    }

    LOG.trace("Initiating multipart upload [{}] from exchange [{}]...", initRequest, exchange);

    final InitiateMultipartUploadResult initResponse = getEndpoint().getS3Client()
            .initiateMultipartUpload(initRequest);
    final long contentLength = objectMetadata.getContentLength();
    final List<PartETag> partETags = new ArrayList<PartETag>();
    long partSize = getConfiguration().getPartSize();
    CompleteMultipartUploadResult uploadResult = null;

    long filePosition = 0;

    try {
        for (int part = 1; filePosition < contentLength; part++) {
            partSize = Math.min(partSize, contentLength - filePosition);

            UploadPartRequest uploadRequest = new UploadPartRequest()
                    .withBucketName(getConfiguration().getBucketName()).withKey(keyName)
                    .withUploadId(initResponse.getUploadId()).withPartNumber(part).withFileOffset(filePosition)
                    .withFile(filePayload).withPartSize(partSize);

            LOG.trace("Uploading part [{}] for {}", part, keyName);
            partETags.add(getEndpoint().getS3Client().uploadPart(uploadRequest).getPartETag());

            filePosition += partSize;
        }
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                getConfiguration().getBucketName(), keyName, initResponse.getUploadId(), partETags);

        uploadResult = getEndpoint().getS3Client().completeMultipartUpload(compRequest);

    } catch (Exception e) {
        getEndpoint().getS3Client().abortMultipartUpload(new AbortMultipartUploadRequest(
                getConfiguration().getBucketName(), keyName, initResponse.getUploadId()));
        throw e;
    }

    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, uploadResult.getETag());
    if (uploadResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, uploadResult.getVersionId());
    }

    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        FileUtil.deleteFile(filePayload);
    }
}

From source file:org.apache.hadoop.fs.s3a.S3AFastOutputStream.java

License:Apache License

private MultiPartUpload initiateMultiPartUpload() throws IOException {
    final InitiateMultipartUploadRequest initiateMPURequest = new InitiateMultipartUploadRequest(bucket, key,
            createDefaultMetadata());
    initiateMPURequest.setCannedACL(cannedACL);
    try {
        return new MultiPartUpload(client.initiateMultipartUpload(initiateMPURequest).getUploadId());
    } catch (AmazonClientException ace) {
        throw translateException("initiate MultiPartUpload", key, ace);
    }
}

From source file:org.apache.hadoop.fs.s3r.S3RFastOutputStream.java

License:Apache License

private MultiPartUpload initiateMultiPartUpload() throws IOException {
    final ObjectMetadata om = createDefaultMetadata();
    final InitiateMultipartUploadRequest initiateMPURequest = new InitiateMultipartUploadRequest(bucket, key,
            om);
    initiateMPURequest.setCannedACL(cannedACL);
    try {
        return new MultiPartUpload(client.initiateMultipartUpload(initiateMPURequest).getUploadId());
    } catch (AmazonServiceException ase) {
        throw new IOException("Unable to initiate MultiPartUpload (server side)" + ": " + ase, ase);
    } catch (AmazonClientException ace) {
        throw new IOException("Unable to initiate MultiPartUpload (client side)" + ": " + ace, ace);
    }
}

From source file:org.apache.nifi.minifi.c2.cache.s3.S3OutputStream.java

License:Apache License

private MultipartUpload newMultipartUpload() throws IOException {
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, key,
            new ObjectMetadata());
    try {
        return new MultipartUpload(s3.initiateMultipartUpload(initRequest).getUploadId());
    } catch (AmazonClientException e) {
        throw new IOException("Unable to initiate MultipartUpload: " + e, e);
    }
}

From source file:org.apache.nifi.processors.aws.s3.PutS3Object.java

License:Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();

    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions(flowFile).getValue();
    final String key = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    final String cacheKey = getIdentifier() + "/" + bucket + "/" + key;

    final AmazonS3Client s3 = getClient();
    final FlowFile ff = flowFile;
    final Map<String, String> attributes = new HashMap<>();
    final String ffFilename = ff.getAttributes().get(CoreAttributes.FILENAME.key());
    attributes.put(S3_BUCKET_KEY, bucket);
    attributes.put(S3_OBJECT_KEY, key);

    final Long multipartThreshold = context.getProperty(MULTIPART_THRESHOLD).asDataSize(DataUnit.B).longValue();
    final Long multipartPartSize = context.getProperty(MULTIPART_PART_SIZE).asDataSize(DataUnit.B).longValue();

    final long now = System.currentTimeMillis();

    /*
     * If necessary, run age off for existing uploads in AWS S3 and local state
     */
    ageoffS3Uploads(context, s3, now);

    /*
     * Then
     */
    try {
        session.read(flowFile, new InputStreamCallback() {
            @Override
            public void process(final InputStream rawIn) throws IOException {
                try (final InputStream in = new BufferedInputStream(rawIn)) {
                    final ObjectMetadata objectMetadata = new ObjectMetadata();
                    objectMetadata.setContentDisposition(ff.getAttribute(CoreAttributes.FILENAME.key()));
                    objectMetadata.setContentLength(ff.getSize());

                    final String contentType = context.getProperty(CONTENT_TYPE)
                            .evaluateAttributeExpressions(ff).getValue();
                    if (contentType != null) {
                        objectMetadata.setContentType(contentType);
                        attributes.put(S3_CONTENT_TYPE, contentType);
                    }

                    final String expirationRule = context.getProperty(EXPIRATION_RULE_ID)
                            .evaluateAttributeExpressions(ff).getValue();
                    if (expirationRule != null) {
                        objectMetadata.setExpirationTimeRuleId(expirationRule);
                    }

                    final Map<String, String> userMetadata = new HashMap<>();
                    for (final Map.Entry<PropertyDescriptor, String> entry : context.getProperties()
                            .entrySet()) {
                        if (entry.getKey().isDynamic()) {
                            final String value = context.getProperty(entry.getKey())
                                    .evaluateAttributeExpressions(ff).getValue();
                            userMetadata.put(entry.getKey().getName(), value);
                        }
                    }

                    final String serverSideEncryption = context.getProperty(SERVER_SIDE_ENCRYPTION).getValue();
                    if (!serverSideEncryption.equals(NO_SERVER_SIDE_ENCRYPTION)) {
                        objectMetadata.setSSEAlgorithm(serverSideEncryption);
                        attributes.put(S3_SSE_ALGORITHM, serverSideEncryption);
                    }

                    if (!userMetadata.isEmpty()) {
                        objectMetadata.setUserMetadata(userMetadata);
                    }

                    if (ff.getSize() <= multipartThreshold) {
                        //----------------------------------------
                        // single part upload
                        //----------------------------------------
                        final PutObjectRequest request = new PutObjectRequest(bucket, key, in, objectMetadata);
                        request.setStorageClass(
                                StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                        final AccessControlList acl = createACL(context, ff);
                        if (acl != null) {
                            request.setAccessControlList(acl);
                        }
                        final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                        if (cannedAcl != null) {
                            request.withCannedAcl(cannedAcl);
                        }

                        try {
                            final PutObjectResult result = s3.putObject(request);
                            if (result.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, result.getVersionId());
                            }
                            if (result.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, result.getETag());
                            }
                            if (result.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY, result.getExpirationTime().toString());
                            }
                            if (result.getMetadata().getRawMetadata().keySet()
                                    .contains(S3_STORAGECLASS_META_KEY)) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY, result.getMetadata()
                                        .getRawMetadataValue(S3_STORAGECLASS_META_KEY).toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_PUTOBJECT);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}",
                                    new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    } else {
                        //----------------------------------------
                        // multipart upload
                        //----------------------------------------

                        // load or create persistent state
                        //------------------------------------------------------------
                        MultipartState currentState;
                        try {
                            currentState = getLocalStateIfInS3(s3, bucket, cacheKey);
                            if (currentState != null) {
                                if (currentState.getPartETags().size() > 0) {
                                    final PartETag lastETag = currentState.getPartETags()
                                            .get(currentState.getPartETags().size() - 1);
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' "
                                            + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' "
                                            + "contentLength='{}' partsLoaded={} lastPart={}/{}",
                                            new Object[] { ffFilename, bucket, key, currentState.getUploadId(),
                                                    currentState.getFilePosition(), currentState.getPartSize(),
                                                    currentState.getStorageClass().toString(),
                                                    currentState.getContentLength(),
                                                    currentState.getPartETags().size(),
                                                    Integer.toString(lastETag.getPartNumber()),
                                                    lastETag.getETag() });
                                } else {
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' "
                                            + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' "
                                            + "contentLength='{}' no partsLoaded",
                                            new Object[] { ffFilename, bucket, key, currentState.getUploadId(),
                                                    currentState.getFilePosition(), currentState.getPartSize(),
                                                    currentState.getStorageClass().toString(),
                                                    currentState.getContentLength() });
                                }
                            } else {
                                currentState = new MultipartState();
                                currentState.setPartSize(multipartPartSize);
                                currentState.setStorageClass(
                                        StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                                currentState.setContentLength(ff.getSize());
                                persistLocalState(cacheKey, currentState);
                                getLogger().info("Starting new upload for flowfile='{}' bucket='{}' key='{}'",
                                        new Object[] { ffFilename, bucket, key });
                            }
                        } catch (IOException e) {
                            getLogger().error("IOException initiating cache state while processing flow files: "
                                    + e.getMessage());
                            throw (e);
                        }

                        // initiate multipart upload or find position in file
                        //------------------------------------------------------------
                        if (currentState.getUploadId().isEmpty()) {
                            final InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(
                                    bucket, key, objectMetadata);
                            initiateRequest.setStorageClass(currentState.getStorageClass());
                            final AccessControlList acl = createACL(context, ff);
                            if (acl != null) {
                                initiateRequest.setAccessControlList(acl);
                            }
                            final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                            if (cannedAcl != null) {
                                initiateRequest.withCannedACL(cannedAcl);
                            }
                            try {
                                final InitiateMultipartUploadResult initiateResult = s3
                                        .initiateMultipartUpload(initiateRequest);
                                currentState.setUploadId(initiateResult.getUploadId());
                                currentState.getPartETags().clear();
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state while processing flow file: "
                                            + e.getMessage());
                                    throw (new ProcessException("Exception saving cache state", e));
                                }
                                getLogger().info(
                                        "Success initiating upload flowfile={} available={} position={} "
                                                + "length={} bucket={} key={} uploadId={}",
                                        new Object[] { ffFilename, in.available(),
                                                currentState.getFilePosition(), currentState.getContentLength(),
                                                bucket, key, currentState.getUploadId() });
                                if (initiateResult.getUploadId() != null) {
                                    attributes.put(S3_UPLOAD_ID_ATTR_KEY, initiateResult.getUploadId());
                                }
                            } catch (AmazonClientException e) {
                                getLogger().info(
                                        "Failure initiating upload flowfile={} bucket={} key={} reason={}",
                                        new Object[] { ffFilename, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        } else {
                            if (currentState.getFilePosition() > 0) {
                                try {
                                    final long skipped = in.skip(currentState.getFilePosition());
                                    if (skipped != currentState.getFilePosition()) {
                                        getLogger().info(
                                                "Failure skipping to resume upload flowfile={} "
                                                        + "bucket={} key={} position={} skipped={}",
                                                new Object[] { ffFilename, bucket, key,
                                                        currentState.getFilePosition(), skipped });
                                    }
                                } catch (Exception e) {
                                    getLogger().info(
                                            "Failure skipping to resume upload flowfile={} bucket={} "
                                                    + "key={} position={} reason={}",
                                            new Object[] { ffFilename, bucket, key,
                                                    currentState.getFilePosition(), e.getMessage() });
                                    throw (new ProcessException(e));
                                }
                            }
                        }

                        // upload parts
                        //------------------------------------------------------------
                        long thisPartSize;
                        for (int part = currentState.getPartETags().size() + 1; currentState
                                .getFilePosition() < currentState.getContentLength(); part++) {
                            if (!PutS3Object.this.isScheduled()) {
                                throw new IOException(S3_PROCESS_UNSCHEDULED_MESSAGE + " flowfile=" + ffFilename
                                        + " part=" + part + " uploadId=" + currentState.getUploadId());
                            }
                            thisPartSize = Math.min(currentState.getPartSize(),
                                    (currentState.getContentLength() - currentState.getFilePosition()));
                            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucket)
                                    .withKey(key).withUploadId(currentState.getUploadId()).withInputStream(in)
                                    .withPartNumber(part).withPartSize(thisPartSize);
                            try {
                                UploadPartResult uploadPartResult = s3.uploadPart(uploadRequest);
                                currentState.addPartETag(uploadPartResult.getPartETag());
                                currentState.setFilePosition(currentState.getFilePosition() + thisPartSize);
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state processing flow file: "
                                            + e.getMessage());
                                }
                                getLogger().info(
                                        "Success uploading part flowfile={} part={} available={} "
                                                + "etag={} uploadId={}",
                                        new Object[] { ffFilename, part, in.available(),
                                                uploadPartResult.getETag(), currentState.getUploadId() });
                            } catch (AmazonClientException e) {
                                getLogger().info(
                                        "Failure uploading part flowfile={} part={} bucket={} key={} "
                                                + "reason={}",
                                        new Object[] { ffFilename, part, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        }

                        // complete multipart upload
                        //------------------------------------------------------------
                        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(
                                bucket, key, currentState.getUploadId(), currentState.getPartETags());
                        try {
                            CompleteMultipartUploadResult completeResult = s3
                                    .completeMultipartUpload(completeRequest);
                            getLogger().info("Success completing upload flowfile={} etag={} uploadId={}",
                                    new Object[] { ffFilename, completeResult.getETag(),
                                            currentState.getUploadId() });
                            if (completeResult.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, completeResult.getVersionId());
                            }
                            if (completeResult.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, completeResult.getETag());
                            }
                            if (completeResult.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY,
                                        completeResult.getExpirationTime().toString());
                            }
                            if (currentState.getStorageClass() != null) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY,
                                        currentState.getStorageClass().toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_MULTIPARTUPLOAD);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}",
                                    new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    }
                }
            }
        });

        if (!attributes.isEmpty()) {
            flowFile = session.putAllAttributes(flowFile, attributes);
        }
        session.transfer(flowFile, REL_SUCCESS);

        final String url = s3.getResourceUrl(bucket, key);
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        session.getProvenanceReporter().send(flowFile, url, millis);

        getLogger().info("Successfully put {} to Amazon S3 in {} milliseconds", new Object[] { ff, millis });
        try {
            removeLocalState(cacheKey);
        } catch (IOException e) {
            getLogger().info("Error trying to delete key {} from cache: {}",
                    new Object[] { cacheKey, e.getMessage() });
        }
    } catch (final ProcessException | AmazonClientException pe) {
        if (pe.getMessage().contains(S3_PROCESS_UNSCHEDULED_MESSAGE)) {
            getLogger().info(pe.getMessage());
            session.rollback();
        } else {
            getLogger().error("Failed to put {} to Amazon S3 due to {}", new Object[] { flowFile, pe });
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_FAILURE);
        }
    }

}