Example usage for com.amazonaws.services.s3.model CompleteMultipartUploadResult getVersionId

Introduction

On this page you can find example usage for com.amazonaws.services.s3.model CompleteMultipartUploadResult.getVersionId.

Prototype

public String getVersionId() 

Document

Returns the version ID of the new object, only present if versioning has been enabled for the bucket.
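
Before the real-world examples below, here is a minimal sketch of the call pattern (the class, method, and parameter names are placeholders, not part of any project quoted here): complete the multipart upload, then read the version ID from the result. The null check matters because getVersionId() returns null when versioning is not enabled on the bucket.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
import com.amazonaws.services.s3.model.PartETag;
import java.util.List;

public class GetVersionIdSketch {
    /**
     * Completes a multipart upload and returns the new object's version ID,
     * or null if the bucket is not versioned.
     */
    static String completeAndGetVersionId(AmazonS3 s3, String bucket, String key,
            String uploadId, List<PartETag> partETags) {
        CompleteMultipartUploadResult result = s3.completeMultipartUpload(
                new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags));
        return result.getVersionId();
    }
}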

Usage

From source file: com.universal.storage.UniversalS3Storage.java

License: Open Source License

/**
 * This method uploads a file with a length greater than PART_SIZE (5 MB).
 *
 * @param file to be stored within the storage.
 * @param path is the path for this new file within the root.
 * @throws UniversalIOException when a specific IO error occurs.
 */
private void uploadFile(File file, String path) throws UniversalIOException {
    // Create a list of PartETag objects; you get one back from
    // each part upload.
    List<PartETag> partETags = new ArrayList<PartETag>();

    // Step 1: Initialize.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(this.settings.getRoot(),
            file.getName());
    InitiateMultipartUploadResult initResponse = this.s3client.initiateMultipartUpload(initRequest);

    long contentLength = file.length();
    long partSize = PART_SIZE; // Set part size to 5 MB.

    ObjectMetadata objectMetadata = new ObjectMetadata();
    if (this.settings.getEncryption()) {
        objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }

    List<Tag> tags = new ArrayList<Tag>();
    for (String key : this.settings.getTags().keySet()) {
        tags.add(new Tag(key, this.settings.getTags().get(key)));
    }

    try {
        this.triggerOnStoreFileListeners();
        // Step 2: Upload parts.
        long filePosition = 0;
        for (int i = 1; filePosition < contentLength; i++) {
            // Last part can be less than 5 MB. Adjust part size.
            partSize = Math.min(partSize, (contentLength - filePosition));

            // Create request to upload a part. The bucket name and key must
            // match the ones used to initiate the multipart upload.
            UploadPartRequest uploadRequest = new UploadPartRequest()
                    .withBucketName(this.settings.getRoot()).withKey(file.getName())
                    .withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withFileOffset(filePosition).withFile(file).withObjectMetadata(objectMetadata)
                    .withPartSize(partSize);

            // Upload part and add response to our list.
            partETags.add(this.s3client.uploadPart(uploadRequest).getPartETag());

            filePosition += partSize;
        }

        // Step 3: Complete, using the same bucket name and key as the
        // initiate request.
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                this.settings.getRoot(), file.getName(), initResponse.getUploadId(), partETags);

        CompleteMultipartUploadResult result = this.s3client.completeMultipartUpload(compRequest);

        StorageClass storageClass = getStorageClass();
        if (storageClass != StorageClass.Standard) {
            CopyObjectRequest copyObjectRequest = new CopyObjectRequest(this.settings.getRoot(), file.getName(),
                    this.settings.getRoot(), file.getName()).withStorageClass(storageClass);

            this.s3client.copyObject(copyObjectRequest);
        }

        if (!tags.isEmpty()) {
            this.s3client.setObjectTagging(new SetObjectTaggingRequest(this.settings.getRoot(), file.getName(),
                    new ObjectTagging(tags)));
        }

        this.triggerOnFileStoredListeners(new UniversalStorageData(file.getName(),
                PREFIX_S3_URL + (this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path))) + "/"
                        + file.getName(),
                result.getVersionId(), this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path))));
    } catch (Exception e) {
        this.s3client.abortMultipartUpload(new AbortMultipartUploadRequest(this.settings.getRoot(),
                file.getName(), initResponse.getUploadId()));

        UniversalIOException error = new UniversalIOException(e.getMessage());
        this.triggerOnErrorListeners(error);
        throw error;
    }
}
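
In the example above, result.getVersionId() is handed straight to the listener and will be null if the bucket is unversioned. As a minimal sketch (the helper below is illustrative, not part of UniversalS3Storage), versioning can be enabled through the same client so that completed uploads carry a version ID:

private void enableVersioning(String bucketName) {
    // After versioning is enabled, newly written objects return a non-null
    // version ID from getVersionId().
    this.s3client.setBucketVersioningConfiguration(new SetBucketVersioningConfigurationRequest(
            bucketName, new BucketVersioningConfiguration(BucketVersioningConfiguration.ENABLED)));
}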

From source file: gov.cdc.sdp.cbr.aphl.AphlS3Producer.java

License: Apache License

public void processMultiPart(final Exchange exchange) throws Exception {
    File filePayload = null;
    Object obj = exchange.getIn().getMandatoryBody();
    // Need to check if the message body is WrappedFile
    if (obj instanceof WrappedFile) {
        obj = ((WrappedFile<?>) obj).getFile();
    }
    if (obj instanceof File) {
        filePayload = (File) obj;
    } else {
        LOG.error("aphl-s3: MultiPart upload requires a File input.");
        throw new InvalidArgumentException("aphl-s3: MultiPart upload requires a File input.");
    }

    ObjectMetadata objectMetadata = determineMetadata(exchange);
    if (objectMetadata.getContentLength() == 0) {
        objectMetadata.setContentLength(filePayload.length());
    }

    final String keyName = determineKey(exchange);
    final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(
            getConfiguration().getBucketName(), keyName, objectMetadata);

    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        initRequest.setStorageClass(StorageClass.fromValue(storageClass));
    }

    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        initRequest.setCannedACL(objectAcl);
    }

    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // Note: if both a canned ACL and an ACL are specified, the one set
        // last wins; see PutObjectRequest#setAccessControlList for details.
        initRequest.setAccessControlList(acl);
    }

    LOG.trace("Initiating multipart upload ...");

    final InitiateMultipartUploadResult initResponse = getEndpoint().getS3Client()
            .initiateMultipartUpload(initRequest);
    final long contentLength = objectMetadata.getContentLength();
    final List<PartETag> partETags = new ArrayList<PartETag>();
    long partSize = getConfiguration().getPartSize();
    CompleteMultipartUploadResult uploadResult = null;

    long filePosition = 0;

    try {
        for (int part = 1; filePosition < contentLength; part++) {
            partSize = Math.min(partSize, contentLength - filePosition);

            UploadPartRequest uploadRequest = new UploadPartRequest()
                    .withBucketName(getConfiguration().getBucketName()).withKey(keyName)
                    .withUploadId(initResponse.getUploadId()).withPartNumber(part).withFileOffset(filePosition)
                    .withFile(filePayload).withPartSize(partSize);

            partETags.add(getEndpoint().getS3Client().uploadPart(uploadRequest).getPartETag());

            filePosition += partSize;
        }
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                getConfiguration().getBucketName(), keyName, initResponse.getUploadId(), partETags);

        uploadResult = getEndpoint().getS3Client().completeMultipartUpload(compRequest);

    } catch (Exception exception) {
        LOG.error("Multi-part upload failed, aborting", exception);
        getEndpoint().getS3Client().abortMultipartUpload(new AbortMultipartUploadRequest(
                getConfiguration().getBucketName(), keyName, initResponse.getUploadId()));
        throw exception;
    }

    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, uploadResult.getETag());
    if (uploadResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, uploadResult.getVersionId());
    }

    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        FileUtil.deleteFile(filePayload);
    }
}
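
Once processMultiPart returns, the version ID (when present) travels with the exchange as the VERSION_ID header. A minimal sketch of a downstream Camel Processor reading it back (the processor itself is hypothetical, not part of AphlS3Producer):

public void process(Exchange exchange) throws Exception {
    // Null when the producer did not set the header, i.e. the bucket is
    // not versioned.
    String versionId = exchange.getIn().getHeader(S3Constants.VERSION_ID, String.class);
    if (versionId != null) {
        LOG.info("Uploaded object version: {}", versionId);
    }
}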

From source file: io.konig.camel.aws.s3.DeleteObjectProducer.java

License: Apache License

public void processMultiPart(final Exchange exchange) throws Exception {
    File filePayload = null;
    Object obj = exchange.getIn().getMandatoryBody();
    // Need to check if the message body is WrappedFile
    if (obj instanceof WrappedFile) {
        obj = ((WrappedFile<?>) obj).getFile();
    }
    if (obj instanceof File) {
        filePayload = (File) obj;
    } else {
        throw new InvalidArgumentException("aws-s3: MultiPart upload requires a File input.");
    }

    ObjectMetadata objectMetadata = determineMetadata(exchange);
    if (objectMetadata.getContentLength() == 0) {
        objectMetadata.setContentLength(filePayload.length());
    }

    final String keyName = determineKey(exchange);
    final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(
            getConfiguration().getBucketName(), keyName, objectMetadata);

    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        initRequest.setStorageClass(StorageClass.fromValue(storageClass));
    }

    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        initRequest.setCannedACL(objectAcl);
    }

    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // Note: if both a canned ACL and an ACL are specified, the one set
        // last wins; see PutObjectRequest#setAccessControlList for details.
        initRequest.setAccessControlList(acl);
    }

    if (getConfiguration().isUseAwsKMS()) {
        SSEAwsKeyManagementParams keyManagementParams;
        if (ObjectHelper.isNotEmpty(getConfiguration().getAwsKMSKeyId())) {
            keyManagementParams = new SSEAwsKeyManagementParams(getConfiguration().getAwsKMSKeyId());
        } else {
            keyManagementParams = new SSEAwsKeyManagementParams();
        }
        initRequest.setSSEAwsKeyManagementParams(keyManagementParams);
    }

    LOG.trace("Initiating multipart upload [{}] from exchange [{}]...", initRequest, exchange);

    final InitiateMultipartUploadResult initResponse = getEndpoint().getS3Client()
            .initiateMultipartUpload(initRequest);
    final long contentLength = objectMetadata.getContentLength();
    final List<PartETag> partETags = new ArrayList<PartETag>();
    long partSize = getConfiguration().getPartSize();
    CompleteMultipartUploadResult uploadResult = null;

    long filePosition = 0;

    try {
        for (int part = 1; filePosition < contentLength; part++) {
            partSize = Math.min(partSize, contentLength - filePosition);

            UploadPartRequest uploadRequest = new UploadPartRequest()
                    .withBucketName(getConfiguration().getBucketName()).withKey(keyName)
                    .withUploadId(initResponse.getUploadId()).withPartNumber(part).withFileOffset(filePosition)
                    .withFile(filePayload).withPartSize(partSize);

            LOG.trace("Uploading part [{}] for {}", part, keyName);
            partETags.add(getEndpoint().getS3Client().uploadPart(uploadRequest).getPartETag());

            filePosition += partSize;
        }
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                getConfiguration().getBucketName(), keyName, initResponse.getUploadId(), partETags);

        uploadResult = getEndpoint().getS3Client().completeMultipartUpload(compRequest);

    } catch (Exception e) {
        getEndpoint().getS3Client().abortMultipartUpload(new AbortMultipartUploadRequest(
                getConfiguration().getBucketName(), keyName, initResponse.getUploadId()));
        throw e;
    }

    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, uploadResult.getETag());
    if (uploadResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, uploadResult.getVersionId());
    }

    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        FileUtil.deleteFile(filePayload);
    }
}

From source file: org.apache.nifi.processors.aws.s3.PutS3Object.java

License: Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();

    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions(flowFile).getValue();
    final String key = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    final String cacheKey = getIdentifier() + "/" + bucket + "/" + key;

    final AmazonS3Client s3 = getClient();
    final FlowFile ff = flowFile;
    final Map<String, String> attributes = new HashMap<>();
    final String ffFilename = ff.getAttributes().get(CoreAttributes.FILENAME.key());
    attributes.put(S3_BUCKET_KEY, bucket);
    attributes.put(S3_OBJECT_KEY, key);

    final Long multipartThreshold = context.getProperty(MULTIPART_THRESHOLD).asDataSize(DataUnit.B).longValue();
    final Long multipartPartSize = context.getProperty(MULTIPART_PART_SIZE).asDataSize(DataUnit.B).longValue();

    final long now = System.currentTimeMillis();

    /*
     * If necessary, run age off for existing uploads in AWS S3 and local state
     */
    ageoffS3Uploads(context, s3, now);

    /*
     * Then upload the FlowFile content, using a single-part put below the
     * multipart threshold and a resumable multipart upload above it.
     */
    try {
        session.read(flowFile, new InputStreamCallback() {
            @Override
            public void process(final InputStream rawIn) throws IOException {
                try (final InputStream in = new BufferedInputStream(rawIn)) {
                    final ObjectMetadata objectMetadata = new ObjectMetadata();
                    objectMetadata.setContentDisposition(ff.getAttribute(CoreAttributes.FILENAME.key()));
                    objectMetadata.setContentLength(ff.getSize());

                    final String contentType = context.getProperty(CONTENT_TYPE)
                            .evaluateAttributeExpressions(ff).getValue();
                    if (contentType != null) {
                        objectMetadata.setContentType(contentType);
                        attributes.put(S3_CONTENT_TYPE, contentType);
                    }

                    final String expirationRule = context.getProperty(EXPIRATION_RULE_ID)
                            .evaluateAttributeExpressions(ff).getValue();
                    if (expirationRule != null) {
                        objectMetadata.setExpirationTimeRuleId(expirationRule);
                    }

                    final Map<String, String> userMetadata = new HashMap<>();
                    for (final Map.Entry<PropertyDescriptor, String> entry : context.getProperties()
                            .entrySet()) {
                        if (entry.getKey().isDynamic()) {
                            final String value = context.getProperty(entry.getKey())
                                    .evaluateAttributeExpressions(ff).getValue();
                            userMetadata.put(entry.getKey().getName(), value);
                        }
                    }

                    final String serverSideEncryption = context.getProperty(SERVER_SIDE_ENCRYPTION).getValue();
                    if (!serverSideEncryption.equals(NO_SERVER_SIDE_ENCRYPTION)) {
                        objectMetadata.setSSEAlgorithm(serverSideEncryption);
                        attributes.put(S3_SSE_ALGORITHM, serverSideEncryption);
                    }

                    if (!userMetadata.isEmpty()) {
                        objectMetadata.setUserMetadata(userMetadata);
                    }

                    if (ff.getSize() <= multipartThreshold) {
                        //----------------------------------------
                        // single part upload
                        //----------------------------------------
                        final PutObjectRequest request = new PutObjectRequest(bucket, key, in, objectMetadata);
                        request.setStorageClass(
                                StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                        final AccessControlList acl = createACL(context, ff);
                        if (acl != null) {
                            request.setAccessControlList(acl);
                        }
                        final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                        if (cannedAcl != null) {
                            request.withCannedAcl(cannedAcl);
                        }

                        try {
                            final PutObjectResult result = s3.putObject(request);
                            if (result.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, result.getVersionId());
                            }
                            if (result.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, result.getETag());
                            }
                            if (result.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY, result.getExpirationTime().toString());
                            }
                            if (result.getMetadata().getRawMetadata().keySet()
                                    .contains(S3_STORAGECLASS_META_KEY)) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY, result.getMetadata()
                                        .getRawMetadataValue(S3_STORAGECLASS_META_KEY).toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_PUTOBJECT);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}",
                                    new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    } else {
                        //----------------------------------------
                        // multipart upload
                        //----------------------------------------

                        // load or create persistent state
                        //------------------------------------------------------------
                        MultipartState currentState;
                        try {
                            currentState = getLocalStateIfInS3(s3, bucket, cacheKey);
                            if (currentState != null) {
                                if (currentState.getPartETags().size() > 0) {
                                    final PartETag lastETag = currentState.getPartETags()
                                            .get(currentState.getPartETags().size() - 1);
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' "
                                            + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' "
                                            + "contentLength='{}' partsLoaded={} lastPart={}/{}",
                                            new Object[] { ffFilename, bucket, key, currentState.getUploadId(),
                                                    currentState.getFilePosition(), currentState.getPartSize(),
                                                    currentState.getStorageClass().toString(),
                                                    currentState.getContentLength(),
                                                    currentState.getPartETags().size(),
                                                    Integer.toString(lastETag.getPartNumber()),
                                                    lastETag.getETag() });
                                } else {
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' "
                                            + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' "
                                            + "contentLength='{}' no partsLoaded",
                                            new Object[] { ffFilename, bucket, key, currentState.getUploadId(),
                                                    currentState.getFilePosition(), currentState.getPartSize(),
                                                    currentState.getStorageClass().toString(),
                                                    currentState.getContentLength() });
                                }
                            } else {
                                currentState = new MultipartState();
                                currentState.setPartSize(multipartPartSize);
                                currentState.setStorageClass(
                                        StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                                currentState.setContentLength(ff.getSize());
                                persistLocalState(cacheKey, currentState);
                                getLogger().info("Starting new upload for flowfile='{}' bucket='{}' key='{}'",
                                        new Object[] { ffFilename, bucket, key });
                            }
                        } catch (IOException e) {
                            getLogger().error("IOException initiating cache state while processing flow files: "
                                    + e.getMessage());
                            throw (e);
                        }

                        // initiate multipart upload or find position in file
                        //------------------------------------------------------------
                        if (currentState.getUploadId().isEmpty()) {
                            final InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(
                                    bucket, key, objectMetadata);
                            initiateRequest.setStorageClass(currentState.getStorageClass());
                            final AccessControlList acl = createACL(context, ff);
                            if (acl != null) {
                                initiateRequest.setAccessControlList(acl);
                            }
                            final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                            if (cannedAcl != null) {
                                initiateRequest.withCannedACL(cannedAcl);
                            }
                            try {
                                final InitiateMultipartUploadResult initiateResult = s3
                                        .initiateMultipartUpload(initiateRequest);
                                currentState.setUploadId(initiateResult.getUploadId());
                                currentState.getPartETags().clear();
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state while processing flow file: "
                                            + e.getMessage());
                                    throw (new ProcessException("Exception saving cache state", e));
                                }
                                getLogger().info(
                                        "Success initiating upload flowfile={} available={} position={} "
                                                + "length={} bucket={} key={} uploadId={}",
                                        new Object[] { ffFilename, in.available(),
                                                currentState.getFilePosition(), currentState.getContentLength(),
                                                bucket, key, currentState.getUploadId() });
                                if (initiateResult.getUploadId() != null) {
                                    attributes.put(S3_UPLOAD_ID_ATTR_KEY, initiateResult.getUploadId());
                                }
                            } catch (AmazonClientException e) {
                                getLogger().info(
                                        "Failure initiating upload flowfile={} bucket={} key={} reason={}",
                                        new Object[] { ffFilename, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        } else {
                            if (currentState.getFilePosition() > 0) {
                                try {
                                    final long skipped = in.skip(currentState.getFilePosition());
                                    if (skipped != currentState.getFilePosition()) {
                                        getLogger().info(
                                                "Failure skipping to resume upload flowfile={} "
                                                        + "bucket={} key={} position={} skipped={}",
                                                new Object[] { ffFilename, bucket, key,
                                                        currentState.getFilePosition(), skipped });
                                    }
                                } catch (Exception e) {
                                    getLogger().info(
                                            "Failure skipping to resume upload flowfile={} bucket={} "
                                                    + "key={} position={} reason={}",
                                            new Object[] { ffFilename, bucket, key,
                                                    currentState.getFilePosition(), e.getMessage() });
                                    throw (new ProcessException(e));
                                }
                            }
                        }

                        // upload parts
                        //------------------------------------------------------------
                        long thisPartSize;
                        for (int part = currentState.getPartETags().size() + 1; currentState
                                .getFilePosition() < currentState.getContentLength(); part++) {
                            if (!PutS3Object.this.isScheduled()) {
                                throw new IOException(S3_PROCESS_UNSCHEDULED_MESSAGE + " flowfile=" + ffFilename
                                        + " part=" + part + " uploadId=" + currentState.getUploadId());
                            }
                            thisPartSize = Math.min(currentState.getPartSize(),
                                    (currentState.getContentLength() - currentState.getFilePosition()));
                            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucket)
                                    .withKey(key).withUploadId(currentState.getUploadId()).withInputStream(in)
                                    .withPartNumber(part).withPartSize(thisPartSize);
                            try {
                                UploadPartResult uploadPartResult = s3.uploadPart(uploadRequest);
                                currentState.addPartETag(uploadPartResult.getPartETag());
                                currentState.setFilePosition(currentState.getFilePosition() + thisPartSize);
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state processing flow file: "
                                            + e.getMessage());
                                }
                                getLogger().info(
                                        "Success uploading part flowfile={} part={} available={} "
                                                + "etag={} uploadId={}",
                                        new Object[] { ffFilename, part, in.available(),
                                                uploadPartResult.getETag(), currentState.getUploadId() });
                            } catch (AmazonClientException e) {
                                getLogger().info(
                                        "Failure uploading part flowfile={} part={} bucket={} key={} "
                                                + "reason={}",
                                        new Object[] { ffFilename, part, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        }

                        // complete multipart upload
                        //------------------------------------------------------------
                        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(
                                bucket, key, currentState.getUploadId(), currentState.getPartETags());
                        try {
                            CompleteMultipartUploadResult completeResult = s3
                                    .completeMultipartUpload(completeRequest);
                            getLogger().info("Success completing upload flowfile={} etag={} uploadId={}",
                                    new Object[] { ffFilename, completeResult.getETag(),
                                            currentState.getUploadId() });
                            if (completeResult.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, completeResult.getVersionId());
                            }
                            if (completeResult.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, completeResult.getETag());
                            }
                            if (completeResult.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY,
                                        completeResult.getExpirationTime().toString());
                            }
                            if (currentState.getStorageClass() != null) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY,
                                        currentState.getStorageClass().toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_MULTIPARTUPLOAD);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}",
                                    new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    }
                }
            }
        });

        if (!attributes.isEmpty()) {
            flowFile = session.putAllAttributes(flowFile, attributes);
        }
        session.transfer(flowFile, REL_SUCCESS);

        final String url = s3.getResourceUrl(bucket, key);
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        session.getProvenanceReporter().send(flowFile, url, millis);

        getLogger().info("Successfully put {} to Amazon S3 in {} milliseconds", new Object[] { ff, millis });
        try {
            removeLocalState(cacheKey);
        } catch (IOException e) {
            getLogger().info("Error trying to delete key {} from cache: {}",
                    new Object[] { cacheKey, e.getMessage() });
        }
    } catch (final ProcessException | AmazonClientException pe) {
        if (pe.getMessage().contains(S3_PROCESS_UNSCHEDULED_MESSAGE)) {
            getLogger().info(pe.getMessage());
            session.rollback();
        } else {
            getLogger().error("Failed to put {} to Amazon S3 due to {}", new Object[] { flowFile, pe });
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_FAILURE);
        }
    }

}