Example usage for com.amazonaws.services.s3 AmazonS3Client uploadPart

Introduction

On this page you can find example usages of com.amazonaws.services.s3.AmazonS3Client.uploadPart, collected from open-source projects.

Prototype

@Override
public UploadPartResult uploadPart(UploadPartRequest uploadPartRequest)
        throws SdkClientException, AmazonServiceException
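
For orientation, the sketch below shows where uploadPart sits in the standard SDK v1 multipart sequence (initiate, upload parts, complete, abort on failure). The bucket, key, and file are hypothetical placeholders; the calls themselves are the same ones used in the examples further down this page.

static void uploadMultipart(AmazonS3Client s3, String bucket, String key, File file) {
    // Assumes the usual com.amazonaws.services.s3.model.* imports used throughout this page.
    InitiateMultipartUploadResult init = s3.initiateMultipartUpload(
            new InitiateMultipartUploadRequest(bucket, key));
    List<PartETag> partETags = new ArrayList<PartETag>();
    long partSize = 5 * 1024 * 1024; // S3 requires at least 5 MB for every part except the last
    long filePosition = 0;
    try {
        for (int partNumber = 1; filePosition < file.length(); partNumber++) {
            long size = Math.min(partSize, file.length() - filePosition);
            UploadPartResult result = s3.uploadPart(new UploadPartRequest()
                    .withBucketName(bucket).withKey(key)
                    .withUploadId(init.getUploadId())
                    .withPartNumber(partNumber)
                    .withFile(file).withFileOffset(filePosition)
                    .withPartSize(size));
            partETags.add(result.getPartETag()); // collect the ETags for the complete call
            filePosition += size;
        }
        s3.completeMultipartUpload(new CompleteMultipartUploadRequest(
                bucket, key, init.getUploadId(), partETags));
    } catch (AmazonClientException e) {
        // A failed part would otherwise leave an orphaned upload; abort to clean up.
        s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, init.getUploadId()));
        throw e;
    }
}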

Usage

From source file: com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java

License: Open Source License

@Override
public UploadPartResponseType uploadPart(UploadPartType request, InputStream dataContent) throws S3Exception {
    String bucketName = request.getBucket();
    String key = request.getKey();

    User requestUser = getRequestUser(request);
    OsgInternalS3Client internalS3Client = null;
    try {
        internalS3Client = getS3Client(requestUser);
        AmazonS3Client s3Client = internalS3Client.getS3Client();

        UploadPartResult result;
        UploadPartRequest uploadPartRequest = new UploadPartRequest();
        uploadPartRequest.setBucketName(bucketName);
        uploadPartRequest.setKey(key);
        uploadPartRequest.setInputStream(dataContent);
        uploadPartRequest.setUploadId(request.getUploadId());
        uploadPartRequest.setPartNumber(Integer.valueOf(request.getPartNumber()));
        uploadPartRequest.setMd5Digest(request.getContentMD5());
        uploadPartRequest.setPartSize(Long.valueOf(request.getContentLength()));
        try {
            result = s3Client.uploadPart(uploadPartRequest);
        } catch (AmazonServiceException e) {
            LOG.debug("Error from backend", e);
            throw S3ExceptionMapper.fromAWSJavaSDK(e);
        }
        UploadPartResponseType reply = request.getReply();
        reply.setEtag(result.getETag());
        reply.setLastModified(new Date());
        return reply;
    } catch (AmazonServiceException e) {
        LOG.debug("Error from backend", e);
        throw S3ExceptionMapper.fromAWSJavaSDK(e);
    }
}
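
Note that the provider simply forwards the client's part metadata onto the backend request: setMd5Digest expects the base64-encoded MD5 of the part (the same form as the Content-MD5 header), so the incoming header value can, as here, be passed through unchanged for S3 to verify data integrity.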

From source file: com.netflix.dynomitemanager.sidecore.backup.S3Backup.java

License: Apache License

/**
 * Uses the Amazon S3 API to upload the AOF/RDB to S3
 * Filename: Backup location + DC + Rack + App + Token
 */
@Override
public boolean upload(File file, DateTime todayStart) {
    logger.info("Snapshot backup: sending " + file.length() + " bytes to S3");

    /*
     * Key name is composed of
     * backupDir + DC + Rack + token + date
     */
    String keyName = config.getBackupLocation() + "/" + iid.getInstance().getDatacenter() + "/"
            + iid.getInstance().getRack() + "/" + iid.getInstance().getToken() + "/" + todayStart.getMillis();

    // Log the target bucket and key.
    logger.info("Key in Bucket: " + keyName);
    logger.info("S3 Bucket Name:" + config.getBucketName());

    AmazonS3Client s3Client = new AmazonS3Client(cred.getAwsCredentialProvider());

    try {
        // Check that the S3 bucket exists; if it does not, fail the backup

        if (!(s3Client.doesBucketExist(config.getBucketName()))) {
            logger.error("Bucket with name: " + config.getBucketName() + " does not exist");
            return false;
        } else {
            logger.info("Uploading data to S3\n");
            // Create a list to hold the PartETag returned by each part upload.
            List<PartETag> partETags = new ArrayList<PartETag>();

            InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(
                    config.getBucketName(), keyName);

            InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);

            long contentLength = file.length();
            long filePosition = 0;
            long partSize = this.initPartSize;

            try {
                for (int i = 1; filePosition < contentLength; i++) {
                    // Last part can be less than initPartSize (500MB). Adjust part size.
                    partSize = Math.min(partSize, (contentLength - filePosition));

                    // Create request to upload a part.
                    UploadPartRequest uploadRequest = new UploadPartRequest()
                            .withBucketName(config.getBucketName()).withKey(keyName)
                            .withUploadId(initResponse.getUploadId()).withPartNumber(i)
                            .withFileOffset(filePosition).withFile(file).withPartSize(partSize);

                    // Upload part and add response to our list.
                    partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());

                    filePosition += partSize;
                }

                CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                        config.getBucketName(), keyName, initResponse.getUploadId(), partETags);

                s3Client.completeMultipartUpload(compRequest);

            } catch (Exception e) {
                logger.error("Aborting multipart upload due to error", e);
                s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(config.getBucketName(), keyName,
                        initResponse.getUploadId()));
                return false;
            }

            return true;
        }
    } catch (AmazonServiceException ase) {

        logger.error("AmazonServiceException: the request made it to Amazon S3 but was rejected with an error");
        logger.error("Error Message:    " + ase.getMessage());
        logger.error("HTTP Status Code: " + ase.getStatusCode());
        logger.error("AWS Error Code:   " + ase.getErrorCode());
        logger.error("Error Type:       " + ase.getErrorType());
        logger.error("Request ID:       " + ase.getRequestId());
        return false;

    } catch (AmazonClientException ace) {
        logger.error("AmazonClientException: the client encountered an internal error"
                + " while trying to communicate with S3");
        logger.error("Error Message: " + ace.getMessage());
        return false;
    }
}

From source file: com.netflix.exhibitor.core.s3.S3ClientFactoryImpl.java

License: Apache License

@Override
public S3Client makeNewClient(final S3Credential credentials) throws Exception {
    return new S3Client() {
        private final AtomicReference<RefCountedClient> client = new AtomicReference<RefCountedClient>(null);

        {
            changeCredentials(credentials);
        }

        @Override
        public void changeCredentials(S3Credential credential) throws Exception {
            RefCountedClient newRefCountedClient = (credential != null)
                    ? new RefCountedClient(new AmazonS3Client(new BasicAWSCredentials(
                            credential.getAccessKeyId(), credential.getSecretAccessKey())))
                    : null;
            RefCountedClient oldRefCountedClient = client.getAndSet(newRefCountedClient);
            if (oldRefCountedClient != null) {
                oldRefCountedClient.markForDelete();
            }
        }

        @Override
        public void close() throws IOException {
            try {
                changeCredentials(null);
            } catch (Exception e) {
                throw new IOException(e);
            }
        }

        @Override
        public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest request)
                throws Exception {
            RefCountedClient holder = client.get();
            AmazonS3Client amazonS3Client = holder.useClient();
            try {
                return amazonS3Client.initiateMultipartUpload(request);
            } finally {
                holder.release();
            }
        }

        @Override
        public PutObjectResult putObject(PutObjectRequest request) throws Exception {
            RefCountedClient holder = client.get();
            AmazonS3Client amazonS3Client = holder.useClient();
            try {
                return amazonS3Client.putObject(request);
            } finally {
                holder.release();
            }
        }

        @Override
        public S3Object getObject(String bucket, String key) throws Exception {
            RefCountedClient holder = client.get();
            AmazonS3Client amazonS3Client = holder.useClient();
            try {
                return amazonS3Client.getObject(bucket, key);
            } finally {
                holder.release();
            }
        }

        @Override
        public ObjectListing listObjects(ListObjectsRequest request) throws Exception {
            RefCountedClient holder = client.get();
            AmazonS3Client amazonS3Client = holder.useClient();
            try {
                return amazonS3Client.listObjects(request);
            } finally {
                holder.release();
            }
        }

        @Override
        public ObjectListing listNextBatchOfObjects(ObjectListing previousObjectListing) throws Exception {
            RefCountedClient holder = client.get();
            AmazonS3Client amazonS3Client = holder.useClient();
            try {
                return amazonS3Client.listNextBatchOfObjects(previousObjectListing);
            } finally {
                holder.release();
            }
        }

        @Override
        public void deleteObject(String bucket, String key) throws Exception {
            RefCountedClient holder = client.get();
            AmazonS3Client amazonS3Client = holder.useClient();
            try {
                amazonS3Client.deleteObject(bucket, key);
            } finally {
                holder.release();
            }
        }

        @Override
        public UploadPartResult uploadPart(UploadPartRequest request) throws Exception {
            RefCountedClient holder = client.get();
            AmazonS3Client amazonS3Client = holder.useClient();
            try {
                return amazonS3Client.uploadPart(request);
            } finally {
                holder.release();
            }
        }

        @Override
        public void completeMultipartUpload(CompleteMultipartUploadRequest request) throws Exception {
            RefCountedClient holder = client.get();
            AmazonS3Client amazonS3Client = holder.useClient();
            try {
                amazonS3Client.completeMultipartUpload(request);
            } finally {
                holder.release();
            }
        }

        @Override
        public void abortMultipartUpload(AbortMultipartUploadRequest request) throws Exception {
            RefCountedClient holder = client.get();
            AmazonS3Client amazonS3Client = holder.useClient();
            try {
                amazonS3Client.abortMultipartUpload(request);
            } finally {
                holder.release();
            }
        }
    };
}

From source file: com.netflix.exhibitor.core.s3.S3ClientImpl.java

License: Apache License

@Override
public UploadPartResult uploadPart(UploadPartRequest request) throws Exception {
    RefCountedClient holder = client.get();
    AmazonS3Client amazonS3Client = holder.useClient();
    try {
        return amazonS3Client.uploadPart(request);
    } finally {
        holder.release();
    }
}
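
Both Exhibitor variants route uploadPart through the same useClient()/release() pattern: the RefCountedClient keeps the underlying AmazonS3Client alive while a call is in flight, so changeCredentials() can swap in a new client and mark the old one for deletion without disrupting uploads that are still using it.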

From source file: com.streamsets.datacollector.bundles.SupportBundleManager.java

License: Apache License

/**
 * Instead of providing the support bundle directly to the user, upload it to StreamSets backend services.
 */
public void uploadNewBundleFromInstances(List<BundleContentGenerator> generators, BundleType bundleType)
        throws IOException {
    // Generate bundle
    SupportBundle bundle = generateNewBundleFromInstances(generators, bundleType);

    boolean enabled = configuration.get(Constants.UPLOAD_ENABLED, Constants.DEFAULT_UPLOAD_ENABLED);
    String accessKey = configuration.get(Constants.UPLOAD_ACCESS, Constants.DEFAULT_UPLOAD_ACCESS);
    String secretKey = configuration.get(Constants.UPLOAD_SECRET, Constants.DEFAULT_UPLOAD_SECRET);
    String bucket = configuration.get(Constants.UPLOAD_BUCKET, Constants.DEFAULT_UPLOAD_BUCKET);
    int bufferSize = configuration.get(Constants.UPLOAD_BUFFER_SIZE, Constants.DEFAULT_UPLOAD_BUFFER_SIZE);

    if (!enabled) {
        throw new IOException("Uploading support bundles was disabled by the administrator.");
    }

    AWSCredentialsProvider credentialsProvider = new StaticCredentialsProvider(
            new BasicAWSCredentials(accessKey, secretKey));
    AmazonS3Client s3Client = new AmazonS3Client(credentialsProvider, new ClientConfiguration());
    s3Client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
    s3Client.setRegion(Region.getRegion(Regions.US_WEST_2));

    // Object Metadata
    ObjectMetadata s3Metadata = new ObjectMetadata();
    for (Map.Entry<Object, Object> entry : getMetadata(bundleType).entrySet()) {
        s3Metadata.addUserMetadata((String) entry.getKey(), (String) entry.getValue());
    }

    List<PartETag> partETags;
    InitiateMultipartUploadResult initResponse = null;
    try {
        // Uploading part by part
        LOG.info("Initiating multi-part support bundle upload");
        partETags = new ArrayList<>();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket,
                bundle.getBundleKey());
        initRequest.setObjectMetadata(s3Metadata);
        initResponse = s3Client.initiateMultipartUpload(initRequest);
    } catch (AmazonClientException e) {
        LOG.error("Support bundle upload failed: ", e);
        throw new IOException("Support bundle upload failed", e);
    }

    try {
        byte[] buffer = new byte[bufferSize];
        int partId = 1;
        int size = -1;
        while ((size = readFully(bundle.getInputStream(), buffer)) != -1) {
            LOG.debug("Uploading part {} of size {}", partId, size);
            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(bundle.getBundleKey()).withUploadId(initResponse.getUploadId())
                    .withPartNumber(partId++).withInputStream(new ByteArrayInputStream(buffer))
                    .withPartSize(size);

            partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
        }

        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucket,
                bundle.getBundleKey(), initResponse.getUploadId(), partETags);

        s3Client.completeMultipartUpload(compRequest);
        LOG.info("Support bundle upload finished");
    } catch (Exception e) {
        LOG.error("Support bundle upload failed", e);
        s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, bundle.getBundleKey(), initResponse.getUploadId()));

        throw new IOException("Can't upload support bundle", e);
    } finally {
        // Close the client
        s3Client.shutdown();
    }
}
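
Note that each part is streamed from a ByteArrayInputStream over the whole buffer; it is the withPartSize(size) setting that tells the SDK how many bytes to read, so the final, partially filled buffer is still uploaded with the correct length.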

From source file: org.apache.nifi.processors.aws.s3.PutS3Object.java

License: Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();

    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions(flowFile).getValue();
    final String key = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    final String cacheKey = getIdentifier() + "/" + bucket + "/" + key;

    final AmazonS3Client s3 = getClient();
    final FlowFile ff = flowFile;
    final Map<String, String> attributes = new HashMap<>();
    final String ffFilename = ff.getAttributes().get(CoreAttributes.FILENAME.key());
    attributes.put(S3_BUCKET_KEY, bucket);
    attributes.put(S3_OBJECT_KEY, key);

    final Long multipartThreshold = context.getProperty(MULTIPART_THRESHOLD).asDataSize(DataUnit.B).longValue();
    final Long multipartPartSize = context.getProperty(MULTIPART_PART_SIZE).asDataSize(DataUnit.B).longValue();

    final long now = System.currentTimeMillis();

    /*
     * If necessary, run age off for existing uploads in AWS S3 and local state
     */
    ageoffS3Uploads(context, s3, now);

    /*
     * Then upload the FlowFile content, using a single PUT or a multipart upload depending on its size
     */
    try {
        session.read(flowFile, new InputStreamCallback() {
            @Override
            public void process(final InputStream rawIn) throws IOException {
                try (final InputStream in = new BufferedInputStream(rawIn)) {
                    final ObjectMetadata objectMetadata = new ObjectMetadata();
                    objectMetadata.setContentDisposition(ff.getAttribute(CoreAttributes.FILENAME.key()));
                    objectMetadata.setContentLength(ff.getSize());

                    final String contentType = context.getProperty(CONTENT_TYPE)
                            .evaluateAttributeExpressions(ff).getValue();
                    if (contentType != null) {
                        objectMetadata.setContentType(contentType);
                        attributes.put(S3_CONTENT_TYPE, contentType);
                    }

                    final String expirationRule = context.getProperty(EXPIRATION_RULE_ID)
                            .evaluateAttributeExpressions(ff).getValue();
                    if (expirationRule != null) {
                        objectMetadata.setExpirationTimeRuleId(expirationRule);
                    }

                    final Map<String, String> userMetadata = new HashMap<>();
                    for (final Map.Entry<PropertyDescriptor, String> entry : context.getProperties()
                            .entrySet()) {
                        if (entry.getKey().isDynamic()) {
                            final String value = context.getProperty(entry.getKey())
                                    .evaluateAttributeExpressions(ff).getValue();
                            userMetadata.put(entry.getKey().getName(), value);
                        }
                    }

                    final String serverSideEncryption = context.getProperty(SERVER_SIDE_ENCRYPTION).getValue();
                    if (!serverSideEncryption.equals(NO_SERVER_SIDE_ENCRYPTION)) {
                        objectMetadata.setSSEAlgorithm(serverSideEncryption);
                        attributes.put(S3_SSE_ALGORITHM, serverSideEncryption);
                    }

                    if (!userMetadata.isEmpty()) {
                        objectMetadata.setUserMetadata(userMetadata);
                    }

                    if (ff.getSize() <= multipartThreshold) {
                        //----------------------------------------
                        // single part upload
                        //----------------------------------------
                        final PutObjectRequest request = new PutObjectRequest(bucket, key, in, objectMetadata);
                        request.setStorageClass(
                                StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                        final AccessControlList acl = createACL(context, ff);
                        if (acl != null) {
                            request.setAccessControlList(acl);
                        }
                        final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                        if (cannedAcl != null) {
                            request.withCannedAcl(cannedAcl);
                        }

                        try {
                            final PutObjectResult result = s3.putObject(request);
                            if (result.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, result.getVersionId());
                            }
                            if (result.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, result.getETag());
                            }
                            if (result.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY, result.getExpirationTime().toString());
                            }
                            if (result.getMetadata().getRawMetadata().keySet()
                                    .contains(S3_STORAGECLASS_META_KEY)) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY, result.getMetadata()
                                        .getRawMetadataValue(S3_STORAGECLASS_META_KEY).toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_PUTOBJECT);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}",
                                    new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    } else {
                        //----------------------------------------
                        // multipart upload
                        //----------------------------------------

                        // load or create persistent state
                        //------------------------------------------------------------
                        MultipartState currentState;
                        try {
                            currentState = getLocalStateIfInS3(s3, bucket, cacheKey);
                            if (currentState != null) {
                                if (currentState.getPartETags().size() > 0) {
                                    final PartETag lastETag = currentState.getPartETags()
                                            .get(currentState.getPartETags().size() - 1);
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' "
                                            + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' "
                                            + "contentLength='{}' partsLoaded={} lastPart={}/{}",
                                            new Object[] { ffFilename, bucket, key, currentState.getUploadId(),
                                                    currentState.getFilePosition(), currentState.getPartSize(),
                                                    currentState.getStorageClass().toString(),
                                                    currentState.getContentLength(),
                                                    currentState.getPartETags().size(),
                                                    Integer.toString(lastETag.getPartNumber()),
                                                    lastETag.getETag() });
                                } else {
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' "
                                            + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' "
                                            + "contentLength='{}' no partsLoaded",
                                            new Object[] { ffFilename, bucket, key, currentState.getUploadId(),
                                                    currentState.getFilePosition(), currentState.getPartSize(),
                                                    currentState.getStorageClass().toString(),
                                                    currentState.getContentLength() });
                                }
                            } else {
                                currentState = new MultipartState();
                                currentState.setPartSize(multipartPartSize);
                                currentState.setStorageClass(
                                        StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                                currentState.setContentLength(ff.getSize());
                                persistLocalState(cacheKey, currentState);
                                getLogger().info("Starting new upload for flowfile='{}' bucket='{}' key='{}'",
                                        new Object[] { ffFilename, bucket, key });
                            }
                        } catch (IOException e) {
                            getLogger().error("IOException initiating cache state while processing flow files: "
                                    + e.getMessage());
                            throw (e);
                        }

                        // initiate multipart upload or find position in file
                        //------------------------------------------------------------
                        if (currentState.getUploadId().isEmpty()) {
                            final InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(
                                    bucket, key, objectMetadata);
                            initiateRequest.setStorageClass(currentState.getStorageClass());
                            final AccessControlList acl = createACL(context, ff);
                            if (acl != null) {
                                initiateRequest.setAccessControlList(acl);
                            }
                            final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                            if (cannedAcl != null) {
                                initiateRequest.withCannedACL(cannedAcl);
                            }
                            try {
                                final InitiateMultipartUploadResult initiateResult = s3
                                        .initiateMultipartUpload(initiateRequest);
                                currentState.setUploadId(initiateResult.getUploadId());
                                currentState.getPartETags().clear();
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state while processing flow file: "
                                            + e.getMessage());
                                    throw (new ProcessException("Exception saving cache state", e));
                                }
                                getLogger().info(
                                        "Success initiating upload flowfile={} available={} position={} "
                                                + "length={} bucket={} key={} uploadId={}",
                                        new Object[] { ffFilename, in.available(),
                                                currentState.getFilePosition(), currentState.getContentLength(),
                                                bucket, key, currentState.getUploadId() });
                                if (initiateResult.getUploadId() != null) {
                                    attributes.put(S3_UPLOAD_ID_ATTR_KEY, initiateResult.getUploadId());
                                }
                            } catch (AmazonClientException e) {
                                getLogger().info(
                                        "Failure initiating upload flowfile={} bucket={} key={} reason={}",
                                        new Object[] { ffFilename, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        } else {
                            if (currentState.getFilePosition() > 0) {
                                try {
                                    final long skipped = in.skip(currentState.getFilePosition());
                                    if (skipped != currentState.getFilePosition()) {
                                        getLogger().info(
                                                "Failure skipping to resume upload flowfile={} "
                                                        + "bucket={} key={} position={} skipped={}",
                                                new Object[] { ffFilename, bucket, key,
                                                        currentState.getFilePosition(), skipped });
                                    }
                                } catch (Exception e) {
                                    getLogger().info(
                                            "Failure skipping to resume upload flowfile={} bucket={} "
                                                    + "key={} position={} reason={}",
                                            new Object[] { ffFilename, bucket, key,
                                                    currentState.getFilePosition(), e.getMessage() });
                                    throw (new ProcessException(e));
                                }
                            }
                        }

                        // upload parts
                        //------------------------------------------------------------
                        long thisPartSize;
                        for (int part = currentState.getPartETags().size() + 1; currentState
                                .getFilePosition() < currentState.getContentLength(); part++) {
                            if (!PutS3Object.this.isScheduled()) {
                                throw new IOException(S3_PROCESS_UNSCHEDULED_MESSAGE + " flowfile=" + ffFilename
                                        + " part=" + part + " uploadId=" + currentState.getUploadId());
                            }
                            thisPartSize = Math.min(currentState.getPartSize(),
                                    (currentState.getContentLength() - currentState.getFilePosition()));
                            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucket)
                                    .withKey(key).withUploadId(currentState.getUploadId()).withInputStream(in)
                                    .withPartNumber(part).withPartSize(thisPartSize);
                            try {
                                UploadPartResult uploadPartResult = s3.uploadPart(uploadRequest);
                                currentState.addPartETag(uploadPartResult.getPartETag());
                                currentState.setFilePosition(currentState.getFilePosition() + thisPartSize);
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state processing flow file: "
                                            + e.getMessage());
                                }
                                getLogger().info(
                                        "Success uploading part flowfile={} part={} available={} "
                                                + "etag={} uploadId={}",
                                        new Object[] { ffFilename, part, in.available(),
                                                uploadPartResult.getETag(), currentState.getUploadId() });
                            } catch (AmazonClientException e) {
                                getLogger().info(
                                        "Failure uploading part flowfile={} part={} bucket={} key={} "
                                                + "reason={}",
                                        new Object[] { ffFilename, part, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        }

                        // complete multipart upload
                        //------------------------------------------------------------
                        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(
                                bucket, key, currentState.getUploadId(), currentState.getPartETags());
                        try {
                            CompleteMultipartUploadResult completeResult = s3
                                    .completeMultipartUpload(completeRequest);
                            getLogger().info("Success completing upload flowfile={} etag={} uploadId={}",
                                    new Object[] { ffFilename, completeResult.getETag(),
                                            currentState.getUploadId() });
                            if (completeResult.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, completeResult.getVersionId());
                            }
                            if (completeResult.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, completeResult.getETag());
                            }
                            if (completeResult.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY,
                                        completeResult.getExpirationTime().toString());
                            }
                            if (currentState.getStorageClass() != null) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY,
                                        currentState.getStorageClass().toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_MULTIPARTUPLOAD);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}",
                                    new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    }
                }
            }
        });

        if (!attributes.isEmpty()) {
            flowFile = session.putAllAttributes(flowFile, attributes);
        }
        session.transfer(flowFile, REL_SUCCESS);

        final String url = s3.getResourceUrl(bucket, key);
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        session.getProvenanceReporter().send(flowFile, url, millis);

        getLogger().info("Successfully put {} to Amazon S3 in {} milliseconds", new Object[] { ff, millis });
        try {
            removeLocalState(cacheKey);
        } catch (IOException e) {
            getLogger().info("Error trying to delete key {} from cache: {}",
                    new Object[] { cacheKey, e.getMessage() });
        }
    } catch (final ProcessException | AmazonClientException pe) {
        if (pe.getMessage().contains(S3_PROCESS_UNSCHEDULED_MESSAGE)) {
            getLogger().info(pe.getMessage());
            session.rollback();
        } else {
            getLogger().error("Failed to put {} to Amazon S3 due to {}", new Object[] { flowFile, pe });
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_FAILURE);
        }
    }

}