Example usage for com.amazonaws.services.s3.model ObjectMetadata setContentLength

List of usage examples for com.amazonaws.services.s3.model ObjectMetadata setContentLength

Introduction

On this page you can find example usage for com.amazonaws.services.s3.model ObjectMetadata setContentLength.

Prototype

public void setContentLength(long contentLength) 

Document

Sets the Content-Length HTTP header indicating the size of the associated object in bytes.
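
Before the project examples below, here is a minimal sketch of typical usage, not taken from any of the listed projects: it uploads a small in-memory object and sets the Content-Length from the byte array size. The bucket and key names are placeholders, and a default-configured AmazonS3 client is assumed.

byte[] payload = "hello world".getBytes(java.nio.charset.StandardCharsets.UTF_8);

ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentType("text/plain");
// Declaring the exact size up front lets the SDK send the Content-Length header
// without buffering the whole stream in memory to measure it.
metadata.setContentLength(payload.length);

AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
s3.putObject(new PutObjectRequest("example-bucket", "example-key",
        new java.io.ByteArrayInputStream(payload), metadata));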

Usage

From source file:org.alanwilliamson.amazon.s3.Write.java

License:Open Source License

private void writeData(AmazonKey amazonKey, String bucket, String key, Map<String, String> metadata,
        StorageClass storage, String mimetype, cfData data, int retry, int retryseconds, String acl,
        String aes256key, Map<String, String> customheaders) throws Exception {
    if (mimetype == null) {
        if (data.getDataType() == cfData.CFBINARYDATA)
            mimetype = "application/unknown";
        else if (cfData.isSimpleValue(data))
            mimetype = "text/plain";
        else
            mimetype = "application/json";

        // Check to see if the mime type is in the metadata
        if (metadata != null && metadata.containsKey("Content-Type"))
            mimetype = metadata.get("Content-Type");
    }

    InputStream ios = null;
    long size = 0;
    if (data.getDataType() == cfData.CFSTRINGDATA) {
        byte[] strBytes = data.getString().getBytes();
        ios = new java.io.ByteArrayInputStream(strBytes);
        size = strBytes.length; // use the byte length, not the character count, for Content-Length
    } else if (data.getDataType() == cfData.CFBINARYDATA) {
        ios = new java.io.ByteArrayInputStream(((cfBinaryData) data).getByteArray());
        size = ((cfBinaryData) data).getLength();
    } else {
        serializejson json = new serializejson();
        StringBuilder out = new StringBuilder();
        json.encodeJSON(out, data, false, CaseType.MAINTAIN, DateType.LONG);
        byte[] jsonBytes = out.toString().getBytes();
        size = jsonBytes.length; // byte length for Content-Length
        mimetype = "application/json";
        ios = new java.io.ByteArrayInputStream(jsonBytes);
    }

    // Set up the object metadata
    ObjectMetadata omd = new ObjectMetadata();
    if (metadata != null)
        omd.setUserMetadata(metadata);

    omd.setContentType(mimetype);
    omd.setContentLength(size);

    AmazonS3 s3Client = getAmazonS3(amazonKey);

    // Loop for the configured number of retry attempts
    int attempts = 0;
    while (attempts < retry) {
        try {

            PutObjectRequest por = new PutObjectRequest(bucket, key, ios, omd);
            por.setStorageClass(storage);

            if (aes256key != null && !aes256key.isEmpty())
                por.setSSECustomerKey(new SSECustomerKey(aes256key));

            if (acl != null && !acl.isEmpty())
                por.setCannedAcl(amazonKey.getAmazonCannedAcl(acl));

            if (customheaders != null && !customheaders.isEmpty()) {
                Iterator<String> it = customheaders.keySet().iterator();
                while (it.hasNext()) {
                    String k = it.next();
                    por.putCustomRequestHeader(k, customheaders.get(k));
                }
            }

            s3Client.putObject(por);
            break;

        } catch (Exception e) {
            cfEngine.log("Failed: AmazonS3Write(bucket=" + bucket + "; key=" + key + "; attempt="
                    + (attempts + 1) + "; exception=" + e.getMessage() + ")");
            attempts++;

            if (attempts == retry)
                throw e;
            else
                Thread.sleep(retryseconds * 1000);
        }
    }
}

From source file:org.anhonesteffort.p25.wav.WaveFileS3Sender.java

License:Open Source License

private ObjectMetadata metadata(long length) {
    ObjectMetadata metadata = new ObjectMetadata();

    metadata.setContentLength(length);
    metadata.addUserMetadata(METADATA_CHANNEL_ID, proto.toString(channelId));
    metadata.addUserMetadata(METADATA_TERMINATED, wasTerminated().toString());
    metadata.addUserMetadata(METADATA_START_TIME, getStartTime().toString());
    metadata.addUserMetadata(METADATA_END_TIME, getEndTime().toString());
    metadata.addUserMetadata(METADATA_LATITUDE, getLatitude().toString());
    metadata.addUserMetadata(METADATA_LONGITUDE, getLongitude().toString());

    return metadata;
}

From source file:org.apache.apex.malhar.lib.fs.s3.S3BlockUploadOperator.java

License:Apache License

/**
 * Uploads the block into the S3 bucket.
 * @param tuple block data
 */
protected void uploadBlockIntoS3(AbstractBlockReader.ReaderRecord<Slice> tuple) {
    if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
        return;
    }
    // Check whether the block metadata is present for this block
    if (blockIdToFilePath.get(tuple.getBlockId()) == null) {
        if (!waitingTuples.contains(tuple)) {
            waitingTuples.add(tuple);
        }
        return;
    }
    String uniqueBlockId = getUniqueBlockIdFromFile(tuple.getBlockId(),
            blockIdToFilePath.get(tuple.getBlockId()));
    S3BlockMetaData metaData = blockInfo.get(uniqueBlockId);
    // Check whether the file metadata is received
    if (metaData == null) {
        if (!waitingTuples.contains(tuple)) {
            waitingTuples.add(tuple);
        }
        return;
    }
    long partSize = tuple.getRecord().length;
    PartETag partETag = null;
    ByteArrayInputStream bis = new ByteArrayInputStream(tuple.getRecord().buffer);
    // Check whether the file consists of a single block
    if (metaData.isLastBlock && metaData.partNo == 1) {
        ObjectMetadata omd = createObjectMetadata();
        omd.setContentLength(partSize);
        PutObjectResult result = s3Client
                .putObject(new PutObjectRequest(bucketName, metaData.getKeyName(), bis, omd));
        partETag = new PartETag(1, result.getETag());
    } else {
        // Otherwise, upload using the multipart feature
        try {
            // Create request to upload a part.
            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName)
                    .withKey(metaData.getKeyName()).withUploadId(metaData.getUploadId())
                    .withPartNumber(metaData.getPartNo()).withInputStream(bis).withPartSize(partSize);
            partETag = s3Client.uploadPart(uploadRequest).getPartETag();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    UploadBlockMetadata uploadmetadata = new UploadBlockMetadata(partETag, metaData.getKeyName());
    output.emit(uploadmetadata);
    currentWindowRecoveryState.put(uniqueBlockId, uploadmetadata);
    try {
        bis.close();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}

From source file:org.apache.apex.malhar.lib.fs.s3.S3Reconciler.java

License:Apache License

/**
 * Uploads the file to Amazon S3 using the putObject API of the S3 client
 */
@Override
protected void processCommittedData(FSRecordCompactionOperator.OutputMetaData outputMetaData) {
    try {
        Path path = new Path(outputMetaData.getPath());
        if (!fs.exists(path)) {
            logger.debug("Ignoring non-existent path assuming replay : {}", path);
            return;
        }
        FSDataInputStream fsinput = fs.open(path);
        ObjectMetadata omd = new ObjectMetadata();
        omd.setContentLength(outputMetaData.getSize());
        String keyName = directoryName + Path.SEPARATOR + outputMetaData.getFileName();
        PutObjectRequest request = new PutObjectRequest(bucketName, keyName, fsinput, omd);
        if (outputMetaData.getSize() < Integer.MAX_VALUE) {
            request.getRequestClientOptions().setReadLimit((int) outputMetaData.getSize());
        } else {
            throw new RuntimeException("PutRequestSize greater than Integer.MAX_VALUE");
        }
        if (fs.exists(path)) {
            PutObjectResult result = s3client.putObject(request);
            logger.debug("File {} Uploaded at {}", keyName, result.getETag());
        }
    } catch (FileNotFoundException e) {
        logger.debug("Ignoring non-existent path assuming replay : {}", outputMetaData.getPath());
    } catch (IOException e) {
        logger.error("Unable to create Stream: {}", e.getMessage());
    }
}

From source file:org.apache.camel.component.aws.s3.S3Producer.java

License:Apache License

@Override
public void process(final Exchange exchange) throws Exception {
    ObjectMetadata objectMetadata = new ObjectMetadata();

    Long contentLength = exchange.getIn().getHeader(S3Constants.CONTENT_LENGTH, Long.class);
    if (contentLength != null) {
        objectMetadata.setContentLength(contentLength);
    }

    String contentType = exchange.getIn().getHeader(S3Constants.CONTENT_TYPE, String.class);
    if (contentType != null) {
        objectMetadata.setContentType(contentType);
    }

    String cacheControl = exchange.getIn().getHeader(S3Constants.CACHE_CONTROL, String.class);
    if (cacheControl != null) {
        objectMetadata.setCacheControl(cacheControl);
    }

    String contentDisposition = exchange.getIn().getHeader(S3Constants.CONTENT_DISPOSITION, String.class);
    if (contentDisposition != null) {
        objectMetadata.setContentDisposition(contentDisposition);
    }

    String contentEncoding = exchange.getIn().getHeader(S3Constants.CONTENT_ENCODING, String.class);
    if (contentEncoding != null) {
        objectMetadata.setContentEncoding(contentEncoding);
    }

    String contentMD5 = exchange.getIn().getHeader(S3Constants.CONTENT_MD5, String.class);
    if (contentMD5 != null) {
        objectMetadata.setContentMD5(contentMD5);
    }

    Date lastModified = exchange.getIn().getHeader(S3Constants.LAST_MODIFIED, Date.class);
    if (lastModified != null) {
        objectMetadata.setLastModified(lastModified);
    }

    Map<String, String> userMetadata = exchange.getIn().getHeader(S3Constants.USER_METADATA, Map.class);
    if (userMetadata != null) {
        objectMetadata.setUserMetadata(userMetadata);
    }

    File filePayload = null;

    Object obj = exchange.getIn().getMandatoryBody();
    if (obj instanceof File) {
        filePayload = (File) obj;
    }
    PutObjectRequest putObjectRequest = new PutObjectRequest(getConfiguration().getBucketName(),
            determineKey(exchange), exchange.getIn().getMandatoryBody(InputStream.class), objectMetadata);

    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        putObjectRequest.setStorageClass(storageClass);
    }

    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        putObjectRequest.setCannedAcl(objectAcl);
    }

    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if cannedacl and acl are both specified the last one will be used. refer to
        // PutObjectRequest#setAccessControlList for more details
        putObjectRequest.setAccessControlList(acl);
    }
    LOG.trace("Put object [{}] from exchange [{}]...", putObjectRequest, exchange);

    PutObjectResult putObjectResult = getEndpoint().getS3Client().putObject(putObjectRequest);

    LOG.trace("Received result [{}]", putObjectResult);

    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, putObjectResult.getETag());
    if (putObjectResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, putObjectResult.getVersionId());
    }

    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        IOHelper.close(putObjectRequest.getInputStream());
        FileUtil.deleteFile(filePayload);
    }
}

From source file:org.apache.hadoop.fs.s3a.S3AFastOutputStream.java

License:Apache License

private void putObject() throws IOException {
    LOG.debug("Executing regular upload for bucket '{}' key '{}'", bucket, key);
    final ObjectMetadata om = createDefaultMetadata();
    final int size = buffer.size();
    om.setContentLength(size);
    final PutObjectRequest putObjectRequest = fs.newPutObjectRequest(key, om,
            new ByteArrayInputStream(buffer.toByteArray()));
    putObjectRequest.setGeneralProgressListener(progressListener);
    ListenableFuture<PutObjectResult> putObjectResult = executorService.submit(new Callable<PutObjectResult>() {
        @Override
        public PutObjectResult call() throws Exception {
            fs.incrementPutStartStatistics(size);
            return client.putObject(putObjectRequest);
        }
    });
    //wait for completion
    try {
        putObjectResult.get();
    } catch (InterruptedException ie) {
        LOG.warn("Interrupted object upload: {}", ie, ie);
        Thread.currentThread().interrupt();
    } catch (ExecutionException ee) {
        throw extractException("regular upload", key, ee);
    }
}

From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java

License:Apache License

private void createEmptyObject(final String bucketName, final String objectName)
        throws AmazonClientException, AmazonServiceException {
    final InputStream im = new InputStream() {
        @Override
        public int read() throws IOException {
            return -1;
        }
    };

    final ObjectMetadata om = new ObjectMetadata();
    om.setContentLength(0L);
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, objectName, im, om);
    putObjectRequest.setCannedAcl(cannedACL);
    s3.putObject(putObjectRequest);
    statistics.incrementWriteOps(1);
}

From source file:org.apache.hadoop.fs.s3r.S3RFastOutputStream.java

License:Apache License

private void putObject() throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Executing regular upload for bucket '{}' key '{}'", bucket, key);
    }
    final ObjectMetadata om = createDefaultMetadata();
    om.setContentLength(buffer.size());
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key,
            new ByteArrayInputStream(buffer.toByteArray()), om);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setGeneralProgressListener(progressListener);
    ListenableFuture<PutObjectResult> putObjectResult = executorService.submit(new Callable<PutObjectResult>() {
        @Override
        public PutObjectResult call() throws Exception {
            return client.putObject(putObjectRequest);
        }
    });
    //wait for completion
    try {
        putObjectResult.get();
    } catch (InterruptedException ie) {
        LOG.warn("Interrupted object upload:" + ie, ie);
        Thread.currentThread().interrupt();
    } catch (ExecutionException ee) {
        throw new IOException("Regular upload failed", ee.getCause());
    }
}

From source file:org.apache.ignite.spi.checkpoint.s3.S3CheckpointSpi.java

License:Apache License

/**
 * Writes the given checkpoint data to the given S3 bucket. The data is serialized to
 * a binary stream and saved to S3.
 *
 * @param data Checkpoint data.
 * @throws IgniteCheckedException Thrown if an error occurs while marshalling.
 * @throws AmazonClientException If an error occurs while querying Amazon S3.
 */
private void write(S3CheckpointData data) throws IgniteCheckedException, AmazonClientException {
    assert data != null;

    if (log.isDebugEnabled())
        log.debug("Writing data to S3 [bucket=" + bucketName + ", key=" + data.getKey() + ']');

    byte[] buf = data.toBytes();

    ObjectMetadata meta = new ObjectMetadata();

    meta.setContentLength(buf.length);

    s3.putObject(bucketName, data.getKey(), new ByteArrayInputStream(buf), meta);
}

From source file:org.apache.nifi.processors.aws.s3.PutS3Object.java

License:Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();

    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions(flowFile).getValue();
    final String key = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    final String cacheKey = getIdentifier() + "/" + bucket + "/" + key;

    final AmazonS3Client s3 = getClient();
    final FlowFile ff = flowFile;
    final Map<String, String> attributes = new HashMap<>();
    final String ffFilename = ff.getAttributes().get(CoreAttributes.FILENAME.key());
    attributes.put(S3_BUCKET_KEY, bucket);
    attributes.put(S3_OBJECT_KEY, key);

    final Long multipartThreshold = context.getProperty(MULTIPART_THRESHOLD).asDataSize(DataUnit.B).longValue();
    final Long multipartPartSize = context.getProperty(MULTIPART_PART_SIZE).asDataSize(DataUnit.B).longValue();

    final long now = System.currentTimeMillis();

    /*
     * If necessary, run age off for existing uploads in AWS S3 and local state
     */
    ageoffS3Uploads(context, s3, now);

    /*
     * Then read the FlowFile content and upload it to S3
     */
    try {
        session.read(flowFile, new InputStreamCallback() {
            @Override
            public void process(final InputStream rawIn) throws IOException {
                try (final InputStream in = new BufferedInputStream(rawIn)) {
                    final ObjectMetadata objectMetadata = new ObjectMetadata();
                    objectMetadata.setContentDisposition(ff.getAttribute(CoreAttributes.FILENAME.key()));
                    objectMetadata.setContentLength(ff.getSize());

                    final String contentType = context.getProperty(CONTENT_TYPE)
                            .evaluateAttributeExpressions(ff).getValue();
                    if (contentType != null) {
                        objectMetadata.setContentType(contentType);
                        attributes.put(S3_CONTENT_TYPE, contentType);
                    }

                    final String expirationRule = context.getProperty(EXPIRATION_RULE_ID)
                            .evaluateAttributeExpressions(ff).getValue();
                    if (expirationRule != null) {
                        objectMetadata.setExpirationTimeRuleId(expirationRule);
                    }

                    final Map<String, String> userMetadata = new HashMap<>();
                    for (final Map.Entry<PropertyDescriptor, String> entry : context.getProperties()
                            .entrySet()) {
                        if (entry.getKey().isDynamic()) {
                            final String value = context.getProperty(entry.getKey())
                                    .evaluateAttributeExpressions(ff).getValue();
                            userMetadata.put(entry.getKey().getName(), value);
                        }
                    }

                    final String serverSideEncryption = context.getProperty(SERVER_SIDE_ENCRYPTION).getValue();
                    if (!serverSideEncryption.equals(NO_SERVER_SIDE_ENCRYPTION)) {
                        objectMetadata.setSSEAlgorithm(serverSideEncryption);
                        attributes.put(S3_SSE_ALGORITHM, serverSideEncryption);
                    }

                    if (!userMetadata.isEmpty()) {
                        objectMetadata.setUserMetadata(userMetadata);
                    }

                    if (ff.getSize() <= multipartThreshold) {
                        //----------------------------------------
                        // single part upload
                        //----------------------------------------
                        final PutObjectRequest request = new PutObjectRequest(bucket, key, in, objectMetadata);
                        request.setStorageClass(
                                StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                        final AccessControlList acl = createACL(context, ff);
                        if (acl != null) {
                            request.setAccessControlList(acl);
                        }
                        final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                        if (cannedAcl != null) {
                            request.withCannedAcl(cannedAcl);
                        }

                        try {
                            final PutObjectResult result = s3.putObject(request);
                            if (result.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, result.getVersionId());
                            }
                            if (result.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, result.getETag());
                            }
                            if (result.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY, result.getExpirationTime().toString());
                            }
                            if (result.getMetadata().getRawMetadata().keySet()
                                    .contains(S3_STORAGECLASS_META_KEY)) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY, result.getMetadata()
                                        .getRawMetadataValue(S3_STORAGECLASS_META_KEY).toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_PUTOBJECT);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}",
                                    new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    } else {
                        //----------------------------------------
                        // multipart upload
                        //----------------------------------------

                        // load or create persistent state
                        //------------------------------------------------------------
                        MultipartState currentState;
                        try {
                            currentState = getLocalStateIfInS3(s3, bucket, cacheKey);
                            if (currentState != null) {
                                if (currentState.getPartETags().size() > 0) {
                                    final PartETag lastETag = currentState.getPartETags()
                                            .get(currentState.getPartETags().size() - 1);
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' "
                                            + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' "
                                            + "contentLength='{}' partsLoaded={} lastPart={}/{}",
                                            new Object[] { ffFilename, bucket, key, currentState.getUploadId(),
                                                    currentState.getFilePosition(), currentState.getPartSize(),
                                                    currentState.getStorageClass().toString(),
                                                    currentState.getContentLength(),
                                                    currentState.getPartETags().size(),
                                                    Integer.toString(lastETag.getPartNumber()),
                                                    lastETag.getETag() });
                                } else {
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' "
                                            + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' "
                                            + "contentLength='{}' no partsLoaded",
                                            new Object[] { ffFilename, bucket, key, currentState.getUploadId(),
                                                    currentState.getFilePosition(), currentState.getPartSize(),
                                                    currentState.getStorageClass().toString(),
                                                    currentState.getContentLength() });
                                }
                            } else {
                                currentState = new MultipartState();
                                currentState.setPartSize(multipartPartSize);
                                currentState.setStorageClass(
                                        StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                                currentState.setContentLength(ff.getSize());
                                persistLocalState(cacheKey, currentState);
                                getLogger().info("Starting new upload for flowfile='{}' bucket='{}' key='{}'",
                                        new Object[] { ffFilename, bucket, key });
                            }
                        } catch (IOException e) {
                            getLogger().error("IOException initiating cache state while processing flow files: "
                                    + e.getMessage());
                            throw (e);
                        }

                        // initiate multipart upload or find position in file
                        //------------------------------------------------------------
                        if (currentState.getUploadId().isEmpty()) {
                            final InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(
                                    bucket, key, objectMetadata);
                            initiateRequest.setStorageClass(currentState.getStorageClass());
                            final AccessControlList acl = createACL(context, ff);
                            if (acl != null) {
                                initiateRequest.setAccessControlList(acl);
                            }
                            final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                            if (cannedAcl != null) {
                                initiateRequest.withCannedACL(cannedAcl);
                            }
                            try {
                                final InitiateMultipartUploadResult initiateResult = s3
                                        .initiateMultipartUpload(initiateRequest);
                                currentState.setUploadId(initiateResult.getUploadId());
                                currentState.getPartETags().clear();
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state while processing flow file: "
                                            + e.getMessage());
                                    throw (new ProcessException("Exception saving cache state", e));
                                }
                                getLogger().info(
                                        "Success initiating upload flowfile={} available={} position={} "
                                                + "length={} bucket={} key={} uploadId={}",
                                        new Object[] { ffFilename, in.available(),
                                                currentState.getFilePosition(), currentState.getContentLength(),
                                                bucket, key, currentState.getUploadId() });
                                if (initiateResult.getUploadId() != null) {
                                    attributes.put(S3_UPLOAD_ID_ATTR_KEY, initiateResult.getUploadId());
                                }
                            } catch (AmazonClientException e) {
                                getLogger().info(
                                        "Failure initiating upload flowfile={} bucket={} key={} reason={}",
                                        new Object[] { ffFilename, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        } else {
                            if (currentState.getFilePosition() > 0) {
                                try {
                                    final long skipped = in.skip(currentState.getFilePosition());
                                    if (skipped != currentState.getFilePosition()) {
                                        getLogger().info(
                                                "Failure skipping to resume upload flowfile={} "
                                                        + "bucket={} key={} position={} skipped={}",
                                                new Object[] { ffFilename, bucket, key,
                                                        currentState.getFilePosition(), skipped });
                                    }
                                } catch (Exception e) {
                                    getLogger().info(
                                            "Failure skipping to resume upload flowfile={} bucket={} "
                                                    + "key={} position={} reason={}",
                                            new Object[] { ffFilename, bucket, key,
                                                    currentState.getFilePosition(), e.getMessage() });
                                    throw (new ProcessException(e));
                                }
                            }
                        }

                        // upload parts
                        //------------------------------------------------------------
                        long thisPartSize;
                        for (int part = currentState.getPartETags().size() + 1; currentState
                                .getFilePosition() < currentState.getContentLength(); part++) {
                            if (!PutS3Object.this.isScheduled()) {
                                throw new IOException(S3_PROCESS_UNSCHEDULED_MESSAGE + " flowfile=" + ffFilename
                                        + " part=" + part + " uploadId=" + currentState.getUploadId());
                            }
                            thisPartSize = Math.min(currentState.getPartSize(),
                                    (currentState.getContentLength() - currentState.getFilePosition()));
                            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucket)
                                    .withKey(key).withUploadId(currentState.getUploadId()).withInputStream(in)
                                    .withPartNumber(part).withPartSize(thisPartSize);
                            try {
                                UploadPartResult uploadPartResult = s3.uploadPart(uploadRequest);
                                currentState.addPartETag(uploadPartResult.getPartETag());
                                currentState.setFilePosition(currentState.getFilePosition() + thisPartSize);
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state processing flow file: "
                                            + e.getMessage());
                                }
                                getLogger().info(
                                        "Success uploading part flowfile={} part={} available={} "
                                                + "etag={} uploadId={}",
                                        new Object[] { ffFilename, part, in.available(),
                                                uploadPartResult.getETag(), currentState.getUploadId() });
                            } catch (AmazonClientException e) {
                                getLogger().info(
                                        "Failure uploading part flowfile={} part={} bucket={} key={} "
                                                + "reason={}",
                                        new Object[] { ffFilename, part, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        }

                        // complete multipart upload
                        //------------------------------------------------------------
                        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(
                                bucket, key, currentState.getUploadId(), currentState.getPartETags());
                        try {
                            CompleteMultipartUploadResult completeResult = s3
                                    .completeMultipartUpload(completeRequest);
                            getLogger().info("Success completing upload flowfile={} etag={} uploadId={}",
                                    new Object[] { ffFilename, completeResult.getETag(),
                                            currentState.getUploadId() });
                            if (completeResult.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, completeResult.getVersionId());
                            }
                            if (completeResult.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, completeResult.getETag());
                            }
                            if (completeResult.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY,
                                        completeResult.getExpirationTime().toString());
                            }
                            if (currentState.getStorageClass() != null) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY,
                                        currentState.getStorageClass().toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_MULTIPARTUPLOAD);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}",
                                    new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    }
                }
            }
        });

        if (!attributes.isEmpty()) {
            flowFile = session.putAllAttributes(flowFile, attributes);
        }
        session.transfer(flowFile, REL_SUCCESS);

        final String url = s3.getResourceUrl(bucket, key);
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        session.getProvenanceReporter().send(flowFile, url, millis);

        getLogger().info("Successfully put {} to Amazon S3 in {} milliseconds", new Object[] { ff, millis });
        try {
            removeLocalState(cacheKey);
        } catch (IOException e) {
            getLogger().info("Error trying to delete key {} from cache: {}",
                    new Object[] { cacheKey, e.getMessage() });
        }
    } catch (final ProcessException | AmazonClientException pe) {
        if (pe.getMessage().contains(S3_PROCESS_UNSCHEDULED_MESSAGE)) {
            getLogger().info(pe.getMessage());
            session.rollback();
        } else {
            getLogger().error("Failed to put {} to Amazon S3 due to {}", new Object[] { flowFile, pe });
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_FAILURE);
        }
    }

}