Example usage for com.amazonaws.services.s3.model PutObjectResult getETag

Introduction

This page collects example usages of com.amazonaws.services.s3.model.PutObjectResult.getETag(), drawn from open-source projects.

Prototype

public String getETag() 

Document

Gets the server-side ETag value for the newly created object.
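
For orientation, here is a minimal sketch of reading the ETag back after a simple upload. The bucket name, key, and payload are placeholders, and the client construction assumes the default credential chain:

import java.io.ByteArrayInputStream;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;

byte[] payload = "hello world".getBytes();
AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient(); // assumes default credential chain
ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentLength(payload.length);
PutObjectResult result = s3.putObject(
        new PutObjectRequest("example-bucket", "example-key", new ByteArrayInputStream(payload), metadata));
// For single-part uploads without SSE-KMS, the ETag is the hex MD5 digest of the payload
System.out.println("ETag: " + result.getETag());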

Usage

From source file:com.eucalyptus.blockstorage.S3SnapshotTransfer.java

License:Open Source License

/**
 * Compresses the snapshot and uploads it to a bucket in objectstorage gateway as a single or multipart upload based on the configuration in
 * {@link StorageInfo}. The bucket name should be configured before invoking this method; it can be looked up and initialized by {@link #prepareForUpload()} or
 * explicitly set using {@link #setBucketName(String)}.
 *
 * @param sourceFileName
 *            absolute path to the snapshot on the file system
 */
@Override
public void upload(String sourceFileName) throws SnapshotTransferException {
    validateInput(); // Validate input
    loadTransferConfig(); // Load the transfer configuration parameters from database
    SnapshotProgressCallback progressCallback = new SnapshotProgressCallback(snapshotId); // Setup the progress callback

    Boolean error = Boolean.FALSE;
    ArrayBlockingQueue<SnapshotPart> partQueue = null;
    SnapshotPart part = null;
    SnapshotUploadInfo snapUploadInfo = null;
    Future<List<PartETag>> uploadPartsFuture = null;
    Future<String> completeUploadFuture = null;

    byte[] buffer = new byte[READ_BUFFER_SIZE];
    Long readOffset = 0L;
    Long bytesRead = 0L;
    Long bytesWritten = 0L;
    int len;
    int partNumber = 1;

    try {
        // Get the uncompressed file size for uploading as metadata
        Long uncompressedSize = getFileSize(sourceFileName);

        // Setup the snapshot and part entities.
        snapUploadInfo = SnapshotUploadInfo.create(snapshotId, bucketName, keyName);
        Path zipFilePath = Files.createTempFile(keyName + '-', '-' + String.valueOf(partNumber));
        part = SnapshotPart.createPart(snapUploadInfo, zipFilePath.toString(), partNumber, readOffset);

        FileInputStream inputStream = new FileInputStream(sourceFileName);
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        GZIPOutputStream gzipStream = new GZIPOutputStream(baos);
        FileOutputStream outputStream = new FileOutputStream(zipFilePath.toString());

        try {
            LOG.debug("Reading snapshot " + snapshotId + " and compressing it to disk in chunks of size "
                    + partSize + " bytes or greater");
            while ((len = inputStream.read(buffer)) > 0) {
                bytesRead += len;
                gzipStream.write(buffer, 0, len);

                if ((bytesWritten + baos.size()) < partSize) {
                    baos.writeTo(outputStream);
                    bytesWritten += baos.size();
                    baos.reset();
                } else {
                    gzipStream.close();
                    baos.writeTo(outputStream); // Order is important. Closing the gzip stream flushes stuff
                    bytesWritten += baos.size();
                    baos.reset();
                    outputStream.close();

                    if (partNumber > 1) {// Update the part status
                        part = part.updateStateCreated(bytesWritten, bytesRead, Boolean.FALSE);
                    } else {// Initialize multipart upload only once after the first part is created
                        LOG.info("Uploading snapshot " + snapshotId
                                + " to objectstorage using multipart upload");
                        progressCallback.setUploadSize(uncompressedSize);
                        uploadId = initiateMulitpartUpload(uncompressedSize);
                        snapUploadInfo = snapUploadInfo.updateUploadId(uploadId);
                        part = part.updateStateCreated(uploadId, bytesWritten, bytesRead, Boolean.FALSE);
                        partQueue = new ArrayBlockingQueue<SnapshotPart>(queueSize);
                        uploadPartsFuture = Threads.enqueue(serviceConfig, UploadPartTask.class, poolSize,
                                new UploadPartTask(partQueue, progressCallback));
                    }

                    // Check for the future task before adding part to the queue.
                    if (uploadPartsFuture != null && uploadPartsFuture.isDone()) {
                        // This task shouldn't be done until the last part is added. If it is done at this point, then something might have gone wrong
                        throw new SnapshotUploadPartException(
                                "Error uploading parts, aborting part creation process. Check previous log messages for the exact error");
                    }

                    // Add part to the queue
                    partQueue.put(part);

                    // Prep the metadata for the next part
                    readOffset += bytesRead;
                    bytesRead = 0L;
                    bytesWritten = 0L;

                    // Setup the part entity for next part
                    zipFilePath = Files.createTempFile(keyName + '-', '-' + String.valueOf((++partNumber)));
                    part = SnapshotPart.createPart(snapUploadInfo, zipFilePath.toString(), partNumber,
                            readOffset);

                    gzipStream = new GZIPOutputStream(baos);
                    outputStream = new FileOutputStream(zipFilePath.toString());
                }
            }

            gzipStream.close();
            baos.writeTo(outputStream);
            bytesWritten += baos.size();
            baos.reset();
            outputStream.close();
            inputStream.close();

            // Update the part status
            part = part.updateStateCreated(bytesWritten, bytesRead, Boolean.TRUE);

            // Update the snapshot upload info status
            snapUploadInfo = snapUploadInfo.updateStateCreatedParts(partNumber);
        } catch (Exception e) {
            LOG.error("Failed to upload " + snapshotId + " due to: ", e);
            error = Boolean.TRUE;
            throw new SnapshotTransferException("Failed to upload " + snapshotId + " due to: ", e);
        } finally {
            if (inputStream != null) {
                inputStream.close();
            }
            if (gzipStream != null) {
                gzipStream.close();
            }
            if (outputStream != null) {
                outputStream.close();
            }
            baos.reset();
        }

        if (partNumber > 1) {
            // Check for the future task before adding the last part to the queue.
            if (uploadPartsFuture != null && uploadPartsFuture.isDone()) {
                // This task shouldn't be done until the last part is added. If it is done at this point, then something might have gone wrong
                throw new SnapshotUploadPartException(
                        "Error uploading parts, aborting part upload process. Check previous log messages for the exact error");
            }
            // Add the last part to the queue
            partQueue.put(part);
            // Kick off the completion task
            completeUploadFuture = Threads.enqueue(serviceConfig, CompleteMpuTask.class, poolSize,
                    new CompleteMpuTask(uploadPartsFuture, snapUploadInfo, partNumber));
        } else {
            try {
                LOG.info("Uploading snapshot " + snapshotId
                        + " to objectstorage as a single object. Compressed size of snapshot (" + bytesWritten
                        + " bytes) is less than minimum part size (" + partSize
                        + " bytes) for multipart upload");
                PutObjectResult putResult = uploadSnapshotAsSingleObject(zipFilePath.toString(), bytesWritten,
                        uncompressedSize, progressCallback);
                markSnapshotAvailable();
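                // Best-effort DB status update using the returned ETag; a failure here is only logged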
                try {
                    part = part.updateStateUploaded(putResult.getETag());
                    snapUploadInfo = snapUploadInfo.updateStateUploaded(putResult.getETag());
                } catch (Exception e) {
                    LOG.debug("Failed to update status in DB for " + snapUploadInfo);
                }
                LOG.info("Uploaded snapshot " + snapshotId + " to objectstorage");
            } catch (Exception e) {
                error = Boolean.TRUE;
                LOG.error("Failed to upload snapshot " + snapshotId + " due to: ", e);
                throw new SnapshotTransferException("Failed to upload snapshot " + snapshotId + " due to: ", e);
            } finally {
                deleteFile(zipFilePath);
            }
        }
    } catch (SnapshotTransferException e) {
        error = Boolean.TRUE;
        throw e;
    } catch (Exception e) {
        error = Boolean.TRUE;
        LOG.error("Failed to upload snapshot " + snapshotId + " due to: ", e);
        throw new SnapshotTransferException("Failed to upload snapshot " + snapshotId + " due to: ", e);
    } finally {
        if (error) {
            abortUpload(snapUploadInfo);
            if (uploadPartsFuture != null && !uploadPartsFuture.isDone()) {
                uploadPartsFuture.cancel(true);
            }
            if (completeUploadFuture != null && !completeUploadFuture.isDone()) {
                completeUploadFuture.cancel(true);
            }
        }
    }
}

From source file:com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java

License:Open Source License

@Override
public PutObjectResponseType putObject(PutObjectType request, InputStream inputData) throws S3Exception {
    User requestUser = getRequestUser(request);
    OsgInternalS3Client internalS3Client = null;
    try {
        internalS3Client = getS3Client(requestUser);
        AmazonS3Client s3Client = internalS3Client.getS3Client();
        PutObjectResult result;
        ObjectMetadata metadata = getS3ObjectMetadata(request);
        //Set the acl to private.
        PutObjectRequest putRequest = new PutObjectRequest(request.getBucket(), request.getKey(), inputData,
                metadata).withCannedAcl(CannedAccessControlList.Private);
        result = s3Client.putObject(putRequest);

        PutObjectResponseType reply = request.getReply();
        if (result == null) {
            throw new InternalErrorException("Null result from backend");
        } else {
            reply.setEtag(result.getETag());
            reply.setVersionId(result.getVersionId());
            reply.setLastModified(new Date());
        }
        return reply;
    } catch (AmazonServiceException e) {
        LOG.debug("Error from backend", e);
        throw S3ExceptionMapper.fromAWSJavaSDK(e);
    }
}

From source file:com.netflix.exhibitor.core.s3.S3Utils.java

License:Apache License

public static ObjectMetadata simpleUploadFile(S3Client client, byte[] bytes, String bucket, String key)
        throws Exception {
    byte[] md5 = md5(bytes, bytes.length);

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(bytes.length);
    metadata.setLastModified(new Date());
    metadata.setContentMD5(S3Utils.toBase64(md5));
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, new ByteArrayInputStream(bytes),
            metadata);
    PutObjectResult putObjectResult = client.putObject(putObjectRequest);
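    // Note: S3 returns the payload's hex MD5 digest as the ETag only for
    // single-part, non-SSE-KMS uploads, so this check holds for this simple put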

    if (!putObjectResult.getETag().equals(S3Utils.toHex(md5))) {
        throw new Exception("Unable to match MD5 for config");
    }

    return metadata;
}

From source file:com.yahoo.ycsb.db.S3Client.java

License:Open Source License

/**
* Upload a new object to S3 or update an object on S3.
*
* @param bucket
*            The name of the bucket
* @param key
*            The file key of the object to upload/update.
* @param values
*            The data to be written on the object
* @param updateMarker
*            If true, a new object will be uploaded to S3; if false, an
*            existing object will be re-uploaded
* @param sseLocal
*            If "true", apply server-side encryption with S3-managed keys (SSE-S3)
* @param ssecLocal
*            Customer-provided SSE-C key, or null when not used
*/
protected Status writeToStorage(String bucket, String key, HashMap<String, ByteIterator> values,
        Boolean updateMarker, String sseLocal, SSECustomerKey ssecLocal) {
    int totalSize = 0;
    int fieldCount = values.size(); //number of fields to concatenate
    // getting the first field in the values
    Object keyToSearch = values.keySet().toArray()[0];
    // getting the content of just one field
    byte[] sourceArray = values.get(keyToSearch).toArray();
    int sizeArray = sourceArray.length; //size of each array
    if (updateMarker) {
        totalSize = sizeArray * fieldCount;
    } else {
        try {
            Map.Entry<S3Object, ObjectMetadata> objectAndMetadata = getS3ObjectAndMetadata(bucket, key,
                    ssecLocal);
            int sizeOfFile = (int) objectAndMetadata.getValue().getContentLength();
            fieldCount = sizeOfFile / sizeArray;
            totalSize = sizeOfFile;
            objectAndMetadata.getKey().close();
        } catch (Exception e) {
            System.err.println("Not possible to get the object :" + key);
            e.printStackTrace();
            return Status.ERROR;
        }
    }
    byte[] destinationArray = new byte[totalSize];
    int offset = 0;
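    // Replicate the single field's bytes fieldCount times to build the object payload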
    for (int i = 0; i < fieldCount; i++) {
        System.arraycopy(sourceArray, 0, destinationArray, offset, sizeArray);
        offset += sizeArray;
    }
    try (InputStream input = new ByteArrayInputStream(destinationArray)) {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(totalSize);
        PutObjectRequest putObjectRequest = null;
        if (sseLocal.equals("true")) {
            metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            putObjectRequest = new PutObjectRequest(bucket, key, input, metadata);
        } else if (ssecLocal != null) {
            putObjectRequest = new PutObjectRequest(bucket, key, input, metadata).withSSECustomerKey(ssecLocal);
        } else {
            putObjectRequest = new PutObjectRequest(bucket, key, input, metadata);
        }

        try {
            PutObjectResult res = s3Client.putObject(putObjectRequest);
            if (res.getETag() == null) {
                return Status.ERROR;
            } else if (sseLocal.equals("true") || ssecLocal != null) {
                System.out.println("Uploaded object encryption status is " + res.getSSEAlgorithm());
            }
        } catch (Exception e) {
            System.err.println("Not possible to write object :" + key);
            e.printStackTrace();
            return Status.ERROR;
        }
    } catch (Exception e) {
        System.err.println("Error in the creation of the stream :" + e.toString());
        e.printStackTrace();
        return Status.ERROR;
    }

    return Status.OK;
}

From source file:com.yahoo.ycsb.utils.connection.S3Connection.java

License:Open Source License

public Status insert(String key, byte[] bytes) {
    try (InputStream input = new ByteArrayInputStream(bytes)) {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(bytes.length);
        PutObjectRequest putObjectRequest = null;
        if (ssecKey != null) {
            if (ssecKey.equals("true")) {
                metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
                putObjectRequest = new PutObjectRequest(bucket, key, input, metadata);
            } else {
                putObjectRequest = new PutObjectRequest(bucket, key, input, metadata)
                        .withSSECustomerKey(ssecKey);
            }
        } else {
            putObjectRequest = new PutObjectRequest(bucket, key, input, metadata);
        }

        try {
            PutObjectResult res = awsClient.putObject(putObjectRequest);
            if (res.getETag() == null) {
                return Status.ERROR;
            } else if (ssecKey != null) {
                logger.debug("Uploaded object encryption status is " + res.getSSEAlgorithm());
            }
        } catch (Exception e) {
            logger.error("Not possible to write object :" + key);
            System.err.println("Retrying " + key);
            // Propagate the retry's outcome instead of falling through to Status.OK;
            // note the retry is unbounded.
            return insert(key, bytes);
        }
    } catch (Exception e) {
        logger.error("Error in the creation of the stream :" + e.toString());
        System.err.println("Retrying " + key);
        // Propagate the retry's outcome instead of falling through to Status.OK;
        // note the retry is unbounded.
        return insert(key, bytes);
    }
    return Status.OK;
}

From source file:gov.cdc.sdp.cbr.aphl.AphlS3Producer.java

License:Apache License

public void processSingleOp(final Exchange exchange) throws Exception {

    ObjectMetadata objectMetadata = determineMetadata(exchange);

    File filePayload = null;
    InputStream is = null;
    Object obj = exchange.getIn().getMandatoryBody();
    PutObjectRequest putObjectRequest = null;
    // Need to check if the message body is WrappedFile
    if (obj instanceof WrappedFile) {
        obj = ((WrappedFile<?>) obj).getFile();
    }
    if (obj instanceof File) {
        filePayload = (File) obj;
        is = new FileInputStream(filePayload);
    } else {
        is = exchange.getIn().getMandatoryBody(InputStream.class);
    }

    putObjectRequest = new PutObjectRequest(getConfiguration().getBucketName(), determineKey(exchange), is,
            objectMetadata);

    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        putObjectRequest.setStorageClass(storageClass);
    }

    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        putObjectRequest.setCannedAcl(objectAcl);
    }

    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if cannedacl and acl are both specified the last one will
        // be used. refer to
        // PutObjectRequest#setAccessControlList for more details
        putObjectRequest.setAccessControlList(acl);
    }

    PutObjectResult putObjectResult = getEndpoint().getS3Client().putObject(putObjectRequest);
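    // Propagate the returned ETag (and version id, when present) via message headers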

    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, putObjectResult.getETag());
    if (putObjectResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, putObjectResult.getVersionId());
    }

    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        // close streams
        IOHelper.close(putObjectRequest.getInputStream());
        IOHelper.close(is);
        FileUtil.deleteFile(filePayload);
    }
}

From source file:io.konig.camel.aws.s3.DeleteObjectProducer.java

License:Apache License

public void processSingleOp(final Exchange exchange) throws Exception {

    ObjectMetadata objectMetadata = determineMetadata(exchange);

    File filePayload = null;
    InputStream is = null;
    ByteArrayOutputStream baos = null;
    Object obj = exchange.getIn().getMandatoryBody();
    PutObjectRequest putObjectRequest = null;
    // Need to check if the message body is WrappedFile
    if (obj instanceof WrappedFile) {
        obj = ((WrappedFile<?>) obj).getFile();
    }
    if (obj instanceof File) {
        filePayload = (File) obj;
        is = new FileInputStream(filePayload);
    } else {
        is = exchange.getIn().getMandatoryBody(InputStream.class);
        baos = determineLengthInputStream(is);
        objectMetadata.setContentLength(baos.size());
        is = new ByteArrayInputStream(baos.toByteArray());
    }

    putObjectRequest = new PutObjectRequest(getConfiguration().getBucketName(), determineKey(exchange), is,
            objectMetadata);

    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        putObjectRequest.setStorageClass(storageClass);
    }

    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        putObjectRequest.setCannedAcl(objectAcl);
    }

    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if cannedacl and acl are both specified the last one will
        // be used. refer to
        // PutObjectRequest#setAccessControlList for more details
        putObjectRequest.setAccessControlList(acl);
    }

    if (getConfiguration().isUseAwsKMS()) {
        SSEAwsKeyManagementParams keyManagementParams;
        if (ObjectHelper.isNotEmpty(getConfiguration().getAwsKMSKeyId())) {
            keyManagementParams = new SSEAwsKeyManagementParams(getConfiguration().getAwsKMSKeyId());
        } else {
            keyManagementParams = new SSEAwsKeyManagementParams();
        }
        putObjectRequest.setSSEAwsKeyManagementParams(keyManagementParams);
    }

    LOG.trace("Put object [{}] from exchange [{}]...", putObjectRequest, exchange);

    PutObjectResult putObjectResult = getEndpoint().getS3Client().putObject(putObjectRequest);

    LOG.trace("Received result [{}]", putObjectResult);

    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, putObjectResult.getETag());
    if (putObjectResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, putObjectResult.getVersionId());
    }

    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        // close streams
        IOHelper.close(putObjectRequest.getInputStream());
        IOHelper.close(is);
        FileUtil.deleteFile(filePayload);
    }
}

From source file:org.akvo.flow.deploy.Deploy.java

License:Open Source License

private static void uploadS3(String accessKey, String secretKey, String s3Path, File file)
        throws AmazonServiceException, AmazonClientException {
    BasicAWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
    AmazonS3 s3 = new AmazonS3Client(credentials);

    PutObjectRequest putRequest = new PutObjectRequest(BUCKET_NAME, s3Path, file);
    ObjectMetadata metadata = new ObjectMetadata();

    // set content type as android package file
    metadata.setContentType("application/vnd.android.package-archive");

    // set content length to length of file
    metadata.setContentLength(file.length());

    putRequest.setMetadata(metadata);

    // set access to public
    putRequest.setCannedAcl(CannedAccessControlList.PublicRead);

    // try to put the apk in S3
    PutObjectResult result = s3.putObject(putRequest);
    System.out.println("Apk uploaded successfully, with result ETag " + result.getETag());
}

From source file:org.apache.apex.malhar.lib.fs.s3.S3BlockUploadOperator.java

License:Apache License

/**
 * Upload the block into S3 bucket.
 * @param tuple block data
 */
protected void uploadBlockIntoS3(AbstractBlockReader.ReaderRecord<Slice> tuple) {
    if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
        return;
    }
    // Check whether the block metadata is present for this block
    if (blockIdToFilePath.get(tuple.getBlockId()) == null) {
        if (!waitingTuples.contains(tuple)) {
            waitingTuples.add(tuple);
        }
        return;
    }
    String uniqueBlockId = getUniqueBlockIdFromFile(tuple.getBlockId(),
            blockIdToFilePath.get(tuple.getBlockId()));
    S3BlockMetaData metaData = blockInfo.get(uniqueBlockId);
    // Check whether the file metadata is received
    if (metaData == null) {
        if (!waitingTuples.contains(tuple)) {
            waitingTuples.add(tuple);
        }
        return;
    }
    long partSize = tuple.getRecord().length;
    PartETag partETag = null;
    ByteArrayInputStream bis = new ByteArrayInputStream(tuple.getRecord().buffer);
    // Check if it is a Single block of a file
    if (metaData.isLastBlock && metaData.partNo == 1) {
        ObjectMetadata omd = createObjectMetadata();
        omd.setContentLength(partSize);
        PutObjectResult result = s3Client
                .putObject(new PutObjectRequest(bucketName, metaData.getKeyName(), bis, omd));
        partETag = new PartETag(1, result.getETag());
    } else {
        // Else upload use multi-part feature
        try {
            // Create request to upload a part.
            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName)
                    .withKey(metaData.getKeyName()).withUploadId(metaData.getUploadId())
                    .withPartNumber(metaData.getPartNo()).withInputStream(bis).withPartSize(partSize);
            partETag = s3Client.uploadPart(uploadRequest).getPartETag();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    UploadBlockMetadata uploadmetadata = new UploadBlockMetadata(partETag, metaData.getKeyName());
    output.emit(uploadmetadata);
    currentWindowRecoveryState.put(uniqueBlockId, uploadmetadata);
    try {
        bis.close();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}

From source file:org.apache.apex.malhar.lib.fs.s3.S3Reconciler.java

License:Apache License

/**
 * Uploads the file on Amazon S3 using putObject API from S3 client
 */
@Override
protected void processCommittedData(FSRecordCompactionOperator.OutputMetaData outputMetaData) {
    try {
        Path path = new Path(outputMetaData.getPath());
        if (fs.exists(path) == false) {
            logger.debug("Ignoring non-existent path assuming replay : {}", path);
            return;
        }
        FSDataInputStream fsinput = fs.open(path);
        ObjectMetadata omd = new ObjectMetadata();
        omd.setContentLength(outputMetaData.getSize());
        String keyName = directoryName + Path.SEPARATOR + outputMetaData.getFileName();
        PutObjectRequest request = new PutObjectRequest(bucketName, keyName, fsinput, omd);
        if (outputMetaData.getSize() < Integer.MAX_VALUE) {
            request.getRequestClientOptions().setReadLimit((int) outputMetaData.getSize());
        } else {
            throw new RuntimeException("PutRequestSize greater than Integer.MAX_VALUE");
        }
        if (fs.exists(path)) {
            PutObjectResult result = s3client.putObject(request);
            logger.debug("File {} Uploaded at {}", keyName, result.getETag());
        }
    } catch (FileNotFoundException e) {
        logger.debug("Ignoring non-existent path assuming replay : {}", outputMetaData.getPath());
    } catch (IOException e) {
        logger.error("Unable to create Stream: {}", e.getMessage());
    }
}