Example usage for com.amazonaws.services.s3.model InitiateMultipartUploadResult getUploadId

Introduction

On this page you can find example usage of com.amazonaws.services.s3.model.InitiateMultipartUploadResult.getUploadId().

Prototype

public String getUploadId() 

Document

Returns the initiated multipart upload ID.
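
Below is a minimal sketch (not taken from any of the sources that follow) of the lifecycle around getUploadId(): the AmazonS3 client and the "example-bucket"/"example-key" names are placeholder assumptions. The ID returned by initiateMultipartUpload identifies the upload, and every subsequent part request and the final complete or abort call must carry it.

// A minimal sketch, assuming an already-configured AmazonS3 client and
// placeholder bucket/key names.
void multipartLifecycleSketch(AmazonS3 s3) {
    InitiateMultipartUploadResult initResult = s3
            .initiateMultipartUpload(new InitiateMultipartUploadRequest("example-bucket", "example-key"));
    String uploadId = initResult.getUploadId();
    try {
        // ... upload each part with an UploadPartRequest built via .withUploadId(uploadId),
        // collect the PartETags, then pass them to s3.completeMultipartUpload(...) ...
    } catch (AmazonClientException e) {
        // Abort with the same upload ID so S3 discards the orphaned parts
        // instead of retaining (and charging for) them.
        s3.abortMultipartUpload(new AbortMultipartUploadRequest("example-bucket", "example-key", uploadId));
        throw e;
    }
}

As the usage examples below show, the same initiate/complete-or-abort pattern applies to uploads and server-side copies alike.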

Usage

From source file: com.upplication.s3fs.AmazonS3Client.java

License: Open Source License

public void multipartCopyObject(S3Path s3Source, S3Path s3Target, Long objectSize, S3MultipartOptions opts) {

    final String sourceBucketName = s3Source.getBucket();
    final String sourceObjectKey = s3Source.getKey();
    final String targetBucketName = s3Target.getBucket();
    final String targetObjectKey = s3Target.getKey();

    // Step 1: initiate the multipart upload
    InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(targetBucketName,
            targetObjectKey);

    InitiateMultipartUploadResult initResult = client.initiateMultipartUpload(initiateRequest);

    // Step 2: save the upload ID
    String uploadId = initResult.getUploadId();

    // Get object size.
    if (objectSize == null) {
        GetObjectMetadataRequest metadataRequest = new GetObjectMetadataRequest(sourceBucketName,
                sourceObjectKey);
        ObjectMetadata metadataResult = client.getObjectMetadata(metadataRequest);
        objectSize = metadataResult.getContentLength(); // in bytes
    }

    final int partSize = opts.getChunkSize(objectSize);
    ExecutorService executor = S3OutputStream.getOrCreateExecutor(opts.getMaxThreads());
    List<Callable<CopyPartResult>> copyPartRequests = new ArrayList<>();

    // Step 3: create the copy-part requests
    long bytePosition = 0;
    for (int i = 1; bytePosition < objectSize; i++) {
        long lastPosition = bytePosition + partSize - 1 >= objectSize ? objectSize - 1
                : bytePosition + partSize - 1;

        CopyPartRequest copyRequest = new CopyPartRequest().withDestinationBucketName(targetBucketName)
                .withDestinationKey(targetObjectKey).withSourceBucketName(sourceBucketName)
                .withSourceKey(sourceObjectKey).withUploadId(uploadId).withFirstByte(bytePosition)
                .withLastByte(lastPosition).withPartNumber(i);

        copyPartRequests.add(copyPart(client, copyRequest, opts));
        bytePosition += partSize;
    }

    log.trace(
            "Starting multipart copy from: {} to {} -- uploadId={}; objectSize={}; chunkSize={}; numOfChunks={}",
            s3Source, s3Target, uploadId, objectSize, partSize, copyPartRequests.size());

    List<PartETag> etags = new ArrayList<>();
    List<Future<CopyPartResult>> responses;
    try {
        // Step 4: start the parallel part copies
        responses = executor.invokeAll(copyPartRequests);

        // Step 5: fetch all results
        for (Future<CopyPartResult> response : responses) {
            CopyPartResult result = response.get();
            etags.add(new PartETag(result.getPartNumber(), result.getETag()));
        }
    } catch (Exception e) {
        throw new IllegalStateException("Multipart copy reported an unexpected error -- uploadId=" + uploadId,
                e);
    }

    // Step 6: complete the copy operation
    CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(targetBucketName,
            targetObjectKey, initResult.getUploadId(), etags);

    log.trace("Completing multipart copy uploadId={}", uploadId);
    client.completeMultipartUpload(completeRequest);
}

From source file: eu.stratosphere.nephele.fs.s3.S3DataOutputStream.java

License: Apache License

private String initiateMultipartUpload() throws IOException {

    boolean operationSuccessful = false;
    final InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(this.bucket, this.object);
    if (this.useRRS) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    } else {
        request.setStorageClass(StorageClass.Standard);
    }

    try {

        final InitiateMultipartUploadResult result = this.s3Client.initiateMultipartUpload(request);
        operationSuccessful = true;
        return result.getUploadId();

    } catch (AmazonServiceException e) {
        throw new IOException(StringUtils.stringifyException(e));
    } finally {
        if (!operationSuccessful) {
            abortUpload();
        }
    }
}

From source file: gov.cdc.sdp.cbr.aphl.AphlS3Producer.java

License: Apache License

public void processMultiPart(final Exchange exchange) throws Exception {
    File filePayload = null;
    Object obj = exchange.getIn().getMandatoryBody();
    // Need to check if the message body is WrappedFile
    if (obj instanceof WrappedFile) {
        obj = ((WrappedFile<?>) obj).getFile();
    }
    if (obj instanceof File) {
        filePayload = (File) obj;
    } else {
        LOG.error("aphl-s3: MultiPart upload requires a File input.");
        throw new InvalidArgumentException("aphl-s3: MultiPart upload requires a File input.");
    }

    ObjectMetadata objectMetadata = determineMetadata(exchange);
    if (objectMetadata.getContentLength() == 0) {
        objectMetadata.setContentLength(filePayload.length());
    }

    final String keyName = determineKey(exchange);
    final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(
            getConfiguration().getBucketName(), keyName, objectMetadata);

    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        initRequest.setStorageClass(StorageClass.fromValue(storageClass));
    }

    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        initRequest.setCannedACL(objectAcl);
    }

    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if cannedacl and acl are both specified the last one will
        // be used. refer to
        // PutObjectRequest#setAccessControlList for more details
        initRequest.setAccessControlList(acl);
    }

    LOG.trace("Initiating multipart upload ...");

    final InitiateMultipartUploadResult initResponse = getEndpoint().getS3Client()
            .initiateMultipartUpload(initRequest);
    final long contentLength = objectMetadata.getContentLength();
    final List<PartETag> partETags = new ArrayList<PartETag>();
    long partSize = getConfiguration().getPartSize();
    CompleteMultipartUploadResult uploadResult = null;

    long filePosition = 0;

    try {
        for (int part = 1; filePosition < contentLength; part++) {
            partSize = Math.min(partSize, contentLength - filePosition);

            UploadPartRequest uploadRequest = new UploadPartRequest()
                    .withBucketName(getConfiguration().getBucketName()).withKey(keyName)
                    .withUploadId(initResponse.getUploadId()).withPartNumber(part).withFileOffset(filePosition)
                    .withFile(filePayload).withPartSize(partSize);

            partETags.add(getEndpoint().getS3Client().uploadPart(uploadRequest).getPartETag());

            filePosition += partSize;
        }
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                getConfiguration().getBucketName(), keyName, initResponse.getUploadId(), partETags);

        uploadResult = getEndpoint().getS3Client().completeMultipartUpload(compRequest);

    } catch (Exception exception) {
        LOG.error("Multi-part upload failed, aborting", exception);
        getEndpoint().getS3Client().abortMultipartUpload(new AbortMultipartUploadRequest(
                getConfiguration().getBucketName(), keyName, initResponse.getUploadId()));
        throw exception;
    }

    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, uploadResult.getETag());
    if (uploadResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, uploadResult.getVersionId());
    }

    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        FileUtil.deleteFile(filePayload);
    }
}

From source file: io.konig.camel.aws.s3.DeleteObjectProducer.java

License: Apache License

public void processMultiPart(final Exchange exchange) throws Exception {
    File filePayload = null;
    Object obj = exchange.getIn().getMandatoryBody();
    // Need to check if the message body is WrappedFile
    if (obj instanceof WrappedFile) {
        obj = ((WrappedFile<?>) obj).getFile();
    }
    if (obj instanceof File) {
        filePayload = (File) obj;
    } else {
        throw new InvalidArgumentException("aws-s3: MultiPart upload requires a File input.");
    }

    ObjectMetadata objectMetadata = determineMetadata(exchange);
    if (objectMetadata.getContentLength() == 0) {
        objectMetadata.setContentLength(filePayload.length());
    }

    final String keyName = determineKey(exchange);
    final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(
            getConfiguration().getBucketName(), keyName, objectMetadata);

    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        initRequest.setStorageClass(StorageClass.fromValue(storageClass));
    }

    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        initRequest.setCannedACL(objectAcl);
    }

    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if cannedacl and acl are both specified the last one will
        // be used. refer to
        // PutObjectRequest#setAccessControlList for more details
        initRequest.setAccessControlList(acl);
    }

    if (getConfiguration().isUseAwsKMS()) {
        SSEAwsKeyManagementParams keyManagementParams;
        if (ObjectHelper.isNotEmpty(getConfiguration().getAwsKMSKeyId())) {
            keyManagementParams = new SSEAwsKeyManagementParams(getConfiguration().getAwsKMSKeyId());
        } else {
            keyManagementParams = new SSEAwsKeyManagementParams();
        }
        initRequest.setSSEAwsKeyManagementParams(keyManagementParams);
    }

    LOG.trace("Initiating multipart upload [{}] from exchange [{}]...", initRequest, exchange);

    final InitiateMultipartUploadResult initResponse = getEndpoint().getS3Client()
            .initiateMultipartUpload(initRequest);
    final long contentLength = objectMetadata.getContentLength();
    final List<PartETag> partETags = new ArrayList<PartETag>();
    long partSize = getConfiguration().getPartSize();
    CompleteMultipartUploadResult uploadResult = null;

    long filePosition = 0;

    try {
        for (int part = 1; filePosition < contentLength; part++) {
            partSize = Math.min(partSize, contentLength - filePosition);

            UploadPartRequest uploadRequest = new UploadPartRequest()
                    .withBucketName(getConfiguration().getBucketName()).withKey(keyName)
                    .withUploadId(initResponse.getUploadId()).withPartNumber(part).withFileOffset(filePosition)
                    .withFile(filePayload).withPartSize(partSize);

            LOG.trace("Uploading part [{}] for {}", part, keyName);
            partETags.add(getEndpoint().getS3Client().uploadPart(uploadRequest).getPartETag());

            filePosition += partSize;
        }
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                getConfiguration().getBucketName(), keyName, initResponse.getUploadId(), partETags);

        uploadResult = getEndpoint().getS3Client().completeMultipartUpload(compRequest);

    } catch (Exception e) {
        getEndpoint().getS3Client().abortMultipartUpload(new AbortMultipartUploadRequest(
                getConfiguration().getBucketName(), keyName, initResponse.getUploadId()));
        throw e;
    }

    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, uploadResult.getETag());
    if (uploadResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, uploadResult.getVersionId());
    }

    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        FileUtil.deleteFile(filePayload);
    }
}

From source file: io.minio.awssdk.tests.S3TestUtils.java

License: Apache License

void uploadMultipartObject(String bucketName, String keyName, String filePath, SSECustomerKey sseKey)
        throws IOException {

    File file = new File(filePath);

    List<PartETag> partETags = new ArrayList<PartETag>();

    // Step 1: Initialize.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);

    if (sseKey != null) {
        initRequest.setSSECustomerKey(sseKey);
    }

    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);

    long contentLength = file.length();
    long partSize = 5242880; // Set part size to 5 MB.

    // Step 2: Upload parts.
    long filePosition = 0;
    for (int i = 1; filePosition < contentLength; i++) {
        // Last part can be less than 5 MB. Adjust part size.
        partSize = Math.min(partSize, (contentLength - filePosition));

        // Create request to upload a part.
        UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName).withKey(keyName)
                .withUploadId(initResponse.getUploadId()).withPartNumber(i).withFileOffset(filePosition)
                .withFile(file).withPartSize(partSize);

        if (sseKey != null) {
            uploadRequest.withSSECustomerKey(sseKey);
        }

        // Upload part and add response to our list.
        partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());

        filePosition += partSize;
    }

    // Step 3: Complete.
    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName, keyName,
            initResponse.getUploadId(), partETags);

    s3Client.completeMultipartUpload(compRequest);
}

From source file: org.apache.apex.malhar.lib.fs.s3.S3InitiateFileUploadOperator.java

License: Apache License

/**
 * For the input file, initiate the upload and emit the UploadFileMetadata through the fileMetadataOutput,
 * uploadMetadataOutput ports.
 * @param tuple given tuple
 */
protected void processTuple(AbstractFileSplitter.FileMetadata tuple) {
    if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
        return;
    }
    String keyName = getKeyName(tuple.getFilePath());
    String uploadId = "";
    if (tuple.getNumberOfBlocks() > 1) {
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
        initRequest.setObjectMetadata(createObjectMetadata());
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        uploadId = initResponse.getUploadId();
    }
    UploadFileMetadata uploadFileMetadata = new UploadFileMetadata(tuple, uploadId, keyName);
    fileMetadataOutput.emit(uploadFileMetadata);
    uploadMetadataOutput.emit(uploadFileMetadata);
    currentWindowRecoveryState.add(uploadFileMetadata);
}

From source file: org.apache.beam.sdk.io.aws.s3.S3FileSystem.java

License: Apache License

@VisibleForTesting
CompleteMultipartUploadResult multipartCopy(S3ResourceId sourcePath, S3ResourceId destinationPath,
        ObjectMetadata sourceObjectMetadata) throws AmazonClientException {
    InitiateMultipartUploadRequest initiateUploadRequest = new InitiateMultipartUploadRequest(
            destinationPath.getBucket(), destinationPath.getKey()).withStorageClass(options.getS3StorageClass())
                    .withObjectMetadata(sourceObjectMetadata);
    initiateUploadRequest.setSSECustomerKey(options.getSSECustomerKey());

    InitiateMultipartUploadResult initiateUploadResult = amazonS3.get()
            .initiateMultipartUpload(initiateUploadRequest);
    final String uploadId = initiateUploadResult.getUploadId();

    List<PartETag> eTags = new ArrayList<>();

    final long objectSize = sourceObjectMetadata.getContentLength();
    // extra validation in case a caller invokes S3FileSystem.multipartCopy directly
    // instead of going through S3FileSystem.copy in the future
    if (objectSize == 0) {
        final CopyPartRequest copyPartRequest = new CopyPartRequest()
                .withSourceBucketName(sourcePath.getBucket()).withSourceKey(sourcePath.getKey())
                .withDestinationBucketName(destinationPath.getBucket())
                .withDestinationKey(destinationPath.getKey()).withUploadId(uploadId).withPartNumber(1);
        copyPartRequest.setSourceSSECustomerKey(options.getSSECustomerKey());
        copyPartRequest.setDestinationSSECustomerKey(options.getSSECustomerKey());

        CopyPartResult copyPartResult = amazonS3.get().copyPart(copyPartRequest);
        eTags.add(copyPartResult.getPartETag());
    } else {
        long bytePosition = 0;
        Integer uploadBufferSizeBytes = options.getS3UploadBufferSizeBytes();
        // Amazon parts are 1-indexed, not zero-indexed.
        for (int partNumber = 1; bytePosition < objectSize; partNumber++) {
            final CopyPartRequest copyPartRequest = new CopyPartRequest()
                    .withSourceBucketName(sourcePath.getBucket()).withSourceKey(sourcePath.getKey())
                    .withDestinationBucketName(destinationPath.getBucket())
                    .withDestinationKey(destinationPath.getKey()).withUploadId(uploadId)
                    .withPartNumber(partNumber).withFirstByte(bytePosition)
                    .withLastByte(Math.min(objectSize - 1, bytePosition + uploadBufferSizeBytes - 1));
            copyPartRequest.setSourceSSECustomerKey(options.getSSECustomerKey());
            copyPartRequest.setDestinationSSECustomerKey(options.getSSECustomerKey());

            CopyPartResult copyPartResult = amazonS3.get().copyPart(copyPartRequest);
            eTags.add(copyPartResult.getPartETag());

            bytePosition += uploadBufferSizeBytes;
        }
    }

    CompleteMultipartUploadRequest completeUploadRequest = new CompleteMultipartUploadRequest()
            .withBucketName(destinationPath.getBucket()).withKey(destinationPath.getKey())
            .withUploadId(uploadId).withPartETags(eTags);
    return amazonS3.get().completeMultipartUpload(completeUploadRequest);
}

From source file: org.apache.beam.sdk.io.aws.s3.S3WritableByteChannel.java

License: Apache License

S3WritableByteChannel(AmazonS3 amazonS3, S3ResourceId path, String contentType, S3Options options)
        throws IOException {
    this.amazonS3 = checkNotNull(amazonS3, "amazonS3");
    this.options = checkNotNull(options);
    this.path = checkNotNull(path, "path");
    checkArgument(
            atMostOne(options.getSSECustomerKey() != null, options.getSSEAlgorithm() != null,
                    options.getSSEAwsKeyManagementParams() != null),
            "Either SSECustomerKey (SSE-C) or SSEAlgorithm (SSE-S3)"
                    + " or SSEAwsKeyManagementParams (SSE-KMS) must not be set at the same time.");
    // Amazon S3 API docs: Each part must be at least 5 MB in size, except the last part.
    checkArgument(options
            .getS3UploadBufferSizeBytes() >= S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES,
            "S3UploadBufferSizeBytes must be at least %s bytes",
            S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES);
    this.uploadBuffer = ByteBuffer.allocate(options.getS3UploadBufferSizeBytes());
    eTags = new ArrayList<>();

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentType(contentType);
    if (options.getSSEAlgorithm() != null) {
        objectMetadata.setSSEAlgorithm(options.getSSEAlgorithm());
    }
    InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(path.getBucket(), path.getKey())
            .withStorageClass(options.getS3StorageClass()).withObjectMetadata(objectMetadata);
    request.setSSECustomerKey(options.getSSECustomerKey());
    request.setSSEAwsKeyManagementParams(options.getSSEAwsKeyManagementParams());
    InitiateMultipartUploadResult result;
    try {
        result = amazonS3.initiateMultipartUpload(request);
    } catch (AmazonClientException e) {
        throw new IOException(e);
    }
    uploadId = result.getUploadId();
}

From source file: org.apache.nifi.processors.aws.s3.PutS3Object.java

License: Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();

    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions(flowFile).getValue();
    final String key = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    final String cacheKey = getIdentifier() + "/" + bucket + "/" + key;

    final AmazonS3Client s3 = getClient();
    final FlowFile ff = flowFile;
    final Map<String, String> attributes = new HashMap<>();
    final String ffFilename = ff.getAttributes().get(CoreAttributes.FILENAME.key());
    attributes.put(S3_BUCKET_KEY, bucket);
    attributes.put(S3_OBJECT_KEY, key);

    final Long multipartThreshold = context.getProperty(MULTIPART_THRESHOLD).asDataSize(DataUnit.B).longValue();
    final Long multipartPartSize = context.getProperty(MULTIPART_PART_SIZE).asDataSize(DataUnit.B).longValue();

    final long now = System.currentTimeMillis();

    /*
     * If necessary, run age-off for existing uploads in AWS S3 and local state
     */
    ageoffS3Uploads(context, s3, now);

    /*
     * Then upload the FlowFile, as a single part or in multiple parts depending on its size
     */
    try {
        session.read(flowFile, new InputStreamCallback() {
            @Override
            public void process(final InputStream rawIn) throws IOException {
                try (final InputStream in = new BufferedInputStream(rawIn)) {
                    final ObjectMetadata objectMetadata = new ObjectMetadata();
                    objectMetadata.setContentDisposition(ff.getAttribute(CoreAttributes.FILENAME.key()));
                    objectMetadata.setContentLength(ff.getSize());

                    final String contentType = context.getProperty(CONTENT_TYPE)
                            .evaluateAttributeExpressions(ff).getValue();
                    if (contentType != null) {
                        objectMetadata.setContentType(contentType);
                        attributes.put(S3_CONTENT_TYPE, contentType);
                    }

                    final String expirationRule = context.getProperty(EXPIRATION_RULE_ID)
                            .evaluateAttributeExpressions(ff).getValue();
                    if (expirationRule != null) {
                        objectMetadata.setExpirationTimeRuleId(expirationRule);
                    }

                    final Map<String, String> userMetadata = new HashMap<>();
                    for (final Map.Entry<PropertyDescriptor, String> entry : context.getProperties()
                            .entrySet()) {
                        if (entry.getKey().isDynamic()) {
                            final String value = context.getProperty(entry.getKey())
                                    .evaluateAttributeExpressions(ff).getValue();
                            userMetadata.put(entry.getKey().getName(), value);
                        }
                    }

                    final String serverSideEncryption = context.getProperty(SERVER_SIDE_ENCRYPTION).getValue();
                    if (!serverSideEncryption.equals(NO_SERVER_SIDE_ENCRYPTION)) {
                        objectMetadata.setSSEAlgorithm(serverSideEncryption);
                        attributes.put(S3_SSE_ALGORITHM, serverSideEncryption);
                    }

                    if (!userMetadata.isEmpty()) {
                        objectMetadata.setUserMetadata(userMetadata);
                    }

                    if (ff.getSize() <= multipartThreshold) {
                        //----------------------------------------
                        // single part upload
                        //----------------------------------------
                        final PutObjectRequest request = new PutObjectRequest(bucket, key, in, objectMetadata);
                        request.setStorageClass(
                                StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                        final AccessControlList acl = createACL(context, ff);
                        if (acl != null) {
                            request.setAccessControlList(acl);
                        }
                        final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                        if (cannedAcl != null) {
                            request.withCannedAcl(cannedAcl);
                        }

                        try {
                            final PutObjectResult result = s3.putObject(request);
                            if (result.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, result.getVersionId());
                            }
                            if (result.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, result.getETag());
                            }
                            if (result.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY, result.getExpirationTime().toString());
                            }
                            if (result.getMetadata().getRawMetadata().keySet()
                                    .contains(S3_STORAGECLASS_META_KEY)) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY, result.getMetadata()
                                        .getRawMetadataValue(S3_STORAGECLASS_META_KEY).toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_PUTOBJECT);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}",
                                    new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    } else {
                        //----------------------------------------
                        // multipart upload
                        //----------------------------------------

                        // load or create persistent state
                        //------------------------------------------------------------
                        MultipartState currentState;
                        try {
                            currentState = getLocalStateIfInS3(s3, bucket, cacheKey);
                            if (currentState != null) {
                                if (currentState.getPartETags().size() > 0) {
                                    final PartETag lastETag = currentState.getPartETags()
                                            .get(currentState.getPartETags().size() - 1);
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' "
                                            + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' "
                                            + "contentLength='{}' partsLoaded={} lastPart={}/{}",
                                            new Object[] { ffFilename, bucket, key, currentState.getUploadId(),
                                                    currentState.getFilePosition(), currentState.getPartSize(),
                                                    currentState.getStorageClass().toString(),
                                                    currentState.getContentLength(),
                                                    currentState.getPartETags().size(),
                                                    Integer.toString(lastETag.getPartNumber()),
                                                    lastETag.getETag() });
                                } else {
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' "
                                            + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' "
                                            + "contentLength='{}' no partsLoaded",
                                            new Object[] { ffFilename, bucket, key, currentState.getUploadId(),
                                                    currentState.getFilePosition(), currentState.getPartSize(),
                                                    currentState.getStorageClass().toString(),
                                                    currentState.getContentLength() });
                                }
                            } else {
                                currentState = new MultipartState();
                                currentState.setPartSize(multipartPartSize);
                                currentState.setStorageClass(
                                        StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                                currentState.setContentLength(ff.getSize());
                                persistLocalState(cacheKey, currentState);
                                getLogger().info("Starting new upload for flowfile='{}' bucket='{}' key='{}'",
                                        new Object[] { ffFilename, bucket, key });
                            }
                        } catch (IOException e) {
                            getLogger().error("IOException initiating cache state while processing flow files: "
                                    + e.getMessage());
                            throw (e);
                        }

                        // initiate multipart upload or find position in file
                        //------------------------------------------------------------
                        if (currentState.getUploadId().isEmpty()) {
                            final InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(
                                    bucket, key, objectMetadata);
                            initiateRequest.setStorageClass(currentState.getStorageClass());
                            final AccessControlList acl = createACL(context, ff);
                            if (acl != null) {
                                initiateRequest.setAccessControlList(acl);
                            }
                            final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                            if (cannedAcl != null) {
                                initiateRequest.withCannedACL(cannedAcl);
                            }
                            try {
                                final InitiateMultipartUploadResult initiateResult = s3
                                        .initiateMultipartUpload(initiateRequest);
                                currentState.setUploadId(initiateResult.getUploadId());
                                currentState.getPartETags().clear();
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state while processing flow file: "
                                            + e.getMessage());
                                    throw (new ProcessException("Exception saving cache state", e));
                                }
                                getLogger().info(
                                        "Success initiating upload flowfile={} available={} position={} "
                                                + "length={} bucket={} key={} uploadId={}",
                                        new Object[] { ffFilename, in.available(),
                                                currentState.getFilePosition(), currentState.getContentLength(),
                                                bucket, key, currentState.getUploadId() });
                                if (initiateResult.getUploadId() != null) {
                                    attributes.put(S3_UPLOAD_ID_ATTR_KEY, initiateResult.getUploadId());
                                }
                            } catch (AmazonClientException e) {
                                getLogger().info(
                                        "Failure initiating upload flowfile={} bucket={} key={} reason={}",
                                        new Object[] { ffFilename, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        } else {
                            if (currentState.getFilePosition() > 0) {
                                try {
                                    final long skipped = in.skip(currentState.getFilePosition());
                                    if (skipped != currentState.getFilePosition()) {
                                        getLogger().info(
                                                "Failure skipping to resume upload flowfile={} "
                                                        + "bucket={} key={} position={} skipped={}",
                                                new Object[] { ffFilename, bucket, key,
                                                        currentState.getFilePosition(), skipped });
                                    }
                                } catch (Exception e) {
                                    getLogger().info(
                                            "Failure skipping to resume upload flowfile={} bucket={} "
                                                    + "key={} position={} reason={}",
                                            new Object[] { ffFilename, bucket, key,
                                                    currentState.getFilePosition(), e.getMessage() });
                                    throw (new ProcessException(e));
                                }
                            }
                        }

                        // upload parts
                        //------------------------------------------------------------
                        long thisPartSize;
                        for (int part = currentState.getPartETags().size() + 1; currentState
                                .getFilePosition() < currentState.getContentLength(); part++) {
                            if (!PutS3Object.this.isScheduled()) {
                                throw new IOException(S3_PROCESS_UNSCHEDULED_MESSAGE + " flowfile=" + ffFilename
                                        + " part=" + part + " uploadId=" + currentState.getUploadId());
                            }
                            thisPartSize = Math.min(currentState.getPartSize(),
                                    (currentState.getContentLength() - currentState.getFilePosition()));
                            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucket)
                                    .withKey(key).withUploadId(currentState.getUploadId()).withInputStream(in)
                                    .withPartNumber(part).withPartSize(thisPartSize);
                            try {
                                UploadPartResult uploadPartResult = s3.uploadPart(uploadRequest);
                                currentState.addPartETag(uploadPartResult.getPartETag());
                                currentState.setFilePosition(currentState.getFilePosition() + thisPartSize);
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state processing flow file: "
                                            + e.getMessage());
                                }
                                getLogger().info(
                                        "Success uploading part flowfile={} part={} available={} "
                                                + "etag={} uploadId={}",
                                        new Object[] { ffFilename, part, in.available(),
                                                uploadPartResult.getETag(), currentState.getUploadId() });
                            } catch (AmazonClientException e) {
                                getLogger().info(
                                        "Failure uploading part flowfile={} part={} bucket={} key={} "
                                                + "reason={}",
                                        new Object[] { ffFilename, part, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        }

                        // complete multipart upload
                        //------------------------------------------------------------
                        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(
                                bucket, key, currentState.getUploadId(), currentState.getPartETags());
                        try {
                            CompleteMultipartUploadResult completeResult = s3
                                    .completeMultipartUpload(completeRequest);
                            getLogger().info("Success completing upload flowfile={} etag={} uploadId={}",
                                    new Object[] { ffFilename, completeResult.getETag(),
                                            currentState.getUploadId() });
                            if (completeResult.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, completeResult.getVersionId());
                            }
                            if (completeResult.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, completeResult.getETag());
                            }
                            if (completeResult.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY,
                                        completeResult.getExpirationTime().toString());
                            }
                            if (currentState.getStorageClass() != null) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY,
                                        currentState.getStorageClass().toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_MULTIPARTUPLOAD);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}",
                                    new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    }
                }
            }
        });

        if (!attributes.isEmpty()) {
            flowFile = session.putAllAttributes(flowFile, attributes);
        }
        session.transfer(flowFile, REL_SUCCESS);

        final String url = s3.getResourceUrl(bucket, key);
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        session.getProvenanceReporter().send(flowFile, url, millis);

        getLogger().info("Successfully put {} to Amazon S3 in {} milliseconds", new Object[] { ff, millis });
        try {
            removeLocalState(cacheKey);
        } catch (IOException e) {
            getLogger().info("Error trying to delete key {} from cache: {}",
                    new Object[] { cacheKey, e.getMessage() });
        }
    } catch (final ProcessException | AmazonClientException pe) {
        if (pe.getMessage().contains(S3_PROCESS_UNSCHEDULED_MESSAGE)) {
            getLogger().info(pe.getMessage());
            session.rollback();
        } else {
            getLogger().error("Failed to put {} to Amazon S3 due to {}", new Object[] { flowFile, pe });
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_FAILURE);
        }
    }

}

From source file: org.apache.usergrid.services.assets.data.AWSBinaryStore.java

License: Apache License

@Override
public void write(final UUID appId, final Entity entity, InputStream inputStream) throws Exception {

    String uploadFileName = AssetUtils.buildAssetKey(appId, entity);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    long written = IOUtils.copyLarge(inputStream, baos, 0, FIVE_MB);

    byte[] data = baos.toByteArray();

    InputStream awsInputStream = new ByteArrayInputStream(data);

    final Map<String, Object> fileMetadata = AssetUtils.getFileMetadata(entity);
    fileMetadata.put(AssetUtils.LAST_MODIFIED, System.currentTimeMillis());

    String mimeType = AssetMimeHandler.get().getMimeType(entity, data);

    Boolean overSizeLimit = false;

    EntityManager em = emf.getEntityManager(appId);

    if (written < FIVE_MB) { // total smaller than 5 MB

        ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(written);
        om.setContentType(mimeType);
        PutObjectResult result = null;
        result = getS3Client().putObject(bucketName, uploadFileName, awsInputStream, om);

        String md5sum = Hex.encodeHexString(Base64.decodeBase64(result.getContentMd5()));
        String eTag = result.getETag();

        fileMetadata.put(AssetUtils.CONTENT_LENGTH, written);

        if (md5sum != null)
            fileMetadata.put(AssetUtils.CHECKSUM, md5sum);
        fileMetadata.put(AssetUtils.E_TAG, eTag);

        em.update(entity);

    } else { // bigger than 5 MB: write 5 MB temp files and upload from them
        written = 0; // reset written to 0; nothing has actually been written yet
        int partNumber = 1;
        int firstByte = 0;
        Boolean isFirstChunck = true;
        List<PartETag> partETags = new ArrayList<PartETag>();

        //get the s3 client in order to initialize the multipart request
        getS3Client();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName,
                uploadFileName);
        InitiateMultipartUploadResult initResponse = getS3Client().initiateMultipartUpload(initRequest);

        InputStream firstChunck = new ByteArrayInputStream(data);
        PushbackInputStream chunckableInputStream = new PushbackInputStream(inputStream, 1);

        // determine the max file size allowed; default to 50 MB
        long maxSizeBytes = 50 * FileUtils.ONE_MB;
        String maxSizeMbString = properties.getProperty("usergrid.binary.max-size-mb", "50");
        if (StringUtils.isNumeric(maxSizeMbString)) {
            maxSizeBytes = Long.parseLong(maxSizeMbString) * FileUtils.ONE_MB;
        }

        // always allow files up to 5 MB
        if (maxSizeBytes < 5 * FileUtils.ONE_MB) {
            maxSizeBytes = 5 * FileUtils.ONE_MB;
        }

        while (-1 != (firstByte = chunckableInputStream.read())) {
            long partSize = 0;
            chunckableInputStream.unread(firstByte);
            File tempFile = File.createTempFile(
                    entity.getUuid().toString().concat("-part").concat(String.valueOf(partNumber)), "tmp");

            tempFile.deleteOnExit();
            OutputStream os = null;
            try {
                os = new BufferedOutputStream(new FileOutputStream(tempFile.getAbsolutePath()));

                if (isFirstChunck) {
                    partSize = IOUtils.copyLarge(firstChunck, os, 0, (FIVE_MB));
                    isFirstChunck = false;
                } else {
                    partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (FIVE_MB));
                }
                written += partSize;

                if (written > maxSizeBytes) {
                    overSizeLimit = true;
                    logger.error("OVERSIZED FILE ({}). STARTING ABORT", written);
                    break;
                    //set flag here and break out of loop to run abort
                }
            } finally {
                IOUtils.closeQuietly(os);
            }

            FileInputStream chunk = new FileInputStream(tempFile);

            Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
            if (!isLastPart)
                chunckableInputStream.unread(firstByte);

            UploadPartRequest uploadRequest = new UploadPartRequest().withUploadId(initResponse.getUploadId())
                    .withBucketName(bucketName).withKey(uploadFileName).withInputStream(chunk)
                    .withPartNumber(partNumber).withPartSize(partSize).withLastPart(isLastPart);
            partETags.add(getS3Client().uploadPart(uploadRequest).getPartETag());
            partNumber++;
        }

        //check for flag here then abort.
        if (overSizeLimit) {

            AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName,
                    uploadFileName, initResponse.getUploadId());

            ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucketName);

            MultipartUploadListing listResult = getS3Client().listMultipartUploads(listRequest);

            // update the entity with the error
            try {
                logger.error("starting update of entity due to oversized asset");
                fileMetadata.put("error", "Asset size is larger than max size of " + maxSizeBytes);
                em.update(entity);
            } catch (Exception e) {
                logger.error("Error updating entity with error message", e);
            }

            int timesIterated = 20;
            //loop and abort all the multipart uploads
            while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {

                getS3Client().abortMultipartUpload(abortRequest);
                Thread.sleep(1000);
                timesIterated--;
                listResult = getS3Client().listMultipartUploads(listRequest);
                if (logger.isDebugEnabled()) {
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }

            }
            if (timesIterated == 0) {
                logger.error("Files parts that couldn't be aborted in 20 seconds are:");
                Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads().iterator();
                while (multipartUploadIterator.hasNext()) {
                    logger.error(multipartUploadIterator.next().getKey());
                }
            }
        } else {
            CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(bucketName,
                    uploadFileName, initResponse.getUploadId(), partETags);
            CompleteMultipartUploadResult amazonResult = getS3Client().completeMultipartUpload(request);
            fileMetadata.put(AssetUtils.CONTENT_LENGTH, written);
            fileMetadata.put(AssetUtils.E_TAG, amazonResult.getETag());
            em.update(entity);
        }
    }
}