Example usage for com.amazonaws.services.s3.model AbortMultipartUploadRequest AbortMultipartUploadRequest

Introduction

This page collects example usages of the AbortMultipartUploadRequest(String, String, String) constructor from com.amazonaws.services.s3.model.

Prototype

public AbortMultipartUploadRequest(String bucketName, String key, String uploadId) 

Document

Constructs a new request to abort a multipart upload.
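
A minimal sketch of the constructor in use (not taken from the sources below): the bucket name, key, and upload ID here are hypothetical placeholders, and in practice the upload ID comes from the InitiateMultipartUploadResult returned when the multipart upload was started.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;

public class AbortExample {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Abort a previously initiated multipart upload; S3 discards any
        // parts that were already uploaded for this upload ID.
        s3.abortMultipartUpload(
                new AbortMultipartUploadRequest("my-bucket", "path/to/my-key", "example-upload-id"));
    }
}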

Usage

From source file:alluxio.underfs.s3a.S3ALowLevelOutputStream.java

License:Apache License

/**
 * Aborts the multipart upload.
 */
private void abortMultiPartUpload() {
    AmazonClientException lastException;
    do {
        try {
            mClient.abortMultipartUpload(new AbortMultipartUploadRequest(mBucketName, mKey, mUploadId));
            LOG.warn("Aborted multipart upload for key {} and id '{}' to bucket {}", mKey, mUploadId,
                    mBucketName);
            return;
        } catch (AmazonClientException e) {
            lastException = e;
        }
    } while (mRetryPolicy.attempt());
    // This point is only reached if the operation failed more
    // than the allowed retry count
    LOG.warn(
            "Unable to abort multipart upload for key '{}' and id '{}' to bucket {}. "
                    + "You may need to enable the periodical cleanup by setting property {}" + "to be true.",
            mKey, mUploadId, mBucketName, PropertyKey.UNDERFS_CLEANUP_ENABLED.getName(), lastException);
}

From source file:baldrickv.s3streamingtool.S3CleanupMultipart.java

License:Open Source License

public static void cleanup(S3StreamConfig config) throws Exception {

    AmazonS3Client s3 = config.getS3Client();
    String bucket = config.getS3Bucket();

    ListMultipartUploadsRequest list_req = new ListMultipartUploadsRequest(bucket);

    List<MultipartUpload> list = s3.listMultipartUploads(list_req).getMultipartUploads();

    Scanner scan = new Scanner(System.in);

    for (MultipartUpload mu : list) {
        System.out.println("-----------------------");
        System.out.println("  bucket: " + bucket);
        System.out.println("  key: " + mu.getKey());
        System.out.println("  uploadId: " + mu.getUploadId());
        System.out.println("  initiated at: " + mu.getInitiated());
        System.out.println("  initiated by: " + mu.getInitiator());
        System.out.println("-----------------------");

        System.out.print("Abort this upload [y|N]? ");
        String result = scan.nextLine().trim().toLowerCase();
        if (result.equals("y")) {
            AbortMultipartUploadRequest abort = new AbortMultipartUploadRequest(bucket, mu.getKey(),
                    mu.getUploadId());

            s3.abortMultipartUpload(abort);
            System.out.println("Aborted upload");
        } else {
            System.out.println("Leaving this one alone");
        }
    }
}

From source file:c3.ops.priam.aws.S3PartUploader.java

License:Apache License

public void abortUpload() {
    AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(dataPart.getBucketName(),
            dataPart.getS3key(), dataPart.getUploadID());
    client.abortMultipartUpload(abortRequest);
}

From source file:com.altoukhov.svsync.fileviews.S3FileSpace.java

License:Apache License

public boolean writeLargeFile(InputStream fileStream, FileSnapshot file) {
    if (fileStream == null)
        return false;

    try {
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(file.getFileSize());
        meta.getUserMetadata().put("lmd", file.getModifiedTimestamp().toDate().getTime() + "");
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);

        List<PartETag> partTags = new ArrayList<>();
        String fileKey = toAbsoluteFilePath(file.getRelativePath());

        InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, fileKey, meta);
        InitiateMultipartUploadResult result = s3.initiateMultipartUpload(request);

        long contentLength = file.getFileSize();
        long partSize = 256 * 1024 * 1024;

        try {
            // Uploading the file, part by part.
            long filePosition = 0;

            for (int i = 1; filePosition < contentLength; i++) {

                partSize = Math.min(partSize, (contentLength - filePosition));

                // Creating the request for a part upload
                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName)
                        .withKey(fileKey).withUploadId(result.getUploadId()).withPartNumber(i)
                        .withInputStream(fileStream).withPartSize(partSize);

                // Upload part and add response to the result list.
                partTags.add(s3.uploadPart(uploadRequest).getPartETag());
                filePosition += partSize;

                System.out.println("Uploaded " + Utils.readableFileSize(filePosition) + " out of "
                        + Utils.readableFileSize(contentLength));
            }
        } catch (Exception e) {
            System.out.println("UploadPartRequest failed: " + e.getMessage());
            s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, fileKey, result.getUploadId()));
            return false;
        }

        s3.completeMultipartUpload(
                new CompleteMultipartUploadRequest(bucketName, fileKey, result.getUploadId(), partTags));
    } catch (AmazonClientException ex) {
        System.out.println("Upload failed: " + ex.getMessage());
        return false;

    }
    return true;
}

From source file:com.eucalyptus.blockstorage.S3SnapshotTransfer.java

License:Open Source License

private void abortMultipartUpload() {
    if (uploadId != null) {
        try {
            LOG.debug("Aborting multipart upload: snapshotId=" + snapshotId + ", bucketName=" + bucketName
                    + ", keyName=" + keyName + ", uploadId=" + uploadId);
            retryAfterRefresh(new Function<AbortMultipartUploadRequest, String>() {

                @Override
                @Nullable
                public String apply(@Nullable AbortMultipartUploadRequest arg0) {
                    eucaS3Client.refreshEndpoint();
                    eucaS3Client.abortMultipartUpload(arg0);
                    return null;
                }
            }, new AbortMultipartUploadRequest(bucketName, keyName, uploadId), REFRESH_TOKEN_RETRIES);
        } catch (Exception e) {
            LOG.debug("Failed to abort multipart upload for snapshot " + snapshotId);
        }
    }
}

From source file:com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java

License:Open Source License

@Override
public AbortMultipartUploadResponseType abortMultipartUpload(AbortMultipartUploadType request)
        throws S3Exception {
    AbortMultipartUploadResponseType reply = request.getReply();
    User requestUser = getRequestUser(request);
    OsgInternalS3Client internalS3Client = null;

    String bucketName = request.getBucket();
    String key = request.getKey();
    String uploadId = request.getUploadId();
    AbortMultipartUploadRequest abortMultipartUploadRequest = new AbortMultipartUploadRequest(bucketName, key,
            uploadId);
    try {
        internalS3Client = getS3Client(requestUser);
        AmazonS3Client s3Client = internalS3Client.getS3Client();

        s3Client.abortMultipartUpload(abortMultipartUploadRequest);
    } catch (AmazonServiceException e) {
        LOG.debug("Error from backend", e);
        throw S3ExceptionMapper.fromAWSJavaSDK(e);
    }
    return reply;
}

From source file:com.example.jinux.mydemo.s3.Uploader.java

License:Apache License

/**
 * Initiate a multipart file upload to Amazon S3
 *
 * @return the URL of a successfully uploaded file
 */
public String start() {

    // initialize
    List<PartETag> partETags = new ArrayList<PartETag>();
    final long contentLength = file.length();
    long filePosition = 0;
    int startPartNumber = 1;

    userInterrupted = false;
    userAborted = false;
    bytesUploaded = 0;

    // check if we can resume an incomplete upload
    String uploadId = getCachedUploadId();
    Utils.log("start uploading");
    if (uploadId != null) {
        // we can resume the upload
        Log.i(TAG, "resuming upload for " + uploadId);

        // get the cached etags
        List<PartETag> cachedEtags = getCachedPartEtags();
        partETags.addAll(cachedEtags);

        // calculate the start position for resume
        startPartNumber = cachedEtags.size() + 1;
        filePosition = (startPartNumber - 1) * partSize;
        bytesUploaded = filePosition;

        Log.i(TAG, "resuming at part " + startPartNumber + " position " + filePosition);

    } else {
        // initiate a new multi part upload
        Log.i(TAG, "initiating new upload");

        Utils.log("the bucket = " + s3bucketName);
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(s3bucketName, s3key);
        configureInitiateRequest(initRequest);
        initRequest.getRequestClientOptions()
                .appendUserAgent("TransferService_multipart/" + VersionInfoUtils.getVersion());
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        uploadId = initResponse.getUploadId();

    }

    final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(s3bucketName, s3key,
            uploadId);

    for (int k = startPartNumber; filePosition < contentLength; k++) {

        long thisPartSize = Math.min(partSize, (contentLength - filePosition));

        Log.i(TAG, "starting file part " + k + " with size " + thisPartSize);

        UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(s3bucketName).withKey(s3key)
                .withUploadId(uploadId).withPartNumber(k).withFileOffset(filePosition).withFile(file)
                .withPartSize(thisPartSize);

        ProgressListener s3progressListener = new ProgressListener() {
            public void progressChanged(ProgressEvent progressEvent) {

                // bail out if user cancelled
                // TODO calling shutdown too brute force?
                if (userInterrupted) {
                    s3Client.shutdown();
                    throw new UploadIterruptedException("User interrupted");
                } else if (userAborted) {
                    // aborted requests cannot be resumed, so clear any cached etags
                    clearProgressCache();
                    s3Client.abortMultipartUpload(abortRequest);
                    s3Client.shutdown();
                }

                bytesUploaded += progressEvent.getBytesTransfered();

                //Log.d(TAG, "bytesUploaded=" + bytesUploaded);

                // broadcast progress
                float fpercent = (bytesUploaded * 100f) / contentLength;
                int percent = Math.round(fpercent);
                if (progressListener != null) {
                    progressListener.progressChanged(progressEvent, bytesUploaded, percent);
                }

            }
        };

        uploadRequest.setProgressListener(s3progressListener);

        UploadPartResult result = s3Client.uploadPart(uploadRequest);

        partETags.add(result.getPartETag());

        // cache the part progress for this upload
        if (k == 1) {
            initProgressCache(uploadId);
        }
        // store part etag
        cachePartEtag(result);

        filePosition += thisPartSize;
    }

    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(s3bucketName, s3key,
            uploadId, partETags);

    CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(compRequest);
    bytesUploaded = 0;

    Log.i(TAG, "upload complete for " + uploadId);

    clearProgressCache();

    return result.getLocation();

}

From source file:com.ge.predix.sample.blobstore.repository.BlobstoreService.java

License:Apache License

/**
 * Adds a new Blob to the bound bucket in the Object Store
 *
 * @param obj S3Object to be added
 * @throws Exception
 */
public void put(S3Object obj) throws Exception {
    if (obj == null) {
        log.error("put(): Empty file provided");
        throw new Exception("File is null");
    }
    InputStream is = obj.getObjectContent();

    List<PartETag> partETags = new ArrayList<>();

    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, obj.getKey());
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
    try {

        int i = 1;
        int currentPartSize = 0;
        ByteArrayOutputStream tempBuffer = new ByteArrayOutputStream();
        int byteValue;
        while ((byteValue = is.read()) != -1) {
            tempBuffer.write(byteValue);
            currentPartSize = tempBuffer.size();
            if (currentPartSize == (50 * 1024 * 1024)) //make this a const
            {
                byte[] b = tempBuffer.toByteArray();
                ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

                UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                        .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i++)
                        .withInputStream(byteStream).withPartSize(currentPartSize);
                partETags.add(s3Client.uploadPart(uploadPartRequest).getPartETag());

                tempBuffer.reset();
            }
        }
        log.info("currentPartSize: " + currentPartSize);
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(currentPartSize);
        obj.setObjectMetadata(objectMetadata);

        if (i == 1 && currentPartSize < (5 * 1024 * 1024)) // make this a const
        {
            s3Client.abortMultipartUpload(
                    new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));

            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);
            objectMetadata.setContentType(getContentType(b));
            obj.setObjectMetadata(objectMetadata);

            PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, obj.getKey(), byteStream,
                    obj.getObjectMetadata());
            s3Client.putObject(putObjectRequest);
            return;
        }

        if (currentPartSize > 0 && currentPartSize <= (50 * 1024 * 1024)) // make this a const
        {
            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

            log.info("currentPartSize: " + currentPartSize);
            log.info("byteArray: " + b);

            UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withInputStream(byteStream).withPartSize(currentPartSize);
            partETags.add(s3Client.uploadPart(uploadPartRequest).getPartETag());
        }
    } catch (Exception e) {
        log.error("put(): Exception occurred in put(): " + e.getMessage());
        s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));
        throw e;
    }
    CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
            .withBucketName(bucket).withPartETags(partETags).withUploadId(initResponse.getUploadId())
            .withKey(obj.getKey());

    s3Client.completeMultipartUpload(completeMultipartUploadRequest);
}

From source file:com.ge.predix.solsvc.blobstore.bootstrap.BlobstoreClientImpl.java

License:Apache License

/**
 * Adds a new Blob to the bound bucket in the Object Store
 *
 * @param obj S3Object to be added
 */
@Override
public String saveBlob(S3Object obj) {
    if (obj == null) {
        this.log.error("put(): Empty file provided"); //$NON-NLS-1$
        throw new RuntimeException("File is null"); //$NON-NLS-1$
    }
    List<PartETag> partETags = new ArrayList<>();
    String bucket = this.blobstoreConfig.getBucketName();
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, obj.getKey());
    InitiateMultipartUploadResult initResponse = this.s3Client.initiateMultipartUpload(initRequest);
    try (InputStream is = obj.getObjectContent();) {

        int i = 1;
        int currentPartSize = 0;
        ByteArrayOutputStream tempBuffer = new ByteArrayOutputStream();
        int byteValue;
        while ((byteValue = is.read()) != -1) {
            tempBuffer.write(byteValue);
            currentPartSize = tempBuffer.size();
            if (currentPartSize == (50 * 1024 * 1024)) //make this a const
            {
                byte[] b = tempBuffer.toByteArray();
                ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

                UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                        .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i++)
                        .withInputStream(byteStream).withPartSize(currentPartSize);
                partETags.add(this.s3Client.uploadPart(uploadPartRequest).getPartETag());

                tempBuffer.reset();
            }
        }
        this.log.info("currentPartSize: " + currentPartSize); //$NON-NLS-1$
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(currentPartSize);
        if (this.enableSSE) {
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        obj.setObjectMetadata(objectMetadata);

        if (i == 1 && currentPartSize < (5 * 1024 * 1024)) // make this a const
        {
            this.s3Client.abortMultipartUpload(
                    new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));

            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);
            objectMetadata.setContentType(getContentType(b));
            if (this.enableSSE) {
                objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            }
            obj.setObjectMetadata(objectMetadata);

            PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, obj.getKey(), byteStream,
                    obj.getObjectMetadata());
            this.s3Client.putObject(putObjectRequest);

            ObjectMetadata meta = this.s3Client.getObjectMetadata(bucket, obj.getKey());
            Map<String, Object> headers = meta.getRawMetadata();
            for (Map.Entry<String, Object> entry : headers.entrySet()) {
                this.log.info("Object Metadata -- " + entry.getKey() + ": " + entry.getValue().toString()); //$NON-NLS-1$ //$NON-NLS-2$
            }

            return initResponse.getUploadId();
        }

        if (currentPartSize > 0 && currentPartSize <= (50 * 1024 * 1024)) // make this a const
        {
            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

            this.log.info("currentPartSize: " + currentPartSize); //$NON-NLS-1$
            this.log.info("byteArray: " + b); //$NON-NLS-1$

            UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withInputStream(byteStream).withPartSize(currentPartSize);
            partETags.add(this.s3Client.uploadPart(uploadPartRequest).getPartETag());
        }

        CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
                .withBucketName(bucket).withPartETags(partETags).withUploadId(initResponse.getUploadId())
                .withKey(obj.getKey());

        this.s3Client.completeMultipartUpload(completeMultipartUploadRequest);
        return initResponse.getUploadId();
    } catch (Exception e) {
        this.log.error("put(): Exception occurred in put(): " + e.getMessage()); //$NON-NLS-1$
        this.s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));
        throw new RuntimeException("put(): Exception occurred in put(): ", e); //$NON-NLS-1$
    }
}

From source file:com.handywedge.binarystore.store.aws.BinaryStoreManagerImpl.java

License:MIT License

@Override
public BinaryInfo upload(StorageInfo storage, BinaryInfo binary, InputStream inStream) throws StoreException {
    logger.debug("={}", storage);
    logger.debug("?={}", binary);

    AmazonS3 s3client = getS3Client(binary.getBucketName());

    ObjectMetadata oMetadata = new ObjectMetadata();
    oMetadata.setContentType(binary.getContentType());

    // Initiate the multipart upload
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(binary.getBucketName(),
            binary.getFileName(), oMetadata);
    InitiateMultipartUploadResult initResponse = s3client.initiateMultipartUpload(initRequest);

    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        long written = IOUtils.copyLarge(inStream, baos, 0, BINARY_PART_SIZE_5MB);

        byte[] data = baos.toByteArray();
        InputStream awsInputStream = new ByteArrayInputStream(data);

        if (written < BINARY_PART_SIZE_5MB) {
            oMetadata.setContentLength(written);
            s3client.putObject(binary.getBucketName(), binary.getFileName(), awsInputStream, oMetadata);
        } else {
            int firstByte = 0;
            int partNumber = 1;
            boolean isFirstChunck = true;
            boolean overSizeLimit = false;
            List<PartETag> partETags = new ArrayList<PartETag>();
            InputStream firstChunck = new ByteArrayInputStream(data);
            PushbackInputStream chunckableInputStream = new PushbackInputStream(inStream, 1);

            long maxSize = BINARY_PART_SIZE_5MB * 1024;
            String maxSizeStr = "5GB";
            String prefix = MDC.get("requestId");
            while (-1 != (firstByte = chunckableInputStream.read())) {
                long partSize = 0;
                chunckableInputStream.unread(firstByte);
                File tempFile = File.createTempFile(prefix.concat("-part").concat(String.valueOf(partNumber)),
                        null);
                tempFile.deleteOnExit();
                try (OutputStream os = new BufferedOutputStream(
                        new FileOutputStream(tempFile.getAbsolutePath()))) {

                    if (isFirstChunck) {
                        partSize = IOUtils.copyLarge(firstChunck, os, 0, (BINARY_PART_SIZE_5MB));
                        isFirstChunck = false;
                    } else {
                        partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (BINARY_PART_SIZE_5MB));
                    }
                    written += partSize;

                    if (written > maxSize) { // 5GB
                        overSizeLimit = true;
                        logger.warn("OVERSIZED FILE ({}). STARTING ABORT", written);
                        break;
                    }
                }

                FileInputStream chunk = new FileInputStream(tempFile);
                Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
                if (!isLastPart) {
                    chunckableInputStream.unread(firstByte);
                }

                oMetadata.setContentLength(partSize);

                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(binary.getBucketName())
                        .withKey(binary.getFileName()).withUploadId(initResponse.getUploadId())
                        .withObjectMetadata(oMetadata).withInputStream(chunk).withPartSize(partSize)
                        .withPartNumber(partNumber).withLastPart(isLastPart);
                UploadPartResult result = s3client.uploadPart(uploadRequest);
                partETags.add(result.getPartETag());
                partNumber++;
            }

            if (overSizeLimit) {
                ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(
                        binary.getBucketName());
                MultipartUploadListing listResult = s3client.listMultipartUploads(listRequest);

                int timesIterated = 20;
                // loop and abort all the multipart uploads
                while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {
                    s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                            binary.getFileName(), initResponse.getUploadId()));
                    Thread.sleep(1000);
                    timesIterated--;
                    listResult = s3client.listMultipartUploads(listRequest);
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }
                if (timesIterated == 0) {
                    logger.warn("Files parts that couldn't be aborted in 20 seconds are:");
                    Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads()
                            .iterator();
                    while (multipartUploadIterator.hasNext()) {
                        logger.warn(multipartUploadIterator.next().getKey());
                    }
                }
                throw new StoreException(HttpStatus.SC_REQUEST_TOO_LONG, ErrorClassification.UPLOAD_TOO_LARGE,
                        maxSizeStr);
            } else {
                CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                        binary.getBucketName(), binary.getFileName(), initResponse.getUploadId(), partETags);

                CompleteMultipartUploadResult comMPUResult = s3client.completeMultipartUpload(compRequest);
                logger.debug("CompleteMultipartUploadResult={}", comMPUResult);
            }
        }
    } catch (AmazonServiceException ase) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ase,
                binary.toString());
    } catch (AmazonClientException ace) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ace,
                binary.toString());
    } catch (IOException ioe) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, ioe,
                binary.toString());
    } catch (InterruptedException itre) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, itre,
                binary.toString());
    } finally {
        if (inStream != null) {
            try {
                inStream.close();
            } catch (Exception e) {
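                // best-effort close: ignore failures during cleanup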
            }
        }
    }

    return getBinaryInfo(s3client, binary.getBucketName(), binary.getFileName());
}