Example usage for com.amazonaws.services.s3.model InitiateMultipartUploadResult getUploadId

Introduction

This page shows example usage of com.amazonaws.services.s3.model.InitiateMultipartUploadResult.getUploadId().

Prototype

public String getUploadId() 

Document

Returns the initiated multipart upload ID.
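
Before the project examples below, here is a minimal sketch of where getUploadId() sits in the multipart lifecycle: the ID returned by the initiate call must be carried through every part upload and the final complete (or abort) call. The bucket name, key, file path, and class name are hypothetical placeholders, the client uses default credentials, and a single part stands in for the usual loop over parts of at least 5 MB.

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.*;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class UploadIdSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Initiate the upload; the returned ID identifies it from here on.
        InitiateMultipartUploadResult init = s3.initiateMultipartUpload(
                new InitiateMultipartUploadRequest("example-bucket", "example-key"));
        String uploadId = init.getUploadId();

        File file = new File("/tmp/example.bin"); // hypothetical local file
        List<PartETag> partETags = new ArrayList<>();
        try {
            // Single part for brevity; real uploads loop over >= 5 MB parts.
            UploadPartResult part = s3.uploadPart(new UploadPartRequest()
                    .withBucketName("example-bucket").withKey("example-key")
                    .withUploadId(uploadId).withPartNumber(1)
                    .withFile(file).withPartSize(file.length()).withLastPart(true));
            partETags.add(part.getPartETag());

            s3.completeMultipartUpload(new CompleteMultipartUploadRequest(
                    "example-bucket", "example-key", uploadId, partETags));
        } catch (AmazonClientException e) {
            // On failure, abort with the same upload ID to free the stored parts.
            s3.abortMultipartUpload(new AbortMultipartUploadRequest(
                    "example-bucket", "example-key", uploadId));
            throw e;
        }
    }
}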

Usage

From source file:c3.ops.priam.aws.S3FileSystem.java

License:Apache License

@Override
public void upload(AbstractBackupPath path, InputStream in) throws BackupRestoreException {
    uploadCount.incrementAndGet();
    AmazonS3 s3Client = getS3Client();
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(config.getBackupPrefix(),
            path.getRemotePath());
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
    DataPart part = new DataPart(config.getBackupPrefix(), path.getRemotePath(), initResponse.getUploadId());
    List<PartETag> partETags = Lists.newArrayList();
    long chunkSize = config.getBackupChunkSize();
    if (path.getSize() > 0)
        chunkSize = (path.getSize() / chunkSize >= MAX_CHUNKS) ? (path.getSize() / (MAX_CHUNKS - 1))
                : chunkSize;
    logger.info(String.format("Uploading to %s/%s with chunk size %d", config.getBackupPrefix(),
            path.getRemotePath(), chunkSize));
    try {
        Iterator<byte[]> chunks = compress.compress(in, chunkSize);
        // Upload parts.
        int partNum = 0;
        while (chunks.hasNext()) {
            byte[] chunk = chunks.next();
            rateLimiter.acquire(chunk.length);
            DataPart dp = new DataPart(++partNum, chunk, config.getBackupPrefix(), path.getRemotePath(),
                    initResponse.getUploadId());
            S3PartUploader partUploader = new S3PartUploader(s3Client, dp, partETags);
            executor.submit(partUploader);
            bytesUploaded.addAndGet(chunk.length);
        }
        executor.sleepTillEmpty();
        if (partNum != partETags.size())
            throw new BackupRestoreException("Number of parts(" + partNum
                    + ")  does not match the uploaded parts(" + partETags.size() + ")");
        new S3PartUploader(s3Client, part, partETags).completeUpload();

        if (logger.isDebugEnabled()) {
            final S3ResponseMetadata responseMetadata = s3Client.getCachedResponseMetadata(initRequest);
            final String requestId = responseMetadata.getRequestId(); // "x-amz-request-id" header
            final String hostId = responseMetadata.getHostId(); // "x-amz-id-2" header
            logger.debug("S3 AWS x-amz-request-id[" + requestId + "], and x-amz-id-2[" + hostId + "]");
        }

    } catch (Exception e) {
        new S3PartUploader(s3Client, part, partETags).abortUpload();
        throw new BackupRestoreException("Error uploading file " + path.getFileName(), e);
    } finally {
        IOUtils.closeQuietly(in);
    }
}

From source file:com.altoukhov.svsync.fileviews.S3FileSpace.java

License:Apache License

public boolean writeLargeFile(InputStream fileStream, FileSnapshot file) {
    if (fileStream == null)
        return false;

    try {
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(file.getFileSize());
        meta.getUserMetadata().put("lmd", file.getModifiedTimestamp().toDate().getTime() + "");
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);

        List<PartETag> partTags = new ArrayList<>();
        String fileKey = toAbsoluteFilePath(file.getRelativePath());

        InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, fileKey, meta);
        InitiateMultipartUploadResult result = s3.initiateMultipartUpload(request);

        long contentLength = file.getFileSize();
        long partSize = 256 * 1024 * 1024;

        try {
            // Uploading the file, part by part.
            long filePosition = 0;

            for (int i = 1; filePosition < contentLength; i++) {

                partSize = Math.min(partSize, (contentLength - filePosition));

                // Creating the request for a part upload
                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName)
                        .withKey(fileKey).withUploadId(result.getUploadId()).withPartNumber(i)
                        .withInputStream(fileStream).withPartSize(partSize);

                // Upload part and add response to the result list.
                partTags.add(s3.uploadPart(uploadRequest).getPartETag());
                filePosition += partSize;

                System.out.println("Uploaded " + Utils.readableFileSize(filePosition) + " out of "
                        + Utils.readableFileSize(contentLength));
            }
        } catch (Exception e) {
            System.out.println("UploadPartRequest failed: " + e.getMessage());
            s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, fileKey, result.getUploadId()));
            return false;
        }

        s3.completeMultipartUpload(
                new CompleteMultipartUploadRequest(bucketName, fileKey, result.getUploadId(), partTags));
    } catch (AmazonClientException ex) {
        System.out.println("Upload failed: " + ex.getMessage());
        return false;

    }
    return true;
}

From source file:com.emc.vipr.s3.s3api.java

License:Open Source License

public static void CreateLargeObject(String S3_ACCESS_KEY_ID, String S3_SECRET_KEY, String S3_ENDPOINT,
        String S3_ViPR_NAMESPACE, String S3_BUCKET, String key, File file, String metaKey, String metaValue)
        throws Exception {

    System.out.println("Access ID:" + S3_ACCESS_KEY_ID);
    System.out.println("Access secret:" + S3_SECRET_KEY);
    System.out.println("Access URL:" + S3_ENDPOINT);
    System.out.println("Access namespace:" + S3_ViPR_NAMESPACE);
    System.out.println("Access bucket:" + S3_BUCKET);
    System.out.println("Access key:" + key);

    ViPRS3Client s3 = getS3Client(S3_ACCESS_KEY_ID, S3_SECRET_KEY, S3_ENDPOINT, S3_ViPR_NAMESPACE);

    ObjectMetadata objmeta = new ObjectMetadata();
    if (!(metaKey.equals("") && metaValue.equals(""))) {

        objmeta.addUserMetadata(metaKey, metaValue);
    }
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(S3_BUCKET, key)
            .withObjectMetadata(objmeta);
    InitiateMultipartUploadResult initResponse = s3.initiateMultipartUpload(initRequest);
    long partSize = 1 * 1024 * 1024; // Set part size to 1 MB. (Note: AWS S3 itself requires at least 5 MB per part except the last; ViPR may differ.)
    // list of UploadPartResponse objects for each part that is uploaded
    List<PartETag> partETags = new ArrayList<PartETag>();
    long filePosition = 0;
    for (int i = 1; filePosition < file.length(); i++) {
        // get the size of the chunk.  Note - the last part can be less than the chunk size
        partSize = Math.min(partSize, (file.length() - filePosition));

        System.out.println(String.format("Sending chunk [%d] starting at position [%d]", i, filePosition));

        // Create request to upload a part.
        UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(S3_BUCKET).withKey(key)
                .withUploadId(initResponse.getUploadId()).withPartNumber(i).withFileOffset(filePosition)
                .withFile(file).withPartSize(partSize);

        // Upload part and add response to our list.
        PartETag eTagPart = s3.uploadPart(uploadRequest).getPartETag();
        partETags.add(eTagPart);

        // set file position to the next part in the file
        filePosition += partSize;
    }
    System.out.println("Waiting for completion of multi-part upload");
    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(S3_BUCKET, key,
            initResponse.getUploadId(), partETags);

    s3.completeMultipartUpload(compRequest);

}

From source file:com.eucalyptus.blockstorage.S3SnapshotTransfer.java

License:Open Source License

private String initiateMultipartUpload(Long uncompressedSize) throws SnapshotInitializeMpuException {
    InitiateMultipartUploadResult initResponse = null;
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
    ObjectMetadata objectMetadata = new ObjectMetadata();
    Map<String, String> userMetadataMap = new HashMap<String, String>();
    userMetadataMap.put(UNCOMPRESSED_SIZE_KEY, String.valueOf(uncompressedSize)); // Send the uncompressed length as the metadata
    objectMetadata.setUserMetadata(userMetadataMap);
    initRequest.setObjectMetadata(objectMetadata);

    try {
        LOG.info("Inititating multipart upload: snapshotId=" + snapshotId + ", bucketName=" + bucketName
                + ", keyName=" + keyName);
        initResponse = retryAfterRefresh(
                new Function<InitiateMultipartUploadRequest, InitiateMultipartUploadResult>() {

                    @Override
                    @Nullable
                    public InitiateMultipartUploadResult apply(@Nullable InitiateMultipartUploadRequest arg0) {
                        eucaS3Client.refreshEndpoint();
                        return eucaS3Client.initiateMultipartUpload(arg0);
                    }

                }, initRequest, REFRESH_TOKEN_RETRIES);
    } catch (Exception ex) {
        throw new SnapshotInitializeMpuException("Failed to initialize multipart upload part for snapshotId="
                + snapshotId + ", bucketName=" + bucketName + ", keyName=" + keyName, ex);
    }

    if (StringUtils.isBlank(initResponse.getUploadId())) {
        throw new SnapshotInitializeMpuException("Invalid upload ID for multipart upload part for snapshotId="
                + snapshotId + ", bucketName=" + bucketName + ", keyName=" + keyName);
    }
    return initResponse.getUploadId();
}

From source file:com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java

License:Open Source License

@Override
public InitiateMultipartUploadResponseType initiateMultipartUpload(InitiateMultipartUploadType request)
        throws S3Exception {
    InitiateMultipartUploadResponseType reply = request.getReply();
    User requestUser = getRequestUser(request);
    OsgInternalS3Client internalS3Client = null;

    String bucketName = request.getBucket();
    String key = request.getKey();
    InitiateMultipartUploadRequest initiateMultipartUploadRequest = new InitiateMultipartUploadRequest(
            bucketName, key);
    ObjectMetadata metadata = new ObjectMetadata();
    for (MetaDataEntry meta : request.getMetaData()) {
        metadata.addUserMetadata(meta.getName(), meta.getValue());
    }

    initiateMultipartUploadRequest.setObjectMetadata(metadata);
    try {
        internalS3Client = getS3Client(requestUser);
        AmazonS3Client s3Client = internalS3Client.getS3Client();
        InitiateMultipartUploadResult result = s3Client.initiateMultipartUpload(initiateMultipartUploadRequest);
        reply.setUploadId(result.getUploadId());
        reply.setBucket(bucketName);
        reply.setKey(key);
        return reply;
    } catch (AmazonServiceException e) {
        LOG.debug("Error from backend", e);
        throw S3ExceptionMapper.fromAWSJavaSDK(e);
    }
}

From source file:com.example.jinux.mydemo.s3.Uploader.java

License:Apache License

/**
 * Initiate a multipart file upload to Amazon S3
 *
 * @return the URL of a successfully uploaded file
 */
public String start() {

    // initialize
    List<PartETag> partETags = new ArrayList<PartETag>();
    final long contentLength = file.length();
    long filePosition = 0;
    int startPartNumber = 1;

    userInterrupted = false;
    userAborted = false;
    bytesUploaded = 0;

    // check if we can resume an incomplete upload
    String uploadId = getCachedUploadId();
    Utils.log("start uploading");
    if (uploadId != null) {
        // we can resume the upload
        Log.i(TAG, "resuming upload for " + uploadId);

        // get the cached etags
        List<PartETag> cachedEtags = getCachedPartEtags();
        partETags.addAll(cachedEtags);

        // calculate the start position for resume
        startPartNumber = cachedEtags.size() + 1;
        filePosition = (startPartNumber - 1) * partSize;
        bytesUploaded = filePosition;

        Log.i(TAG, "resuming at part " + startPartNumber + " position " + filePosition);

    } else {
        // initiate a new multi part upload
        Log.i(TAG, "initiating new upload");

        Utils.log("the bucket = " + s3bucketName);
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(s3bucketName, s3key);
        configureInitiateRequest(initRequest);
        initRequest.getRequestClientOptions()
                .appendUserAgent("TransferService_multipart/" + VersionInfoUtils.getVersion());
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        uploadId = initResponse.getUploadId();

    }

    final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(s3bucketName, s3key,
            uploadId);

    for (int k = startPartNumber; filePosition < contentLength; k++) {

        long thisPartSize = Math.min(partSize, (contentLength - filePosition));

        Log.i(TAG, "starting file part " + k + " with size " + thisPartSize);

        UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(s3bucketName).withKey(s3key)
                .withUploadId(uploadId).withPartNumber(k).withFileOffset(filePosition).withFile(file)
                .withPartSize(thisPartSize);

        ProgressListener s3progressListener = new ProgressListener() {
            public void progressChanged(ProgressEvent progressEvent) {

                // bail out if user cancelled
                // TODO calling shutdown too brute force?
                if (userInterrupted) {
                    s3Client.shutdown();
                    throw new UploadIterruptedException("User interrupted");
                } else if (userAborted) {
                    // aborted requests cannot be resumed, so clear any cached etags
                    clearProgressCache();
                    s3Client.abortMultipartUpload(abortRequest);
                    s3Client.shutdown();
                }

                bytesUploaded += progressEvent.getBytesTransfered();

                //Log.d(TAG, "bytesUploaded=" + bytesUploaded);

                // broadcast progress
                float fpercent = (bytesUploaded * 100f) / contentLength;
                int percent = Math.round(fpercent);
                if (progressListener != null) {
                    progressListener.progressChanged(progressEvent, bytesUploaded, percent);
                }

            }
        };

        uploadRequest.setProgressListener(s3progressListener);

        UploadPartResult result = s3Client.uploadPart(uploadRequest);

        partETags.add(result.getPartETag());

        // cache the part progress for this upload
        if (k == 1) {
            initProgressCache(uploadId);
        }
        // store part etag
        cachePartEtag(result);

        filePosition += thisPartSize;
    }

    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(s3bucketName, s3key,
            uploadId, partETags);

    CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(compRequest);
    bytesUploaded = 0;

    Log.i(TAG, "upload complete for " + uploadId);

    clearProgressCache();

    return result.getLocation();

}

From source file:com.ge.predix.sample.blobstore.repository.BlobstoreService.java

License:Apache License

/**
 * Adds a new Blob to the binded bucket in the Object Store
 *
 * @param obj S3Object to be added
 * @throws Exception
 */
public void put(S3Object obj) throws Exception {
    if (obj == null) {
        log.error("put(): Empty file provided");
        throw new Exception("File is null");
    }
    InputStream is = obj.getObjectContent();

    List<PartETag> partETags = new ArrayList<>();

    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, obj.getKey());
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
    try {

        int i = 1;
        int currentPartSize = 0;
        ByteArrayOutputStream tempBuffer = new ByteArrayOutputStream();
        int byteValue;
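        // Buffer the stream in memory, flushing a 50 MB part to S3 each time the buffer fills.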
        while ((byteValue = is.read()) != -1) {
            tempBuffer.write(byteValue);
            currentPartSize = tempBuffer.size();
            if (currentPartSize == (50 * 1024 * 1024)) //make this a const
            {
                byte[] b = tempBuffer.toByteArray();
                ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

                UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                        .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i++)
                        .withInputStream(byteStream).withPartSize(currentPartSize);
                partETags.add(s3Client.uploadPart(uploadPartRequest).getPartETag());

                tempBuffer.reset();
            }
        }
        log.info("currentPartSize: " + currentPartSize);
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(currentPartSize);
        obj.setObjectMetadata(objectMetadata);

        if (i == 1 && currentPartSize < (5 * 1024 * 1024)) // make this a const
        {
            s3Client.abortMultipartUpload(
                    new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));

            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);
            objectMetadata.setContentType(getContentType(b));
            obj.setObjectMetadata(objectMetadata);

            PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, obj.getKey(), byteStream,
                    obj.getObjectMetadata());
            s3Client.putObject(putObjectRequest);
            return;
        }

        if (currentPartSize > 0 && currentPartSize <= (50 * 1024 * 1024)) // make this a const
        {
            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

            log.info("currentPartSize: " + currentPartSize);
            log.info("byteArray: " + b);

            UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withInputStream(byteStream).withPartSize(currentPartSize);
            partETags.add(s3Client.uploadPart(uploadPartRequest).getPartETag());
        }
    } catch (Exception e) {
        log.error("put(): Exception occurred in put(): " + e.getMessage());
        s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));
        throw e;
    }
    CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
            .withBucketName(bucket).withPartETags(partETags).withUploadId(initResponse.getUploadId())
            .withKey(obj.getKey());

    s3Client.completeMultipartUpload(completeMultipartUploadRequest);
}

From source file:com.ge.predix.solsvc.blobstore.bootstrap.BlobstoreClientImpl.java

License:Apache License

/**
 * Adds a new Blob to the binded bucket in the Object Store
 *
 * @param obj S3Object to be added
 */
@Override
public String saveBlob(S3Object obj) {
    if (obj == null) {
        this.log.error("put(): Empty file provided"); //$NON-NLS-1$
        throw new RuntimeException("File is null"); //$NON-NLS-1$
    }
    List<PartETag> partETags = new ArrayList<>();
    String bucket = this.blobstoreConfig.getBucketName();
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, obj.getKey());
    InitiateMultipartUploadResult initResponse = this.s3Client.initiateMultipartUpload(initRequest);
    try (InputStream is = obj.getObjectContent();) {

        int i = 1;
        int currentPartSize = 0;
        ByteArrayOutputStream tempBuffer = new ByteArrayOutputStream();
        int byteValue;
        while ((byteValue = is.read()) != -1) {
            tempBuffer.write(byteValue);
            currentPartSize = tempBuffer.size();
            if (currentPartSize == (50 * 1024 * 1024)) //make this a const
            {
                byte[] b = tempBuffer.toByteArray();
                ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

                UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                        .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i++)
                        .withInputStream(byteStream).withPartSize(currentPartSize);
                partETags.add(this.s3Client.uploadPart(uploadPartRequest).getPartETag());

                tempBuffer.reset();
            }
        }
        this.log.info("currentPartSize: " + currentPartSize); //$NON-NLS-1$
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(currentPartSize);
        if (this.enableSSE) {
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        obj.setObjectMetadata(objectMetadata);

        if (i == 1 && currentPartSize < (5 * 1024 * 1024)) // make this a const
        {
            this.s3Client.abortMultipartUpload(
                    new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));

            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);
            objectMetadata.setContentType(getContentType(b));
            if (this.enableSSE) {
                objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            }
            obj.setObjectMetadata(objectMetadata);

            PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, obj.getKey(), byteStream,
                    obj.getObjectMetadata());
            this.s3Client.putObject(putObjectRequest);

            ObjectMetadata meta = this.s3Client.getObjectMetadata(bucket, obj.getKey());
            Map<String, Object> headers = meta.getRawMetadata();
            for (Map.Entry<String, Object> entry : headers.entrySet()) {
                this.log.info("Object Metadata -- " + entry.getKey() + ": " + entry.getValue().toString()); //$NON-NLS-1$ //$NON-NLS-2$
            }

            return initResponse.getUploadId();
        }

        if (currentPartSize > 0 && currentPartSize <= (50 * 1024 * 1024)) // make this a const
        {
            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

            this.log.info("currentPartSize: " + currentPartSize); //$NON-NLS-1$
            this.log.info("byteArray: " + b); //$NON-NLS-1$

            UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withInputStream(byteStream).withPartSize(currentPartSize);
            partETags.add(this.s3Client.uploadPart(uploadPartRequest).getPartETag());
        }

        CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
                .withBucketName(bucket).withPartETags(partETags).withUploadId(initResponse.getUploadId())
                .withKey(obj.getKey());

        this.s3Client.completeMultipartUpload(completeMultipartUploadRequest);
        return initResponse.getUploadId();
    } catch (Exception e) {
        this.log.error("put(): Exception occurred in put(): " + e.getMessage()); //$NON-NLS-1$
        this.s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));
        throw new RuntimeException("put(): Exception occurred in put(): ", e); //$NON-NLS-1$
    }
}

From source file:com.handywedge.binarystore.store.aws.BinaryStoreManagerImpl.java

License:MIT License

@Override
public BinaryInfo upload(StorageInfo storage, BinaryInfo binary, InputStream inStream) throws StoreException {
    logger.debug("={}", storage);
    logger.debug("?={}", binary);

    AmazonS3 s3client = getS3Client(binary.getBucketName());

    ObjectMetadata oMetadata = new ObjectMetadata();
    oMetadata.setContentType(binary.getContentType());

    // Initiate the multipart upload.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(binary.getBucketName(),
            binary.getFileName(), oMetadata);
    InitiateMultipartUploadResult initResponse = s3client.initiateMultipartUpload(initRequest);

    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        long written = IOUtils.copyLarge(inStream, baos, 0, BINARY_PART_SIZE_5MB);

        byte[] data = baos.toByteArray();
        InputStream awsInputStream = new ByteArrayInputStream(data);

        if (written < BINARY_PART_SIZE_5MB) {
            oMetadata.setContentLength(written);
            s3client.putObject(binary.getBucketName(), binary.getFileName(), awsInputStream, oMetadata);
        } else {
            int firstByte = 0;
            int partNumber = 1;
            boolean isFirstChunck = true;
            boolean overSizeLimit = false;
            List<PartETag> partETags = new ArrayList<PartETag>();
            InputStream firstChunck = new ByteArrayInputStream(data);
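            // Wrap the stream with a 1-byte pushback buffer so the loop can peek ahead and detect the final part.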
            PushbackInputStream chunckableInputStream = new PushbackInputStream(inStream, 1);

            long maxSize = BINARY_PART_SIZE_5MB * 1024;
            String maxSizeStr = "5GB";
            String prefix = MDC.get("requestId");
            while (-1 != (firstByte = chunckableInputStream.read())) {
                long partSize = 0;
                chunckableInputStream.unread(firstByte);
                File tempFile = File.createTempFile(prefix.concat("-part").concat(String.valueOf(partNumber)),
                        null);
                tempFile.deleteOnExit();
                try (OutputStream os = new BufferedOutputStream(
                        new FileOutputStream(tempFile.getAbsolutePath()))) {

                    if (isFirstChunck) {
                        partSize = IOUtils.copyLarge(firstChunck, os, 0, (BINARY_PART_SIZE_5MB));
                        isFirstChunck = false;
                    } else {
                        partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (BINARY_PART_SIZE_5MB));
                    }
                    written += partSize;

                    if (written > maxSize) { // 5GB
                        overSizeLimit = true;
                        logger.warn("OVERSIZED FILE ({}). STARTING ABORT", written);
                        break;
                    }
                }

                FileInputStream chunk = new FileInputStream(tempFile);
                Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
                if (!isLastPart) {
                    chunckableInputStream.unread(firstByte);
                }

                oMetadata.setContentLength(partSize);

                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(binary.getBucketName())
                        .withKey(binary.getFileName()).withUploadId(initResponse.getUploadId())
                        .withObjectMetadata(oMetadata).withInputStream(chunk).withPartSize(partSize)
                        .withPartNumber(partNumber).withLastPart(isLastPart);
                UploadPartResult result = s3client.uploadPart(uploadRequest);
                partETags.add(result.getPartETag());
                partNumber++;
            }

            if (overSizeLimit) {
                ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(
                        binary.getBucketName());
                MultipartUploadListing listResult = s3client.listMultipartUploads(listRequest);

                int timesIterated = 20;
                // loop and abort all the multipart uploads
                while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {
                    s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                            binary.getFileName(), initResponse.getUploadId()));
                    Thread.sleep(1000);
                    timesIterated--;
                    listResult = s3client.listMultipartUploads(listRequest);
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }
                if (timesIterated == 0) {
                    logger.warn("Files parts that couldn't be aborted in 20 seconds are:");
                    Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads()
                            .iterator();
                    while (multipartUploadIterator.hasNext()) {
                        logger.warn(multipartUploadIterator.next().getKey());
                    }
                }
                throw new StoreException(HttpStatus.SC_REQUEST_TOO_LONG, ErrorClassification.UPLOAD_TOO_LARGE,
                        maxSizeStr);
            } else {
                CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                        binary.getBucketName(), binary.getFileName(), initResponse.getUploadId(), partETags);

                CompleteMultipartUploadResult comMPUResult = s3client.completeMultipartUpload(compRequest);
                logger.debug("CompleteMultipartUploadResult={}", comMPUResult);
            }
        }
    } catch (AmazonServiceException ase) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ase,
                binary.toString());
    } catch (AmazonClientException ace) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ace,
                binary.toString());
    } catch (IOException ioe) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, ioe,
                binary.toString());
    } catch (InterruptedException itre) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, itre,
                binary.toString());
    } finally {
        if (inStream != null) {
            try {
                inStream.close();
            } catch (Exception e) {
            }
        }
    }

    return getBinaryInfo(s3client, binary.getBucketName(), binary.getFileName());
}

From source file:com.netflix.bdp.s3.S3Util.java

License:Apache License

public static PendingUpload multipartUpload(AmazonS3 client, File localFile, String partition, String bucket,
        String key, long uploadPartSize) {

    InitiateMultipartUploadResult initiate = client
            .initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key));
    String uploadId = initiate.getUploadId();

    boolean threw = true;
    try {
        Map<Integer, String> etags = Maps.newLinkedHashMap();

        long offset = 0;
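        // Ceiling division: add one extra part when the file size is not an exact multiple of the part size.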
        long numParts = (localFile.length() / uploadPartSize
                + ((localFile.length() % uploadPartSize) > 0 ? 1 : 0));

        Preconditions.checkArgument(numParts > 0, "Cannot upload 0 byte file: " + localFile);

        for (int partNumber = 1; partNumber <= numParts; partNumber += 1) {
            long size = Math.min(localFile.length() - offset, uploadPartSize);
            UploadPartRequest part = new UploadPartRequest().withBucketName(bucket).withKey(key)
                    .withPartNumber(partNumber).withUploadId(uploadId).withFile(localFile)
                    .withFileOffset(offset).withPartSize(size).withLastPart(partNumber == numParts);

            UploadPartResult partResult = client.uploadPart(part);
            PartETag etag = partResult.getPartETag();
            etags.put(etag.getPartNumber(), etag.getETag());

            offset += uploadPartSize;
        }

        PendingUpload pending = new PendingUpload(partition, bucket, key, uploadId, etags);

        threw = false;

        return pending;

    } finally {
        if (threw) {
            try {
                client.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
            } catch (AmazonClientException e) {
                LOG.error("Failed to abort multi-part upload", e);
            }
        }
    }
}