Example usage for com.amazonaws.services.s3.model.InitiateMultipartUploadRequest: the InitiateMultipartUploadRequest(String bucketName, String key) constructor

Introduction

On this page you can find example usage for the com.amazonaws.services.s3.model.InitiateMultipartUploadRequest constructor InitiateMultipartUploadRequest(String bucketName, String key).

Prototype

public InitiateMultipartUploadRequest(String bucketName, String key) 

Document

Constructs a request to initiate a new multipart upload in the specified bucket, stored under the specified key.
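
Before the project examples below, here is a minimal, self-contained sketch of the typical lifecycle around this constructor: initiate the upload, send one or more parts, then complete (or abort on failure). The bucket name, key, and file path are placeholder assumptions, not taken from any of the projects listed on this page.

import java.io.File;
import java.util.ArrayList;
import java.util.List;

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.*;

public class MultipartUploadSketch {
    public static void main(String[] args) {
        // Hypothetical bucket, key, and file; replace with real values.
        String bucket = "my-bucket";
        String key = "my-key";
        File file = new File("/tmp/data.bin");

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Optional metadata can be attached fluently, as several examples below do.
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentType("application/octet-stream");
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, key)
                .withObjectMetadata(meta);
        String uploadId = s3.initiateMultipartUpload(initRequest).getUploadId();

        try {
            // One part is enough for this sketch; real uploads loop over parts,
            // each at least 5 MB except the last.
            List<PartETag> partETags = new ArrayList<>();
            UploadPartRequest partRequest = new UploadPartRequest().withBucketName(bucket).withKey(key)
                    .withUploadId(uploadId).withPartNumber(1).withFile(file).withPartSize(file.length())
                    .withLastPart(true);
            partETags.add(s3.uploadPart(partRequest).getPartETag());

            s3.completeMultipartUpload(new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags));
        } catch (AmazonClientException e) {
            // Abort so the incomplete upload does not keep accruing storage charges.
            s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
            throw e;
        }
    }
}

The examples below follow this same shape, differing mainly in how they produce parts (files, streams, compressed chunks) and in how they retry, resume, or abort.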

Usage

From source file: alluxio.underfs.s3a.S3ALowLevelOutputStream.java

License: Apache License

/**
 * Initializes multipart upload.
 */
private void initMultiPartUpload() throws IOException {
    // Generate the object metadata by setting server side encryption, md5 checksum,
    // and encoding as octet stream since no assumptions are made about the file type
    ObjectMetadata meta = new ObjectMetadata();
    if (mSseEnabled) {
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    if (mHash != null) {
        meta.setContentMD5(Base64.encodeAsString(mHash.digest()));
    }
    meta.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);

    AmazonClientException lastException;
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(mBucketName, mKey)
            .withObjectMetadata(meta);
    do {
        try {
            mUploadId = mClient.initiateMultipartUpload(initRequest).getUploadId();
            return;
        } catch (AmazonClientException e) {
            lastException = e;
        }
    } while (mRetryPolicy.attempt());
    // This point is only reached if the operation failed more
    // than the allowed retry count
    throw new IOException("Unable to init multipart upload to " + mKey, lastException);
}

From source file: c3.ops.priam.aws.S3FileSystem.java

License: Apache License

@Override
public void upload(AbstractBackupPath path, InputStream in) throws BackupRestoreException {
    uploadCount.incrementAndGet();
    AmazonS3 s3Client = getS3Client();
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(config.getBackupPrefix(),
            path.getRemotePath());
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
    DataPart part = new DataPart(config.getBackupPrefix(), path.getRemotePath(), initResponse.getUploadId());
    List<PartETag> partETags = Lists.newArrayList();
    long chunkSize = config.getBackupChunkSize();
    if (path.getSize() > 0)
        chunkSize = (path.getSize() / chunkSize >= MAX_CHUNKS) ? (path.getSize() / (MAX_CHUNKS - 1))
                : chunkSize;
    logger.info(String.format("Uploading to %s/%s with chunk size %d", config.getBackupPrefix(),
            path.getRemotePath(), chunkSize));
    try {
        Iterator<byte[]> chunks = compress.compress(in, chunkSize);
        // Upload parts.
        int partNum = 0;
        while (chunks.hasNext()) {
            byte[] chunk = chunks.next();
            rateLimiter.acquire(chunk.length);
            DataPart dp = new DataPart(++partNum, chunk, config.getBackupPrefix(), path.getRemotePath(),
                    initResponse.getUploadId());
            S3PartUploader partUploader = new S3PartUploader(s3Client, dp, partETags);
            executor.submit(partUploader);
            bytesUploaded.addAndGet(chunk.length);
        }
        executor.sleepTillEmpty();
        if (partNum != partETags.size())
            throw new BackupRestoreException("Number of parts(" + partNum
                    + ")  does not match the uploaded parts(" + partETags.size() + ")");
        new S3PartUploader(s3Client, part, partETags).completeUpload();

        if (logger.isDebugEnabled()) {
            final S3ResponseMetadata responseMetadata = s3Client.getCachedResponseMetadata(initRequest);
            final String requestId = responseMetadata.getRequestId(); // "x-amz-request-id" header
            final String hostId = responseMetadata.getHostId(); // "x-amz-id-2" header
            logger.debug("S3 AWS x-amz-request-id[" + requestId + "], and x-amz-id-2[" + hostId + "]");
        }

    } catch (Exception e) {
        new S3PartUploader(s3Client, part, partETags).abortUpload();
        throw new BackupRestoreException("Error uploading file " + path.getFileName(), e);
    } finally {
        IOUtils.closeQuietly(in);
    }
}

From source file: com.emc.vipr.s3.s3api.java

License: Open Source License

public static void CreateLargeObject(String S3_ACCESS_KEY_ID, String S3_SECRET_KEY, String S3_ENDPOINT,
        String S3_ViPR_NAMESPACE, String S3_BUCKET, String key, File file, String metaKey, String metaValue)
        throws Exception {

    System.out.println("Access ID:" + S3_ACCESS_KEY_ID);
    System.out.println("Access secret:" + S3_SECRET_KEY);
    System.out.println("Access URL:" + S3_ENDPOINT);
    System.out.println("Access namespace:" + S3_ViPR_NAMESPACE);
    System.out.println("Access bucket:" + S3_BUCKET);
    System.out.println("Access key:" + key);

    ViPRS3Client s3 = getS3Client(S3_ACCESS_KEY_ID, S3_SECRET_KEY, S3_ENDPOINT, S3_ViPR_NAMESPACE);

    ObjectMetadata objmeta = new ObjectMetadata();
    if (!(metaKey.equals("") && metaValue.equals(""))) {

        objmeta.addUserMetadata(metaKey, metaValue);
    }
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(S3_BUCKET, key)
            .withObjectMetadata(objmeta);
    InitiateMultipartUploadResult initResponse = s3.initiateMultipartUpload(initRequest);
    long partSize = 5 * 1024 * 1024; // Set part size to 5 MB, the S3 minimum for every part except the last.
    // list of UploadPartResponse objects for each part that is uploaded
    List<PartETag> partETags = new ArrayList<PartETag>();
    long filePosition = 0;
    for (int i = 1; filePosition < file.length(); i++) {
        // get the size of the chunk.  Note - the last part can be less than the chunk size
        partSize = Math.min(partSize, (file.length() - filePosition));

        System.out.println(String.format("Sending chunk [%d] starting at position [%d]", i, filePosition));

        // Create request to upload a part.
        UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(S3_BUCKET).withKey(key)
                .withUploadId(initResponse.getUploadId()).withPartNumber(i).withFileOffset(filePosition)
                .withFile(file).withPartSize(partSize);

        // Upload part and add response to our list.
        PartETag eTagPart = s3.uploadPart(uploadRequest).getPartETag();
        partETags.add(eTagPart);

        // set file position to the next part in the file
        filePosition += partSize;
    }
    System.out.println("Waiting for completion of multi-part upload");
    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(S3_BUCKET, key,
            initResponse.getUploadId(), partETags);

    s3.completeMultipartUpload(compRequest);

}

From source file: com.eucalyptus.blockstorage.S3SnapshotTransfer.java

License: Open Source License

private String initiateMultipartUpload(Long uncompressedSize) throws SnapshotInitializeMpuException {
    InitiateMultipartUploadResult initResponse = null;
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
    ObjectMetadata objectMetadata = new ObjectMetadata();
    Map<String, String> userMetadataMap = new HashMap<String, String>();
    userMetadataMap.put(UNCOMPRESSED_SIZE_KEY, String.valueOf(uncompressedSize)); // Send the uncompressed length as the metadata
    objectMetadata.setUserMetadata(userMetadataMap);
    initRequest.setObjectMetadata(objectMetadata);

    try {
        LOG.info("Inititating multipart upload: snapshotId=" + snapshotId + ", bucketName=" + bucketName
                + ", keyName=" + keyName);
        initResponse = retryAfterRefresh(
                new Function<InitiateMultipartUploadRequest, InitiateMultipartUploadResult>() {

                    @Override
                    @Nullable
                    public InitiateMultipartUploadResult apply(@Nullable InitiateMultipartUploadRequest arg0) {
                        eucaS3Client.refreshEndpoint();
                        return eucaS3Client.initiateMultipartUpload(arg0);
                    }

                }, initRequest, REFRESH_TOKEN_RETRIES);
    } catch (Exception ex) {
        throw new SnapshotInitializeMpuException("Failed to initialize multipart upload part for snapshotId="
                + snapshotId + ", bucketName=" + bucketName + ", keyName=" + keyName, ex);
    }

    if (StringUtils.isBlank(initResponse.getUploadId())) {
        throw new SnapshotInitializeMpuException("Invalid upload ID for multipart upload part for snapshotId="
                + snapshotId + ", bucketName=" + bucketName + ", keyName=" + keyName);
    }
    return initResponse.getUploadId();
}

From source file: com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java

License: Open Source License

@Override
public InitiateMultipartUploadResponseType initiateMultipartUpload(InitiateMultipartUploadType request)
        throws S3Exception {
    InitiateMultipartUploadResponseType reply = request.getReply();
    User requestUser = getRequestUser(request);
    OsgInternalS3Client internalS3Client = null;

    String bucketName = request.getBucket();
    String key = request.getKey();
    InitiateMultipartUploadRequest initiateMultipartUploadRequest = new InitiateMultipartUploadRequest(
            bucketName, key);
    ObjectMetadata metadata = new ObjectMetadata();
    for (MetaDataEntry meta : request.getMetaData()) {
        metadata.addUserMetadata(meta.getName(), meta.getValue());
    }

    initiateMultipartUploadRequest.setObjectMetadata(metadata);
    try {
        internalS3Client = getS3Client(requestUser);
        AmazonS3Client s3Client = internalS3Client.getS3Client();
        InitiateMultipartUploadResult result = s3Client.initiateMultipartUpload(initiateMultipartUploadRequest);
        reply.setUploadId(result.getUploadId());
        reply.setBucket(bucketName);
        reply.setKey(key);
        return reply;
    } catch (AmazonServiceException e) {
        LOG.debug("Error from backend", e);
        throw S3ExceptionMapper.fromAWSJavaSDK(e);
    }
}

From source file: com.example.jinux.mydemo.s3.Uploader.java

License: Apache License

/**
 * Initiate a multipart file upload to Amazon S3
 *
 * @return the URL of a successfully uploaded file
 */
public String start() {

    // initialize
    List<PartETag> partETags = new ArrayList<PartETag>();
    final long contentLength = file.length();
    long filePosition = 0;
    int startPartNumber = 1;

    userInterrupted = false;
    userAborted = false;
    bytesUploaded = 0;

    // check if we can resume an incomplete upload
    String uploadId = getCachedUploadId();
    Utils.log("start uploading");
    if (uploadId != null) {
        // we can resume the upload
        Log.i(TAG, "resuming upload for " + uploadId);

        // get the cached etags
        List<PartETag> cachedEtags = getCachedPartEtags();
        partETags.addAll(cachedEtags);

        // calculate the start position for resume
        startPartNumber = cachedEtags.size() + 1;
        filePosition = (startPartNumber - 1) * partSize;
        bytesUploaded = filePosition;

        Log.i(TAG, "resuming at part " + startPartNumber + " position " + filePosition);

    } else {
        // initiate a new multi part upload
        Log.i(TAG, "initiating new upload");

        Utils.log("the bucket = " + s3bucketName);
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(s3bucketName, s3key);
        configureInitiateRequest(initRequest);
        initRequest.getRequestClientOptions()
                .appendUserAgent("TransferService_multipart/" + VersionInfoUtils.getVersion());
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        uploadId = initResponse.getUploadId();

    }

    final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(s3bucketName, s3key,
            uploadId);

    for (int k = startPartNumber; filePosition < contentLength; k++) {

        long thisPartSize = Math.min(partSize, (contentLength - filePosition));

        Log.i(TAG, "starting file part " + k + " with size " + thisPartSize);

        UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(s3bucketName).withKey(s3key)
                .withUploadId(uploadId).withPartNumber(k).withFileOffset(filePosition).withFile(file)
                .withPartSize(thisPartSize);

        ProgressListener s3progressListener = new ProgressListener() {
            public void progressChanged(ProgressEvent progressEvent) {

                // bail out if user cancelled
                // TODO is calling shutdown too brute-force?
                if (userInterrupted) {
                    s3Client.shutdown();
                    throw new UploadIterruptedException("User interrupted");
                } else if (userAborted) {
                    // aborted requests cannot be resumed, so clear any cached etags
                    clearProgressCache();
                    s3Client.abortMultipartUpload(abortRequest);
                    s3Client.shutdown();
                }

                bytesUploaded += progressEvent.getBytesTransfered();

                //Log.d(TAG, "bytesUploaded=" + bytesUploaded);

                // broadcast progress
                float fpercent = (bytesUploaded * 100f) / contentLength;
                int percent = Math.round(fpercent);
                if (progressListener != null) {
                    progressListener.progressChanged(progressEvent, bytesUploaded, percent);
                }

            }
        };

        uploadRequest.setProgressListener(s3progressListener);

        UploadPartResult result = s3Client.uploadPart(uploadRequest);

        partETags.add(result.getPartETag());

        // cache the part progress for this upload
        if (k == 1) {
            initProgressCache(uploadId);
        }
        // store part etag
        cachePartEtag(result);

        filePosition += thisPartSize;
    }

    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(s3bucketName, s3key,
            uploadId, partETags);

    CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(compRequest);
    bytesUploaded = 0;

    Log.i(TAG, "upload complete for " + uploadId);

    clearProgressCache();

    return result.getLocation();

}

From source file: com.ge.predix.sample.blobstore.repository.BlobstoreService.java

License: Apache License

/**
 * Adds a new Blob to the bound bucket in the Object Store
 *
 * @param obj S3Object to be added
 * @throws Exception
 */
public void put(S3Object obj) throws Exception {
    if (obj == null) {
        log.error("put(): Empty file provided");
        throw new Exception("File is null");
    }
    InputStream is = obj.getObjectContent();

    List<PartETag> partETags = new ArrayList<>();

    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, obj.getKey());
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
    try {

        int i = 1;
        int currentPartSize = 0;
        ByteArrayOutputStream tempBuffer = new ByteArrayOutputStream();
        int byteValue;
        while ((byteValue = is.read()) != -1) {
            tempBuffer.write(byteValue);
            currentPartSize = tempBuffer.size();
            if (currentPartSize == (50 * 1024 * 1024)) //make this a const
            {
                byte[] b = tempBuffer.toByteArray();
                ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

                UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                        .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i++)
                        .withInputStream(byteStream).withPartSize(currentPartSize);
                partETags.add(s3Client.uploadPart(uploadPartRequest).getPartETag());

                tempBuffer.reset();
            }
        }
        log.info("currentPartSize: " + currentPartSize);
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(currentPartSize);
        obj.setObjectMetadata(objectMetadata);

        if (i == 1 && currentPartSize < (5 * 1024 * 1024)) // make this a const
        {
            s3Client.abortMultipartUpload(
                    new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));

            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);
            objectMetadata.setContentType(getContentType(b));
            obj.setObjectMetadata(objectMetadata);

            PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, obj.getKey(), byteStream,
                    obj.getObjectMetadata());
            s3Client.putObject(putObjectRequest);
            return;
        }

        if (currentPartSize > 0 && currentPartSize <= (50 * 1024 * 1024)) // make this a const
        {
            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

            log.info("currentPartSize: " + currentPartSize);
            log.info("byteArray: " + b);

            UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withInputStream(byteStream).withPartSize(currentPartSize);
            partETags.add(s3Client.uploadPart(uploadPartRequest).getPartETag());
        }
    } catch (Exception e) {
        log.error("put(): Exception occurred in put(): " + e.getMessage());
        s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));
        throw e;
    }
    CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
            .withBucketName(bucket).withPartETags(partETags).withUploadId(initResponse.getUploadId())
            .withKey(obj.getKey());

    s3Client.completeMultipartUpload(completeMultipartUploadRequest);
}

From source file: com.ge.predix.solsvc.blobstore.bootstrap.BlobstoreClientImpl.java

License: Apache License

/**
 * Adds a new Blob to the bound bucket in the Object Store
 *
 * @param obj S3Object to be added
 * @return the upload id of the initiated multipart upload
 */
@Override
public String saveBlob(S3Object obj) {
    if (obj == null) {
        this.log.error("put(): Empty file provided"); //$NON-NLS-1$
        throw new RuntimeException("File is null"); //$NON-NLS-1$
    }
    List<PartETag> partETags = new ArrayList<>();
    String bucket = this.blobstoreConfig.getBucketName();
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, obj.getKey());
    InitiateMultipartUploadResult initResponse = this.s3Client.initiateMultipartUpload(initRequest);
    try (InputStream is = obj.getObjectContent();) {

        int i = 1;
        int currentPartSize = 0;
        ByteArrayOutputStream tempBuffer = new ByteArrayOutputStream();
        int byteValue;
        while ((byteValue = is.read()) != -1) {
            tempBuffer.write(byteValue);
            currentPartSize = tempBuffer.size();
            if (currentPartSize == (50 * 1024 * 1024)) //make this a const
            {
                byte[] b = tempBuffer.toByteArray();
                ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

                UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                        .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i++)
                        .withInputStream(byteStream).withPartSize(currentPartSize);
                partETags.add(this.s3Client.uploadPart(uploadPartRequest).getPartETag());

                tempBuffer.reset();
            }
        }
        this.log.info("currentPartSize: " + currentPartSize); //$NON-NLS-1$
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(currentPartSize);
        if (this.enableSSE) {
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        obj.setObjectMetadata(objectMetadata);

        if (i == 1 && currentPartSize < (5 * 1024 * 1024)) // make this a const
        {
            this.s3Client.abortMultipartUpload(
                    new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));

            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);
            objectMetadata.setContentType(getContentType(b));
            if (this.enableSSE) {
                objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            }
            obj.setObjectMetadata(objectMetadata);

            PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, obj.getKey(), byteStream,
                    obj.getObjectMetadata());
            this.s3Client.putObject(putObjectRequest);

            ObjectMetadata meta = this.s3Client.getObjectMetadata(bucket, obj.getKey());
            Map<String, Object> headers = meta.getRawMetadata();
            for (Map.Entry<String, Object> entry : headers.entrySet()) {
                this.log.info("Object Metadata -- " + entry.getKey() + ": " + entry.getValue().toString()); //$NON-NLS-1$ //$NON-NLS-2$
            }

            return initResponse.getUploadId();
        }

        if (currentPartSize > 0 && currentPartSize <= (50 * 1024 * 1024)) // make this a const
        {
            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

            this.log.info("currentPartSize: " + currentPartSize); //$NON-NLS-1$
            this.log.info("byteArray: " + b); //$NON-NLS-1$

            UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withInputStream(byteStream).withPartSize(currentPartSize);
            partETags.add(this.s3Client.uploadPart(uploadPartRequest).getPartETag());
        }

        CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
                .withBucketName(bucket).withPartETags(partETags).withUploadId(initResponse.getUploadId())
                .withKey(obj.getKey());

        this.s3Client.completeMultipartUpload(completeMultipartUploadRequest);
        return initResponse.getUploadId();
    } catch (Exception e) {
        this.log.error("put(): Exception occurred in put(): " + e.getMessage()); //$NON-NLS-1$
        this.s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));
        throw new RuntimeException("put(): Exception occurred in put(): ", e); //$NON-NLS-1$
    }
}

From source file: com.lithium.flow.filer.S3Filer.java

License: Apache License

@Override
@Nonnull
public OutputStream writeFile(@Nonnull String path) throws IOException {
    String key = path.substring(1);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    List<Future<PartETag>> futureTags = new ArrayList<>();
    Lazy<String> uploadId = new Lazy<>(
            () -> s3.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key)).getUploadId());

    return new OutputStream() {
        @Override
        public void write(int b) throws IOException {
            baos.write(b);
            flip(partSize);
        }

        @Override
        public void write(byte[] b) throws IOException {
            baos.write(b);
            flip(partSize);
        }

        @Override
        public void write(byte[] b, int off, int len) throws IOException {
            baos.write(b, off, len);
            flip(partSize);
        }

        @Override
        public void close() throws IOException {
            if (futureTags.size() == 0) {
                InputStream in = new ByteArrayInputStream(baos.toByteArray());
                ObjectMetadata metadata = new ObjectMetadata();
                metadata.setContentLength(baos.size());
                s3.putObject(bucket, key, in, metadata);
            } else {
                flip(1);

                List<PartETag> tags = Lists.newArrayList();
                for (Future<PartETag> futureTag : futureTags) {
                    try {
                        tags.add(futureTag.get());
                    } catch (Exception e) {
                        s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId.get()));
                        throw new IOException("failed to upload: " + path, e);
                    }
                }

                s3.completeMultipartUpload(
                        new CompleteMultipartUploadRequest(bucket, key, uploadId.get(), tags));
            }
        }

        private void flip(long minSize) throws IOException {
            if (baos.size() < minSize) {
                return;
            }

            File file = new File(tempDir, UUID.randomUUID().toString());
            file.deleteOnExit();

            OutputStream out = new FileOutputStream(file);
            out.write(baos.toByteArray());
            out.close();

            baos.reset();

            UploadPartRequest uploadRequest = new UploadPartRequest().withUploadId(uploadId.get())
                    .withBucketName(bucket).withKey(key).withPartNumber(futureTags.size() + 1)
                    .withPartSize(file.length()).withFile(file);

            futureTags.add(service.submit(() -> {
                try {
                    return s3.uploadPart(uploadRequest).getPartETag();
                } finally {
                    file.delete();
                }
            }));
        }
    };
}

From source file: com.netflix.bdp.s3.S3Util.java

License: Apache License

public static PendingUpload multipartUpload(AmazonS3 client, File localFile, String partition, String bucket,
        String key, long uploadPartSize) {

    InitiateMultipartUploadResult initiate = client
            .initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key));
    String uploadId = initiate.getUploadId();

    boolean threw = true;
    try {
        Map<Integer, String> etags = Maps.newLinkedHashMap();

        long offset = 0;
        long numParts = (localFile.length() / uploadPartSize
                + ((localFile.length() % uploadPartSize) > 0 ? 1 : 0));

        Preconditions.checkArgument(numParts > 0, "Cannot upload 0 byte file: " + localFile);

        for (int partNumber = 1; partNumber <= numParts; partNumber += 1) {
            long size = Math.min(localFile.length() - offset, uploadPartSize);
            UploadPartRequest part = new UploadPartRequest().withBucketName(bucket).withKey(key)
                    .withPartNumber(partNumber).withUploadId(uploadId).withFile(localFile)
                    .withFileOffset(offset).withPartSize(size).withLastPart(partNumber == numParts);

            UploadPartResult partResult = client.uploadPart(part);
            PartETag etag = partResult.getPartETag();
            etags.put(etag.getPartNumber(), etag.getETag());

            offset += uploadPartSize;
        }

        PendingUpload pending = new PendingUpload(partition, bucket, key, uploadId, etags);

        threw = false;

        return pending;

    } finally {
        if (threw) {
            try {
                client.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
            } catch (AmazonClientException e) {
                LOG.error("Failed to abort multi-part upload", e);
            }
        }
    }
}