Example usage for com.amazonaws.services.s3 AmazonS3 initiateMultipartUpload

List of usage examples for com.amazonaws.services.s3 AmazonS3 initiateMultipartUpload

Introduction

On this page you can find example usage for com.amazonaws.services.s3 AmazonS3 initiateMultipartUpload.

Prototype

public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest request)
        throws SdkClientException, AmazonServiceException;

Document

Initiates a multipart upload and returns an InitiateMultipartUploadResult which contains an upload ID.
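
For orientation, the sketch below shows the typical initiate / upload parts / complete sequence around this method. The bucket name, key, and file path are placeholders, and error handling is reduced to aborting the upload so S3 does not retain orphaned parts.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.*;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class MultipartUploadSketch {
    public static void main(String[] args) {
        // Placeholder bucket, key, and file; replace with real values.
        String bucket = "my-bucket";
        String key = "my-key";
        File file = new File("/tmp/large-file.bin");

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Step 1: Initiate the upload and keep the upload ID.
        InitiateMultipartUploadResult init = s3
                .initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key));
        String uploadId = init.getUploadId();

        List<PartETag> partETags = new ArrayList<>();
        long partSize = 5L * 1024 * 1024; // each part must be at least 5 MB, except the last
        long filePosition = 0;
        try {
            // Step 2: Upload the file in parts, collecting each part's ETag.
            for (int partNumber = 1; filePosition < file.length(); partNumber++) {
                long size = Math.min(partSize, file.length() - filePosition);
                UploadPartRequest part = new UploadPartRequest().withBucketName(bucket).withKey(key)
                        .withUploadId(uploadId).withPartNumber(partNumber).withFile(file)
                        .withFileOffset(filePosition).withPartSize(size);
                partETags.add(s3.uploadPart(part).getPartETag());
                filePosition += size;
            }

            // Step 3: Complete the upload with the collected part ETags.
            s3.completeMultipartUpload(
                    new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags));
        } catch (Exception e) {
            // Abort the upload so the already-uploaded parts are discarded.
            s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
            throw new RuntimeException(e);
        }
    }
}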

Usage

From source file:c3.ops.priam.aws.S3FileSystem.java

License:Apache License
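
This example compresses the input stream into chunks, submits each chunk to a part-uploader executor, and completes the upload once the number of collected part ETags matches the number of submitted parts; on any failure it aborts the upload.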

@Override
public void upload(AbstractBackupPath path, InputStream in) throws BackupRestoreException {
    uploadCount.incrementAndGet();
    AmazonS3 s3Client = getS3Client();
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(config.getBackupPrefix(),
            path.getRemotePath());
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
    DataPart part = new DataPart(config.getBackupPrefix(), path.getRemotePath(), initResponse.getUploadId());
    List<PartETag> partETags = Lists.newArrayList();
    long chunkSize = config.getBackupChunkSize();
    if (path.getSize() > 0)
        chunkSize = (path.getSize() / chunkSize >= MAX_CHUNKS) ? (path.getSize() / (MAX_CHUNKS - 1))
                : chunkSize;
    logger.info(String.format("Uploading to %s/%s with chunk size %d", config.getBackupPrefix(),
            path.getRemotePath(), chunkSize));
    try {
        Iterator<byte[]> chunks = compress.compress(in, chunkSize);
        // Upload parts.
        int partNum = 0;
        while (chunks.hasNext()) {
            byte[] chunk = chunks.next();
            rateLimiter.acquire(chunk.length);
            DataPart dp = new DataPart(++partNum, chunk, config.getBackupPrefix(), path.getRemotePath(),
                    initResponse.getUploadId());
            S3PartUploader partUploader = new S3PartUploader(s3Client, dp, partETags);
            executor.submit(partUploader);
            bytesUploaded.addAndGet(chunk.length);
        }
        executor.sleepTillEmpty();
        if (partNum != partETags.size())
            throw new BackupRestoreException("Number of parts(" + partNum
                    + ")  does not match the uploaded parts(" + partETags.size() + ")");
        new S3PartUploader(s3Client, part, partETags).completeUpload();

        if (logger.isDebugEnabled()) {
            final S3ResponseMetadata responseMetadata = s3Client.getCachedResponseMetadata(initRequest);
            final String requestId = responseMetadata.getRequestId(); // "x-amz-request-id" header
            final String hostId = responseMetadata.getHostId(); // "x-amz-id-2" header
            logger.debug("S3 AWS x-amz-request-id[" + requestId + "], and x-amz-id-2[" + hostId + "]");
        }

    } catch (Exception e) {
        new S3PartUploader(s3Client, part, partETags).abortUpload();
        throw new BackupRestoreException("Error uploading file " + path.getFileName(), e);
    } finally {
        IOUtils.closeQuietly(in);
    }
}

From source file:com.handywedge.binarystore.store.aws.BinaryStoreManagerImpl.java

License:MIT License
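
This example buffers the first 5 MB to decide between a simple putObject and a multipart upload, spills subsequent parts to temporary files, and aborts the upload if the total size exceeds 5 GB.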

@Override
public BinaryInfo upload(StorageInfo storage, BinaryInfo binary, InputStream inStream) throws StoreException {
    logger.debug("={}", storage);
    logger.debug("?={}", binary);

    AmazonS3 s3client = getS3Client(binary.getBucketName());

    ObjectMetadata oMetadata = new ObjectMetadata();
    oMetadata.setContentType(binary.getContentType());

    // Initiate the multipart upload.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(binary.getBucketName(),
            binary.getFileName(), oMetadata);
    InitiateMultipartUploadResult initResponse = s3client.initiateMultipartUpload(initRequest);

    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        long written = IOUtils.copyLarge(inStream, baos, 0, BINARY_PART_SIZE_5MB);

        byte[] data = baos.toByteArray();
        InputStream awsInputStream = new ByteArrayInputStream(data);

        if (written < BINARY_PART_SIZE_5MB) {
            oMetadata.setContentLength(written);
            s3client.putObject(binary.getBucketName(), binary.getFileName(), awsInputStream, oMetadata);
        } else {
            int firstByte = 0;
            int partNumber = 1;
            boolean isFirstChunck = true;
            boolean overSizeLimit = false;
            List<PartETag> partETags = new ArrayList<PartETag>();
            InputStream firstChunck = new ByteArrayInputStream(data);
            PushbackInputStream chunckableInputStream = new PushbackInputStream(inStream, 1);

            long maxSize = BINARY_PART_SIZE_5MB * 1024;
            String maxSizeStr = "5GB";
            String prefix = MDC.get("requestId");
            while (-1 != (firstByte = chunckableInputStream.read())) {
                long partSize = 0;
                chunckableInputStream.unread(firstByte);
                File tempFile = File.createTempFile(prefix.concat("-part").concat(String.valueOf(partNumber)),
                        null);
                tempFile.deleteOnExit();
                try (OutputStream os = new BufferedOutputStream(
                        new FileOutputStream(tempFile.getAbsolutePath()))) {

                    if (isFirstChunck) {
                        partSize = IOUtils.copyLarge(firstChunck, os, 0, (BINARY_PART_SIZE_5MB));
                        isFirstChunck = false;
                    } else {
                        partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (BINARY_PART_SIZE_5MB));
                    }
                    written += partSize;

                    if (written > maxSize) { // 5GB
                        overSizeLimit = true;
                        logger.warn("OVERSIZED FILE ({}). STARTING ABORT", written);
                        break;
                    }
                }

                FileInputStream chunk = new FileInputStream(tempFile);
                Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
                if (!isLastPart) {
                    chunckableInputStream.unread(firstByte);
                }

                oMetadata.setContentLength(partSize);

                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(binary.getBucketName())
                        .withKey(binary.getFileName()).withUploadId(initResponse.getUploadId())
                        .withObjectMetadata(oMetadata).withInputStream(chunk).withPartSize(partSize)
                        .withPartNumber(partNumber).withLastPart(isLastPart);
                UploadPartResult result = s3client.uploadPart(uploadRequest);
                partETags.add(result.getPartETag());
                partNumber++;
            }

            if (overSizeLimit) {
                ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(
                        binary.getBucketName());
                MultipartUploadListing listResult = s3client.listMultipartUploads(listRequest);

                int timesIterated = 20;
                // loop and abort all the multipart uploads
                while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {
                    s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                            binary.getFileName(), initResponse.getUploadId()));
                    Thread.sleep(1000);
                    timesIterated--;
                    listResult = s3client.listMultipartUploads(listRequest);
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }
                if (timesIterated == 0) {
                    logger.warn("Files parts that couldn't be aborted in 20 seconds are:");
                    Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads()
                            .iterator();
                    while (multipartUploadIterator.hasNext()) {
                        logger.warn(multipartUploadIterator.next().getKey());
                    }
                }
                throw new StoreException(HttpStatus.SC_REQUEST_TOO_LONG, ErrorClassification.UPLOAD_TOO_LARGE,
                        maxSizeStr);
            } else {
                CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                        binary.getBucketName(), binary.getFileName(), initResponse.getUploadId(), partETags);

                CompleteMultipartUploadResult comMPUResult = s3client.completeMultipartUpload(compRequest);
                logger.debug("CompleteMultipartUploadResult={}", comMPUResult);
            }
        }
    } catch (AmazonServiceException ase) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ase,
                binary.toString());
    } catch (AmazonClientException ace) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ace,
                binary.toString());
    } catch (IOException ioe) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, ioe,
                binary.toString());
    } catch (InterruptedException itre) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, itre,
                binary.toString());
    } finally {
        if (inStream != null) {
            try {
                inStream.close();
            } catch (Exception e) {
            }
        }
    }

    return getBinaryInfo(s3client, binary.getBucketName(), binary.getFileName());
}

From source file:com.netflix.bdp.s3.S3Util.java

License:Apache License
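
This example uploads a local file in fixed-size parts using file offsets, returns a PendingUpload holding the part ETags, and aborts the upload if any part fails.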

public static PendingUpload multipartUpload(AmazonS3 client, File localFile, String partition, String bucket,
        String key, long uploadPartSize) {

    InitiateMultipartUploadResult initiate = client
            .initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key));
    String uploadId = initiate.getUploadId();

    boolean threw = true;
    try {
        Map<Integer, String> etags = Maps.newLinkedHashMap();

        long offset = 0;
        long numParts = (localFile.length() / uploadPartSize
                + ((localFile.length() % uploadPartSize) > 0 ? 1 : 0));

        Preconditions.checkArgument(numParts > 0, "Cannot upload 0 byte file: " + localFile);

        for (int partNumber = 1; partNumber <= numParts; partNumber += 1) {
            long size = Math.min(localFile.length() - offset, uploadPartSize);
            UploadPartRequest part = new UploadPartRequest().withBucketName(bucket).withKey(key)
                    .withPartNumber(partNumber).withUploadId(uploadId).withFile(localFile)
                    .withFileOffset(offset).withPartSize(size).withLastPart(partNumber == numParts);

            UploadPartResult partResult = client.uploadPart(part);
            PartETag etag = partResult.getPartETag();
            etags.put(etag.getPartNumber(), etag.getETag());

            offset += uploadPartSize;
        }

        PendingUpload pending = new PendingUpload(partition, bucket, key, uploadId, etags);

        threw = false;

        return pending;

    } finally {
        if (threw) {
            try {
                client.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
            } catch (AmazonClientException e) {
                LOG.error("Failed to abort multi-part upload", e);
            }
        }
    }
}

From source file:org.apache.beam.sdk.io.aws.s3.S3WritableByteChannel.java

License:Apache License
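
This example validates the server-side encryption options and upload buffer size, then sets the storage class, object metadata, and SSE parameters on the InitiateMultipartUploadRequest before starting the upload for a writable byte channel.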

S3WritableByteChannel(AmazonS3 amazonS3, S3ResourceId path, String contentType, S3Options options)
        throws IOException {
    this.amazonS3 = checkNotNull(amazonS3, "amazonS3");
    this.options = checkNotNull(options);
    this.path = checkNotNull(path, "path");
    checkArgument(
            atMostOne(options.getSSECustomerKey() != null, options.getSSEAlgorithm() != null,
                    options.getSSEAwsKeyManagementParams() != null),
            "Either SSECustomerKey (SSE-C) or SSEAlgorithm (SSE-S3)"
                    + " or SSEAwsKeyManagementParams (SSE-KMS) must not be set at the same time.");
    // Amazon S3 API docs: Each part must be at least 5 MB in size, except the last part.
    checkArgument(options
            .getS3UploadBufferSizeBytes() >= S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES,
            "S3UploadBufferSizeBytes must be at least %s bytes",
            S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES);
    this.uploadBuffer = ByteBuffer.allocate(options.getS3UploadBufferSizeBytes());
    eTags = new ArrayList<>();

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentType(contentType);
    if (options.getSSEAlgorithm() != null) {
        objectMetadata.setSSEAlgorithm(options.getSSEAlgorithm());
    }
    InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(path.getBucket(), path.getKey())
            .withStorageClass(options.getS3StorageClass()).withObjectMetadata(objectMetadata);
    request.setSSECustomerKey(options.getSSECustomerKey());
    request.setSSEAwsKeyManagementParams(options.getSSEAwsKeyManagementParams());
    InitiateMultipartUploadResult result;
    try {
        result = amazonS3.initiateMultipartUpload(request);
    } catch (AmazonClientException e) {
        throw new IOException(e);
    }
    uploadId = result.getUploadId();
}

From source file:org.deeplearning4j.aws.s3.uploader.S3Uploader.java

License:Apache License
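
This example walks a file in 5 MB parts by offset, completes the upload on success, and aborts it on any exception.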

private void doMultiPart(AmazonS3 s3Client, String bucketName, File file) {
    // Create a list of UploadPartResponse objects. You get one of these
    // for each part upload.
    List<PartETag> partETags = new ArrayList<>();

    // Step 1: Initialize.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, file.getName());
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);

    long contentLength = file.length();
    long partSize = 5242880; // Set part size to 5 MB.

    try {
        // Step 2: Upload parts.
        long filePosition = 0;
        for (int i = 1; filePosition < contentLength; i++) {
            // Last part can be less than 5 MB. Adjust part size.
            partSize = Math.min(partSize, (contentLength - filePosition));

            // Create request to upload a part.
            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName)
                    .withKey(file.getName()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withFileOffset(filePosition).withFile(file).withPartSize(partSize);

            // Upload part and add response to our list.
            partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());

            filePosition += partSize;
        }

        // Step 3: Complete.
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName,
                file.getName(), initResponse.getUploadId(), partETags);

        s3Client.completeMultipartUpload(compRequest);
    } catch (Exception e) {
        s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucketName, file.getName(), initResponse.getUploadId()));
    }
}