Example usage for com.amazonaws.services.s3.model UploadPartRequest UploadPartRequest

Introduction

On this page you can find example usage for the com.amazonaws.services.s3.model.UploadPartRequest constructor UploadPartRequest().

Prototype

public UploadPartRequest()
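
Each example below follows the same pattern: the request is created with this no-argument constructor and then populated through the fluent with*/set* methods before being passed to AmazonS3.uploadPart(). As a minimal sketch (the bucket name, object key, and part number are placeholders, and the upload ID is assumed to come from a prior InitiateMultipartUploadRequest), a single part can be uploaded like this:

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;

public class UploadPartSketch {
    // Minimal sketch, not taken from the examples below: uploads one part of an
    // already-initiated multipart upload. Bucket name, key, and part number are
    // placeholder values; uploadId must come from an InitiateMultipartUploadResult.
    public static PartETag uploadSinglePart(AmazonS3 s3, String uploadId, File partFile) {
        UploadPartRequest uploadRequest = new UploadPartRequest()
                .withBucketName("example-bucket")   // placeholder bucket
                .withKey("example-key")             // placeholder object key
                .withUploadId(uploadId)
                .withPartNumber(1)                  // part numbers start at 1
                .withFile(partFile)
                .withPartSize(partFile.length());
        // The returned PartETag is collected and later passed to
        // CompleteMultipartUploadRequest to finish the upload.
        return s3.uploadPart(uploadRequest).getPartETag();
    }
}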

Usage

From source file:alluxio.underfs.s3a.S3ALowLevelOutputStream.java

License:Apache License

@Override
public void close() throws IOException {
    if (mClosed) {
        return;
    }

    // Set the closed flag; we never retry close() even if an exception occurs
    mClosed = true;

    // Multi-part upload has not been initialized
    if (mUploadId == null) {
        LOG.debug("S3A Streaming upload output stream closed without uploading any data.");
        return;
    }

    try {
        if (mFile != null) {
            mLocalOutputStream.close();
            int partNumber = mPartNumber.getAndIncrement();
            final UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(mBucketName)
                    .withKey(mKey).withUploadId(mUploadId).withPartNumber(partNumber).withFile(mFile)
                    .withPartSize(mFile.length());
            uploadRequest.setLastPart(true);
            execUpload(uploadRequest);
        }

        waitForAllPartsUpload();
        completeMultiPartUpload();
    } catch (Exception e) {
        LOG.error("Failed to upload {}: {}", mKey, e.toString());
        throw new IOException(e);
    }
}

From source file:alluxio.underfs.s3a.S3ALowLevelOutputStream.java

License:Apache License

/**
 * Uploads a part asynchronously.
 */
private void uploadPart() throws IOException {
    if (mFile == null) {
        return;
    }
    mLocalOutputStream.close();
    int partNumber = mPartNumber.getAndIncrement();
    File newFileToUpload = new File(mFile.getPath());
    mFile = null;
    mLocalOutputStream = null;
    UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(mBucketName).withKey(mKey)
            .withUploadId(mUploadId).withPartNumber(partNumber).withFile(newFileToUpload)
            .withPartSize(newFileToUpload.length());
    execUpload(uploadRequest);
}

From source file:c3.ops.priam.aws.S3PartUploader.java

License:Apache License

private Void uploadPart() throws AmazonClientException, BackupRestoreException {
    UploadPartRequest req = new UploadPartRequest();
    req.setBucketName(dataPart.getBucketName());
    req.setKey(dataPart.getS3key());
    req.setUploadId(dataPart.getUploadID());
    req.setPartNumber(dataPart.getPartNo());
    req.setPartSize(dataPart.getPartData().length);
    req.setMd5Digest(SystemUtils.toBase64(dataPart.getMd5()));
    req.setInputStream(new ByteArrayInputStream(dataPart.getPartData()));
    UploadPartResult res = client.uploadPart(req);
    PartETag partETag = res.getPartETag();
    if (!partETag.getETag().equals(SystemUtils.toHex(dataPart.getMd5())))
        throw new BackupRestoreException("Unable to match MD5 for part " + dataPart.getPartNo());
    partETags.add(partETag);
    return null;
}

From source file:com.altoukhov.svsync.fileviews.S3FileSpace.java

License:Apache License

public boolean writeLargeFile(InputStream fileStream, FileSnapshot file) {
    if (fileStream == null)
        return false;

    try {
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(file.getFileSize());
        meta.getUserMetadata().put("lmd", file.getModifiedTimestamp().toDate().getTime() + "");
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);

        List<PartETag> partTags = new ArrayList<>();
        String fileKey = toAbsoluteFilePath(file.getRelativePath());

        InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, fileKey, meta);
        InitiateMultipartUploadResult result = s3.initiateMultipartUpload(request);

        long contentLength = file.getFileSize();
        long partSize = 256 * 1024 * 1024;

        try {
            // Uploading the file, part by part.
            long filePosition = 0;

            for (int i = 1; filePosition < contentLength; i++) {

                partSize = Math.min(partSize, (contentLength - filePosition));

                // Creating the request for a part upload
                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName)
                        .withKey(fileKey).withUploadId(result.getUploadId()).withPartNumber(i)
                        .withInputStream(fileStream).withPartSize(partSize);

                // Upload part and add response to the result list.
                partTags.add(s3.uploadPart(uploadRequest).getPartETag());
                filePosition += partSize;

                System.out.println("Uploaded " + Utils.readableFileSize(filePosition) + " out of "
                        + Utils.readableFileSize(contentLength));
            }
        } catch (Exception e) {
            System.out.println("UploadPartRequest failed: " + e.getMessage());
            s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, fileKey, result.getUploadId()));
            return false;
        }

        s3.completeMultipartUpload(
                new CompleteMultipartUploadRequest(bucketName, fileKey, result.getUploadId(), partTags));
    } catch (AmazonClientException ex) {
        System.out.println("Upload failed: " + ex.getMessage());
        return false;

    }
    return true;
}

From source file:com.emc.vipr.s3.s3api.java

License:Open Source License

public static void CreateLargeObject(String S3_ACCESS_KEY_ID, String S3_SECRET_KEY, String S3_ENDPOINT,
        String S3_ViPR_NAMESPACE, String S3_BUCKET, String key, File file, String metaKey, String metaValue)
        throws Exception {

    System.out.println("Access ID:" + S3_ACCESS_KEY_ID);
    System.out.println("Access secret:" + S3_SECRET_KEY);
    System.out.println("Access URL:" + S3_ENDPOINT);
    System.out.println("Access namespace:" + S3_ViPR_NAMESPACE);
    System.out.println("Access bucket:" + S3_BUCKET);
    System.out.println("Access key:" + key);

    ViPRS3Client s3 = getS3Client(S3_ACCESS_KEY_ID, S3_SECRET_KEY, S3_ENDPOINT, S3_ViPR_NAMESPACE);

    ObjectMetadata objmeta = new ObjectMetadata();
    if (!(metaKey.equals("") && metaValue.equals(""))) {

        objmeta.addUserMetadata(metaKey, metaValue);
    }
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(S3_BUCKET, key)
            .withObjectMetadata(objmeta);
    InitiateMultipartUploadResult initResponse = s3.initiateMultipartUpload(initRequest);
    long partSize = 1 * 1024 * 1024; // Set part size to 1 MB.
    // list of PartETag objects, one for each part that is uploaded
    List<PartETag> partETags = new ArrayList<PartETag>();
    long filePosition = 0;
    for (int i = 1; filePosition < file.length(); i++) {
        // get the size of the chunk.  Note - the last part can be less than the chunk size
        partSize = Math.min(partSize, (file.length() - filePosition));

        System.out.println(String.format("Sending chunk [%d] starting at position [%d]", i, filePosition));

        // Create request to upload a part.
        UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(S3_BUCKET).withKey(key)
                .withUploadId(initResponse.getUploadId()).withPartNumber(i).withFileOffset(filePosition)
                .withFile(file).withPartSize(partSize);

        // Upload part and add response to our list.
        PartETag eTagPart = s3.uploadPart(uploadRequest).getPartETag();
        partETags.add(eTagPart);

        // set file position to the next part in the file
        filePosition += partSize;
    }
    System.out.println("Waiting for completion of multi-part upload");
    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(S3_BUCKET, key,
            initResponse.getUploadId(), partETags);

    s3.completeMultipartUpload(compRequest);

}

From source file:com.eucalyptus.blockstorage.S3SnapshotTransfer.java

License:Open Source License

private PartETag uploadPart(SnapshotPart part, SnapshotProgressCallback progressCallback)
        throws SnapshotUploadPartException {
    try {
        part = part.updateStateUploading();
    } catch (Exception e) {
        LOG.debug("Failed to update part status in DB. Moving on. " + part);
    }

    try {
        LOG.debug("Uploading " + part);
        UploadPartResult uploadPartResult = retryAfterRefresh(
                new Function<UploadPartRequest, UploadPartResult>() {

                    @Override
                    @Nullable
                    public UploadPartResult apply(@Nullable UploadPartRequest arg0) {
                        eucaS3Client.refreshEndpoint();
                        return eucaS3Client.uploadPart(arg0);
                    }
                },
                new UploadPartRequest().withBucketName(part.getBucketName()).withKey(part.getKeyName())
                        .withUploadId(part.getUploadId()).withPartNumber(part.getPartNumber())
                        .withPartSize(part.getSize()).withFile(new File(part.getFileName())),
                REFRESH_TOKEN_RETRIES);

        progressCallback.update(part.getInputFileBytesRead());

        try {
            part = part.updateStateUploaded(uploadPartResult.getPartETag().getETag());
        } catch (Exception e) {
            LOG.debug("Failed to update part status in DB. Moving on. " + part);
        }
        LOG.debug("Uploaded " + part);
        return uploadPartResult.getPartETag();
    } catch (Exception e) {
        LOG.error("Failed to upload part " + part, e);
        try {
            part = part.updateStateFailed();
        } catch (Exception ie) {
            LOG.debug("Failed to update part status in DB. Moving on. " + part);
        }
        throw new SnapshotUploadPartException("Failed to upload part " + part, e);
    } finally {
        deleteFile(part.getFileName());
    }
}

From source file:com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java

License:Open Source License

@Override
public UploadPartResponseType uploadPart(UploadPartType request, InputStream dataContent) throws S3Exception {
    String bucketName = request.getBucket();
    String key = request.getKey();

    User requestUser = getRequestUser(request);
    OsgInternalS3Client internalS3Client = null;
    try {
        internalS3Client = getS3Client(requestUser);
        AmazonS3Client s3Client = internalS3Client.getS3Client();

        UploadPartResult result;
        UploadPartRequest uploadPartRequest = new UploadPartRequest();
        uploadPartRequest.setBucketName(bucketName);
        uploadPartRequest.setKey(key);
        uploadPartRequest.setInputStream(dataContent);
        uploadPartRequest.setUploadId(request.getUploadId());
        uploadPartRequest.setPartNumber(Integer.valueOf(request.getPartNumber()));
        uploadPartRequest.setMd5Digest(request.getContentMD5());
        uploadPartRequest.setPartSize(Long.valueOf(request.getContentLength()));
        try {
            result = s3Client.uploadPart(uploadPartRequest);
        } catch (AmazonServiceException e) {
            LOG.debug("Error from backend", e);
            throw S3ExceptionMapper.fromAWSJavaSDK(e);
        }
        UploadPartResponseType reply = request.getReply();
        reply.setEtag(result.getETag());
        reply.setLastModified(new Date());
        return reply;
    } catch (AmazonServiceException e) {
        LOG.debug("Error from backend", e);
        throw S3ExceptionMapper.fromAWSJavaSDK(e);
    }
}

From source file:com.example.jinux.mydemo.s3.Uploader.java

License:Apache License

/**
 * Initiates a multipart file upload to Amazon S3.
 *
 * @return the URL of a successfully uploaded file
 */
public String start() {

    // initialize
    List<PartETag> partETags = new ArrayList<PartETag>();
    final long contentLength = file.length();
    long filePosition = 0;
    int startPartNumber = 1;

    userInterrupted = false;
    userAborted = false;
    bytesUploaded = 0;

    // check if we can resume an incomplete upload
    String uploadId = getCachedUploadId();
    Utils.log("start uploading");
    if (uploadId != null) {
        // we can resume the upload
        Log.i(TAG, "resuming upload for " + uploadId);

        // get the cached etags
        List<PartETag> cachedEtags = getCachedPartEtags();
        partETags.addAll(cachedEtags);

        // calculate the start position for resume
        startPartNumber = cachedEtags.size() + 1;
        filePosition = (startPartNumber - 1) * partSize;
        bytesUploaded = filePosition;

        Log.i(TAG, "resuming at part " + startPartNumber + " position " + filePosition);

    } else {
        // initiate a new multi part upload
        Log.i(TAG, "initiating new upload");

        Utils.log("the bucket = " + s3bucketName);
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(s3bucketName, s3key);
        configureInitiateRequest(initRequest);
        initRequest.getRequestClientOptions()
                .appendUserAgent("TransferService_multipart/" + VersionInfoUtils.getVersion());
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        uploadId = initResponse.getUploadId();

    }

    final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(s3bucketName, s3key,
            uploadId);

    for (int k = startPartNumber; filePosition < contentLength; k++) {

        long thisPartSize = Math.min(partSize, (contentLength - filePosition));

        Log.i(TAG, "starting file part " + k + " with size " + thisPartSize);

        UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(s3bucketName).withKey(s3key)
                .withUploadId(uploadId).withPartNumber(k).withFileOffset(filePosition).withFile(file)
                .withPartSize(thisPartSize);

        ProgressListener s3progressListener = new ProgressListener() {
            public void progressChanged(ProgressEvent progressEvent) {

                // bail out if user cancelled
                // TODO calling shutdown too brute force?
                if (userInterrupted) {
                    s3Client.shutdown();
                    throw new UploadIterruptedException("User interrupted");
                } else if (userAborted) {
                    // aborted requests cannot be resumed, so clear any cached etags
                    clearProgressCache();
                    s3Client.abortMultipartUpload(abortRequest);
                    s3Client.shutdown();
                }

                bytesUploaded += progressEvent.getBytesTransfered();

                //Log.d(TAG, "bytesUploaded=" + bytesUploaded);

                // broadcast progress
                float fpercent = ((bytesUploaded * 100) / contentLength);
                int percent = Math.round(fpercent);
                if (progressListener != null) {
                    progressListener.progressChanged(progressEvent, bytesUploaded, percent);
                }

            }
        };

        uploadRequest.setProgressListener(s3progressListener);

        UploadPartResult result = s3Client.uploadPart(uploadRequest);

        partETags.add(result.getPartETag());

        // cache the part progress for this upload
        if (k == 1) {
            initProgressCache(uploadId);
        }
        // store part etag
        cachePartEtag(result);

        filePosition += thisPartSize;
    }

    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(s3bucketName, s3key,
            uploadId, partETags);

    CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(compRequest);
    bytesUploaded = 0;

    Log.i(TAG, "upload complete for " + uploadId);

    clearProgressCache();

    return result.getLocation();

}

From source file:com.ge.predix.sample.blobstore.repository.BlobstoreService.java

License:Apache License

/**
 * Adds a new Blob to the bound bucket in the Object Store.
 *
 * @param obj S3Object to be added
 * @throws Exception
 */
public void put(S3Object obj) throws Exception {
    if (obj == null) {
        log.error("put(): Empty file provided");
        throw new Exception("File is null");
    }
    InputStream is = obj.getObjectContent();

    List<PartETag> partETags = new ArrayList<>();

    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, obj.getKey());
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
    try {

        int i = 1;
        int currentPartSize = 0;
        ByteArrayOutputStream tempBuffer = new ByteArrayOutputStream();
        int byteValue;
        while ((byteValue = is.read()) != -1) {
            tempBuffer.write(byteValue);
            currentPartSize = tempBuffer.size();
            if (currentPartSize == (50 * 1024 * 1024)) //make this a const
            {
                byte[] b = tempBuffer.toByteArray();
                ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

                UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                        .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i++)
                        .withInputStream(byteStream).withPartSize(currentPartSize);
                partETags.add(s3Client.uploadPart(uploadPartRequest).getPartETag());

                tempBuffer.reset();
            }
        }
        log.info("currentPartSize: " + currentPartSize);
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(currentPartSize);
        obj.setObjectMetadata(objectMetadata);

        if (i == 1 && currentPartSize < (5 * 1024 * 1024)) // make this a const
        {
            s3Client.abortMultipartUpload(
                    new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));

            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);
            objectMetadata.setContentType(getContentType(b));
            obj.setObjectMetadata(objectMetadata);

            PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, obj.getKey(), byteStream,
                    obj.getObjectMetadata());
            s3Client.putObject(putObjectRequest);
            return;
        }

        if (currentPartSize > 0 && currentPartSize <= (50 * 1024 * 1024)) // make this a const
        {
            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

            log.info("currentPartSize: " + currentPartSize);
            log.info("byteArray: " + b);

            UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withInputStream(byteStream).withPartSize(currentPartSize);
            partETags.add(s3Client.uploadPart(uploadPartRequest).getPartETag());
        }
    } catch (Exception e) {
        log.error("put(): Exception occurred in put(): " + e.getMessage());
        s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));
        throw e;
    }
    CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
            .withBucketName(bucket).withPartETags(partETags).withUploadId(initResponse.getUploadId())
            .withKey(obj.getKey());

    s3Client.completeMultipartUpload(completeMultipartUploadRequest);
}

From source file:com.ge.predix.solsvc.blobstore.bootstrap.BlobstoreClientImpl.java

License:Apache License

/**
 * Adds a new Blob to the bound bucket in the Object Store.
 *
 * @param obj S3Object to be added
 */
@Override
public String saveBlob(S3Object obj) {
    if (obj == null) {
        this.log.error("put(): Empty file provided"); //$NON-NLS-1$
        throw new RuntimeException("File is null"); //$NON-NLS-1$
    }
    List<PartETag> partETags = new ArrayList<>();
    String bucket = this.blobstoreConfig.getBucketName();
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, obj.getKey());
    InitiateMultipartUploadResult initResponse = this.s3Client.initiateMultipartUpload(initRequest);
    try (InputStream is = obj.getObjectContent();) {

        int i = 1;
        int currentPartSize = 0;
        ByteArrayOutputStream tempBuffer = new ByteArrayOutputStream();
        int byteValue;
        while ((byteValue = is.read()) != -1) {
            tempBuffer.write(byteValue);
            currentPartSize = tempBuffer.size();
            if (currentPartSize == (50 * 1024 * 1024)) //make this a const
            {
                byte[] b = tempBuffer.toByteArray();
                ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

                UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                        .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i++)
                        .withInputStream(byteStream).withPartSize(currentPartSize);
                partETags.add(this.s3Client.uploadPart(uploadPartRequest).getPartETag());

                tempBuffer.reset();
            }
        }
        this.log.info("currentPartSize: " + currentPartSize); //$NON-NLS-1$
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(currentPartSize);
        if (this.enableSSE) {
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        obj.setObjectMetadata(objectMetadata);

        if (i == 1 && currentPartSize < (5 * 1024 * 1024)) // make this a const
        {
            this.s3Client.abortMultipartUpload(
                    new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));

            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);
            objectMetadata.setContentType(getContentType(b));
            if (this.enableSSE) {
                objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            }
            obj.setObjectMetadata(objectMetadata);

            PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, obj.getKey(), byteStream,
                    obj.getObjectMetadata());
            this.s3Client.putObject(putObjectRequest);

            ObjectMetadata meta = this.s3Client.getObjectMetadata(bucket, obj.getKey());
            Map<String, Object> headers = meta.getRawMetadata();
            for (Map.Entry<String, Object> entry : headers.entrySet()) {
                this.log.info("Object Metadata -- " + entry.getKey() + ": " + entry.getValue().toString()); //$NON-NLS-1$ //$NON-NLS-2$
            }

            return initResponse.getUploadId();
        }

        if (currentPartSize > 0 && currentPartSize <= (50 * 1024 * 1024)) // make this a const
        {
            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

            this.log.info("currentPartSize: " + currentPartSize); //$NON-NLS-1$
            this.log.info("byteArray: " + b); //$NON-NLS-1$

            UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withInputStream(byteStream).withPartSize(currentPartSize);
            partETags.add(this.s3Client.uploadPart(uploadPartRequest).getPartETag());
        }

        CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
                .withBucketName(bucket).withPartETags(partETags).withUploadId(initResponse.getUploadId())
                .withKey(obj.getKey());

        this.s3Client.completeMultipartUpload(completeMultipartUploadRequest);
        return initResponse.getUploadId();
    } catch (Exception e) {
        this.log.error("put(): Exception occurred in put(): " + e.getMessage()); //$NON-NLS-1$
        this.s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));
        throw new RuntimeException("put(): Exception occurred in put(): ", e); //$NON-NLS-1$
    }
}