Example usage for com.amazonaws.services.s3.model InitiateMultipartUploadRequest setObjectMetadata

Introduction

This page collects example usages of com.amazonaws.services.s3.model.InitiateMultipartUploadRequest#setObjectMetadata from open-source projects.

Prototype

public void setObjectMetadata(ObjectMetadata objectMetadata) 

Document

Sets the additional information about the new object being created, such as content type, content encoding, user metadata, etc.
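
Before the project examples below, here is a minimal sketch of the typical call pattern: build an ObjectMetadata, attach it to the request with setObjectMetadata, then initiate the upload. The bucket name, key, content type, and user metadata values are placeholder assumptions, and the client comes from the SDK's default builder rather than any particular project's configuration.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class InitiateWithMetadataSketch {
    public static void main(String[] args) {
        // Placeholder bucket and key; substitute real values.
        String bucketName = "example-bucket";
        String keyName = "example-key";

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Describe the object that the multipart upload will eventually create.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType("application/octet-stream");
        metadata.addUserMetadata("uploaded-by", "example");

        // The metadata must be attached before the upload is initiated.
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
        initRequest.setObjectMetadata(metadata);

        InitiateMultipartUploadResult initResult = s3.initiateMultipartUpload(initRequest);
        // The upload ID ties the subsequent UploadPartRequest and CompleteMultipartUploadRequest calls together.
        String uploadId = initResult.getUploadId();
        System.out.println("Upload ID: " + uploadId);
    }
}

The examples below follow the same pattern, varying only in what they put on the ObjectMetadata: user metadata maps, server-side encryption settings, or an empty instance.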

Usage

From source file: com.eucalyptus.blockstorage.S3SnapshotTransfer.java

License: Open Source License

private String initiateMulitpartUpload(Long uncompressedSize) throws SnapshotInitializeMpuException {
    InitiateMultipartUploadResult initResponse = null;
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
    ObjectMetadata objectMetadata = new ObjectMetadata();
    Map<String, String> userMetadataMap = new HashMap<String, String>();
    userMetadataMap.put(UNCOMPRESSED_SIZE_KEY, String.valueOf(uncompressedSize)); // Send the uncompressed length as the metadata
    objectMetadata.setUserMetadata(userMetadataMap);
    initRequest.setObjectMetadata(objectMetadata);

    try {
        LOG.info("Initiating multipart upload: snapshotId=" + snapshotId + ", bucketName=" + bucketName
                + ", keyName=" + keyName);
        initResponse = retryAfterRefresh(
                new Function<InitiateMultipartUploadRequest, InitiateMultipartUploadResult>() {

                    @Override
                    @Nullable
                    public InitiateMultipartUploadResult apply(@Nullable InitiateMultipartUploadRequest arg0) {
                        eucaS3Client.refreshEndpoint();
                        return eucaS3Client.initiateMultipartUpload(arg0);
                    }

                }, initRequest, REFRESH_TOKEN_RETRIES);
    } catch (Exception ex) {
        throw new SnapshotInitializeMpuException("Failed to initialize multipart upload part for snapshotId="
                + snapshotId + ", bucketName=" + bucketName + ", keyName=" + keyName, ex);
    }

    if (StringUtils.isBlank(initResponse.getUploadId())) {
        throw new SnapshotInitializeMpuException("Invalid upload ID for multipart upload part for snapshotId="
                + snapshotId + ", bucketName=" + bucketName + ", keyName=" + keyName);
    }
    return initResponse.getUploadId();
}

From source file: com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java

License: Open Source License

@Override
public InitiateMultipartUploadResponseType initiateMultipartUpload(InitiateMultipartUploadType request)
        throws S3Exception {
    InitiateMultipartUploadResponseType reply = request.getReply();
    User requestUser = getRequestUser(request);
    OsgInternalS3Client internalS3Client = null;

    String bucketName = request.getBucket();
    String key = request.getKey();
    InitiateMultipartUploadRequest initiateMultipartUploadRequest = new InitiateMultipartUploadRequest(
            bucketName, key);
    ObjectMetadata metadata = new ObjectMetadata();
    for (MetaDataEntry meta : request.getMetaData()) {
        metadata.addUserMetadata(meta.getName(), meta.getValue());
    }

    initiateMultipartUploadRequest.setObjectMetadata(metadata);
    try {
        internalS3Client = getS3Client(requestUser);
        AmazonS3Client s3Client = internalS3Client.getS3Client();
        InitiateMultipartUploadResult result = s3Client.initiateMultipartUpload(initiateMultipartUploadRequest);
        reply.setUploadId(result.getUploadId());
        reply.setBucket(bucketName);
        reply.setKey(key);
        return reply;
    } catch (AmazonServiceException e) {
        LOG.debug("Error from backend", e);
        throw S3ExceptionMapper.fromAWSJavaSDK(e);
    }
}

From source file: com.nextdoor.bender.ipc.s3.S3Transport.java

License: Apache License

protected void sendStream(InputStream input, String key, long streamSize) throws TransportException {
    /*
     * Create metadata
     */
    ObjectMetadata metadata = new ObjectMetadata();

    /*
     * Find if a multipart upload has already begun or start a new one.
     */
    MultiPartUpload upload;

    synchronized (multiPartUploads) {
        if (!multiPartUploads.containsKey(key)) {
            InitiateMultipartUploadRequest uploadRequest = new InitiateMultipartUploadRequest(bucketName, key);
            uploadRequest.setObjectMetadata(metadata);

            InitiateMultipartUploadResult res = client.initiateMultipartUpload(uploadRequest);
            upload = new MultiPartUpload(bucketName, key, res.getUploadId());
            multiPartUploads.put(key, upload);
        } else {
            upload = multiPartUploads.get(key);
        }
    }

    /*
     * Write out to S3. Note that the S3 client auto closes the input stream.
     */
    UploadPartRequest req = upload.getUploadPartRequest().withInputStream(input).withPartSize(streamSize);

    try {
        UploadPartResult res = client.uploadPart(req);
        upload.addPartETag(res.getPartETag());
    } catch (AmazonClientException e) {
        client.abortMultipartUpload(upload.getAbortMultipartUploadRequest());
        throw new TransportException("unable to put file" + e, e);
    } finally {
        try {
            input.close();
        } catch (IOException e) {
            logger.warn("error encountered while closing input stream", e);
        }
    }
}

From source file: com.streamsets.datacollector.bundles.SupportBundleManager.java

License: Apache License

/**
 * Instead of providing support bundle directly to user, upload it to StreamSets backend services.
 */
public void uploadNewBundleFromInstances(List<BundleContentGenerator> generators, BundleType bundleType)
        throws IOException {
    // Generate bundle
    SupportBundle bundle = generateNewBundleFromInstances(generators, bundleType);

    boolean enabled = configuration.get(Constants.UPLOAD_ENABLED, Constants.DEFAULT_UPLOAD_ENABLED);
    String accessKey = configuration.get(Constants.UPLOAD_ACCESS, Constants.DEFAULT_UPLOAD_ACCESS);
    String secretKey = configuration.get(Constants.UPLOAD_SECRET, Constants.DEFAULT_UPLOAD_SECRET);
    String bucket = configuration.get(Constants.UPLOAD_BUCKET, Constants.DEFAULT_UPLOAD_BUCKET);
    int bufferSize = configuration.get(Constants.UPLOAD_BUFFER_SIZE, Constants.DEFAULT_UPLOAD_BUFFER_SIZE);

    if (!enabled) {
        throw new IOException("Uploading support bundles was disabled by administrator.");
    }

    AWSCredentialsProvider credentialsProvider = new StaticCredentialsProvider(
            new BasicAWSCredentials(accessKey, secretKey));
    AmazonS3Client s3Client = new AmazonS3Client(credentialsProvider, new ClientConfiguration());
    s3Client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
    s3Client.setRegion(Region.getRegion(Regions.US_WEST_2));

    // Object Metadata
    ObjectMetadata s3Metadata = new ObjectMetadata();
    for (Map.Entry<Object, Object> entry : getMetadata(bundleType).entrySet()) {
        s3Metadata.addUserMetadata((String) entry.getKey(), (String) entry.getValue());
    }

    List<PartETag> partETags;
    InitiateMultipartUploadResult initResponse = null;
    try {
        // Uploading part by part
        LOG.info("Initiating multi-part support bundle upload");
        partETags = new ArrayList<>();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket,
                bundle.getBundleKey());
        initRequest.setObjectMetadata(s3Metadata);
        initResponse = s3Client.initiateMultipartUpload(initRequest);
    } catch (AmazonClientException e) {
        LOG.error("Support bundle upload failed: ", e);
        throw new IOException("Support bundle upload failed", e);
    }

    try {
        byte[] buffer = new byte[bufferSize];
        int partId = 1;
        int size = -1;
        while ((size = readFully(bundle.getInputStream(), buffer)) != -1) {
            LOG.debug("Uploading part {} of size {}", partId, size);
            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(bundle.getBundleKey()).withUploadId(initResponse.getUploadId())
                    .withPartNumber(partId++).withInputStream(new ByteArrayInputStream(buffer))
                    .withPartSize(size);

            partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
        }

        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucket,
                bundle.getBundleKey(), initResponse.getUploadId(), partETags);

        s3Client.completeMultipartUpload(compRequest);
        LOG.info("Support bundle upload finished");
    } catch (Exception e) {
        LOG.error("Support bundle upload failed", e);
        s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, bundle.getBundleKey(), initResponse.getUploadId()));

        throw new IOException("Can't upload support bundle", e);
    } finally {
        // Close the client
        s3Client.shutdown();
    }
}

From source file: org.apache.apex.malhar.lib.fs.s3.S3InitiateFileUploadOperator.java

License: Apache License

/**
 * For the input file, initiate the upload and emit the UploadFileMetadata through the fileMetadataOutput,
 * uploadMetadataOutput ports.
 * @param tuple given tuple
 */
protected void processTuple(AbstractFileSplitter.FileMetadata tuple) {
    if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
        return;
    }
    String keyName = getKeyName(tuple.getFilePath());
    String uploadId = "";
    if (tuple.getNumberOfBlocks() > 1) {
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
        initRequest.setObjectMetadata(createObjectMetadata());
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        uploadId = initResponse.getUploadId();
    }
    UploadFileMetadata uploadFileMetadata = new UploadFileMetadata(tuple, uploadId, keyName);
    fileMetadataOutput.emit(uploadFileMetadata);
    uploadMetadataOutput.emit(uploadFileMetadata);
    currentWindowRecoveryState.add(uploadFileMetadata);
}

From source file: org.elasticsearch.cloud.aws.blobstore.DefaultS3OutputStream.java

License: Apache License

protected String doInitialize(S3BlobStore blobStore, String bucketName, String blobName,
        boolean serverSideEncryption) {
    InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, blobName);
    if (serverSideEncryption) {
        ObjectMetadata md = new ObjectMetadata();
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        request.setObjectMetadata(md);
    }
    return blobStore.client().initiateMultipartUpload(request).getUploadId();
}

From source file: org.elasticsearch.repositories.s3.DefaultS3OutputStream.java

License: Apache License

protected String doInitialize(S3BlobStore blobStore, String bucketName, String blobName,
        boolean serverSideEncryption) {
    InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, blobName)
            .withCannedACL(blobStore.getCannedACL()).withStorageClass(blobStore.getStorageClass());

    if (serverSideEncryption) {
        ObjectMetadata md = new ObjectMetadata();
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        request.setObjectMetadata(md);
    }

    return blobStore.client().initiateMultipartUpload(request).getUploadId();
}