Example usage for com.amazonaws.services.s3.model PutObjectRequest setMetadata

List of usage examples for com.amazonaws.services.s3.model PutObjectRequest setMetadata

Introduction

On this page you can find example usages of com.amazonaws.services.s3.model PutObjectRequest setMetadata.

Prototype

public void setMetadata(ObjectMetadata metadata) 

Source Link

Document

Sets the optional metadata instructing Amazon S3 how to handle the uploaded data (e.g. custom user metadata, content type, content encoding).

Usage

From source file:com.pinterest.secor.uploader.S3UploadManager.java

License:Apache License

/** Configures SSE-S3 (AES-256) server-side encryption on the given upload request. */
private void enableS3Encryption(PutObjectRequest uploadRequest) {
    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    uploadRequest.setMetadata(metadata);
}

From source file:com.universal.storage.UniversalS3Storage.java

License:Open Source License

/**
 * This method uploads a file with a length lesser than PART_SIZE (5Mb).
 * /*from  w ww  .j  a  v  a 2  s .c  o m*/
 * @param file to be stored within the storage.
 * @param path is the path for this new file within the root.
 * @throws UniversalIOException when a specific IO error occurs.
 */
private void uploadTinyFile(File file, String path) throws UniversalIOException {
    try {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        if (this.settings.getEncryption()) {
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }

        List<Tag> tags = new ArrayList<Tag>();
        for (String key : this.settings.getTags().keySet()) {
            tags.add(new Tag(key, this.settings.getTags().get(key)));
        }

        PutObjectRequest request = new PutObjectRequest(
                this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path)), file.getName(), file);
        request.setMetadata(objectMetadata);
        request.setTagging(new ObjectTagging(tags));
        request.setStorageClass(getStorageClass());
        this.triggerOnStoreFileListeners();

        PutObjectResult result = this.s3client.putObject(request);

        this.triggerOnFileStoredListeners(new UniversalStorageData(file.getName(),
                PREFIX_S3_URL + (this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path))) + "/"
                        + file.getName(),
                result.getVersionId(), this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path))));
    } catch (Exception e) {
        UniversalIOException error = new UniversalIOException(e.getMessage());
        this.triggerOnErrorListeners(error);
        throw error;
    }
}

From source file:example.uploads3.UploadS3.java

License:Apache License

/**
 * Uploads a local file to the fixed S3 bucket/key with AES-256 server-side
 * encryption, running inside a Spark application context.
 *
 * Usage: UploadS3 &lt;file-to-upload&gt;
 */
public static void main(String[] args) throws Exception {
    // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
    if (args.length < 1) {
        System.err.println("Usage: UploadS3 <file-to-upload>");
        System.exit(1);
    }
    String uploadFileName = args[0];
    String bucketName = "haos3";
    String keyName = "test/byspark.txt";
    // Create a Java Spark Context.
    SparkConf conf = new SparkConf().setAppName("UploadS3");
    JavaSparkContext sc = new JavaSparkContext(conf);

    AmazonS3 s3client = new AmazonS3Client(new ProfileCredentialsProvider());
    try {
        System.out.println("Uploading a new object to S3 from a file\n");
        File file = new File(uploadFileName);
        PutObjectRequest putRequest = new PutObjectRequest(bucketName, keyName, file);

        // Request server-side encryption.
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setServerSideEncryption("AES256");
        putRequest.setMetadata(objectMetadata);

        s3client.putObject(putRequest);

    } catch (AmazonServiceException ase) {
        // Request reached S3 but was rejected.
        System.out.println("Caught an AmazonServiceException, which " + "means your request made it "
                + "to Amazon S3, but was rejected with an error response" + " for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        // Client-side failure (e.g. no network) before reaching S3.
        System.out.println("Caught an AmazonClientException, which " + "means the client encountered "
                + "an internal error while trying to " + "communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    } finally {
        // Release Spark resources even if the upload fails.
        sc.stop();
    }
}

From source file:io.stallion.services.S3StorageService.java

License:Open Source License

/**
 * Uploads a file to S3 under the given bucket/key, applying the optional
 * content type, extra metadata headers and a public-read ACL when requested.
 *
 * @param file        local file to upload
 * @param bucket      destination bucket name
 * @param fileKey     destination object key
 * @param isPublic    when true the object gets the public-read canned ACL
 * @param contentType optional Content-Type for the object; skipped when empty
 * @param headers     optional extra metadata headers (may be null)
 */
public void uploadFile(File file, String bucket, String fileKey, boolean isPublic, String contentType,
        Map<String, String> headers) {
    // BUG FIX: the original code first called client.putObject(bucket, fileKey, file)
    // unconditionally, uploading the object twice — and that first upload ignored
    // the ACL, content type and headers configured below. The redundant call is removed.
    PutObjectRequest req = new PutObjectRequest(bucket, fileKey, file);
    if (isPublic) {
        req.withCannedAcl(CannedAccessControlList.PublicRead);
    }
    ObjectMetadata meta = new ObjectMetadata();

    if (headers != null) {
        for (String key : headers.keySet()) {
            meta.setHeader(key, headers.get(key));
        }
    }
    if (!empty(contentType)) {
        meta.setContentType(contentType);
    }
    req.setMetadata(meta);
    client.putObject(req);

}

From source file:md.djembe.aws.AmazonS3WebClient.java

License:Apache License

/** Uploads the given image to the bucket as a publicly readable JPEG object. */
public static void uploadToBucket(final String filename, final File image) {

    PutObjectRequest putObjectRequest = new PutObjectRequest(BUCKET_NAME, filename, image);
    putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);

    // Ensure the request carries metadata before tagging the content type.
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    if (metadata == null) {
        metadata = new ObjectMetadata();
        putObjectRequest.setMetadata(metadata);
    }
    metadata.setContentType("image/jpeg");

    getS3Client().putObject(putObjectRequest);
    LOGGER.info("File Uploaded to Amazon S3.");
}

From source file:ohnosequences.ivy.S3Repository.java

License:Apache License

@Override
protected void put(File source, String destination, boolean overwrite) {
    // Publishes a local artifact to S3 at the bucket/key encoded in `destination`.
    String bucket = S3Utils.getBucket(destination);
    String key = S3Utils.getKey(destination);
    PutObjectRequest request = new PutObjectRequest(bucket, key, source);
    request = request.withCannedAcl(acl);

    if (serverSideEncryption) {
        // Request SSE-S3 (AES-256) encryption for the stored object.
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        request.setMetadata(objectMetadata);
    }

    // Lazily create the destination bucket on first publish.
    if (!getS3Client().doesBucketExist(bucket)) {
        if (!createBucket(bucket, region)) {
            throw new Error("couldn't create bucket");
        }
    }

    // NOTE(review): the `overwrite` parameter is ignored here in favour of the
    // repository-level `this.overwrite` flag — confirm that shadowing is intended.
    if (!this.overwrite && !getS3Client().listObjects(bucket, key).getObjectSummaries().isEmpty()) {
        throw new Error(destination + " exists but overwriting is disabled");
    }
    getS3Client().putObject(request);

}

From source file:org.akvo.flow.deploy.Deploy.java

License:Open Source License

/**
 * Uploads an APK to the deployment bucket with a public-read ACL.
 *
 * @param accessKey AWS access key id
 * @param secretKey AWS secret access key
 * @param s3Path    destination key inside BUCKET_NAME
 * @param file      APK file to upload
 * @throws AmazonServiceException if S3 rejects the request
 * @throws AmazonClientException  if the client cannot reach S3
 */
private static void uploadS3(String accessKey, String secretKey, String s3Path, File file)
        throws AmazonServiceException, AmazonClientException {
    AmazonS3 s3 = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey));

    // Describe the payload: an Android package of known length.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType("application/vnd.android.package-archive");
    metadata.setContentLength(file.length());

    PutObjectRequest putRequest = new PutObjectRequest(BUCKET_NAME, s3Path, file);
    putRequest.setMetadata(metadata);
    // Make the uploaded package publicly downloadable.
    putRequest.setCannedAcl(CannedAccessControlList.PublicRead);

    PutObjectResult result = s3.putObject(putRequest);
    System.out.println("Apk uploaded successfully, with result ETag " + result.getETag());
}

From source file:org.alanwilliamson.amazon.s3.BackgroundUploader.java

License:Open Source License

/**
 * Uploads a single queued job file to S3 via TransferManager, handling
 * retry bookkeeping and success/error callbacks.
 *
 * @param jobFile job descriptor; reads "localpath", "bucket", "key",
 *                "storage", "amazonkey", "retry", "attempt", "retryms",
 *                "deletefile" and the optional "metadata", "acl",
 *                "aes256key" and "customheaders" entries
 */
private void uploadFile(Map<String, Object> jobFile) {

    File localFile = new File((String) jobFile.get("localpath"));
    if (!localFile.isFile()) {
        // Source vanished between queueing and upload: report and drop the job.
        removeJobFile(jobFile);
        callbackCfc(jobFile, false, "local file no longer exists");
        cfEngine.log("AmazonS3Write.BackgroundUploader: file no longer exists=" + localFile.getName());
        return;
    }

    // Setup the object data.
    ObjectMetadata omd = new ObjectMetadata();
    if (jobFile.containsKey("metadata"))
        omd.setUserMetadata((Map<String, String>) jobFile.get("metadata"));

    TransferManager tm = null;
    AmazonS3 s3Client = null;
    try {
        AmazonKey amazonKey = (AmazonKey) jobFile.get("amazonkey");
        s3Client = new AmazonBase().getAmazonS3(amazonKey);

        PutObjectRequest por = new PutObjectRequest((String) jobFile.get("bucket"), (String) jobFile.get("key"),
                localFile);
        por.setMetadata(omd);
        por.setStorageClass((StorageClass) jobFile.get("storage"));

        if (jobFile.containsKey("acl"))
            por.setCannedAcl(amazonKey.getAmazonCannedAcl((String) jobFile.get("acl")));

        if (jobFile.containsKey("aes256key"))
            por.setSSECustomerKey(new SSECustomerKey((String) jobFile.get("aes256key")));

        if (jobFile.containsKey("customheaders")) {
            Map<String, String> customheaders = (Map) jobFile.get("customheaders");

            // Iterate entries directly instead of key lookup per iteration.
            for (Map.Entry<String, String> header : customheaders.entrySet()) {
                por.putCustomRequestHeader(header.getKey(), header.getValue());
            }
        }

        long startTime = System.currentTimeMillis();
        tm = new TransferManager(s3Client);
        Upload upload = tm.upload(por);
        upload.waitForCompletion();

        log(jobFile, "Uploaded; timems=" + (System.currentTimeMillis() - startTime));

        removeJobFile(jobFile);
        callbackCfc(jobFile, true, null);

        if ((Boolean) jobFile.get("deletefile"))
            localFile.delete();

    } catch (Exception e) {
        // BUG FIX: waitForCompletion() can throw InterruptedException; restore
        // the thread's interrupt flag instead of silently swallowing it.
        if (e instanceof InterruptedException)
            Thread.currentThread().interrupt();

        log(jobFile, "Failed=" + e.getMessage());

        callbackCfc(jobFile, false, e.getMessage());

        int retry = (Integer) jobFile.get("retry");
        int attempt = (Integer) jobFile.get("attempt") + 1;

        if (retry == attempt) {
            // Out of attempts: give up on this job.
            removeJobFile(jobFile);
        } else {
            // Requeue with the configured back-off delay.
            jobFile.put("attempt", attempt);
            jobFile.put("attemptdate", System.currentTimeMillis() + (Long) jobFile.get("retryms"));
            acceptFile(jobFile);
        }

        if (s3Client != null)
            cleanupMultiPartUploads(s3Client, (String) jobFile.get("bucket"));

    } finally {
        if (tm != null)
            tm.shutdownNow(true);
    }

}

From source file:org.alanwilliamson.amazon.s3.Write.java

License:Open Source License

/**
 * Writes a local file to S3, either synchronously with retries or by handing
 * the job to the background uploader.
 *
 * @param amazonKey     credentials/configuration for the target account
 * @param bucket        destination bucket
 * @param key           destination object key
 * @param metadata      optional user metadata (may be null)
 * @param storage       storage class for the object
 * @param localpath     path of the local file to upload
 * @param retry         number of attempts before giving up
 * @param retryseconds  seconds to wait between attempts
 * @param deletefile    when true the local file is deleted after success
 * @param background    when true the upload is queued and this call returns immediately
 * @param acl           optional canned ACL name
 * @param aes256key     optional SSE-C customer key
 * @param customheaders optional extra request headers
 * @throws Exception if the file is missing or every attempt fails
 */
private void writeFile(AmazonKey amazonKey, String bucket, String key, Map<String, String> metadata,
        StorageClass storage, String localpath, int retry, int retryseconds, boolean deletefile,
        boolean background, String callback, String callbackdata, String appname, String acl, String aes256key,
        Map<String, String> customheaders) throws Exception {
    File localFile = new File(localpath);
    if (!localFile.isFile())
        throw new Exception("The file specified does not exist: " + localpath);

    // Push this to the background loader to handle and return immediately.
    if (background) {
        BackgroundUploader.acceptFile(amazonKey, bucket, key, metadata, storage, localpath, retry, retryseconds,
                deletefile, callback, callbackdata, appname, acl, aes256key, customheaders);
        return;
    }

    // Setup the object data.
    ObjectMetadata omd = new ObjectMetadata();
    if (metadata != null)
        omd.setUserMetadata(metadata);

    AmazonS3 s3Client = getAmazonS3(amazonKey);

    // Retry loop: rebuild the request each attempt and rethrow on the last failure.
    int attempts = 0;
    while (attempts < retry) {
        try {

            PutObjectRequest por = new PutObjectRequest(bucket, key, localFile);
            por.setMetadata(omd);
            por.setStorageClass(storage);

            if (acl != null && !acl.isEmpty())
                por.setCannedAcl(amazonKey.getAmazonCannedAcl(acl));

            if (aes256key != null && !aes256key.isEmpty())
                por.setSSECustomerKey(new SSECustomerKey(aes256key));

            if (customheaders != null && !customheaders.isEmpty()) {
                // Iterate entries directly instead of key lookup per iteration.
                for (Map.Entry<String, String> header : customheaders.entrySet()) {
                    por.putCustomRequestHeader(header.getKey(), header.getValue());
                }
            }

            s3Client.putObject(por);
            break;

        } catch (Exception e) {
            cfEngine.log("Failed: AmazonS3Write(bucket=" + bucket + "key=" + key + "; file=" + localFile
                    + "; attempt=" + (attempts + 1) + "; exception=" + e.getMessage() + ")");
            attempts++;

            if (attempts == retry)
                throw e;
            else
                // BUG FIX: multiply as long to avoid int overflow for large delays.
                Thread.sleep(retryseconds * 1000L);
        }
    }

    // Delete the file now that the upload succeeded.
    if (deletefile)
        localFile.delete();
}

From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java

License:Apache License

/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name./*from w w  w.  j  av  a 2 s  .  co  m*/
 *
 * This version doesn't need to create a temporary file to calculate the md5. Sadly this doesn't seem to be
 * used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException {
    String key = pathToKey(dst);

    if (!overwrite && exists(dst)) {
        throw new IOException(dst + " already exists");
    }

    LOG.info("Copying local file from " + src + " to " + dst);

    // Since we have a local file, we don't need to stream into a temporary file
    LocalFileSystem local = getLocal(getConf());
    File srcfile = local.pathToFile(src);

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMinimumUploadPartSize(partSize);
    transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);

    TransferManager transfers = new TransferManager(s3);
    transfers.setConfiguration(transferConfiguration);

    final ObjectMetadata om = new ObjectMetadata();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }

    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setMetadata(om);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            }
        }
    };

    Upload up = transfers.upload(putObjectRequest);
    up.addProgressListener(progressListener);
    try {
        up.waitForUploadResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    } finally {
        transfers.shutdownNow(false);
    }

    // This will delete unnecessary fake parent directories
    finishedWrite(key);

    if (delSrc) {
        local.delete(src, false);
    }
}