Example usage for com.amazonaws.services.s3.model ObjectMetadata setServerSideEncryption

Introduction

On this page you can find example usage for com.amazonaws.services.s3.model.ObjectMetadata.setServerSideEncryption.

Prototype

@Deprecated
public void setServerSideEncryption(String algorithm) 
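The method is marked deprecated in the AWS SDK for Java 1.x; setSSEAlgorithm(String) is documented as its replacement and accepts the same "AES256" value. Below is a minimal sketch of requesting SSE-S3 on a simple put, assuming the default profile credentials; the bucket, key, and file names are hypothetical and used only for illustration.

import java.io.File;

import com.amazonaws.auth.profile.ProfileCredentialsProvider;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class SseUploadSketch {
    public static void main(String[] args) {
        // Hypothetical bucket, key, and file used only for illustration.
        String bucketName = "my-bucket";
        String keyName = "docs/report.txt";
        File file = new File("report.txt");

        AmazonS3 s3 = new AmazonS3Client(new ProfileCredentialsProvider());

        ObjectMetadata metadata = new ObjectMetadata();
        // Deprecated form shown in the examples on this page:
        // metadata.setServerSideEncryption(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        // Non-deprecated replacement, same "AES256" value:
        metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);

        PutObjectRequest request = new PutObjectRequest(bucketName, keyName, file);
        request.setMetadata(metadata);
        s3.putObject(request);
    }
}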

Usage

From source file:be.ugent.intec.halvade.uploader.AWSUploader.java

License:Open Source License

public void Upload(String key, InputStream input, long size) throws InterruptedException {
    ObjectMetadata meta = new ObjectMetadata();
    if (SSE)
        meta.setServerSideEncryption(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    meta.setContentLength(size);
    Upload upload = tm.upload(existingBucketName, key, input, meta);

    try {
        // Or you can block and wait for the upload to finish
        upload.waitForCompletion();
        Logger.DEBUG("Upload complete.");
    } catch (AmazonClientException amazonClientException) {
        Logger.DEBUG("Unable to upload file, upload was aborted.");
        Logger.EXCEPTION(amazonClientException);
    }
}
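The helper above references fields (tm, existingBucketName, SSE) initialized elsewhere in AWSUploader; that setup is not shown on this page. A minimal sketch of how they might be wired, assuming the default profile credentials; all values are hypothetical placeholders.

import com.amazonaws.auth.profile.ProfileCredentialsProvider;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.transfer.TransferManager;

public class UploaderSetupSketch {
    // Field names mirror those used by the Upload helper above.
    private final AmazonS3 s3 = new AmazonS3Client(new ProfileCredentialsProvider());
    private final TransferManager tm = new TransferManager(s3);
    private final String existingBucketName = "my-existing-bucket"; // placeholder
    private final boolean SSE = true; // request SSE-S3 (AES256) on uploads
}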

From source file:be.ugent.intec.halvade.uploader.CopyOfAWSUploader.java

License:Open Source License

public void Upload(String key, InputStream input, long size) throws InterruptedException {
    ObjectMetadata meta = new ObjectMetadata();
    meta.setServerSideEncryption(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    meta.setContentLength(size);
    Upload upload = tm.upload(existingBucketName, key, input, meta);

    try {
        // Or you can block and wait for the upload to finish
        upload.waitForCompletion();
        Logger.DEBUG("Upload complete.");
    } catch (AmazonClientException amazonClientException) {
        Logger.DEBUG("Unable to upload file, upload was aborted.");
        Logger.EXCEPTION(amazonClientException);
    }
}

From source file:example.uploads3.UploadS3.java

License:Apache License

public static void main(String[] args) throws Exception {
    String uploadFileName = args[0];
    String bucketName = "haos3";
    String keyName = "test/byspark.txt";
    // Create a Java Spark Context.
    SparkConf conf = new SparkConf().setAppName("UploadS3");
    JavaSparkContext sc = new JavaSparkContext(conf);

    AmazonS3 s3client = new AmazonS3Client(new ProfileCredentialsProvider());
    try {
        System.out.println("Uploading a new object to S3 from a file\n");
        File file = new File(uploadFileName);
        PutObjectRequest putRequest = new PutObjectRequest(bucketName, keyName, file);

        // Request server-side encryption.
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setServerSideEncryption("AES256");
        putRequest.setMetadata(objectMetadata);

        s3client.putObject(putRequest);

    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which " + "means your request made it "
                + "to Amazon S3, but was rejected with an error response" + " for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which " + "means the client encountered "
                + "an internal error while trying to " + "communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java

License:Apache License

/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5. Sadly this doesn't seem to be
 * used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException {
    String key = pathToKey(dst);

    if (!overwrite && exists(dst)) {
        throw new IOException(dst + " already exists");
    }

    LOG.info("Copying local file from " + src + " to " + dst);

    // Since we have a local file, we don't need to stream into a temporary file
    LocalFileSystem local = getLocal(getConf());
    File srcfile = local.pathToFile(src);

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMinimumUploadPartSize(partSize);
    transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);

    TransferManager transfers = new TransferManager(s3);
    transfers.setConfiguration(transferConfiguration);

    final ObjectMetadata om = new ObjectMetadata();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }

    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setMetadata(om);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            }
        }
    };

    Upload up = transfers.upload(putObjectRequest);
    up.addProgressListener(progressListener);
    try {
        up.waitForUploadResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    } finally {
        transfers.shutdownNow(false);
    }

    // This will delete unnecessary fake parent directories
    finishedWrite(key);

    if (delSrc) {
        local.delete(src, false);
    }
}

From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java

License:Apache License

private void copyFile(String srcKey, String dstKey) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("copyFile " + srcKey + " -> " + dstKey);
    }

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMultipartCopyPartSize(partSize);

    TransferManager transfers = new TransferManager(s3);
    transfers.setConfiguration(transferConfiguration);

    ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
    final ObjectMetadata dstom = srcom.clone();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }

    CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
    copyObjectRequest.setCannedAccessControlList(cannedACL);
    copyObjectRequest.setNewObjectMetadata(dstom);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            }
        }
    };

    Copy copy = transfers.copy(copyObjectRequest);
    copy.addProgressListener(progressListener);
    try {
        copy.waitForCopyResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    } finally {
        transfers.shutdownNow(false);
    }
}

From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java

License:Apache License

private void createEmptyObject(final String bucketName, final String objectName)
        throws AmazonClientException, AmazonServiceException {
    final InputStream im = new InputStream() {
        @Override
        public int read() throws IOException {
            return -1;
        }
    };

    final ObjectMetadata om = new ObjectMetadata();
    om.setContentLength(0L);
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, objectName, im, om);
    putObjectRequest.setCannedAcl(cannedACL);
    s3.putObject(putObjectRequest);
    statistics.incrementWriteOps(1);
}

From source file:org.apache.hadoop.fs.s3a.S3AOutputStream.java

License:Apache License

@Override
public synchronized void close() throws IOException {
    if (closed) {
        return;
    }

    backupStream.close();
    LOG.info("OutputStream for key '" + key + "' closed. Now beginning upload");
    LOG.info("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold);

    try {
        TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
        transferConfiguration.setMinimumUploadPartSize(partSize);
        transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);

        TransferManager transfers = new TransferManager(client);
        transfers.setConfiguration(transferConfiguration);

        final ObjectMetadata om = new ObjectMetadata();
        if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
            om.setServerSideEncryption(serverSideEncryptionAlgorithm);
        }

        PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
        putObjectRequest.setCannedAcl(cannedACL);
        putObjectRequest.setMetadata(om);

        Upload upload = transfers.upload(putObjectRequest);

        ProgressableProgressListener listener = new ProgressableProgressListener(upload, progress, statistics);
        upload.addProgressListener(listener);

        upload.waitForUploadResult();

        long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
        if (statistics != null && delta != 0) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("S3A write delta changed after finished: " + delta + " bytes");
            }
            statistics.incrementBytesWritten(delta);
        }

        // This will delete unnecessary fake parent directories
        fs.finishedWrite(key);
    } catch (InterruptedException e) {
        throw new IOException(e);
    } finally {
        if (!backupFile.delete()) {
            LOG.warn("Could not delete temporary s3a file: " + backupFile);
        }
        super.close();
        closed = true;
    }

    LOG.info("OutputStream for key '" + key + "' upload complete");
}

From source file:org.apache.hadoop.fs.s3r.S3RFastOutputStream.java

License:Apache License

private ObjectMetadata createDefaultMetadata() {
    ObjectMetadata om = new ObjectMetadata();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    return om;
}

From source file:org.apache.hadoop.fs.s3r.S3RFileSystem.java

License:Apache License

/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException {
    String key = pathToKey(dst);

    if (!overwrite && exists(dst)) {
        throw new IOException(dst + " already exists");
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Copying local file from " + src + " to " + dst);
    }

    // Since we have a local file, we don't need to stream into a temporary file
    LocalFileSystem local = getLocal(getConf());
    File srcfile = local.pathToFile(src);

    final ObjectMetadata om = new ObjectMetadata();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setMetadata(om);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            default:
                break;
            }
        }
    };

    Upload up = transfers.upload(putObjectRequest);
    up.addProgressListener(progressListener);
    try {
        up.waitForUploadResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    }

    // This will delete unnecessary fake parent directories
    finishedWrite(key);

    if (delSrc) {
        local.delete(src, false);
    }
}

From source file:org.apache.hadoop.fs.s3r.S3RFileSystem.java

License:Apache License

private void copyFile(String srcKey, String dstKey) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("copyFile " + srcKey + " -> " + dstKey);
    }

    ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
    final ObjectMetadata dstom = srcom.clone();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
    copyObjectRequest.setCannedAccessControlList(cannedACL);
    copyObjectRequest.setNewObjectMetadata(dstom);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            default:
                break;
            }
        }
    };

    Copy copy = transfers.copy(copyObjectRequest);
    copy.addProgressListener(progressListener);
    try {
        copy.waitForCopyResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    }
}