Example usage for com.amazonaws.services.s3.transfer TransferManager TransferManager

Introduction

This page collects example usages of the com.amazonaws.services.s3.transfer TransferManager constructor.

Prototype

protected TransferManager(TransferManagerBuilder builder) 

Document

Constructor for use by classes that need to extend the TransferManager.
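
The protected constructor above is intended for subclasses. Application code normally obtains an instance through TransferManagerBuilder; the public constructors used in the examples below predate the builder and are deprecated in current SDK for Java v1 releases. Below is a minimal sketch of builder-based construction, assuming the AWS SDK for Java v1 is on the classpath (bucket, key, and file names are placeholders):

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

public class TransferManagerBuilderExample {
    public static void main(String[] args) throws InterruptedException {
        // The default client picks up credentials and region from the default provider chains.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Recommended construction path: the builder rather than the deprecated public constructors.
        TransferManager transferManager = TransferManagerBuilder.standard()
                .withS3Client(s3)
                .build();
        try {
            Upload upload = transferManager.upload("my-bucket", "my-key", new File("/tmp/example.dat"));
            upload.waitForCompletion();
        } finally {
            // false: release the transfer threads but keep the AmazonS3 client open.
            transferManager.shutdownNow(false);
        }
    }
}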

Usage

From source file:org.finra.dm.dao.impl.S3DaoImpl.java

License:Apache License

/**
 * Gets a transfer manager with the specified parameters including proxy host, proxy port, S3 access key, S3 secret key, and max threads.
 *
 * @param params the parameters.
 *
 * @return a newly created transfer manager.
 */
private TransferManager getTransferManager(final S3FileTransferRequestParamsDto params) {
    // We are returning a new transfer manager each time it is called. Although the Javadocs of TransferManager say to share a single instance
    // if possible, this could potentially be a problem if TransferManager.shutdownNow(true) is called and the underlying resources are not present when needed
    // for subsequent transfers.
    if (params.getMaxThreads() == null) {
        // Create a transfer manager that will internally use an appropriate number of threads.
        return new TransferManager(getAmazonS3(params));
    } else {
        // Create a transfer manager with our own executor configured with the specified total threads.
        LOGGER.info("Creating a transfer manager with max threads: " + params.getMaxThreads());
        return new TransferManager(getAmazonS3(params), Executors.newFixedThreadPool(params.getMaxThreads()));
    }
}
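
For comparison, newer SDK v1 releases let the same fixed-size thread pool be supplied through the builder instead of the deprecated two-argument constructor. A minimal sketch, assuming TransferManagerBuilder is available (the client and maxThreads parameters are placeholders standing in for the values used above):

import java.util.concurrent.Executors;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;

class FixedPoolTransferManagerFactory {
    // Builds a TransferManager backed by a fixed-size thread pool.
    static TransferManager create(AmazonS3 amazonS3, int maxThreads) {
        return TransferManagerBuilder.standard()
                .withS3Client(amazonS3)
                .withExecutorFactory(() -> Executors.newFixedThreadPool(maxThreads))
                .build();
    }
}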

From source file:org.finra.herd.dao.impl.S3DaoImpl.java

License:Apache License

/**
 * Gets a transfer manager with the specified parameters including proxy host, proxy port, S3 access key, S3 secret key, and max threads.
 *
 * @param params the parameters.
 *
 * @return a newly created transfer manager.
 */
private TransferManager getTransferManager(final S3FileTransferRequestParamsDto params) {
    // We are returning a new transfer manager each time it is called. Although the Javadocs of TransferManager say to share a single instance
    // if possible, this could potentially be a problem if TransferManager.shutdownNow(true) is called and the underlying resources are not present when needed
    // for subsequent transfers.
    if (params.getMaxThreads() == null) {
        // Create a transfer manager that will internally use an appropriate number of threads.
        return new TransferManager(getAmazonS3(params));
    } else {
        // Create a transfer manager with our own executor configured with the specified total threads.
        LOGGER.info("Creating a transfer manager. fixedThreadPoolSize={}", params.getMaxThreads());
        return new TransferManager(getAmazonS3(params), Executors.newFixedThreadPool(params.getMaxThreads()));
    }
}

From source file:org.kuali.rice.kew.notes.service.impl.AmazonS3AttachmentServiceImpl.java

License:Educational Community License

@Override
public void persistAttachedFileAndSetAttachmentBusinessObjectValue(Attachment attachment) throws Exception {
    if (attachment.getFileLoc() == null) {
        String s3Url = generateS3Url(attachment);
        attachment.setFileLoc(s3Url);
    }
    TransferManager manager = new TransferManager(this.amazonS3);
    ObjectMetadata metadata = new ObjectMetadata();
    if (attachment.getMimeType() != null) {
        metadata.setContentType(attachment.getMimeType());
    }
    if (attachment.getFileName() != null) {
        metadata.setContentDisposition(
                "attachment; filename=" + URLEncoder.encode(attachment.getFileName(), "UTF-8"));
    }
    Upload upload = manager.upload(this.bucketName, parseObjectKey(attachment.getFileLoc()),
            attachment.getAttachedObject(), metadata);
    upload.waitForCompletion();
}

From source file:org.kuali.rice.krad.service.impl.AmazonS3AttachmentServiceImpl.java

License:Educational Community License

/**
 * @see org.kuali.rice.krad.service.AttachmentService#createAttachment(GloballyUnique,
 * String, String, int, java.io.InputStream, String)
 */
@Override
public Attachment createAttachment(GloballyUnique parent, String uploadedFileName, String mimeType,
        int fileSize, InputStream fileContents, String attachmentTypeCode) throws IOException {
    if (parent == null) {
        throw new IllegalArgumentException("invalid (null or uninitialized) document");
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("starting to create attachment for document: " + parent.getObjectId());
    }
    if (StringUtils.isBlank(uploadedFileName)) {
        throw new IllegalArgumentException("invalid (blank) fileName");
    }
    if (StringUtils.isBlank(mimeType)) {
        throw new IllegalArgumentException("invalid (blank) mimeType");
    }
    if (fileSize <= 0) {
        throw new IllegalArgumentException("invalid (non-positive) fileSize");
    }
    if (fileContents == null) {
        throw new IllegalArgumentException("invalid (null) inputStream");
    }

    String uniqueFileNameGuid = UUID.randomUUID().toString();

    TransferManager manager = new TransferManager(this.amazonS3);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType(mimeType);
    metadata.setContentDisposition("attachment; filename=" + URLEncoder.encode(uploadedFileName, "UTF-8"));
    metadata.setContentLength(fileSize);
    Upload upload = manager.upload(this.bucketName, generateObjectKey(uniqueFileNameGuid), fileContents,
            metadata);
    try {
        upload.waitForCompletion();
    } catch (InterruptedException e) {
        // Restore the interrupt flag before wrapping the exception.
        Thread.currentThread().interrupt();
        throw new IllegalStateException("Failed to upload file to s3", e);
    }

    // create DocumentAttachment
    Attachment attachment = new Attachment();
    attachment.setAttachmentIdentifier(uniqueFileNameGuid);
    attachment.setAttachmentFileName(uploadedFileName);
    attachment.setAttachmentFileSize(Long.valueOf(fileSize));
    attachment.setAttachmentMimeTypeCode(mimeType);
    attachment.setAttachmentTypeCode(attachmentTypeCode);

    if (LOG.isDebugEnabled()) {
        LOG.debug("finished creating attachment for document: " + parent.getObjectId());
    }
    return attachment;
}

From source file:org.nuxeo.ecm.core.storage.sql.S3BinaryManager.java

License:Apache License

@Override
protected void setupCloudClient() throws IOException {
    // Get settings from the configuration
    bucketName = getProperty(BUCKET_NAME_PROPERTY);
    bucketNamePrefix = MoreObjects.firstNonNull(getProperty(BUCKET_PREFIX_PROPERTY), StringUtils.EMPTY);
    String bucketRegion = getProperty(BUCKET_REGION_PROPERTY);
    if (isBlank(bucketRegion)) {
        bucketRegion = DEFAULT_BUCKET_REGION;
    }
    String awsID = getProperty(AWS_ID_PROPERTY);
    String awsSecret = getProperty(AWS_SECRET_PROPERTY);

    String proxyHost = Framework.getProperty(Environment.NUXEO_HTTP_PROXY_HOST);
    String proxyPort = Framework.getProperty(Environment.NUXEO_HTTP_PROXY_PORT);
    String proxyLogin = Framework.getProperty(Environment.NUXEO_HTTP_PROXY_LOGIN);
    String proxyPassword = Framework.getProperty(Environment.NUXEO_HTTP_PROXY_PASSWORD);

    int maxConnections = getIntProperty(CONNECTION_MAX_PROPERTY);
    int maxErrorRetry = getIntProperty(CONNECTION_RETRY_PROPERTY);
    int connectionTimeout = getIntProperty(CONNECTION_TIMEOUT_PROPERTY);
    int socketTimeout = getIntProperty(SOCKET_TIMEOUT_PROPERTY);

    String keystoreFile = getProperty(KEYSTORE_FILE_PROPERTY);
    String keystorePass = getProperty(KEYSTORE_PASS_PROPERTY);
    String privkeyAlias = getProperty(PRIVKEY_ALIAS_PROPERTY);
    String privkeyPass = getProperty(PRIVKEY_PASS_PROPERTY);
    String endpoint = getProperty(ENDPOINT_PROPERTY);
    String sseprop = getProperty(SERVERSIDE_ENCRYPTION_PROPERTY);
    if (isNotBlank(sseprop)) {
        userServerSideEncryption = Boolean.parseBoolean(sseprop);
    }

    // Fallback on default env keys for ID and secret
    if (isBlank(awsID)) {
        awsID = System.getenv(AWS_ID_ENV);
    }
    if (isBlank(awsSecret)) {
        awsSecret = System.getenv(AWS_SECRET_ENV);
    }

    if (isBlank(bucketName)) {
        throw new RuntimeException("Missing conf: " + BUCKET_NAME_PROPERTY);
    }

    if (!isBlank(bucketNamePrefix) && !bucketNamePrefix.endsWith("/")) {
        log.warn(String.format("%s %s: S3 bucket prefix should end with '/', added automatically.",
                BUCKET_PREFIX_PROPERTY, bucketNamePrefix));
        bucketNamePrefix += "/";
    }
    // set up credentials
    if (isBlank(awsID) || isBlank(awsSecret)) {
        awsCredentialsProvider = new InstanceProfileCredentialsProvider();
        try {
            awsCredentialsProvider.getCredentials();
        } catch (AmazonClientException e) {
            throw new RuntimeException("Missing AWS credentials and no instance role found");
        }
    } else {
        awsCredentialsProvider = new BasicAWSCredentialsProvider(awsID, awsSecret);
    }

    // set up client configuration
    clientConfiguration = new ClientConfiguration();
    if (isNotBlank(proxyHost)) {
        clientConfiguration.setProxyHost(proxyHost);
    }
    if (isNotBlank(proxyPort)) {
        clientConfiguration.setProxyPort(Integer.parseInt(proxyPort));
    }
    if (isNotBlank(proxyLogin)) {
        clientConfiguration.setProxyUsername(proxyLogin);
    }
    if (proxyPassword != null) { // could be blank
        clientConfiguration.setProxyPassword(proxyPassword);
    }
    if (maxConnections > 0) {
        clientConfiguration.setMaxConnections(maxConnections);
    }
    if (maxErrorRetry >= 0) { // 0 is allowed
        clientConfiguration.setMaxErrorRetry(maxErrorRetry);
    }
    if (connectionTimeout >= 0) { // 0 is allowed
        clientConfiguration.setConnectionTimeout(connectionTimeout);
    }
    if (socketTimeout >= 0) { // 0 is allowed
        clientConfiguration.setSocketTimeout(socketTimeout);
    }

    // set up encryption
    encryptionMaterials = null;
    if (isNotBlank(keystoreFile)) {
        boolean confok = true;
        if (keystorePass == null) { // could be blank
            log.error("Keystore password missing");
            confok = false;
        }
        if (isBlank(privkeyAlias)) {
            log.error("Key alias missing");
            confok = false;
        }
        if (privkeyPass == null) { // could be blank
            log.error("Key password missing");
            confok = false;
        }
        if (!confok) {
            throw new RuntimeException("S3 Crypto configuration incomplete");
        }
        try {
            // Open keystore
            KeyStore keystore = KeyStore.getInstance(KeyStore.getDefaultType());
            try (FileInputStream ksStream = new FileInputStream(new File(keystoreFile))) {
                keystore.load(ksStream, keystorePass.toCharArray());
            }
            // Get keypair for alias
            if (!keystore.isKeyEntry(privkeyAlias)) {
                throw new RuntimeException("Alias " + privkeyAlias + " is missing or not a key alias");
            }
            PrivateKey privKey = (PrivateKey) keystore.getKey(privkeyAlias, privkeyPass.toCharArray());
            Certificate cert = keystore.getCertificate(privkeyAlias);
            PublicKey pubKey = cert.getPublicKey();
            KeyPair keypair = new KeyPair(pubKey, privKey);
            // Get encryptionMaterials from keypair
            encryptionMaterials = new EncryptionMaterials(keypair);
            cryptoConfiguration = new CryptoConfiguration();
        } catch (IOException | GeneralSecurityException e) {
            throw new RuntimeException("Could not read keystore: " + keystoreFile + ", alias: " + privkeyAlias,
                    e);
        }
    }
    isEncrypted = encryptionMaterials != null;

    // Try to create bucket if it doesn't exist
    if (!isEncrypted) {
        amazonS3 = new AmazonS3Client(awsCredentialsProvider, clientConfiguration);
    } else {
        amazonS3 = new AmazonS3EncryptionClient(awsCredentialsProvider,
                new StaticEncryptionMaterialsProvider(encryptionMaterials), clientConfiguration,
                cryptoConfiguration);
    }
    if (isNotBlank(endpoint)) {
        amazonS3.setEndpoint(endpoint);
    }

    // Set region explicitly for regions that require Version 4 signatures
    ArrayList<String> V4_ONLY_REGIONS = new ArrayList<String>();
    V4_ONLY_REGIONS.add("eu-central-1");
    V4_ONLY_REGIONS.add("ap-northeast-2");
    if (V4_ONLY_REGIONS.contains(bucketRegion)) {
        amazonS3.setRegion(Region.getRegion(Regions.fromName(bucketRegion)));
    }

    try {
        if (!amazonS3.doesBucketExist(bucketName)) {
            amazonS3.createBucket(bucketName, bucketRegion);
            amazonS3.setBucketAcl(bucketName, CannedAccessControlList.Private);
        }
    } catch (AmazonClientException e) {
        throw new IOException(e);
    }

    // compat for NXP-17895, using "downloadfroms3", to be removed
    // these two fields have already been initialized by the base class initialize()
    // using standard property "directdownload"
    String dd = getProperty(DIRECTDOWNLOAD_PROPERTY_COMPAT);
    if (dd != null) {
        directDownload = Boolean.parseBoolean(dd);
    }
    int dde = getIntProperty(DIRECTDOWNLOAD_EXPIRE_PROPERTY_COMPAT);
    if (dde >= 0) {
        directDownloadExpire = dde;
    }

    transferManager = new TransferManager(amazonS3);
    abortOldUploads();
}
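
The final abortOldUploads() call is project-specific code; a likely SDK primitive behind such cleanup is TransferManager.abortMultipartUploads, which aborts incomplete multipart uploads started before a cutoff date. A minimal illustrative sketch (the one-day cutoff is an arbitrary assumption, not Nuxeo's policy):

import java.util.Date;
import java.util.concurrent.TimeUnit;

import com.amazonaws.services.s3.transfer.TransferManager;

class StaleUploadCleanup {
    // Aborts incomplete multipart uploads in the bucket that were started before the cutoff.
    static void abortStaleUploads(TransferManager transferManager, String bucketName) {
        Date cutoff = new Date(System.currentTimeMillis() - TimeUnit.DAYS.toMillis(1));
        transferManager.abortMultipartUploads(bucketName, cutoff);
    }
}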

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

private void multiPartUpload(PutObjectRequest req)
        throws AmazonServiceException, AmazonClientException, InterruptedException {
    TransferManager tx = null;
    try {
        if (awsCredentials != null)
            tx = new TransferManager(awsCredentials);
        else
            tx = new TransferManager(new InstanceProfileCredentialsProvider());
        Upload myUpload = tx.upload(req);
        myUpload.waitForCompletion();
    } finally {
        if (tx != null)
            tx.shutdownNow();
    }

}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

private void multiPartDownload(String keyName, File f)
        throws AmazonServiceException, AmazonClientException, InterruptedException {
    TransferManager tx = null;
    try {
        if (awsCredentials != null)
            tx = new TransferManager(awsCredentials);
        else
            tx = new TransferManager(new InstanceProfileCredentialsProvider());
        Download myDownload = tx.download(this.name, keyName, f);
        myDownload.waitForCompletion();
    } finally {
        if (tx != null)
            tx.shutdownNow();
    }
}

From source file:org.springframework.integration.aws.outbound.S3MessageHandler.java

License:Apache License

public S3MessageHandler(AmazonS3 amazonS3, String bucket, boolean produceReply) {
    this(new TransferManager(amazonS3), bucket, produceReply);
    Assert.notNull(amazonS3, "'amazonS3' must not be null");
}

From source file:org.springframework.integration.aws.outbound.S3MessageHandler.java

License:Apache License

public S3MessageHandler(AmazonS3 amazonS3, Expression bucketExpression, boolean produceReply) {
    this(new TransferManager(amazonS3), bucketExpression, produceReply);
    Assert.notNull(amazonS3, "'amazonS3' must not be null");
}

From source file:org.springframework.integration.aws.s3.core.AmazonS3OperationsImpl.java

License:Apache License

/**
 * The implemented afterPropertiesSet method
 */
public void afterPropertiesSet() throws Exception {
    if (threadPoolExecutor == null) {
        // Will use the default executor.
        // See com.amazonaws.services.s3.transfer.internal.TransferManagerUtils for more details.
        transferManager = new TransferManager(client);
    } else {
        transferManager = new TransferManager(client, threadPoolExecutor);
    }
    // Amazon recommends using multipart upload for objects larger than 100 MB.
    if (multipartUploadThreshold > 0) {
        TransferManagerConfiguration config = new TransferManagerConfiguration();
        if (multipartUploadThreshold > Integer.MAX_VALUE)
            config.setMultipartUploadThreshold(Integer.MAX_VALUE); //2GB
        else
            config.setMultipartUploadThreshold((int) multipartUploadThreshold);

        transferManager.setConfiguration(config);
    }
    //If none is set, we use the default
}