Example usage for com.amazonaws.services.s3.transfer TransferManager TransferManager

Introduction

This page collects open-source usage examples for the com.amazonaws.services.s3.transfer TransferManager constructor.

Prototype

protected TransferManager(TransferManagerBuilder builder) 

Document

Constructor for use by classes that need to extend the TransferManager.
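
Because this constructor is protected, it can only be invoked from a subclass; ordinary application code obtains an instance through TransferManagerBuilder instead (the examples below use the older public constructors, which newer SDK 1.x releases deprecate in favor of the builder). A minimal sketch of both paths, assuming a preconfigured AmazonS3 client named s3Client; the CustomTransferManager subclass is hypothetical:

// Typical path: let the builder construct the TransferManager.
TransferManager tm = TransferManagerBuilder.standard()
        .withS3Client(s3Client)
        .build();

// Subclass path: the protected constructor lets an extending class
// pass a configured builder up to TransferManager.
public class CustomTransferManager extends TransferManager {
    public CustomTransferManager(TransferManagerBuilder builder) {
        super(builder);
    }
}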

Usage

From source file:msv_upload_tool.FXMLDocumentController.java

private void uploadObject() {

    final Long max = file.length();

    task = new Task<Void>() {
        @Override
        protected Void call() {

            boolean doLoop = true;
            long total = 0;

            while (doLoop) {

                lock.readLock().lock();

                try {
                    total = totalBytes;
                } finally {
                    lock.readLock().unlock();
                }

                updateProgress(total, max);
                if (total == max)
                    doLoop = false;

                try {
                    Thread.sleep(50); // poll the shared byte counter every 50 ms
                } catch (InterruptedException ex) {
                    Thread.currentThread().interrupt();
                }

            }

            updateProgress(-1, max); // a negative workDone switches the bar to indeterminate

            this.succeeded();
            return null;

        }
    };

    uploadProgress.progressProperty().bind(task.progressProperty());
    task.setOnSucceeded(new EventHandler<WorkerStateEvent>() {

        @Override
        public void handle(WorkerStateEvent event) {

            label.setText("");

            label2.setText("");
            button.setDisable(true);
            button2.setDisable(false);

        }

    });

    Thread th = new Thread(task);

    th.setDaemon(true);

    //disable the buttons
    button.setDisable(true);
    button2.setDisable(true);

    th.start();

    String existingBucketName = "mstargeneralfiles";
    String keyName = "duh/" + file.getName();
    String filePath = file.getAbsolutePath();

    TransferManager tm = new TransferManager(new ProfileCredentialsProvider());

    // For more advanced uploads, you can create a request object 
    // and supply additional request parameters (ex: progress listeners,
    // canned ACLs, etc.)
    PutObjectRequest request = new PutObjectRequest(existingBucketName, keyName, new File(filePath));

    // You can ask the upload for its progress, or you can 
    // add a ProgressListener to your request to receive notifications 
    // when bytes are transferred.
    request.setGeneralProgressListener(new ProgressListener() {

        @Override
        public void progressChanged(ProgressEvent progressEvent) {

            System.out.println(progressEvent.toString());

            lock.writeLock().lock();

            try {
                totalBytes += progressEvent.getBytesTransferred();
            } finally {
                lock.writeLock().unlock();
            }

        }
    });

    // TransferManager processes all transfers asynchronously, 
    // so this call will return immediately.
    Upload upload = tm.upload(request);

}
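
Note that uploadObject() returns without ever waiting on the returned Upload; completion is observed only through the progress listener and the polling task. A caller that needs to block until the transfer finishes could instead wait on the Upload handle and then release the TransferManager's threads, as the later examples do:

try {
    upload.waitForCompletion(); // blocks until the upload succeeds or throws
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
} finally {
    tm.shutdownNow(); // also shuts down the underlying S3 client
}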

From source file:n3phele.agent.repohandlers.S3Large.java

License:Open Source License

public Origin put(InputStream input, long length, String encoding) {
    Origin result = new Origin(source + "/" + root + "/" + key, 0, null, null);
    TransferManager tm = null;
    try {
        tm = new TransferManager(this.credentials);
        tm.getAmazonS3Client().setEndpoint(source.toString());

        objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(this.length = length);
        this.encoding = encoding;
        if (encoding != null)
            objectMetadata.setContentType(this.encoding);
        log.info("Output: " + source + "/" + root + "/" + key + " Content-Type: " + encoding + " length: "
                + length);
        Upload upload = tm.upload(root, key, input, objectMetadata);
        upload.waitForCompletion();
        // PutObjectResult object = s3().putObject(root, key, input, objectMetadata);
        result.setLength(length);
        ObjectMetadata od = s3().getObjectMetadata(root, key);
        result.setModified(od.getLastModified());
    } catch (AmazonServiceException e) {
        throw e;
    } catch (AmazonClientException e) {
        throw e;
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new AmazonClientException(e.getMessage(), e);
    } finally {
        try {
            input.close();
        } catch (IOException e) {
            // ignore: best-effort cleanup
        }
        try {
            tm.shutdownNow();
        } catch (Exception e) {
            // ignore: tm may be null if construction failed
        }
        try {
            s3().shutdown();
        } catch (Exception e) {
            // ignore: best-effort cleanup
        }
    }
    return result;

}

From source file:org.alanwilliamson.amazon.s3.BackgroundUploader.java

License:Open Source License

private void uploadFile(Map<String, Object> jobFile) {

    File localFile = new File((String) jobFile.get("localpath"));
    if (!localFile.isFile()) {
        removeJobFile(jobFile);
        callbackCfc(jobFile, false, "local file no longer exists");
        cfEngine.log("AmazonS3Write.BackgroundUploader: file no longer exists=" + localFile.getName());
        return;
    }

    // Setup the object data
    ObjectMetadata omd = new ObjectMetadata();
    if (jobFile.containsKey("metadata"))
        omd.setUserMetadata((Map<String, String>) jobFile.get("metadata"));

    TransferManager tm = null;
    AmazonS3 s3Client = null;
    try {
        AmazonKey amazonKey = (AmazonKey) jobFile.get("amazonkey");
        s3Client = new AmazonBase().getAmazonS3(amazonKey);

        PutObjectRequest por = new PutObjectRequest((String) jobFile.get("bucket"), (String) jobFile.get("key"),
                localFile);
        por.setMetadata(omd);
        por.setStorageClass((StorageClass) jobFile.get("storage"));

        if (jobFile.containsKey("acl"))
            por.setCannedAcl(amazonKey.getAmazonCannedAcl((String) jobFile.get("acl")));

        if (jobFile.containsKey("aes256key"))
            por.setSSECustomerKey(new SSECustomerKey((String) jobFile.get("aes256key")));

        if (jobFile.containsKey("customheaders")) {
            Map<String, String> customheaders = (Map<String, String>) jobFile.get("customheaders");

            for (Map.Entry<String, String> header : customheaders.entrySet()) {
                por.putCustomRequestHeader(header.getKey(), header.getValue());
            }
        }

        long startTime = System.currentTimeMillis();
        tm = new TransferManager(s3Client);
        Upload upload = tm.upload(por);
        upload.waitForCompletion();

        log(jobFile, "Uploaded; timems=" + (System.currentTimeMillis() - startTime));

        removeJobFile(jobFile);
        callbackCfc(jobFile, true, null);

        if ((Boolean) jobFile.get("deletefile"))
            localFile.delete();

    } catch (Exception e) {
        log(jobFile, "Failed=" + e.getMessage());

        callbackCfc(jobFile, false, e.getMessage());

        int retry = (Integer) jobFile.get("retry");
        int attempt = (Integer) jobFile.get("attempt") + 1;

        if (retry == attempt) {
            removeJobFile(jobFile);
        } else {
            jobFile.put("attempt", attempt);
            jobFile.put("attemptdate", System.currentTimeMillis() + (Long) jobFile.get("retryms"));
            acceptFile(jobFile);
        }

        if (s3Client != null)
            cleanupMultiPartUploads(s3Client, (String) jobFile.get("bucket"));

    } finally {
        if (tm != null)
            tm.shutdownNow(true);
    }

}

From source file:org.alanwilliamson.amazon.s3.BackgroundUploader.java

License:Open Source License

private void cleanupMultiPartUploads(AmazonS3 s3Client, String bucket) {
    TransferManager tm = new TransferManager(s3Client);
    try {
        tm.abortMultipartUploads(bucket, new Date(System.currentTimeMillis() - DateUtil.DAY_MS));
    } catch (AmazonClientException amazonClientException) {
        cfEngine.log("AmazonS3Write.BackgroundUploader.cleanupMultiPartUploads():"
                + amazonClientException.getMessage());
    } finally {
        // always release the TransferManager's threads, even on unexpected failures
        tm.shutdownNow(true);
    }
}

From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java

License:Apache License

/** Called after a new FileSystem instance is constructed.
 * @param name a uri whose authority section names the host, port, etc.
 *   for this FileSystem
 * @param conf the configuration
 */
public void initialize(URI name, Configuration conf) throws IOException {
    super.initialize(name, conf);

    uri = URI.create(name.getScheme() + "://" + name.getAuthority());
    workingDir = new Path("/user", System.getProperty("user.name")).makeQualified(this.uri,
            this.getWorkingDirectory());

    // Try to get our credentials or just connect anonymously
    String accessKey = conf.get(NEW_ACCESS_KEY, conf.get(OLD_ACCESS_KEY, null));
    String secretKey = conf.get(NEW_SECRET_KEY, conf.get(OLD_SECRET_KEY, null));

    String userInfo = name.getUserInfo();
    if (userInfo != null) {
        int index = userInfo.indexOf(':');
        if (index != -1) {
            accessKey = userInfo.substring(0, index);
            secretKey = userInfo.substring(index + 1);
        } else {
            accessKey = userInfo;
        }
    }

    AWSCredentialsProviderChain credentials = new AWSCredentialsProviderChain(
            new BasicAWSCredentialsProvider(accessKey, secretKey), new InstanceProfileCredentialsProvider(),
            new AnonymousAWSCredentialsProvider());

    bucket = name.getHost();

    ClientConfiguration awsConf = new ClientConfiguration();
    awsConf.setMaxConnections(conf.getInt(NEW_MAXIMUM_CONNECTIONS,
            conf.getInt(OLD_MAXIMUM_CONNECTIONS, DEFAULT_MAXIMUM_CONNECTIONS)));
    awsConf.setProtocol(conf.getBoolean(NEW_SECURE_CONNECTIONS,
            conf.getBoolean(OLD_SECURE_CONNECTIONS, DEFAULT_SECURE_CONNECTIONS)) ? Protocol.HTTPS
                    : Protocol.HTTP);
    awsConf.setMaxErrorRetry(
            conf.getInt(NEW_MAX_ERROR_RETRIES, conf.getInt(OLD_MAX_ERROR_RETRIES, DEFAULT_MAX_ERROR_RETRIES)));
    awsConf.setSocketTimeout(
            conf.getInt(NEW_SOCKET_TIMEOUT, conf.getInt(OLD_SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT)));

    s3 = new AmazonS3Client(credentials, awsConf);

    maxKeys = conf.getInt(NEW_MAX_PAGING_KEYS, conf.getInt(OLD_MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS));
    partSize = conf.getLong(NEW_MULTIPART_SIZE, conf.getLong(OLD_MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE));
    partSizeThreshold = conf.getLong(NEW_MIN_MULTIPART_THRESHOLD,
            conf.getLong(OLD_MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD));

    if (partSize < 5 * 1024 * 1024) {
        LOG.error(NEW_MULTIPART_SIZE + " must be at least 5 MB");
        partSize = 5 * 1024 * 1024;
    }

    if (partSizeThreshold < 5 * 1024 * 1024) {
        LOG.error(NEW_MIN_MULTIPART_THRESHOLD + " must be at least 5 MB");
        partSizeThreshold = 5 * 1024 * 1024;
    }

    String cannedACLName = conf.get(NEW_CANNED_ACL, conf.get(OLD_CANNED_ACL, DEFAULT_CANNED_ACL));
    if (!cannedACLName.isEmpty()) {
        cannedACL = CannedAccessControlList.valueOf(cannedACLName);
    } else {
        cannedACL = null;
    }

    if (!s3.doesBucketExist(bucket)) {
        throw new IOException("Bucket " + bucket + " does not exist");
    }

    boolean purgeExistingMultipart = conf.getBoolean(NEW_PURGE_EXISTING_MULTIPART,
            conf.getBoolean(OLD_PURGE_EXISTING_MULTIPART, DEFAULT_PURGE_EXISTING_MULTIPART));
    long purgeExistingMultipartAge = conf.getLong(NEW_PURGE_EXISTING_MULTIPART_AGE,
            conf.getLong(OLD_PURGE_EXISTING_MULTIPART_AGE, DEFAULT_PURGE_EXISTING_MULTIPART_AGE));

    if (purgeExistingMultipart) {
        TransferManager transferManager = new TransferManager(s3);
        Date purgeBefore = new Date(new Date().getTime() - purgeExistingMultipartAge * 1000);

        transferManager.abortMultipartUploads(bucket, purgeBefore);
        transferManager.shutdownNow(false);
    }

    serverSideEncryptionAlgorithm = conf.get(SERVER_SIDE_ENCRYPTION_ALGORITHM, null);

    setConf(conf);
}

From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java

License:Apache License

/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5. Sadly this doesn't seem to be
 * used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException {
    String key = pathToKey(dst);

    if (!overwrite && exists(dst)) {
        throw new IOException(dst + " already exists");
    }

    LOG.info("Copying local file from " + src + " to " + dst);

    // Since we have a local file, we don't need to stream into a temporary file
    LocalFileSystem local = getLocal(getConf());
    File srcfile = local.pathToFile(src);

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMinimumUploadPartSize(partSize);
    transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);

    TransferManager transfers = new TransferManager(s3);
    transfers.setConfiguration(transferConfiguration);

    final ObjectMetadata om = new ObjectMetadata();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }

    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setMetadata(om);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            }
        }
    };

    Upload up = transfers.upload(putObjectRequest);
    up.addProgressListener(progressListener);
    try {
        up.waitForUploadResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    } finally {
        transfers.shutdownNow(false);
    }

    // This will delete unnecessary fake parent directories
    finishedWrite(key);

    if (delSrc) {
        local.delete(src, false);
    }
}

From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java

License:Apache License

private void copyFile(String srcKey, String dstKey) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("copyFile " + srcKey + " -> " + dstKey);
    }

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMultipartCopyPartSize(partSize);

    TransferManager transfers = new TransferManager(s3);
    transfers.setConfiguration(transferConfiguration);

    ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
    final ObjectMetadata dstom = srcom.clone();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }

    CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
    copyObjectRequest.setCannedAccessControlList(cannedACL);
    copyObjectRequest.setNewObjectMetadata(dstom);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            }
        }
    };

    Copy copy = transfers.copy(copyObjectRequest);
    copy.addProgressListener(progressListener);
    try {
        copy.waitForCopyResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    } finally {
        transfers.shutdownNow(false);
    }
}

From source file:org.apache.hadoop.fs.s3a.S3AOutputStream.java

License:Apache License

@Override
public synchronized void close() throws IOException {
    if (closed) {
        return;
    }

    backupStream.close();
    LOG.info("OutputStream for key '" + key + "' closed. Now beginning upload");
    LOG.info("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold);

    try {
        TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
        transferConfiguration.setMinimumUploadPartSize(partSize);
        transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);

        TransferManager transfers = new TransferManager(client);
        transfers.setConfiguration(transferConfiguration);

        final ObjectMetadata om = new ObjectMetadata();
        if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
            om.setServerSideEncryption(serverSideEncryptionAlgorithm);
        }

        PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
        putObjectRequest.setCannedAcl(cannedACL);
        putObjectRequest.setMetadata(om);

        Upload upload = transfers.upload(putObjectRequest);

        ProgressableProgressListener listener = new ProgressableProgressListener(upload, progress, statistics);
        upload.addProgressListener(listener);

        upload.waitForUploadResult();

        long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
        if (statistics != null && delta != 0) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("S3A write delta changed after finished: " + delta + " bytes");
            }
            statistics.incrementBytesWritten(delta);
        }

        // This will delete unnecessary fake parent directories
        fs.finishedWrite(key);
    } catch (InterruptedException e) {
        throw new IOException(e);
    } finally {
        if (!backupFile.delete()) {
            LOG.warn("Could not delete temporary s3a file: " + backupFile);
        }
        super.close();
        closed = true;
    }

    LOG.info("OutputStream for key '" + key + "' upload complete");
}

From source file:org.apache.storm.s3.output.BlockingTransferManagerUploader.java

License:Apache License

public BlockingTransferManagerUploader(AmazonS3 client) {
    super(client);
    this.tx = new TransferManager(client);
}

From source file:org.apache.streams.s3.S3OutputStreamWrapper.java

License:Apache License

private void addFile() throws Exception {

    InputStream is = new ByteArrayInputStream(this.outputStream.toByteArray());
    int contentLength = outputStream.size();

    TransferManager transferManager = new TransferManager(amazonS3Client);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setExpirationTime(DateTime.now().plusDays(365 * 3).toDate());
    metadata.setContentLength(contentLength);

    metadata.addUserMetadata("writer", "org.apache.streams");

    for (String s : metaData.keySet())
        metadata.addUserMetadata(s, metaData.get(s));

    String fileNameToWrite = path + fileName;
    Upload upload = transferManager.upload(bucketName, fileNameToWrite, is, metadata);
    try {
        upload.waitForUploadResult();
        LOGGER.info("S3 File Close[{} kb] - {}", contentLength / 1024, fileNameToWrite);
    } catch (Exception e) {
        // No Op: upload failures are intentionally swallowed here
    } finally {
        is.close();
        transferManager.shutdownNow(false);
    }

}