Example usage for com.amazonaws.services.s3.transfer Upload getProgress

Introduction

On this page you can find example usage of com.amazonaws.services.s3.transfer.Upload.getProgress().

Prototype

public TransferProgress getProgress();

Document

Returns progress information about this transfer.
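A minimal, self-contained sketch of the typical polling pattern (the bucket, key, and file names are placeholders, and the TransferManager setup is assumed to match your environment):

import java.io.File;

import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.TransferProgress;
import com.amazonaws.services.s3.transfer.Upload;

public class GetProgressSketch {
    public static void main(String[] args) throws InterruptedException {
        TransferManager tm = TransferManagerBuilder.standard().build();
        Upload upload = tm.upload("my-bucket", "my-key", new File("/tmp/data.bin"));

        // poll the transfer until it finishes; getProgress() can be called
        // at any time, from any thread, while the transfer is in flight
        while (!upload.isDone()) {
            TransferProgress p = upload.getProgress();
            System.out.printf("%.1f%% (%d / %d bytes)%n", p.getPercentTransferred(),
                    p.getBytesTransferred(), p.getTotalBytesToTransfer());
            Thread.sleep(500);
        }
        tm.shutdownNow();
    }
}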

Usage

From source file:aws.example.s3.XferMgrProgress.java

License:Open Source License

public static void showMultiUploadProgress(MultipleFileUpload multi_upload) {
    // print the upload's human-readable description
    System.out.println(multi_upload.getDescription());

    Collection<? extends Upload> sub_xfers = multi_upload.getSubTransfers();

    do {
        System.out.println("\nSubtransfer progress:\n");
        for (Upload u : sub_xfers) {
            System.out.println("  " + u.getDescription());
            if (u.isDone()) {
                TransferState xfer_state = u.getState();
                System.out.println("  " + xfer_state);
            } else {
                TransferProgress progress = u.getProgress();
                double pct = progress.getPercentTransferred();
                printProgressBar(pct);
                System.out.println();
            }
        }

        // wait a bit before the next update.
        try {
            Thread.sleep(200);
        } catch (InterruptedException e) {
            return;
        }
    } while (!multi_upload.isDone());
    // print the final state of the transfer.
    TransferState xfer_state = multi_upload.getState();
    System.out.println("\nMultipleFileUpload " + xfer_state);
}
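Since MultipleFileUpload itself implements the Transfer interface, the same getProgress() call also reports aggregate progress across all subtransfers. A minimal variant of the method above (a sketch reusing its printProgressBar helper):

public static void showAggregateProgress(MultipleFileUpload multi_upload) {
    while (!multi_upload.isDone()) {
        // one TransferProgress covering all subtransfers combined
        TransferProgress total = multi_upload.getProgress();
        printProgressBar(total.getPercentTransferred());
        System.out.println();
        try {
            Thread.sleep(200);
        } catch (InterruptedException e) {
            return;
        }
    }
}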

From source file:com.davidsoergel.s3napback.S3ops.java

License:Apache License

public static void upload(TransferManager tx, String bucket, String filename, int chunkSize)
        throws InterruptedException, IOException {

    // break input stream into chunks

    // fully read each chunk into memory before sending, in order to know the size and the md5

    // ** prepare the next chunk while the last is sending; need to deal with multithreading properly
    // ** 4 concurrent streams?

    InputStream in = new BufferedInputStream(System.in);
    int chunkNum = 0;
    while (in.available() > 0) {
        byte[] buf = new byte[chunkSize];
        int bytesRead = in.read(buf);

        String md5 = new MD5(buf);

        // presume AWS does its own buffering, no need for BufferedInputStream (?)

        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(bytesRead);
        meta.setContentMD5(md5);

        Upload myUpload = tx.upload(bucket, filename + ":" + chunkNum,
                new ByteArrayInputStream(buf, 0, bytesRead), meta);
        chunkNum++;

        // poll for progress while the upload runs, then collect the result;
        // calling waitForUploadResult() first would block until completion
        // and the polling loop would never run
        while (!myUpload.isDone()) {
            System.out.println("Transfer: " + myUpload.getDescription());
            System.out.println("  - State: " + myUpload.getState());
            System.out.println("  - Progress: " + myUpload.getProgress().getBytesTransferred());
            Thread.sleep(500);
        }
        UploadResult result = myUpload.waitForUploadResult();
    }
}
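The ** comments above sketch a pipelined design. Because TransferManager already uploads asynchronously on its own thread pool, one simple approximation (a sketch, not the project's actual code; Content-MD5 handling omitted for brevity) is to start each chunk's upload without blocking and collect all the results at the end. Note that this keeps every chunk buffer in memory until its upload finishes:

// same tx, bucket, filename, and chunkSize as in the method above
List<Upload> inFlight = new ArrayList<Upload>();
int chunkNum = 0;
InputStream in = new BufferedInputStream(System.in);
while (in.available() > 0) {
    byte[] buf = new byte[chunkSize];
    int bytesRead = in.read(buf);
    ObjectMetadata meta = new ObjectMetadata();
    meta.setContentLength(bytesRead);
    // start the upload and immediately move on to reading the next chunk
    inFlight.add(tx.upload(bucket, filename + ":" + chunkNum++,
            new ByteArrayInputStream(buf, 0, bytesRead), meta));
}
for (Upload u : inFlight) {
    u.waitForUploadResult(); // blocks; throws if that chunk's upload failed
}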

From source file:com.emc.ecs.sync.target.S3Target.java

License:Open Source License

protected void putObject(SyncObject obj, String targetKey) {
    ObjectMetadata om = AwsS3Util.s3MetaFromSyncMeta(obj.getMetadata());
    if (obj.isDirectory())
        om.setContentType(AwsS3Util.TYPE_DIRECTORY);

    PutObjectRequest req;
    if (obj.isDirectory()) {
        req = new PutObjectRequest(bucketName, targetKey, new ByteArrayInputStream(new byte[0]), om);
    } else if (obj instanceof FileSyncObject) {
        req = new PutObjectRequest(bucketName, targetKey, ((FileSyncObject) obj).getRawSourceIdentifier());
    } else {
        req = new PutObjectRequest(bucketName, targetKey, obj.getInputStream(), om);
    }

    if (includeAcl)
        req.setAccessControlList(AwsS3Util.s3AclFromSyncAcl(obj.getMetadata().getAcl(), ignoreInvalidAcls));

    // xfer manager will figure out if MPU is needed (based on threshold), do the MPU if necessary,
    // and abort if it fails
    TransferManagerConfiguration xferConfig = new TransferManagerConfiguration();
    xferConfig.setMultipartUploadThreshold((long) mpuThresholdMB * 1024 * 1024);
    xferConfig.setMinimumUploadPartSize((long) mpuPartSizeMB * 1024 * 1024);
    TransferManager xferManager = new TransferManager(s3, Executors.newFixedThreadPool(mpuThreadCount));
    xferManager.setConfiguration(xferConfig);

    Upload upload = xferManager.upload(req);
    try {
        log.debug("Wrote {}, etag: {}", targetKey, upload.waitForUploadResult().getETag());
    } catch (InterruptedException e) {
        throw new RuntimeException("upload thread was interrupted", e);
    } finally {
        // make sure bytes read is accurate if we bypassed the counting stream
        if (obj instanceof FileSyncObject) {
            try {
                ((FileSyncObject) obj).setOverrideBytesRead(upload.getProgress().getBytesTransferred());
            } catch (Throwable t) {
                log.warn("could not get bytes transferred from upload", t);
            }
        }
        // shut down the per-call TransferManager so its thread pool doesn't
        // leak (false leaves the shared s3 client open)
        xferManager.shutdownNow(false);
    }
}

From source file:com.github.abhinavmishra14.aws.s3.service.impl.AwsS3IamServiceImpl.java

License:Open Source License

@Override
public boolean uploadObjectAndListenProgress(final String bucketName, final String fileName,
        final InputStream inputStream, final CannedAccessControlList cannedAcl)
        throws AmazonClientException, AmazonServiceException, IOException {
    LOGGER.info(
            "uploadObjectAndListenProgress invoked, bucketName: {} , fileName: {} and cannedAccessControlList: {}",
            bucketName, fileName, cannedAcl);
    File tempFile = null;
    PutObjectRequest putObjectRequest = null;
    Upload upload = null;
    try {
        // Create temporary file from stream to avoid 'out of memory' exception
        tempFile = AWSUtil.createTempFileFromStream(inputStream);
        putObjectRequest = new PutObjectRequest(bucketName, fileName, tempFile).withCannedAcl(cannedAcl);
        final TransferManager transferMgr = new TransferManager(s3client);
        upload = transferMgr.upload(putObjectRequest);
        // Log the transfer's initial status; getProgress() can be polled at
        // any time while the transfer is in flight
        LOGGER.info("Start: {} , State: {} and Progress (%): {}", upload.getDescription(), upload.getState(),
                upload.getProgress().getPercentTransferred());

        // Add progressListener to listen asynchronous notifications about your transfer's progress
        // Uncomment below code snippet during development
        /*upload.addProgressListener(new ProgressListener() {
            public void progressChanged(ProgressEvent event) {
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug("Transferred bytes: " + (long) event.getBytesTransferred());
                }
            }
        });*/

        try {
            //Block the current thread and wait for completion
            //If the transfer fails AmazonClientException will be thrown
            upload.waitForCompletion();
        } catch (AmazonClientException | InterruptedException excp) {
            LOGGER.error("Exception occured while waiting for transfer: ", excp);
        }
    } finally {
        AWSUtil.deleteTempFile(tempFile); // Delete the temporary file once uploaded
    }
    LOGGER.info("End: {} , State: {} , Progress (%): {}", upload.getDescription(), upload.getState(),
            upload.getProgress().getPercentTransferred());
    return upload.isDone();
}
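The commented-out listener above targets the legacy ProgressListener from com.amazonaws.services.s3.model. A minimal sketch of the same idea using the current com.amazonaws.event types (assuming the class's existing LOGGER):

upload.addProgressListener(new com.amazonaws.event.ProgressListener() {
    @Override
    public void progressChanged(com.amazonaws.event.ProgressEvent event) {
        // called asynchronously as bytes are transferred
        LOGGER.debug("Transferred bytes: {}", event.getBytesTransferred());
    }
});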

From source file:com.github.abhinavmishra14.aws.s3.service.impl.AwsS3IamServiceImpl.java

License:Open Source License

@Override
public boolean uploadObjectAndListenProgress(final String bucketName, final String fileName,
        final InputStream inputStream, final boolean isPublicAccessible)
        throws AmazonClientException, AmazonServiceException, IOException {
    LOGGER.info(
            "uploadObjectAndListenProgress invoked, bucketName: {} , fileName: {} and isPublicAccessible: {}",
            bucketName, fileName, isPublicAccessible);
    File tempFile = null;
    PutObjectRequest putObjectRequest = null;
    Upload upload = null;
    try {
        // Create temporary file from stream to avoid 'out of memory' exception
        tempFile = AWSUtil.createTempFileFromStream(inputStream);
        putObjectRequest = new PutObjectRequest(bucketName, fileName, tempFile);
        if (isPublicAccessible) {
            putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);
        }
        final TransferManager transferMgr = new TransferManager(s3client);
        upload = transferMgr.upload(putObjectRequest);
        // Log the transfer's initial status; getProgress() can be polled at
        // any time while the transfer is in flight
        LOGGER.info("Start: {} , State: {} and Progress (%): {}", upload.getDescription(), upload.getState(),
                upload.getProgress().getPercentTransferred());

        // Add progressListener to listen asynchronous notifications about your transfer's progress
        // Uncomment below code snippet during development
        /*upload.addProgressListener(new ProgressListener() {
            public void progressChanged(ProgressEvent event) {
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug("Transferred bytes: " + (long) event.getBytesTransferred());
                }
            }
        });*/

        try {
            //Block the current thread and wait for completion
            //If the transfer fails AmazonClientException will be thrown
            upload.waitForCompletion();
        } catch (AmazonClientException | InterruptedException excp) {
            LOGGER.error("Exception occured while waiting for transfer: ", excp);
        }
    } finally {
        AWSUtil.deleteTempFile(tempFile); // Delete the temporary file once uploaded
    }
    LOGGER.info("End: {} , State: {} , Progress (%): {}", upload.getDescription(), upload.getState(),
            upload.getProgress().getPercentTransferred());
    return upload.isDone();
}

From source file:com.github.rholder.esthree.command.Put.java

License:Apache License

@Override
public Integer call() throws Exception {
    TransferManager t = new TransferManager(amazonS3Client);

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setUserMetadata(metadata);

    Upload u = t.upload(new PutObjectRequest(bucket, key, inputFile).withMetadata(objectMetadata));

    // TODO this listener spews out garbage >100% on a retry, add a test to verify
    if (progressListener != null) {
        progressListener.withTransferProgress(new TransferProgressWrapper(u.getProgress()));
        u.addProgressListener(progressListener);
    }
    try {
        u.waitForCompletion();
    } finally {
        t.shutdownNow();
    }
    return 0;
}

From source file:jp.classmethod.aws.gradle.s3.AmazonS3ProgressiveFileUploadTask.java

License:Apache License

@TaskAction
public void upload() throws InterruptedException {
    // to enable conventionMappings feature
    String bucketName = getBucketName();
    String key = getKey();
    File file = getFile();

    if (bucketName == null) {
        throw new GradleException("bucketName is not specified");
    }
    if (key == null) {
        throw new GradleException("key is not specified");
    }
    if (file == null) {
        throw new GradleException("file is not specified");
    }
    if (!file.isFile()) {
        throw new GradleException("file must be regular file");
    }

    AmazonS3PluginExtension ext = getProject().getExtensions().getByType(AmazonS3PluginExtension.class);
    AmazonS3 s3 = ext.getClient();

    TransferManager s3mgr = TransferManagerBuilder.standard().withS3Client(s3).build();
    getLogger().info("Uploading... s3://{}/{}", bucketName, key);

    Upload upload = s3mgr.upload(new PutObjectRequest(bucketName, key, file).withMetadata(getObjectMetadata()));
    upload.addProgressListener(new ProgressListener() {

        public void progressChanged(ProgressEvent event) {
            getLogger().info("  {}% uploaded", upload.getProgress().getPercentTransferred());
        }
    });
    upload.waitForCompletion();
    setResourceUrl(s3.getUrl(bucketName, key).toString());
    getLogger().info("Upload completed: {}", getResourceUrl());
}

From source file:org.apache.hadoop.fs.s3a.S3AOutputStream.java

License:Apache License

@Override
public synchronized void close() throws IOException {
    if (closed) {
        return;
    }

    backupStream.close();
    LOG.info("OutputStream for key '" + key + "' closed. Now beginning upload");
    LOG.info("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold);

    try {
        TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
        transferConfiguration.setMinimumUploadPartSize(partSize);
        transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);

        TransferManager transfers = new TransferManager(client);
        transfers.setConfiguration(transferConfiguration);

        final ObjectMetadata om = new ObjectMetadata();
        if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
            om.setServerSideEncryption(serverSideEncryptionAlgorithm);
        }

        PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
        putObjectRequest.setCannedAcl(cannedACL);
        putObjectRequest.setMetadata(om);

        Upload upload = transfers.upload(putObjectRequest);

        ProgressableProgressListener listener = new ProgressableProgressListener(upload, progress, statistics);
        upload.addProgressListener(listener);

        upload.waitForUploadResult();

        long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
        if (statistics != null && delta != 0) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("S3A write delta changed after finished: " + delta + " bytes");
            }
            statistics.incrementBytesWritten(delta);
        }

        // This will delete unnecessary fake parent directories
        fs.finishedWrite(key);
    } catch (InterruptedException e) {
        throw new IOException(e);
    } finally {
        if (!backupFile.delete()) {
            LOG.warn("Could not delete temporary s3a file: " + backupFile);
        }
        super.close();
        closed = true;
    }

    LOG.info("OutputStream for key '" + key + "' upload complete");
}

From source file:org.apache.hadoop.fs.s3r.S3ROutputStream.java

License:Apache License

@Override
public synchronized void close() throws IOException {
    if (closed) {
        return;
    }

    backupStream.close();
    if (LOG.isDebugEnabled()) {
        LOG.debug("OutputStream for key '" + key + "' closed. Now beginning upload");
        LOG.debug("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold);
    }

    try {
        final ObjectMetadata om = new ObjectMetadata();
        if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
            om.setServerSideEncryption(serverSideEncryptionAlgorithm);
        }
        PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
        putObjectRequest.setCannedAcl(cannedACL);
        putObjectRequest.setMetadata(om);

        Upload upload = transfers.upload(putObjectRequest);

        ProgressableProgressListener listener = new ProgressableProgressListener(upload, progress, statistics);
        upload.addProgressListener(listener);

        upload.waitForUploadResult();

        long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
        if (statistics != null && delta != 0) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("S3A write delta changed after finished: " + delta + " bytes");
            }
            statistics.incrementBytesWritten(delta);
        }

        // This will delete unnecessary fake parent directories
        fs.finishedWrite(key);
    } catch (InterruptedException e) {
        throw new IOException(e);
    } finally {
        if (!backupFile.delete()) {
            LOG.warn("Could not delete temporary s3a file: {}", backupFile);
        }
        super.close();
        closed = true;
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("OutputStream for key '" + key + "' upload complete");
    }
}

From source file:org.xmlsh.aws.gradle.s3.AmazonS3ProgressiveFileUploadTask.java

License:BSD License

@TaskAction
public void upload() throws InterruptedException {
    // to enable conventionMappings feature
    String bucketName = getBucketName();
    String key = getKey();
    File file = getFile();

    if (bucketName == null)
        throw new GradleException("bucketName is not specified");
    if (key == null)
        throw new GradleException("key is not specified");
    if (file == null)
        throw new GradleException("file is not specified");
    if (!file.isFile())
        throw new GradleException("file must be regular file");

    AmazonS3PluginExtension ext = getProject().getExtensions().getByType(AmazonS3PluginExtension.class);
    AmazonS3 s3 = ext.getClient();

    TransferManager s3mgr = new TransferManager(s3);
    getLogger().info("Uploading... s3://{}/{}", bucketName, key);

    Upload upload = s3mgr.upload(new PutObjectRequest(bucketName, key, file).withMetadata(getObjectMetadata()));
    upload.addProgressListener(new ProgressListener() {
        public void progressChanged(ProgressEvent event) {
            getLogger().info("  {}% uploaded", upload.getProgress().getPercentTransferred());
        }
    });
    upload.waitForCompletion();
    setResourceUrl(((AmazonS3Client) s3).getResourceUrl(bucketName, key));
    getLogger().info("Upload completed: {}", getResourceUrl());
}