Example usage for com.amazonaws.services.s3.transfer Upload addProgressListener

Introduction

This page collects example usages of com.amazonaws.services.s3.transfer.Upload#addProgressListener from open-source projects.

Prototype

public void addProgressListener(ProgressListener listener);

Document

Adds the specified progress listener to the list of listeners receiving updates about this transfer's progress.
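
A minimal, self-contained sketch of the call (assuming the AWS SDK for Java 1.x; the bucket, key, and file names below are placeholders):

import com.amazonaws.event.ProgressEvent;
import com.amazonaws.event.ProgressListener;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

import java.io.File;

public class AddProgressListenerSketch {
    public static void main(String[] args) throws InterruptedException {
        TransferManager xferMgr = TransferManagerBuilder.standard().build();
        try {
            // Placeholder bucket, key, and file values.
            Upload upload = xferMgr.upload("example-bucket", "example-key", new File("example.txt"));
            // The listener is invoked as progress events are fired for this transfer.
            upload.addProgressListener(new ProgressListener() {
                @Override
                public void progressChanged(ProgressEvent e) {
                    System.out.printf("%.1f%% transferred%n",
                            upload.getProgress().getPercentTransferred());
                }
            });
            upload.waitForCompletion();
        } finally {
            xferMgr.shutdownNow();
        }
    }
}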

Usage

From source file: aws.example.s3.XferMgrProgress.java

License: Open Source License

public static void uploadFileWithListener(String file_path, String bucket_name, String key_prefix,
        boolean pause) {
    System.out.println("file: " + file_path + (pause ? " (pause)" : ""));

    String key_name = null;
    if (key_prefix != null) {
        key_name = key_prefix + '/' + file_path;
    } else {
        key_name = file_path;
    }

    File f = new File(file_path);
    TransferManager xfer_mgr = new TransferManager();
    try {
        Upload u = xfer_mgr.upload(bucket_name, key_name, f);
        // print an empty progress bar...
        printProgressBar(0.0);
        u.addProgressListener(new ProgressListener() {
            public void progressChanged(ProgressEvent e) {
                double pct = e.getBytesTransferred() * 100.0 / e.getBytes();
                eraseProgressBar();
                printProgressBar(pct);
            }
        });
        // block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(u);
        // print the final state of the transfer.
        TransferState xfer_state = u.getState();
        System.out.println(": " + xfer_state);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
}
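
Note that in the example above the listener is attached only after xfer_mgr.upload(...) has already started the transfer, so the first few progress events can be missed. One alternative, sketched below assuming an SDK version whose requests expose setGeneralProgressListener (the printed message is illustrative), is to register the listener on the request itself:

PutObjectRequest req = new PutObjectRequest(bucket_name, key_name, f);
// Register the listener before the transfer starts so no early events are dropped.
req.setGeneralProgressListener(
        e -> System.out.println("bytes in this event: " + e.getBytesTransferred()));
Upload u = xfer_mgr.upload(req);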

From source file: com.github.rholder.esthree.command.Put.java

License: Apache License

@Override
public Integer call() throws Exception {
    TransferManager t = new TransferManager(amazonS3Client);

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setUserMetadata(metadata);

    Upload u = t.upload(new PutObjectRequest(bucket, key, inputFile).withMetadata(objectMetadata));

    // TODO this listener spews out garbage >100% on a retry, add a test to verify
    if (progressListener != null) {
        progressListener.withTransferProgress(new TransferProgressWrapper(u.getProgress()));
        u.addProgressListener(progressListener);
    }
    try {
        u.waitForCompletion();
    } finally {
        t.shutdownNow();
    }
    return 0;
}

From source file: com.proofpoint.event.collector.combiner.S3StorageSystem.java

License: Apache License

@Override
public StoredObject putObject(final URI location, File source) {
    try {
        log.info("starting upload: %s", location);
        final AtomicLong totalTransferred = new AtomicLong();
        Upload upload = s3TransferManager.upload(getS3Bucket(location), getS3ObjectKey(location), source);
        upload.addProgressListener(new ProgressListener() {
            @Override
            public void progressChanged(ProgressEvent progressEvent) {
                // NOTE: This may be invoked by multiple threads.
                long transferred = totalTransferred.addAndGet(progressEvent.getBytesTransferred());
                log.debug("upload progress: %s: transferred=%d code=%d", location, transferred,
                        progressEvent.getEventCode());
            }
        });
        UploadResult uploadResult = upload.waitForUploadResult();
        ObjectMetadata metadata = s3Service.getObjectMetadata(getS3Bucket(location), getS3ObjectKey(location));
        if (!uploadResult.getETag().equals(metadata.getETag())) {
            // this might happen in rare cases due to S3's eventual consistency
            throw new IllegalStateException("uploaded etag is different from retrieved object etag");
        }
        log.info("completed upload: %s (size=%d bytes)", location, totalTransferred.get());
        return updateStoredObject(location, metadata);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
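
The AtomicLong above is needed because ProgressEvent.getBytesTransferred() reports the byte count for that event rather than a running total (and getEventCode() is deprecated in later 1.x SDKs in favor of getEventType()). When only cumulative figures are needed, the Transfer itself tracks them; a sketch under the same SDK assumptions:

upload.addProgressListener((ProgressListener) progressEvent -> {
    // TransferProgress holds cumulative, not per-event, counters.
    TransferProgress progress = upload.getProgress();
    log.debug("upload progress: %s: transferred=%d of %d bytes", location,
            progress.getBytesTransferred(), progress.getTotalBytesToTransfer());
});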

From source file: com.streamsets.pipeline.lib.aws.s3.S3Accessor.java

License: Apache License

public Uploader getUploader() {
    Utils.checkState(hasTransferManager(), "transferManager not available");
    return (bucket, key, is) -> {
        Utils.checkNotNull(bucket, "bucket");
        Utils.checkNotNull(key, "key");
        Utils.checkNotNull(is, "is");
        PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, is,
                getEncryptionMetadataBuilder().build());
        Upload upload = getTransferManager().upload(putObjectRequest);
        upload.addProgressListener(new UploaderProgressListener(bucket + key));
        return upload;
    };
}

From source file: com.streamsets.pipeline.stage.destination.s3.FileHelper.java

License: Apache License

Upload doUpload(String bucket, String fileName, InputStream is, ObjectMetadata metadata) {
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, fileName, is, metadata);
    final String object = bucket + s3TargetConfigBean.s3Config.delimiter + fileName;
    Upload upload = transferManager.upload(putObjectRequest);
    upload.addProgressListener((ProgressListener) progressEvent -> {
        switch (progressEvent.getEventType()) {
        case TRANSFER_STARTED_EVENT:
            LOG.debug("Started uploading object {} into Amazon S3", object);
            break;
        case TRANSFER_COMPLETED_EVENT:
            LOG.debug("Completed uploading object {} into Amazon S3", object);
            break;
        case TRANSFER_FAILED_EVENT:
            LOG.debug("Failed uploading object {} into Amazon S3", object);
            break;
        default:
            break;
        }
    });
    return upload;
}

From source file: com.wowza.wms.plugin.s3upload.ModuleS3Upload.java

License: Open Source License

private void resumeUploads() {
    if (!resumeUploads) {
        transferManager.abortMultipartUploads(bucketName, new Date());
        return;
    }

    File storageDir = new File(appInstance.getStreamStorageDir());
    File[] files = storageDir.listFiles(new FilenameFilter() {

        @Override
        public boolean accept(File dir, String name) {
            return name.toLowerCase().endsWith(".upload");
        }
    });

    for (File file : files) {
        String mediaName = file.getPath().replace(storageDir.getPath(), "");
        if (mediaName.startsWith("/"))
            mediaName = mediaName.substring(1);

        mediaName = mediaName.substring(0, mediaName.indexOf(".upload"));
        Upload upload = null;
        FileInputStream fis = null;
        try {
            if (file.length() == 0) {
                File mediaFile = new File(storageDir, mediaName);
                if (mediaFile.exists()) {
                    // In order to support setting ACL permissions for the file upload, we will wrap the upload properties in a PutObjectRequest
                    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, mediaName, file);

                    // If the user has specified ACL properties, setup the putObjectRequest with the acl permissions generated
                    if (acl != null) {
                        putObjectRequest.withAccessControlList(acl);
                    }

                    upload = transferManager.upload(putObjectRequest);
                } else {
                    file.delete();
                }
            } else {
                fis = new FileInputStream(file);
                // Deserialize PersistableUpload information from disk.
                PersistableUpload persistableUpload = PersistableTransfer.deserializeFrom(fis);
                upload = transferManager.resumeUpload(persistableUpload);
            }
            if (upload != null)
                upload.addProgressListener(new ProgressListener(mediaName));
        } catch (Exception e) {
            logger.error(MODULE_NAME + ".resumeUploads error resuming upload: [" + appInstance.getContextStr()
                    + "/" + file.getName() + "]", e);
        } finally {
            if (fis != null) {
                try {
                    fis.close();
                } catch (IOException e) {
                    // ignore failures while closing the state file
                }
            }
        }
    }
}
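
The .upload state files deserialized above have to be written while each transfer is running. A sketch of that producing side, assuming the SDK's S3SyncProgressListener callback (the storageDir and mediaName values mirror this module but are illustrative):

Upload upload = transferManager.upload(putObjectRequest, new S3SyncProgressListener() {
    @Override
    public void progressChanged(ProgressEvent progressEvent) {
        // No per-event handling needed here.
    }

    @Override
    public void onPersistableTransfer(PersistableTransfer persistableTransfer) {
        // Save resume state so resumeUpload() can pick the transfer up after a restart.
        try (FileOutputStream fos = new FileOutputStream(new File(storageDir, mediaName + ".upload"))) {
            persistableTransfer.serialize(fos);
        } catch (IOException e) {
            logger.error("error saving upload state for " + mediaName, e);
        }
    }
});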

From source file: jp.classmethod.aws.gradle.s3.AmazonS3ProgressiveFileUploadTask.java

License: Apache License

@TaskAction
public void upload() throws InterruptedException {
    // to enable conventionMappings feature
    String bucketName = getBucketName();
    String key = getKey();
    File file = getFile();

    if (bucketName == null) {
        throw new GradleException("bucketName is not specified");
    }
    if (key == null) {
        throw new GradleException("key is not specified");
    }
    if (file == null) {
        throw new GradleException("file is not specified");
    }
    if (file.isFile() == false) {
        throw new GradleException("file must be regular file");
    }

    AmazonS3PluginExtension ext = getProject().getExtensions().getByType(AmazonS3PluginExtension.class);
    AmazonS3 s3 = ext.getClient();

    TransferManager s3mgr = TransferManagerBuilder.standard().withS3Client(s3).build();
    getLogger().info("Uploading... s3://{}/{}", bucketName, key);

    Upload upload = s3mgr.upload(
            new PutObjectRequest(getBucketName(), getKey(), getFile()).withMetadata(getObjectMetadata()));
    upload.addProgressListener(new ProgressListener() {

        public void progressChanged(ProgressEvent event) {
            getLogger().info("  {}% uploaded", upload.getProgress().getPercentTransferred());
        }
    });
    upload.waitForCompletion();
    setResourceUrl(s3.getUrl(bucketName, key).toString());
    getLogger().info("Upload completed: {}", getResourceUrl());
}

From source file: modules.storage.AmazonS3Storage.java

License: Open Source License

@Override
public F.Promise<Void> store(Path path, String key, String name) {
    Promise<Void> promise = Futures.promise();

    TransferManager transferManager = new TransferManager(credentials);
    try {
        Upload upload = transferManager.upload(bucketName, key, path.toFile());
        upload.addProgressListener((ProgressListener) progressEvent -> {
            if (progressEvent.getEventType().isTransferEvent()) {
                if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_COMPLETED_EVENT)) {
                    transferManager.shutdownNow();
                    promise.success(null);
                } else if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_FAILED_EVENT)) {
                    transferManager.shutdownNow();
                    logger.error(progressEvent.toString());
                    promise.failure(new Exception(progressEvent.toString()));
                }
            }
        });
    } catch (AmazonServiceException ase) {
        logAmazonServiceException(ase);
    } catch (AmazonClientException ace) {
        logAmazonClientException(ace);
    }

    return F.Promise.wrap(promise.future());
}
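
The same completion signalling works without the Play/Akka promise types; a plain-Java sketch with java.util.concurrent.CompletableFuture standing in for the promise (illustrative only):

CompletableFuture<Void> result = new CompletableFuture<>();
Upload upload = transferManager.upload(bucketName, key, path.toFile());
upload.addProgressListener((ProgressListener) progressEvent -> {
    if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
        transferManager.shutdownNow();
        result.complete(null);
    } else if (progressEvent.getEventType() == ProgressEventType.TRANSFER_FAILED_EVENT) {
        transferManager.shutdownNow();
        result.completeExceptionally(new IOException("upload failed: " + progressEvent));
    }
});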

From source file: org.apache.hadoop.fs.s3a.S3AFileSystem.java

License: Apache License

/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5. Sadly this doesn't seem to be
 * used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException {
    String key = pathToKey(dst);

    if (!overwrite && exists(dst)) {
        throw new IOException(dst + " already exists");
    }

    LOG.info("Copying local file from " + src + " to " + dst);

    // Since we have a local file, we don't need to stream into a temporary file
    LocalFileSystem local = getLocal(getConf());
    File srcfile = local.pathToFile(src);

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMinimumUploadPartSize(partSize);
    transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);

    TransferManager transfers = new TransferManager(s3);
    transfers.setConfiguration(transferConfiguration);

    final ObjectMetadata om = new ObjectMetadata();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }

    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setMetadata(om);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            }
        }
    };

    Upload up = transfers.upload(putObjectRequest);
    up.addProgressListener(progressListener);
    try {
        up.waitForUploadResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    } finally {
        transfers.shutdownNow(false);
    }

    // This will delete unnecessary fake parent directories
    finishedWrite(key);

    if (delSrc) {
        local.delete(src, false);
    }
}

From source file: org.apache.hadoop.fs.s3a.S3AOutputStream.java

License: Apache License

@Override
public synchronized void close() throws IOException {
    if (closed) {
        return;
    }

    backupStream.close();
    LOG.info("OutputStream for key '" + key + "' closed. Now beginning upload");
    LOG.info("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold);

    try {
        TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
        transferConfiguration.setMinimumUploadPartSize(partSize);
        transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);

        TransferManager transfers = new TransferManager(client);
        transfers.setConfiguration(transferConfiguration);

        final ObjectMetadata om = new ObjectMetadata();
        if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
            om.setServerSideEncryption(serverSideEncryptionAlgorithm);
        }

        PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
        putObjectRequest.setCannedAcl(cannedACL);
        putObjectRequest.setMetadata(om);

        Upload upload = transfers.upload(putObjectRequest);

        ProgressableProgressListener listener = new ProgressableProgressListener(upload, progress, statistics);
        upload.addProgressListener(listener);

        upload.waitForUploadResult();

        long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
        if (statistics != null && delta != 0) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("S3A write delta changed after finished: " + delta + " bytes");
            }
            statistics.incrementBytesWritten(delta);
        }

        // This will delete unnecessary fake parent directories
        fs.finishedWrite(key);
    } catch (InterruptedException e) {
        throw new IOException(e);
    } finally {
        if (!backupFile.delete()) {
            LOG.warn("Could not delete temporary s3a file: " + backupFile);
        }
        super.close();
        closed = true;
    }

    LOG.info("OutputStream for key '" + key + "' upload complete");
}