Example usage for com.amazonaws.services.s3.transfer TransferManager shutdownNow

Introduction

On this page you can find example usage for com.amazonaws.services.s3.transfer TransferManager shutdownNow, collected from open-source projects.

Prototype

public void shutdownNow() 

Document

Forcefully shuts down this TransferManager instance - currently executing transfers will not be allowed to finish.
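
A minimal sketch of the typical pattern, assuming the AWS SDK for Java 1.x TransferManagerBuilder and placeholder bucket, key, and file names: run the transfer, then call shutdownNow() in a finally block so the TransferManager's threads are released even if the transfer fails.

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

public class ShutdownNowSketch {
    public static void main(String[] args) throws InterruptedException {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        TransferManager tm = TransferManagerBuilder.standard().withS3Client(s3).build();
        try {
            // "example-bucket", "example-key" and example.txt are placeholders for illustration.
            Upload upload = tm.upload("example-bucket", "example-key", new File("example.txt"));
            upload.waitForCompletion();
        } finally {
            // Release the TransferManager's thread pool immediately; transfers still
            // in flight are aborted rather than allowed to finish.
            tm.shutdownNow();
        }
    }
}

Note that shutdownNow() by default also shuts down the underlying Amazon S3 client; the shutdownNow(boolean) overload can be used to keep a shared client open.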

Usage

From source file:com.github.rholder.esthree.command.Put.java

License:Apache License

@Override
public Integer call() throws Exception {
    TransferManager t = new TransferManager(amazonS3Client);

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setUserMetadata(metadata);

    Upload u = t.upload(new PutObjectRequest(bucket, key, inputFile).withMetadata(objectMetadata));

    // TODO this listener spews out garbage >100% on a retry, add a test to verify
    if (progressListener != null) {
        progressListener.withTransferProgress(new TransferProgressWrapper(u.getProgress()));
        u.addProgressListener(progressListener);
    }
    try {
        u.waitForCompletion();
    } finally {
        t.shutdownNow();
    }
    return 0;
}

From source file:com.mesosphere.dcos.cassandra.executor.backup.S3StorageDriver.java

License:Apache License

@Override
public void upload(BackupRestoreContext ctx) throws Exception {
    final String localLocation = ctx.getLocalLocation();
    final String backupName = ctx.getName();
    final String nodeId = ctx.getNodeId();
    final String key = getPrefixKey(ctx) + "/" + nodeId;
    LOGGER.info("Backup key: " + key);
    final TransferManager tx = getS3TransferManager(ctx);
    final File dataDirectory = new File(localLocation);

    try {
        // Ex: data/<keyspace>/<cf>/snapshots/</snapshot-dir>/<files>
        for (File keyspaceDir : dataDirectory.listFiles()) {
            if (keyspaceDir.isFile()) {
                // Skip any files in the data directory.
                // Only enter keyspace directory.
                continue;
            }
            LOGGER.info("Entering keyspace: {}", keyspaceDir.getName());
            for (File cfDir : getColumnFamilyDir(keyspaceDir)) {
                LOGGER.info("Entering column family dir: {}", cfDir.getName());
                File snapshotDir = new File(cfDir, "snapshots");
                File backupDir = new File(snapshotDir, backupName);
                if (!StorageUtil.isValidBackupDir(keyspaceDir, cfDir, snapshotDir, backupDir)) {
                    LOGGER.info("Skipping directory: {}", snapshotDir.getAbsolutePath());
                    continue;
                }
                LOGGER.info(
                        "Valid backup directories. KeyspaceDir: {} | ColumnFamilyDir: {} | SnapshotDir: {} | BackupName: {}",
                        keyspaceDir.getAbsolutePath(), cfDir.getAbsolutePath(), snapshotDir.getAbsolutePath(),
                        backupName);

                final Optional<File> snapshotDirectory = StorageUtil.getValidSnapshotDirectory(snapshotDir,
                        backupName);
                LOGGER.info("Valid snapshot directory: {}", snapshotDirectory.isPresent());
                if (snapshotDirectory.isPresent()) {
                    // Upload this directory
                    LOGGER.info("Going to upload directory: {}", snapshotDirectory.get().getAbsolutePath());

                    uploadDirectory(tx, getBucketName(ctx), key, keyspaceDir.getName(), cfDir.getName(),
                            snapshotDirectory.get());
                } else {
                    LOGGER.warn("Snapshots directory: {} doesn't contain the current backup directory: {}",
                            snapshotDir.getName(), backupName);
                }
            }
        }
        LOGGER.info("Done uploading snapshots for backup: {}", backupName);
    } catch (Exception e) {
        LOGGER.info("Failed uploading snapshots for backup: {}, error: {}", backupName, e);
        throw new Exception(e);
    } finally {
        tx.shutdownNow();
    }
}

From source file:com.mesosphere.dcos.cassandra.executor.backup.S3StorageDriver.java

License:Apache License

@Override
public void download(BackupRestoreContext ctx) throws Exception {
    // download sstables at data/keyspace/cf/<files>
    final String backupName = ctx.getName();
    final String nodeId = ctx.getNodeId();
    final File[] keyspaces = getNonSystemKeyspaces(ctx);
    final String bucketName = getBucketName(ctx);
    final String localLocation = ctx.getLocalLocation();
    final TransferManager tx = getS3TransferManager(ctx);
    final AmazonS3Client amazonS3Client = getAmazonS3Client(ctx);

    try {
        if (Objects.equals(ctx.getRestoreType(), "new")) {
            final Map<String, Long> snapshotFileKeys = listSnapshotFiles(amazonS3Client, bucketName,
                    backupName + File.separator + nodeId);
            LOGGER.info("Snapshot files for this node: {}", snapshotFileKeys);
            for (String fileKey : snapshotFileKeys.keySet()) {
                downloadFile(tx, bucketName, fileKey, localLocation + File.separator + fileKey);
            }
        } else {
            for (File keyspace : keyspaces) {
                for (File cfDir : getColumnFamilyDir(keyspace)) {
                    final String columnFamily = cfDir.getName().substring(0, cfDir.getName().indexOf("-"));
                    final Map<String, Long> snapshotFileKeys = listSnapshotFiles(amazonS3Client, bucketName,
                            backupName + "/" + nodeId + "/" + keyspace.getName() + "/" + columnFamily);
                    for (String fileKey : snapshotFileKeys.keySet()) {
                        final String destinationFile = cfDir.getAbsolutePath()
                                + fileKey.substring(fileKey.lastIndexOf("/"));
                        downloadFile(tx, bucketName, fileKey, destinationFile);
                        LOGGER.info("Keyspace {}, Column Family {}, FileKey {}, destination {}", keyspace,
                                columnFamily, fileKey, destinationFile);
                    }
                }
            }
        }
        LOGGER.info("Done downloading snapshots for backup: {}", backupName);
    } catch (Exception e) {
        LOGGER.info("Failed downloading snapshots for backup: {}, error: {}", backupName, e);
        throw new Exception(e);
    } finally {
        tx.shutdownNow();
    }
}

From source file:hu.mta.sztaki.lpds.cloud.entice.imageoptimizer.iaashandler.amazontarget.Storage.java

License:Apache License

/**
 * @param file Local file to upload
 * @param endpoint S3 endpoint URL
 * @param accessKey Access key
 * @param secretKey Secret key
 * @param bucket Bucket name 
 * @param path Key name (path + file name)
 * @throws Exception On any error
 */
public static void upload(File file, String endpoint, String accessKey, String secretKey, String bucket,
        String path) throws Exception {
    AmazonS3Client amazonS3Client = null;
    try {
        AWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey);
        ClientConfiguration clientConfiguration = new ClientConfiguration();
        clientConfiguration.setMaxConnections(MAX_CONNECTIONS);
        clientConfiguration.setMaxErrorRetry(PredefinedRetryPolicies.DEFAULT_MAX_ERROR_RETRY);
        clientConfiguration.setConnectionTimeout(ClientConfiguration.DEFAULT_CONNECTION_TIMEOUT);
        amazonS3Client = new AmazonS3Client(awsCredentials, clientConfiguration);
        S3ClientOptions clientOptions = new S3ClientOptions().withPathStyleAccess(true);
        amazonS3Client.setS3ClientOptions(clientOptions);
        amazonS3Client.setEndpoint(endpoint);
        //         amazonS3Client.putObject(new PutObjectRequest(bucket, path, file)); // up to 5GB
        TransferManager tm = new TransferManager(amazonS3Client); // up to 5TB
        Upload upload = tm.upload(bucket, path, file);
        // while (!upload.isDone()) { upload.getProgress().getBytesTransferred(); Thread.sleep(1000); } // to get progress
        upload.waitForCompletion();
        tm.shutdownNow();
    } catch (AmazonServiceException x) {
        Shrinker.myLogger.info("upload error: " + x.getMessage());
        throw new Exception("upload exception", x);
    } catch (AmazonClientException x) {
        Shrinker.myLogger.info("upload error: " + x.getMessage());
        throw new Exception("upload exception", x);
    } finally {
        if (amazonS3Client != null) {
            try {
                amazonS3Client.shutdown();
            } catch (Exception e) {
            }
        }
    }
}

From source file:jenkins.plugins.itemstorage.s3.S3Callable.java

License:Open Source License

/**
 * Override this if you don't want a transfer manager
 */
@Override
public T invoke(File f, VirtualChannel channel) throws IOException, InterruptedException {
    TransferManager transferManager = new TransferManager(helper.client());

    try {
        return invoke(transferManager, f, channel);
    } finally {
        transferManager.shutdownNow();
    }
}

From source file:modules.storage.AmazonS3Storage.java

License:Open Source License

@Override
public F.Promise<Void> store(Path path, String key, String name) {
    Promise<Void> promise = Futures.promise();

    TransferManager transferManager = new TransferManager(credentials);
    try {
        Upload upload = transferManager.upload(bucketName, key, path.toFile());
        upload.addProgressListener((ProgressListener) progressEvent -> {
            if (progressEvent.getEventType().isTransferEvent()) {
                if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_COMPLETED_EVENT)) {
                    transferManager.shutdownNow();
                    promise.success(null);
                } else if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_FAILED_EVENT)) {
                    transferManager.shutdownNow();
                    logger.error(progressEvent.toString());
                    promise.failure(new Exception(progressEvent.toString()));
                }
            }
        });
    } catch (AmazonServiceException ase) {
        logAmazonServiceException(ase);
    } catch (AmazonClientException ace) {
        logAmazonClientException(ace);
    }

    return F.Promise.wrap(promise.future());
}

From source file:n3phele.agent.repohandlers.S3Large.java

License:Open Source License

public Origin put(InputStream input, long length, String encoding) {
    Origin result = new Origin(source + "/" + root + "/" + key, 0, null, null);
    TransferManager tm = null;
    try {
        tm = new TransferManager(this.credentials);
        tm.getAmazonS3Client().setEndpoint(source.toString());

        objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(this.length = length);
        this.encoding = encoding;
        if (encoding != null)
            objectMetadata.setContentType(this.encoding);
        log.info("Output: " + source + "/" + root + "/" + key + " Content-Type: " + encoding + "length: "
                + length);
        Upload upload = tm.upload(root, key, input, objectMetadata);
        upload.waitForCompletion();
        // PutObjectResult object = s3().putObject(root, key, input, objectMetadata);
        result.setLength(length);
        ObjectMetadata od = s3().getObjectMetadata(root, key);
        result.setModified(od.getLastModified());
    } catch (AmazonServiceException e) {
        throw e;
    } catch (AmazonClientException e) {
        throw e;
    } catch (InterruptedException e) {
        throw new AmazonClientException(e.getMessage());
    } finally {
        try {
            input.close();
        } catch (IOException e) {
        }
        try {
            tm.shutdownNow();
        } catch (Exception e) {
        }
        try {
            s3().shutdown();
        } catch (Exception e) {
        }
    }
    return result;

}

From source file:org.apache.flink.streaming.tests.util.s3.S3UtilProgram.java

License:Apache License

private static void downloadFile(ParameterTool params) {
    final String bucket = params.getRequired("bucket");
    final String s3file = params.getRequired("s3file");
    final String localFile = params.getRequired("localFile");
    TransferManager tx = TransferManagerBuilder.defaultTransferManager();
    try {
        tx.download(bucket, s3file, new File(localFile)).waitForCompletion();
    } catch (InterruptedException e) {
        System.out.println("Transfer interrupted");
    } finally {
        tx.shutdownNow();
    }
}

From source file:org.apache.flink.streaming.tests.util.s3.S3UtilProgram.java

License:Apache License

private static void downloadByFullPathAndFileNamePrefix(ParameterTool params) {
    final String bucket = params.getRequired("bucket");
    final String s3prefix = params.getRequired("s3prefix");
    final String localFolder = params.getRequired("localFolder");
    final String s3filePrefix = params.get("s3filePrefix", "");
    TransferManager tx = TransferManagerBuilder.defaultTransferManager();
    Predicate<String> keyPredicate = getKeyFilterByFileNamePrefix(s3filePrefix);
    KeyFilter keyFilter = s3filePrefix.isEmpty() ? KeyFilter.INCLUDE_ALL
            : objectSummary -> keyPredicate.test(objectSummary.getKey());
    try {
        tx.downloadDirectory(bucket, s3prefix, new File(localFolder), keyFilter).waitForCompletion();
    } catch (InterruptedException e) {
        System.out.println("Transfer interrupted");
    } finally {
        tx.shutdownNow();
    }
}

From source file:org.finra.dm.dao.impl.S3DaoImpl.java

License:Apache License

/**
 * Performs a file/directory transfer.
 *
 * @param params the parameters.
 * @param transferer a transferer that knows how to perform the transfer.
 *
 * @return the results.
 * @throws InterruptedException if a problem is encountered.
 */
private S3FileTransferResultsDto performTransfer(final S3FileTransferRequestParamsDto params,
        Transferer transferer) throws InterruptedException {
    // Create a transfer manager.
    TransferManager transferManager = null;

    try {
        // Create a transfer manager.
        transferManager = getTransferManager(params);

        // Start a stop watch to keep track of how long the transfer takes.
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();

        // Perform the transfer.
        Transfer transfer = transferer.performTransfer(transferManager);
        TransferProgress transferProgress = transfer.getProgress();

        LOGGER.info(
                String.format("%d bytes transferred out of %d (%.1f%%)", transferProgress.getBytesTransferred(),
                        transferProgress.getTotalBytesToTransfer(), transferProgress.getPercentTransferred()));

        long stepCount = 0;

        // Loop until the transfer is complete.
        do {
            Thread.sleep(SLEEP_INTERVAL_MILLIS);
            stepCount++;

            // Log progress status every 30 seconds and when transfer is complete.
            if (transfer.isDone() || stepCount % 300 == 0) {
                LOGGER.info(String.format("%d bytes transferred out of %d (%.1f%%)",
                        transferProgress.getBytesTransferred(), transferProgress.getTotalBytesToTransfer(),
                        transferProgress.getPercentTransferred()));
            }
        } while (!transfer.isDone());

        // Stop the stop watch and create a results object.
        stopWatch.stop();

        // If the transfer failed, throw the underlying AWS exception if we can determine one. Otherwise, throw our own exception.
        TransferState transferState = transfer.getState();
        if (transferState == TransferState.Failed) {
            // The waitForException method should return the underlying AWS exception since the state is "Failed". It should not block since the
            // transfer is already "done" per previous code checking "isDone".
            AmazonClientException amazonClientException = transfer.waitForException();

            // If the returned exception is null, we weren't able to get the underlying AWS exception so just throw our own exception.
            // This is unlikely since the transfer failed, but it's better to handle the possibility just in case.
            if (amazonClientException == null) {
                throw new IllegalStateException("The transfer operation \"" + transfer.getDescription()
                        + "\" failed for an unknown reason.");
            }

            // Throw the Amazon underlying exception.
            throw amazonClientException;
        }
        // Ensure the transfer completed. If not, throw an exception.
        else if (transferState != TransferState.Completed) {
            throw new IllegalStateException("The transfer operation \"" + transfer.getDescription()
                    + "\" did not complete successfully. Current state: \"" + transferState + "\".");
        }

        // TransferProgress.getBytesTransferred() is not populated for S3 Copy objects.
        if (!(transfer instanceof Copy)) {
            // Sanity check for the number of bytes transferred.
            Assert.isTrue(transferProgress.getBytesTransferred() >= transferProgress.getTotalBytesToTransfer(),
                    String.format(
                            "Actual number of bytes transferred is less than expected (actual: %d bytes; expected: %d bytes).",
                            transferProgress.getBytesTransferred(),
                            transferProgress.getTotalBytesToTransfer()));
        }

        // Create the results object and populate it with the standard data.
        S3FileTransferResultsDto results = new S3FileTransferResultsDto();
        results.setDurationMillis(stopWatch.getTime());
        results.setTotalBytesTransferred(transfer.getProgress().getBytesTransferred());
        results.setTotalFilesTransferred(1L);

        if (transfer instanceof MultipleFileUpload) {
            // For upload directory, we need to calculate the total number of files transferred differently.
            results.setTotalFilesTransferred((long) ((MultipleFileUpload) transfer).getSubTransfers().size());
        } else if (transfer instanceof MultipleFileDownload) {
            // For download directory, we need to calculate the total number of files differently.
            results.setTotalFilesTransferred((long) listDirectory(params).size());
        }

        // Return the results.
        return results;
    } finally {
        // Shutdown the transfer manager to release resources. If this isn't done, the JVM may delay upon exiting.
        if (transferManager != null) {
            transferManager.shutdownNow();
        }
    }
}