Example usage for com.amazonaws.services.s3.transfer TransferManager shutdownNow

Introduction

On this page you will find usage examples for com.amazonaws.services.s3.transfer TransferManager shutdownNow, drawn from open-source projects.

Prototype

public void shutdownNow(boolean shutDownS3Client) 

Document

Forcefully shuts down this TransferManager instance; currently executing transfers will not be allowed to finish. The shutDownS3Client argument controls whether the underlying AmazonS3 client is also shut down.
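
As a quick orientation before the examples, here is a minimal sketch of a typical call site (the client, bucket, key, and file path are illustrative placeholders, not taken from any example below). The boolean decides whether the wrapped AmazonS3 client is shut down together with the transfer threads. Note that several examples use the TransferManager(AmazonS3) constructor, which later 1.11.x SDKs deprecate in favor of TransferManagerBuilder:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

import java.io.File;

public class ShutdownNowSketch {
    public static void main(String[] args) throws InterruptedException {
        // Credentials and region are resolved from the environment.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        TransferManager tm = TransferManagerBuilder.standard().withS3Client(s3).build();
        try {
            // "my-bucket", "example/key" and the local path are hypothetical.
            Upload upload = tm.upload("my-bucket", "example/key", new File("/tmp/example.txt"));
            upload.waitForCompletion();
        } finally {
            // false: tear down the transfer threads but keep s3 usable;
            // pass true to shut down the wrapped client as well.
            tm.shutdownNow(false);
        }
        // The client survived shutdownNow(false), so release it explicitly.
        s3.shutdown();
    }
}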

Usage

From source file: com.hpe.caf.worker.datastore.s3.S3DataStore.java

License: Apache License

private String store(InputStream inputStream, String partialReference, Long length) throws DataStoreException {
    try {
        String fullReference = partialReference + UUID.randomUUID().toString();

        ObjectMetadata objectMetadata = new ObjectMetadata();
        if (length != null) {
            objectMetadata.setContentLength(length);
        }

        TransferManager transferManager = new TransferManager(amazonS3Client);
        Upload upload = transferManager.upload(bucketName, fullReference, inputStream, objectMetadata);

        upload.waitForCompletion();
        //            amazonS3Client.putObject(bucketName, fullReference, inputStream, objectMetadata);

        transferManager.shutdownNow(false);
        return fullReference;
    } catch (Exception ex) {
        errors.incrementAndGet();
        throw new DataStoreException("Could not store input stream.", ex);
    }
}
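
Note that this example creates a TransferManager per call and passes false so the injected amazonS3Client remains usable for later store calls. Because shutdownNow is not inside a finally block, the manager's thread pool would leak if waitForCompletion threw; the examples below use the safer finally-based pattern.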

From source file: jetbrains.buildServer.util.amazon.S3Util.java

License: Apache License

@NotNull
private static <T extends Transfer> Collection<T> withTransferManager(@NotNull final AmazonS3Client s3Client,
        final boolean shutdownClient, @NotNull final WithTransferManager<T> withTransferManager)
        throws Throwable {

    final TransferManager manager = TransferManagerBuilder.standard().withS3Client(s3Client)
            .withExecutorFactory(createExecutorFactory(createDefaultExecutorService()))
            .withShutDownThreadPools(true).build();

    try {
        final ArrayList<T> transfers = new ArrayList<T>(withTransferManager.run(manager));

        final AtomicBoolean isInterrupted = new AtomicBoolean(false);

        if (withTransferManager instanceof InterruptAwareWithTransferManager) {
            final TransferManagerInterruptHook hook = new TransferManagerInterruptHook() {
                @Override
                public void interrupt() throws Throwable {
                    isInterrupted.set(true);

                    for (T transfer : transfers) {
                        if (transfer instanceof Download) {
                            ((Download) transfer).abort();
                            continue;
                        }

                        if (transfer instanceof Upload) {
                            ((Upload) transfer).abort();
                            continue;
                        }

                        if (transfer instanceof MultipleFileDownload) {
                            ((MultipleFileDownload) transfer).abort();
                            continue;
                        }

                        LOG.warn("Transfer type " + transfer.getClass().getName()
                                + " does not support interrupt");
                    }
                }
            };
            ((InterruptAwareWithTransferManager) withTransferManager).setInterruptHook(hook);
        }

        for (T transfer : transfers) {
            try {
                transfer.waitForCompletion();
            } catch (Throwable t) {
                if (!isInterrupted.get()) {
                    throw t;
                }
            }
        }

        return CollectionsUtil.filterCollection(transfers, new Filter<T>() {
            @Override
            public boolean accept(@NotNull T data) {
                return Transfer.TransferState.Completed == data.getState();
            }
        });
    } finally {
        manager.shutdownNow(shutdownClient);
    }
}
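
Here the shutdown decision is deferred to the caller: the shutdownClient flag is passed straight through to shutdownNow, and the finally block guarantees the manager is released even when a transfer fails or is interrupted. The interrupt hook aborts the individual transfers rather than shutting the manager down mid-flight.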

From source file: org.alanwilliamson.amazon.s3.BackgroundUploader.java

License: Open Source License

private void uploadFile(Map<String, Object> jobFile) {

    File localFile = new File((String) jobFile.get("localpath"));
    if (!localFile.isFile()) {
        removeJobFile(jobFile);
        callbackCfc(jobFile, false, "local file no longer exists");
        cfEngine.log("AmazonS3Write.BackgroundUploader: file no longer exists=" + localFile.getName());
        return;
    }

    // Setup the object data
    ObjectMetadata omd = new ObjectMetadata();
    if (jobFile.containsKey("metadata"))
        omd.setUserMetadata((Map<String, String>) jobFile.get("metadata"));

    TransferManager tm = null;
    AmazonS3 s3Client = null;
    try {
        AmazonKey amazonKey = (AmazonKey) jobFile.get("amazonkey");
        s3Client = new AmazonBase().getAmazonS3(amazonKey);

        PutObjectRequest por = new PutObjectRequest((String) jobFile.get("bucket"), (String) jobFile.get("key"),
                localFile);
        por.setMetadata(omd);
        por.setStorageClass((StorageClass) jobFile.get("storage"));

        if (jobFile.containsKey("acl"))
            por.setCannedAcl(amazonKey.getAmazonCannedAcl((String) jobFile.get("acl")));

        if (jobFile.containsKey("aes256key"))
            por.setSSECustomerKey(new SSECustomerKey((String) jobFile.get("aes256key")));

        if (jobFile.containsKey("customheaders")) {
            Map<String, String> customheaders = (Map) jobFile.get("customheaders");

            Iterator<String> it = customheaders.keySet().iterator();
            while (it.hasNext()) {
                String k = it.next();
                por.putCustomRequestHeader(k, customheaders.get(k));
            }
        }

        long startTime = System.currentTimeMillis();
        tm = new TransferManager(s3Client);
        Upload upload = tm.upload(por);
        upload.waitForCompletion();

        log(jobFile, "Uploaded; timems=" + (System.currentTimeMillis() - startTime));

        removeJobFile(jobFile);
        callbackCfc(jobFile, true, null);

        if ((Boolean) jobFile.get("deletefile"))
            localFile.delete();

    } catch (Exception e) {
        log(jobFile, "Failed=" + e.getMessage());

        callbackCfc(jobFile, false, e.getMessage());

        int retry = (Integer) jobFile.get("retry");
        int attempt = (Integer) jobFile.get("attempt") + 1;

        if (retry == attempt) {
            removeJobFile(jobFile);
        } else {
            jobFile.put("attempt", attempt);
            jobFile.put("attemptdate", System.currentTimeMillis() + (Long) jobFile.get("retryms"));
            acceptFile(jobFile);
        }

        if (s3Client != null)
            cleanupMultiPartUploads(s3Client, (String) jobFile.get("bucket"));

    } finally {
        if (tm != null)
            tm.shutdownNow(true);
    }

}
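
shutdownNow(true) is appropriate here because the AmazonS3 client appears to be created inside the method for a single job, so nothing else holds a reference to it once the upload finishes or fails.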

From source file: org.alanwilliamson.amazon.s3.BackgroundUploader.java

License: Open Source License

private void cleanupMultiPartUploads(AmazonS3 s3Client, String bucket) {
    TransferManager tm = new TransferManager(s3Client);
    try {
        tm.abortMultipartUploads(bucket, new Date(System.currentTimeMillis() - DateUtil.DAY_MS));
    } catch (AmazonClientException amazonClientException) {
        cfEngine.log("AmazonS3Write.BackgroundUploader.cleanupMultiPartUploads():"
                + amazonClientException.getMessage());
    }
    tm.shutdownNow(true);
}
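
This helper builds a short-lived TransferManager purely to call abortMultipartUploads for uploads more than a day old. Passing true also shuts down the caller-supplied s3Client, which works here only because uploadFile invokes the helper on its failure path, just before its own shutdown.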

From source file: org.apache.hadoop.fs.s3a.S3AFileSystem.java

License: Apache License

/** Called after a new FileSystem instance is constructed.
 * @param name a uri whose authority section names the host, port, etc.
 *   for this FileSystem
 * @param conf the configuration
 */
public void initialize(URI name, Configuration conf) throws IOException {
    super.initialize(name, conf);

    uri = URI.create(name.getScheme() + "://" + name.getAuthority());
    workingDir = new Path("/user", System.getProperty("user.name")).makeQualified(this.uri,
            this.getWorkingDirectory());

    // Try to get our credentials or just connect anonymously
    String accessKey = conf.get(NEW_ACCESS_KEY, conf.get(OLD_ACCESS_KEY, null));
    String secretKey = conf.get(NEW_SECRET_KEY, conf.get(OLD_SECRET_KEY, null));

    String userInfo = name.getUserInfo();
    if (userInfo != null) {
        int index = userInfo.indexOf(':');
        if (index != -1) {
            accessKey = userInfo.substring(0, index);
            secretKey = userInfo.substring(index + 1);
        } else {
            accessKey = userInfo;
        }
    }

    AWSCredentialsProviderChain credentials = new AWSCredentialsProviderChain(
            new BasicAWSCredentialsProvider(accessKey, secretKey), new InstanceProfileCredentialsProvider(),
            new AnonymousAWSCredentialsProvider());

    bucket = name.getHost();

    ClientConfiguration awsConf = new ClientConfiguration();
    awsConf.setMaxConnections(conf.getInt(NEW_MAXIMUM_CONNECTIONS,
            conf.getInt(OLD_MAXIMUM_CONNECTIONS, DEFAULT_MAXIMUM_CONNECTIONS)));
    awsConf.setProtocol(conf.getBoolean(NEW_SECURE_CONNECTIONS,
            conf.getBoolean(OLD_SECURE_CONNECTIONS, DEFAULT_SECURE_CONNECTIONS)) ? Protocol.HTTPS
                    : Protocol.HTTP);
    awsConf.setMaxErrorRetry(
            conf.getInt(NEW_MAX_ERROR_RETRIES, conf.getInt(OLD_MAX_ERROR_RETRIES, DEFAULT_MAX_ERROR_RETRIES)));
    awsConf.setSocketTimeout(
            conf.getInt(NEW_SOCKET_TIMEOUT, conf.getInt(OLD_SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT)));

    s3 = new AmazonS3Client(credentials, awsConf);

    maxKeys = conf.getInt(NEW_MAX_PAGING_KEYS, conf.getInt(OLD_MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS));
    partSize = conf.getLong(NEW_MULTIPART_SIZE, conf.getLong(OLD_MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE));
    partSizeThreshold = conf.getLong(NEW_MIN_MULTIPART_THRESHOLD,
            conf.getLong(OLD_MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD));

    if (partSize < 5 * 1024 * 1024) {
        LOG.error(NEW_MULTIPART_SIZE + " must be at least 5 MB");
        partSize = 5 * 1024 * 1024;
    }

    if (partSizeThreshold < 5 * 1024 * 1024) {
        LOG.error(NEW_MIN_MULTIPART_THRESHOLD + " must be at least 5 MB");
        partSizeThreshold = 5 * 1024 * 1024;
    }

    String cannedACLName = conf.get(NEW_CANNED_ACL, conf.get(OLD_CANNED_ACL, DEFAULT_CANNED_ACL));
    if (!cannedACLName.isEmpty()) {
        cannedACL = CannedAccessControlList.valueOf(cannedACLName);
    } else {
        cannedACL = null;
    }

    if (!s3.doesBucketExist(bucket)) {
        throw new IOException("Bucket " + bucket + " does not exist");
    }

    boolean purgeExistingMultipart = conf.getBoolean(NEW_PURGE_EXISTING_MULTIPART,
            conf.getBoolean(OLD_PURGE_EXISTING_MULTIPART, DEFAULT_PURGE_EXISTING_MULTIPART));
    long purgeExistingMultipartAge = conf.getLong(NEW_PURGE_EXISTING_MULTIPART_AGE,
            conf.getLong(OLD_PURGE_EXISTING_MULTIPART_AGE, DEFAULT_PURGE_EXISTING_MULTIPART_AGE));

    if (purgeExistingMultipart) {
        TransferManager transferManager = new TransferManager(s3);
        Date purgeBefore = new Date(new Date().getTime() - purgeExistingMultipartAge * 1000);

        transferManager.abortMultipartUploads(bucket, purgeBefore);
        transferManager.shutdownNow(false);
    }

    serverSideEncryptionAlgorithm = conf.get(SERVER_SIDE_ENCRYPTION_ALGORITHM, null);

    setConf(conf);
}
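
During initialization a throwaway TransferManager purges stale multipart uploads; shutdownNow(false) keeps the freshly built s3 client, which the filesystem then uses for the rest of its lifetime.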

From source file: org.apache.hadoop.fs.s3a.S3AFileSystem.java

License: Apache License

/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5. Sadly this doesn't seem to be
 * used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException {
    String key = pathToKey(dst);

    if (!overwrite && exists(dst)) {
        throw new IOException(dst + " already exists");
    }

    LOG.info("Copying local file from " + src + " to " + dst);

    // Since we have a local file, we don't need to stream into a temporary file
    LocalFileSystem local = getLocal(getConf());
    File srcfile = local.pathToFile(src);

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMinimumUploadPartSize(partSize);
    transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);

    TransferManager transfers = new TransferManager(s3);
    transfers.setConfiguration(transferConfiguration);

    final ObjectMetadata om = new ObjectMetadata();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }

    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setMetadata(om);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            }
        }
    };

    Upload up = transfers.upload(putObjectRequest);
    up.addProgressListener(progressListener);
    try {
        up.waitForUploadResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    } finally {
        transfers.shutdownNow(false);
    }

    // This will delete unnecessary fake parent directories
    finishedWrite(key);

    if (delSrc) {
        local.delete(src, false);
    }
}
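
A per-operation TransferManager again, shut down in a finally block with false so the filesystem's shared s3 client survives the upload. The copyFile example below follows the identical pattern for server-side copies.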

From source file: org.apache.hadoop.fs.s3a.S3AFileSystem.java

License: Apache License

private void copyFile(String srcKey, String dstKey) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("copyFile " + srcKey + " -> " + dstKey);
    }

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMultipartCopyPartSize(partSize);

    TransferManager transfers = new TransferManager(s3);
    transfers.setConfiguration(transferConfiguration);

    ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
    final ObjectMetadata dstom = srcom.clone();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }

    CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
    copyObjectRequest.setCannedAccessControlList(cannedACL);
    copyObjectRequest.setNewObjectMetadata(dstom);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            }
        }
    };

    Copy copy = transfers.copy(copyObjectRequest);
    copy.addProgressListener(progressListener);
    try {
        copy.waitForCopyResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    } finally {
        transfers.shutdownNow(false);
    }
}

From source file: org.apache.streams.s3.S3OutputStreamWrapper.java

License: Apache License

private void addFile() throws Exception {

    InputStream is = new ByteArrayInputStream(this.outputStream.toByteArray());
    int contentLength = outputStream.size();

    TransferManager transferManager = new TransferManager(amazonS3Client);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setExpirationTime(DateTime.now().plusDays(365 * 3).toDate());
    metadata.setContentLength(contentLength);

    metadata.addUserMetadata("writer", "org.apache.streams");

    for (String s : metaData.keySet())
        metadata.addUserMetadata(s, metaData.get(s));

    String fileNameToWrite = path + fileName;
    Upload upload = transferManager.upload(bucketName, fileNameToWrite, is, metadata);
    try {
        upload.waitForUploadResult();
        LOGGER.info("S3 File Close[{} kb] - {}", contentLength / 1024, path + fileName);
    } catch (Exception e) {
        // No Op
    } finally {
        // Release the stream and the transfer threads even if the upload failed;
        // false keeps the shared amazonS3Client available for the next file.
        is.close();
        transferManager.shutdownNow(false);
    }

}
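
Doing the cleanup in a finally block ensures the in-memory stream is closed and the transfer threads are released even when the upload fails, while false once more leaves the shared amazonS3Client open for the next file.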