Usage examples for com.amazonaws.services.s3.AmazonS3Client#shutdown()
public void shutdown()
From source file:com.netflix.ice.common.AwsUtils.java
License:Apache License
/**
 * Lists all object summaries with the given prefix in an S3 bucket, following
 * pagination until the listing is no longer truncated.
 *
 * @param bucket     S3 bucket name
 * @param prefix     key prefix to filter the listing
 * @param accountId  account id used to assume a role; may be null/empty
 * @param assumeRole role to assume in {@code accountId}; may be null/empty
 * @param externalId external id for the assumed role
 * @return summaries of every object under {@code prefix}
 */
public static List<S3ObjectSummary> listAllObjects(String bucket, String prefix, String accountId,
        String assumeRole, String externalId) {
    AmazonS3Client s3Client = AwsUtils.s3Client;
    try {
        ListObjectsRequest request = new ListObjectsRequest().withBucketName(bucket).withPrefix(prefix);
        List<S3ObjectSummary> result = Lists.newLinkedList();
        if (!StringUtils.isEmpty(accountId) && !StringUtils.isEmpty(assumeRole)) {
            // Build a short-lived client with assumed-role session credentials.
            Credentials assumedCredentials = getAssumedCredentials(accountId, assumeRole, externalId);
            s3Client = new AmazonS3Client(
                    new BasicSessionCredentials(assumedCredentials.getAccessKeyId(),
                            assumedCredentials.getSecretAccessKey(), assumedCredentials.getSessionToken()),
                    clientConfig);
        }
        // NOTE: the previous implementation paged with page.getNextMarker(), but S3
        // only returns NextMarker when a delimiter is set on the request — with a
        // plain prefix listing the marker stays null and the loop re-fetches the
        // same page forever. listNextBatchOfObjects() derives the marker correctly.
        ObjectListing page = s3Client.listObjects(request);
        result.addAll(page.getObjectSummaries());
        while (page.isTruncated()) {
            page = s3Client.listNextBatchOfObjects(page);
            result.addAll(page.getObjectSummaries());
        }
        return result;
    } finally {
        // Only shut down a client we created locally; the shared client is owned
        // by AwsUtils and must stay alive.
        if (s3Client != AwsUtils.s3Client)
            s3Client.shutdown();
    }
}
From source file:com.netflix.ice.common.AwsUtils.java
License:Apache License
public static boolean downloadFileIfChangedSince(String bucketName, String bucketFilePrefix, File file, long milles, String accountId, String assumeRole, String externalId) { AmazonS3Client s3Client = AwsUtils.s3Client; try {// w ww. ja va 2s . c om if (!StringUtils.isEmpty(accountId) && !StringUtils.isEmpty(assumeRole)) { Credentials assumedCredentials = getAssumedCredentials(accountId, assumeRole, externalId); s3Client = new AmazonS3Client( new BasicSessionCredentials(assumedCredentials.getAccessKeyId(), assumedCredentials.getSecretAccessKey(), assumedCredentials.getSessionToken()), clientConfig); } ObjectMetadata metadata = s3Client.getObjectMetadata(bucketName, bucketFilePrefix + file.getName()); boolean download = !file.exists() || metadata.getLastModified().getTime() > milles; if (download) { return download(s3Client, bucketName, bucketFilePrefix + file.getName(), file); } else return download; } finally { if (s3Client != AwsUtils.s3Client) s3Client.shutdown(); } }
From source file:com.qubole.qds.sdk.java.client.ResultStreamer.java
License:Apache License
/**
 * Creates an {@link S3Client} backed by an {@link AmazonS3Client} built from the
 * current account's storage credentials. The adapter's {@code shutdown} releases
 * the underlying AWS client.
 */
@VisibleForTesting
protected S3Client newS3Client() throws Exception {
    Account account = getAccount();
    AWSCredentials credentials = new BasicAWSCredentials(account.getStorage_access_key(),
            account.getStorage_secret_key());
    final AmazonS3Client delegate = new AmazonS3Client(credentials);
    // Adapt the concrete AWS client to the local S3Client interface by delegation.
    return new S3Client() {
        @Override
        public void shutdown() {
            delegate.shutdown();
        }

        @Override
        public ObjectListing listObjects(ListObjectsRequest listObjectsRequest) {
            return delegate.listObjects(listObjectsRequest);
        }

        @Override
        public S3Object getObject(String bucket, String key) {
            return delegate.getObject(bucket, key);
        }
    };
}
From source file:com.streamsets.datacollector.bundles.SupportBundleManager.java
License:Apache License
/** * Instead of providing support bundle directly to user, upload it to StreamSets backend services. */// ww w . j a v a 2 s . c o m public void uploadNewBundleFromInstances(List<BundleContentGenerator> generators, BundleType bundleType) throws IOException { // Generate bundle SupportBundle bundle = generateNewBundleFromInstances(generators, bundleType); boolean enabled = configuration.get(Constants.UPLOAD_ENABLED, Constants.DEFAULT_UPLOAD_ENABLED); String accessKey = configuration.get(Constants.UPLOAD_ACCESS, Constants.DEFAULT_UPLOAD_ACCESS); String secretKey = configuration.get(Constants.UPLOAD_SECRET, Constants.DEFAULT_UPLOAD_SECRET); String bucket = configuration.get(Constants.UPLOAD_BUCKET, Constants.DEFAULT_UPLOAD_BUCKET); int bufferSize = configuration.get(Constants.UPLOAD_BUFFER_SIZE, Constants.DEFAULT_UPLOAD_BUFFER_SIZE); if (!enabled) { throw new IOException("Uploading support bundles was disabled by administrator."); } AWSCredentialsProvider credentialsProvider = new StaticCredentialsProvider( new BasicAWSCredentials(accessKey, secretKey)); AmazonS3Client s3Client = new AmazonS3Client(credentialsProvider, new ClientConfiguration()); s3Client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true)); s3Client.setRegion(Region.getRegion(Regions.US_WEST_2)); // Object Metadata ObjectMetadata s3Metadata = new ObjectMetadata(); for (Map.Entry<Object, Object> entry : getMetadata(bundleType).entrySet()) { s3Metadata.addUserMetadata((String) entry.getKey(), (String) entry.getValue()); } List<PartETag> partETags; InitiateMultipartUploadResult initResponse = null; try { // Uploading part by part LOG.info("Initiating multi-part support bundle upload"); partETags = new ArrayList<>(); InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, bundle.getBundleKey()); initRequest.setObjectMetadata(s3Metadata); initResponse = s3Client.initiateMultipartUpload(initRequest); } catch (AmazonClientException e) { LOG.error("Support 
bundle upload failed: ", e); throw new IOException("Support bundle upload failed", e); } try { byte[] buffer = new byte[bufferSize]; int partId = 1; int size = -1; while ((size = readFully(bundle.getInputStream(), buffer)) != -1) { LOG.debug("Uploading part {} of size {}", partId, size); UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucket) .withKey(bundle.getBundleKey()).withUploadId(initResponse.getUploadId()) .withPartNumber(partId++).withInputStream(new ByteArrayInputStream(buffer)) .withPartSize(size); partETags.add(s3Client.uploadPart(uploadRequest).getPartETag()); } CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucket, bundle.getBundleKey(), initResponse.getUploadId(), partETags); s3Client.completeMultipartUpload(compRequest); LOG.info("Support bundle upload finished"); } catch (Exception e) { LOG.error("Support bundle upload failed", e); s3Client.abortMultipartUpload( new AbortMultipartUploadRequest(bucket, bundle.getBundleKey(), initResponse.getUploadId())); throw new IOException("Can't upload support bundle", e); } finally { // Close the client s3Client.shutdown(); } }
From source file:hu.mta.sztaki.lpds.cloud.entice.imageoptimizer.iaashandler.amazontarget.Storage.java
License:Apache License
/**
 * Downloads an object from an S3-compatible store into a local file.
 *
 * @param endpoint  S3 endpoint URL
 * @param accessKey Access key
 * @param secretKey Secret key
 * @param bucket    Bucket name
 * @param path      Key name of the object to download (path + file name)
 * @param file      Local file to download to
 * @throws Exception On any error
 */
public static void download(String endpoint, String accessKey, String secretKey, String bucket, String path,
        File file) throws Exception {
    AmazonS3Client amazonS3Client = null;
    try {
        AWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey);
        ClientConfiguration clientConfiguration = new ClientConfiguration();
        clientConfiguration.setMaxConnections(MAX_CONNECTIONS);
        clientConfiguration.setMaxErrorRetry(PredefinedRetryPolicies.DEFAULT_MAX_ERROR_RETRY);
        clientConfiguration.setConnectionTimeout(ClientConfiguration.DEFAULT_CONNECTION_TIMEOUT);
        amazonS3Client = new AmazonS3Client(awsCredentials, clientConfiguration);
        S3ClientOptions clientOptions = new S3ClientOptions().withPathStyleAccess(true);
        amazonS3Client.setS3ClientOptions(clientOptions);
        amazonS3Client.setEndpoint(endpoint);
        S3Object object = amazonS3Client.getObject(new GetObjectRequest(bucket, path));
        // try-with-resources guarantees both streams are closed on every path,
        // replacing the original's close-in-try plus swallow-in-finally pattern.
        try (InputStream in = object.getObjectContent();
                OutputStream out = new FileOutputStream(file)) {
            byte[] buf = new byte[BUFFER_SIZE];
            int count;
            while ((count = in.read(buf)) != -1)
                out.write(buf, 0, count);
        }
    } catch (AmazonClientException | IOException x) {
        // AmazonServiceException extends AmazonClientException, so this single
        // multi-catch covers all three identical catch blocks of the original.
        Shrinker.myLogger.info("download error: " + x.getMessage());
        throw new Exception("download exception", x);
    } finally {
        if (amazonS3Client != null) {
            try {
                amazonS3Client.shutdown();
            } catch (Exception ignored) {
                // best-effort shutdown; nothing useful to do here
            }
        }
    }
}
From source file:hu.mta.sztaki.lpds.cloud.entice.imageoptimizer.iaashandler.amazontarget.Storage.java
License:Apache License
/** * @param file Local file to upload/*from w w w .ja va2 s .c o m*/ * @param endpoint S3 endpoint URL * @param accessKey Access key * @param secretKey Secret key * @param bucket Bucket name * @param path Key name (path + file name) * @throws Exception On any error */ public static void upload(File file, String endpoint, String accessKey, String secretKey, String bucket, String path) throws Exception { AmazonS3Client amazonS3Client = null; try { AWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey); ClientConfiguration clientConfiguration = new ClientConfiguration(); clientConfiguration.setMaxConnections(MAX_CONNECTIONS); clientConfiguration.setMaxErrorRetry(PredefinedRetryPolicies.DEFAULT_MAX_ERROR_RETRY); clientConfiguration.setConnectionTimeout(ClientConfiguration.DEFAULT_CONNECTION_TIMEOUT); amazonS3Client = new AmazonS3Client(awsCredentials, clientConfiguration); S3ClientOptions clientOptions = new S3ClientOptions().withPathStyleAccess(true); amazonS3Client.setS3ClientOptions(clientOptions); amazonS3Client.setEndpoint(endpoint); // amazonS3Client.putObject(new PutObjectRequest(bucket, path, file)); // up to 5GB TransferManager tm = new TransferManager(amazonS3Client); // up to 5TB Upload upload = tm.upload(bucket, path, file); // while (!upload.isDone()) { upload.getProgress().getBytesTransferred(); Thread.sleep(1000); } // to get progress upload.waitForCompletion(); tm.shutdownNow(); } catch (AmazonServiceException x) { Shrinker.myLogger.info("upload error: " + x.getMessage()); throw new Exception("upload exception", x); } catch (AmazonClientException x) { Shrinker.myLogger.info("upload error: " + x.getMessage()); throw new Exception("upload exception", x); } finally { if (amazonS3Client != null) { try { amazonS3Client.shutdown(); } catch (Exception e) { } } } }
From source file:org.elasticsearch.cloud.aws.AwsS3Service.java
License:Apache License
@Override protected void doClose() throws ElasticsearchException { for (AmazonS3Client client : clients.values()) { client.shutdown(); }//w ww.j a v a2 s. com }
From source file:org.elasticsearch.cloud.aws.InternalAwsS3Service.java
License:Apache License
@Override protected void doClose() throws ElasticsearchException { for (AmazonS3Client client : clients.values()) { client.shutdown(); }//from w ww .ja va 2s.c om // Ensure that IdleConnectionReaper is shutdown IdleConnectionReaper.shutdown(); }
From source file:org.elasticsearch.repositories.s3.InternalAwsS3Service.java
License:Apache License
@Override protected void doClose() throws ElasticsearchException { for (AmazonS3Client client : clientsCache.values()) { client.shutdown(); }/*from ww w . j a v a 2 s. c o m*/ // Ensure that IdleConnectionReaper is shutdown IdleConnectionReaper.shutdown(); }
From source file:org.finra.dm.dao.impl.S3DaoImpl.java
License:Apache License
@Override public ObjectMetadata getObjectMetadata(final S3FileTransferRequestParamsDto params) { AmazonS3Client s3Client = null; try {//w w w .j a va 2 s . c o m s3Client = getAmazonS3(params); return s3Operations.getObjectMetadata(params.getS3BucketName(), params.getS3KeyPrefix(), s3Client); } catch (AmazonServiceException e) { if (e.getStatusCode() == HttpStatus.SC_NOT_FOUND) { return null; } throw new IllegalStateException( String.format("Failed to get S3 metadata for object key \"%s\" from bucket \"%s\". Reason: %s", params.getS3KeyPrefix(), params.getS3BucketName(), e.getMessage()), e); } finally { // Shutdown the AmazonS3Client instance to release resources. if (s3Client != null) { s3Client.shutdown(); } } }