Usage examples for com.amazonaws.services.s3.AmazonS3URI#getBucket()
public String getBucket()
From source file:br.com.ingenieux.mojo.cloudformation.PushStackMojo.java
License:Apache License
/**
 * Builds the externally visible URL for the object identified by the given S3 URI.
 *
 * @param destinationS3Uri parsed S3 URI holding the target bucket and key
 * @return the resource URL as reported by the configured S3 client
 * @throws Exception if the underlying client call fails
 */
protected String generateExternalUrl(AmazonS3URI destinationS3Uri) throws Exception {
    final String bucket = destinationS3Uri.getBucket();
    final String key = destinationS3Uri.getKey();
    return s3Client.getResourceUrl(bucket, key);
}
From source file:br.com.ingenieux.mojo.cloudformation.PushStackMojo.java
License:Apache License
/**
 * Uploads the given template file to the destination S3 location.
 *
 * <p>Fixes two defects in the previous implementation:
 * <ul>
 *   <li>the {@code templateLocation} parameter was ignored in favor of the
 *       {@code this.templateLocation} field, leaving the parameter dead;</li>
 *   <li>the {@link FileInputStream} was never closed (resource leak) — it is
 *       now managed by try-with-resources.</li>
 * </ul>
 *
 * @param templateLocation local file containing the template contents to upload
 * @param destinationS3Uri parsed S3 URI holding the target bucket and key
 * @throws Exception if the file cannot be opened or the upload fails
 */
private void uploadContents(File templateLocation, AmazonS3URI destinationS3Uri) throws Exception {
    try (FileInputStream contents = new FileInputStream(templateLocation)) {
        // null metadata: the SDK will buffer to determine content length;
        // acceptable for template-sized payloads.
        s3Client.putObject(destinationS3Uri.getBucket(), destinationS3Uri.getKey(), contents, null);
    }
}
From source file:com.facebook.presto.kinesis.s3config.S3TableConfigClient.java
License:Apache License
/** * Call S3 to get the most recent object list. * * This is an object list request to AWS in the given "directory". * * @return/*from w w w . j a va 2s. co m*/ */ protected List<S3ObjectSummary> getObjectSummaries() { AmazonS3Client s3client = this.clientManager.getS3Client(); AmazonS3URI directoryURI = new AmazonS3URI(this.bucketUrl); ArrayList<S3ObjectSummary> returnList = new ArrayList<S3ObjectSummary>(); try { log.info("Getting the listing of objects in the S3 table config directory: bucket %s prefix %s :", directoryURI.getBucket(), directoryURI.getKey()); ListObjectsRequest req = new ListObjectsRequest().withBucketName(directoryURI.getBucket()) .withPrefix(directoryURI.getKey() + "/").withDelimiter("/").withMaxKeys(25); ObjectListing result; do { result = s3client.listObjects(req); returnList.addAll(result.getObjectSummaries()); req.setMarker(result.getNextMarker()); } while (result.isTruncated()); log.info("Completed getting S3 object listing."); } catch (AmazonServiceException ase) { StringBuilder sb = new StringBuilder(); sb.append("Caught an AmazonServiceException, which means your request made it "); sb.append("to Amazon S3, but was rejected with an error response for some reason.\n"); sb.append("Error Message: " + ase.getMessage()); sb.append("HTTP Status Code: " + ase.getStatusCode()); sb.append("AWS Error Code: " + ase.getErrorCode()); sb.append("Error Type: " + ase.getErrorType()); sb.append("Request ID: " + ase.getRequestId()); log.error(sb.toString(), ase); } catch (AmazonClientException ace) { StringBuilder sb = new StringBuilder(); sb.append("Caught an AmazonClientException, " + "which means the client encountered " + "an internal error while trying to communicate" + " with S3, " + "such as not being able to access the network."); sb.append("Error Message: " + ace.getMessage()); log.error(sb.toString(), ace); } return returnList; }
From source file:com.netflix.genie.common.internal.aws.s3.S3ClientFactory.java
License:Apache License
/** * Get an {@link AmazonS3} client instance appropriate for the given {@link AmazonS3URI}. * * @param s3URI The URI of the S3 resource this client is expected to access. * @return A S3 client instance which should be used to access the S3 resource *//* www . jav a2 s.c om*/ public AmazonS3 getClient(final AmazonS3URI s3URI) { final String bucketName = s3URI.getBucket(); final S3ClientKey s3ClientKey; /* * The purpose of the dual maps is to make sure we don't create an unnecessary number of S3 clients. * If we made the client cache just bucketName -> client directly we'd have no way to make know if an already * created instance for another bucket could be re-used for this bucket since it could be same region/role * combination. This way we first map the bucket name to a key of role/region and then use that key * to find a re-usable client for those dimensions. */ s3ClientKey = this.bucketToClientKey.computeIfAbsent(bucketName, key -> { // We've never seen this bucket before. Calculate the key. /* * Region Resolution rules: * 1. Is it part of the S3 URI already? Use that * 2. Is it part of the properties passed in by admin/user Use that * 3. Fall back to whatever the default is for this process */ final Regions bucketRegion; final String uriBucketRegion = s3URI.getRegion(); if (StringUtils.isNotBlank(uriBucketRegion)) { bucketRegion = Regions.fromName(uriBucketRegion); } else { final String propertyBucketRegion = this.bucketProperties.containsKey(key) ? this.bucketProperties.get(key).getRegion().orElse(null) : null; if (StringUtils.isNotBlank(propertyBucketRegion)) { bucketRegion = Regions.fromName(propertyBucketRegion); } else { bucketRegion = this.defaultRegion; } } // Anything special in the bucket we need to reference final String roleARN = this.bucketProperties.containsKey(key) ? 
this.bucketProperties.get(key).getRoleARN().orElse(null) : null; return new S3ClientKey(bucketRegion, roleARN); }); return this.clientCache.computeIfAbsent(s3ClientKey, this::buildS3Client); }
From source file:com.netflix.genie.common.internal.aws.s3.S3ProtocolResolver.java
License:Apache License
/** * {@inheritDoc}// w ww . j a v a 2 s .c o m */ @Override public Resource resolve(final String location, final ResourceLoader resourceLoader) { log.debug("Attempting to resolve if {} is a S3 resource or not", location); final AmazonS3URI s3URI; try { s3URI = new AmazonS3URI(location); } catch (final IllegalArgumentException iae) { log.debug("{} is not a valid S3 resource (Error message: {}).", location, iae.getMessage()); return null; } final AmazonS3 client = this.s3ClientFactory.getClient(s3URI); log.debug("{} is a valid S3 resource.", location); // TODO: This implementation from Spring Cloud AWS always wraps the passed in client with a proxy that follows // redirects. I'm not sure if we want that or not. Probably ok for now but maybe revisit later? return new SimpleStorageResource(client, s3URI.getBucket(), s3URI.getKey(), this.s3TaskExecutor, s3URI.getVersionId()); }
From source file:com.netflix.genie.common.internal.services.impl.S3JobArchiverImpl.java
License:Apache License
/**
 * {@inheritDoc}
 */
@Override
public boolean archiveDirectory(
    @NotNull final Path directory,
    @NotNull final URI target
) throws JobArchiveException {
    final String uriString = target.toString();

    // A target that doesn't parse as an S3 URI simply isn't ours to handle.
    final AmazonS3URI s3URI;
    try {
        s3URI = new AmazonS3URI(target);
    } catch (final IllegalArgumentException iae) {
        log.debug("{} is not a valid S3 URI", uriString);
        return false;
    }

    final String directoryString = directory.toString();
    log.debug("{} is a valid S3 location. Proceeding to archive {} to location: {}",
        uriString, directoryString, uriString);

    try {
        // Recursive upload of the whole directory tree, blocking until done.
        this.s3ClientFactory
            .getTransferManager(s3URI)
            .uploadDirectory(s3URI.getBucket(), s3URI.getKey(), directory.toFile(), true)
            .waitForCompletion();
        return true;
    } catch (final Exception e) {
        log.error("Error archiving to S3 location: {} ", uriString, e);
        throw new JobArchiveException("Error archiving " + directoryString, e);
    }
}
From source file:com.netflix.genie.web.services.impl.S3FileTransferImpl.java
License:Apache License
/** * {@inheritDoc}/*from w ww. ja v a 2 s . c o m*/ */ @Override public void getFile(@NotBlank(message = "Source file path cannot be empty.") final String srcRemotePath, @NotBlank(message = "Destination local path cannot be empty") final String dstLocalPath) throws GenieException { final long start = System.nanoTime(); final Set<Tag> tags = Sets.newHashSet(); try { log.debug("Called with src path {} and destination path {}", srcRemotePath, dstLocalPath); final AmazonS3URI s3Uri = getS3Uri(srcRemotePath); try { this.s3ClientFactory.getClient(s3Uri) .getObject(new GetObjectRequest(s3Uri.getBucket(), s3Uri.getKey()), new File(dstLocalPath)); } catch (final AmazonS3Exception ase) { log.error("Error fetching file {} from s3 due to exception {}", srcRemotePath, ase.toString()); throw new GenieServerException("Error downloading file from s3. Filename: " + srcRemotePath, ase); } MetricsUtils.addSuccessTags(tags); } catch (Throwable t) { MetricsUtils.addFailureTagsWithException(tags, t); throw t; } finally { this.registry.timer(DOWNLOAD_TIMER_NAME, tags).record(System.nanoTime() - start, TimeUnit.NANOSECONDS); } }
From source file:com.netflix.genie.web.services.impl.S3FileTransferImpl.java
License:Apache License
/**
 * {@inheritDoc}
 */
@Override
public void putFile(
    @NotBlank(message = "Source local path cannot be empty.") final String srcLocalPath,
    @NotBlank(message = "Destination remote path cannot be empty") final String dstRemotePath
) throws GenieException {
    final long start = System.nanoTime();
    final Set<Tag> tags = Sets.newHashSet();
    try {
        log.debug("Called with src path {} and destination path {}", srcLocalPath, dstRemotePath);

        final AmazonS3URI s3Uri = getS3Uri(dstRemotePath);
        try {
            // Upload the local file to the parsed bucket/key.
            final File source = new File(srcLocalPath);
            this.s3ClientFactory.getClient(s3Uri).putObject(s3Uri.getBucket(), s3Uri.getKey(), source);
        } catch (final AmazonS3Exception ase) {
            log.error("Error posting file {} to s3 due to exception {}", dstRemotePath, ase.toString());
            throw new GenieServerException("Error uploading file to s3. Filename: " + dstRemotePath, ase);
        }
        MetricsUtils.addSuccessTags(tags);
    } catch (Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw t;
    } finally {
        // Record elapsed time whether the transfer succeeded or failed.
        this.registry.timer(UPLOAD_TIMER_NAME, tags)
            .record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    }
}
From source file:com.netflix.genie.web.services.impl.S3FileTransferImpl.java
License:Apache License
/**
 * {@inheritDoc}
 *
 * <p>Fetches the object's metadata from S3 and returns its last-modified
 * timestamp in epoch milliseconds. Failures are wrapped in
 * {@link GenieServerException}; the metadata-timer metric is recorded either way.
 */
@Override
public long getLastModifiedTime(final String path) throws GenieException {
    final long start = System.nanoTime();
    final long lastModTime;
    final Set<Tag> tags = Sets.newHashSet();
    try {
        final AmazonS3URI s3Uri = this.getS3Uri(path);
        try {
            final ObjectMetadata o = this.s3ClientFactory.getClient(s3Uri)
                .getObjectMetadata(s3Uri.getBucket(), s3Uri.getKey());
            lastModTime = o.getLastModified().getTime();
        } catch (final Exception ase) {
            final String message = String.format("Failed getting the metadata of the s3 file %s", path);
            // Fix: pass the exception to the logger so the stack trace is preserved
            // in the log, not just the message text.
            log.error(message, ase);
            throw new GenieServerException(message, ase);
        }
        MetricsUtils.addSuccessTags(tags);
    } catch (Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw t;
    } finally {
        // Record elapsed time whether the lookup succeeded or failed.
        this.registry.timer(GET_METADATA_TIMER_NAME, tags)
            .record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    }
    return lastModTime;
}
From source file:com.netflix.genie.web.services.impl.S3FileTransferImpl.java
License:Apache License
@VisibleForTesting AmazonS3URI getS3Uri(final String path) throws GenieBadRequestException { if (!S3_PREFIX_PATTERN.matcher(path).matches()) { throw new GenieBadRequestException(String.format("Invalid prefix in path for s3 file %s", path)); }//from w ww .jav a 2 s . co m // Delegate validation and parsing to AmazonS3URI. // However it cannot handle "s3n://", so strip the 'n' final String adjustedPath = path.replaceFirst("^s3n://", "s3://"); final AmazonS3URI uri; try { uri = new AmazonS3URI(adjustedPath, false); } catch (IllegalArgumentException e) { throw new GenieBadRequestException(String.format("Invalid path for s3 file %s", path), e); } if (StringUtils.isBlank(uri.getBucket()) || StringUtils.isBlank(uri.getKey())) { throw new GenieBadRequestException( String.format("Invalid blank components in path for s3 file %s", path)); } final boolean bucketPassesStrictValidation = S3_BUCKET_PATTERN.matcher(uri.getBucket()).matches(); final boolean keyPassesStrictValidation = S3_KEY_PATTERN.matcher(uri.getKey()).matches(); // URL fails strict validation check! if (!bucketPassesStrictValidation || !keyPassesStrictValidation) { if (this.s3FileTransferProperties.isStrictUrlCheckEnabled()) { throw new GenieBadRequestException( String.format("Invalid bucket %s in path for s3 file %s", uri.getBucket(), path)); } else { log.warn("S3 URL fails strict validation: \"{}\"", path); this.urlFailingStrictValidationCounter.increment(); } } return uri; }