List of usage examples for com.amazonaws.services.s3.model S3Object getObjectMetadata
public ObjectMetadata getObjectMetadata()
From source file:com.nike.cerberus.operation.core.EnableConfigReplicationOperation.java
License:Apache License
/**
 * "Touches" (re-puts) every existing config file in the environment bucket so
 * that cross-region replication picks it up. Keys starting with "consul" are
 * skipped.
 */
private void touchCurrentFiles() {
    final String bucketName = environmentMetadata.getBucketName();
    logger.info("Touching config files that already exist so they are replicated.");

    // A single listObjects call may return a truncated page; walk every page
    // so no existing config file is missed.
    ObjectListing objectListing = s3Client.listObjects(bucketName);
    while (true) {
        objectListing.getObjectSummaries().forEach(os -> {
            if (!StringUtils.startsWith(os.getKey(), "consul")) {
                logger.debug("Touching {}.", os.getKey());
                // try-with-resources releases the S3Object's underlying HTTP
                // connection once the copy-put has consumed the content stream.
                try (final S3Object object = s3Client.getObject(bucketName, os.getKey())) {
                    s3Client.putObject(bucketName, object.getKey(), object.getObjectContent(),
                            object.getObjectMetadata());
                } catch (final java.io.IOException ignored) {
                    // The put already succeeded; failing to close is non-fatal.
                    logger.warn("Failed to close S3 object {} after touching it.", os.getKey());
                }
            }
        });
        if (!objectListing.isTruncated()) {
            break;
        }
        objectListing = s3Client.listNextBatchOfObjects(objectListing);
    }
}
From source file:com.sangupta.urn.service.impl.AmazonS3UrnStorageServiceImpl.java
License:Apache License
@Override protected UrnObject get(String objectKey) { S3Object object = this.client.getObject(this.bucketName, objectKey); if (object == null) { return null; }/*from w w w . ja v a 2 s. c o m*/ try { InputStream stream = object.getObjectContent(); byte[] bytes = IOUtils.toByteArray(stream); UrnObject urnObject = new UrnObject(objectKey, bytes); // TODO: read and populate metadata ObjectMetadata metadata = object.getObjectMetadata(); if (metadata != null) { if (metadata.getHttpExpiresDate() != null) { urnObject.expiry = metadata.getHttpExpiresDate().getTime(); } urnObject.mime = metadata.getContentType(); urnObject.stored = metadata.getLastModified().getTime(); // TODO:parse the value to extract the filename if available urnObject.name = metadata.getContentDisposition(); } // return the object return urnObject; } catch (IOException e) { // happens when we cannot read data from S3 LOGGER.debug("Exception reading data from S3 for object key: " + objectKey, e); return null; } finally { if (object != null) { try { object.close(); } catch (IOException e) { LOGGER.warn("Unable to close S3 object during/after reading the object"); } } } }
From source file:com.shareplaylearn.models.UserItemManager.java
License:Open Source License
public Response getItem(String contentType, ItemSchema.PresentationType presentationType, String name, String encoding) {/* w w w .j a v a2s .c o m*/ if (encoding != null && encoding.length() > 0 && !AvailableEncodings.isAvailable(encoding)) { return Response.status(Response.Status.UNSUPPORTED_MEDIA_TYPE) .entity("Inner Encoding Type: " + encoding + " not available").build(); } AmazonS3Client s3Client = new AmazonS3Client( new BasicAWSCredentials(SecretsService.amazonClientId, SecretsService.amazonClientSecret)); try { S3Object object = s3Client.getObject(ItemSchema.S3_BUCKET, getItemLocation(name, contentType, presentationType)); try (S3ObjectInputStream inputStream = object.getObjectContent()) { long contentLength = object.getObjectMetadata().getContentLength(); if (contentLength > Limits.MAX_RETRIEVE_SIZE) { throw new IOException("Object is to large: " + contentLength + " bytes."); } int bufferSize = Math.min((int) contentLength, 10 * 8192); byte[] buffer = new byte[bufferSize]; ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); int bytesRead = 0; int totalBytesRead = 0; while ((bytesRead = inputStream.read(buffer)) > 0) { outputStream.write(buffer, 0, bytesRead); totalBytesRead += bytesRead; } log.debug("GET in file resource read: " + totalBytesRead + " bytes."); if (encoding == null || encoding.length() == 0 || encoding.equals(AvailableEncodings.IDENTITY)) { return Response.status(Response.Status.OK).entity(outputStream.toByteArray()).build(); } else if (encoding.equals(AvailableEncodings.BASE64)) { return Response.status(Response.Status.OK) .entity(Base64.encodeAsString(outputStream.toByteArray())).build(); } else { return Response.status(Response.Status.UNSUPPORTED_MEDIA_TYPE) .entity("Inner Encoding Type: " + encoding + " not available").build(); } } } catch (Exception e) { StringWriter sw = new StringWriter(); PrintWriter pw = new PrintWriter(sw); pw.println("\nFailed to retrieve: " + name); e.printStackTrace(pw); log.warn("Failed to 
retrieve: " + name); log.info(Exceptions.asString(e)); return Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(sw.toString()).build(); } }
From source file:com.shelfmap.simplequery.domain.impl.DefaultBlobReference.java
License:Apache License
/**
 * Opens a stream over the remote blob's content.
 *
 * <p>As a side effect the remote object's metadata is cached on this
 * instance, so a later {@code getObjectMetadata()} call can reuse it. The
 * caller owns the returned stream and is responsible for closing it.
 */
@Override
public InputStream getInputStream() {
    final S3Object remote = getS3ObjectRemote();
    this.metadata = remote.getObjectMetadata();
    return remote.getObjectContent();
}
From source file:com.shelfmap.simplequery.domain.impl.DefaultBlobReference.java
License:Apache License
@Override public ObjectMetadata getObjectMetadata() { if (this.metadata == null) { S3Object resource = getS3ObjectRemote(); this.metadata = resource.getObjectMetadata(); }//from w ww . j av a 2 s. c o m return this.metadata; }
From source file:com.sjsu.faceit.example.S3Sample.java
License:Open Source License
public static void main(String[] args) throws IOException { /*/*from w w w . j ava2 s.c o m*/ * Important: Be sure to fill in your AWS access credentials in the * AwsCredentials.properties file before you try to run this * sample. * http://aws.amazon.com/security-credentials */ System.out.println(new File(".").getAbsolutePath()); AmazonS3 s3 = new AmazonS3Client( new PropertiesCredentials(S3Sample.class.getResourceAsStream("AwsCredentials.properties"))); String bucketName = "my-first-s3-bucket-" + UUID.randomUUID(); String key = "MyObjectKey"; System.out.println("==========================================="); System.out.println("Getting Started with Amazon S3"); System.out.println("===========================================\n"); try { /* * Create a new S3 bucket - Amazon S3 bucket names are globally unique, * so once a bucket name has been taken by any user, you can't create * another bucket with that same name. * * You can optionally specify a location for your bucket if you want to * keep your data closer to your applications or users. */ System.out.println("Creating bucket " + bucketName + "\n"); s3.createBucket(bucketName); /* * List the buckets in your account */ System.out.println("Listing buckets"); for (Bucket bucket : s3.listBuckets()) { System.out.println(" - " + bucket.getName()); } System.out.println(); /* * Upload an object to your bucket - You can easily upload a file to * S3, or upload directly an InputStream if you know the length of * the data in the stream. You can also specify your own metadata * when uploading to S3, which allows you set a variety of options * like content-type and content-encoding, plus additional metadata * specific to your applications. 
*/ System.out.println("Uploading a new object to S3 from a file\n"); s3.putObject(new PutObjectRequest(bucketName, "abc/" + key, new File("/Users/prayag/Desktop/2.jpg"))); /* * Download an object - When you download an object, you get all of * the object's metadata and a stream from which to read the contents. * It's important to read the contents of the stream as quickly as * possibly since the data is streamed directly from Amazon S3 and your * network connection will remain open until you read all the data or * close the input stream. * * GetObjectRequest also supports several other options, including * conditional downloading of objects based on modification times, * ETags, and selectively downloading a range of an object. */ System.out.println("Downloading an object"); S3Object object = s3.getObject(new GetObjectRequest(bucketName, "abc/" + key)); System.out.println("Content-Type: " + object.getObjectMetadata().getContentType()); displayTextInputStream(object.getObjectContent()); /* * List objects in your bucket by prefix - There are many options for * listing the objects in your bucket. Keep in mind that buckets with * many objects might truncate their results when listing their objects, * so be sure to check if the returned object listing is truncated, and * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve * additional results. */ System.out.println("Listing objects"); ObjectListing objectListing = s3 .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My")); for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) { System.out.println( " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")"); } System.out.println(); /* * Delete an object - Unless versioning has been turned on for your bucket, * there is no way to undelete an object, so use caution when deleting objects. 
*/ // System.out.println("Deleting an object\n"); // s3.deleteObject(bucketName, key); /* * Delete a bucket - A bucket must be completely empty before it can be * deleted, so remember to delete any objects from your buckets before * you try to delete them. */ // System.out.println("Deleting bucket " + bucketName + "\n"); // s3.deleteBucket(bucketName); } catch (AmazonServiceException ase) { System.out.println("Caught an AmazonServiceException, which means your request made it " + "to Amazon S3, but was rejected with an error response for some reason."); System.out.println("Error Message: " + ase.getMessage()); System.out.println("HTTP Status Code: " + ase.getStatusCode()); System.out.println("AWS Error Code: " + ase.getErrorCode()); System.out.println("Error Type: " + ase.getErrorType()); System.out.println("Request ID: " + ase.getRequestId()); } catch (AmazonClientException ace) { System.out.println("Caught an AmazonClientException, which means the client encountered " + "a serious internal problem while trying to communicate with S3, " + "such as not being able to access the network."); System.out.println("Error Message: " + ace.getMessage()); } }
From source file:com.smoketurner.pipeline.application.core.AmazonS3Downloader.java
License:Apache License
/**
 * Retrieves a file from S3.
 *
 * @param record
 *            S3 event notification record to download
 * @return S3 object
 * @throws AmazonS3ConstraintException
 *             if the etag constraints weren't met
 * @throws AmazonS3ZeroSizeException
 *             if the file size of the object is zero
 */
public S3Object fetch(@Nonnull final S3EventNotificationRecord record)
        throws AmazonS3ConstraintException, AmazonS3ZeroSizeException {
    final AmazonS3Object s3Object = converter.convert(Objects.requireNonNull(record));

    final GetObjectRequest request = new GetObjectRequest(s3Object.getBucketName(), s3Object.getKey());
    s3Object.getVersionId().ifPresent(request::setVersionId);
    s3Object.getETag()
            .ifPresent(etag -> request.setMatchingETagConstraints(Collections.singletonList(etag)));

    LOGGER.debug("Fetching key: {}/{}", s3Object.getBucketName(), s3Object.getKey());

    final S3Object download;
    try {
        download = s3.getObject(request);
    } catch (AmazonServiceException e) {
        LOGGER.error("Service error while fetching object from S3", e);
        throw e;
    } catch (AmazonClientException e) {
        LOGGER.error("Client error while fetching object from S3", e);
        throw e;
    }

    // getObject returns null when the eTag constraint was not satisfied.
    if (download == null) {
        LOGGER.error("eTag from object did not match for key: {}/{}", s3Object.getBucketName(),
                s3Object.getKey());
        throw new AmazonS3ConstraintException(s3Object.getKey());
    }

    final long size = download.getObjectMetadata().getContentLength();
    if (size < 1) {
        // Empty object: release the connection before rejecting it.
        try {
            download.close();
        } catch (IOException e) {
            LOGGER.error(String.format("Failed to close S3 stream for key: %s/%s", download.getBucketName(),
                    download.getKey()), e);
        }
        LOGGER.debug("Object size is zero for key: {}/{}", download.getBucketName(), download.getKey());
        throw new AmazonS3ZeroSizeException(s3Object.getKey());
    }

    LOGGER.debug("Streaming key ({} bytes): {}/{}", size, download.getBucketName(), download.getKey());
    return download;
}
From source file:com.smoketurner.pipeline.application.core.AmazonS3Downloader.java
License:Apache License
/**
 * Determine whether the object is gzipped or not by inspecting the
 * ContentEncoding object property or whether the key ends in .gz
 *
 * @param object
 *            S3 object to inspect (may be null)
 * @return true if the file is gzipped, otherwise false
 */
public static boolean isGZipped(@Nullable final S3Object object) {
    if (object == null) {
        return false;
    }

    // First check the Content-Encoding metadata.
    final String contentEncoding = Strings.nullToEmpty(object.getObjectMetadata().getContentEncoding())
            .trim();
    if (GZIP_ENCODING.equalsIgnoreCase(contentEncoding)) {
        return true;
    }

    // Fall back to the key's file extension.
    final String trimmedKey = Strings.nullToEmpty(object.getKey()).trim();
    return trimmedKey.toLowerCase().endsWith(GZIP_EXTENSION);
}
From source file:com.springboot.demo.framework.aws.s3.S3Sample.java
License:Open Source License
public static void main(String[] args) throws IOException { /*/*from w w w .jav a 2 s . c o m*/ * The ProfileCredentialsProvider will return your [default] * credential profile by reading from the credentials file located at * (~/.aws/credentials). */ AWSCredentials basicCredentials = new BasicAWSCredentials(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY); AWSCredentials credentials = null; try { credentials = new ProfileCredentialsProvider().getCredentials(); } catch (Exception e) { throw new AmazonClientException("Cannot load the credentials from the credential profiles file. " + "Please make sure that your credentials file is at the correct " + "location (~/.aws/credentials), and is in valid format.", e); } /* * Create S3 Client */ AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient(); Region usWest2 = Region.getRegion(Regions.US_WEST_2); s3.setRegion(usWest2); String bucketName = "my-first-s3-bucket-" + UUID.randomUUID(); String key = "MyObjectKey"; System.out.println("==========================================="); System.out.println("Getting Started with Amazon S3"); System.out.println("===========================================\n"); try { /* * Create a new S3 bucket - Amazon S3 bucket names are globally unique, * so once a bucket name has been taken by any user, you can't create * another bucket with that same name. * * You can optionally specify a location for your bucket if you want to * keep your data closer to your applications or users. */ System.out.println("Creating bucket " + bucketName + "\n"); s3.createBucket(bucketName); /* * List the buckets in your account */ System.out.println("Listing buckets"); for (Bucket bucket : s3.listBuckets()) { System.out.println(" - " + bucket.getName()); } System.out.println(); /* * Upload an object to your bucket - You can easily upload a file to * S3, or upload directly an InputStream if you know the length of * the data in the stream. 
You can also specify your own metadata * when uploading to S3, which allows you set a variety of options * like content-type and content-encoding, plus additional metadata * specific to your applications. */ System.out.println("Uploading a new object to S3 from a file\n"); s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile())); /* * Returns an URL for the object stored in the specified bucket and key */ URL url = s3.getUrl(bucketName, key); System.out.println("upload file url : " + url.toString()); /* * Download an object - When you download an object, you get all of * the object's metadata and a stream from which to read the contents. * It's important to read the contents of the stream as quickly as * possibly since the data is streamed directly from Amazon S3 and your * network connection will remain open until you read all the data or * close the input stream. * * GetObjectRequest also supports several other options, including * conditional downloading of objects based on modification times, * ETags, and selectively downloading a range of an object. */ System.out.println("Downloading an object"); S3Object object = s3.getObject(new GetObjectRequest(bucketName, key)); System.out.println("Content-Type: " + object.getObjectMetadata().getContentType()); displayTextInputStream(object.getObjectContent()); /* * List objects in your bucket by prefix - There are many options for * listing the objects in your bucket. Keep in mind that buckets with * many objects might truncate their results when listing their objects, * so be sure to check if the returned object listing is truncated, and * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve * additional results. 
*/ System.out.println("Listing objects"); ObjectListing objectListing = s3 .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My")); for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) { System.out.println( " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")"); } System.out.println(); /* * Delete an object - Unless versioning has been turned on for your bucket, * there is no way to undelete an object, so use caution when deleting objects. */ System.out.println("Deleting an object\n"); s3.deleteObject(bucketName, key); /* * Delete a bucket - A bucket must be completely empty before it can be * deleted, so remember to delete any objects from your buckets before * you try to delete them. */ System.out.println("Deleting bucket " + bucketName + "\n"); s3.deleteBucket(bucketName); } catch (AmazonServiceException ase) { System.out.println("Caught an AmazonServiceException, which means your request made it " + "to Amazon S3, but was rejected with an error response for some reason."); System.out.println("Error Message: " + ase.getMessage()); System.out.println("HTTP Status Code: " + ase.getStatusCode()); System.out.println("AWS Error Code: " + ase.getErrorCode()); System.out.println("Error Type: " + ase.getErrorType()); System.out.println("Request ID: " + ase.getRequestId()); } catch (AmazonClientException ace) { System.out.println("Caught an AmazonClientException, which means the client encountered " + "a serious internal problem while trying to communicate with S3, " + "such as not being able to access the network."); System.out.println("Error Message: " + ace.getMessage()); } }
From source file:com.streamsets.pipeline.stage.origin.s3.AmazonS3Runnable.java
License:Apache License
/**
 * Builds a whole-file record for the given S3 object without downloading its
 * full content.
 *
 * <p>Only a 1-byte ranged GET is issued — enough to obtain the object's
 * metadata (and its ETag when checksum verification is required) — while the
 * actual content is streamed later through the {@code S3FileRef} built here.
 *
 * @param s3ObjectSummary listing entry describing the object to wrap
 * @param recordId        id to assign to the produced record
 * @throws StageException if the parser cannot be created
 */
private void handleWholeFileDataFormat(S3ObjectSummary s3ObjectSummary, String recordId)
        throws StageException {
    S3Object partialS3ObjectForMetadata;
    // partialObject with fetchSize 1 byte.
    // This is mostly used for extracting metadata and such.
    partialS3ObjectForMetadata = AmazonS3Util.getObjectRange(s3Client, s3ConfigBean.s3Config.bucket,
            s3ObjectSummary.getKey(), 1, s3ConfigBean.sseConfig.useCustomerSSEKey,
            s3ConfigBean.sseConfig.customerKey, s3ConfigBean.sseConfig.customerKeyMd5);

    // The file reference defers the real download: it carries the client,
    // SSE settings, buffer size, and rate limit used when the record's
    // content is eventually read.
    S3FileRef.Builder s3FileRefBuilder = new S3FileRef.Builder().s3Client(s3Client)
            .s3ObjectSummary(s3ObjectSummary).useSSE(s3ConfigBean.sseConfig.useCustomerSSEKey)
            .customerKey(s3ConfigBean.sseConfig.customerKey)
            .customerKeyMd5(s3ConfigBean.sseConfig.customerKeyMd5)
            .bufferSize((int) dataParser.suggestedWholeFileBufferSize()).createMetrics(true)
            .totalSizeInBytes(s3ObjectSummary.getSize()).rateLimit(dataParser.wholeFileRateLimit());

    if (dataParser.isWholeFileChecksumRequired()) {
        // 128 bit hex encoded md5 checksum.
        // NOTE(review): the ETag equals the content MD5 only for
        // non-multipart uploads — confirm that assumption holds here.
        s3FileRefBuilder.verifyChecksum(true).checksumAlgorithm(HashingUtil.HashType.MD5)
                .checksum(partialS3ObjectForMetadata.getObjectMetadata().getETag());
    }

    Map<String, Object> metadata = AmazonS3Util.getMetaData(partialS3ObjectForMetadata);
    metadata.put(S3Constants.BUCKET, s3ObjectSummary.getBucketName());
    metadata.put(S3Constants.OBJECT_KEY, s3ObjectSummary.getKey());
    metadata.put(S3Constants.OWNER, s3ObjectSummary.getOwner());
    metadata.put(S3Constants.SIZE, s3ObjectSummary.getSize());
    metadata.put(HeaderAttributeConstants.FILE_NAME, s3ObjectSummary.getKey());
    // The ranged GET reports the 1-byte fetch length; drop it so consumers
    // do not see a misleading content length.
    metadata.remove(S3Constants.CONTENT_LENGTH);

    parser = dataParser.getParser(recordId, metadata, s3FileRefBuilder.build());
    //Object is assigned so that setHeaders() function can use this to get metadata
    //information about the object
    object = partialS3ObjectForMetadata;
}