List of usage examples for com.amazonaws.services.s3.model S3Object getObjectMetadata
public ObjectMetadata getObjectMetadata()
From source file:com.handywedge.binarystore.store.aws.BinaryStoreManagerImpl.java
License:MIT License
@Override public List<BinaryInfo> list(StorageInfo storage, BinaryInfo binary) throws StoreException { logger.debug("={}", storage); logger.debug("?={}", binary); List<BinaryInfo> objInfoList = new ArrayList<BinaryInfo>(); AmazonS3 s3client = getS3Client(binary.getBucketName()); try {//from w w w . j av a2s .c om logger.debug("Listing binaries"); final ListObjectsV2Request req = new ListObjectsV2Request().withBucketName(binary.getBucketName()) .withMaxKeys(2); ListObjectsV2Result result; do { result = s3client.listObjectsV2(req); for (S3ObjectSummary binarySummary : result.getObjectSummaries()) { logger.debug(" - {}(size={})", binarySummary.getKey(), binarySummary.getSize()); if (binarySummary.getSize() != 0) { BinaryInfo objInfo = new BinaryInfo(binary.getBucketName()); objInfo.setFileName(binarySummary.getKey()); objInfo.setSize(binarySummary.getSize()); S3Object s3Object = s3client .getObject(new GetObjectRequest(binary.getBucketName(), binarySummary.getKey())); objInfo.setContentType(s3Object.getObjectMetadata().getContentType()); objInfo.setUrl(s3client.getUrl(binary.getBucketName(), binarySummary.getKey()).toString()); logger.debug("Generating pre-signed URL."); URL PresignedUrl = getPresignedUrl(s3client, binary.getBucketName(), binarySummary.getKey()); objInfo.setPresignedUrl(PresignedUrl.toString()); logger.debug("Pre-Signed URL = " + PresignedUrl.toString()); objInfoList.add(objInfo); } } logger.debug("Next Continuation Token : " + result.getNextContinuationToken()); req.setContinuationToken(result.getNextContinuationToken()); } while (result.isTruncated() == true); } catch (AmazonServiceException ase) { throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.LIST_FAIL, ase, binary.getFileName()); } catch (AmazonClientException ace) { throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.LIST_FAIL, ace, binary.getFileName()); } logger.info(" ={}", objInfoList.size()); return objInfoList; }
From source file:com.imos.sample.S3SampleCheck.java
License:Open Source License
public static void main(String[] args) throws IOException { /*/*from w ww. jav a 2 s . co m*/ * The ProfileCredentialsProvider will return your [default] * credential profile by reading from the credentials file located at * (/home/alok/.aws/credentials). */ AWSCredentials credentials = null; try { credentials = new ProfileCredentialsProvider("default").getCredentials(); } catch (Exception e) { throw new AmazonClientException("Cannot load the credentials from the credential profiles file. " + "Please make sure that your credentials file is at the correct " + "location (/home/alok/.aws/credentials), and is in valid format.", e); } AmazonS3 s3 = new AmazonS3Client(credentials); // Region usWest2 = Region.getRegion(Regions.US_WEST_2); Region usWest2 = Region.getRegion(Regions.AP_SOUTHEAST_1); s3.setRegion(usWest2); String bucketName = "alok-test"; String key = "sample.json"; System.out.println("==========================================="); System.out.println("Getting Started with Amazon S3"); System.out.println("===========================================\n"); try { /* * Create a new S3 bucket - Amazon S3 bucket names are globally unique, * so once a bucket name has been taken by any user, you can't create * another bucket with that same name. * * You can optionally specify a location for your bucket if you want to * keep your data closer to your applications or users. */ // System.out.println("Creating bucket " + bucketName + "\n"); // s3.createBucket(bucketName); /* * List the buckets in your account */ // System.out.println("Listing buckets"); // for (Bucket bucket : s3.listBuckets()) { // System.out.println(" - " + bucket.getName()); // } System.out.println(); /* * Upload an object to your bucket - You can easily upload a file to * S3, or upload directly an InputStream if you know the length of * the data in the stream. 
You can also specify your own metadata * when uploading to S3, which allows you set a variety of options * like content-type and content-encoding, plus additional metadata * specific to your applications. */ System.out.println("Uploading a new object to S3 from a file\n"); //s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile())); s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile())); /* * Download an object - When you download an object, you get all of * the object's metadata and a stream from which to read the contents. * It's important to read the contents of the stream as quickly as * possibly since the data is streamed directly from Amazon S3 and your * network connection will remain open until you read all the data or * close the input stream. * * GetObjectRequest also supports several other options, including * conditional downloading of objects based on modification times, * ETags, and selectively downloading a range of an object. */ System.out.println("Downloading an object"); // S3Object object = s3.getObject(new GetObjectRequest(bucketName, key)); S3Object object = s3.getObject(new GetObjectRequest("alok-test", key)); System.out.println("Content-Type: " + object.getObjectMetadata().getContentType()); displayTextInputStream(object.getObjectContent()); /* * List objects in your bucket by prefix - There are many options for * listing the objects in your bucket. Keep in mind that buckets with * many objects might truncate their results when listing their objects, * so be sure to check if the returned object listing is truncated, and * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve * additional results. 
*/ System.out.println("Listing objects"); ObjectListing objectListing = s3.listObjects(new ListObjectsRequest() // .withBucketName(bucketName) .withBucketName("alok-test")); // .withPrefix("My")); objectListing.getObjectSummaries().forEach((objectSummary) -> { System.out.println( " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")"); }); System.out.println(); /* * Delete an object - Unless versioning has been turned on for your bucket, * there is no way to undelete an object, so use caution when deleting objects. */ // System.out.println("Deleting an object\n"); // s3.deleteObject(bucketName, key); /* * Delete a bucket - A bucket must be completely empty before it can be * deleted, so remember to delete any objects from your buckets before * you try to delete them. */ // System.out.println("Deleting bucket " + bucketName + "\n"); // s3.deleteBucket(bucketName); } catch (AmazonServiceException ase) { System.out.println("Caught an AmazonServiceException, which means your request made it " + "to Amazon S3, but was rejected with an error response for some reason."); System.out.println("Error Message: " + ase.getMessage()); System.out.println("HTTP Status Code: " + ase.getStatusCode()); System.out.println("AWS Error Code: " + ase.getErrorCode()); System.out.println("Error Type: " + ase.getErrorType()); System.out.println("Request ID: " + ase.getRequestId()); } catch (AmazonClientException ace) { System.out.println("Caught an AmazonClientException, which means the client encountered " + "a serious internal problem while trying to communicate with S3, " + "such as not being able to access the network."); System.out.println("Error Message: " + ace.getMessage()); } }
From source file:com.intuit.s3encrypt.S3Encrypt.java
License:Open Source License
/**
 * Reads a single user-metadata value from an S3 object.
 *
 * @param cmd      parsed command line (currently unused here)
 * @param s3       encryption-capable S3 client
 * @param bucket   bucket holding the object
 * @param filename object key
 * @param keyname  user-metadata key to look up
 * @return the metadata value, or null when the key is absent
 */
private static String inspectS3Object(CommandLine cmd, AmazonS3EncryptionClient s3, String bucket,
        String filename, String keyname) {
    System.out.println("Supposed to inspect the BUCKET = " + bucket + " OBJECT = " + filename);
    // FIX: a metadata HEAD request is sufficient to read user metadata. The
    // original getObject() call downloaded the entire object body and never
    // closed the stream, leaking the underlying HTTP connection.
    return s3.getObjectMetadata(bucket, filename).getUserMetadata().get(keyname);
}
From source file:com.jfixby.scarabei.red.aws.test.S3Sample.java
License:Open Source License
public static void main(final String[] args) throws IOException { /*/*ww w . jav a 2 s . com*/ * The ProfileCredentialsProvider will return your [default] credential profile by reading from the credentials file located * at (C:\\Users\\JCode\\.aws\\credentials). */ AWSCredentials credentials = null; try { credentials = new ProfileCredentialsProvider("default").getCredentials(); } catch (final Exception e) { throw new AmazonClientException("Cannot load the credentials from the credential profiles file. " + "Please make sure that your credentials file is at the correct " + "location (C:\\Users\\%USERNAME%\\.aws\\credentials), and is in valid format.", e); } final AmazonS3 s3 = new AmazonS3Client(credentials); final Region usWest2 = Region.getRegion(Regions.US_WEST_2); s3.setRegion(usWest2); final String bucketName = "my-first-s3-bucket-" + UUID.randomUUID(); final String key = "MyObjectKey"; System.out.println("==========================================="); System.out.println("Getting Started with Amazon S3"); System.out.println("===========================================\n"); try { /* * Create a new S3 bucket - Amazon S3 bucket names are globally unique, so once a bucket name has been taken by any user, * you can't create another bucket with that same name. * * You can optionally specify a location for your bucket if you want to keep your data closer to your applications or * users. */ System.out.println("Creating bucket " + bucketName + "\n"); s3.createBucket(bucketName); /* * List the buckets in your account */ System.out.println("Listing buckets"); for (final Bucket bucket : s3.listBuckets()) { System.out.println(" - " + bucket.getName()); } System.out.println(); /* * Upload an object to your bucket - You can easily upload a file to S3, or upload directly an InputStream if you know * the length of the data in the stream. 
You can also specify your own metadata when uploading to S3, which allows you * set a variety of options like content-type and content-encoding, plus additional metadata specific to your * applications. */ System.out.println("Uploading a new object to S3 from a file\n"); s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile())); /* * Download an object - When you download an object, you get all of the object's metadata and a stream from which to read * the contents. It's important to read the contents of the stream as quickly as possibly since the data is streamed * directly from Amazon S3 and your network connection will remain open until you read all the data or close the input * stream. * * GetObjectRequest also supports several other options, including conditional downloading of objects based on * modification times, ETags, and selectively downloading a range of an object. */ System.out.println("Downloading an object"); final S3Object object = s3.getObject(new GetObjectRequest(bucketName, key)); System.out.println("Content-Type: " + object.getObjectMetadata().getContentType()); displayTextInputStream(object.getObjectContent()); /* * List objects in your bucket by prefix - There are many options for listing the objects in your bucket. Keep in mind * that buckets with many objects might truncate their results when listing their objects, so be sure to check if the * returned object listing is truncated, and use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve * additional results. 
*/ System.out.println("Listing objects"); final ObjectListing objectListing = s3 .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My")); for (final S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) { System.out.println( " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")"); } System.out.println(); /* * Delete an object - Unless versioning has been turned on for your bucket, there is no way to undelete an object, so use * caution when deleting objects. */ System.out.println("Deleting an object\n"); s3.deleteObject(bucketName, key); /* * Delete a bucket - A bucket must be completely empty before it can be deleted, so remember to delete any objects from * your buckets before you try to delete them. */ System.out.println("Deleting bucket " + bucketName + "\n"); s3.deleteBucket(bucketName); } catch (final AmazonServiceException ase) { System.out.println("Caught an AmazonServiceException, which means your request made it " + "to Amazon S3, but was rejected with an error response for some reason."); System.out.println("Error Message: " + ase.getMessage()); System.out.println("HTTP Status Code: " + ase.getStatusCode()); System.out.println("AWS Error Code: " + ase.getErrorCode()); System.out.println("Error Type: " + ase.getErrorType()); System.out.println("Request ID: " + ase.getRequestId()); } catch (final AmazonClientException ace) { System.out.println("Caught an AmazonClientException, which means the client encountered " + "a serious internal problem while trying to communicate with S3, " + "such as not being able to access the network."); System.out.println("Error Message: " + ace.getMessage()); } }
From source file:com.liferay.portal.store.s3.S3FileCacheImpl.java
License:Open Source License
/**
 * Returns a local cache file for the given S3 object, creating it from the
 * object's content stream when no sufficiently fresh copy exists.
 *
 * The cache path encodes the cache directory, the current date bucket, the
 * normalized file name, and the S3 last-modified timestamp, so a stale entry
 * never matches an updated object.
 */
@Override
public File getCacheFile(S3Object s3Object, String fileName) throws IOException {
    ObjectMetadata metadata = s3Object.getObjectMetadata();

    Date lastModified = metadata.getLastModified();

    long lastModifiedTime = lastModified.getTime();

    StringBundler sb = new StringBundler(4);

    sb.append(getCacheDirName());
    sb.append(DateUtil.getCurrentDate(_CACHE_DIR_PATTERN, LocaleUtil.getDefault()));
    sb.append(_s3KeyTransformer.getNormalizedFileName(fileName));
    sb.append(lastModifiedTime);

    File cacheFile = new File(sb.toString());

    // Always open (and therefore always close) the content stream, even when the
    // cached copy turns out to be fresh.
    try (InputStream inputStream = s3Object.getObjectContent()) {
        boolean cacheIsFresh = cacheFile.exists() && (cacheFile.lastModified() >= lastModifiedTime);

        if (cacheIsFresh) {
            return cacheFile;
        }

        if (inputStream == null) {
            throw new IOException("S3 object input stream is null");
        }

        FileUtil.mkdirs(cacheFile.getParentFile());

        try (OutputStream outputStream = new FileOutputStream(cacheFile)) {
            StreamUtil.transfer(inputStream, outputStream);
        }
    }

    return cacheFile;
}
From source file:com.lithium.flow.filer.S3Filer.java
License:Apache License
/**
 * Attempts to set the last-modified time of the object at {@code path}
 * (leading '/' stripped to form the S3 key).
 *
 * NOTE(review): S3 does not allow clients to set Last-Modified, and mutating
 * the metadata of a locally fetched S3Object is never sent back to the
 * service — this method is effectively a no-op server-side. Kept for
 * interface compatibility; confirm whether callers rely on it.
 *
 * @throws IOException if the object cannot be fetched or closed
 */
@Override
public void setFileTime(@Nonnull String path, long time) throws IOException {
    S3Object object = s3.getObject(bucket, path.substring(1));
    try {
        ObjectMetadata metadata = object.getObjectMetadata();
        metadata.setLastModified(new Date(time));
        object.setObjectMetadata(metadata);
    } finally {
        // FIX: the original never closed the fetched object, leaking the
        // underlying HTTP connection and its unread content stream.
        object.close();
    }
}
From source file:com.mateusz.mateuszsqs.SQSConfig.java
/**
 * Downloads the object at {@code key}, deletes it, runs it through the image
 * processor, and uploads the processed result under the same key.
 *
 * @throws IOException if reading or buffering the processed stream fails
 */
public static void processFile(String key, AmazonS3 s3, String bucketName) throws IOException {
    System.out.println("Downloading an object");
    S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
    System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
    System.out.println("Deleting an object\n");
    s3.deleteObject(bucketName, key);
    System.out.println("Processing...");
    System.out.println("Uploading a new object to S3 from a file\n");
    InputStream changedStream = procesIamge(object.getObjectContent());

    // FIX: InputStream.available() is only an estimate and is wrong for many
    // stream types, which corrupts the upload's Content-Length. Buffer the
    // processed image so the true length can be sent.
    java.io.ByteArrayOutputStream buffered = new java.io.ByteArrayOutputStream();
    byte[] chunk = new byte[8192];
    int read;
    while ((read = changedStream.read(chunk)) != -1) {
        buffered.write(chunk, 0, read);
    }
    byte[] changedBytes = buffered.toByteArray();

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(changedBytes.length);
    metadata.setLastModified(new Date(System.currentTimeMillis()));
    s3.putObject(new PutObjectRequest(bucketName, key,
            new java.io.ByteArrayInputStream(changedBytes), metadata));

    // FIX: release the downloaded object's HTTP connection; the original never
    // closed it.
    object.close();
}
From source file:com.naryx.tagfusion.cfm.tag.cfCONTENT.java
License:Open Source License
/** * Fetchs a remote object from S3; datasource, bucket, key, aes256key supported * //from w ww .ja v a 2 s .co m * @param props * @param _Session * @throws cfmRunTimeException */ private void remoteFetchS3(cfStructData props, cfSession _Session) throws cfmRunTimeException { if (!props.containsKey("datasource") || !props.containsKey("bucket") || !props.containsKey("key")) throw newRunTimeException("'remote'.type=s3; minimum keys are datasource, bucket and key"); String datasource = props.getData("datasource").getString(); String bucket = props.getData("bucket").getString(); String key = props.getData("key").getString(); // Get the Amazon datasource AmazonKey amazonKey = AmazonKeyFactory.getDS(datasource); if (amazonKey == null) throw newRunTimeException("Amazon Datasource [" + datasource + "] has not been registered; use AmazonRegisterDataSource()"); amazonKey.setDataSource(datasource); AmazonS3 s3Client = new AmazonBase().getAmazonS3(amazonKey); GetObjectRequest gor = new GetObjectRequest(bucket, key); if (props.containsKey("aes256key")) { String aes256key = props.getData("aes256key").getString(); if (!aes256key.isEmpty()) gor.setSSECustomerKey(new SSECustomerKey(aes256key)); } // Get the object try { S3Object s3object = s3Client.getObject(gor); _Session.setContentType(s3object.getObjectMetadata().getContentType()); InputStream in = s3object.getObjectContent(); byte[] buffer = new byte[65536]; int readCount = 0; while ((readCount = in.read(buffer)) != -1) { _Session.write(buffer, 0, readCount); _Session.pageFlush(); } } catch (Exception e) { if (e.getMessage().indexOf("404") != -1) { _Session.setStatus(404); return; } else { cfEngine.log(e.getMessage()); throw newRunTimeException(e.getMessage() + "; key=" + key + "; bucket=" + bucket); } } }
From source file:com.netflix.dynomitemanager.sidecore.backup.S3Restore.java
License:Apache License
/** * Uses the Amazon S3 API to restore from S3 *///from w ww.j a v a 2 s . co m @Override public boolean restoreData(String dateString) { long time = restoreTime(dateString); if (time > -1) { logger.info("Restoring data from S3."); AmazonS3Client s3Client = new AmazonS3Client(cred.getAwsCredentialProvider()); try { /* construct the key for the backup data */ String keyName = config.getBackupLocation() + "/" + iid.getInstance().getDatacenter() + "/" + iid.getInstance().getRack() + "/" + iid.getInstance().getToken() + "/" + time; logger.info("S3 Bucket Name: " + config.getBucketName()); logger.info("Key in Bucket: " + keyName); // Checking if the S3 bucket exists, and if does not, then we create it if (!(s3Client.doesBucketExist(config.getBucketName()))) { logger.error("Bucket with name: " + config.getBucketName() + " does not exist"); } else { S3Object s3object = s3Client.getObject(new GetObjectRequest(config.getBucketName(), keyName)); logger.info("Content-Type: " + s3object.getObjectMetadata().getContentType()); String filepath = null; if (config.isAof()) { filepath = config.getPersistenceLocation() + "/appendonly.aof"; } else { filepath = config.getPersistenceLocation() + "/nfredis.rdb"; } IOUtils.copy(s3object.getObjectContent(), new FileOutputStream(new File(filepath))); } return true; } catch (AmazonServiceException ase) { logger.error("AmazonServiceException;" + " request made it to Amazon S3, but was rejected with an error "); logger.error("Error Message: " + ase.getMessage()); logger.error("HTTP Status Code: " + ase.getStatusCode()); logger.error("AWS Error Code: " + ase.getErrorCode()); logger.error("Error Type: " + ase.getErrorType()); logger.error("Request ID: " + ase.getRequestId()); } catch (AmazonClientException ace) { logger.error("AmazonClientException;" + " the client encountered " + "an internal error while trying to " + "communicate with S3, "); logger.error("Error Message: " + ace.getMessage()); } catch (IOException io) { logger.error("File 
storing error: " + io.getMessage()); } } else { logger.error("Date in FP: " + dateString); } return false; }
From source file:com.netflix.exhibitor.core.backup.s3.MockS3Client.java
License:Apache License
/**
 * Builds a mock client pre-seeded with an optional uploaded object and a
 * fixed listing result.
 *
 * A fresh S3Object is populated from the one supplied so the mock holds its
 * own instance rather than the caller's.
 */
public MockS3Client(S3Object object, ObjectListing listing) {
    if (object != null) {
        S3Object stored = new S3Object();

        stored.setKey(object.getKey());
        stored.setObjectMetadata(object.getObjectMetadata());
        stored.setObjectContent(object.getObjectContent());

        uploads.put(stored.getKey(), stored);
    }

    this.listing = listing;
}