List of usage examples for the com.amazonaws.services.s3.model.GetObjectRequest constructor
public GetObjectRequest(String bucketName, String key)
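Before the full examples below, here is a minimal sketch of the constructor in use. The bucket and key names are placeholders, and it assumes credentials and a region are supplied by the SDK's default provider chain:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.S3Object;

public class GetObjectRequestSketch {
    public static void main(String[] args) throws Exception {
        // Client configured from the default credential/region provider chain.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        GetObjectRequest request = new GetObjectRequest("example-bucket", "example-key");
        // S3Object is Closeable; close it promptly so the HTTP connection is released.
        try (S3Object object = s3.getObject(request)) {
            System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        }
    }
}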
From source file: hydrograph.engine.spark.datasource.utils.AWSS3Util.java
License: Apache License
public void download(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start AWSS3Util download");
    File filecheck = new File(runFileTransferEntity.getLocalPath());
    if (runFileTransferEntity.getFailOnError()
            && !(filecheck.exists() && filecheck.isDirectory())
            && !(runFileTransferEntity.getLocalPath().contains("hdfs://"))) {
        throw new AWSUtilException("Invalid local path");
    }
    boolean done = false;
    int retryAttempt;
    int i;
    String amazonFileUploadLocationOriginal;
    String keyName;
    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();

    for (i = 0; i < retryAttempt; i++) {
        log.info("connection attempt: " + (i + 1));
        try {
            // Build the client from either explicit keys or a credentials properties file.
            AmazonS3 s3Client;
            ClientConfiguration clientConf = new ClientConfiguration();
            clientConf.setProtocol(Protocol.HTTPS);
            if (runFileTransferEntity.getCrediationalPropertiesFile() == null) {
                BasicAWSCredentials creds = new BasicAWSCredentials(runFileTransferEntity.getAccessKeyID(),
                        runFileTransferEntity.getSecretAccessKey());
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            } else {
                File securityFile = new File(runFileTransferEntity.getCrediationalPropertiesFile());
                PropertiesCredentials creds = new PropertiesCredentials(securityFile);
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            }

            // Split the configured bucket path into an optional folder prefix and the key name.
            String s3folderName = null;
            String filepath = runFileTransferEntity.getFolder_name_in_bucket();
            if (filepath.lastIndexOf("/") != -1) {
                s3folderName = filepath.substring(0, filepath.lastIndexOf("/"));
                keyName = filepath.substring(filepath.lastIndexOf("/") + 1);
            } else {
                keyName = filepath;
            }
            log.debug("keyName is: " + keyName);
            log.debug("bucket name is: " + runFileTransferEntity.getBucketName());
            log.debug("folder name is: " + runFileTransferEntity.getFolder_name_in_bucket());
            if (s3folderName != null) {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName() + "/" + s3folderName;
            } else {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName();
            }

            if (runFileTransferEntity.getLocalPath().contains("hdfs://")) {
                // HDFS target: stage the object in /tmp, then copy it into HDFS.
                String outputPath = runFileTransferEntity.getLocalPath();
                String s1 = outputPath.substring(7, outputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                File f = new File("/tmp");
                if (!f.exists())
                    f.mkdir();
                GetObjectRequest request = new GetObjectRequest(amazonFileUploadLocationOriginal, keyName);
                S3Object object = s3Client.getObject(request);
                if (runFileTransferEntity.getEncoding() != null)
                    object.getObjectMetadata().setContentEncoding(runFileTransferEntity.getEncoding());
                File fexist = new File(runFileTransferEntity.getLocalPath() + File.separatorChar + keyName);
                if (runFileTransferEntity.getOverwrite().trim().equalsIgnoreCase("Overwrite If Exists")) {
                    S3ObjectInputStream objectContent = object.getObjectContent();
                    IOUtils.copyLarge(objectContent, new FileOutputStream("/tmp/" + keyName));
                } else {
                    if (!(fexist.exists() && !fexist.isDirectory())) {
                        S3ObjectInputStream objectContent = object.getObjectContent();
                        IOUtils.copyLarge(objectContent, new FileOutputStream(
                                runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                    } else {
                        log.error("File already exists");
                        throw new AWSUtilException("File already exists");
                    }
                }
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);
                String s = outputPath.substring(7, outputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());
                Path local = new Path("/tmp/" + keyName);
                Path hdfs = new Path(hdfspath);
                hdfsFileSystem.copyFromLocalFile(local, hdfs);
            } else {
                // Local filesystem target: write the object directly to the local path.
                GetObjectRequest request = new GetObjectRequest(amazonFileUploadLocationOriginal, keyName);
                S3Object object = s3Client.getObject(request);
                if (runFileTransferEntity.getEncoding() != null)
                    object.getObjectMetadata().setContentEncoding(runFileTransferEntity.getEncoding());
                File fexist = new File(runFileTransferEntity.getLocalPath() + File.separatorChar + keyName);
                if (runFileTransferEntity.getOverwrite().trim().equalsIgnoreCase("Overwrite If Exists")) {
                    S3ObjectInputStream objectContent = object.getObjectContent();
                    IOUtils.copyLarge(objectContent, new FileOutputStream(
                            runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                } else {
                    if (!(fexist.exists() && !fexist.isDirectory())) {
                        S3ObjectInputStream objectContent = object.getObjectContent();
                        IOUtils.copyLarge(objectContent, new FileOutputStream(
                                runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                    } else {
                        log.error("File already exists");
                        throw new AWSUtilException("File already exists");
                    }
                }
            }
        } catch (AmazonServiceException e) {
            log.error("Amazon Service Exception", e);
            if (e.getStatusCode() == 403 || e.getStatusCode() == 404) {
                if (runFileTransferEntity.getFailOnError()) {
                    log.error("Incorrect details provided. Please provide correct details", e);
                    throw new AWSUtilException("Incorrect details provided");
                } else {
                    log.error("Unknown Amazon exception occurred", e);
                }
            }
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                log.error("Exception occurred while sleeping the thread");
            }
            continue;
        } catch (Error e) {
            log.error("Error occurred during download");
            throw new AWSUtilException(e);
        } catch (Exception e) {
            log.error("error while transferring file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                log.error("Exception occurred while sleeping the thread");
            } catch (Error err) {
                log.error("Error occurred while downloading");
                throw new AWSUtilException(err);
            }
            continue;
        }
        done = true;
        break;
    }

    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new AWSUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    if (i == runFileTransferEntity.getRetryAttempt()) {
        if (runFileTransferEntity.getFailOnError()) {
            throw new AWSUtilException("File transfer failed");
        }
    }
    log.debug("Finished AWSS3Util download");
}
From source file: ics.uci.edu.amazons3.S3Sample.java
License: Open Source License
public static void main(String[] args) throws IOException {
    /*
     * This sample needs AWS credentials. Never hardcode or commit real keys;
     * the literals below are placeholders. For details, see
     * http://aws.amazon.com/security-credentials
     */
    final AmazonS3 s3 = new AmazonS3Client(
            new BasicAWSCredentials("YOUR_ACCESS_KEY_ID", "YOUR_SECRET_ACCESS_KEY"));

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file: imperial.modaclouds.monitoring.datacollectors.monitors.DetailedCostMonitor.java
License: BSD License
@Override
public void run() {
    String accessKeyId = null;
    String secretKey = null;
    ObjectListing objects = null;
    AmazonS3Client s3Client = null;
    String key = null;
    long startTime = 0;

    while (!dcmt.isInterrupted()) {
        // Refresh monitoring parameters and re-list the bucket at most once every 10 seconds.
        if (System.currentTimeMillis() - startTime > 10000) {
            cost_nonspot = new HashMap<String, Double>();
            cost_spot = new HashMap<String, Double>();

            for (String metric : getProvidedMetrics()) {
                try {
                    VM resource = new VM(Config.getInstance().getVmType(), Config.getInstance().getVmId());
                    if (dcAgent.shouldMonitor(resource, metric)) {
                        Map<String, String> parameters = dcAgent.getParameters(resource, metric);
                        accessKeyId = parameters.get("accessKey");
                        secretKey = parameters.get("secretKey");
                        bucketName = parameters.get("bucketName");
                        filePath = parameters.get("filePath");
                        period = Integer.valueOf(parameters.get("samplingTime")) * 1000;
                    }
                } catch (NumberFormatException e) {
                    e.printStackTrace();
                } catch (ConfigurationException e) {
                    e.printStackTrace();
                }
            }

            startTime = System.currentTimeMillis();

            AWSCredentials credentials = new BasicAWSCredentials(accessKeyId, secretKey);
            s3Client = new AmazonS3Client(credentials);
            objects = s3Client.listObjects(bucketName);

            // The detailed billing report object is named after the current month.
            key = "aws-billing-detailed-line-items-with-resources-and-tags-";
            SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM");
            String date = sdf.format(new Date());
            key = key + date + ".csv.zip";
        }

        String fileName = null;
        do {
            for (S3ObjectSummary objectSummary : objects.getObjectSummaries()) {
                System.out.println(objectSummary.getKey() + "\t" + objectSummary.getSize() + "\t"
                        + StringUtils.fromDate(objectSummary.getLastModified()));
                if (objectSummary.getKey().contains(key)) {
                    fileName = objectSummary.getKey();
                    s3Client.getObject(new GetObjectRequest(bucketName, fileName),
                            new File(filePath + fileName));
                    break;
                }
            }
            objects = s3Client.listNextBatchOfObjects(objects);
        } while (objects.isTruncated());

        try {
            ZipFile zipFile = new ZipFile(filePath + fileName);
            zipFile.extractAll(filePath);
        } catch (ZipException e) {
            e.printStackTrace();
        }

        String csvFileName = fileName.replace(".zip", "");
        AnalyseFile(filePath + csvFileName);

        try {
            Thread.sleep(period);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
        }
    }
}
From source file: io.andromeda.logcollector.S3FileReader.java
License: Apache License
@Override public Observable<String> source(String path) { ParsePath parsePath = new ParsePath(path).invoke(); if (parsePath.isAvailable()) return Observable.empty(); String bucket = parsePath.getBucket(); String key = parsePath.getKey(); return Observable.create(subscriber -> { try {/*www.j a v a 2s. c om*/ S3ObjectInputStream s3ObjectInputStream = s3.getObject(new GetObjectRequest(bucket, key)) .getObjectContent(); InputStream inputStream; if (path.endsWith(".gz")) { inputStream = new GZIPInputStream(s3ObjectInputStream); } else { inputStream = s3ObjectInputStream; } BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream)); while (!subscriber.isUnsubscribed()) { String line = reader.readLine(); if (line == null) { subscriber.onCompleted(); reader.close(); inputStream.close(); break; } subscriber.onNext(line); } } catch (IOException e) { subscriber.onError(e); } }); }
From source file: io.dockstore.common.FileProvisioning.java
License: Apache License
private void downloadFromS3(String path, String targetFilePath) {
    AmazonS3 s3Client = getAmazonS3Client(config);
    String trimmedPath = path.replace("s3://", "");
    List<String> splitPathList = Lists.newArrayList(trimmedPath.split("/"));
    String bucketName = splitPathList.remove(0);

    S3Object object = s3Client.getObject(new GetObjectRequest(bucketName, Joiner.on("/").join(splitPathList)));
    // try-with-resources ensures both streams are closed even if the copy fails
    try (S3ObjectInputStream inputStream = object.getObjectContent();
            FileOutputStream outputStream = new FileOutputStream(new File(targetFilePath))) {
        long inputSize = object.getObjectMetadata().getContentLength();
        copyFromInputStreamToOutputStream(inputStream, inputSize, outputStream);
    } catch (IOException e) {
        LOG.error(e.getMessage());
        throw new RuntimeException("Could not provision input files from S3", e);
    }
}
From source file: io.druid.firehose.s3.StaticS3FirehoseFactory.java
License: Apache License
@Override
protected InputStream openObjectStream(S3ObjectSummary object, long start) throws IOException {
    final GetObjectRequest request = new GetObjectRequest(object.getBucketName(), object.getKey());
    // single-argument setRange reads from the given offset to the end of the object
    request.setRange(start);
    try {
        final S3Object s3Object = s3Client.getObject(request);
        if (s3Object == null) {
            throw new ISE("Failed to get an s3 object for bucket[%s], key[%s], and start[%d]",
                    object.getBucketName(), object.getKey(), start);
        }
        return s3Object.getObjectContent();
    } catch (AmazonS3Exception e) {
        throw new IOException(e);
    }
}
From source file: io.druid.storage.s3.S3TaskLogs.java
License: Apache License
@Override public Optional<ByteSource> streamTaskLog(final String taskid, final long offset) throws IOException { final String taskKey = getTaskLogKey(taskid); try {//from w w w .ja va 2s . c o m final ObjectMetadata objectMetadata = service.getObjectMetadata(config.getS3Bucket(), taskKey); return Optional.of(new ByteSource() { @Override public InputStream openStream() throws IOException { try { final long start; final long end = objectMetadata.getContentLength() - 1; if (offset > 0 && offset < objectMetadata.getContentLength()) { start = offset; } else if (offset < 0 && (-1 * offset) < objectMetadata.getContentLength()) { start = objectMetadata.getContentLength() + offset; } else { start = 0; } final GetObjectRequest request = new GetObjectRequest(config.getS3Bucket(), taskKey) .withMatchingETagConstraint(objectMetadata.getETag()).withRange(start, end); return service.getObject(request).getObjectContent(); } catch (AmazonServiceException e) { throw new IOException(e); } } }); } catch (AmazonS3Exception e) { if (404 == e.getStatusCode() || "NoSuchKey".equals(e.getErrorCode()) || "NoSuchBucket".equals(e.getErrorCode())) { return Optional.absent(); } else { throw new IOE(e, "Failed to stream logs from: %s", taskKey); } } }
From source file: io.druid.storage.s3.ServerSideEncryptingAmazonS3.java
License: Apache License
public S3Object getObject(String bucket, String key) {
    return getObject(new GetObjectRequest(bucket, key));
}
From source file: io.konig.camel.aws.s3.DeleteObjectConsumer.java
License: Apache License
@Override
protected int poll() throws Exception {
    // must reset for each poll
    shutdownRunningTask = null;
    pendingExchanges = 0;

    String fileName = getConfiguration().getFileName();
    String bucketName = getConfiguration().getBucketName();
    Queue<Exchange> exchanges;

    if (fileName != null) {
        LOG.trace("Getting object in bucket [{}] with file name [{}]...", bucketName, fileName);
        S3Object s3Object = getAmazonS3Client().getObject(new GetObjectRequest(bucketName, fileName));
        exchanges = createExchanges(s3Object);
    } else {
        LOG.trace("Queueing objects in bucket [{}]...", bucketName);

        ListObjectsRequest listObjectsRequest = new ListObjectsRequest();
        listObjectsRequest.setBucketName(bucketName);
        listObjectsRequest.setPrefix(getConfiguration().getPrefix());
        if (maxMessagesPerPoll > 0) {
            listObjectsRequest.setMaxKeys(maxMessagesPerPoll);
        }
        // if there was a marker from the previous poll then use that to continue from where we left off last time
        if (marker != null) {
            LOG.trace("Resuming from marker: {}", marker);
            listObjectsRequest.setMarker(marker);
        }

        ObjectListing listObjects = getAmazonS3Client().listObjects(listObjectsRequest);
        if (listObjects.isTruncated()) {
            marker = listObjects.getNextMarker();
            LOG.trace("Returned list is truncated, so setting next marker: {}", marker);
        } else {
            // no more data so clear marker
            marker = null;
        }
        if (LOG.isTraceEnabled()) {
            LOG.trace("Found {} objects in bucket [{}]...", listObjects.getObjectSummaries().size(),
                    bucketName);
        }
        exchanges = createExchanges(listObjects.getObjectSummaries());
    }
    return processBatch(CastUtils.cast(exchanges));
}
From source file: io.milton.s3.AmazonS3ManagerImpl.java
License: Open Source License
@Override
public boolean downloadEntity(String bucketName, String keyNotAvailable, File destinationFile) {
    LOG.info("Gets the object metadata for the object stored in Amazon S3 under the specified bucket "
            + bucketName + " and key " + keyNotAvailable
            + ", and saves the object contents to the specified file " + destinationFile);
    try {
        ObjectMetadata objectMetadata = amazonS3Client
                .getObject(new GetObjectRequest(bucketName, keyNotAvailable), destinationFile);
        if (objectMetadata != null) {
            return true;
        }
    } catch (AmazonServiceException ase) {
        LOG.warn(ase.getMessage(), ase);
    } catch (AmazonClientException ace) {
        LOG.warn(ace.getMessage(), ace);
    }
    return false;
}
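The example above relies on the getObject(GetObjectRequest, File) overload, which streams the object body straight to disk and returns only its metadata. A minimal, hypothetical usage sketch (bucket, key, and target path are placeholders):

import java.io.File;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.ObjectMetadata;

class DownloadToFileSketch {
    static void downloadToFile(AmazonS3 s3) {
        File target = new File("/tmp/example-download");
        // The body is written directly to the file; only the metadata is returned.
        ObjectMetadata metadata = s3.getObject(new GetObjectRequest("example-bucket", "example-key"), target);
        System.out.println("Downloaded " + metadata.getContentLength() + " bytes to " + target.getAbsolutePath());
    }
}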