List of usage examples for com.amazonaws.services.s3.transfer Upload getDescription
public String getDescription();
From source file:aws.example.s3.XferMgrProgress.java
License:Open Source License
public static void showMultiUploadProgress(MultipleFileUpload multi_upload) { // print the upload's human-readable description System.out.println(multi_upload.getDescription()); Collection<? extends Upload> sub_xfers = new ArrayList<Upload>(); sub_xfers = multi_upload.getSubTransfers(); do {/*from w w w .java 2s . co m*/ System.out.println("\nSubtransfer progress:\n"); for (Upload u : sub_xfers) { System.out.println(" " + u.getDescription()); if (u.isDone()) { TransferState xfer_state = u.getState(); System.out.println(" " + xfer_state); } else { TransferProgress progress = u.getProgress(); double pct = progress.getPercentTransferred(); printProgressBar(pct); System.out.println(); } } // wait a bit before the next update. try { Thread.sleep(200); } catch (InterruptedException e) { return; } } while (multi_upload.isDone() == false); // print the final state of the transfer. TransferState xfer_state = multi_upload.getState(); System.out.println("\nMultipleFileUpload " + xfer_state); }
From source file:ch.entwine.weblounge.maven.S3DeployMojo.java
License:Open Source License
/** * /*from ww w .ja v a 2s.c om*/ * {@inheritDoc} * * @see org.apache.maven.plugin.Mojo#execute() */ public void execute() throws MojoExecutionException, MojoFailureException { // Setup AWS S3 client AWSCredentials credentials = new BasicAWSCredentials(awsAccessKey, awsSecretKey); AmazonS3Client uploadClient = new AmazonS3Client(credentials); TransferManager transfers = new TransferManager(credentials); // Make sure key prefix does not start with a slash but has one at the // end if (keyPrefix.startsWith("/")) keyPrefix = keyPrefix.substring(1); if (!keyPrefix.endsWith("/")) keyPrefix = keyPrefix + "/"; // Keep track of how much data has been transferred long totalBytesTransferred = 0L; int items = 0; Queue<Upload> uploads = new LinkedBlockingQueue<Upload>(); try { // Check if S3 bucket exists getLog().debug("Checking whether bucket " + bucket + " exists"); if (!uploadClient.doesBucketExist(bucket)) { getLog().error("Desired bucket '" + bucket + "' does not exist!"); return; } getLog().debug("Collecting files to transfer from " + resources.getDirectory()); List<File> res = getResources(); for (File file : res) { // Make path of resource relative to resources directory String filename = file.getName(); String extension = FilenameUtils.getExtension(filename); String path = file.getPath().substring(resources.getDirectory().length()); String key = concat("/", keyPrefix, path).substring(1); // Delete old file version in bucket getLog().debug("Removing existing object at " + key); uploadClient.deleteObject(bucket, key); // Setup meta data ObjectMetadata meta = new ObjectMetadata(); meta.setCacheControl("public, max-age=" + String.valueOf(valid * 3600)); FileInputStream fis = null; GZIPOutputStream gzipos = null; final File fileToUpload; if (gzip && ("js".equals(extension) || "css".equals(extension))) { try { fis = new FileInputStream(file); File gzFile = File.createTempFile(file.getName(), null); gzipos = new GZIPOutputStream(new FileOutputStream(gzFile)); 
IOUtils.copy(fis, gzipos); fileToUpload = gzFile; meta.setContentEncoding("gzip"); if ("js".equals(extension)) meta.setContentType("text/javascript"); if ("css".equals(extension)) meta.setContentType("text/css"); } catch (FileNotFoundException e) { getLog().error(e); continue; } catch (IOException e) { getLog().error(e); continue; } finally { IOUtils.closeQuietly(fis); IOUtils.closeQuietly(gzipos); } } else { fileToUpload = file; } // Do a random check for existing errors before starting the next upload if (erroneousUpload != null) break; // Create put object request long bytesToTransfer = fileToUpload.length(); totalBytesTransferred += bytesToTransfer; PutObjectRequest request = new PutObjectRequest(bucket, key, fileToUpload); request.setProgressListener(new UploadListener(credentials, bucket, key, bytesToTransfer)); request.setMetadata(meta); // Schedule put object request getLog().info( "Uploading " + key + " (" + FileUtils.byteCountToDisplaySize((int) bytesToTransfer) + ")"); Upload upload = transfers.upload(request); uploads.add(upload); items++; } } catch (AmazonServiceException e) { getLog().error("Uploading resources failed: " + e.getMessage()); } catch (AmazonClientException e) { getLog().error("Uploading resources failed: " + e.getMessage()); } // Wait for uploads to be finished String currentUpload = null; try { Thread.sleep(1000); getLog().info("Waiting for " + uploads.size() + " uploads to finish..."); while (!uploads.isEmpty()) { Upload upload = uploads.poll(); currentUpload = upload.getDescription().substring("Uploading to ".length()); if (TransferState.InProgress.equals(upload.getState())) getLog().debug("Waiting for upload " + currentUpload + " to finish"); upload.waitForUploadResult(); } } catch (AmazonServiceException e) { throw new MojoExecutionException("Error while uploading " + currentUpload); } catch (AmazonClientException e) { throw new MojoExecutionException("Error while uploading " + currentUpload); } catch (InterruptedException e) { 
getLog().debug("Interrupted while waiting for upload to finish"); } // Check for errors that happened outside of the actual uploading if (erroneousUpload != null) { throw new MojoExecutionException("Error while uploading " + erroneousUpload); } getLog().info("Deployed " + items + " files (" + FileUtils.byteCountToDisplaySize((int) totalBytesTransferred) + ") to s3://" + bucket); }
From source file:com.davidsoergel.s3napback.S3ops.java
License:Apache License
public static void upload(TransferManager tx, String bucket, String filename, int chunkSize) throws InterruptedException, IOException { //throw new NotImplementedException(); // break input stream into chunks // fully read each chunk into memory before sending, in order to know the size and the md5 // ** prepare the next chunk while the last is sending; need to deal with multithreading properly // ** 4 concurrent streams? InputStream in = new BufferedInputStream(System.in); int chunkNum = 0; while (in.available() > 0) { byte[] buf = new byte[chunkSize]; int bytesRead = in.read(buf); String md5 = new MD5(buf); // presume AWS does its own buffering, no need for BufferedInputStream (?) ObjectMetadata meta = new ObjectMetadata(); meta.setContentLength(bytesRead); meta.setContentMD5(md5);/* w w w . j a v a 2s .c o m*/ Upload myUpload = tx.upload(bucket, filename + ":" + chunkNum, new ByteArrayInputStream(buf), meta); UploadResult result = myUpload.waitForUploadResult(); while (myUpload.isDone() == false) { System.out.println("Transfer: " + myUpload.getDescription()); System.out.println(" - State: " + myUpload.getState()); System.out.println(" - Progress: " + myUpload.getProgress().getBytesTransfered()); // Do work while we wait for our upload to complete... Thread.sleep(500); } } }
From source file:com.github.abhinavmishra14.aws.s3.service.impl.AwsS3IamServiceImpl.java
License:Open Source License
@Override public boolean uploadObjectAndListenProgress(final String bucketName, final String fileName, final InputStream inputStream, final CannedAccessControlList cannedAcl) throws AmazonClientException, AmazonServiceException, IOException { LOGGER.info(/*from w w w.j av a 2s .c o m*/ "uploadObjectAndListenProgress invoked, bucketName: {} , fileName: {} and cannedAccessControlList: {}", bucketName, fileName, cannedAcl); File tempFile = null; PutObjectRequest putObjectRequest = null; Upload upload = null; try { // Create temporary file from stream to avoid 'out of memory' exception tempFile = AWSUtil.createTempFileFromStream(inputStream); putObjectRequest = new PutObjectRequest(bucketName, fileName, tempFile).withCannedAcl(cannedAcl); final TransferManager transferMgr = new TransferManager(s3client); upload = transferMgr.upload(putObjectRequest); // You can poll your transfer's status to check its progress if (upload.isDone()) { LOGGER.info("Start: {} , State: {} and Progress (%): {}", upload.getDescription(), upload.getState(), upload.getProgress().getPercentTransferred()); } // Add progressListener to listen asynchronous notifications about your transfer's progress // Uncomment below code snippet during development /*upload.addProgressListener(new ProgressListener() { public void progressChanged(ProgressEvent event) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Transferred bytes: " + (long) event.getBytesTransferred()); } } });*/ try { //Block the current thread and wait for completion //If the transfer fails AmazonClientException will be thrown upload.waitForCompletion(); } catch (AmazonClientException | InterruptedException excp) { LOGGER.error("Exception occured while waiting for transfer: ", excp); } } finally { AWSUtil.deleteTempFile(tempFile); // Delete the temporary file once uploaded } LOGGER.info("End: {} , State: {} , Progress (%): {}", upload.getDescription(), upload.getState(), upload.getProgress().getPercentTransferred()); return upload.isDone(); }
From source file:com.github.abhinavmishra14.aws.s3.service.impl.AwsS3IamServiceImpl.java
License:Open Source License
@Override public boolean uploadObjectAndListenProgress(final String bucketName, final String fileName, final InputStream inputStream, final boolean isPublicAccessible) throws AmazonClientException, AmazonServiceException, IOException { LOGGER.info(/*from w ww .j a v a 2 s .com*/ "uploadObjectAndListenProgress invoked, bucketName: {} , fileName: {} and isPublicAccessible: {}", bucketName, fileName, isPublicAccessible); File tempFile = null; PutObjectRequest putObjectRequest = null; Upload upload = null; try { // Create temporary file from stream to avoid 'out of memory' exception tempFile = AWSUtil.createTempFileFromStream(inputStream); putObjectRequest = new PutObjectRequest(bucketName, fileName, tempFile); if (isPublicAccessible) { putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead); } final TransferManager transferMgr = new TransferManager(s3client); upload = transferMgr.upload(putObjectRequest); // You can poll your transfer's status to check its progress if (upload.isDone()) { LOGGER.info("Start: {} , State: {} and Progress (%): {}", upload.getDescription(), upload.getState(), upload.getProgress().getPercentTransferred()); } // Add progressListener to listen asynchronous notifications about your transfer's progress // Uncomment below code snippet during development /*upload.addProgressListener(new ProgressListener() { public void progressChanged(ProgressEvent event) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Transferred bytes: " + (long) event.getBytesTransferred()); } } });*/ try { //Block the current thread and wait for completion //If the transfer fails AmazonClientException will be thrown upload.waitForCompletion(); } catch (AmazonClientException | InterruptedException excp) { LOGGER.error("Exception occured while waiting for transfer: ", excp); } } finally { AWSUtil.deleteTempFile(tempFile); // Delete the temporary file once uploaded } LOGGER.info("End: {} , State: {} , Progress (%): {}", upload.getDescription(), upload.getState(), 
upload.getProgress().getPercentTransferred()); return upload.isDone(); }
From source file:surrey.repository.impl.S3RepositoryFile.java
License:Open Source License
@Override public void write(InputStream source, long size) throws IOException { ObjectMetadata meta = new ObjectMetadata(); meta.setContentLength(size);//from w w w .j a v a 2 s. c o m Upload upload = transferManager.upload(bucketName, key, source, meta); logger.info("Uploading to S3: " + upload.getDescription()); try { upload.waitForUploadResult(); } catch (Exception e) { logger.error("Failed to upload: " + upload.getDescription() + "\n" + e, e); } }
From source file:surrey.repository.impl.S3RepositoryFile.java
License:Open Source License
/**
 * Uploads the given file to S3 at this file's bucket/key and blocks until
 * the transfer finishes.
 *
 * @param source local file to upload
 * @throws IOException if the upload fails or is interrupted; previously
 *         failures were only logged and silently swallowed, leaving callers
 *         believing the write succeeded
 */
@Override
public void write(File source) throws IOException {
    Upload upload = transferManager.upload(bucketName, key, source);
    logger.info("Uploading to S3: " + upload.getDescription());
    try {
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can observe the interruption.
        Thread.currentThread().interrupt();
        logger.error("Failed to upload: " + upload.getDescription() + "\n" + e, e);
        throw new IOException("Interrupted while uploading: " + upload.getDescription(), e);
    } catch (Exception e) {
        logger.error("Failed to upload: " + upload.getDescription() + "\n" + e, e);
        throw new IOException("Failed to upload: " + upload.getDescription(), e);
    }
}