Usage examples for com.amazonaws.services.s3.transfer.Upload#waitForUploadResult
public UploadResult waitForUploadResult() throws AmazonClientException, AmazonServiceException, InterruptedException;
From source file:org.apache.hadoop.fs.s3r.S3ROutputStream.java
License:Apache License
@Override public synchronized void close() throws IOException { if (closed) { return;//w w w . j av a 2s . com } backupStream.close(); if (LOG.isDebugEnabled()) { LOG.debug("OutputStream for key '" + key + "' closed. Now beginning upload"); LOG.debug("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold); } try { final ObjectMetadata om = new ObjectMetadata(); if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) { om.setServerSideEncryption(serverSideEncryptionAlgorithm); } PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile); putObjectRequest.setCannedAcl(cannedACL); putObjectRequest.setMetadata(om); Upload upload = transfers.upload(putObjectRequest); ProgressableProgressListener listener = new ProgressableProgressListener(upload, progress, statistics); upload.addProgressListener(listener); upload.waitForUploadResult(); long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred(); if (statistics != null && delta != 0) { if (LOG.isDebugEnabled()) { LOG.debug("S3A write delta changed after finished: " + delta + " bytes"); } statistics.incrementBytesWritten(delta); } // This will delete unnecessary fake parent directories fs.finishedWrite(key); } catch (InterruptedException e) { throw new IOException(e); } finally { if (!backupFile.delete()) { LOG.warn("Could not delete temporary s3a file: {}", backupFile); } super.close(); closed = true; } if (LOG.isDebugEnabled()) { LOG.debug("OutputStream for key '" + key + "' upload complete"); } }
From source file:org.apache.jackrabbit.aws.ext.ds.S3Backend.java
License:Apache License
private void write(DataIdentifier identifier, File file, boolean asyncUpload, AsyncUploadCallback callback) throws DataStoreException { String key = getKeyName(identifier); ObjectMetadata objectMetaData = null; long start = System.currentTimeMillis(); ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); try {/*from w w w . j a v a 2 s. c o m*/ Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); // check if the same record already exists try { objectMetaData = s3service.getObjectMetadata(bucket, key); } catch (AmazonServiceException ase) { if (ase.getStatusCode() != 404) { throw ase; } } if (objectMetaData != null) { long l = objectMetaData.getContentLength(); if (l != file.length()) { throw new DataStoreException( "Collision: " + key + " new length: " + file.length() + " old length: " + l); } LOG.debug("[{}]'s exists, lastmodified = [{}]", key, objectMetaData.getLastModified().getTime()); CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key); copReq.setNewObjectMetadata(objectMetaData); s3service.copyObject(copReq); LOG.debug("lastModified of [{}] updated successfully.", identifier); if (callback != null) { callback.onSuccess(new AsyncUploadResult(identifier, file)); } } if (objectMetaData == null) { try { // start multipart parallel upload using amazon sdk Upload up = tmx.upload(new PutObjectRequest(bucket, key, file)); // wait for upload to finish if (asyncUpload) { up.addProgressListener(new S3UploadProgressListener(up, identifier, file, callback)); LOG.debug("added upload progress listener to identifier [{}]", identifier); } else { up.waitForUploadResult(); LOG.debug("synchronous upload to identifier [{}] completed.", identifier); if (callback != null) { callback.onSuccess(new AsyncUploadResult(identifier, file)); } } } catch (Exception e2) { if (!asyncUpload) { callback.onAbort(new AsyncUploadResult(identifier, file)); } throw new DataStoreException("Could not upload " + key, e2); } } } 
finally { if (contextClassLoader != null) { Thread.currentThread().setContextClassLoader(contextClassLoader); } } LOG.debug("write of [{}], length=[{}], in async mode [{}], in [{}]ms", new Object[] { identifier, file.length(), asyncUpload, (System.currentTimeMillis() - start) }); }
From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java
License:Apache License
public void addMetadataRecord(final InputStream input, final String name) throws DataStoreException { ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); try {// ww w . j a va2 s . c o m Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); Upload upload = tmx.upload(s3ReqDecorator .decorate(new PutObjectRequest(bucket, addMetaKeyPrefix(name), input, new ObjectMetadata()))); upload.waitForUploadResult(); } catch (InterruptedException e) { LOG.error("Error in uploading", e); throw new DataStoreException("Error in uploading", e); } finally { if (contextClassLoader != null) { Thread.currentThread().setContextClassLoader(contextClassLoader); } } }
From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java
License:Apache License
private void write(DataIdentifier identifier, File file, boolean asyncUpload, AsyncUploadCallback callback) throws DataStoreException { String key = getKeyName(identifier); ObjectMetadata objectMetaData = null; long start = System.currentTimeMillis(); ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); try {//from ww w . j a v a2s. c o m Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); // check if the same record already exists try { objectMetaData = s3service.getObjectMetadata(bucket, key); } catch (AmazonServiceException ase) { if (!(ase.getStatusCode() == 404 || ase.getStatusCode() == 403)) { throw ase; } } if (objectMetaData != null) { long l = objectMetaData.getContentLength(); if (l != file.length()) { throw new DataStoreException( "Collision: " + key + " new length: " + file.length() + " old length: " + l); } LOG.debug("[{}]'s exists, lastmodified = [{}]", key, objectMetaData.getLastModified().getTime()); CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key); copReq.setNewObjectMetadata(objectMetaData); Copy copy = tmx.copy(s3ReqDecorator.decorate(copReq)); try { copy.waitForCopyResult(); LOG.debug("lastModified of [{}] updated successfully.", identifier); if (callback != null) { callback.onSuccess(new AsyncUploadResult(identifier, file)); } } catch (Exception e2) { AsyncUploadResult asyncUpRes = new AsyncUploadResult(identifier, file); asyncUpRes.setException(e2); if (callback != null) { callback.onAbort(asyncUpRes); } throw new DataStoreException("Could not upload " + key, e2); } } if (objectMetaData == null) { try { // start multipart parallel upload using amazon sdk Upload up = tmx.upload(s3ReqDecorator.decorate(new PutObjectRequest(bucket, key, file))); // wait for upload to finish if (asyncUpload) { up.addProgressListener(new S3UploadProgressListener(up, identifier, file, callback)); LOG.debug("added upload progress listener to identifier [{}]", identifier); } else { 
up.waitForUploadResult(); LOG.debug("synchronous upload to identifier [{}] completed.", identifier); if (callback != null) { callback.onSuccess(new AsyncUploadResult(identifier, file)); } } } catch (Exception e2) { AsyncUploadResult asyncUpRes = new AsyncUploadResult(identifier, file); asyncUpRes.setException(e2); if (callback != null) { callback.onAbort(asyncUpRes); } throw new DataStoreException("Could not upload " + key, e2); } } } finally { if (contextClassLoader != null) { Thread.currentThread().setContextClassLoader(contextClassLoader); } } LOG.debug("write of [{}], length=[{}], in async mode [{}], in [{}]ms", new Object[] { identifier, file.length(), asyncUpload, (System.currentTimeMillis() - start) }); }
From source file:org.apache.storm.s3.output.BlockingTransferManagerUploader.java
License:Apache License
/**
 * Uploads the stream to the given bucket/key and blocks until the transfer finishes,
 * logging the resulting location.
 *
 * FIX: restore the thread's interrupt status before translating
 * InterruptedException into IOException, so callers can still observe the interrupt.
 *
 * @throws IOException if the wait is interrupted
 */
@Override
public void upload(String bucketName, String name, InputStream input, ObjectMetadata meta) throws IOException {
    final Upload myUpload = tx.upload(bucketName, name, input, meta);
    try {
        UploadResult uploadResult = myUpload.waitForUploadResult();
        LOG.info("Upload completed, bucket={}, key={}", uploadResult.getBucketName(), uploadResult.getKey());
    } catch (InterruptedException e) {
        // Preserve the interrupt status for callers before converting the exception.
        Thread.currentThread().interrupt();
        throw new IOException(e);
    }
}
From source file:org.apache.streams.s3.S3OutputStreamWrapper.java
License:Apache License
private void addFile() throws Exception { InputStream is = new ByteArrayInputStream(this.outputStream.toByteArray()); int contentLength = outputStream.size(); TransferManager transferManager = new TransferManager(amazonS3Client); ObjectMetadata metadata = new ObjectMetadata(); metadata.setExpirationTime(DateTime.now().plusDays(365 * 3).toDate()); metadata.setContentLength(contentLength); metadata.addUserMetadata("writer", "org.apache.streams"); for (String s : metaData.keySet()) metadata.addUserMetadata(s, metaData.get(s)); String fileNameToWrite = path + fileName; Upload upload = transferManager.upload(bucketName, fileNameToWrite, is, metadata); try {//from w ww. j ava 2 s . c o m upload.waitForUploadResult(); is.close(); transferManager.shutdownNow(false); LOGGER.info("S3 File Close[{} kb] - {}", contentLength / 1024, path + fileName); } catch (Exception e) { // No Op } }
From source file:surrey.repository.impl.S3RepositoryFile.java
License:Open Source License
@Override public void write(InputStream source, long size) throws IOException { ObjectMetadata meta = new ObjectMetadata(); meta.setContentLength(size);//w ww . ja va 2 s. co m Upload upload = transferManager.upload(bucketName, key, source, meta); logger.info("Uploading to S3: " + upload.getDescription()); try { upload.waitForUploadResult(); } catch (Exception e) { logger.error("Failed to upload: " + upload.getDescription() + "\n" + e, e); } }
From source file:surrey.repository.impl.S3RepositoryFile.java
License:Open Source License
@Override public void write(File source) throws IOException { Upload upload = transferManager.upload(bucketName, key, source); logger.info("Uploading to S3: " + upload.getDescription()); try {// www.j ava 2 s.c o m upload.waitForUploadResult(); } catch (Exception e) { logger.error("Failed to upload: " + upload.getDescription() + "\n" + e, e); } }