Usage examples for com.amazonaws.services.s3.transfer.TransferManager#setConfiguration
@Deprecated public void setConfiguration(TransferManagerConfiguration configuration)
Sets the configuration which specifies how this TransferManager processes requests.
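Because setConfiguration is deprecated, newer AWS SDK for Java 1.x code is expected to supply the same settings through TransferManagerBuilder at construction time. A minimal sketch of that replacement (the AmazonS3 client s3 and the size values are assumptions for illustration):

    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.transfer.TransferManager;
    import com.amazonaws.services.s3.transfer.TransferManagerBuilder;

    protected TransferManager buildTransferManager(AmazonS3 s3) {
        // Builder-based equivalent of new TransferManager(s3) + setConfiguration(...):
        // minimum part size and multipart threshold are supplied up front.
        return TransferManagerBuilder.standard()
                .withS3Client(s3)
                .withMinimumUploadPartSize(5L * 1024 * 1024)      // 5 MiB minimum part size
                .withMultipartUploadThreshold(16L * 1024 * 1024)  // multipart above 16 MiB
                .build();
    }

The examples below follow the older setter-based pattern.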
From source file: com.emc.ecs.sync.target.S3Target.java
License: Open Source License
    protected void putObject(SyncObject obj, String targetKey) {
        ObjectMetadata om = AwsS3Util.s3MetaFromSyncMeta(obj.getMetadata());
        if (obj.isDirectory())
            om.setContentType(AwsS3Util.TYPE_DIRECTORY);

        PutObjectRequest req;
        if (obj.isDirectory()) {
            req = new PutObjectRequest(bucketName, targetKey, new ByteArrayInputStream(new byte[0]), om);
        } else if (obj instanceof FileSyncObject) {
            req = new PutObjectRequest(bucketName, targetKey, ((FileSyncObject) obj).getRawSourceIdentifier());
        } else {
            req = new PutObjectRequest(bucketName, targetKey, obj.getInputStream(), om);
        }

        if (includeAcl)
            req.setAccessControlList(AwsS3Util.s3AclFromSyncAcl(obj.getMetadata().getAcl(), ignoreInvalidAcls));

        // xfer manager will figure out if MPU is needed (based on threshold), do the MPU if necessary,
        // and abort if it fails
        TransferManagerConfiguration xferConfig = new TransferManagerConfiguration();
        xferConfig.setMultipartUploadThreshold((long) mpuThresholdMB * 1024 * 1024);
        xferConfig.setMinimumUploadPartSize((long) mpuPartSizeMB * 1024 * 1024);

        TransferManager xferManager = new TransferManager(s3, Executors.newFixedThreadPool(mpuThreadCount));
        xferManager.setConfiguration(xferConfig);

        Upload upload = xferManager.upload(req);
        try {
            log.debug("Wrote {}, etag: {}", targetKey, upload.waitForUploadResult().getETag());
        } catch (InterruptedException e) {
            throw new RuntimeException("upload thread was interrupted", e);
        } finally {
            // make sure bytes read is accurate if we bypassed the counting stream
            if (obj instanceof FileSyncObject) {
                try {
                    ((FileSyncObject) obj).setOverrideBytesRead(upload.getProgress().getBytesTransferred());
                } catch (Throwable t) {
                    log.warn("could not get bytes transferred from upload", t);
                }
            }
        }
    }
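Note that this example builds a fresh TransferManager (and a fixed thread pool) for every object and never shuts it down, so the pool's threads outlive the call. A minimal sketch of releasing them once the transfer finishes, keeping the shared AmazonS3 client open (not part of the original source):

    try {
        Upload upload = xferManager.upload(req);
        log.debug("Wrote {}, etag: {}", targetKey, upload.waitForUploadResult().getETag());
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException("upload thread was interrupted", e);
    } finally {
        // false: stop the TransferManager's threads but leave the s3 client usable
        xferManager.shutdownNow(false);
    }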
From source file: com.liferay.portal.store.s3.S3Store.java
License: Open Source License
    protected TransferManager getTransferManager(AmazonS3 amazonS3) {
        // Liferay's own ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor
        // has no two-argument constructor)
        ExecutorService executorService = new ThreadPoolExecutor(
            _s3StoreConfiguration.corePoolSize(), _s3StoreConfiguration.maxPoolSize());

        // false: the TransferManager does not own the executor and will not shut it down
        TransferManager transferManager = new TransferManager(amazonS3, executorService, false);

        TransferManagerConfiguration transferManagerConfiguration = new TransferManagerConfiguration();

        transferManagerConfiguration.setMinimumUploadPartSize(_s3StoreConfiguration.minimumUploadPartSize());
        transferManagerConfiguration.setMultipartUploadThreshold(_s3StoreConfiguration.multipartUploadThreshold());

        transferManager.setConfiguration(transferManagerConfiguration);

        return transferManager;
    }
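Because the TransferManager above is constructed with shutDownThreadPools set to false, shutting it down will not stop the executor it was given; the store has to do that itself. A minimal disposal sketch (the method name is hypothetical):

    protected void destroyTransferManager(TransferManager transferManager, ExecutorService executorService) {
        // false: keep the underlying AmazonS3 client open; it may be shared elsewhere
        transferManager.shutdownNow(false);

        // the executor was supplied by the caller, so it must be stopped explicitly
        executorService.shutdown();
    }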
From source file: org.apache.hadoop.fs.s3a.S3AFileSystem.java
License: Apache License
    /**
     * The src file is on the local disk. Add it to FS at
     * the given dst name.
     *
     * This version doesn't need to create a temporary file to calculate the md5.
     * Sadly this doesn't seem to be used by the shell cp :(
     *
     * delSrc indicates if the source should be removed
     * @param delSrc whether to delete the src
     * @param overwrite whether to overwrite an existing file
     * @param src path
     * @param dst path
     */
    @Override
    public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException {
        String key = pathToKey(dst);

        if (!overwrite && exists(dst)) {
            throw new IOException(dst + " already exists");
        }

        LOG.info("Copying local file from " + src + " to " + dst);

        // Since we have a local file, we don't need to stream into a temporary file
        LocalFileSystem local = getLocal(getConf());
        File srcfile = local.pathToFile(src);

        TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
        transferConfiguration.setMinimumUploadPartSize(partSize);
        transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);

        TransferManager transfers = new TransferManager(s3);
        transfers.setConfiguration(transferConfiguration);

        final ObjectMetadata om = new ObjectMetadata();
        if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
            om.setServerSideEncryption(serverSideEncryptionAlgorithm);
        }

        PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
        putObjectRequest.setCannedAcl(cannedACL);
        putObjectRequest.setMetadata(om);

        ProgressListener progressListener = new ProgressListener() {
            public void progressChanged(ProgressEvent progressEvent) {
                switch (progressEvent.getEventCode()) {
                case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                    statistics.incrementWriteOps(1);
                    break;
                }
            }
        };

        Upload up = transfers.upload(putObjectRequest);
        up.addProgressListener(progressListener);
        try {
            up.waitForUploadResult();
            statistics.incrementWriteOps(1);
        } catch (InterruptedException e) {
            throw new IOException("Got interrupted, cancelling");
        } finally {
            transfers.shutdownNow(false);
        }

        // This will delete unnecessary fake parent directories
        finishedWrite(key);

        if (delSrc) {
            local.delete(src, false);
        }
    }
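The integer event codes used by this listener are deprecated in later 1.x SDKs in favor of the com.amazonaws.event.ProgressEventType enum. An equivalent listener sketch under that assumption:

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            // ProgressEventType replaces the deprecated getEventCode() constants
            if (progressEvent.getEventType() == ProgressEventType.TRANSFER_PART_COMPLETED_EVENT) {
                statistics.incrementWriteOps(1);
            }
        }
    };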
From source file: org.apache.hadoop.fs.s3a.S3AFileSystem.java
License: Apache License
    private void copyFile(String srcKey, String dstKey) throws IOException {
        if (LOG.isDebugEnabled()) {
            LOG.debug("copyFile " + srcKey + " -> " + dstKey);
        }

        TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
        transferConfiguration.setMultipartCopyPartSize(partSize);

        TransferManager transfers = new TransferManager(s3);
        transfers.setConfiguration(transferConfiguration);

        ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
        final ObjectMetadata dstom = srcom.clone();
        if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
            dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
        }

        CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
        copyObjectRequest.setCannedAccessControlList(cannedACL);
        copyObjectRequest.setNewObjectMetadata(dstom);

        ProgressListener progressListener = new ProgressListener() {
            public void progressChanged(ProgressEvent progressEvent) {
                switch (progressEvent.getEventCode()) {
                case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                    statistics.incrementWriteOps(1);
                    break;
                }
            }
        };

        Copy copy = transfers.copy(copyObjectRequest);
        copy.addProgressListener(progressListener);
        try {
            copy.waitForCopyResult();
            statistics.incrementWriteOps(1);
        } catch (InterruptedException e) {
            throw new IOException("Got interrupted, cancelling");
        } finally {
            transfers.shutdownNow(false);
        }
    }
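TransferManagerConfiguration also exposes a separate threshold that controls when a copy switches to multipart, independent of the upload threshold. A sketch that sets both copy knobs (the multipartCopyThreshold variable is a hypothetical field):

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMultipartCopyPartSize(partSize);
    // copies larger than this are performed as multipart copies
    transferConfiguration.setMultipartCopyThreshold(multipartCopyThreshold);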
From source file: org.apache.hadoop.fs.s3a.S3AOutputStream.java
License: Apache License
    @Override
    public synchronized void close() throws IOException {
        if (closed) {
            return;
        }

        backupStream.close();
        LOG.info("OutputStream for key '" + key + "' closed. Now beginning upload");
        LOG.info("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold);

        try {
            TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
            transferConfiguration.setMinimumUploadPartSize(partSize);
            transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);

            TransferManager transfers = new TransferManager(client);
            transfers.setConfiguration(transferConfiguration);

            final ObjectMetadata om = new ObjectMetadata();
            if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
                om.setServerSideEncryption(serverSideEncryptionAlgorithm);
            }

            PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
            putObjectRequest.setCannedAcl(cannedACL);
            putObjectRequest.setMetadata(om);

            Upload upload = transfers.upload(putObjectRequest);

            ProgressableProgressListener listener =
                new ProgressableProgressListener(upload, progress, statistics);
            upload.addProgressListener(listener);

            upload.waitForUploadResult();

            long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
            if (statistics != null && delta != 0) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("S3A write delta changed after finished: " + delta + " bytes");
                }
                statistics.incrementBytesWritten(delta);
            }

            // This will delete unnecessary fake parent directories
            fs.finishedWrite(key);
        } catch (InterruptedException e) {
            throw new IOException(e);
        } finally {
            if (!backupFile.delete()) {
                LOG.warn("Could not delete temporary s3a file: " + backupFile);
            }
            super.close();
            closed = true;
        }
        LOG.info("OutputStream for key '" + key + "' upload complete");
    }
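Unlike the copyFile example above, this close() never shuts down the TransferManager it creates, so every closed stream leaks the manager's internal thread pool. A minimal sketch of the missing cleanup (with transfers hoisted out of the try block so the finally clause can reach it; not part of the original source):

    } catch (InterruptedException e) {
        throw new IOException(e);
    } finally {
        // release the TransferManager's internal threads; keep the shared client open
        transfers.shutdownNow(false);
        if (!backupFile.delete()) {
            LOG.warn("Could not delete temporary s3a file: " + backupFile);
        }
        super.close();
        closed = true;
    }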