List of usage examples for the com.amazonaws.services.s3.model.CopyObjectRequest constructor:

public CopyObjectRequest(String sourceBucketName, String sourceKey, String destinationBucketName,
        String destinationKey)

Constructs a new CopyObjectRequest with only the basic options: the source bucket and key, and the destination bucket and key.
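A minimal sketch of the constructor in use, assuming credentials and region come from the default provider chains; the bucket and key names are hypothetical placeholders:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CopyObjectRequest;
import com.amazonaws.services.s3.model.CopyObjectResult;

public class CopyObjectExample {
    public static void main(String[] args) {
        // Client resolved from the default credential and region provider chains.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        // Hypothetical bucket and key names, for illustration only.
        CopyObjectRequest request = new CopyObjectRequest(
                "source-bucket", "path/to/source.txt",
                "destination-bucket", "path/to/destination.txt");
        // The copy happens server-side; the object bytes never pass through this client.
        CopyObjectResult result = s3.copyObject(request);
        System.out.println("Copied, new ETag: " + result.getETag());
    }
}

The examples below, collected from open-source projects, layer further options (metadata, storage class, encryption keys, ACLs) onto this same constructor.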
From source file: org.apache.beam.sdk.io.aws.s3.S3FileSystem.java
License: Apache License

@VisibleForTesting
CopyObjectResult atomicCopy(S3ResourceId sourcePath, S3ResourceId destinationPath,
        ObjectMetadata sourceObjectMetadata) throws AmazonClientException {
    CopyObjectRequest copyObjectRequest = new CopyObjectRequest(sourcePath.getBucket(),
            sourcePath.getKey(), destinationPath.getBucket(), destinationPath.getKey());
    copyObjectRequest.setNewObjectMetadata(sourceObjectMetadata);
    copyObjectRequest.setStorageClass(options.getS3StorageClass());
    copyObjectRequest.setSourceSSECustomerKey(options.getSSECustomerKey());
    copyObjectRequest.setDestinationSSECustomerKey(options.getSSECustomerKey());
    return amazonS3.get().copyObject(copyObjectRequest);
}
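Note the pair of SSE-C calls: when the source object is encrypted with a customer-provided key, the copy request must carry that key so S3 can decrypt the source, plus a key to encrypt the destination; this example reuses the same key for both sides.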
From source file: org.apache.druid.storage.s3.S3DataSegmentMover.java
License: Apache License

/**
 * Copies an object and after that checks that the object is present at the target location, via a
 * separate API call. If it is not, an exception is thrown, and the object is not deleted at the old
 * location. This "paranoid" check is added after it was observed that S3 may report a successful
 * move, and the object is not found at the target location.
 */
private void selfCheckingMove(String s3Bucket, String targetS3Bucket, String s3Path, String targetS3Path,
        String copyMsg) throws IOException, SegmentLoadingException {
    if (s3Bucket.equals(targetS3Bucket) && s3Path.equals(targetS3Path)) {
        log.info("No need to move file[s3://%s/%s] onto itself", s3Bucket, s3Path);
        return;
    }
    if (s3Client.doesObjectExist(s3Bucket, s3Path)) {
        final ListObjectsV2Result listResult = s3Client.listObjectsV2(
                new ListObjectsV2Request().withBucketName(s3Bucket).withPrefix(s3Path).withMaxKeys(1));
        // Using getObjectSummaries().size() instead of getKeyCount as, in some cases,
        // it is observed that even though getObjectSummaries returns some data,
        // keyCount is still zero.
        if (listResult.getObjectSummaries().size() == 0) {
            // should never happen
            throw new ISE("Unable to list object [s3://%s/%s]", s3Bucket, s3Path);
        }
        final S3ObjectSummary objectSummary = listResult.getObjectSummaries().get(0);
        if (objectSummary.getStorageClass() != null
                && StorageClass.fromValue(StringUtils.toUpperCase(objectSummary.getStorageClass()))
                        .equals(StorageClass.Glacier)) {
            throw new AmazonServiceException(StringUtils.format(
                    "Cannot move file[s3://%s/%s] of storage class glacier, skipping.", s3Bucket, s3Path));
        } else {
            log.info("Moving file %s", copyMsg);
            final CopyObjectRequest copyRequest = new CopyObjectRequest(s3Bucket, s3Path, targetS3Bucket,
                    targetS3Path);
            if (!config.getDisableAcl()) {
                copyRequest.setAccessControlList(
                        S3Utils.grantFullControlToBucketOwner(s3Client, targetS3Bucket));
            }
            s3Client.copyObject(copyRequest);
            if (!s3Client.doesObjectExist(targetS3Bucket, targetS3Path)) {
                throw new IOE(
                        "After copy was reported as successful the file doesn't exist in the target location [%s]",
                        copyMsg);
            }
            deleteWithRetriesSilent(s3Bucket, s3Path);
            log.debug("Finished moving file %s", copyMsg);
        }
    } else {
        // ensure object exists in target location
        if (s3Client.doesObjectExist(targetS3Bucket, targetS3Path)) {
            log.info("Not moving file [s3://%s/%s], already present in target location [s3://%s/%s]",
                    s3Bucket, s3Path, targetS3Bucket, targetS3Path);
        } else {
            throw new SegmentLoadingException(
                    "Unable to move file %s, not present in either source or target location", copyMsg);
        }
    }
}
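The Glacier guard reflects an S3 restriction: an object whose storage class is GLACIER cannot be used as a copy source until it has been restored, so the move is rejected up front rather than left to fail mid-flight.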
From source file: org.apache.hadoop.fs.s3a.S3AFileSystem.java
License: Apache License

private void copyFile(String srcKey, String dstKey) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("copyFile " + srcKey + " -> " + dstKey);
    }
    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMultipartCopyPartSize(partSize);
    TransferManager transfers = new TransferManager(s3);
    transfers.setConfiguration(transferConfiguration);
    ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
    final ObjectMetadata dstom = srcom.clone();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
    copyObjectRequest.setCannedAccessControlList(cannedACL);
    copyObjectRequest.setNewObjectMetadata(dstom);
    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            }
        }
    };
    Copy copy = transfers.copy(copyObjectRequest);
    copy.addProgressListener(progressListener);
    try {
        copy.waitForCopyResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    } finally {
        transfers.shutdownNow(false);
    }
}
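Routing the copy through TransferManager rather than calling copyObject directly matters for large objects: a single CopyObject operation only accepts sources up to 5 GB, while TransferManager transparently switches to a multipart copy, using the part size configured via setMultipartCopyPartSize, once the source exceeds its multipart-copy threshold.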
From source file: org.apache.hadoop.fs.s3r.S3RFileSystem.java
License: Apache License

private void copyFile(String srcKey, String dstKey) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("copyFile " + srcKey + " -> " + dstKey);
    }
    ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
    final ObjectMetadata dstom = srcom.clone();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
    copyObjectRequest.setCannedAccessControlList(cannedACL);
    copyObjectRequest.setNewObjectMetadata(dstom);
    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            default:
                break;
            }
        }
    };
    Copy copy = transfers.copy(copyObjectRequest);
    copy.addProgressListener(progressListener);
    try {
        copy.waitForCopyResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    }
}
From source file: org.apache.jackrabbit.aws.ext.ds.S3Backend.java
License: Apache License

@Override
public boolean exists(DataIdentifier identifier, boolean touch) throws DataStoreException {
    long start = System.currentTimeMillis();
    String key = getKeyName(identifier);
    ObjectMetadata objectMetaData = null;
    boolean retVal = false;
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        objectMetaData = s3service.getObjectMetadata(bucket, key);
        if (objectMetaData != null) {
            retVal = true;
            if (touch) {
                CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
                copReq.setNewObjectMetadata(objectMetaData);
                s3service.copyObject(copReq);
                LOG.debug("[{}] touched, took [{}] ms.", identifier, (System.currentTimeMillis() - start));
            }
        } else {
            retVal = false;
        }
    } catch (AmazonServiceException e) {
        if (e.getStatusCode() == 404) {
            retVal = false;
        } else {
            throw new DataStoreException(
                    "Error occurred while checking existence of key [" + identifier.toString() + "]", e);
        }
    } catch (Exception e) {
        throw new DataStoreException("Error occurred while checking existence of key " + identifier.toString(), e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.debug("exists [{}]: [{}] took [{}] ms.",
            new Object[] { identifier, retVal, (System.currentTimeMillis() - start) });
    return retVal;
}
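The copy onto itself (same bucket and key for source and destination) is the idiomatic way to "touch" an S3 object: S3 has no call that updates only metadata or the Last-Modified timestamp, and a self-copy is accepted as long as it supplies replacement metadata, which the setNewObjectMetadata call here does.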
From source file: org.apache.jackrabbit.aws.ext.ds.S3Backend.java
License: Apache License

private void write(DataIdentifier identifier, File file, boolean asyncUpload, AsyncUploadCallback callback)
        throws DataStoreException {
    String key = getKeyName(identifier);
    ObjectMetadata objectMetaData = null;
    long start = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        // check if the same record already exists
        try {
            objectMetaData = s3service.getObjectMetadata(bucket, key);
        } catch (AmazonServiceException ase) {
            if (ase.getStatusCode() != 404) {
                throw ase;
            }
        }
        if (objectMetaData != null) {
            long l = objectMetaData.getContentLength();
            if (l != file.length()) {
                throw new DataStoreException(
                        "Collision: " + key + " new length: " + file.length() + " old length: " + l);
            }
            LOG.debug("[{}] exists, lastModified = [{}]", key, objectMetaData.getLastModified().getTime());
            CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
            copReq.setNewObjectMetadata(objectMetaData);
            s3service.copyObject(copReq);
            LOG.debug("lastModified of [{}] updated successfully.", identifier);
            if (callback != null) {
                callback.onSuccess(new AsyncUploadResult(identifier, file));
            }
        }
        if (objectMetaData == null) {
            try {
                // start multipart parallel upload using amazon sdk
                Upload up = tmx.upload(new PutObjectRequest(bucket, key, file));
                // wait for upload to finish
                if (asyncUpload) {
                    up.addProgressListener(new S3UploadProgressListener(up, identifier, file, callback));
                    LOG.debug("added upload progress listener to identifier [{}]", identifier);
                } else {
                    up.waitForUploadResult();
                    LOG.debug("synchronous upload to identifier [{}] completed.", identifier);
                    if (callback != null) {
                        callback.onSuccess(new AsyncUploadResult(identifier, file));
                    }
                }
            } catch (Exception e2) {
                // only notify when a callback was supplied
                if (!asyncUpload && callback != null) {
                    callback.onAbort(new AsyncUploadResult(identifier, file));
                }
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.debug("write of [{}], length=[{}], in async mode [{}], in [{}]ms",
            new Object[] { identifier, file.length(), asyncUpload, (System.currentTimeMillis() - start) });
}
From source file: org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java
License: Apache License

@Override
public boolean exists(DataIdentifier identifier, boolean touch) throws DataStoreException {
    long start = System.currentTimeMillis();
    String key = getKeyName(identifier);
    ObjectMetadata objectMetaData = null;
    boolean retVal = false;
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        objectMetaData = s3service.getObjectMetadata(bucket, key);
        if (objectMetaData != null) {
            retVal = true;
            if (touch) {
                CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
                copReq.setNewObjectMetadata(objectMetaData);
                Copy copy = tmx.copy(s3ReqDecorator.decorate(copReq));
                copy.waitForCopyResult();
                LOG.debug("[{}] touched, took [{}] ms.", identifier, (System.currentTimeMillis() - start));
            }
        } else {
            retVal = false;
        }
    } catch (AmazonServiceException e) {
        if (e.getStatusCode() == 404 || e.getStatusCode() == 403) {
            retVal = false;
        } else {
            throw new DataStoreException(
                    "Error occurred while checking existence of key [" + identifier.toString() + "]", e);
        }
    } catch (Exception e) {
        throw new DataStoreException("Error occurred while checking existence of key " + identifier.toString(), e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.debug("exists [{}]: [{}] took [{}] ms.",
            new Object[] { identifier, retVal, (System.currentTimeMillis() - start) });
    return retVal;
}
From source file: org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java
License: Apache License

@Override
public void touch(DataIdentifier identifier, long minModifiedDate) throws DataStoreException {
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        final long start = System.currentTimeMillis();
        final String key = getKeyName(identifier);
        if (minModifiedDate > 0 && minModifiedDate > getLastModified(identifier)) {
            CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
            copReq.setNewObjectMetadata(new ObjectMetadata());
            Copy copy = tmx.copy(s3ReqDecorator.decorate(copReq));
            copy.waitForCompletion();
            LOG.debug("[{}] touched. time taken [{}] ms ",
                    new Object[] { identifier, (System.currentTimeMillis() - start) });
        } else {
            LOG.trace("[{}] touch not required. time taken [{}] ms ",
                    new Object[] { identifier, (System.currentTimeMillis() - start) });
        }
    } catch (Exception e) {
        throw new DataStoreException("Error occurred in touching key [" + identifier.toString() + "]", e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
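Passing a fresh ObjectMetadata turns the self-copy into a metadata REPLACE, overwriting the stored user metadata; here the copy is issued purely to push the object's Last-Modified time past minModifiedDate.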
From source file: org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java
License: Apache License

private void write(DataIdentifier identifier, File file, boolean asyncUpload, AsyncUploadCallback callback)
        throws DataStoreException {
    String key = getKeyName(identifier);
    ObjectMetadata objectMetaData = null;
    long start = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        // check if the same record already exists
        try {
            objectMetaData = s3service.getObjectMetadata(bucket, key);
        } catch (AmazonServiceException ase) {
            if (!(ase.getStatusCode() == 404 || ase.getStatusCode() == 403)) {
                throw ase;
            }
        }
        if (objectMetaData != null) {
            long l = objectMetaData.getContentLength();
            if (l != file.length()) {
                throw new DataStoreException(
                        "Collision: " + key + " new length: " + file.length() + " old length: " + l);
            }
            LOG.debug("[{}] exists, lastModified = [{}]", key, objectMetaData.getLastModified().getTime());
            CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
            copReq.setNewObjectMetadata(objectMetaData);
            Copy copy = tmx.copy(s3ReqDecorator.decorate(copReq));
            try {
                copy.waitForCopyResult();
                LOG.debug("lastModified of [{}] updated successfully.", identifier);
                if (callback != null) {
                    callback.onSuccess(new AsyncUploadResult(identifier, file));
                }
            } catch (Exception e2) {
                AsyncUploadResult asyncUpRes = new AsyncUploadResult(identifier, file);
                asyncUpRes.setException(e2);
                if (callback != null) {
                    callback.onAbort(asyncUpRes);
                }
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
        if (objectMetaData == null) {
            try {
                // start multipart parallel upload using amazon sdk
                Upload up = tmx.upload(s3ReqDecorator.decorate(new PutObjectRequest(bucket, key, file)));
                // wait for upload to finish
                if (asyncUpload) {
                    up.addProgressListener(new S3UploadProgressListener(up, identifier, file, callback));
                    LOG.debug("added upload progress listener to identifier [{}]", identifier);
                } else {
                    up.waitForUploadResult();
                    LOG.debug("synchronous upload to identifier [{}] completed.", identifier);
                    if (callback != null) {
                        callback.onSuccess(new AsyncUploadResult(identifier, file));
                    }
                }
            } catch (Exception e2) {
                AsyncUploadResult asyncUpRes = new AsyncUploadResult(identifier, file);
                asyncUpRes.setException(e2);
                if (callback != null) {
                    callback.onAbort(asyncUpRes);
                }
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.debug("write of [{}], length=[{}], in async mode [{}], in [{}]ms",
            new Object[] { identifier, file.length(), asyncUpload, (System.currentTimeMillis() - start) });
}
From source file: org.broadleafcommerce.vendor.amazon.s3.S3FileServiceProvider.java
License: Apache License

private void copyOrMoveObjectImpl(String srcKey, String destKey, boolean move,
        boolean checkAndSucceedIfAlreadyMoved) {
    final S3Configuration s3config = s3ConfigurationService.lookupS3Configuration();
    final AmazonS3Client s3Client = getAmazonS3Client(s3config);
    final String bucketName = s3config.getDefaultBucketName();
    // copy
    final CopyObjectRequest objToCopy = new CopyObjectRequest(bucketName, srcKey, bucketName, destKey);
    if ((s3config.getStaticAssetFileExtensionPattern() != null)
            && s3config.getStaticAssetFileExtensionPattern().matcher(getExtension(destKey)).matches()) {
        objToCopy.setCannedAccessControlList(CannedAccessControlList.PublicRead);
    }
    try {
        s3Client.copyObject(objToCopy);
    } catch (AmazonS3Exception s3e) {
        if (s3e.getStatusCode() == 404 && checkAndSucceedIfAlreadyMoved) {
            // it's not at the srcKey; check if something is at the destKey
            if (s3Client.doesObjectExist(bucketName, destKey)) {
                final String msg = String.format(
                        "src(%s) doesn't exist but dest(%s) does, so assuming success", srcKey, destKey);
                LOG.warn(msg);
                return;
            } else {
                final String msg = String.format("neither src(%s) nor dest(%s) exists", srcKey, destKey);
                throw new RuntimeException(msg);
            }
        }
    } catch (AmazonClientException e) {
        throw new RuntimeException("Unable to copy object from: " + srcKey + " to: " + destKey, e);
    }
    if (move) {
        // delete the old objects in the sandbox folder (those with srcKey)
        DeleteObjectRequest objToDelete = new DeleteObjectRequest(bucketName, srcKey);
        try {
            s3Client.deleteObject(objToDelete);
        } catch (AmazonClientException e) {
            //throw new RuntimeException("Moving objects to production folder but unable to delete old object: " + srcKey, e);
            LOG.error("Moving objects to production folder but unable to delete old object: " + srcKey, e);
        }
    }
}
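CannedAccessControlList.PublicRead grants anonymous read access to the copied object, and the extension-pattern check restricts that grant to file types configured as publicly served static assets.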