List of usage examples for com.amazonaws.services.s3.model DeleteObjectsRequest DeleteObjectsRequest
public DeleteObjectsRequest(String bucketName)
From source file:org.apache.jackrabbit.aws.ext.ds.S3Backend.java
License:Apache License
/**
 * Renames object keys in S3 concurrently. The number of concurrent threads is
 * defined by the 'maxConnections' property in aws.properties. As S3 has no
 * "move" operation, a move is simulated by copying each object to its new key
 * and then deleting the old key.
 *
 * @throws DataStoreException if a rename task fails
 */
private void renameKeys() throws DataStoreException {
    long startTime = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    long count = 0;
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ObjectListing prevObjectListing = s3service.listObjects(bucket, KEY_PREFIX);
        List<DeleteObjectsRequest.KeyVersion> deleteList = new ArrayList<DeleteObjectsRequest.KeyVersion>();
        int nThreads = Integer.parseInt(properties.getProperty("maxConnections"));
        ExecutorService executor = Executors.newFixedThreadPool(nThreads,
                new NamedThreadFactory("s3-object-rename-worker"));
        boolean taskAdded = false;
        // Walk every listing page, scheduling a rename task per key and
        // remembering the old key for later deletion.
        while (true) {
            for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
                executor.execute(new KeyRenameThread(s3ObjSumm.getKey()));
                taskAdded = true;
                count++;
                deleteList.add(new DeleteObjectsRequest.KeyVersion(s3ObjSumm.getKey()));
            }
            if (!prevObjectListing.isTruncated()) {
                break;
            }
            prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
        }
        // This will make the executor accept no new threads
        // and finish all existing threads in the queue
        executor.shutdown();
        try {
            // Wait until all threads are finish
            while (taskAdded && !executor.awaitTermination(10, TimeUnit.SECONDS)) {
                LOG.info("Rename S3 keys tasks timedout. Waiting again");
            }
        } catch (InterruptedException ie) {
            // Restore the interrupt status instead of silently swallowing it.
            Thread.currentThread().interrupt();
        }
        LOG.info("Renamed [{}] keys, time taken [{}]sec", count,
                ((System.currentTimeMillis() - startTime) / 1000));
        // Delete older keys in batches (multi-object delete is limited in size).
        if (deleteList.size() > 0) {
            DeleteObjectsRequest delObjsReq = new DeleteObjectsRequest(bucket);
            int batchSize = 500, startIndex = 0, size = deleteList.size();
            int endIndex = batchSize < size ? batchSize : size;
            while (endIndex <= size) {
                delObjsReq.setKeys(Collections.unmodifiableList(deleteList.subList(startIndex, endIndex)));
                DeleteObjectsResult dobjs = s3service.deleteObjects(delObjsReq);
                LOG.info("Records[{}] deleted in datastore from index [{}] to [{}]",
                        new Object[] { dobjs.getDeletedObjects().size(), startIndex, (endIndex - 1) });
                if (endIndex == size) {
                    break;
                } else {
                    startIndex = endIndex;
                    endIndex = (startIndex + batchSize) < size ? (startIndex + batchSize) : size;
                }
            }
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java
License:Apache License
@Override public Set<DataIdentifier> deleteAllOlderThan(long min) throws DataStoreException { long start = System.currentTimeMillis(); // S3 stores lastModified to lower boundary of timestamp in ms. // and hence min is reduced by 1000ms. min = min - 1000;// w w w . java2 s .c om Set<DataIdentifier> deleteIdSet = new HashSet<DataIdentifier>(30); ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); try { Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); ObjectListing prevObjectListing = s3service.listObjects(bucket); while (true) { List<DeleteObjectsRequest.KeyVersion> deleteList = new ArrayList<DeleteObjectsRequest.KeyVersion>(); for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) { if (!s3ObjSumm.getKey().startsWith(META_KEY_PREFIX)) { DataIdentifier identifier = new DataIdentifier(getIdentifierName(s3ObjSumm.getKey())); long lastModified = s3ObjSumm.getLastModified().getTime(); LOG.debug("Identifier [{}]'s lastModified = [{}]", identifier, lastModified); if (lastModified < min && store.confirmDelete(identifier) // confirm once more that record's lastModified < min // order is important here && s3service.getObjectMetadata(bucket, s3ObjSumm.getKey()).getLastModified() .getTime() < min) { store.deleteFromCache(identifier); LOG.debug("add id [{}] to delete lists", s3ObjSumm.getKey()); deleteList.add(new DeleteObjectsRequest.KeyVersion(s3ObjSumm.getKey())); deleteIdSet.add(identifier); } } } if (deleteList.size() > 0) { DeleteObjectsRequest delObjsReq = new DeleteObjectsRequest(bucket); delObjsReq.setKeys(deleteList); DeleteObjectsResult dobjs = s3service.deleteObjects(delObjsReq); if (dobjs.getDeletedObjects().size() != deleteList.size()) { throw new DataStoreException( "Incomplete delete object request. 
only " + dobjs.getDeletedObjects().size() + " out of " + deleteList.size() + " are deleted"); } else { LOG.debug("[{}] records deleted from datastore", deleteList); } } if (!prevObjectListing.isTruncated()) { break; } prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing); } } finally { if (contextClassLoader != null) { Thread.currentThread().setContextClassLoader(contextClassLoader); } } LOG.info( "deleteAllOlderThan: min=[{}] exit. Deleted[{}] records. Number of records deleted [{}] took [{}]ms", new Object[] { min, deleteIdSet, deleteIdSet.size(), (System.currentTimeMillis() - start) }); return deleteIdSet; }
From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java
License:Apache License
public void deleteAllMetadataRecords(String prefix) { ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); try {// ww w. ja v a 2 s. c o m Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucket) .withPrefix(addMetaKeyPrefix(prefix)); ObjectListing metaList = s3service.listObjects(listObjectsRequest); List<DeleteObjectsRequest.KeyVersion> deleteList = new ArrayList<DeleteObjectsRequest.KeyVersion>(); for (S3ObjectSummary s3ObjSumm : metaList.getObjectSummaries()) { deleteList.add(new DeleteObjectsRequest.KeyVersion(s3ObjSumm.getKey())); } if (deleteList.size() > 0) { DeleteObjectsRequest delObjsReq = new DeleteObjectsRequest(bucket); delObjsReq.setKeys(deleteList); DeleteObjectsResult dobjs = s3service.deleteObjects(delObjsReq); } } finally { if (contextClassLoader != null) { Thread.currentThread().setContextClassLoader(contextClassLoader); } } }
From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java
License:Apache License
/** * This method rename object keys in S3 concurrently. The number of * concurrent threads is defined by 'maxConnections' property in * aws.properties. As S3 doesn't have "move" command, this method simulate * move as copy object object to new key and then delete older key. *//* w w w .ja v a2 s .c o m*/ private void renameKeys() throws DataStoreException { long startTime = System.currentTimeMillis(); ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); long count = 0; try { Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); ObjectListing prevObjectListing = s3service.listObjects(bucket); List<DeleteObjectsRequest.KeyVersion> deleteList = new ArrayList<DeleteObjectsRequest.KeyVersion>(); int nThreads = Integer.parseInt(properties.getProperty("maxConnections")); ExecutorService executor = Executors.newFixedThreadPool(nThreads, new NamedThreadFactory("s3-object-rename-worker")); boolean taskAdded = false; while (true) { for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) { executor.execute(new KeyRenameThread(s3ObjSumm.getKey())); taskAdded = true; count++; // delete the object if it follows old key name format if (s3ObjSumm.getKey().startsWith(KEY_PREFIX)) { deleteList.add(new DeleteObjectsRequest.KeyVersion(s3ObjSumm.getKey())); } } if (!prevObjectListing.isTruncated()) break; prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing); } // This will make the executor accept no new threads // and finish all existing threads in the queue executor.shutdown(); try { // Wait until all threads are finish while (taskAdded && !executor.awaitTermination(10, TimeUnit.SECONDS)) { LOG.info("Rename S3 keys tasks timedout. Waiting again"); } } catch (InterruptedException ie) { } LOG.info("Renamed [{}] keys, time taken [{}]sec", count, ((System.currentTimeMillis() - startTime) / 1000)); // Delete older keys. 
if (deleteList.size() > 0) { DeleteObjectsRequest delObjsReq = new DeleteObjectsRequest(bucket); int batchSize = 500, startIndex = 0, size = deleteList.size(); int endIndex = batchSize < size ? batchSize : size; while (endIndex <= size) { delObjsReq.setKeys(Collections.unmodifiableList(deleteList.subList(startIndex, endIndex))); DeleteObjectsResult dobjs = s3service.deleteObjects(delObjsReq); LOG.info("Records[{}] deleted in datastore from index [{}] to [{}]", new Object[] { dobjs.getDeletedObjects().size(), startIndex, (endIndex - 1) }); if (endIndex == size) { break; } else { startIndex = endIndex; endIndex = (startIndex + batchSize) < size ? (startIndex + batchSize) : size; } } } } finally { if (contextClassLoader != null) { Thread.currentThread().setContextClassLoader(contextClassLoader); } } }
From source file:org.broadleafcommerce.vendor.amazon.s3.S3FileServiceProvider.java
License:Apache License
public void deleteMultipleObjects(List<String> listOfKeysToRemove) { if (listOfKeysToRemove == null || listOfKeysToRemove.isEmpty()) { return;// w w w. j ava 2 s . c om } S3Configuration s3config = s3ConfigurationService.lookupS3Configuration(); AmazonS3Client s3Client = getAmazonS3Client(s3config); String bucketName = s3config.getDefaultBucketName(); DeleteObjectsRequest multiObjectDeleteRequest = new DeleteObjectsRequest(bucketName); List<KeyVersion> keys = new ArrayList<KeyVersion>(); for (String targetKey : listOfKeysToRemove) { keys.add(new KeyVersion(targetKey)); } multiObjectDeleteRequest.setKeys(keys); try { DeleteObjectsResult delObjResult = s3Client.deleteObjects(multiObjectDeleteRequest); if (LOG.isTraceEnabled()) { String s = listOfKeysToRemove.stream().collect(Collectors.joining(",\n\t")); LOG.trace(String.format("Successfully deleted %d items:\n\t%s", delObjResult.getDeletedObjects().size(), s)); } } catch (MultiObjectDeleteException e) { if (LOG.isTraceEnabled()) { LOG.trace(String.format("%s \n", e.getMessage())); LOG.trace( String.format("No. of objects successfully deleted = %s\n", e.getDeletedObjects().size())); LOG.trace(String.format("No. of objects failed to delete = %s\n", e.getErrors().size())); LOG.trace(String.format("Printing error data...\n")); for (DeleteError deleteError : e.getErrors()) { if (LOG.isTraceEnabled()) { LOG.trace(String.format("Object Key: %s\t%s\t%s\n", deleteError.getKey(), deleteError.getCode(), deleteError.getMessage())); } } } throw new RuntimeException("No. of objects failed to delete = " + e.getErrors().size(), e); } }
From source file:org.chodavarapu.jgitaws.repositories.PackRepository.java
License:Eclipse Distribution License
public Observable<Void> deletePacks(Collection<DfsPackDescription> packs) { List<String> objectNames = getObjectNames(packs); return Async//w w w . jav a 2 s. co m .fromCallable( () -> configuration.getS3Client() .deleteObjects(new DeleteObjectsRequest(configuration.getPacksBucketName()) .withKeys(objectNames.toArray(new String[objectNames.size()])))) .map(r -> null); }
From source file:org.elasticsearch.cloud.aws.blobstore.S3BlobStore.java
License:Apache License
@Override public void delete(BlobPath path) { ObjectListing prevListing = null;/*from ww w .j a v a 2 s .c o m*/ //From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html //we can do at most 1K objects per delete //We don't know the bucket name until first object listing DeleteObjectsRequest multiObjectDeleteRequest = null; ArrayList<KeyVersion> keys = new ArrayList<KeyVersion>(); while (true) { ObjectListing list; if (prevListing != null) { list = client.listNextBatchOfObjects(prevListing); } else { String keyPath = path.buildAsString("/"); if (!keyPath.isEmpty()) { keyPath = keyPath + "/"; } list = client.listObjects(bucket, keyPath); multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName()); } for (S3ObjectSummary summary : list.getObjectSummaries()) { keys.add(new KeyVersion(summary.getKey())); //Every 500 objects batch the delete request if (keys.size() > 500) { multiObjectDeleteRequest.setKeys(keys); client.deleteObjects(multiObjectDeleteRequest); multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName()); keys.clear(); } } if (list.isTruncated()) { prevListing = list; } else { break; } } if (!keys.isEmpty()) { multiObjectDeleteRequest.setKeys(keys); client.deleteObjects(multiObjectDeleteRequest); } }
From source file:org.elasticsearch.repositories.s3.S3BlobStore.java
License:Apache License
@Override public void delete(BlobPath path) { AccessController.doPrivileged((PrivilegedAction<Object>) () -> { ObjectListing prevListing = null; //From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html //we can do at most 1K objects per delete //We don't know the bucket name until first object listing DeleteObjectsRequest multiObjectDeleteRequest = null; ArrayList<KeyVersion> keys = new ArrayList<>(); while (true) { ObjectListing list;/*from w w w .j a va 2s . c o m*/ if (prevListing != null) { list = client.listNextBatchOfObjects(prevListing); } else { list = client.listObjects(bucket, path.buildAsString()); multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName()); } for (S3ObjectSummary summary : list.getObjectSummaries()) { keys.add(new KeyVersion(summary.getKey())); //Every 500 objects batch the delete request if (keys.size() > 500) { multiObjectDeleteRequest.setKeys(keys); client.deleteObjects(multiObjectDeleteRequest); multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName()); keys.clear(); } } if (list.isTruncated()) { prevListing = list; } else { break; } } if (!keys.isEmpty()) { multiObjectDeleteRequest.setKeys(keys); client.deleteObjects(multiObjectDeleteRequest); } return null; }); }
From source file:org.finra.dm.dao.impl.S3DaoImpl.java
License:Apache License
@Override public void deleteFileList(final S3FileTransferRequestParamsDto params) { AmazonS3Client s3Client = null;//from w w w . j a v a 2 s . c om LOGGER.info(String.format("Deleting %d keys/objects from s3://%s ...", params.getFiles().size(), params.getS3BucketName())); try { // In order to avoid a MalformedXML AWS exception, we send delete request only when we have any keys to delete. if (!params.getFiles().isEmpty()) { // Build a list of keys to be deleted. List<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<>(); for (File file : params.getFiles()) { keys.add(new DeleteObjectsRequest.KeyVersion(file.getPath().replaceAll("\\\\", "/"))); } DeleteObjectsRequest multiObjectDeleteRequest = new DeleteObjectsRequest(params.getS3BucketName()); s3Client = getAmazonS3(params); // The Multi-Object Delete request can contain a list of up to 1000 keys. for (int i = 0; i < keys.size() / MAX_KEYS_PER_DELETE_REQUEST + 1; i++) { List<DeleteObjectsRequest.KeyVersion> keysSubList = keys.subList( i * MAX_KEYS_PER_DELETE_REQUEST, Math.min(keys.size(), (i + 1) * MAX_KEYS_PER_DELETE_REQUEST)); multiObjectDeleteRequest.setKeys(keysSubList); s3Operations.deleteObjects(multiObjectDeleteRequest, s3Client); LOGGER.info(String.format( "Successfully requested the deletion of the following %d keys/objects from bucket \"%s\":", keysSubList.size(), params.getS3BucketName())); for (DeleteObjectsRequest.KeyVersion keyVersion : keysSubList) { LOGGER.info(String.format(" s3://%s/%s", params.getS3BucketName(), keyVersion.getKey())); } } } } catch (Exception e) { throw new IllegalStateException( String.format("Failed to delete a list of keys/objects from bucket \"%s\". Reason: %s", params.getS3BucketName(), e.getMessage()), e); } finally { // Shutdown the AmazonS3Client instance to release resources. if (s3Client != null) { s3Client.shutdown(); } } }
From source file:org.finra.dm.dao.impl.S3DaoImpl.java
License:Apache License
@Override public void deleteDirectory(final S3FileTransferRequestParamsDto params) { AmazonS3Client s3Client = null;//w w w . j a v a 2s .c o m LOGGER.info(String.format("Deleting keys/objects from s3://%s/%s ...", params.getS3BucketName(), params.getS3KeyPrefix())); Assert.hasText(params.getS3KeyPrefix(), "Deleting from root directory is not allowed."); try { // List S3 object including any 0 byte objects that represent S3 directories. List<StorageFile> storageFiles = listObjectsMatchingKeyPrefix(params, false); LOGGER.info(String.format("Found %d keys/objects in s3://%s/%s ...", storageFiles.size(), params.getS3BucketName(), params.getS3KeyPrefix())); // In order to avoid a MalformedXML AWS exception, we send delete request only when we have any keys to delete. if (!storageFiles.isEmpty()) { DeleteObjectsRequest multiObjectDeleteRequest = new DeleteObjectsRequest(params.getS3BucketName()); s3Client = getAmazonS3(params); // The Multi-Object Delete request can contain a list of up to 1000 keys. for (int i = 0; i < storageFiles.size() / MAX_KEYS_PER_DELETE_REQUEST + 1; i++) { // Prepare a list of S3 object keys to be deleted. List<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<>(); for (StorageFile storageFile : storageFiles.subList(i * MAX_KEYS_PER_DELETE_REQUEST, Math.min(storageFiles.size(), (i + 1) * MAX_KEYS_PER_DELETE_REQUEST))) { keys.add(new DeleteObjectsRequest.KeyVersion(storageFile.getFilePath())); } // Delete the S3 objects. 
multiObjectDeleteRequest.setKeys(keys); s3Operations.deleteObjects(multiObjectDeleteRequest, s3Client); LOGGER.info(String.format( "Successfully deleted the following %d keys/objects with prefix \"%s\" from bucket \"%s\":", keys.size(), params.getS3KeyPrefix(), params.getS3BucketName())); for (DeleteObjectsRequest.KeyVersion keyVersion : keys) { LOGGER.info(String.format(" s3://%s/%s", params.getS3BucketName(), keyVersion.getKey())); } } } } catch (AmazonClientException e) { throw new IllegalStateException( String.format("Failed to delete keys/objects with prefix \"%s\" from bucket \"%s\". Reason: %s", params.getS3KeyPrefix(), params.getS3BucketName(), e.getMessage()), e); } finally { // Shutdown the AmazonS3Client instance to release resources. if (s3Client != null) { s3Client.shutdown(); } } }