List of usage examples for the com.amazonaws.services.s3.model.DeleteObjectsRequest constructor
public DeleteObjectsRequest(String bucketName)
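Before the project examples below, here is a minimal sketch of the constructor in isolation. The bucket name and keys are placeholders, and credentials are assumed to come from the default provider chain:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.DeleteObjectsResult;

// Minimal sketch: delete two objects in a single batch request.
// "my-bucket" and the two keys are placeholder values.
AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
DeleteObjectsRequest request = new DeleteObjectsRequest("my-bucket")
        .withKeys("logs/a.txt", "logs/b.txt");
DeleteObjectsResult result = s3.deleteObjects(request);
System.out.println("Deleted " + result.getDeletedObjects().size() + " object(s)");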
From source file:mail.server.storage.AWSStorageDelete.java
License:GNU General Public License
protected void deleteBucketContents(AmazonS3 s3, String bucketName) throws Exception {
    while (true) {
        List<String> keys = new ArrayList<String>();
        log.debug("creating batch delete");
        ObjectListing listing = s3.listObjects(bucketName);
        for (S3ObjectSummary i : listing.getObjectSummaries()) {
            log.debug("key", i.getKey());
            keys.add(i.getKey());
        }
        if (keys.isEmpty())
            break;
        DeleteObjectsRequest req = new DeleteObjectsRequest(bucketName)
                .withKeys(keys.toArray(new String[0]));
        log.debug("deleting");
        s3.deleteObjects(req);
    }
}
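Note that the example above re-lists the bucket from scratch on every pass. A variant (a sketch, not from the original source) pages through a single listing with listNextBatchOfObjects instead, issuing one batch delete per page:

// Sketch: walk the listing page by page, one batch delete per page.
// Assumes the same s3, bucketName, and log as the example above.
ObjectListing listing = s3.listObjects(bucketName);
while (true) {
    List<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<DeleteObjectsRequest.KeyVersion>();
    for (S3ObjectSummary summary : listing.getObjectSummaries()) {
        keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
    }
    if (!keys.isEmpty()) {
        s3.deleteObjects(new DeleteObjectsRequest(bucketName).withKeys(keys));
    }
    if (!listing.isTruncated()) {
        break;
    }
    listing = s3.listNextBatchOfObjects(listing);
}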
From source file:net.solarnetwork.node.backup.s3.SdkS3Client.java
License:Open Source License
@Override
public void deleteObjects(Set<String> keys) throws IOException {
    AmazonS3 client = getClient();
    try {
        DeleteObjectsRequest req = new DeleteObjectsRequest(bucketName)
                .withKeys(keys.stream().map(k -> new KeyVersion(k)).collect(Collectors.toList()));
        client.deleteObjects(req);
    } catch (AmazonServiceException e) {
        log.warn("AWS error: {}; HTTP code {}; AWS code {}; type {}; request ID {}", e.getMessage(),
                e.getStatusCode(), e.getErrorCode(), e.getErrorType(), e.getRequestId());
        throw new RemoteServiceException("Error deleting S3 objects " + keys, e);
    } catch (AmazonClientException e) {
        log.debug("Error communicating with AWS: {}", e.getMessage());
        throw new IOException("Error communicating with AWS", e);
    }
}
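deleteObjects can also fail partially: the SDK throws MultiObjectDeleteException when some keys are deleted and others are not. A hedged sketch of inspecting the per-key errors follows (not part of the source above; since MultiObjectDeleteException is a subclass of AmazonServiceException, this catch would have to precede the broader handler):

try {
    client.deleteObjects(req);
} catch (MultiObjectDeleteException e) {
    // Some objects were deleted, others failed; report the failures per key.
    for (MultiObjectDeleteException.DeleteError err : e.getErrors()) {
        log.warn("Failed to delete {}: {} ({})", err.getKey(), err.getMessage(), err.getCode());
    }
    throw new IOException("Deleted only " + e.getDeletedObjects().size() + " of the requested objects", e);
}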
From source file:org.alanwilliamson.amazon.s3.Delete.java
License:Open Source License
public cfData execute(cfSession _session, cfArgStructData argStruct) throws cfmRunTimeException {
    AmazonKey amazonKey = getAmazonKey(_session, argStruct);
    AmazonS3 s3Client = getAmazonS3(amazonKey);

    String bucket = getNamedStringParam(argStruct, "bucket", null);
    if (bucket == null)
        throwException(_session, "Please specify a bucket");

    cfData key = getNamedParam(argStruct, "key");

    try {
        if (key.getDataType() == cfData.CFARRAYDATA) {
            DeleteObjectsRequest multiObjectDeleteRequest = new DeleteObjectsRequest(bucket);
            List<KeyVersion> keysT = new ArrayList<KeyVersion>();

            cfArrayData arrData = (cfArrayData) key;
            for (int x = 0; x < arrData.size(); x++) {
                String k = arrData.getData(x + 1).toString();
                if (k.charAt(0) == '/')
                    k = k.substring(1);
                keysT.add(new KeyVersion(k));
            }

            multiObjectDeleteRequest.setKeys(keysT);
            DeleteObjectsResult delObjRes = s3Client.deleteObjects(multiObjectDeleteRequest);
            return new cfNumberData(delObjRes.getDeletedObjects().size());
        } else {
            String k = key.toString();
            if (k.charAt(0) == '/')
                k = k.substring(1);
            s3Client.deleteObject(new DeleteObjectRequest(bucket, k));
            return new cfNumberData(1);
        }
    } catch (Exception e) {
        throwException(_session, "AmazonS3: " + e.getMessage());
        return new cfNumberData(0);
    }
}
From source file:org.apache.beam.sdk.io.aws.s3.S3FileSystem.java
License:Apache License
private void delete(String bucket, Collection<String> keys) throws IOException {
    checkArgument(keys.size() <= MAX_DELETE_OBJECTS_PER_REQUEST,
            "only %s keys can be deleted per request, but got %s", MAX_DELETE_OBJECTS_PER_REQUEST,
            keys.size());
    List<KeyVersion> deleteKeyVersions = keys.stream().map(KeyVersion::new).collect(Collectors.toList());
    DeleteObjectsRequest request = new DeleteObjectsRequest(bucket).withKeys(deleteKeyVersions);
    try {
        amazonS3.get().deleteObjects(request);
    } catch (AmazonClientException e) {
        throw new IOException(e);
    }
}
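S3 caps a single DeleteObjects request at 1000 keys, which is why this example checks MAX_DELETE_OBJECTS_PER_REQUEST up front. A caller holding an unbounded key set can chunk it into compliant batches; a sketch (deleteAll is a hypothetical helper, not part of the Beam source, and assumes the same KeyVersion and Collectors imports as above):

static void deleteAll(AmazonS3 s3, String bucket, List<String> keys) {
    final int batchSize = 1000; // S3's documented per-request maximum
    for (int i = 0; i < keys.size(); i += batchSize) {
        List<KeyVersion> batch = keys.subList(i, Math.min(i + batchSize, keys.size())).stream()
                .map(KeyVersion::new).collect(Collectors.toList());
        s3.deleteObjects(new DeleteObjectsRequest(bucket).withKeys(batch));
    }
}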
From source file:org.apache.flink.streaming.tests.util.s3.S3UtilProgram.java
License:Apache License
private static void deleteByFullPathPrefix(ParameterTool params) {
    final String bucket = params.getRequired("bucket");
    final String s3prefix = params.getRequired("s3prefix");
    String[] keys = listByFullPathPrefix(bucket, s3prefix).toArray(new String[] {});
    if (keys.length > 0) {
        DeleteObjectsRequest request = new DeleteObjectsRequest(bucket).withKeys(keys);
        AmazonS3ClientBuilder.defaultClient().deleteObjects(request);
    }
}
From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java
License:Apache License
/**
 * Renames Path src to Path dst. Can take place on local fs
 * or remote DFS.
 *
 * Warning: S3 does not support renames. This method does a copy which can take S3 some time
 * to execute with large files and directories. Since there is no Progressable passed in,
 * this can time out jobs.
 *
 * Note: This implementation differs from other S3 drivers. Specifically:
 *       Fails if src is a file and dst is a directory.
 *       Fails if src is a directory and dst is a file.
 *       Fails if the parent of dst does not exist or is a file.
 *       Fails if dst is a directory that is not empty.
 *
 * @param src path to be renamed
 * @param dst new path after rename
 * @throws IOException on failure
 * @return true if rename is successful
 */
public boolean rename(Path src, Path dst) throws IOException {
    LOG.info("Rename path " + src + " to " + dst);

    String srcKey = pathToKey(src);
    String dstKey = pathToKey(dst);

    if (srcKey.length() == 0 || dstKey.length() == 0) {
        LOG.info("rename: src or dst are empty");
        return false;
    }

    if (srcKey.equals(dstKey)) {
        LOG.info("rename: src and dst refer to the same file");
        return true;
    }

    S3AFileStatus srcStatus;
    try {
        srcStatus = getFileStatus(src);
    } catch (FileNotFoundException e) {
        LOG.info("rename: src not found " + src);
        return false;
    }

    S3AFileStatus dstStatus = null;
    try {
        dstStatus = getFileStatus(dst);

        if (srcStatus.isFile() && dstStatus.isDirectory()) {
            LOG.info("rename: src is a file and dst is a directory");
            return false;
        }

        if (srcStatus.isDirectory() && dstStatus.isFile()) {
            LOG.info("rename: src is a directory and dst is a file");
            return false;
        }

        if (dstStatus.isDirectory() && !dstStatus.isEmptyDirectory()) {
            return false;
        }
    } catch (FileNotFoundException e) {
        // Parent must exist
        Path parent = dst.getParent();
        if (!pathToKey(parent).isEmpty()) {
            try {
                S3AFileStatus dstParentStatus = getFileStatus(dst.getParent());
                if (!dstParentStatus.isDirectory()) {
                    return false;
                }
            } catch (FileNotFoundException e2) {
                return false;
            }
        }
    }

    // Ok! Time to start
    if (srcStatus.isFile()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("rename: renaming file " + src + " to " + dst);
        }
        copyFile(srcKey, dstKey);
        delete(src, false);
    } else {
        if (LOG.isDebugEnabled()) {
            LOG.debug("rename: renaming directory " + src + " to " + dst);
        }

        // This is a directory to directory copy
        if (!dstKey.endsWith("/")) {
            dstKey = dstKey + "/";
        }
        if (!srcKey.endsWith("/")) {
            srcKey = srcKey + "/";
        }

        List<DeleteObjectsRequest.KeyVersion> keysToDelete = new ArrayList<DeleteObjectsRequest.KeyVersion>();
        if (dstStatus != null && dstStatus.isEmptyDirectory()) {
            copyFile(srcKey, dstKey);
            statistics.incrementWriteOps(1);
            keysToDelete.add(new DeleteObjectsRequest.KeyVersion(srcKey));
        }

        ListObjectsRequest request = new ListObjectsRequest();
        request.setBucketName(bucket);
        request.setPrefix(srcKey);
        request.setMaxKeys(maxKeys);

        ObjectListing objects = s3.listObjects(request);
        statistics.incrementReadOps(1);

        while (true) {
            for (S3ObjectSummary summary : objects.getObjectSummaries()) {
                keysToDelete.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
                String newDstKey = dstKey + summary.getKey().substring(srcKey.length());
                copyFile(summary.getKey(), newDstKey);

                if (keysToDelete.size() == MAX_ENTRIES_TO_DELETE) {
                    DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucket)
                            .withKeys(keysToDelete);
                    s3.deleteObjects(deleteRequest);
                    statistics.incrementWriteOps(1);
                    keysToDelete.clear();
                }
            }

            if (objects.isTruncated()) {
                objects = s3.listNextBatchOfObjects(objects);
                statistics.incrementReadOps(1);
            } else {
                break;
            }
        }

        if (!keysToDelete.isEmpty()) {
            DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucket);
            deleteRequest.setKeys(keysToDelete);
            s3.deleteObjects(deleteRequest);
            statistics.incrementWriteOps(1);
        }
    }

    if (src.getParent() != dst.getParent()) {
        deleteUnnecessaryFakeDirectories(dst.getParent());
        createFakeDirectoryIfNecessary(src.getParent());
    }
    return true;
}
From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java
License:Apache License
/**
 * Delete a file.
 *
 * @param f the path to delete.
 * @param recursive if path is a directory and set to
 * true, the directory is deleted else throws an exception. In
 * case of a file the recursive can be set to either true or false.
 * @return true if delete is successful else false.
 * @throws IOException
 */
public boolean delete(Path f, boolean recursive) throws IOException {
    LOG.info("Delete path " + f + " - recursive " + recursive);
    S3AFileStatus status;
    try {
        status = getFileStatus(f);
    } catch (FileNotFoundException e) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Couldn't delete " + f + " - does not exist");
        }
        return false;
    }

    String key = pathToKey(f);

    if (status.isDirectory()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("delete: Path is a directory");
        }

        if (!recursive) {
            throw new IOException("Path is a folder: " + f);
        }

        if (!key.endsWith("/")) {
            key = key + "/";
        }

        if (status.isEmptyDirectory()) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Deleting fake empty directory");
            }
            s3.deleteObject(bucket, key);
            statistics.incrementWriteOps(1);
        } else {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Getting objects for directory prefix " + key + " to delete");
            }

            ListObjectsRequest request = new ListObjectsRequest();
            request.setBucketName(bucket);
            request.setPrefix(key);
            // Hopefully not setting a delimiter will cause this to find everything
            //request.setDelimiter("/");
            request.setMaxKeys(maxKeys);

            List<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<DeleteObjectsRequest.KeyVersion>();
            ObjectListing objects = s3.listObjects(request);
            statistics.incrementReadOps(1);
            while (true) {
                for (S3ObjectSummary summary : objects.getObjectSummaries()) {
                    keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Got object to delete " + summary.getKey());
                    }

                    if (keys.size() == MAX_ENTRIES_TO_DELETE) {
                        DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucket).withKeys(keys);
                        s3.deleteObjects(deleteRequest);
                        statistics.incrementWriteOps(1);
                        keys.clear();
                    }
                }

                if (objects.isTruncated()) {
                    objects = s3.listNextBatchOfObjects(objects);
                    statistics.incrementReadOps(1);
                } else {
                    break;
                }
            }

            if (!keys.isEmpty()) {
                DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucket).withKeys(keys);
                s3.deleteObjects(deleteRequest);
                statistics.incrementWriteOps(1);
            }
        }
    } else {
        if (LOG.isDebugEnabled()) {
            LOG.debug("delete: Path is a file");
        }
        s3.deleteObject(bucket, key);
        statistics.incrementWriteOps(1);
    }

    createFakeDirectoryIfNecessary(f.getParent());
    return true;
}
From source file:org.apache.hadoop.fs.s3r.S3RFileSystem.java
License:Apache License
/**
 * Renames Path src to Path dst. Can take place on local fs
 * or remote DFS.
 *
 * Warning: S3 does not support renames. This method does a copy which can
 * take S3 some time to execute with large files and directories. Since
 * there is no Progressable passed in, this can time out jobs.
 *
 * Note: This implementation differs from other S3 drivers. Specifically:
 *       Fails if src is a file and dst is a directory.
 *       Fails if src is a directory and dst is a file.
 *       Fails if the parent of dst does not exist or is a file.
 *       Fails if dst is a directory that is not empty.
 *
 * @param src path to be renamed
 * @param dst new path after rename
 * @throws IOException on failure
 * @return true if rename is successful
 */
public boolean rename(Path src, Path dst) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Rename path {} to {}", src, dst);
    }

    String srcKey = pathToKey(src);
    String dstKey = pathToKey(dst);

    if (srcKey.isEmpty() || dstKey.isEmpty()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("rename: src or dst are empty");
        }
        return false;
    }

    S3RFileStatus srcStatus;
    try {
        srcStatus = getFileStatus(src);
    } catch (FileNotFoundException e) {
        LOG.error("rename: src not found {}", src);
        return false;
    }

    if (srcKey.equals(dstKey)) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("rename: src and dst refer to the same file or directory");
        }
        return srcStatus.isFile();
    }

    S3RFileStatus dstStatus = null;
    try {
        dstStatus = getFileStatus(dst);

        if (srcStatus.isDirectory() && dstStatus.isFile()) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("rename: src is a directory and dst is a file");
            }
            return false;
        }

        if (dstStatus.isDirectory() && !dstStatus.isEmptyDirectory()) {
            return false;
        }
    } catch (FileNotFoundException e) {
        // Parent must exist
        Path parent = dst.getParent();
        if (!pathToKey(parent).isEmpty()) {
            try {
                S3RFileStatus dstParentStatus = getFileStatus(dst.getParent());
                if (!dstParentStatus.isDirectory()) {
                    return false;
                }
            } catch (FileNotFoundException e2) {
                return false;
            }
        }
    }

    // Ok! Time to start
    if (srcStatus.isFile()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("rename: renaming file " + src + " to " + dst);
        }
        if (dstStatus != null && dstStatus.isDirectory()) {
            String newDstKey = dstKey;
            if (!newDstKey.endsWith("/")) {
                newDstKey = newDstKey + "/";
            }
            String filename = srcKey.substring(pathToKey(src.getParent()).length() + 1);
            newDstKey = newDstKey + filename;
            copyFile(srcKey, newDstKey);
        } else {
            copyFile(srcKey, dstKey);
        }
        delete(src, false);
    } else {
        if (LOG.isDebugEnabled()) {
            LOG.debug("rename: renaming directory " + src + " to " + dst);
        }

        // This is a directory to directory copy
        if (!dstKey.endsWith("/")) {
            dstKey = dstKey + "/";
        }
        if (!srcKey.endsWith("/")) {
            srcKey = srcKey + "/";
        }

        // Verify dest is not a child of the source directory
        if (dstKey.startsWith(srcKey)) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("cannot rename a directory to a subdirectory of self");
            }
            return false;
        }

        List<DeleteObjectsRequest.KeyVersion> keysToDelete = new ArrayList<>();
        if (dstStatus != null && dstStatus.isEmptyDirectory()) {
            // delete unnecessary fake directory.
            keysToDelete.add(new DeleteObjectsRequest.KeyVersion(dstKey));
        }

        ListObjectsRequest request = new ListObjectsRequest();
        request.setBucketName(bucket);
        request.setPrefix(srcKey);
        request.setMaxKeys(maxKeys);

        ObjectListing objects = s3.listObjects(request);
        statistics.incrementReadOps(1);

        while (true) {
            for (S3ObjectSummary summary : objects.getObjectSummaries()) {
                keysToDelete.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
                String newDstKey = dstKey + summary.getKey().substring(srcKey.length());
                copyFile(summary.getKey(), newDstKey);

                if (keysToDelete.size() == MAX_ENTRIES_TO_DELETE) {
                    DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucket)
                            .withKeys(keysToDelete);
                    s3.deleteObjects(deleteRequest);
                    statistics.incrementWriteOps(1);
                    keysToDelete.clear();
                }
            }

            if (objects.isTruncated()) {
                objects = s3.listNextBatchOfObjects(objects);
                statistics.incrementReadOps(1);
            } else {
                if (keysToDelete.size() > 0) {
                    DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucket)
                            .withKeys(keysToDelete);
                    s3.deleteObjects(deleteRequest);
                    statistics.incrementWriteOps(1);
                }
                break;
            }
        }
    }

    if (src.getParent() != dst.getParent()) {
        deleteUnnecessaryFakeDirectories(dst.getParent());
        createFakeDirectoryIfNecessary(src.getParent());
    }
    return true;
}
From source file:org.apache.hadoop.fs.s3r.S3RFileSystem.java
License:Apache License
/**
 * Delete a file.
 *
 * @param f the path to delete.
 * @param recursive if path is a directory and set to
 * true, the directory is deleted else throws an exception. In
 * case of a file the recursive can be set to either true or false.
 * @return true if delete is successful else false.
 * @throws IOException
 */
public boolean delete(Path f, boolean recursive) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Delete path " + f + " - recursive " + recursive);
    }
    S3RFileStatus status;
    try {
        status = getFileStatus(f);
    } catch (FileNotFoundException e) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Couldn't delete " + f + " - does not exist");
        }
        return false;
    }

    String key = pathToKey(f);

    if (status.isDirectory()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("delete: Path is a directory");
        }

        if (!recursive && !status.isEmptyDirectory()) {
            throw new IOException("Path is a folder: " + f + " and it is not an empty directory");
        }

        if (!key.endsWith("/")) {
            key = key + "/";
        }

        if (key.equals("/")) {
            LOG.info("s3a cannot delete the root directory");
            return false;
        }

        if (status.isEmptyDirectory()) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Deleting fake empty directory");
            }
            s3.deleteObject(bucket, key);
            statistics.incrementWriteOps(1);
        } else {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Getting objects for directory prefix " + key + " to delete");
            }

            ListObjectsRequest request = new ListObjectsRequest();
            request.setBucketName(bucket);
            request.setPrefix(key);
            // Hopefully not setting a delimiter will cause this to find everything
            //request.setDelimiter("/");
            request.setMaxKeys(maxKeys);

            List<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<>();
            ObjectListing objects = s3.listObjects(request);
            statistics.incrementReadOps(1);
            while (true) {
                for (S3ObjectSummary summary : objects.getObjectSummaries()) {
                    keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Got object to delete " + summary.getKey());
                    }

                    if (keys.size() == MAX_ENTRIES_TO_DELETE) {
                        DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucket).withKeys(keys);
                        s3.deleteObjects(deleteRequest);
                        statistics.incrementWriteOps(1);
                        keys.clear();
                    }
                }

                if (objects.isTruncated()) {
                    objects = s3.listNextBatchOfObjects(objects);
                    statistics.incrementReadOps(1);
                } else {
                    if (!keys.isEmpty()) {
                        DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucket).withKeys(keys);
                        s3.deleteObjects(deleteRequest);
                        statistics.incrementWriteOps(1);
                    }
                    break;
                }
            }
        }
    } else {
        if (LOG.isDebugEnabled()) {
            LOG.debug("delete: Path is a file");
        }
        s3.deleteObject(bucket, key);
        statistics.incrementWriteOps(1);
    }

    createFakeDirectoryIfNecessary(f.getParent());
    return true;
}
From source file:org.apache.jackrabbit.aws.ext.ds.S3Backend.java
License:Apache License
@Override
public Set<DataIdentifier> deleteAllOlderThan(long min) throws DataStoreException {
    long start = System.currentTimeMillis();
    // S3 stores lastModified to lower boundary of timestamp in ms,
    // and hence min is reduced by 1000ms.
    min = min - 1000;
    Set<DataIdentifier> deleteIdSet = new HashSet<DataIdentifier>(30);
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ObjectListing prevObjectListing = s3service.listObjects(bucket);
        while (true) {
            List<DeleteObjectsRequest.KeyVersion> deleteList = new ArrayList<DeleteObjectsRequest.KeyVersion>();
            for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
                DataIdentifier identifier = new DataIdentifier(getIdentifierName(s3ObjSumm.getKey()));
                long lastModified = s3ObjSumm.getLastModified().getTime();
                LOG.debug("Identifier [{}]'s lastModified = [{}]", identifier, lastModified);
                if (!store.isInUse(identifier) && lastModified < min) {
                    LOG.debug("add id [{}] to delete lists", s3ObjSumm.getKey());
                    deleteList.add(new DeleteObjectsRequest.KeyVersion(s3ObjSumm.getKey()));
                    deleteIdSet.add(identifier);
                }
            }
            if (deleteList.size() > 0) {
                DeleteObjectsRequest delObjsReq = new DeleteObjectsRequest(bucket);
                delObjsReq.setKeys(deleteList);
                DeleteObjectsResult dobjs = s3service.deleteObjects(delObjsReq);
                if (dobjs.getDeletedObjects().size() != deleteList.size()) {
                    throw new DataStoreException("Incomplete delete object request. only "
                            + dobjs.getDeletedObjects().size() + " out of " + deleteList.size()
                            + " are deleted");
                } else {
                    LOG.debug("[{}] records deleted from datastore", deleteList);
                }
            }
            if (!prevObjectListing.isTruncated()) {
                break;
            }
            prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.info("deleteAllOlderThan: min=[{}] exit. Deleted[{}] records. Number of records deleted [{}] took [{}]ms",
            new Object[] { min, deleteIdSet, deleteIdSet.size(), (System.currentTimeMillis() - start) });
    return deleteIdSet;
}