Usage examples for com.amazonaws.services.s3.AmazonS3Client#deleteObject(String, String)
@Override public void deleteObject(String bucketName, String key) throws SdkClientException, AmazonServiceException
From source file:backup.store.s3.S3BackupStoreUtil.java
License:Apache License
/**
 * Deletes every object in the given S3 bucket.
 *
 * <p>Fix: the original read only the first {@link ObjectListing} page, so
 * buckets with more than one page of keys (~1000) were only partially
 * emptied. This version follows the pagination chain via
 * {@code isTruncated()} / {@code listNextBatchOfObjects()}.
 *
 * @param bucketName name of the bucket to empty; the bucket itself is kept
 * @throws Exception on any AWS client/service failure
 */
public static void removeAllObjects(String bucketName) throws Exception {
    AmazonS3Client client = new AmazonS3Client(new DefaultAWSCredentialsProviderChain());
    ObjectListing listing = client.listObjects(bucketName);
    while (true) {
        for (S3ObjectSummary objectSummary : listing.getObjectSummaries()) {
            client.deleteObject(bucketName, objectSummary.getKey());
        }
        // Listings are paged; keep fetching until S3 reports the last page.
        if (!listing.isTruncated()) {
            break;
        }
        listing = client.listNextBatchOfObjects(listing);
    }
}
From source file:backup.store.s3.S3BackupStoreUtil.java
License:Apache License
/**
 * Deletes every object in the given S3 bucket whose key starts with
 * {@code prefix}.
 *
 * <p>Fixes over the original: (1) only the first listing page was
 * processed, so at most ~1000 keys were ever examined — this version
 * paginates; (2) the prefix was filtered client-side after listing the
 * whole bucket — this version asks S3 to list only the matching keys,
 * which preserves the original {@code startsWith} semantics.
 *
 * @param bucketName name of the bucket
 * @param prefix     key prefix selecting the objects to delete
 * @throws Exception on any AWS client/service failure
 */
public static void removeAllObjects(String bucketName, String prefix) throws Exception {
    AmazonS3Client client = new AmazonS3Client(new DefaultAWSCredentialsProviderChain());
    // Server-side prefix filtering: equivalent to key.startsWith(prefix).
    ObjectListing listing = client.listObjects(bucketName, prefix);
    while (true) {
        for (S3ObjectSummary objectSummary : listing.getObjectSummaries()) {
            client.deleteObject(bucketName, objectSummary.getKey());
        }
        if (!listing.isTruncated()) {
            break;
        }
        listing = client.listNextBatchOfObjects(listing);
    }
}
From source file:ch.entwine.weblounge.maven.S3DeployMojo.java
License:Open Source License
/**
 * Uploads all configured resource files to the S3 bucket, optionally
 * gzip-compressing JavaScript and CSS files first, then blocks until every
 * scheduled transfer has finished.
 *
 * {@inheritDoc}
 *
 * @see org.apache.maven.plugin.Mojo#execute()
 * @throws MojoExecutionException if an upload fails or is recorded as
 *         erroneous by the progress listener
 */
public void execute() throws MojoExecutionException, MojoFailureException {
    // Setup AWS S3 client
    AWSCredentials credentials = new BasicAWSCredentials(awsAccessKey, awsSecretKey);
    AmazonS3Client uploadClient = new AmazonS3Client(credentials);
    TransferManager transfers = new TransferManager(credentials);

    // Normalize the key prefix: no leading slash, exactly one trailing slash.
    if (keyPrefix.startsWith("/"))
        keyPrefix = keyPrefix.substring(1);
    if (!keyPrefix.endsWith("/"))
        keyPrefix = keyPrefix + "/";

    // Keep track of how much data has been transferred
    long totalBytesTransferred = 0L;
    int items = 0;
    Queue<Upload> uploads = new LinkedBlockingQueue<Upload>();

    try {
        // Check if S3 bucket exists; without it there is nothing to do.
        getLog().debug("Checking whether bucket " + bucket + " exists");
        if (!uploadClient.doesBucketExist(bucket)) {
            getLog().error("Desired bucket '" + bucket + "' does not exist!");
            return;
        }

        getLog().debug("Collecting files to transfer from " + resources.getDirectory());
        List<File> res = getResources();

        for (File file : res) {
            // Make path of resource relative to resources directory
            String filename = file.getName();
            String extension = FilenameUtils.getExtension(filename);
            String path = file.getPath().substring(resources.getDirectory().length());
            String key = concat("/", keyPrefix, path).substring(1);

            // Delete old file version in bucket before scheduling the new
            // upload. NOTE(review): delete-then-upload is not atomic — the
            // object is briefly missing between the two calls.
            getLog().debug("Removing existing object at " + key);
            uploadClient.deleteObject(bucket, key);

            // Setup meta data; 'valid' is interpreted as hours of cacheability.
            ObjectMetadata meta = new ObjectMetadata();
            meta.setCacheControl("public, max-age=" + String.valueOf(valid * 3600));

            FileInputStream fis = null;
            GZIPOutputStream gzipos = null;
            final File fileToUpload;

            if (gzip && ("js".equals(extension) || "css".equals(extension))) {
                // Compress js/css into a temp file and upload that instead,
                // marking the object Content-Encoding: gzip.
                try {
                    fis = new FileInputStream(file);
                    File gzFile = File.createTempFile(file.getName(), null);
                    gzipos = new GZIPOutputStream(new FileOutputStream(gzFile));
                    IOUtils.copy(fis, gzipos);
                    fileToUpload = gzFile;
                    meta.setContentEncoding("gzip");
                    if ("js".equals(extension))
                        meta.setContentType("text/javascript");
                    if ("css".equals(extension))
                        meta.setContentType("text/css");
                } catch (FileNotFoundException e) {
                    // Skip this resource but keep deploying the rest.
                    getLog().error(e);
                    continue;
                } catch (IOException e) {
                    getLog().error(e);
                    continue;
                } finally {
                    IOUtils.closeQuietly(fis);
                    IOUtils.closeQuietly(gzipos);
                }
            } else {
                fileToUpload = file;
            }

            // Do a random check for existing errors before starting the next upload
            if (erroneousUpload != null)
                break;

            // Create put object request
            long bytesToTransfer = fileToUpload.length();
            totalBytesTransferred += bytesToTransfer;
            PutObjectRequest request = new PutObjectRequest(bucket, key, fileToUpload);
            request.setProgressListener(new UploadListener(credentials, bucket, key, bytesToTransfer));
            request.setMetadata(meta);

            // Schedule put object request (asynchronous; completion is awaited below).
            // NOTE(review): the (int) cast truncates for files >= 2 GiB —
            // byteCountToDisplaySize has a long overload; confirm and drop the cast.
            getLog().info(
                    "Uploading " + key + " (" + FileUtils.byteCountToDisplaySize((int) bytesToTransfer) + ")");
            Upload upload = transfers.upload(request);
            uploads.add(upload);
            items++;
        }
    } catch (AmazonServiceException e) {
        getLog().error("Uploading resources failed: " + e.getMessage());
    } catch (AmazonClientException e) {
        getLog().error("Uploading resources failed: " + e.getMessage());
    }

    // Wait for uploads to be finished
    String currentUpload = null;
    try {
        Thread.sleep(1000);
        getLog().info("Waiting for " + uploads.size() + " uploads to finish...");
        while (!uploads.isEmpty()) {
            Upload upload = uploads.poll();
            // Strip the SDK's "Uploading to " description prefix to get the target key.
            currentUpload = upload.getDescription().substring("Uploading to ".length());
            if (TransferState.InProgress.equals(upload.getState()))
                getLog().debug("Waiting for upload " + currentUpload + " to finish");
            upload.waitForUploadResult();
        }
    } catch (AmazonServiceException e) {
        throw new MojoExecutionException("Error while uploading " + currentUpload);
    } catch (AmazonClientException e) {
        throw new MojoExecutionException("Error while uploading " + currentUpload);
    } catch (InterruptedException e) {
        // NOTE(review): interrupt status is not restored here; remaining
        // uploads are abandoned silently.
        getLog().debug("Interrupted while waiting for upload to finish");
    }

    // Check for errors that happened outside of the actual uploading
    if (erroneousUpload != null) {
        throw new MojoExecutionException("Error while uploading " + erroneousUpload);
    }

    // NOTE(review): same (int) truncation concern as above for totals >= 2 GiB.
    getLog().info("Deployed " + items + " files ("
            + FileUtils.byteCountToDisplaySize((int) totalBytesTransferred) + ") to s3://" + bucket);
}
From source file:com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java
License:Open Source License
/**
 * Deletes the requested object on the backend S3 store on behalf of the
 * requesting user and answers with a 204 NO CONTENT reply.
 *
 * @param request the delete request carrying bucket and key
 * @return the populated reply on success
 * @throws S3Exception translated from any backend AmazonServiceException
 */
@Override
public DeleteObjectResponseType deleteObject(DeleteObjectType request) throws S3Exception {
    User requestUser = getRequestUser(request);
    try {
        // Resolve a backend client bound to the requesting user.
        OsgInternalS3Client internal = getS3Client(requestUser);
        AmazonS3Client backend = internal.getS3Client();
        backend.deleteObject(request.getBucket(), request.getKey());

        // Build the 204 response only after the backend call succeeded.
        DeleteObjectResponseType reply = request.getReply();
        reply.setStatus(HttpResponseStatus.NO_CONTENT);
        reply.setStatusMessage("NO CONTENT");
        return reply;
    } catch (AmazonServiceException ex) {
        LOG.debug("Error from backend", ex);
        throw S3ExceptionMapper.fromAWSJavaSDK(ex);
    }
}
From source file:com.mweagle.tereus.aws.S3Resource.java
License:Open Source License
@Override public void close() { if (!this.released && this.resourceURL.isPresent()) { DefaultAWSCredentialsProviderChain credentialProviderChain = new DefaultAWSCredentialsProviderChain(); final AmazonS3Client awsClient = new AmazonS3Client(credentialProviderChain); final String[] parts = this.resourceURL.get().split("/"); final String keyname = parts[parts.length - 1]; awsClient.deleteObject(bucketName, keyname); }//from w ww . ja va2 s . c o m }
From source file:com.netflix.exhibitor.core.s3.S3ClientFactoryImpl.java
License:Apache License
/**
 * Creates a new {@link S3Client} whose underlying {@link AmazonS3Client}
 * is reference-counted and can be hot-swapped via
 * {@code changeCredentials(...)}. Closing the client releases the current
 * delegate.
 *
 * <p>Fix: {@code changeCredentials} previously built the replacement
 * client from the enclosing method's {@code credentials} parameter instead
 * of its own {@code credential} argument, so rotating credentials silently
 * kept using the original keys. It now uses the argument.
 *
 * @param credentials the initial credentials; may be null for no client
 * @return a new reference-counted S3 client facade
 * @throws Exception if the initial client cannot be created
 */
@Override
public S3Client makeNewClient(final S3Credential credentials) throws Exception {
    return new S3Client() {
        private final AtomicReference<RefCountedClient> client = new AtomicReference<RefCountedClient>(null);

        {
            changeCredentials(credentials);
        }

        @Override
        public void changeCredentials(S3Credential credential) throws Exception {
            // Build the replacement from the *argument* (bug fix — was the
            // captured outer parameter), then atomically swap and retire
            // the previous delegate.
            RefCountedClient newRefCountedClient = (credential != null)
                    ? new RefCountedClient(new AmazonS3Client(new BasicAWSCredentials(
                            credential.getAccessKeyId(), credential.getSecretAccessKey())))
                    : null;
            RefCountedClient oldRefCountedClient = client.getAndSet(newRefCountedClient);
            if (oldRefCountedClient != null) {
                oldRefCountedClient.markForDelete();
            }
        }

        @Override
        public void close() throws IOException {
            try {
                // Swapping in null retires the current delegate.
                changeCredentials(null);
            } catch (Exception e) {
                throw new IOException(e);
            }
        }

        @Override
        public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest request)
                throws Exception {
            RefCountedClient holder = client.get();
            AmazonS3Client amazonS3Client = holder.useClient();
            try {
                return amazonS3Client.initiateMultipartUpload(request);
            } finally {
                holder.release();
            }
        }

        @Override
        public PutObjectResult putObject(PutObjectRequest request) throws Exception {
            RefCountedClient holder = client.get();
            AmazonS3Client amazonS3Client = holder.useClient();
            try {
                return amazonS3Client.putObject(request);
            } finally {
                holder.release();
            }
        }

        @Override
        public S3Object getObject(String bucket, String key) throws Exception {
            RefCountedClient holder = client.get();
            AmazonS3Client amazonS3Client = holder.useClient();
            try {
                return amazonS3Client.getObject(bucket, key);
            } finally {
                holder.release();
            }
        }

        @Override
        public ObjectListing listObjects(ListObjectsRequest request) throws Exception {
            RefCountedClient holder = client.get();
            AmazonS3Client amazonS3Client = holder.useClient();
            try {
                return amazonS3Client.listObjects(request);
            } finally {
                holder.release();
            }
        }

        @Override
        public ObjectListing listNextBatchOfObjects(ObjectListing previousObjectListing) throws Exception {
            RefCountedClient holder = client.get();
            AmazonS3Client amazonS3Client = holder.useClient();
            try {
                return amazonS3Client.listNextBatchOfObjects(previousObjectListing);
            } finally {
                holder.release();
            }
        }

        @Override
        public void deleteObject(String bucket, String key) throws Exception {
            RefCountedClient holder = client.get();
            AmazonS3Client amazonS3Client = holder.useClient();
            try {
                amazonS3Client.deleteObject(bucket, key);
            } finally {
                holder.release();
            }
        }

        @Override
        public UploadPartResult uploadPart(UploadPartRequest request) throws Exception {
            RefCountedClient holder = client.get();
            AmazonS3Client amazonS3Client = holder.useClient();
            try {
                return amazonS3Client.uploadPart(request);
            } finally {
                holder.release();
            }
        }

        @Override
        public void completeMultipartUpload(CompleteMultipartUploadRequest request) throws Exception {
            RefCountedClient holder = client.get();
            AmazonS3Client amazonS3Client = holder.useClient();
            try {
                amazonS3Client.completeMultipartUpload(request);
            } finally {
                holder.release();
            }
        }

        @Override
        public void abortMultipartUpload(AbortMultipartUploadRequest request) throws Exception {
            RefCountedClient holder = client.get();
            AmazonS3Client amazonS3Client = holder.useClient();
            try {
                amazonS3Client.abortMultipartUpload(request);
            } finally {
                holder.release();
            }
        }
    };
}
From source file:com.netflix.exhibitor.core.s3.S3ClientImpl.java
License:Apache License
@Override public void deleteObject(String bucket, String key) throws Exception { RefCountedClient holder = client.get(); AmazonS3Client amazonS3Client = holder.useClient(); try {//from ww w . j a v a 2 s.c om amazonS3Client.deleteObject(bucket, key); } finally { holder.release(); } }
From source file:dashboard.AmazonLogs.java
License:Open Source License
public int readAmazonLogs(int n, String AWS_USER, String AWS_PASS, String IPfile, String ERRfile, String bucketName, String DELETE_PROCESSED_LOGS, String API_KEY, String TOKEN, String apiuser, String apipass) throws Exception { if (n < 1) return 0; int eventsNumber = 0; String line = null;/* w w w.j a va 2 s . co m*/ int begin = 0; int zips = 0; int deletedZips = 0; int mixpanelStatus = 0; String registrant = ""; String ip = ""; String prevIP = ""; Mixpanel mix = new Mixpanel(); Whois w = new Whois(apiuser, apipass); int index = -1; Registrant r; ArrayList<Registrant> rList = new ArrayList<Registrant>(); ArrayList<Registrant> eList = new ArrayList<Registrant>(); IPList ipl = new IPList(); IPList errl = new IPList(); // Log files Bucket AWSCredentials credentials = new BasicAWSCredentials(AWS_USER, AWS_PASS); AmazonS3Client s3Client = new AmazonS3Client(credentials); ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName); BufferedReader br = null; ObjectListing objectListing = s3Client.listObjects(listObjectsRequest); ObjectListing nextObjectListing = objectListing; zips = 0; Boolean more = true; if (objectListing == null) more = false; else { ipl.loadList(rList, IPfile); ipl.printList(rList, 30); } while (more) { // Reads 1000 files for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) { // Handle ZIP files try { // Open and send to mixpanel events of one ZIP file String key = objectSummary.getKey(); S3Object object = s3Client.getObject(new GetObjectRequest(bucketName, key)); // Extract ZIP and read Object to reader br = new BufferedReader(new InputStreamReader(new GZIPInputStream(object.getObjectContent()))); zips++; // Read the lines from the unzipped file, break it and send to Mixpanel while ((line = br.readLine()) != null) { if (line.startsWith("#")) continue; if (line.trim().equals("")) continue; String[] values = line.split("\\s"); String eventTime = values[0] + " " + values[1]; ip = values[4]; if (ip != 
prevIP) { prevIP = ip; index = ipl.ipInList(ip, rList); if (index >= 0) { r = rList.get(index); registrant = r.name; // Update counter for this IP r.counter = r.counter + 1; rList.set(index, r); } else { // WHOIS - Check registrant of this IP address registrant = w.whoisIP(ip); // if there was an error, try again if (registrant.equals("ERROR")) registrant = w.whoisIP(ip); // if there was a second error, add it to errors list if (registrant.equals("ERROR")) { eList.add(new Registrant(ip, registrant, 1)); } else { // If name includes a comma, exclude the comma registrant = registrant.replace(",", ""); rList.add(new Registrant(ip, registrant, 1)); } } } String method = values[5]; String fileName = values[7]; String statusCode = values[8]; String userAgent = values[10]; String fName = fileName; if (fileName.contains("gigaspaces-")) { begin = fileName.lastIndexOf("gigaspaces-") + 11; fName = fileName.substring(begin, fileName.length()); } eventsNumber++; System.out.println(eventsNumber + ": " + eventTime + " " + ip + " " + registrant); // ==================================================== // Track the event in Mixpanel (using the POST import) // ==================================================== mixpanelStatus = mix.postCDNEventToMixpanel(API_KEY, TOKEN, ip, "Cloudfront CDN", eventTime, method, fileName, fName, userAgent, statusCode, registrant); } // while on ZIP file lines if (mixpanelStatus == 1 & DELETE_PROCESSED_LOGS.equals("YES")) { // Delete the CDN log ZIP file s3Client.deleteObject(bucketName, key); System.out.println("========= Deleted Zip " + zips + " ===== List Size " + rList.size() + " =========="); deletedZips++; } } catch (IOException e) { e.printStackTrace(); return eventsNumber; } finally { if (br != null) { br.close(); } if (eventsNumber >= n) { System.out.println("\n>>> " + eventsNumber + " events in " + zips + " Zip files. 
Deleted " + deletedZips + " Zip files.\n"); ipl.printList(rList, 100); ipl.saveList(rList, IPfile); if (!eList.isEmpty()) { SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd-HH-mm"); String fName = ERRfile + sdf.format(new Date()) + ".txt"; System.out.println("\n>>> " + eList.size() + " DomainTools errors:"); errl.saveList(eList, fName); } else System.out.println("\n>>> No DomainTools errors"); return eventsNumber; } } } // for (continue to next ZIP file // If there are more ZIP files, read next batch of 1000 if (objectListing.isTruncated()) { nextObjectListing = s3Client.listNextBatchOfObjects(objectListing); objectListing = nextObjectListing; } else more = false; // no more files } // while next objectListing System.out.println("\n>>> " + eventsNumber + " events in " + zips + " Zip files. Deleted " + deletedZips + " Zip files.\n"); ipl.printList(rList, 50); ipl.saveList(rList, IPfile); if (!eList.isEmpty()) { SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd-HH-mm"); String fName = ERRfile + sdf.format(new Date()) + ".txt"; System.out.println("\n>>> " + eList.size() + " DomainTools errors:"); errl.saveList(eList, fName); } else System.out.println("\n>>> No DomainTools errors"); return eventsNumber; }
From source file:dashboard.ImportCDN.java
License:Open Source License
public static int readAmazonLogs(int n) throws Exception { if (n < 1) return 0; int eventsNumber = 0; int begin = 0; int zips = 0; int postFailures = 0; int deletedZips = 0; int mixpanelStatus = 0; String line = null;//from w ww.java 2 s . co m // Log files Bucket AWSCredentials credentials = new BasicAWSCredentials(AWS_USER, AWS_PASS); AmazonS3Client s3Client = new AmazonS3Client(credentials); ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName); // Set MARKER - from which Log File to start reading // listObjectsRequest.setMarker("E2DXXJR0N8BXOK.2013-03-18-10.ICK6IvaY.gz"); BufferedReader br = null; ObjectListing objectListing = s3Client.listObjects(listObjectsRequest); ObjectListing nextObjectListing = objectListing; zips = 0; Boolean more = true; if (objectListing == null) more = false; while (more) { // Reads 1000 files for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) { // Handle ZIP files try { // Open and send to mixpanel events of one ZIP file String key = objectSummary.getKey(); S3Object object = s3Client.getObject(new GetObjectRequest(bucketName, key)); // Extract ZIP and read Object to reader br = new BufferedReader(new InputStreamReader(new GZIPInputStream(object.getObjectContent()))); zips++; // Read the lines from the unzipped file, break it and send to Mixpanel while ((line = br.readLine()) != null) { if (line.startsWith("#")) continue; if (line.trim().equals("")) continue; String[] values = line.split("\\s"); String eventTime = values[0] + " " + values[1]; String ip = values[4]; String method = values[5]; String fileName = values[7]; String statusCode = values[8]; String userAgent = values[10]; String fName = fileName; if (fileName.contains("gigaspaces-")) { begin = fileName.lastIndexOf("gigaspaces-") + 11; fName = fileName.substring(begin, fileName.length()); } eventsNumber++; System.out.println(eventsNumber + ": " + eventTime + " " + ip); // Track the event in Mixpanel (using the POST 
import) mixpanelStatus = postCDNEventToMixpanel(ip, "Cloudfront CDN", eventTime, method, fileName, fName, userAgent, statusCode); // If failed if (mixpanelStatus != 1) { postFailures++; System.out.println(" >>> POST event to Mixpanel Failed!! " + postFailures); } } // while on ZIP file lines if (mixpanelStatus == 1 & DELETE_PROCESSED_LOGS.equals("YES")) { // Delete the CDN log ZIP file s3Client.deleteObject(bucketName, key); System.out.println("============ Deleted Zip " + zips + " ============"); deletedZips++; } else System.out.println("============ Zip " + zips + " (not deleted) ============"); } catch (IOException e) { e.printStackTrace(); return eventsNumber; } finally { if (br != null) { br.close(); } if (eventsNumber >= n) { System.out.println("\n>>> " + eventsNumber + " events in " + zips + " Zip files."); System.out.println("\n>>> " + deletedZips + " Zip files deleted."); System.out.println("\n>>> " + postFailures + " post Failures\n"); return eventsNumber; } } } // for (continue to next ZIP file // If there are more ZIP files, read next batch of 1000 if (objectListing.isTruncated()) { nextObjectListing = s3Client.listNextBatchOfObjects(objectListing); objectListing = nextObjectListing; } else more = false; // no more files } // while next objectListing System.out.println("\n>>> " + eventsNumber + " events in " + zips + " Zip files."); System.out.println("\n>>> " + deletedZips + " Zip files deleted."); System.out.println("\n>>> " + postFailures + " post Failures\n"); return eventsNumber; }
From source file:n3phele.storage.s3.CloudStorageImpl.java
License:Open Source License
/**
 * Attempts to delete {@code filename} from the repository's root bucket.
 *
 * @param repo     repository describing credentials, endpoint and root bucket
 * @param filename key of the object to remove
 * @return true when the delete call completed without an AWS exception
 */
public boolean deleteFile(Repository repo, String filename) {
    Credential credential = repo.getCredential().decrypt();
    AmazonS3Client client = new AmazonS3Client(
            new BasicAWSCredentials(credential.getAccount(), credential.getSecret()));
    client.setEndpoint(repo.getTarget().toString());

    boolean deleted = false;
    try {
        client.deleteObject(repo.getRoot(), filename);
        deleted = true;
    } catch (AmazonServiceException e) {
        // Service-side rejection: warn and report failure.
        log.log(Level.WARNING, "Service Error processing " + repo, e);
    } catch (AmazonClientException e) {
        // Client/transport failure: more severe, still report failure.
        log.log(Level.SEVERE, "Client Error processing " + repo, e);
    }
    return deleted;
}