Example usage for com.amazonaws.services.s3 AmazonS3 deleteObject

Introduction

This page collects example usages of the com.amazonaws.services.s3.AmazonS3 method deleteObject, drawn from open-source projects.

Prototype

public void deleteObject(String bucketName, String key) throws SdkClientException, AmazonServiceException;

Documentation

Deletes the specified object in the specified bucket.
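
A minimal, hedged sketch of a single-object delete follows; the client construction, bucket name, and key are placeholder assumptions rather than values from the examples below.

import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class DeleteObjectSketch {
    public static void main(String[] args) {
        // Assumes credentials and region come from the default provider chain.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        try {
            // Hypothetical bucket and key names.
            s3.deleteObject("my-bucket", "path/to/object.txt");
        } catch (AmazonServiceException ase) {
            // The request reached S3 but was rejected, e.g. for missing permissions.
            System.err.println("Delete failed: " + ase.getErrorMessage());
        }
    }
}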

Usage

From source file: jp.classmethod.aws.gradle.s3.AmazonS3DeleteAllFilesTask.java

License: Apache License

@TaskAction
public void delete() {
    // to enable conventionMappings feature
    String bucketName = getBucketName();
    String prefix = getPrefix();

    if (bucketName == null) {
        throw new GradleException("bucketName is not specified");
    }

    AmazonS3PluginExtension ext = getProject().getExtensions().getByType(AmazonS3PluginExtension.class);
    AmazonS3 s3 = ext.getClient();

    if (prefix.startsWith("/")) {
        prefix = prefix.substring(1);
    }

    getLogger().info("Delete s3://{}/{}*", bucketName, prefix);

    List<S3ObjectSummary> objectSummaries;
    while ((objectSummaries = s3.listObjects(bucketName, prefix).getObjectSummaries()).isEmpty() == false) {
        objectSummaries.forEach(os -> {
            getLogger().info("  Deleting... s3://{}/{}", bucketName, os.getKey());
            s3.deleteObject(bucketName, os.getKey());
        });
    }
}
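
Note that a single listObjects call returns at most 1,000 object summaries, so the task re-lists the prefix until the result comes back empty rather than walking one truncated listing.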

From source file: jp.classmethod.aws.gradle.s3.AmazonS3FileDeleteTask.java

License: Apache License

@TaskAction
public void delete() {
    // to enable conventionMappings feature
    String bucketName = getBucketName();
    String key = getKey();

    if (bucketName == null) {
        throw new GradleException("bucketName is not specified");
    }
    if (key == null) {
        throw new GradleException("key is not specified");
    }

    AmazonS3PluginExtension ext = getProject().getExtensions().getByType(AmazonS3PluginExtension.class);
    AmazonS3 s3 = ext.getClient();

    getLogger().info("deleting... " + bucketName + "/" + key);
    s3.deleteObject(bucketName, key);
}

From source file: jp.classmethod.aws.gradle.s3.DeleteBucketTask.java

License: Apache License

@TaskAction
public void deleteBucket() {
    // to enable conventionMappings feature
    String bucketName = getBucketName();
    boolean ifExists = isIfExists();

    if (bucketName == null) {
        throw new GradleException("bucketName is not specified");
    }

    AmazonS3PluginExtension ext = getProject().getExtensions().getByType(AmazonS3PluginExtension.class);
    AmazonS3 s3 = ext.getClient();

    if (ifExists == false || exists(s3)) {
        if (deleteObjects) {
            getLogger().info("Delete all S3 objects in bucket [{}]", bucketName);
            ObjectListing objectListing = s3.listObjects(bucketName);
            while (objectListing.getObjectSummaries().isEmpty() == false) {
                objectListing.getObjectSummaries().forEach(summary -> {
                    getLogger().info(" => delete s3://{}/{}", bucketName, summary.getKey());
                    s3.deleteObject(bucketName, summary.getKey());
                });
                objectListing = s3.listNextBatchOfObjects(objectListing);
            }
        }
        s3.deleteBucket(bucketName);
        getLogger().info("S3 bucket {} is deleted", bucketName);
    } else {
        getLogger().debug("S3 bucket {} does not exist", bucketName);
    }
}

From source file: jp.classmethod.aws.gradle.s3.SyncTask.java

License: Apache License

private void deleteAbsent(AmazonS3 s3, String prefix) {
    // to enable conventionMappings feature
    String bucketName = getBucketName();
    String pathPrefix = getNormalizedPathPrefix();

    s3.listObjects(bucketName, prefix).getObjectSummaries().forEach(os -> {
        File f = getProject().file(pathPrefix + os.getKey().substring(prefix.length()));
        if (f.exists() == false) {
            getLogger().info("deleting... s3://{}/{}", bucketName, os.getKey());
            s3.deleteObject(bucketName, os.getKey());
        }
    });
}

From source file: org.apache.jackrabbit.aws.ext.Utils.java

License: Apache License

/**
 * Delete S3 bucket. This method first deletes all objects from the bucket
 * and then deletes the empty bucket.
 * 
 * @param bucketName the bucket name.
 */
public static void deleteBucket(final String bucketName) throws IOException {
    Properties prop = readConfig(DEFAULT_CONFIG_FILE);
    AmazonS3 s3service = openService(prop);
    ObjectListing prevObjectListing = s3service.listObjects(bucketName);
    while (true) {
        for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
            s3service.deleteObject(bucketName, s3ObjSumm.getKey());
        }
        if (!prevObjectListing.isTruncated()) {
            break;
        }
        prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
    }
    s3service.deleteBucket(bucketName);
}
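
Deleting keys one at a time works but costs one request per object. The SDK also exposes a multi-object delete that removes up to 1,000 keys per request; the following is a hedged sketch of that variant (the class and method names are illustrative, not from the source above):

import java.util.List;
import java.util.stream.Collectors;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class BatchedBucketCleaner {
    /**
     * Sketch: delete each listed page of keys with a single multi-object
     * delete request, then remove the (now empty) bucket.
     */
    public static void deleteBucketBatched(AmazonS3 s3, String bucketName) {
        ObjectListing listing = s3.listObjects(bucketName);
        while (true) {
            List<String> keys = listing.getObjectSummaries().stream()
                    .map(S3ObjectSummary::getKey)
                    .collect(Collectors.toList());
            if (!keys.isEmpty()) {
                s3.deleteObjects(new DeleteObjectsRequest(bucketName)
                        .withKeys(keys.toArray(new String[0])));
            }
            if (!listing.isTruncated()) {
                break;
            }
            listing = s3.listNextBatchOfObjects(listing);
        }
        s3.deleteBucket(bucketName);
    }
}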

From source file: org.boriken.s3fileuploader.S3SampleRefactored.java

License: Open Source License

public static void deleteFile(AmazonS3 s3, String bucketName, String key) {
    /*
     * Delete an object - Unless versioning has been turned on for your bucket,
     * there is no way to undelete an object, so use caution when deleting objects.
     */
    System.out.println("Deleting an object\n");
    s3.deleteObject(bucketName, key);
}
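
As the comment notes, versioning changes the semantics: on a versioned bucket the call above only adds a delete marker. Permanently removing a specific version takes deleteVersion instead; a minimal sketch, where the version ID comes from the caller:

public static void deleteFileVersion(AmazonS3 s3, String bucketName, String key, String versionId) {
    // Permanently removes a single version from a versioned bucket.
    s3.deleteVersion(bucketName, key, versionId);
}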

From source file: org.cto.VVS3Box.S3Sample.java

License: Open Source License

public static void main(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     *            AwsCredentials.properties file before you try to run this
     *            sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "lior.test-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possibly since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file: org.huahinframework.emanager.util.S3Utils.java

License: Apache License

/**
 * Deletes the object identified by an s3://bucket/key URI.
 *
 * @param s3 the AmazonS3 client
 * @param path the s3://bucket/key URI of the object to delete
 * @throws URISyntaxException if the path is not a valid URI
 */
public static void delete(AmazonS3 s3, String path) throws URISyntaxException {
    URI uri = new URI(path);
    String key = uri.getPath().substring(1);
    s3.deleteObject(uri.getHost(), key);
}
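
A hypothetical usage: the URI's host becomes the bucket and the path, minus its leading slash, becomes the key.

// Equivalent to s3.deleteObject("my-bucket", "logs/app.log").
delete(s3, "s3://my-bucket/logs/app.log");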

From source file: org.jumpmind.symmetric.io.RedshiftBulkDatabaseWriter.java

License: Open Source License

protected void flush() {
    if (loadedRows > 0) {
        stagedInputFile.close();
        statistics.get(batch).startTimer(DataWriterStatisticConstants.DATABASEMILLIS);
        AmazonS3 s3client = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey));
        if (isNotBlank(s3Endpoint)) {
            s3client.setEndpoint(s3Endpoint);
        }
        String objectKey = stagedInputFile.getFile().getName();
        try {
            s3client.putObject(bucket, objectKey, stagedInputFile.getFile());
        } catch (AmazonServiceException ase) {
            log.error("Exception from AWS service: " + ase.getMessage());
        } catch (AmazonClientException ace) {
            log.error("Exception from AWS client: " + ace.getMessage());
        }

        try {
            JdbcSqlTransaction jdbcTransaction = (JdbcSqlTransaction) transaction;
            Connection c = jdbcTransaction.getConnection();
            String sql = "COPY " + getTargetTable().getFullyQualifiedTableName() + " ("
                    + Table.getCommaDeliminatedColumns(table.getColumns()) + ") FROM 's3://" + bucket + "/"
                    + objectKey + "' CREDENTIALS 'aws_access_key_id=" + accessKey + ";aws_secret_access_key="
                    + secretKey + "' CSV DATEFORMAT 'YYYY-MM-DD HH:MI:SS' "
                    + (needsExplicitIds ? "EXPLICIT_IDS" : "")
                    + (isNotBlank(appendToCopyCommand) ? (" " + appendToCopyCommand) : "");
            Statement stmt = c.createStatement();

            log.debug(sql);
            stmt.execute(sql);
            stmt.close();
            transaction.commit();
        } catch (SQLException ex) {
            throw platform.getSqlTemplate().translate(ex);
        } finally {
            statistics.get(batch).stopTimer(DataWriterStatisticConstants.DATABASEMILLIS);
        }

        stagedInputFile.delete();
        try {
            s3client.deleteObject(bucket, objectKey);
        } catch (AmazonServiceException ase) {
            log.error("Exception from AWS service: " + ase.getMessage());
        } catch (AmazonClientException ace) {
            log.error("Exception from AWS client: " + ace.getMessage());
        }

        createStagingFile();
        loadedRows = 0;
        loadedBytes = 0;
    }
}
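
Here deleteObject is cleanup rather than the main act: the writer stages a CSV file in S3, points a Redshift COPY command at it, and then deletes the staged object once the load has committed.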

From source file: org.pentaho.amazon.emr.job.AmazonElasticMapReduceJobExecutor.java

License: Apache License

public Result execute(Result result, int arg1) throws KettleException {
    Log4jFileAppender appender = null;
    String logFileName = "pdi-" + this.getName(); //$NON-NLS-1$
    try {
        appender = LogWriter.createFileAppender(logFileName, true, false);
        LogWriter.getInstance().addAppender(appender);
        log.setLogLevel(parentJob.getLogLevel());
    } catch (Exception e) {
        logError(BaseMessages.getString(PKG, "AmazonElasticMapReduceJobExecutor.FailedToOpenLogFile", //$NON-NLS-1$
                logFileName, e.toString()));
        logError(Const.getStackTracker(e));
    }

    try {
        // create/connect aws service
        AmazonElasticMapReduceClient emrClient = new AmazonElasticMapReduceClient(awsCredentials);

        // pull down jar from vfs
        FileObject jarFile = KettleVFS.getFileObject(buildFilename(jarUrl));
        File tmpFile = File.createTempFile("customEMR", "jar");
        tmpFile.deleteOnExit();
        FileOutputStream tmpFileOut = new FileOutputStream(tmpFile);
        IOUtils.copy(jarFile.getContent().getInputStream(), tmpFileOut);
        URL localJarUrl = tmpFile.toURI().toURL();

        // find main class in jar
        String mainClass = getMainClass(localJarUrl);

        // create staging bucket
        AmazonS3 s3Client = new AmazonS3Client(awsCredentials);

        FileSystemOptions opts = new FileSystemOptions();
        DefaultFileSystemConfigBuilder.getInstance().setUserAuthenticator(opts, new StaticUserAuthenticator(
                null, awsCredentials.getAWSAccessKeyId(), awsCredentials.getAWSSecretKey()));
        FileObject stagingDirFileObject = KettleVFS.getFileObject(stagingDir, getVariables(), opts);

        String stagingBucketName = stagingDirFileObject.getName().getBaseName();
        if (!s3Client.doesBucketExist(stagingBucketName)) {
            s3Client.createBucket(stagingBucketName);
        }

        // delete old jar if needed
        try {
            s3Client.deleteObject(stagingBucketName, jarFile.getName().getBaseName());
        } catch (Exception ex) {
            logError(Const.getStackTracker(ex));
        }

        // put jar in s3 staging bucket
        s3Client.putObject(new PutObjectRequest(stagingBucketName, jarFile.getName().getBaseName(), tmpFile));
        // create non-vfs s3 url to jar
        String stagingS3JarUrl = "s3://" + stagingBucketName + "/" + jarFile.getName().getBaseName();
        String stagingS3BucketUrl = "s3://" + stagingBucketName;

        RunJobFlowRequest runJobFlowRequest = null;
        RunJobFlowResult runJobFlowResult = null;
        if (StringUtil.isEmpty(hadoopJobFlowId)) {
            // create EMR job flow
            runJobFlowRequest = createJobFlow(stagingS3BucketUrl, stagingS3JarUrl, mainClass);
            // start EMR job
            runJobFlowResult = emrClient.runJobFlow(runJobFlowRequest);
        } else {
            List<String> jarStepArgs = new ArrayList<String>();
            if (!StringUtil.isEmpty(cmdLineArgs)) {
                StringTokenizer st = new StringTokenizer(cmdLineArgs, " ");
                while (st.hasMoreTokens()) {
                    String token = st.nextToken();
                    logBasic("adding args: " + token);
                    jarStepArgs.add(token);
                }
            }

            HadoopJarStepConfig hadoopJarStep = new HadoopJarStepConfig();
            hadoopJarStep.setJar(stagingS3JarUrl);
            hadoopJarStep.setMainClass(mainClass);
            hadoopJarStep.setArgs(jarStepArgs);

            StepConfig stepConfig = new StepConfig();
            stepConfig.setName("custom jar: " + jarUrl);
            stepConfig.setHadoopJarStep(hadoopJarStep);

            List<StepConfig> steps = new ArrayList<StepConfig>();
            steps.add(stepConfig);

            AddJobFlowStepsRequest addJobFlowStepsRequest = new AddJobFlowStepsRequest();
            addJobFlowStepsRequest.setJobFlowId(hadoopJobFlowId);
            addJobFlowStepsRequest.setSteps(steps);

            emrClient.addJobFlowSteps(addJobFlowStepsRequest);
        }

        String loggingIntervalS = environmentSubstitute(loggingInterval);
        int logIntv = 60;
        try {
            logIntv = Integer.parseInt(loggingIntervalS);
        } catch (NumberFormatException ex) {
            logError("Unable to parse logging interval '" + loggingIntervalS + "' - using " + "default of 60");
        }

        // monitor it / blocking / logging if desired
        if (blocking) {
            try {
                if (log.isBasic()) {

                    String executionState = "RUNNING";

                    List<String> jobFlowIds = new ArrayList<String>();
                    String id = hadoopJobFlowId;
                    if (StringUtil.isEmpty(hadoopJobFlowId)) {
                        id = runJobFlowResult.getJobFlowId();
                        jobFlowIds.add(id);
                    }

                    while (isRunning(executionState)) {
                        DescribeJobFlowsRequest describeJobFlowsRequest = new DescribeJobFlowsRequest();
                        describeJobFlowsRequest.setJobFlowIds(jobFlowIds);

                        DescribeJobFlowsResult describeJobFlowsResult = emrClient
                                .describeJobFlows(describeJobFlowsRequest);
                        boolean found = false;
                        for (JobFlowDetail jobFlowDetail : describeJobFlowsResult.getJobFlows()) {
                            if (jobFlowDetail.getJobFlowId().equals(id)) {
                                executionState = jobFlowDetail.getExecutionStatusDetail().getState();
                                found = true;
                            }
                        }

                        if (!found) {
                            break;
                        }
                        // logBasic(BaseMessages.getString(PKG, "AmazonElasticMapReduceJobExecutor.RunningPercent", setupPercent,
                        // mapPercent, reducePercent));
                        logBasic(hadoopJobName + " execution status: " + executionState);
                        try {
                            if (isRunning(executionState)) {
                                Thread.sleep(logIntv * 1000);
                            }
                        } catch (InterruptedException ie) {
                            // Ignore
                        }
                    }

                    if ("FAILED".equalsIgnoreCase(executionState)) {
                        result.setStopped(true);
                        result.setNrErrors(1);
                        result.setResult(false);

                        S3Object outObject = s3Client.getObject(stagingBucketName, id + "/steps/1/stdout");
                        ByteArrayOutputStream outStream = new ByteArrayOutputStream();
                        IOUtils.copy(outObject.getObjectContent(), outStream);
                        logError(outStream.toString());

                        S3Object errorObject = s3Client.getObject(stagingBucketName, id + "/steps/1/stderr");
                        ByteArrayOutputStream errorStream = new ByteArrayOutputStream();
                        IOUtils.copy(errorObject.getObjectContent(), errorStream);
                        logError(errorStream.toString());
                    }
                }
            } catch (Exception e) {
                logError(e.getMessage(), e);
            }
        }

    } catch (Throwable t) {
        t.printStackTrace();
        result.setStopped(true);
        result.setNrErrors(1);
        result.setResult(false);
        logError(t.getMessage(), t);
    }

    if (appender != null) {
        LogWriter.getInstance().removeAppender(appender);
        appender.close();

        ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_LOG, appender.getFile(),
                parentJob.getJobname(), getName());
        result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
    }

    return result;
}