Example usage for com.amazonaws.services.s3 AmazonS3Client putObject

List of usage examples for com.amazonaws.services.s3 AmazonS3Client putObject

Introduction

In this page you can find the example usage for com.amazonaws.services.s3 AmazonS3Client putObject.

Prototype

@Override
    public PutObjectResult putObject(String bucketName, String key, String content)
            throws AmazonServiceException, SdkClientException 

Source Link

Usage

From source file: awslabs.lab51.SolutionCode.java

License: Open Source License

/**
 * Uploads the image at {@code filePath} to S3 under {@code imageKey}, then records the
 * (key, bucket) pair as an item in the given DynamoDB table. If the file does not exist
 * on disk the image is skipped and a message is logged instead.
 *
 * <p>All failures are caught and reported to the lab page; this method never throws.
 */
@Override
public void addImage(AmazonDynamoDBClient dynamoDbClient, String tableName, AmazonS3Client s3Client,
        String bucketName, String imageKey, String filePath) {

    try {
        File file = new File(filePath);
        if (file.exists()) {
            // Upload the object first so the DynamoDB record never points at a missing S3 object.
            s3Client.putObject(bucketName, imageKey, file);

            PutItemRequest putItemRequest = new PutItemRequest().withTableName(tableName);
            putItemRequest.addItemEntry("Key", new AttributeValue(imageKey));
            putItemRequest.addItemEntry("Bucket", new AttributeValue(bucketName));
            dynamoDbClient.putItem(putItemRequest);
            labController.logMessageToPage("Added imageKey: " + imageKey);
        } else {
            labController.logMessageToPage(
                    "Image doesn't exist on disk. Skipped: " + imageKey + "[" + filePath + "]");
        }
    } catch (Exception ex) {
        // Log the whole exception, not just getMessage(): getMessage() can be null
        // (e.g. for NullPointerException) and drops the exception type.
        labController.logMessageToPage("addImage Error: " + ex);
    }

}

From source file: com.adobe.people.jedelson.rugsinlambda.GenerateHandler.java

License: Apache License

/**
 * Runs the requested Rug generator (plus any requested editors) for the input,
 * zips the generated project, uploads the zip to S3 and returns a result DTO
 * carrying a pre-signed download URL valid for one hour.
 *
 * <p>If the generator's parameters are invalid, returns a failed result listing the
 * invalid/missing parameters instead of generating. Editor parameter problems are
 * collected per editor name but do not abort the generation.
 *
 * @param input   generation request (generator name, parameters, optional editors)
 * @param context Lambda context; its request id namespaces the temp dir and S3 key
 * @param rugs    resolved Rug archive containing the available generators/editors
 * @return result DTO: either validation errors or success with a pre-signed URL
 * @throws NoSuchGeneratorException if no generator matches the requested name
 */
@Override
protected GenerationResultDTO handleRequest(GenerationRequestDTO input, Context context, Rugs rugs) {
    String generatorName = (String) input.getGeneratorName();
    log.info("Using {} as generator name from {}.", generatorName, input);

    Optional<ProjectGenerator> opt = asJavaCollection(rugs.generators()).stream()
            .filter(g -> g.name().equals(generatorName)).findFirst();
    if (!opt.isPresent()) {
        throw new NoSuchGeneratorException(input.getGeneratorName());
    }

    ProjectGenerator generator = opt.get();
    ParameterValues paramValues = input.toParameterValues();
    if (!generator.areValid(paramValues)) {
        // Report every invalid and missing generator parameter; do not generate.
        GenerationResultDTO result = new GenerationResultDTO(false);
        asJavaCollection(generator.findInvalidParameterValues(paramValues)).forEach(p -> {
            result.addInvalidParameter(p);
        });
        asJavaCollection(generator.findMissingParameters(paramValues)).forEach(p -> {
            result.addMissingParameter(p);
        });
        return result;
    }

    String projectName = input.getParams().get("project_name");
    TempProjectManagement tpm = new TempProjectManagement(context.getAwsRequestId());
    tpm.generate(generator, paramValues, projectName);

    GenerationResultDTO result = new GenerationResultDTO(true);

    for (EditRequestDTO edit : input.getEditors()) {
        String editorName = edit.getName();
        log.info("Editing with {} using params {}.", editorName, edit.getParams());
        Optional<ProjectEditor> editorOpt = asJavaCollection(rugs.editors()).stream()
                .filter(g -> g.name().equals(editorName)).findFirst();
        if (editorOpt.isPresent()) {
            ProjectEditor editor = editorOpt.get();
            ParameterValues editorParams = edit.toParameterValues(input.getParams());
            if (!editor.areValid(editorParams)) {
                // BUG FIX: this previously re-validated the generator's paramValues,
                // so editor parameter problems were never actually reported.
                asJavaCollection(editor.findInvalidParameterValues(editorParams)).forEach(p -> {
                    result.addInvalidParameter(editorName, p);
                });
                asJavaCollection(editor.findMissingParameters(editorParams)).forEach(p -> {
                    result.addMissingParameter(editorName, p);
                });
            } else {
                tpm.edit(editor, editorParams, projectName);
            }
        }
    }

    File zipFile = tpm.createZipFile();
    log.info("zip file is at {} length is {}.", zipFile.getAbsolutePath(), zipFile.length());

    AmazonS3Client s3Client = new AmazonS3Client();
    String keyName = context.getAwsRequestId() + "/project.zip";
    s3Client.putObject(BUCKET_NAME, keyName, zipFile);

    // Pre-signed URL expires one hour from now.
    Date expiration = new Date();
    expiration.setTime(expiration.getTime() + 1000L * 60 * 60);

    GeneratePresignedUrlRequest generatePresignedUrlRequest = new GeneratePresignedUrlRequest(
            BUCKET_NAME, keyName);
    generatePresignedUrlRequest.setMethod(HttpMethod.GET);
    generatePresignedUrlRequest.setExpiration(expiration);

    URL presignedUrl = s3Client.generatePresignedUrl(generatePresignedUrlRequest);

    result.setUrl(presignedUrl.toString());
    return result;
}

From source file: com.cleanenergyexperts.aws.cf.CloudFormationMojo.java

License: Apache License

/**
 * Maven goal entry point: uploads the build artifact to S3, then triggers an
 * update of every configured CloudFormation stack so it picks up the new artifact.
 *
 * <p>For each stack, existing stack parameters are preserved except where
 * overridden by {@code stackParameters}.
 *
 * @throws MojoExecutionException if the artifact is missing, the S3 upload fails,
 *                                or any stack update request is rejected
 */
public void execute() throws MojoExecutionException {
    getLog().info("Bucket Name: " + bucketName);

    if (artifactFile == null || !artifactFile.isFile()) {
        throw new MojoExecutionException("Cannot find artifact file to upload");
    }
    String artifactKey = artifactFile.getName();
    getLog().info("Artifact Name: " + artifactKey);

    BasicAWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey);
    AmazonCloudFormationClient cfClient = new AmazonCloudFormationClient(awsCredentials);
    cfClient.setEndpoint(getCloudFormationEndPoint());
    AmazonS3Client s3Client = new AmazonS3Client(awsCredentials);

    // Upload Artifact to S3
    try {
        getLog().info("Uploading artifact to S3...");
        s3Client.putObject(bucketName, artifactKey, artifactFile);
    } catch (AmazonServiceException e) {
        throw new MojoExecutionException("[SERVICE] Could Not Upload File to S3", e);
    } catch (AmazonClientException e) {
        throw new MojoExecutionException("[CLIENT] Could Not Upload File to S3", e);
    }

    // Update each stack with the new artifact file
    for (String stackName : stackNames) {
        getLog().info("Cloud Formation Stack Name: " + stackName);
        String templateBody = getTemplateBody(cfClient, stackName);
        Stack stack = getStack(cfClient, stackName);

        // If passed additional parameters, merge them over the stack's current ones.
        List<Parameter> parameters = stack.getParameters();
        if (stackParameters != null && !stackParameters.isEmpty()) {
            List<Parameter> tmpParams = new ArrayList<Parameter>();

            // Keep existing parameters we haven't locally overwritten
            for (Parameter oldParam : parameters) {
                if (!stackParameters.containsKey(oldParam.getParameterKey())) {
                    tmpParams.add(oldParam);
                }
            }

            // Add the local overrides (entrySet avoids a second lookup per key)
            for (Map.Entry<String, String> override : stackParameters.entrySet()) {
                Parameter newParam = new Parameter();
                newParam.setParameterKey(override.getKey());
                newParam.setParameterValue(override.getValue());
                tmpParams.add(newParam);
            }
            parameters = tmpParams;
        }

        // Update the Stack
        UpdateStackRequest updateStackRequest = new UpdateStackRequest();
        updateStackRequest.setStackName(stackName);
        updateStackRequest.setTemplateBody(templateBody);
        updateStackRequest.setParameters(parameters);
        updateStackRequest.setCapabilities(stack.getCapabilities());
        try {
            getLog().info("Updating Cloud Formation Stack...");
            cfClient.updateStack(updateStackRequest);
        } catch (AmazonServiceException e) {
            throw new MojoExecutionException("[SERVICE] Could Not Update Cloud Formation Stack", e);
        } catch (AmazonClientException e) {
            throw new MojoExecutionException("[CLIENT] Could Not Update Cloud Formation Stack", e);
        }
        // Fixed missing space before "is now updating".
        getLog().info("Cloud Formation Stack " + stackName + " is now updating...");
    }

    getLog().info("All stacks have been updated. Complete.");
}

From source file: com.netflix.ice.basic.MapDb.java

License: Apache License

/**
 * Uploads every local MapDb file whose name starts with {@code dbName} to the work
 * S3 bucket — once under its own (prefixed) key and once under a "copy"-prefixed
 * key, presumably as a backup — TODO confirm the consumer of the "copy" objects.
 */
void upload() {
    AmazonS3Client s3Client = AwsUtils.getAmazonS3Client();

    File dir = new File(config.localDir);
    File[] files = dir.listFiles(new FilenameFilter() {
        public boolean accept(File file, String fileName) {
            return fileName.startsWith(dbName);
        }
    });
    // listFiles() returns null when the directory is missing or unreadable;
    // the original code would have thrown an NPE in that case.
    if (files == null) {
        return;
    }
    for (File file : files) {
        s3Client.putObject(config.workS3BucketName, config.workS3BucketPrefix + file.getName(), file);
        s3Client.putObject(config.workS3BucketName, config.workS3BucketPrefix + "copy" + file.getName(), file);
    }
}

From source file: com.tango.BucketSyncer.S32GCSTestFile.java

License: Apache License

/**
 * Builds a fresh test file, schedules the given key for cleanup in the requested
 * location(s), and uploads the backing file to the requested bucket(s).
 *
 * @param key            object key used for both upload and cleanup registration
 * @param client         S3 client used for the uploads
 * @param stuffToCleanup receives one StorageAsset per location to clean
 * @param copy           which bucket(s) to upload to
 * @param clean          which location(s) to register for cleanup
 * @return the newly created test file
 */
public static S32GCSTestFile create(String key, AmazonS3Client client, List<StorageAsset> stuffToCleanup,
        Copy copy, Clean clean) throws Exception {
    S32GCSTestFile testFile = new S32GCSTestFile();

    // Register cleanup: source first, then destination (mirrors the original order).
    if (clean == Clean.SOURCE || clean == Clean.SOURCE_AND_DEST) {
        stuffToCleanup.add(new StorageAsset(S32GCSMirrorTest.SOURCE, key));
    }
    if (clean == Clean.DEST || clean == Clean.SOURCE_AND_DEST) {
        stuffToCleanup.add(new StorageAsset(S32GCSMirrorTest.DESTINATION, key));
    }

    // Upload the backing file wherever requested.
    if (copy == Copy.SOURCE || copy == Copy.SOURCE_AND_DEST) {
        client.putObject(S32GCSMirrorTest.SOURCE, key, testFile.file);
    }
    if (copy == Copy.DEST || copy == Copy.SOURCE_AND_DEST) {
        client.putObject(S32GCSMirrorTest.DESTINATION, key, testFile.file);
    }
    return testFile;
}

From source file: com.tango.BucketSyncer.S32S3TestFile.java

License: Apache License

/**
 * Builds a fresh test file, schedules the given key for cleanup in the requested
 * location(s), and uploads the backing file to the requested bucket(s).
 *
 * @param key            object key used for both upload and cleanup registration
 * @param client         S3 client used for the uploads
 * @param stuffToCleanup receives one StorageAsset per location to clean
 * @param copy           which bucket(s) to upload to
 * @param clean          which location(s) to register for cleanup
 * @return the newly created test file
 */
public static S32S3TestFile create(String key, AmazonS3Client client, List<StorageAsset> stuffToCleanup,
        Copy copy, Clean clean) throws Exception {
    S32S3TestFile testFile = new S32S3TestFile();

    // Register cleanup: source first, then destination (mirrors the original order).
    if (clean == Clean.SOURCE || clean == Clean.SOURCE_AND_DEST) {
        stuffToCleanup.add(new StorageAsset(S32S3MirrorTest.SOURCE, key));
    }
    if (clean == Clean.DEST || clean == Clean.SOURCE_AND_DEST) {
        stuffToCleanup.add(new StorageAsset(S32S3MirrorTest.DESTINATION, key));
    }

    // Upload the backing file wherever requested.
    if (copy == Copy.SOURCE || copy == Copy.SOURCE_AND_DEST) {
        client.putObject(S32S3MirrorTest.SOURCE, key, testFile.file);
    }
    if (copy == Copy.DEST || copy == Copy.SOURCE_AND_DEST) {
        client.putObject(S32S3MirrorTest.DESTINATION, key, testFile.file);
    }
    return testFile;
}

From source file: de.fischer.thotti.s3.clients.S3FileUploader.java

License: Apache License

/**
 * Uploads {@code file} to the given S3 bucket using the file's name as the key,
 * unless that key is already present in the bucket (in which case only a warning
 * is logged and nothing is uploaded).
 *
 * @param bucket target S3 bucket name
 * @param file   local file to upload; its name becomes the object key
 */
private void putObjectInternal(String bucket, File file) {
    String key = file.getName();

    if (isKeyKnownInBucket(key)) {
        getLoggingSupport().warn("Key %s is already in use for bucket %s.", key, bucket);
        return;
    }

    AmazonS3Client client = getAmazonS3Client();
    // The PutObjectResult was previously captured into an unused local; dropped.
    client.putObject(bucket, key, file);
    getLoggingSupport().info("Uploaded %s to S3 as %s/%s.", key, bucket, key);
}

From source file: org.apache.usergrid.tools.WarehouseExport.java

License: Apache License

/**
 * Uploads the named local file to the S3 bucket configured in {@code properties},
 * creating the bucket first, and logs the resulting object's ETag.
 *
 * <p>Removed dead code: a {@code Properties} override map and a jclouds MODULES set
 * were built but never used by the AWS-SDK upload path below.
 *
 * @param fileName path of the local file to upload; its base name becomes the key
 */
private void copyToS3(String fileName) {

    String bucketName = (String) properties.get(BUCKET_PROPNAME);
    String accessId = (String) properties.get(ACCESS_ID_PROPNAME);
    String secretKey = (String) properties.get(SECRET_KEY_PROPNAME);

    AWSCredentials credentials = new BasicAWSCredentials(accessId, secretKey);
    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.setProtocol(Protocol.HTTP);

    AmazonS3Client s3Client = new AmazonS3Client(credentials, clientConfig);

    // NOTE(review): createBucket on every export assumes the bucket is owned by
    // this account or does not yet exist — confirm this is intentional.
    s3Client.createBucket(bucketName);
    File uploadFile = new File(fileName);
    PutObjectResult putObjectResult = s3Client.putObject(bucketName, uploadFile.getName(), uploadFile);
    logger.info("Uploaded file etag={}", putObjectResult.getETag());
}