List of usage examples for com.amazonaws.services.s3.model PutObjectRequest withMetadata
@Override @SuppressWarnings("unchecked") public PutObjectRequest withMetadata(ObjectMetadata metadata)
From source file:com.tvarit.plugin.TvaritTomcatDeployerMojo.java
License:Open Source License
/**
 * Uploads the project's WAR artifact to S3 under a Maven-GAV-derived key and
 * attaches the deployment settings (project name, stack template URL, SSH key
 * name) as S3 user metadata on the uploaded object.
 *
 * @throws MojoExecutionException if the default template URL cannot be built
 */
@Override
public void execute() throws MojoExecutionException, MojoFailureException {
    final MavenProject project = (MavenProject) this.getPluginContext().getOrDefault("project", null);
    if (templateUrl == null) {
        try {
            templateUrl = new TemplateUrlMaker().makeUrl(project, "newinstance.template").toString();
        } catch (MalformedURLException e) {
            throw new MojoExecutionException(
                    "Could not create default url for templates. Please open an issue on github.", e);
        }
    }
    final BasicAWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey);
    AmazonS3Client s3Client = new AmazonS3Client(awsCredentials);
    final File warFile = project.getArtifact().getFile();
    // Key layout mirrors Maven coordinates so successive versions never collide.
    final String key = "deployables/" + project.getGroupId() + "/" + project.getArtifactId() + "/"
            + project.getVersion() + "/" + warFile.getName();
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, key, warFile);
    final ObjectMetadata metadata = new ObjectMetadata();
    final HashMap<String, String> userMetadata = new HashMap<>();
    userMetadata.put("project_name", projectName);
    userMetadata.put("stack_template_url", templateUrl);
    userMetadata.put("private_key_name", sshKeyName);
    metadata.setUserMetadata(userMetadata);
    putObjectRequest.withMetadata(metadata);
    s3Client.putObject(putObjectRequest);
    // NOTE(review): a large commented-out CloudFormation/AutoScaling provisioning
    // section (stack creation + instance attachment) was removed as dead code;
    // recover it from version control if that flow is ever reinstated.
}
From source file:io.druid.storage.s3.S3ServerSideEncryption.java
License:Apache License
/**
 * Returns the request configured for SSE-S3 (AES-256 server-side encryption).
 * Any metadata already attached to the request is cloned before being modified
 * so the caller's original {@link ObjectMetadata} instance is never mutated.
 */
@Override
public PutObjectRequest decorate(PutObjectRequest request) {
    final ObjectMetadata meta;
    if (request.getMetadata() == null) {
        meta = new ObjectMetadata();
    } else {
        meta = request.getMetadata().clone();
    }
    meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    return request.withMetadata(meta);
}
From source file:io.fastup.maven.plugin.app.DeployPutRequestMaker.java
License:Open Source License
/**
 * Builds the S3 {@link PutObjectRequest} for the project's WAR artifact.
 * The object key follows the Maven GAV layout, and every deployment setting
 * (health-check URL, DB credentials, context configuration, ...) travels as
 * S3 user metadata on the uploaded object.
 *
 * @return the fully configured put request
 * @throws MojoExecutionException if the context-config URL is malformed
 */
PutObjectRequest makePutRequest() throws MojoExecutionException {
    final TvaritEnvironment env = TvaritEnvironment.getInstance();
    // Return value is discarded; call retained in case the getter has side effects.
    env.<AppDeployerMojo>getMojo().getArtifactBucketName();
    final File warFile = env.getMavenProject().getArtifact().getFile();
    final String groupId = env.getMavenProject().getGroupId();
    final String artifactId = env.getMavenProject().getArtifactId();
    final String version = env.getMavenProject().getVersion();
    final String key = "deployables/" + groupId + "/" + artifactId + "/" + version + "/" + warFile.getName();
    final String bucketName = env.getArtifactBucketName();
    final PutObjectRequest request = new PutObjectRequest(bucketName, key, warFile);

    final Map<String, String> userMetadata = new HashMap<>();
    userMetadata.put("project_name", env.getProjectName());
    userMetadata.put("health_check_url", env.<AppDeployerMojo>getMojo().getHealthCheckUrl());
    userMetadata.put("private_key_name", env.<AppDeployerMojo>getMojo().getSshKeyName());
    userMetadata.put("db-version", env.<AppDeployerMojo>getMojo().getDbVersion());
    userMetadata.put("group-id", groupId);
    userMetadata.put("artifact-id", artifactId);
    userMetadata.put("version", version);
    userMetadata.put("app_fqdn", env.<AppDeployerMojo>getMojo().getAppFqdn());
    userMetadata.put("db-name", env.<AppDeployerMojo>getMojo().getDbName());
    userMetadata.put("db-username", env.<AppDeployerMojo>getMojo().getDbUsername());
    userMetadata.put("db-password", env.<AppDeployerMojo>getMojo().getDbPassword());

    final String contextConfigUrl = env.<AppDeployerMojo>getMojo().getContextConfigUrl();
    final URL url;
    try {
        url = new TemplateUrlMaker().makeUrl(contextConfigUrl);
    } catch (MalformedURLException e) {
        throw new MojoExecutionException("failed", e);
    }
    userMetadata.put("context_config_url", url.toString());

    final String contextRoot = env.<AppDeployerMojo>getMojo().getContextRoot();
    // Tomcat serves the root context from a webapp literally named "ROOT".
    userMetadata.put("context_root", contextRoot.equals("/") ? "ROOT" : contextRoot);

    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.setUserMetadata(userMetadata);
    request.withMetadata(metadata);
    return request;
}
From source file:org.geoserver.taskmanager.external.impl.S3FileServiceImpl.java
License:Open Source License
@Override public void create(String filePath, InputStream content) throws IOException { // Check parameters if (content == null) { throw new IllegalArgumentException("Content of a file can not be null."); }//from w w w. j av a 2 s. c o m if (filePath == null) { throw new IllegalArgumentException("Name of a file can not be null."); } if (checkFileExists(filePath)) { throw new IllegalArgumentException("The file already exists"); } File scratchFile = File.createTempFile("prefix", String.valueOf(System.currentTimeMillis())); try { if (!getS3Client().doesBucketExist(rootFolder)) { getS3Client().createBucket(rootFolder); } FileUtils.copyInputStreamToFile(content, scratchFile); ObjectMetadata metadata = new ObjectMetadata(); metadata.setContentEncoding(ENCODING); PutObjectRequest putObjectRequest = new PutObjectRequest(rootFolder, filePath, scratchFile); putObjectRequest.withMetadata(metadata); getS3Client().putObject(putObjectRequest); } catch (AmazonClientException e) { throw new IOException(e); } finally { if (scratchFile.exists()) { scratchFile.delete(); } } }
From source file:org.springframework.integration.aws.s3.core.AmazonS3OperationsImpl.java
License:Apache License
/**
 * Uploads the given {@link AmazonS3Object} to {@code bucketName} under
 * {@code folder}/{@code objectName} using the {@link TransferManager},
 * blocking until the transfer completes, then applies the object's ACL.
 * Exactly one of the object's file source or input stream must be set; a
 * stream source is first spooled to a temp file (deleted after upload).
 * Note the ACL cannot be set atomically with the put, so the upload may
 * succeed while the ACL call fails.
 *
 * @param bucketName target S3 bucket
 * @param folder     optional folder prefix for the key; may be null
 * @param objectName object name, mandatory
 * @param s3Object   payload holding either a File or an InputStream plus ACLs
 */
public void putObject(String bucketName, String folder, String objectName, AmazonS3Object s3Object) {
    if (logger.isDebugEnabled()) {
        logger.debug("Putting object to bucket " + bucketName + " and folder " + folder);
        logger.debug("Object Name is " + objectName);
    }
    if (objectName == null)
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "Object Name is Mandatory");
    boolean isTempFile = false;
    // File source and stream source are mutually exclusive; exactly one must be set.
    File file = s3Object.getFileSource();
    InputStream in = s3Object.getInputStream();
    if (file != null && in != null)
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "File Object and Input Stream in the S3 Object are mutually exclusive");
    if (file == null && in == null)
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "At lease one of File object or Input Stream in the S3 Object are mandatory");
    // Build the object key, avoiding a doubled separator when folder already ends with one.
    String key;
    if (folder != null) {
        key = folder.endsWith(PATH_SEPARATOR) ? folder + objectName : folder + PATH_SEPARATOR + objectName;
    } else {
        key = objectName;
    }
    // Stream sources are spooled to a temp file so the upload has a known length.
    if (in != null) {
        file = getTempFile(in, bucketName, objectName);
        isTempFile = true;
    }
    PutObjectRequest request;
    if (file != null) {
        request = new PutObjectRequest(bucketName, key, file);
        // Attach a Content-MD5 header computed locally. For multipart uploads the
        // ETag is not the MD5 of the content, so this metadata is the only way the
        // inbound-channel-adapter can later verify the object's checksum.
        String stringContentMD5 = null;
        try {
            stringContentMD5 = AmazonWSCommonUtils.encodeHex(AmazonWSCommonUtils.getContentsMD5AsBytes(file));
        } catch (UnsupportedEncodingException e) {
            logger.error("Exception while generating the content's MD5 of the file " + file.getAbsolutePath(),
                    e);
        }
        // MD5 computation failure is non-fatal: upload proceeds without the header.
        if (stringContentMD5 != null) {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentMD5(stringContentMD5);
            request.withMetadata(metadata);
        }
    } else
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "Unable to get the File handle to upload the file to S3");
    Upload upload;
    try {
        upload = transferManager.upload(request);
    } catch (Exception e) {
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "Encountered Exception while invoking upload on multipart/single thread file, "
                        + "see nested exceptions for more details", e);
    }
    // Block until the transfer finishes: this method's contract is synchronous.
    try {
        if (logger.isInfoEnabled())
            logger.info("Waiting for Upload to complete");
        upload.waitForCompletion();
        if (logger.isInfoEnabled())
            logger.info("Upload completed");
    } catch (Exception e) {
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "Encountered Exception while uploading the multipart/single thread file, "
                        + "see nested exceptions for more details", e);
    }
    // Clean up the spooled copy of a stream source.
    if (isTempFile) {
        if (logger.isDebugEnabled())
            logger.debug("Deleting temp file: " + file.getName());
        file.delete();
    }
    // S3 does not allow setting the ACL atomically with the put, so this is a
    // separate call: the object may exist with no ACL applied if this step fails.
    AmazonS3ObjectACL acl = s3Object.getObjectACL();
    AccessControlList objectACL = getAccessControlList(bucketName, key, acl);
    if (objectACL != null) {
        if (logger.isInfoEnabled())
            logger.info("Setting Access control list for key " + key);
        try {
            client.setObjectAcl(bucketName, key, objectACL);
        } catch (Exception e) {
            throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                    "Encountered Exception while setting the Object ACL for key , " + key
                            + "see nested exceptions for more details", e);
        }
        if (logger.isDebugEnabled())
            logger.debug("Successfully set the object ACL");
    } else {
        if (logger.isInfoEnabled())
            logger.info("No Object ACL found to be set");
    }
}
From source file:org.springframework.integration.aws.s3.core.DefaultAmazonS3Operations.java
License:Apache License
/** * The implementation puts the given {@link File} instance to the provided bucket against * the given key.//from w ww .jav a 2s . c o m * * @param bucketName The bucket on S3 where this object is to be put * @param key The key against which this Object is to be stored in S3 * @param file resource to be uploaded to S3 * @param objectACL the Object's Access controls for the object to be uploaded * @param userMetadata The user's metadata to be associated with the object uploaded * @param stringContentMD5 The MD5 sum of the contents of the file to be uploaded */ @Override public void doPut(String bucketName, String key, File file, AmazonS3ObjectACL objectACL, Map<String, String> userMetadata, String stringContentMD5) { ObjectMetadata metadata = new ObjectMetadata(); PutObjectRequest request = new PutObjectRequest(bucketName, key, file); request.withMetadata(metadata); if (stringContentMD5 != null) { metadata.setContentMD5(stringContentMD5); } if (userMetadata != null) { metadata.setUserMetadata(userMetadata); } Upload upload; try { upload = transferManager.upload(request); } catch (Exception e) { throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, key, "Encountered Exception while invoking upload on multipart/single thread file, " + "see nested exceptions for more details", e); } //Wait till the upload completes, the call to putObject is synchronous try { if (logger.isInfoEnabled()) { logger.info("Waiting for Upload to complete"); } upload.waitForCompletion(); if (logger.isInfoEnabled()) { logger.info("Upload completed"); } } catch (Exception e) { throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, key, "Encountered Exception while uploading the multipart/single thread file, " + "see nested exceptions for more details", e); } //Now since the object is present on S3, set the AccessControl list on it //Please note that it is not possible to set the object ACL with the //put object request, and hence both these 
operations cannot be atomic //it is possible the objects is uploaded and the ACl not set due to some //failure if (objectACL != null) { if (logger.isInfoEnabled()) { logger.info("Setting Access control list for key " + key); } try { client.setObjectAcl(bucketName, key, getAccessControlList(bucketName, key, objectACL)); } catch (Exception e) { throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, key, "Encountered Exception while setting the Object ACL for key , " + key + "see nested exceptions for more details", e); } if (logger.isDebugEnabled()) { logger.debug("Successfully set the object ACL"); } } }