List of usage examples for com.amazonaws.services.s3.model.ObjectMetadata#setContentEncoding
public void setContentEncoding(String encoding)
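Before the project-specific examples below, here is a minimal sketch of a typical use of setContentEncoding when putting a gzip-compressed object with the AWS SDK for Java v1. The bucket name, key, and file path are placeholders and not taken from any of the examples that follow.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

import java.io.File;

public class ContentEncodingExample {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Metadata for an object whose bytes are already gzip-compressed.
        // Content-Encoding tells downstream clients (e.g. browsers) to
        // decompress the body on download; Content-Type still describes
        // the uncompressed payload.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentEncoding("gzip");
        metadata.setContentType("text/css");

        File compressedFile = new File("styles.css.gz"); // placeholder path
        s3.putObject(new PutObjectRequest("example-bucket", "assets/styles.css", compressedFile)
                .withMetadata(metadata));
    }
}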
From source file:alluxio.underfs.s3a.S3AOutputStream.java
License:Apache License
@Override
public void close() throws IOException {
    if (mClosed) {
        return;
    }
    mLocalOutputStream.close();
    try {
        // Generate the object metadata by setting server side encryption, md5 checksum, the file
        // length, and encoding as octet stream since no assumptions are made about the file type
        ObjectMetadata meta = new ObjectMetadata();
        if (SSE_ENABLED) {
            meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        if (mHash != null) {
            meta.setContentMD5(new String(Base64.encode(mHash.digest())));
        }
        meta.setContentLength(mFile.length());
        meta.setContentEncoding(Mimetypes.MIMETYPE_OCTET_STREAM);
        // Generate the put request and wait for the transfer manager to complete the upload, then
        // delete the temporary file on the local machine
        PutObjectRequest putReq = new PutObjectRequest(mBucketName, mKey, mFile).withMetadata(meta);
        mManager.upload(putReq).waitForUploadResult();
        if (!mFile.delete()) {
            LOG.error("Failed to delete temporary file @ {}", mFile.getPath());
        }
    } catch (Exception e) {
        LOG.error("Failed to upload {}. Temporary file @ {}", mKey, mFile.getPath());
        throw new IOException(e);
    }
    // Set the closed flag, close can be retried until mFile.delete is called successfully
    mClosed = true;
}
From source file:ch.entwine.weblounge.maven.S3DeployMojo.java
License:Open Source License
/**
 * {@inheritDoc}
 *
 * @see org.apache.maven.plugin.Mojo#execute()
 */
public void execute() throws MojoExecutionException, MojoFailureException {
    // Setup AWS S3 client
    AWSCredentials credentials = new BasicAWSCredentials(awsAccessKey, awsSecretKey);
    AmazonS3Client uploadClient = new AmazonS3Client(credentials);
    TransferManager transfers = new TransferManager(credentials);

    // Make sure key prefix does not start with a slash but has one at the end
    if (keyPrefix.startsWith("/"))
        keyPrefix = keyPrefix.substring(1);
    if (!keyPrefix.endsWith("/"))
        keyPrefix = keyPrefix + "/";

    // Keep track of how much data has been transferred
    long totalBytesTransferred = 0L;
    int items = 0;
    Queue<Upload> uploads = new LinkedBlockingQueue<Upload>();

    try {
        // Check if S3 bucket exists
        getLog().debug("Checking whether bucket " + bucket + " exists");
        if (!uploadClient.doesBucketExist(bucket)) {
            getLog().error("Desired bucket '" + bucket + "' does not exist!");
            return;
        }

        getLog().debug("Collecting files to transfer from " + resources.getDirectory());
        List<File> res = getResources();
        for (File file : res) {
            // Make path of resource relative to resources directory
            String filename = file.getName();
            String extension = FilenameUtils.getExtension(filename);
            String path = file.getPath().substring(resources.getDirectory().length());
            String key = concat("/", keyPrefix, path).substring(1);

            // Delete old file version in bucket
            getLog().debug("Removing existing object at " + key);
            uploadClient.deleteObject(bucket, key);

            // Setup meta data
            ObjectMetadata meta = new ObjectMetadata();
            meta.setCacheControl("public, max-age=" + String.valueOf(valid * 3600));

            FileInputStream fis = null;
            GZIPOutputStream gzipos = null;
            final File fileToUpload;

            if (gzip && ("js".equals(extension) || "css".equals(extension))) {
                try {
                    fis = new FileInputStream(file);
                    File gzFile = File.createTempFile(file.getName(), null);
                    gzipos = new GZIPOutputStream(new FileOutputStream(gzFile));
                    IOUtils.copy(fis, gzipos);
                    fileToUpload = gzFile;
                    meta.setContentEncoding("gzip");
                    if ("js".equals(extension))
                        meta.setContentType("text/javascript");
                    if ("css".equals(extension))
                        meta.setContentType("text/css");
                } catch (FileNotFoundException e) {
                    getLog().error(e);
                    continue;
                } catch (IOException e) {
                    getLog().error(e);
                    continue;
                } finally {
                    IOUtils.closeQuietly(fis);
                    IOUtils.closeQuietly(gzipos);
                }
            } else {
                fileToUpload = file;
            }

            // Do a random check for existing errors before starting the next upload
            if (erroneousUpload != null)
                break;

            // Create put object request
            long bytesToTransfer = fileToUpload.length();
            totalBytesTransferred += bytesToTransfer;
            PutObjectRequest request = new PutObjectRequest(bucket, key, fileToUpload);
            request.setProgressListener(new UploadListener(credentials, bucket, key, bytesToTransfer));
            request.setMetadata(meta);

            // Schedule put object request
            getLog().info("Uploading " + key + " ("
                    + FileUtils.byteCountToDisplaySize((int) bytesToTransfer) + ")");
            Upload upload = transfers.upload(request);
            uploads.add(upload);
            items++;
        }
    } catch (AmazonServiceException e) {
        getLog().error("Uploading resources failed: " + e.getMessage());
    } catch (AmazonClientException e) {
        getLog().error("Uploading resources failed: " + e.getMessage());
    }

    // Wait for uploads to be finished
    String currentUpload = null;
    try {
        Thread.sleep(1000);
        getLog().info("Waiting for " + uploads.size() + " uploads to finish...");
        while (!uploads.isEmpty()) {
            Upload upload = uploads.poll();
            currentUpload = upload.getDescription().substring("Uploading to ".length());
            if (TransferState.InProgress.equals(upload.getState()))
                getLog().debug("Waiting for upload " + currentUpload + " to finish");
            upload.waitForUploadResult();
        }
    } catch (AmazonServiceException e) {
        throw new MojoExecutionException("Error while uploading " + currentUpload);
    } catch (AmazonClientException e) {
        throw new MojoExecutionException("Error while uploading " + currentUpload);
    } catch (InterruptedException e) {
        getLog().debug("Interrupted while waiting for upload to finish");
    }

    // Check for errors that happened outside of the actual uploading
    if (erroneousUpload != null) {
        throw new MojoExecutionException("Error while uploading " + erroneousUpload);
    }

    getLog().info("Deployed " + items + " files ("
            + FileUtils.byteCountToDisplaySize((int) totalBytesTransferred) + ") to s3://" + bucket);
}
From source file:com.emc.ecs.sync.util.AwsS3Util.java
License:Open Source License
public static ObjectMetadata s3MetaFromSyncMeta(SyncMetadata syncMeta) {
    ObjectMetadata om = new ObjectMetadata();
    if (syncMeta.getCacheControl() != null)
        om.setCacheControl(syncMeta.getCacheControl());
    if (syncMeta.getContentDisposition() != null)
        om.setContentDisposition(syncMeta.getContentDisposition());
    if (syncMeta.getContentEncoding() != null)
        om.setContentEncoding(syncMeta.getContentEncoding());
    om.setContentLength(syncMeta.getContentLength());
    if (syncMeta.getChecksum() != null && syncMeta.getChecksum().getAlgorithm().equals("MD5"))
        om.setContentMD5(syncMeta.getChecksum().getValue());
    if (syncMeta.getContentType() != null)
        om.setContentType(syncMeta.getContentType());
    if (syncMeta.getHttpExpires() != null)
        om.setHttpExpiresDate(syncMeta.getHttpExpires());
    om.setUserMetadata(formatUserMetadata(syncMeta));
    if (syncMeta.getModificationTime() != null)
        om.setLastModified(syncMeta.getModificationTime());
    return om;
}
From source file:com.erudika.para.storage.AWSFileStore.java
License:Apache License
@Override
public String store(String path, InputStream data) {
    if (StringUtils.startsWith(path, "/")) {
        path = path.substring(1);
    }
    if (StringUtils.isBlank(path) || data == null) {
        return null;
    }
    int maxFileSizeMBytes = Config.getConfigInt("para.s3.max_filesize_mb", 10);
    try {
        if (data.available() > 0 && data.available() <= (maxFileSizeMBytes * 1024 * 1024)) {
            ObjectMetadata om = new ObjectMetadata();
            om.setCacheControl("max-age=15552000, must-revalidate"); // 180 days
            if (path.endsWith(".gz")) {
                om.setContentEncoding("gzip");
                path = path.substring(0, path.length() - 3);
            }
            path = System.currentTimeMillis() + "." + path;
            PutObjectRequest por = new PutObjectRequest(bucket, path, data, om);
            por.setCannedAcl(CannedAccessControlList.PublicRead);
            por.setStorageClass(StorageClass.ReducedRedundancy);
            s3.putObject(por);
            return Utils.formatMessage(baseUrl, Config.AWS_REGION, bucket, path);
        }
    } catch (IOException e) {
        logger.error(null, e);
    } finally {
        try {
            data.close();
        } catch (IOException ex) {
            logger.error(null, ex);
        }
    }
    return null;
}
From source file:com.shareplaylearn.models.UserItemManager.java
License:Open Source License
private ObjectMetadata makeBasicMetadata(int bufferLength, boolean isPublic, String itemName) {
    ObjectMetadata fileMetadata = new ObjectMetadata();
    fileMetadata.setContentEncoding(MediaType.APPLICATION_OCTET_STREAM);
    if (isPublic) {
        fileMetadata.addUserMetadata(UploadMetadataFields.PUBLIC, UploadMetadataFields.TRUE_VALUE);
    } else {
        fileMetadata.addUserMetadata(UploadMetadataFields.PUBLIC, UploadMetadataFields.FALSE_VALUE);
    }
    fileMetadata.addUserMetadata(UploadMetadataFields.DISPLAY_NAME, itemName);
    fileMetadata.setContentLength(bufferLength);
    return fileMetadata;
}
From source file:gov.cdc.sdp.cbr.aphl.AphlS3Producer.java
License:Apache License
private ObjectMetadata determineMetadata(final Exchange exchange) {
    ObjectMetadata objectMetadata = new ObjectMetadata();

    Long contentLength = exchange.getIn().getHeader(S3Constants.CONTENT_LENGTH, Long.class);
    if (contentLength != null) {
        objectMetadata.setContentLength(contentLength);
    }

    String contentType = exchange.getIn().getHeader(S3Constants.CONTENT_TYPE, String.class);
    if (contentType != null) {
        objectMetadata.setContentType(contentType);
    }

    String cacheControl = exchange.getIn().getHeader(S3Constants.CACHE_CONTROL, String.class);
    if (cacheControl != null) {
        objectMetadata.setCacheControl(cacheControl);
    }

    String contentDisposition = exchange.getIn().getHeader(S3Constants.CONTENT_DISPOSITION, String.class);
    if (contentDisposition != null) {
        objectMetadata.setContentDisposition(contentDisposition);
    }

    String contentEncoding = exchange.getIn().getHeader(S3Constants.CONTENT_ENCODING, String.class);
    if (contentEncoding != null) {
        objectMetadata.setContentEncoding(contentEncoding);
    }

    String contentMD5 = exchange.getIn().getHeader(S3Constants.CONTENT_MD5, String.class);
    if (contentMD5 != null) {
        objectMetadata.setContentMD5(contentMD5);
    }

    Date lastModified = exchange.getIn().getHeader(S3Constants.LAST_MODIFIED, Date.class);
    if (lastModified != null) {
        objectMetadata.setLastModified(lastModified);
    }

    Map<String, String> userMetadata = CastUtils
            .cast(exchange.getIn().getHeader(S3Constants.USER_METADATA, Map.class));
    if (userMetadata != null) {
        objectMetadata.setUserMetadata(userMetadata);
    }

    Map<String, String> s3Headers = CastUtils
            .cast(exchange.getIn().getHeader(S3Constants.S3_HEADERS, Map.class));
    if (s3Headers != null) {
        for (Map.Entry<String, String> entry : s3Headers.entrySet()) {
            objectMetadata.setHeader(entry.getKey(), entry.getValue());
        }
    }

    String encryption = exchange.getIn().getHeader(S3Constants.SERVER_SIDE_ENCRYPTION,
            getConfiguration().getServerSideEncryption(), String.class);
    if (encryption != null) {
        objectMetadata.setSSEAlgorithm(encryption);
    }

    return objectMetadata;
}
From source file:hydrograph.engine.spark.datasource.utils.AWSS3Util.java
License:Apache License
public void upload(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start AWSS3Util upload");
    int retryAttempt = 0;
    int i;

    java.nio.file.Path inputfile = new File(runFileTransferEntity.getLocalPath()).toPath();
    String keyName = inputfile.getFileName().toString();
    log.info("keyName is: " + keyName);
    log.info("bucket name is:" + runFileTransferEntity.getBucketName());
    log.info("Folder Name is" + runFileTransferEntity.getFolder_name_in_bucket());

    String amazonFileUploadLocationOriginal = null;
    FileInputStream stream = null;
    File filecheck = new File(runFileTransferEntity.getLocalPath());
    if (runFileTransferEntity.getFailOnError())
        if (!(filecheck.isFile() || filecheck.isDirectory())
                && !(runFileTransferEntity.getLocalPath().contains("hdfs://"))) {
            Log.error("Invalid local path.Please provide valid path");
            throw new AWSUtilException("Invalid local path");
        }

    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();

    for (i = 0; i < retryAttempt; i++) {
        log.info("connection attempt: " + (i + 1));
        try {
            AmazonS3 s3Client = null;
            ClientConfiguration clientConf = new ClientConfiguration();
            clientConf.setProtocol(Protocol.HTTPS);
            if (runFileTransferEntity.getCrediationalPropertiesFile() == null) {
                BasicAWSCredentials creds = new BasicAWSCredentials(runFileTransferEntity.getAccessKeyID(),
                        runFileTransferEntity.getSecretAccessKey());
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            } else {
                File securityFile = new File(runFileTransferEntity.getCrediationalPropertiesFile());
                PropertiesCredentials creds = new PropertiesCredentials(securityFile);
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            }

            String s3folderName = null;
            String filepath = runFileTransferEntity.getFolder_name_in_bucket();
            log.debug("file path name" + filepath);
            s3folderName = filepath;

            if (s3folderName != null && !s3folderName.trim().equals("")) {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName() + "/" + s3folderName;
            } else {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName();
            }

            File f = new File(runFileTransferEntity.getLocalPath());

            if (runFileTransferEntity.getLocalPath().contains("hdfs://")) {
                log.debug("Provided HDFS local path ");
                String inputPath = runFileTransferEntity.getLocalPath();
                String s1 = inputPath.substring(7, inputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                File file = new File("/tmp");
                if (!file.exists())
                    file.mkdir();
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);
                Path local = new Path("/tmp");
                String s = inputPath.substring(7, inputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());
                Path hdfs = new Path(hdfspath);
                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());
                File dir = new File(hdfspath);
                if (hdfsFileSystem.isDirectory(new Path(hdfspath))) {
                    InputStream is = null;
                    OutputStream os = null;
                    String localDirectory = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                    FileStatus[] fileStatus = hdfsFileSystem
                            .listStatus(new Path(runFileTransferEntity.getLocalPath()));
                    Path[] paths = FileUtil.stat2Paths(fileStatus);
                    File dirs = null;
                    try {
                        String folderName = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                        DateFormat df = new SimpleDateFormat("dd-MM-yyyy");
                        String dateWithoutTime = df.format(new Date()).toString();
                        Random ran = new Random();
                        String tempFolder = "ftp_sftp_" + System.nanoTime() + "_" + ran.nextInt(1000);
                        dirs = new File("/tmp/" + tempFolder);
                        boolean success = dirs.mkdirs();
                        for (Path files : paths) {
                            is = hdfsFileSystem.open(files);
                            os = new BufferedOutputStream(new FileOutputStream(dirs + "/" + files.getName()));
                            org.apache.hadoop.io.IOUtils.copyBytes(is, os, conf);
                        }
                        for (File files : dirs.listFiles()) {
                            if (files.isFile()) {
                                s3Client.putObject(new PutObjectRequest(
                                        amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                        files));
                            }
                        }
                    } catch (IOException e) {
                        Log.error("IOException occured while transfering the file", e);
                    } finally {
                        org.apache.hadoop.io.IOUtils.closeStream(is);
                        org.apache.hadoop.io.IOUtils.closeStream(os);
                        if (dirs != null) {
                            FileUtils.deleteDirectory(dirs);
                        }
                    }
                } else {
                    hdfsFileSystem.copyToLocalFile(false, hdfs, local);
                    stream = new FileInputStream("/tmp/" + f.getName());
                    File S3file = new File("/tmp/" + f.getName());
                    PutObjectRequest putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal,
                            keyName, file);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            } else {
                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());

                if (Files.isDirectory(inputfile)) {
                    File fileloc = new File(inputfile.toAbsolutePath().toString());
                    String folderName = new File(runFileTransferEntity.getLocalPath()).getName();
                    for (File files : fileloc.listFiles()) {
                        if (files.isFile()) {
                            PutObjectRequest putObjectRequest = new PutObjectRequest(
                                    amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                    files);
                            PutObjectResult result = s3Client.putObject(putObjectRequest);
                        }
                    }
                } else {
                    PutObjectRequest putObjectRequest = null;
                    File file = new File(runFileTransferEntity.getLocalPath());
                    stream = new FileInputStream(runFileTransferEntity.getLocalPath());
                    putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal, keyName, file);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            }
        } catch (AmazonServiceException e) {
            if (e.getStatusCode() == 403 || e.getStatusCode() == 404) {
                if (runFileTransferEntity.getFailOnError())
                    Log.error("Incorrect details provided.Please provide valid details", e);
                throw new AWSUtilException("Incorrect details provided");
            }
            {
                try {
                    Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
                } catch (Exception e1) {
                    Log.error("Exception occured while sleeping the thread");
                }
                continue;
            }
        } catch (Exception e) {
            log.error("error while transferring file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occured while sleeping the thread");
            }
            continue;
        } catch (Error err) {
            Log.error("Error occured while uploading the file", err);
            throw new AWSUtilException(err);
        }
        done = true;
        break;
    }

    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new AWSUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    if (i == runFileTransferEntity.getRetryAttempt()) {
        if (runFileTransferEntity.getFailOnError())
            throw new AWSUtilException("File transfer failed");
    }
    log.debug("Finished AWSS3Util upload");
}
From source file:jenkins.plugins.itemstorage.s3.S3BaseUploadCallable.java
License:Open Source License
protected ObjectMetadata buildMetadata(File file) throws IOException {
    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType(Mimetypes.getInstance().getMimetype(file.getName()));
    metadata.setContentLength(file.length());
    metadata.setLastModified(new Date(file.lastModified()));

    if (storageClass != null && !storageClass.isEmpty()) {
        metadata.setHeader("x-amz-storage-class", storageClass);
    }
    if (useServerSideEncryption) {
        metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }

    for (Map.Entry<String, String> entry : userMetadata.entrySet()) {
        final String key = entry.getKey().toLowerCase();
        switch (key) {
        case "cache-control":
            metadata.setCacheControl(entry.getValue());
            break;
        case "expires":
            try {
                final Date expires = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss z")
                        .parse(entry.getValue());
                metadata.setHttpExpiresDate(expires);
            } catch (ParseException e) {
                metadata.addUserMetadata(entry.getKey(), entry.getValue());
            }
            break;
        case "content-encoding":
            metadata.setContentEncoding(entry.getValue());
            break;
        case "content-type":
            metadata.setContentType(entry.getValue());
        default:
            metadata.addUserMetadata(entry.getKey(), entry.getValue());
            break;
        }
    }
    return metadata;
}
From source file:org.apache.camel.component.aws.s3.S3Producer.java
License:Apache License
@Override
public void process(final Exchange exchange) throws Exception {
    ObjectMetadata objectMetadata = new ObjectMetadata();

    Long contentLength = exchange.getIn().getHeader(S3Constants.CONTENT_LENGTH, Long.class);
    if (contentLength != null) {
        objectMetadata.setContentLength(contentLength);
    }

    String contentType = exchange.getIn().getHeader(S3Constants.CONTENT_TYPE, String.class);
    if (contentType != null) {
        objectMetadata.setContentType(contentType);
    }

    String cacheControl = exchange.getIn().getHeader(S3Constants.CACHE_CONTROL, String.class);
    if (cacheControl != null) {
        objectMetadata.setCacheControl(cacheControl);
    }

    String contentDisposition = exchange.getIn().getHeader(S3Constants.CONTENT_DISPOSITION, String.class);
    if (contentDisposition != null) {
        objectMetadata.setContentDisposition(contentDisposition);
    }

    String contentEncoding = exchange.getIn().getHeader(S3Constants.CONTENT_ENCODING, String.class);
    if (contentEncoding != null) {
        objectMetadata.setContentEncoding(contentEncoding);
    }

    String contentMD5 = exchange.getIn().getHeader(S3Constants.CONTENT_MD5, String.class);
    if (contentMD5 != null) {
        objectMetadata.setContentMD5(contentMD5);
    }

    Date lastModified = exchange.getIn().getHeader(S3Constants.LAST_MODIFIED, Date.class);
    if (lastModified != null) {
        objectMetadata.setLastModified(lastModified);
    }

    Map<String, String> userMetadata = exchange.getIn().getHeader(S3Constants.USER_METADATA, Map.class);
    if (userMetadata != null) {
        objectMetadata.setUserMetadata(userMetadata);
    }

    File filePayload = null;
    Object obj = exchange.getIn().getMandatoryBody();
    if (obj instanceof File) {
        filePayload = (File) obj;
    }

    PutObjectRequest putObjectRequest = new PutObjectRequest(getConfiguration().getBucketName(),
            determineKey(exchange), exchange.getIn().getMandatoryBody(InputStream.class), objectMetadata);

    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        putObjectRequest.setStorageClass(storageClass);
    }

    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        putObjectRequest.setCannedAcl(objectAcl);
    }

    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if cannedacl and acl are both specified the last one will be used. refer to
        // PutObjectRequest#setAccessControlList for more details
        putObjectRequest.setAccessControlList(acl);
    }

    LOG.trace("Put object [{}] from exchange [{}]...", putObjectRequest, exchange);

    PutObjectResult putObjectResult = getEndpoint().getS3Client().putObject(putObjectRequest);

    LOG.trace("Received result [{}]", putObjectResult);

    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, putObjectResult.getETag());
    if (putObjectResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, putObjectResult.getVersionId());
    }

    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        IOHelper.close(putObjectRequest.getInputStream());
        FileUtil.deleteFile(filePayload);
    }
}
From source file:org.duracloud.s3storage.S3StorageProvider.java
License:Apache License
/**
 * {@inheritDoc}
 */
public String addContent(String spaceId, String contentId, String contentMimeType,
                         Map<String, String> userProperties, long contentSize, String contentChecksum,
                         InputStream content) {
    log.debug("addContent(" + spaceId + ", " + contentId + ", " + contentMimeType + ", " + contentSize + ", "
            + contentChecksum + ")");

    // Will throw if bucket does not exist
    String bucketName = getBucketName(spaceId);

    // Wrap the content in order to be able to retrieve a checksum
    ChecksumInputStream wrappedContent = new ChecksumInputStream(content, contentChecksum);

    String contentEncoding = removeContentEncoding(userProperties);

    userProperties = removeCalculatedProperties(userProperties);
    if (contentMimeType == null || contentMimeType.equals("")) {
        contentMimeType = DEFAULT_MIMETYPE;
    }

    ObjectMetadata objMetadata = new ObjectMetadata();
    objMetadata.setContentType(contentMimeType);
    if (contentSize > 0) {
        objMetadata.setContentLength(contentSize);
    }
    if (null != contentChecksum && !contentChecksum.isEmpty()) {
        String encodedChecksum = ChecksumUtil.convertToBase64Encoding(contentChecksum);
        objMetadata.setContentMD5(encodedChecksum);
    }

    if (contentEncoding != null) {
        objMetadata.setContentEncoding(contentEncoding);
    }

    if (userProperties != null) {
        for (String key : userProperties.keySet()) {
            String value = userProperties.get(key);

            if (log.isDebugEnabled()) {
                log.debug("[" + key + "|" + value + "]");
            }

            objMetadata.addUserMetadata(getSpaceFree(encodeHeaderKey(key)), encodeHeaderValue(value));
        }
    }

    PutObjectRequest putRequest = new PutObjectRequest(bucketName, contentId, wrappedContent, objMetadata);
    putRequest.setStorageClass(DEFAULT_STORAGE_CLASS);
    putRequest.setCannedAcl(CannedAccessControlList.Private);

    // Add the object
    String etag;
    try {
        PutObjectResult putResult = s3Client.putObject(putRequest);
        etag = putResult.getETag();
    } catch (AmazonClientException e) {
        if (e instanceof AmazonS3Exception) {
            AmazonS3Exception s3Ex = (AmazonS3Exception) e;
            String errorCode = s3Ex.getErrorCode();
            Integer statusCode = s3Ex.getStatusCode();
            String message = MessageFormat.format(
                    "exception putting object {0} into {1}: errorCode={2},"
                            + " statusCode={3}, errorMessage={4}",
                    contentId, bucketName, errorCode, statusCode, e.getMessage());
            if (errorCode.equals("InvalidDigest") || errorCode.equals("BadDigest")) {
                log.error(message, e);
                String err = "Checksum mismatch detected attempting to add " + "content " + contentId
                        + " to S3 bucket " + bucketName + ". Content was not added.";
                throw new ChecksumMismatchException(err, e, NO_RETRY);
            } else if (errorCode.equals("IncompleteBody")) {
                log.error(message, e);
                throw new StorageException("The content body was incomplete for " + contentId
                        + " to S3 bucket " + bucketName + ". Content was not added.", e, NO_RETRY);
            } else if (!statusCode.equals(HttpStatus.SC_SERVICE_UNAVAILABLE)
                    && !statusCode.equals(HttpStatus.SC_NOT_FOUND)) {
                log.error(message, e);
            } else {
                log.warn(message, e);
            }
        } else {
            String err = MessageFormat.format("exception putting object {0} into {1}: {2}", contentId,
                    bucketName, e.getMessage());
            log.error(err, e);
        }

        // Check to see if file landed successfully in S3, despite the exception
        etag = doesContentExistWithExpectedChecksum(bucketName, contentId, contentChecksum);
        if (null == etag) {
            String err = "Could not add content " + contentId + " with type " + contentMimeType + " and size "
                    + contentSize + " to S3 bucket " + bucketName + " due to error: " + e.getMessage();
            throw new StorageException(err, e, NO_RETRY);
        }
    }

    // Compare checksum
    String providerChecksum = getETagValue(etag);
    String checksum = wrappedContent.getMD5();
    StorageProviderUtil.compareChecksum(providerChecksum, spaceId, contentId, checksum);
    return providerChecksum;
}