List of usage examples for com.amazonaws.services.s3.Headers.CONTENT_LENGTH
String CONTENT_LENGTH (the "Content-Length" HTTP header name)
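Before the examples, a minimal, self-contained sketch of the most common pattern they share: setting the content length on ObjectMetadata before streaming an upload, so the SDK does not have to buffer the stream. Bucket and key names here are hypothetical.

import java.io.ByteArrayInputStream;
import java.io.InputStream;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class ContentLengthUpload {
    public static void main(String[] args) {
        byte[] payload = "hello".getBytes();
        InputStream input = new ByteArrayInputStream(payload);

        ObjectMetadata metadata = new ObjectMetadata();
        // setContentLength() populates the Headers.CONTENT_LENGTH ("Content-Length")
        // entry, letting the SDK stream the upload without buffering it in memory.
        metadata.setContentLength(payload.length);

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        s3.putObject(new PutObjectRequest("example-bucket", "example-key", input, metadata));
    }
}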
From source file: com.emc.vipr.services.s3.ViPRS3Client.java
License: Open Source License
/**
 * Executes a (subclass of) PutObjectRequest. In particular, we check for subclasses
 * of the UpdateObjectRequest and inject the value of the Range header. This version
 * also returns the raw ObjectMetadata for the response so callers can construct
 * their own result objects.
 *
 * @param putObjectRequest the request to execute
 * @return an ObjectMetadata containing the response headers.
 */
protected ObjectMetadata doPut(PutObjectRequest putObjectRequest) {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");

    String bucketName = putObjectRequest.getBucketName();
    String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();
    if (metadata == null)
        metadata = new ObjectMetadata();

    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key, "The key parameter must be specified when uploading an object");

    /*
     * This is compatible with a progress listener set by either the legacy
     * method PutObjectRequest#setProgressListener or the new method
     * PutObjectRequest#setGeneralProgressListener.
     */
    com.amazonaws.event.ProgressListener progressListener = putObjectRequest.getGeneralProgressListener();
    ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor
            .wrapListener(progressListener);

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options
    if (putObjectRequest.getFile() != null) {
        File file = putObjectRequest.getFile();

        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());

        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        FileInputStream fileInputStream = null;
        try {
            fileInputStream = new FileInputStream(file);
            byte[] md5Hash = Md5Utils.computeMD5Hash(fileInputStream);
            metadata.setContentMD5(BinaryUtils.toBase64(md5Hash));
        } catch (Exception e) {
            throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
        } finally {
            try {
                if (fileInputStream != null)
                    fileInputStream.close();
            } catch (Exception e) {
                // ignore close failures
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (FileNotFoundException fnfe) {
            throw new AmazonClientException("Unable to find file to upload", fnfe);
        }
    }

    Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }

    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            input = new ByteArrayInputStream(new byte[0]);
        }
    }

    // Use internal interface to differentiate 0 from unset.
    if (metadata.getRawMetadata().get(Headers.CONTENT_LENGTH) == null) {
        /*
         * There's nothing we can do except let the HTTP client buffer
         * the input stream contents if the caller doesn't tell us how much
         * data to expect in a stream, since we have to explicitly tell
         * Amazon S3 how much we're sending before we start sending any of it.
         */
        log.warn("No content length specified for stream data. "
                + "Stream contents will be buffered in memory and could result in "
                + "out of memory errors.");
    }

    if (progressListenerCallbackExecutor != null) {
        // Wrap the stream so progress events are actually reported as bytes are read.
        input = new com.amazonaws.event.ProgressReportingInputStream(input, progressListenerCallbackExecutor);
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.STARTED_EVENT_CODE);
    }

    if (!input.markSupported()) {
        int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE;
        String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize");
        if (bufferSizeOverride != null) {
            try {
                streamBufferSize = Integer.parseInt(bufferSizeOverride);
            } catch (Exception e) {
                log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride);
            }
        }
        input = new RepeatableInputStream(input, streamBufferSize);
    }

    MD5DigestCalculatingInputStream md5DigestStream = null;
    if (metadata.getContentMD5() == null) {
        /*
         * If the user hasn't set the content MD5, then we don't want to
         * buffer the whole stream in memory just to calculate it. Instead,
         * we can calculate it on the fly and validate it against the returned
         * ETag from the object upload.
         */
        try {
            md5DigestStream = new MD5DigestCalculatingInputStream(input);
            input = md5DigestStream;
        } catch (NoSuchAlgorithmException e) {
            log.warn("No MD5 digest algorithm available. Unable to calculate "
                    + "checksum and verify data integrity.", e);
        }
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    request.setContent(input);

    if (putObjectRequest instanceof UpdateObjectRequest) {
        request.addHeader(Headers.RANGE, "bytes=" + ((UpdateObjectRequest) putObjectRequest).getUpdateRange());
    }

    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (AmazonClientException ace) {
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (Exception e) {
            log.warn("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }

    String contentMd5 = metadata.getContentMD5();
    if (md5DigestStream != null) {
        contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
    }

    // Can't verify MD5 on appends/update (yet).
    if (!(putObjectRequest instanceof UpdateObjectRequest)) {
        if (returnedMetadata != null && contentMd5 != null) {
            byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
            byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                fireProgressEvent(progressListenerCallbackExecutor,
                        com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
                throw new AmazonClientException("Unable to verify integrity of data upload. "
                        + "Client calculated content hash didn't match hash calculated by Amazon S3. "
                        + "You may need to delete the data stored in Amazon S3.");
            }
        }
    }

    fireProgressEvent(progressListenerCallbackExecutor,
            com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE);

    return returnedMetadata;
}
From source file: com.ibm.og.s3.v4.AWSS3V4Signer.java
License: Open Source License
/**
 * Returns the pre-defined header value and sets other necessary headers if the request needs to be
 * chunk-encoded. Otherwise calls the superclass method, which calculates the hash of the whole
 * content for signing.
 */
@Override
protected String calculateContentHash(final SignableRequest<?> request) {
    // To be consistent with other service clients using sig-v4,
    // we just set the header as "required", and AWS4Signer.sign() will be
    // notified to pick up the header value returned by this method.
    request.addHeader(X_AMZ_CONTENT_SHA256, "required");
    final String contentLength = request.getHeaders().get(Headers.CONTENT_LENGTH);
    if (useChunkEncoding(request)) {
        final long originalContentLength;
        if (contentLength != null) {
            originalContentLength = Long.parseLong(contentLength);
        } else {
            /*
             * The "Content-Length" header could be missing if the caller is uploading a stream without
             * setting Content-Length in ObjectMetadata. Before using sigv4, we relied on HttpClient to
             * add this header by using BufferedHttpEntity when creating the HttpRequest object. But
             * now we need this information immediately for the signing process, so we have to cache
             * the stream here.
             */
            try {
                originalContentLength = getContentLength(request);
            } catch (final IOException e) {
                throw new AmazonClientException("Cannot get the content-length of the request content.", e);
            }
        }
        request.addHeader("x-amz-decoded-content-length", Long.toString(originalContentLength));
        // Make sure the "Content-Length" header is not empty so that HttpClient
        // won't cache the stream again to recover Content-Length
        request.addHeader(Headers.CONTENT_LENGTH,
                Long.toString(AwsChunkedEncodingInputStream.calculateStreamContentLength(originalContentLength)));
        return CONTENT_SHA_256;
    }
    if (this.digestCache != null) {
        try {
            final long length = contentLength != null ? Long.parseLong(contentLength) : 0;
            return BinaryUtils.toHex(this.digestCache.get(length));
        } catch (final ExecutionException e) {
            throw new RuntimeException(e);
        }
    }
    return super.calculateContentHash(request);
}
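For chunk-encoded sigv4 uploads, two lengths are in play: the decoded payload size and the on-wire Content-Length, which additionally covers chunk framing and per-chunk signatures. A minimal sketch of the arithmetic, assuming the v1 SDK's AwsChunkedEncodingInputStream helper (its package location has varied between SDK versions, so the import below is an assumption):

import com.amazonaws.auth.internal.AwsChunkedEncodingInputStream; // package location is an assumption

public class ChunkedLengths {
    public static void main(String[] args) {
        long decoded = 1024L; // bytes the caller actually uploads: x-amz-decoded-content-length
        long encoded = AwsChunkedEncodingInputStream.calculateStreamContentLength(decoded);
        // The wire Content-Length adds chunk headers and trailing signatures,
        // so it is strictly larger than the decoded length.
        System.out.println("x-amz-decoded-content-length: " + decoded);
        System.out.println("Content-Length:               " + encoded);
    }
}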
From source file: org.duracloud.s3storage.S3StorageProvider.java
License: Apache License
@Override
protected Map<String, String> removeCalculatedProperties(Map<String, String> contentProperties) {
    contentProperties = super.removeCalculatedProperties(contentProperties);
    if (contentProperties != null) {
        contentProperties.remove(Headers.CONTENT_LENGTH);
        contentProperties.remove(Headers.CONTENT_TYPE); // Content-Type is set on ObjectMetadata object
        contentProperties.remove(Headers.LAST_MODIFIED);
        contentProperties.remove(Headers.DATE);
        contentProperties.remove(Headers.ETAG);
        contentProperties.remove(Headers.CONTENT_LENGTH.toLowerCase());
        contentProperties.remove(Headers.CONTENT_TYPE.toLowerCase());
        contentProperties.remove(Headers.LAST_MODIFIED.toLowerCase());
        contentProperties.remove(Headers.DATE.toLowerCase());
        contentProperties.remove(Headers.ETAG.toLowerCase());
    }
    return contentProperties;
}
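The paired removals above exist because S3-compatible endpoints may echo headers in canonical case or all-lowercase. A hypothetical helper (not part of DuraCloud) that collapses each pair into one case-insensitive pass:

import java.util.HashMap;
import java.util.Map;

public class HeaderRemoval {
    // Hypothetical helper: strips a header key regardless of case, covering
    // both "Content-Length" and "content-length" style echoes in one call.
    static void removeIgnoreCase(Map<String, String> props, String headerName) {
        props.keySet().removeIf(k -> k.equalsIgnoreCase(headerName));
    }

    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("Content-Length", "42");
        props.put("content-length", "42");
        removeIgnoreCase(props, "Content-Length");
        System.out.println(props); // {}
    }
}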
From source file: org.duracloud.s3storage.S3StorageProvider.java
License: Apache License
private Map<String, String> prepContentProperties(ObjectMetadata objMetadata) {
    Map<String, String> contentProperties = new HashMap<>();

    // Set the user properties
    Map<String, String> userProperties = objMetadata.getUserMetadata();
    for (String metaName : userProperties.keySet()) {
        String metaValue = userProperties.get(metaName);
        contentProperties.put(getWithSpace(decodeHeaderKey(metaName)), decodeHeaderValue(metaValue));
    }

    // Set the response metadata
    Map<String, Object> responseMeta = objMetadata.getRawMetadata();
    for (String metaName : responseMeta.keySet()) {
        Object metaValue = responseMeta.get(metaName);
        if (metaValue instanceof String) {
            contentProperties.put(metaName, (String) metaValue);
        }
    }

    // Set MIMETYPE
    String contentType = objMetadata.getContentType();
    if (contentType != null) {
        contentProperties.put(PROPERTIES_CONTENT_MIMETYPE, contentType);
        contentProperties.put(Headers.CONTENT_TYPE, contentType);
    }

    // Set CONTENT_ENCODING
    String encoding = objMetadata.getContentEncoding();
    if (encoding != null) {
        contentProperties.put(Headers.CONTENT_ENCODING, encoding);
    }

    // Set SIZE
    long contentLength = objMetadata.getContentLength();
    if (contentLength >= 0) {
        String size = String.valueOf(contentLength);
        contentProperties.put(PROPERTIES_CONTENT_SIZE, size);
        contentProperties.put(Headers.CONTENT_LENGTH, size);
    }

    // Set CHECKSUM
    String checksum = objMetadata.getETag();
    if (checksum != null) {
        String eTagValue = getETagValue(checksum);
        contentProperties.put(PROPERTIES_CONTENT_CHECKSUM, eTagValue);
        contentProperties.put(PROPERTIES_CONTENT_MD5, eTagValue);
        contentProperties.put(Headers.ETAG, eTagValue);
    }

    // Set MODIFIED
    Date modified = objMetadata.getLastModified();
    if (modified != null) {
        String modDate = formattedDate(modified);
        contentProperties.put(PROPERTIES_CONTENT_MODIFIED, modDate);
        contentProperties.put(Headers.LAST_MODIFIED, modDate);
    }

    return contentProperties;
}
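prepContentProperties() is fed by a HEAD-style metadata fetch. A hedged usage sketch of where its Content-Length input comes from (bucket and key names are hypothetical):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class MetadataFetch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        ObjectMetadata meta = s3.getObjectMetadata("example-bucket", "example-key");
        // getContentLength() is the value prepContentProperties() copies into
        // both PROPERTIES_CONTENT_SIZE and Headers.CONTENT_LENGTH.
        System.out.println(meta.getContentLength());
    }
}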
From source file: org.openflamingo.fs.s3.S3ObjectProvider.java
License: Apache License
public boolean save(InputStream is, long size, String path) {
    Assert.notNull(is, "Please enter the input stream.");
    Assert.hasLength(path, "Please enter the path.");

    try {
        String bucket = S3Utils.getBucket(path);
        String key = StringUtils.remove(path, "/" + bucket + "/");

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setHeader(Headers.CONTENT_LENGTH, size);
        awsClient.putObject(new PutObjectRequest(bucket, key, is, metadata));
        return true;
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
        throw new FileSystemException("Cannot copy the file.", ase);
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "an internal error while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
        throw new FileSystemException("Cannot copy the file.", ace);
    }
}
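A hedged usage sketch for save(); the "/bucket/key" path layout follows what S3Utils.getBucket() appears to expect, and the provider instance is assumed to be configured elsewhere:

import java.io.ByteArrayInputStream;

public class SaveExample {
    // "provider" is an S3ObjectProvider assumed to be initialized elsewhere.
    static void upload(S3ObjectProvider provider) {
        byte[] data = "hello".getBytes();
        // Passing the exact byte count as "size" sets Headers.CONTENT_LENGTH up front,
        // so the SDK can stream without buffering the input.
        provider.save(new ByteArrayInputStream(data), data.length,
                "/example-bucket/path/to/file.txt");
    }
}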