Example usage for java.io InputStream markSupported

Introduction

This page collects example usages of the java.io.InputStream markSupported method.

Prototype

public boolean markSupported() 

Document

Tests if this input stream supports the mark and reset methods. The base InputStream implementation always returns false; mark-capable subclasses such as BufferedInputStream and ByteArrayInputStream override it to return true.
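Before calling mark and reset, a caller should first check markSupported() and, when the stream is not mark-capable, wrap it in one that is, such as BufferedInputStream. Below is a minimal, self-contained sketch; the file name data.bin is a placeholder:

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class MarkSupportedExample {
    public static void main(String[] args) throws IOException {
        // FileInputStream.markSupported() returns false, so wrap it when marking is needed
        InputStream raw = new FileInputStream("data.bin"); // placeholder path
        try (InputStream in = raw.markSupported() ? raw : new BufferedInputStream(raw)) {
            in.mark(16); // remember this position; valid until more than 16 bytes are read
            int first = in.read(); // peek at the first byte
            in.reset(); // rewind to the marked position
            System.out.println("first byte: " + first + ", markSupported: " + in.markSupported());
        }
    }
}

The examples below follow the same pattern at larger scale: each one tests markSupported() to decide whether a stream can be re-read as-is or must be buffered first.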

Usage

From source file:pt.lunacloud.services.storage.LunacloudStorageClient.java
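In this putObject implementation, markSupported() decides whether the upload stream can be re-read on retry: a stream that does not support marking is wrapped in a RepeatableInputStream with a configurable buffer size.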

public PutObjectResult putObject(PutObjectRequest putObjectRequest)
        throws LunacloudClientException, LunacloudServiceException {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");

    String bucketName = putObjectRequest.getBucketName();
    String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();
    ProgressListener progressListener = putObjectRequest.getProgressListener();
    if (metadata == null)
        metadata = new ObjectMetadata();

    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key, "The key parameter must be specified when uploading an object");

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options
    if (putObjectRequest.getFile() != null) {
        File file = putObjectRequest.getFile();

        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());

        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        FileInputStream fileInputStream = null;
        try {
            fileInputStream = new FileInputStream(file);
            byte[] md5Hash = Md5Utils.computeMD5Hash(fileInputStream);
            metadata.setContentMD5(BinaryUtils.toBase64(md5Hash));
        } catch (Exception e) {
            throw new LunacloudClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
        } finally {
            try {
                if (fileInputStream != null) {
                    fileInputStream.close();
                }
            } catch (Exception e) {
                // best-effort close; nothing more we can do here
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (FileNotFoundException fnfe) {
            throw new LunacloudClientException("Unable to find file to upload", fnfe);
        }
    }

    Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }

    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            input = new ByteArrayInputStream(new byte[0]);
        }
    }

    // Use internal interface to differentiate 0 from unset.
    if (metadata.getRawMetadata().get(Headers.CONTENT_LENGTH) == null) {
        /*
         * There's nothing we can do except for let the HTTP client buffer
         * the input stream contents if the caller doesn't tell us how much
         * data to expect in a stream since we have to explicitly tell
         * Amazon S3 how much we're sending before we start sending any of
         * it.
         */
        log.warn("No content length specified for stream data.  "
                + "Stream contents will be buffered in memory and could result in " + "out of memory errors.");
    }

    if (progressListener != null) {
        input = new ProgressReportingInputStream(input, progressListener);
        fireProgressEvent(progressListener, ProgressEvent.STARTED_EVENT_CODE);
    }

    if (!input.markSupported()) {
        int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE;
        String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize");
        if (bufferSizeOverride != null) {
            try {
                streamBufferSize = Integer.parseInt(bufferSizeOverride);
            } catch (Exception e) {
                log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride);
            }
        }

        input = new RepeatableInputStream(input, streamBufferSize);
    }

    MD5DigestCalculatingInputStream md5DigestStream = null;
    if (metadata.getContentMD5() == null) {
        /*
         * If the user hasn't set the content MD5, then we don't want to
         * buffer the whole stream in memory just to calculate it. Instead,
         * we can calculate it on the fly and validate it with the returned
         * ETag from the object upload.
         */
        try {
            md5DigestStream = new MD5DigestCalculatingInputStream(input);
            input = md5DigestStream;
        } catch (NoSuchAlgorithmException e) {
            log.warn("No MD5 digest algorithm available.  Unable to calculate "
                    + "checksum and verify data integrity.", e);
        }
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to the "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    request.setContent(input);

    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (LunacloudClientException ace) {
        fireProgressEvent(progressListener, ProgressEvent.FAILED_EVENT_CODE);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (Exception e) {
            log.warn("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }

    String contentMd5 = metadata.getContentMD5();
    if (md5DigestStream != null) {
        contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
    }

    if (returnedMetadata != null && contentMd5 != null) {
        byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
        byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

        if (!Arrays.equals(clientSideHash, serverSideHash)) {
            fireProgressEvent(progressListener, ProgressEvent.FAILED_EVENT_CODE);
            throw new LunacloudClientException("Unable to verify integrity of data upload.  "
                    + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
                    + "You may need to delete the data stored in Amazon S3.");
        }
    }

    fireProgressEvent(progressListener, ProgressEvent.COMPLETED_EVENT_CODE);

    PutObjectResult result = new PutObjectResult();
    result.setETag(returnedMetadata.getETag());
    result.setVersionId(returnedMetadata.getVersionId());
    result.setServerSideEncryption(returnedMetadata.getServerSideEncryption());
    result.setExpirationTime(returnedMetadata.getExpirationTime());
    result.setExpirationTimeRuleId(returnedMetadata.getExpirationTimeRuleId());

    return result;
}

From source file:cn.ctyun.amazonaws.services.s3.AmazonS3Client.java
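This variant of the same method, from a CTYun build of the AWS SDK, uses markSupported() identically to wrap non-markable streams in a RepeatableInputStream; it additionally records the computed Content-MD5 on the returned PutObjectResult.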

public PutObjectResult putObject(PutObjectRequest putObjectRequest)
        throws AmazonClientException, AmazonServiceException {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");

    String bucketName = putObjectRequest.getBucketName();
    String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();
    ProgressListener progressListener = putObjectRequest.getProgressListener();
    if (metadata == null)
        metadata = new ObjectMetadata();

    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key, "The key parameter must be specified when uploading an object");

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options
    if (putObjectRequest.getFile() != null) {
        File file = putObjectRequest.getFile();

        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());

        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        FileInputStream fileInputStream = null;
        try {
            fileInputStream = new FileInputStream(file);
            byte[] md5Hash = Md5Utils.computeMD5Hash(fileInputStream);
            metadata.setContentMD5(BinaryUtils.toBase64(md5Hash));
        } catch (Exception e) {
            throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
        } finally {
            try {
                if (fileInputStream != null) {
                    fileInputStream.close();
                }
            } catch (Exception e) {
                // best-effort close; nothing more we can do here
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (FileNotFoundException fnfe) {
            throw new AmazonClientException("Unable to find file to upload", fnfe);
        }
    }

    Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }

    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            input = new ByteArrayInputStream(new byte[0]);
        }
    }

    // Use internal interface to differentiate 0 from unset.
    if (metadata.getRawMetadata().get(Headers.CONTENT_LENGTH) == null) {
        /*
         * There's nothing we can do except for let the HTTP client buffer
         * the input stream contents if the caller doesn't tell us how much
         * data to expect in a stream since we have to explicitly tell
         * Amazon S3 how much we're sending before we start sending any of
         * it.
         */
        log.warn("No content length specified for stream data.  "
                + "Stream contents will be buffered in memory and could result in " + "out of memory errors.");
    }

    if (progressListener != null) {
        input = new ProgressReportingInputStream(input, progressListener);
        fireProgressEvent(progressListener, ProgressEvent.STARTED_EVENT_CODE);
    }

    if (!input.markSupported()) {
        int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE;
        String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize");
        if (bufferSizeOverride != null) {
            try {
                streamBufferSize = Integer.parseInt(bufferSizeOverride);
            } catch (Exception e) {
                log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride);
            }
        }

        input = new RepeatableInputStream(input, streamBufferSize);
    }

    MD5DigestCalculatingInputStream md5DigestStream = null;
    if (metadata.getContentMD5() == null) {
        /*
         * If the user hasn't set the content MD5, then we don't want to
         * buffer the whole stream in memory just to calculate it. Instead,
         * we can calculate it on the fly and validate it with the returned
         * ETag from the object upload.
         */
        try {
            md5DigestStream = new MD5DigestCalculatingInputStream(input);
            input = md5DigestStream;
        } catch (NoSuchAlgorithmException e) {
            log.warn("No MD5 digest algorithm available.  Unable to calculate "
                    + "checksum and verify data integrity.", e);
        }
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to the "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    request.setContent(input);

    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (AmazonClientException ace) {
        fireProgressEvent(progressListener, ProgressEvent.FAILED_EVENT_CODE);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (Exception e) {
            log.warn("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }

    String contentMd5 = metadata.getContentMD5();
    if (md5DigestStream != null) {
        contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
    }

    if (returnedMetadata != null && contentMd5 != null) {
        byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
        byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

        if (!Arrays.equals(clientSideHash, serverSideHash)) {
            fireProgressEvent(progressListener, ProgressEvent.FAILED_EVENT_CODE);
            throw new AmazonClientException("Unable to verify integrity of data upload.  "
                    + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
                    + "You may need to delete the data stored in Amazon S3.");
        }
    }

    fireProgressEvent(progressListener, ProgressEvent.COMPLETED_EVENT_CODE);

    PutObjectResult result = new PutObjectResult();
    result.setETag(returnedMetadata.getETag());
    result.setVersionId(returnedMetadata.getVersionId());
    result.setServerSideEncryption(returnedMetadata.getServerSideEncryption());
    result.setExpirationTime(returnedMetadata.getExpirationTime());
    result.setExpirationTimeRuleId(returnedMetadata.getExpirationTimeRuleId());
    result.setContentMd5(contentMd5);

    return result;
}

From source file:com.amazonaws.services.s3.AmazonS3Client.java
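In the AWS SDK's own client, the markSupported() check comes after an optional LengthCheckInputStream wrapping; non-markable streams are again buffered into a RepeatableInputStream, with the buffer size tunable through the com.amazonaws.sdk.s3.defaultStreamBufferSize system property.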

@Override
public PutObjectResult putObject(PutObjectRequest putObjectRequest)
        throws AmazonClientException, AmazonServiceException {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");
    String bucketName = putObjectRequest.getBucketName();
    String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();
    if (metadata == null)
        metadata = new ObjectMetadata();
    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key, "The key parameter must be specified when uploading an object");
    final boolean skipContentMd5Check = skipContentMd5IntegrityCheck(putObjectRequest);

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options
    if (putObjectRequest.getFile() != null) {
        File file = putObjectRequest.getFile();
        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());

        final boolean calculateMD5 = metadata.getContentMD5() == null;

        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        if (calculateMD5 && !skipContentMd5Check) {
            try {
                String contentMd5_b64 = Md5Utils.md5AsBase64(file);
                metadata.setContentMD5(contentMd5_b64);
            } catch (Exception e) {
                throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (FileNotFoundException fnfe) {
            throw new AmazonClientException("Unable to find file to upload", fnfe);
        }
    }

    Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }

    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            input = new ByteArrayInputStream(new byte[0]);
        }
    }

    // Populate the SSE-CPK parameters to the request header
    populateSseCpkRequestParameters(request, putObjectRequest.getSSECustomerKey());

    // Use internal interface to differentiate 0 from unset.
    final Long contentLength = (Long) metadata.getRawMetadataValue(Headers.CONTENT_LENGTH);
    if (contentLength == null) {
        /*
         * There's nothing we can do except for let the HTTP client buffer
         * the input stream contents if the caller doesn't tell us how much
         * data to expect in a stream since we have to explicitly tell
         * Amazon S3 how much we're sending before we start sending any of
         * it.
         */
        log.warn("No content length specified for stream data.  "
                + "Stream contents will be buffered in memory and could result in " + "out of memory errors.");
    } else {
        final long expectedLength = contentLength.longValue();
        if (expectedLength >= 0) {
            // Performs length check on the underlying data stream.
            // For S3 encryption client, the underlying data stream here
            // refers to the cipher-text data stream (ie not the underlying
            // plain-text data stream which in turn may have been wrapped
            // with it's own length check input stream.)
            @SuppressWarnings("resource")
            LengthCheckInputStream lcis = new LengthCheckInputStream(input, expectedLength, // expected data length to be uploaded
                    EXCLUDE_SKIPPED_BYTES);
            input = lcis;
        }
    }

    if (!input.markSupported()) {
        int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE;
        String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize");
        if (bufferSizeOverride != null) {
            try {
                streamBufferSize = Integer.parseInt(bufferSizeOverride);
            } catch (Exception e) {
                log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride);
            }
        }

        input = new RepeatableInputStream(input, streamBufferSize);
    }

    MD5DigestCalculatingInputStream md5DigestStream = null;
    if (metadata.getContentMD5() == null && !skipContentMd5Check) {
        /*
         * If the user hasn't set the content MD5, then we don't want to
         * buffer the whole stream in memory just to calculate it. Instead,
         * we can calculate it on the fly and validate it with the returned
         * ETag from the object upload.
         */
        input = md5DigestStream = new MD5DigestCalculatingInputStream(input);
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to the "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    request.setContent(input);
    final ProgressListener listener = putObjectRequest.getGeneralProgressListener();
    publishProgress(listener, ProgressEventType.TRANSFER_STARTED_EVENT);
    ObjectMetadata returnedMetadata;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (Throwable t) {
        publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
        throw failure(t);
    } finally {
        closeQuietly(input, log);
    }

    String contentMd5 = metadata.getContentMD5();
    if (md5DigestStream != null) {
        contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
    }

    if (contentMd5 != null && !skipContentMd5Check) {
        byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
        byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

        if (!Arrays.equals(clientSideHash, serverSideHash)) {
            publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
            throw new AmazonClientException("Unable to verify integrity of data upload.  "
                    + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
                    + "You may need to delete the data stored in Amazon S3.");
        }
    }
    publishProgress(listener, ProgressEventType.TRANSFER_COMPLETED_EVENT);
    PutObjectResult result = new PutObjectResult();
    result.setETag(returnedMetadata.getETag());
    result.setVersionId(returnedMetadata.getVersionId());
    result.setSSEAlgorithm(returnedMetadata.getSSEAlgorithm());
    result.setSSECustomerAlgorithm(returnedMetadata.getSSECustomerAlgorithm());
    result.setSSECustomerKeyMd5(returnedMetadata.getSSECustomerKeyMd5());
    result.setExpirationTime(returnedMetadata.getExpirationTime());
    result.setExpirationTimeRuleId(returnedMetadata.getExpirationTimeRuleId());
    result.setContentMd5(contentMd5);

    return result;
}

From source file:s3.com.qiniu.services.s3.AmazonS3Client.java
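This fork takes a different approach when no content length is supplied: if markSupported() returns true, the stream's length is computed with calculateContentLength; otherwise the stream is drained into an in-memory ByteArrayInputStream so that the Content-Length header can still be set.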

@Override
public PutObjectResult putObject(PutObjectRequest putObjectRequest)
        throws AmazonClientException, AmazonServiceException {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");

    String bucketName = putObjectRequest.getBucketName();
    String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();

    /*
     * This is compatible with progress listener set by either the legacy
     * method PutObjectRequest#setProgressListener or the new method
     * PutObjectRequest#setGeneralProgressListener.
     */
    ProgressListener progressListener = putObjectRequest.getGeneralProgressListener();
    ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor
            .wrapListener(progressListener);

    if (metadata == null)
        metadata = new ObjectMetadata();

    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key, "The key parameter must be specified when uploading an object");

    final boolean skipContentMd5Check = ServiceUtils.skipMd5CheckPerRequest(putObjectRequest);

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options
    if (putObjectRequest.getFile() != null) {
        File file = putObjectRequest.getFile();
        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());

        final boolean calculateMD5 = metadata.getContentMD5() == null;

        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        if (calculateMD5 && !skipContentMd5Check) {
            try {
                String contentMd5_b64 = Md5Utils.md5AsBase64(file);
                metadata.setContentMD5(contentMd5_b64);
            } catch (Exception e) {
                throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (FileNotFoundException fnfe) {
            throw new AmazonClientException("Unable to find file to upload", fnfe);
        }
    }

    Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }

    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            setZeroContentLength(request);
            input = new ByteArrayInputStream(new byte[0]);
        }
    }

    // Populate the SSE-CPK parameters to the request header
    populateSseCpkRequestParameters(request, putObjectRequest.getSSECustomerKey());

    // Use internal interface to differentiate 0 from unset.
    final Long contentLength = (Long) metadata.getRawMetadataValue(Headers.CONTENT_LENGTH);
    if (contentLength == null) {
        /*
         * There's nothing we can do except for let the HTTP client buffer
         * the input stream contents if the caller doesn't tell us how much
         * data to expect in a stream since we have to explicitly tell
         * Amazon S3 how much we're sending before we start sending any of
         * it.
         */
        if (!input.markSupported()) {
            log.warn("No content length specified for stream data.  "
                    + "Stream contents will be buffered in memory and could result in "
                    + "out of memory errors.");
            ByteArrayInputStream bais = toByteArray(input);
            request.addHeader(Headers.CONTENT_LENGTH, String.valueOf(bais.available()));
            input = bais;
        } else {
            long len = calculateContentLength(input);
            request.addHeader(Headers.CONTENT_LENGTH, String.valueOf(len));
        }
    } else {
        final long expectedLength = contentLength.longValue();
        if (expectedLength >= 0) {
            // Performs length check on the underlying data stream.
            // For S3 encryption client, the underlying data stream here
            // refers to the cipher-text data stream (ie not the underlying
            // plain-text data stream which in turn may have been wrapped
            // with it's own length check input stream.)
            @SuppressWarnings("resource")
            LengthCheckInputStream lcis = new LengthCheckInputStream(input, expectedLength, // expected data length to be uploaded
                    EXCLUDE_SKIPPED_BYTES);
            input = lcis;
            request.addHeader(Headers.CONTENT_LENGTH, contentLength.toString());
        }
    }

    if (progressListenerCallbackExecutor != null) {
        input = new ProgressReportingInputStream(input, progressListenerCallbackExecutor);
        fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.STARTED_EVENT_CODE);
    }

    MD5DigestCalculatingInputStream md5DigestStream = null;
    if (metadata.getContentMD5() == null && !skipContentMd5Check) {
        /*
         * If the user hasn't set the content MD5, then we don't want to
         * buffer the whole stream in memory just to calculate it. Instead,
         * we can calculate it on the fly and validate it with the returned
         * ETag from the object upload.
         */
        input = md5DigestStream = new MD5DigestCalculatingInputStream(input);
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to the "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    request.setContent(input);
    /*
     * Enable 100-continue support for PUT operations, since this is where
     * we're potentially uploading large amounts of data and want to find
     * out as early as possible if an operation will fail. We don't want to
     * do this for all operations since it will cause extra latency in the
     * network interaction.
     */
    request.addHeader("Expect", "100-continue");

    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (AmazonClientException ace) {
        fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.FAILED_EVENT_CODE);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (AbortedException ignore) {
        } catch (Exception e) {
            log.debug("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }

    String contentMd5 = metadata.getContentMD5();
    if (md5DigestStream != null) {
        contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
    }

    if (returnedMetadata != null && contentMd5 != null && !skipContentMd5Check) {
        byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
        byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

        if (!Arrays.equals(clientSideHash, serverSideHash)) {
            fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.FAILED_EVENT_CODE);

            throw new AmazonClientException("Unable to verify integrity of data upload.  "
                    + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
                    + "You may need to delete the data stored in Amazon S3.");
        }
    }

    fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.COMPLETED_EVENT_CODE);

    PutObjectResult result = new PutObjectResult();
    result.setETag(returnedMetadata.getETag());
    result.setVersionId(returnedMetadata.getVersionId());
    result.setSSEAlgorithm(returnedMetadata.getSSEAlgorithm());
    result.setSSEKMSKeyId(returnedMetadata.getSSEKMSKeyId());
    result.setSSECustomerAlgorithm(returnedMetadata.getSSECustomerAlgorithm());
    result.setSSECustomerKeyMd5(returnedMetadata.getSSECustomerKeyMd5());
    result.setExpirationTime(returnedMetadata.getExpirationTime());
    result.setExpirationTimeRuleId(returnedMetadata.getExpirationTimeRuleId());
    result.setContentMd5(contentMd5);

    return result;
}

From source file:org.sakaiproject.content.impl.BaseContentService.java
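In this Sakai example, markSupported() is used for charset detection rather than uploading: ICU's CharsetDetector can consume a mark-capable stream directly, while a non-markable stream is first read into a 1000-byte buffer.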

private boolean checkUpdateContentEncoding(ContentResourceEdit edit) {
    if (edit == null) {
        return false;
    }
    M_log.debug("checkUpdateContentEncoding(" + edit.getId() + ")");

    InputStream content = null;
    boolean updated = false;
    try {
        //no point in doing this for 0 size resources
        if (edit.getContentLength() == 0) {
            return false;
        }

        String contentEncoding = edit.getProperties().getProperty(ResourceProperties.PROP_CONTENT_ENCODING);
        if (contentEncoding == null) {
            contentEncoding = "";
        }
        String encoding = null;
        CharsetDetector detector = new CharsetDetector();
        content = edit.streamContent();
        //we don't need the whole file; the first 1000 bytes should do
        int len = 1000;
        byte[] contentBytes = new byte[len];
        if (content.markSupported()) {
            detector.setText(content);
        } else {
            content.read(contentBytes);
            detector.setText(contentBytes);
        }
        CharsetMatch match = detector.detect();
        //KNL-714 match can be null -DH
        if (match != null) {
            encoding = match.getName();
        } else {
            return false;
        }
        //KNL-682 do not set content as UTF-32LE or UTF-16
        if (encoding.indexOf("UTF-16") > -1 || encoding.indexOf("UTF-32") > -1) {
            encoding = "UTF-8";
        }

        int confidence = match.getConfidence();
        //KNL-683 we need a relatively good confidence before we change the encoding
        int threshold = m_serverConfigurationService.getInt("content.encodingDetection.threshold", 70);
        M_log.debug("detected character encoding of " + encoding + " with confidence of " + confidence
                + " origional was" + contentEncoding);
        if (encoding != null && !contentEncoding.equals(encoding) && (confidence >= threshold)) {
            ResourcePropertiesEdit rpe = edit.getPropertiesEdit();
            rpe.removeProperty(ResourceProperties.PROP_CONTENT_ENCODING);
            rpe.addProperty(ResourceProperties.PROP_CONTENT_ENCODING, encoding);
            updated = true;
        }

    } catch (IOException e) {
        // reading the stream failed; leave the encoding unchanged
        e.printStackTrace();
    } catch (ServerOverloadException e) {
        // the resource body could not be streamed; leave the encoding unchanged
        e.printStackTrace();
    } finally {
        if (content != null) {
            try {
                content.close();
            } catch (IOException e) {
                //not much we can do
            }
        }
    }
    return updated;
}