Example usage for com.amazonaws.services.s3.model ObjectMetadata getContentType

Introduction

On this page you can find example usages of com.amazonaws.services.s3.model ObjectMetadata getContentType.

Prototype

public String getContentType() 

Document

Gets the Content-Type HTTP header, which indicates the type of content stored in the associated object.
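
Before the project examples below, here is a minimal standalone sketch of the call. It assumes an AWS SDK for Java 1.x client built with AmazonS3ClientBuilder and an existing bucket/key pair; the bucket and key names are placeholders.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class GetContentTypeSketch {
    public static void main(String[] args) {
        // Assumed: credentials and region come from the default provider chain.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Fetches only the metadata (a HEAD request); no object body is downloaded.
        ObjectMetadata metadata = s3.getObjectMetadata("my-bucket", "my-key");

        // getContentType() may return null if no Content-Type header was ever set.
        System.out.println("Content-Type: " + metadata.getContentType());
    }
}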

Usage

From source file: ca.pgon.amazons3masscontenttype.App.java

License: Apache License

private static void process(ObjectListing objectListing) {
    for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
        // Show the key
        String key = objectSummary.getKey();
        System.out.println(key);

        // Get the metadata and check the content type
        ObjectMetadata objectMetadata = amazonS3Client.getObjectMetadata(bucketName, key);
        System.out.println("\tCurrent content type: " + objectMetadata.getContentType());
        if (!contentType.equals(objectMetadata.getContentType())) {
            System.out.println("\tChanging content type for : " + contentType);
            objectMetadata.setContentType(contentType);

            // Get the current ACL
            AccessControlList accessControlList = amazonS3Client.getObjectAcl(bucketName, key);

            // Modify the file
            CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucketName, key, bucketName, key);
            copyObjectRequest.withNewObjectMetadata(objectMetadata);
            copyObjectRequest.withAccessControlList(accessControlList);
            amazonS3Client.copyObject(copyObjectRequest);
        }
        ++count;
    }
}
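
Note that Amazon S3 offers no way to edit the metadata of an existing object in place: the example rewrites the Content-Type by copying the object onto itself (same bucket and key) with new metadata, and re-applies the original ACL because the copy would otherwise reset it to the default.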

From source file: com.cloudbees.demo.beesshop.service.AmazonS3FileStorageService.java

License: Apache License

/**
 * @param in             bytes to store
 * @param objectMetadata Amazon S3 metadata
 * @return Amazon S3 URL
 */
@Nonnull
public String storeFile(InputStream in, ObjectMetadata objectMetadata) {
    String extension = defaultFileExtensionByContentType.get(objectMetadata.getContentType());
    String fileName = Joiner.on(".").skipNulls().join(Math.abs(random.nextLong()), extension);
    amazonS3.putObject(amazonS3BucketName, fileName, in, objectMetadata);

    return "s3://" + amazonS3BucketName + "/" + fileName;
}
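
A hypothetical call site for this method could look like the following sketch. The identifiers fileStorageService and imageBytes are illustrative, not from the project; the point is that the content type must be set on the ObjectMetadata before the call, since the method uses it to look up a file extension.

// Hypothetical caller; fileStorageService and imageBytes are assumed names.
ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentType("image/jpeg");        // drives the extension lookup
metadata.setContentLength(imageBytes.length); // lets the SDK stream without buffering
String s3Url = fileStorageService.storeFile(new ByteArrayInputStream(imageBytes), metadata);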

From source file: com.emc.ecs.sync.model.object.S3SyncObject.java

License: Open Source License

protected SyncMetadata toSyncMeta(ObjectMetadata s3meta) {
    SyncMetadata meta = new SyncMetadata();

    meta.setCacheControl(s3meta.getCacheControl());
    meta.setContentDisposition(s3meta.getContentDisposition());
    meta.setContentEncoding(s3meta.getContentEncoding());
    if (s3meta.getContentMD5() != null)
        meta.setChecksum(new Checksum("MD5", s3meta.getContentMD5()));
    meta.setContentType(s3meta.getContentType());
    meta.setHttpExpires(s3meta.getHttpExpiresDate());
    meta.setExpirationDate(s3meta.getExpirationTime());
    meta.setModificationTime(s3meta.getLastModified());
    meta.setContentLength(s3meta.getContentLength());
    meta.setUserMetadata(toMetaMap(s3meta.getUserMetadata()));

    return meta;
}
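
One caveat in this mapping: Content-MD5 is a request header, so getContentMD5() is typically null on metadata retrieved from S3, and the checksum above is only captured when the SDK happens to have the value available.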

From source file: com.emc.vipr.services.s3.ViPRS3Client.java

License: Open Source License

/**
 * Executes a (Subclass of) PutObjectRequest.  In particular, we check for subclasses
 * of the UpdateObjectRequest and inject the value of the Range header.  This version
 * also returns the raw ObjectMetadata for the response so callers can construct
 * their own result objects.
 * @param putObjectRequest the request to execute
 * @return an ObjectMetadata containing the response headers.
 */
protected ObjectMetadata doPut(PutObjectRequest putObjectRequest) {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");

    String bucketName = putObjectRequest.getBucketName();
    String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();
    if (metadata == null)
        metadata = new ObjectMetadata();

    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key, "The key parameter must be specified when uploading an object");

    /*
     * This is compatible with progress listeners set by either the legacy
     * method PutObjectRequest#setProgressListener or the new method
     * PutObjectRequest#setGeneralProgressListener.
     */
    com.amazonaws.event.ProgressListener progressListener = putObjectRequest.getGeneralProgressListener();
    ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor
            .wrapListener(progressListener);

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options
    if (putObjectRequest.getFile() != null) {
        File file = putObjectRequest.getFile();

        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());

        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        FileInputStream fileInputStream = null;
        try {
            fileInputStream = new FileInputStream(file);
            byte[] md5Hash = Md5Utils.computeMD5Hash(fileInputStream);
            metadata.setContentMD5(BinaryUtils.toBase64(md5Hash));
        } catch (Exception e) {
            throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
        } finally {
            try {
                fileInputStream.close();
            } catch (Exception e) {
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (FileNotFoundException fnfe) {
            throw new AmazonClientException("Unable to find file to upload", fnfe);
        }
    }

    Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }

    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            input = new ByteArrayInputStream(new byte[0]);
        }
    }

    // Use internal interface to differentiate 0 from unset.
    if (metadata.getRawMetadata().get(Headers.CONTENT_LENGTH) == null) {
        /*
         * There's nothing we can do except for let the HTTP client buffer
         * the input stream contents if the caller doesn't tell us how much
         * data to expect in a stream since we have to explicitly tell
         * Amazon S3 how much we're sending before we start sending any of
         * it.
         */
        log.warn("No content length specified for stream data.  "
                + "Stream contents will be buffered in memory and could result in " + "out of memory errors.");
    }

    if (progressListenerCallbackExecutor != null) {
        // Wrap the stream so that read progress is reported to the listener.
        input = new com.amazonaws.event.ProgressReportingInputStream(input, progressListenerCallbackExecutor);
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.STARTED_EVENT_CODE);
    }

    if (!input.markSupported()) {
        int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE;
        String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize");
        if (bufferSizeOverride != null) {
            try {
                streamBufferSize = Integer.parseInt(bufferSizeOverride);
            } catch (Exception e) {
                log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride);
            }
        }

        input = new RepeatableInputStream(input, streamBufferSize);
    }

    MD5DigestCalculatingInputStream md5DigestStream = null;
    if (metadata.getContentMD5() == null) {
        /*
         * If the user hasn't set the content MD5, then we don't want to
         * buffer the whole stream in memory just to calculate it. Instead,
         * we can calculate it on the fly and validate it with the returned
         * ETag from the object upload.
         */
        try {
            md5DigestStream = new MD5DigestCalculatingInputStream(input);
            input = md5DigestStream;
        } catch (NoSuchAlgorithmException e) {
            log.warn("No MD5 digest algorithm available.  Unable to calculate "
                    + "checksum and verify data integrity.", e);
        }
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to the "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    request.setContent(input);

    if (putObjectRequest instanceof UpdateObjectRequest) {
        request.addHeader(Headers.RANGE, "bytes=" + ((UpdateObjectRequest) putObjectRequest).getUpdateRange());
    }

    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (AmazonClientException ace) {
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (Exception e) {
            log.warn("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }

    String contentMd5 = metadata.getContentMD5();
    if (md5DigestStream != null) {
        contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
    }

    // Can't verify MD5 on appends/update (yet).
    if (!(putObjectRequest instanceof UpdateObjectRequest)) {
        if (returnedMetadata != null && contentMd5 != null) {
            byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
            byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                fireProgressEvent(progressListenerCallbackExecutor,
                        com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
                throw new AmazonClientException("Unable to verify integrity of data upload.  "
                        + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
                        + "You may need to delete the data stored in Amazon S3.");
            }
        }
    }

    fireProgressEvent(progressListenerCallbackExecutor, com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE);

    return returnedMetadata;
}
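
A note on the integrity check near the end: comparing the client-side MD5 with the returned ETag only works for plain single-part uploads, where the S3 ETag is the hex-encoded MD5 of the object; multipart uploads and some server-side encryption modes produce ETags that are not an MD5 digest.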

From source file: com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java

License: Open Source License

protected void populateResponseMetadata(final ObjectStorageDataResponseType reply,
        final ObjectMetadata metadata) {
    reply.setSize(metadata.getContentLength());
    reply.setContentDisposition(metadata.getContentDisposition());
    reply.setContentType(metadata.getContentType());
    reply.setEtag(metadata.getETag());
    reply.setLastModified(metadata.getLastModified());

    if (metadata.getUserMetadata() != null && !metadata.getUserMetadata().isEmpty()) {
        if (reply.getMetaData() == null)
            reply.setMetaData(new ArrayList<MetaDataEntry>());

        for (Map.Entry<String, String> entry : metadata.getUserMetadata().entrySet()) {
            reply.getMetaData().add(new MetaDataEntry(entry.getKey(), entry.getValue()));
        }
    }

}

From source file: com.sangupta.urn.service.impl.AmazonS3UrnStorageServiceImpl.java

License: Apache License

@Override
protected UrnObject get(String objectKey) {
    S3Object object = this.client.getObject(this.bucketName, objectKey);
    if (object == null) {
        return null;
    }

    try {
        InputStream stream = object.getObjectContent();

        byte[] bytes = IOUtils.toByteArray(stream);

        UrnObject urnObject = new UrnObject(objectKey, bytes);

        // TODO: read and populate metadata
        ObjectMetadata metadata = object.getObjectMetadata();
        if (metadata != null) {
            if (metadata.getHttpExpiresDate() != null) {
                urnObject.expiry = metadata.getHttpExpiresDate().getTime();
            }

            urnObject.mime = metadata.getContentType();
            urnObject.stored = metadata.getLastModified().getTime();

            // TODO: parse the value to extract the filename if available
            urnObject.name = metadata.getContentDisposition();
        }

        // return the object
        return urnObject;
    } catch (IOException e) {
        // happens when we cannot read data from S3
        LOGGER.debug("Exception reading data from S3 for object key: " + objectKey, e);
        return null;
    } finally {
        if (object != null) {
            try {
                object.close();
            } catch (IOException e) {
                LOGGER.warn("Unable to close S3 object during/after reading the object");
            }
        }
    }
}

From source file: com.scoyo.tools.s3cacheenhancer.S3HeaderEnhancer.java

License: Apache License

private void setHeaders(ObjectListing listing, final String maxAgeHeader, ExecutorService executorService) {

    for (final S3ObjectSummary summary : listing.getObjectSummaries()) {
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                String bucket = summary.getBucketName();
                String key = summary.getKey();

                ObjectMetadata metadata = null;
                try {
                    metadata = s3.getObjectMetadata(bucket, key);
                } catch (AmazonS3Exception exception) {
                    System.out.println("Could not update " + key + " [" + exception.getMessage() + "]");
                    return;
                }

                if ("application/x-directory".equals(metadata.getContentType())) {
                    System.out.println("Skipping because content-type " + key);
                    return;
                }

                if (!maxAgeHeader.equals(metadata.getCacheControl())) {
                    metadata.setCacheControl(maxAgeHeader);
                } else {
                    System.out.println("Skipping because header is already correct " + key);
                    return;
                }

                AccessControlList acl = s3.getObjectAcl(summary.getBucketName(), summary.getKey());

                CopyObjectRequest copyReq = new CopyObjectRequest(bucket, key, bucket, key)
                        .withAccessControlList(acl).withNewObjectMetadata(metadata);

                CopyObjectResult result = s3.copyObject(copyReq);

                if (result != null) {
                    System.out.println("Updated " + key);
                } else {
                    System.out.println("Could not update " + key);
                }
            }
        });
    }
}
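
This is the same copy-onto-itself pattern as the first example on this page, here used to rewrite the Cache-Control header; the ExecutorService merely parallelizes the per-key copies.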

From source file: fr.ens.biologie.genomique.eoulsan.data.protocols.S3DataProtocol.java

License: LGPL

@Override
public DataFileMetadata getMetadata(final DataFile src) throws IOException {

    if (!exists(src, true)) {
        throw new FileNotFoundException("File not found: " + src);
    }

    final ObjectMetadata md = new S3URL(src).getMetaData();

    final SimpleDataFileMetadata result = new SimpleDataFileMetadata();
    result.setContentLength(md.getContentLength());
    result.setLastModified(md.getLastModified().getTime());
    result.setContentType(md.getContentType());
    result.setContentEncoding(md.getContentEncoding());
    result.setDataFormat(DataFormatRegistry.getInstance().getDataFormatFromFilename(src.getName()));

    return result;
}

From source file: io.druid.storage.s3.S3Utils.java

License: Apache License

public static boolean isDirectoryPlaceholder(String key, ObjectMetadata objectMetadata) {
    // Recognize "standard" directory place-holder indications used by
    // Amazon's AWS Console and Panic's Transmit.
    if (key.endsWith("/") && objectMetadata.getContentLength() == 0) {
        return true;
    }
    // Recognize s3sync.rb directory placeholders by MD5/ETag value.
    if ("d66759af42f282e1ba19144df2d405d0".equals(objectMetadata.getETag())) {
        return true;
    }
    // Recognize place-holder objects created by the Google Storage console
    // or S3 Organizer Firefox extension.
    if (key.endsWith("_$folder$") && objectMetadata.getContentLength() == 0) {
        return true;
    }

    // We don't use JetS3t APIs anymore, but the below check is still needed for backward compatibility.

    // Recognize legacy JetS3t directory place-holder objects, only gives
    // accurate results if an object's metadata is populated.
    if (objectMetadata.getContentLength() == 0
            && MIMETYPE_JETS3T_DIRECTORY.equals(objectMetadata.getContentType())) {
        return true;
    }
    return false;
}
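
For context, MIMETYPE_JETS3T_DIRECTORY is the "application/x-directory" content type that JetS3t assigned to zero-byte directory placeholders, the same value the S3HeaderEnhancer example above skips.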

From source file: io.konig.camel.aws.s3.DeleteObjectEndpoint.java

License: Apache License

public Exchange createExchange(ExchangePattern pattern, final S3Object s3Object) {
    LOG.trace("Getting object with key [{}] from bucket [{}]...", s3Object.getKey(), s3Object.getBucketName());

    ObjectMetadata objectMetadata = s3Object.getObjectMetadata();

    LOG.trace("Got object [{}]", s3Object);

    Exchange exchange = super.createExchange(pattern);
    Message message = exchange.getIn();

    if (configuration.isIncludeBody()) {
        message.setBody(s3Object.getObjectContent());
    } else {
        message.setBody(null);
    }

    message.setHeader(S3Constants.KEY, s3Object.getKey());
    message.setHeader(S3Constants.BUCKET_NAME, s3Object.getBucketName());
    message.setHeader(S3Constants.E_TAG, objectMetadata.getETag());
    message.setHeader(S3Constants.LAST_MODIFIED, objectMetadata.getLastModified());
    message.setHeader(S3Constants.VERSION_ID, objectMetadata.getVersionId());
    message.setHeader(S3Constants.CONTENT_TYPE, objectMetadata.getContentType());
    message.setHeader(S3Constants.CONTENT_MD5, objectMetadata.getContentMD5());
    message.setHeader(S3Constants.CONTENT_LENGTH, objectMetadata.getContentLength());
    message.setHeader(S3Constants.CONTENT_ENCODING, objectMetadata.getContentEncoding());
    message.setHeader(S3Constants.CONTENT_DISPOSITION, objectMetadata.getContentDisposition());
    message.setHeader(S3Constants.CACHE_CONTROL, objectMetadata.getCacheControl());
    message.setHeader(S3Constants.S3_HEADERS, objectMetadata.getRawMetadata());
    message.setHeader(S3Constants.SERVER_SIDE_ENCRYPTION, objectMetadata.getSSEAlgorithm());
    message.setHeader(S3Constants.USER_METADATA, objectMetadata.getUserMetadata());
    message.setHeader(S3Constants.EXPIRATION_TIME, objectMetadata.getExpirationTime());
    message.setHeader(S3Constants.REPLICATION_STATUS, objectMetadata.getReplicationStatus());
    message.setHeader(S3Constants.STORAGE_CLASS, objectMetadata.getStorageClass());

    /*
     * If includeBody != true, it is safe to close the object here. If
     * includeBody == true, the caller is responsible for closing the stream
     * and object once the body has been fully consumed. As of 2.17, the
     * consumer does not close the stream or object on commit.
     */
    if (!configuration.isIncludeBody()) {
        IOHelper.close(s3Object);
    } else {
        if (configuration.isAutocloseBody()) {
            exchange.addOnCompletion(new SynchronizationAdapter() {
                @Override
                public void onDone(Exchange exchange) {
                    IOHelper.close(s3Object);
                }
            });
        }
    }

    return exchange;
}