Example usage for com.amazonaws.services.s3.model ObjectMetadata getRawMetadata

Introduction

On this page you can find example usage of the com.amazonaws.services.s3.model.ObjectMetadata.getRawMetadata() method.

Prototype

public Map<String, Object> getRawMetadata() 

Document

Gets a map of the raw metadata/headers for the associated object.
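
A minimal sketch of calling this method; the client configuration, bucket, and key below are placeholders, not taken from the examples that follow:

import java.util.Map;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;

AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
ObjectMetadata metadata = s3.getObjectMetadata("example-bucket", "example-key");
// The raw map is keyed by HTTP header name, e.g. "Content-Type", "ETag"
for (Map.Entry<String, Object> header : metadata.getRawMetadata().entrySet()) {
    System.out.println(header.getKey() + ": " + header.getValue());
}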

Usage

From source file: com.clicktravel.infrastructure.persistence.aws.s3.S3FileStore.java

License: Apache License

/**
 * Returns a Map of the user meta-data associated with the given S3 Object
 *
 * This is a work-around for a bug in the AWS Java SDK which fails to treat HTTP headers in a case-insensitive manner.
 *
 * @see <a href="https://github.com/aws/aws-sdk-java/pull/326">https://github.com/aws/aws-sdk-java/pull/326</a>
 *
 * @param s3Object The S3Object for which the user meta-data is to be obtained.
 * @return key-value map for user meta-data
 */
private Map<String, String> getUserMetaData(final S3Object s3Object) {
    final ObjectMetadata objectMetaData = s3Object.getObjectMetadata();
    final Map<String, String> userMetaData = objectMetaData.getUserMetadata();
    if (userMetaData.isEmpty()) {
        for (final Entry<String, Object> entry : objectMetaData.getRawMetadata().entrySet()) {
            final String normalisedKey = entry.getKey().toLowerCase();
            if (normalisedKey.startsWith(USER_METADATA_HEADER_PREFIX)) {
                final String value = String.valueOf(entry.getValue());
                userMetaData.put(normalisedKey.substring(USER_METADATA_HEADER_PREFIX.length()), value);
            }
        }
    }
    return userMetaData;
}
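
The USER_METADATA_HEADER_PREFIX constant is not shown in this excerpt; for the fallback to work it would hold the S3 user-metadata header prefix. A hedged sketch of the key normalisation in isolation, assuming that value:

// Assumed value; the AWS SDK's own Headers.S3_USER_METADATA_PREFIX holds the
// same string, as the BatchAwsS3ChunkStore example further down this page shows.
private static final String USER_METADATA_HEADER_PREFIX = "x-amz-meta-";

// A raw header such as "X-Amz-Meta-Owner" normalises to the user key "owner"
private static String toUserKey(final String rawHeaderName) {
    final String normalisedKey = rawHeaderName.toLowerCase();
    return normalisedKey.startsWith(USER_METADATA_HEADER_PREFIX)
            ? normalisedKey.substring(USER_METADATA_HEADER_PREFIX.length())
            : null; // not a user-metadata header
}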

From source file: com.emc.vipr.services.s3.ViPRS3Client.java

License: Open Source License

public AppendObjectResult appendObject(AppendObjectRequest request) throws AmazonClientException {
    ObjectMetadata returnedMetadata = doPut(request);
    AppendObjectResult result = new AppendObjectResult();
    result.setETag(returnedMetadata.getETag());
    result.setVersionId(returnedMetadata.getVersionId());
    result.setServerSideEncryption(returnedMetadata.getServerSideEncryption());
    result.setExpirationTime(returnedMetadata.getExpirationTime());
    result.setExpirationTimeRuleId(returnedMetadata.getExpirationTimeRuleId());
    result.setAppendOffset(
            Long.parseLong(String.valueOf(returnedMetadata.getRawMetadata().get(ViPRConstants.APPEND_OFFSET_HEADER))));
    return result;
}

From source file: com.emc.vipr.services.s3.ViPRS3Client.java

License: Open Source License

/**
 * Executes a (subclass of) PutObjectRequest. In particular, we check for subclasses
 * of UpdateObjectRequest and inject the value of the Range header. This version
 * also returns the raw ObjectMetadata for the response so callers can construct
 * their own result objects.
 * @param putObjectRequest the request to execute
 * @return an ObjectMetadata containing the response headers.
 */
protected ObjectMetadata doPut(PutObjectRequest putObjectRequest) {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");

    String bucketName = putObjectRequest.getBucketName();
    String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();
    if (metadata == null)
        metadata = new ObjectMetadata();

    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key, "The key parameter must be specified when uploading an object");

    /*
     * This is compatible with progress listener set by either the legacy
     * method GetObjectRequest#setProgressListener or the new method
     * GetObjectRequest#setGeneralProgressListener.
     */
    com.amazonaws.event.ProgressListener progressListener = putObjectRequest.getGeneralProgressListener();
    ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor
            .wrapListener(progressListener);

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options
    if (putObjectRequest.getFile() != null) {
        File file = putObjectRequest.getFile();

        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());

        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        FileInputStream fileInputStream = null;
        try {
            fileInputStream = new FileInputStream(file);
            byte[] md5Hash = Md5Utils.computeMD5Hash(fileInputStream);
            metadata.setContentMD5(BinaryUtils.toBase64(md5Hash));
        } catch (Exception e) {
            throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
        } finally {
            try {
                if (fileInputStream != null) {
                    fileInputStream.close();
                }
            } catch (Exception e) {
                // Ignore failures while closing; the MD5 hash has already been computed
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (FileNotFoundException fnfe) {
            throw new AmazonClientException("Unable to find file to upload", fnfe);
        }
    }

    Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }

    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            input = new ByteArrayInputStream(new byte[0]);
        }
    }

    // Use internal interface to differentiate 0 from unset.
    if (metadata.getRawMetadata().get(Headers.CONTENT_LENGTH) == null) {
        /*
         * There's nothing we can do except for let the HTTP client buffer
         * the input stream contents if the caller doesn't tell us how much
         * data to expect in a stream since we have to explicitly tell
         * Amazon S3 how much we're sending before we start sending any of
         * it.
         */
        log.warn("No content length specified for stream data.  "
                + "Stream contents will be buffered in memory and could result in " + "out of memory errors.");
    }

    if (progressListenerCallbackExecutor != null) {
        // Wrap the input stream so that progress events fire as bytes are read
        input = new com.amazonaws.event.ProgressReportingInputStream(input, progressListenerCallbackExecutor);
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.STARTED_EVENT_CODE);
    }

    if (!input.markSupported()) {
        int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE;
        String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize");
        if (bufferSizeOverride != null) {
            try {
                streamBufferSize = Integer.parseInt(bufferSizeOverride);
            } catch (Exception e) {
                log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride);
            }
        }

        input = new RepeatableInputStream(input, streamBufferSize);
    }

    MD5DigestCalculatingInputStream md5DigestStream = null;
    if (metadata.getContentMD5() == null) {
        /*
         * If the user hasn't set the content MD5, then we don't want to
         * buffer the whole stream in memory just to calculate it. Instead,
         * we can calculate it on the fly and validate it with the returned
         * ETag from the object upload.
         */
        try {
            md5DigestStream = new MD5DigestCalculatingInputStream(input);
            input = md5DigestStream;
        } catch (NoSuchAlgorithmException e) {
            log.warn("No MD5 digest algorithm available.  Unable to calculate "
                    + "checksum and verify data integrity.", e);
        }
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to the "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    request.setContent(input);

    if (putObjectRequest instanceof UpdateObjectRequest) {
        request.addHeader(Headers.RANGE, "bytes=" + ((UpdateObjectRequest) putObjectRequest).getUpdateRange());
    }

    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (AmazonClientException ace) {
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (Exception e) {
            log.warn("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }

    String contentMd5 = metadata.getContentMD5();
    if (md5DigestStream != null) {
        contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
    }

    // Can't verify MD5 on appends/update (yet).
    if (!(putObjectRequest instanceof UpdateObjectRequest)) {
        if (returnedMetadata != null && contentMd5 != null) {
            byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
            byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                fireProgressEvent(progressListenerCallbackExecutor,
                        com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
                throw new AmazonClientException("Unable to verify integrity of data upload.  "
                        + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
                        + "You may need to delete the data stored in Amazon S3.");
            }
        }
    }

    fireProgressEvent(progressListenerCallbackExecutor, com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE);

    return returnedMetadata;
}
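
The comment "Use internal interface to differentiate 0 from unset" above refers to the fact that ObjectMetadata.getContentLength() returns 0 both when no length was set and when it was explicitly set to zero, whereas the raw map only contains an entry once a value has been set. A small sketch of the distinction, assuming 1.x SDK behaviour:

ObjectMetadata md = new ObjectMetadata();
md.getContentLength();                           // 0, but only because nothing was set
md.getRawMetadata().get(Headers.CONTENT_LENGTH); // null: genuinely unset

md.setContentLength(0);
md.getRawMetadata().get(Headers.CONTENT_LENGTH); // 0L: explicitly zero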

From source file: com.ge.predix.solsvc.blobstore.bootstrap.BlobstoreClientImpl.java

License: Apache License

/**
 * Adds a new Blob to the bound bucket in the Object Store
 *
 * @param obj S3Object to be added
 */
@Override
public String saveBlob(S3Object obj) {
    if (obj == null) {
        this.log.error("put(): Empty file provided"); //$NON-NLS-1$
        throw new RuntimeException("File is null"); //$NON-NLS-1$
    }
    List<PartETag> partETags = new ArrayList<>();
    String bucket = this.blobstoreConfig.getBucketName();
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, obj.getKey());
    InitiateMultipartUploadResult initResponse = this.s3Client.initiateMultipartUpload(initRequest);
    try (InputStream is = obj.getObjectContent();) {

        int i = 1;
        int currentPartSize = 0;
        ByteArrayOutputStream tempBuffer = new ByteArrayOutputStream();
        int byteValue;
        while ((byteValue = is.read()) != -1) {
            tempBuffer.write(byteValue);
            currentPartSize = tempBuffer.size();
            if (currentPartSize == (50 * 1024 * 1024)) //make this a const
            {
                byte[] b = tempBuffer.toByteArray();
                ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

                UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                        .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i++)
                        .withInputStream(byteStream).withPartSize(currentPartSize);
                partETags.add(this.s3Client.uploadPart(uploadPartRequest).getPartETag());

                tempBuffer.reset();
            }
        }
        this.log.info("currentPartSize: " + currentPartSize); //$NON-NLS-1$
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(currentPartSize);
        if (this.enableSSE) {
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        obj.setObjectMetadata(objectMetadata);

        if (i == 1 && currentPartSize < (5 * 1024 * 1024)) // make this a const
        {
            this.s3Client.abortMultipartUpload(
                    new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));

            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);
            objectMetadata.setContentType(getContentType(b));
            if (this.enableSSE) {
                objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            }
            obj.setObjectMetadata(objectMetadata);

            PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, obj.getKey(), byteStream,
                    obj.getObjectMetadata());
            this.s3Client.putObject(putObjectRequest);

            ObjectMetadata meta = this.s3Client.getObjectMetadata(bucket, obj.getKey());
            Map<String, Object> headers = meta.getRawMetadata();
            for (Map.Entry<String, Object> entry : headers.entrySet()) {
                this.log.info("Object Metadata -- " + entry.getKey() + ": " + entry.getValue().toString()); //$NON-NLS-1$ //$NON-NLS-2$
            }

            return initResponse.getUploadId();
        }

        if (currentPartSize > 0 && currentPartSize <= (50 * 1024 * 1024)) // make this a const
        {
            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

            this.log.info("currentPartSize: " + currentPartSize); //$NON-NLS-1$
            this.log.info("byteArray: " + b); //$NON-NLS-1$

            UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withInputStream(byteStream).withPartSize(currentPartSize);
            partETags.add(this.s3Client.uploadPart(uploadPartRequest).getPartETag());
        }

        CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
                .withBucketName(bucket).withPartETags(partETags).withUploadId(initResponse.getUploadId())
                .withKey(obj.getKey());

        this.s3Client.completeMultipartUpload(completeMultipartUploadRequest);
        return initResponse.getUploadId();
    } catch (Exception e) {
        this.log.error("put(): Exception occurred in put(): " + e.getMessage()); //$NON-NLS-1$
        this.s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));
        throw new RuntimeException("put(): Exception occurred in put(): ", e); //$NON-NLS-1$
    }
}
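
Note: Amazon S3 rejects multipart upload parts smaller than 5 MB (except the final part), which is why the method above aborts the multipart upload and falls back to a plain putObject when only a single small part was buffered.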

From source file: com.intuit.tank.vmManager.environment.amazon.AmazonS3.java

License: Open Source License

/**
 * Gets the metadata associated with the file.
 *
 * @param bucketName
 *            the base bucket name
 * @param path
 *            the path (key) of the object within the bucket
 * @return a map of the object's metadata headers, rendered as strings
 */
public Map<String, String> getFileMetaData(String bucketName, String path) {
    Map<String, String> ret = new HashMap<String, String>();
    try {
        ObjectMetadata objectMetadata = s3Client.getObjectMetadata(bucketName, path);
        if (objectMetadata != null) {
            for (Entry<String, Object> entry : objectMetadata.getRawMetadata().entrySet()) {
                ret.put(entry.getKey(), entry.getValue().toString());
            }
        }
    } catch (Exception e) {
        LOG.error("Error getting MetaData: " + e, e);
        throw new RuntimeException(e);
    }
    return ret;
}
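
A possible call site for this helper; the bucket and path are hypothetical:

Map<String, String> meta = getFileMetaData("my-bucket", "results/report.csv");
// Raw metadata keys are HTTP header names
String contentType = meta.get("Content-Type");
String eTag = meta.get("ETag");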

From source file: io.konig.camel.aws.s3.DeleteObjectEndpoint.java

License: Apache License

public Exchange createExchange(ExchangePattern pattern, final S3Object s3Object) {
    LOG.trace("Getting object with key [{}] from bucket [{}]...", s3Object.getKey(), s3Object.getBucketName());

    ObjectMetadata objectMetadata = s3Object.getObjectMetadata();

    LOG.trace("Got object [{}]", s3Object);

    Exchange exchange = super.createExchange(pattern);
    Message message = exchange.getIn();

    if (configuration.isIncludeBody()) {
        message.setBody(s3Object.getObjectContent());
    } else {
        message.setBody(null);
    }

    message.setHeader(S3Constants.KEY, s3Object.getKey());
    message.setHeader(S3Constants.BUCKET_NAME, s3Object.getBucketName());
    message.setHeader(S3Constants.E_TAG, objectMetadata.getETag());
    message.setHeader(S3Constants.LAST_MODIFIED, objectMetadata.getLastModified());
    message.setHeader(S3Constants.VERSION_ID, objectMetadata.getVersionId());
    message.setHeader(S3Constants.CONTENT_TYPE, objectMetadata.getContentType());
    message.setHeader(S3Constants.CONTENT_MD5, objectMetadata.getContentMD5());
    message.setHeader(S3Constants.CONTENT_LENGTH, objectMetadata.getContentLength());
    message.setHeader(S3Constants.CONTENT_ENCODING, objectMetadata.getContentEncoding());
    message.setHeader(S3Constants.CONTENT_DISPOSITION, objectMetadata.getContentDisposition());
    message.setHeader(S3Constants.CACHE_CONTROL, objectMetadata.getCacheControl());
    message.setHeader(S3Constants.S3_HEADERS, objectMetadata.getRawMetadata());
    message.setHeader(S3Constants.SERVER_SIDE_ENCRYPTION, objectMetadata.getSSEAlgorithm());
    message.setHeader(S3Constants.USER_METADATA, objectMetadata.getUserMetadata());
    message.setHeader(S3Constants.EXPIRATION_TIME, objectMetadata.getExpirationTime());
    message.setHeader(S3Constants.REPLICATION_STATUS, objectMetadata.getReplicationStatus());
    message.setHeader(S3Constants.STORAGE_CLASS, objectMetadata.getStorageClass());

    /**
     * If includeBody != true, it is safe to close the object here. If
     * includeBody == true, the caller is responsible for closing the stream
     * and object once the body has been fully consumed. As of 2.17, the
     * consumer does not close the stream or object on commit.
     */
    if (!configuration.isIncludeBody()) {
        IOHelper.close(s3Object);
    } else {
        if (configuration.isAutocloseBody()) {
            exchange.addOnCompletion(new SynchronizationAdapter() {
                @Override
                public void onDone(Exchange exchange) {
                    IOHelper.close(s3Object);
                }
            });
        }
    }

    return exchange;
}
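
A hypothetical consumer route showing how the headers set above could be read downstream; the endpoint URI and registry bean name are placeholders, and the route body would live inside a RouteBuilder.configure():

from("aws-s3://example-bucket?amazonS3Client=#s3Client&includeBody=false")
    .process(exchange -> {
        // The full raw metadata map stored under S3Constants.S3_HEADERS above
        @SuppressWarnings("unchecked")
        Map<String, Object> rawHeaders = exchange.getIn().getHeader(S3Constants.S3_HEADERS, Map.class);
        String eTag = exchange.getIn().getHeader(S3Constants.E_TAG, String.class);
        LOG.info("key={} eTag={} rawHeaders={}", exchange.getIn().getHeader(S3Constants.KEY), eTag, rawHeaders);
    });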

From source file: org.alanwilliamson.amazon.s3.GetInfo.java

License: Open Source License

@Override
public cfData execute(cfSession _session, cfArgStructData argStruct) throws cfmRunTimeException {

    AmazonKey amazonKey = getAmazonKey(_session, argStruct);
    AmazonS3 s3Client = getAmazonS3(amazonKey);

    String bucket = getNamedStringParam(argStruct, "bucket", null);
    if (bucket == null)
        throwException(_session, "Please specify a bucket");

    String key = getNamedStringParam(argStruct, "key", null);
    if (key == null)
        throwException(_session, "Please specify a key");

    if (key.charAt(0) == '/')
        key = key.substring(1);

    try {
        GetObjectMetadataRequest g = new GetObjectMetadataRequest(bucket, key);

        ObjectMetadata metadata = s3Client.getObjectMetadata(g);

        cfStructData s = new cfStructData();

        s.setData("bucket", new cfStringData(bucket));
        s.setData("key", new cfStringData(key));
        s.setData("host", new cfStringData(amazonKey.getAmazonRegion().toAWSRegion().getDomain()));

        Map<String, Object> m = metadata.getRawMetadata();
        for (Map.Entry<String, Object> entry : m.entrySet()) {
            s.setData(entry.getKey(), tagUtils.convertToCfData(entry.getValue()));
        }

        return s;
    } catch (Exception e) {
        throwException(_session, "AmazonS3: " + e.getMessage());
        return cfBooleanData.FALSE;
    }
}

From source file: org.duracloud.s3storage.S3StorageProvider.java

License: Apache License

private Map<String, String> prepContentProperties(ObjectMetadata objMetadata) {
    Map<String, String> contentProperties = new HashMap<>();

    // Set the user properties
    Map<String, String> userProperties = objMetadata.getUserMetadata();
    for (String metaName : userProperties.keySet()) {
        String metaValue = userProperties.get(metaName);
        contentProperties.put(getWithSpace(decodeHeaderKey(metaName)), decodeHeaderValue(metaValue));
    }

    // Set the response metadata
    Map<String, Object> responseMeta = objMetadata.getRawMetadata();
    for (String metaName : responseMeta.keySet()) {
        Object metaValue = responseMeta.get(metaName);
        if (metaValue instanceof String) {
            contentProperties.put(metaName, (String) metaValue);
        }
    }

    // Set MIMETYPE
    String contentType = objMetadata.getContentType();
    if (contentType != null) {
        contentProperties.put(PROPERTIES_CONTENT_MIMETYPE, contentType);
        contentProperties.put(Headers.CONTENT_TYPE, contentType);
    }

    // Set CONTENT_ENCODING
    String encoding = objMetadata.getContentEncoding();
    if (encoding != null) {
        contentProperties.put(Headers.CONTENT_ENCODING, encoding);
    }

    // Set SIZE
    long contentLength = objMetadata.getContentLength();
    if (contentLength >= 0) {
        String size = String.valueOf(contentLength);
        contentProperties.put(PROPERTIES_CONTENT_SIZE, size);
        contentProperties.put(Headers.CONTENT_LENGTH, size);
    }

    // Set CHECKSUM
    String checksum = objMetadata.getETag();
    if (checksum != null) {
        String eTagValue = getETagValue(checksum);
        contentProperties.put(PROPERTIES_CONTENT_CHECKSUM, eTagValue);
        contentProperties.put(PROPERTIES_CONTENT_MD5, eTagValue);
        contentProperties.put(Headers.ETAG, eTagValue);
    }

    // Set MODIFIED
    Date modified = objMetadata.getLastModified();
    if (modified != null) {
        String modDate = formattedDate(modified);
        contentProperties.put(PROPERTIES_CONTENT_MODIFIED, modDate);
        contentProperties.put(Headers.LAST_MODIFIED, modDate);
    }

    return contentProperties;
}
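
The instanceof String filter above matters because the raw metadata map mixes value types. A sketch of what the 1.x SDK typically stores, assuming a standard response:

Map<String, Object> raw = objMetadata.getRawMetadata();
raw.get(Headers.ETAG);           // String -> copied into contentProperties
raw.get(Headers.CONTENT_LENGTH); // Long   -> skipped by the filter, handled separately below
raw.get(Headers.LAST_MODIFIED);  // Date   -> skipped by the filter, handled separately below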

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License: Open Source License

private Map<String, String> getUserMetaData(ObjectMetadata obj) {
    this.s3clientLock.readLock().lock();
    try {
        if (simpleS3) {
            HashMap<String, String> omd = new HashMap<String, String>();
            Set<String> mdk = obj.getRawMetadata().keySet();
            SDFSLogger.getLog().debug("md sz=" + mdk.size());
            for (String k : mdk) {
                if (k.toLowerCase().startsWith(Headers.S3_USER_METADATA_PREFIX)) {
                    String key = k.substring(Headers.S3_USER_METADATA_PREFIX.length()).toLowerCase();
                    omd.put(key, (String) obj.getRawMetadataValue(k));
                }
                SDFSLogger.getLog().debug("key=" + k + " value=" + obj.getRawMetadataValue(k));
            }
            Map<String, String> zd = obj.getUserMetadata();
            mdk = zd.keySet();
            SDFSLogger.getLog().debug("md sz=" + mdk.size());
            for (String k : mdk) {
                omd.put(k.toLowerCase(), zd.get(k));
                SDFSLogger.getLog().debug("key=" + k.toLowerCase() + " value=" + zd.get(k));
            }
            return omd;
        } else {
            return obj.getUserMetadata();
        }
    } finally {
        this.s3clientLock.readLock().unlock();
    }

}