Example usage for com.amazonaws.services.s3.model ObjectMetadata getETag

List of usage examples for com.amazonaws.services.s3.model ObjectMetadata getETag

Introduction

On this page you can find example usage for com.amazonaws.services.s3.model ObjectMetadata getETag.

Prototype

public String getETag() 

Document

The entity tag (ETag) is a hash of the object. For an object uploaded in a single PUT it is typically the hex-encoded MD5 digest of the object data; a multipart upload produces an ETag that is not an MD5 of the complete object.
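
A minimal sketch of reading an object's ETag without downloading its body, assuming the v1 SDK and placeholder bucket and key names:

AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
// getObjectMetadata issues a HEAD request, so no object data is transferred
ObjectMetadata metadata = s3.getObjectMetadata("example-bucket", "example-key");
String etag = metadata.getETag(); // e.g. "9b2cf535f27731c974343645a3985328"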

Usage

From source file: com.emc.vipr.services.s3.ViPRS3Client.java

License: Open Source License

public UpdateObjectResult updateObject(UpdateObjectRequest request) throws AmazonClientException {
    ObjectMetadata returnedMetadata = doPut(request);
    UpdateObjectResult result = new UpdateObjectResult();
    result.setETag(returnedMetadata.getETag());
    result.setVersionId(returnedMetadata.getVersionId());
    result.setServerSideEncryption(returnedMetadata.getServerSideEncryption());
    result.setExpirationTime(returnedMetadata.getExpirationTime());
    result.setExpirationTimeRuleId(returnedMetadata.getExpirationTimeRuleId());
    return result;
}

From source file: com.emc.vipr.services.s3.ViPRS3Client.java

License: Open Source License

public AppendObjectResult appendObject(AppendObjectRequest request) throws AmazonClientException {
    ObjectMetadata returnedMetadata = doPut(request);
    AppendObjectResult result = new AppendObjectResult();
    result.setETag(returnedMetadata.getETag());
    result.setVersionId(returnedMetadata.getVersionId());
    result.setServerSideEncryption(returnedMetadata.getServerSideEncryption());
    result.setExpirationTime(returnedMetadata.getExpirationTime());
    result.setExpirationTimeRuleId(returnedMetadata.getExpirationTimeRuleId());
    result.setAppendOffset(Long.parseLong(
            String.valueOf(returnedMetadata.getRawMetadata().get(ViPRConstants.APPEND_OFFSET_HEADER))));
    return result;
}
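
ViPR reports the offset at which the appended data was written in a vendor extension header (ViPRConstants.APPEND_OFFSET_HEADER); it surfaces through getRawMetadata() as an untyped value, hence the conversion to a String before parsing.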

From source file: com.emc.vipr.services.s3.ViPRS3Client.java

License: Open Source License

/**
 * Executes a (subclass of) PutObjectRequest. In particular, we check for subclasses
 * of UpdateObjectRequest and inject the value of the Range header. This version
 * also returns the raw ObjectMetadata for the response so callers can construct
 * their own result objects.
 * @param putObjectRequest the request to execute
 * @return an ObjectMetadata containing the response headers.
 */
protected ObjectMetadata doPut(PutObjectRequest putObjectRequest) {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");

    String bucketName = putObjectRequest.getBucketName();
    String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();
    if (metadata == null)
        metadata = new ObjectMetadata();

    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key, "The key parameter must be specified when uploading an object");

    /*
     * This is compatible with progress listener set by either the legacy
     * method GetObjectRequest#setProgressListener or the new method
     * GetObjectRequest#setGeneralProgressListener.
     */
    com.amazonaws.event.ProgressListener progressListener = putObjectRequest.getGeneralProgressListener();
    ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor
            .wrapListener(progressListener);

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options
    if (putObjectRequest.getFile() != null) {
        File file = putObjectRequest.getFile();

        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());

        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        FileInputStream fileInputStream = null;
        try {
            fileInputStream = new FileInputStream(file);
            byte[] md5Hash = Md5Utils.computeMD5Hash(fileInputStream);
            metadata.setContentMD5(BinaryUtils.toBase64(md5Hash));
        } catch (Exception e) {
            throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
        } finally {
            try {
                fileInputStream.close();
            } catch (Exception e) {
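                // ignored; this stream exists only to compute the MD5 hash above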
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (FileNotFoundException fnfe) {
            throw new AmazonClientException("Unable to find file to upload", fnfe);
        }
    }

    Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }

    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            input = new ByteArrayInputStream(new byte[0]);
        }
    }

    // Use internal interface to differentiate 0 from unset.
    if (metadata.getRawMetadata().get(Headers.CONTENT_LENGTH) == null) {
        /*
         * There's nothing we can do except for let the HTTP client buffer
         * the input stream contents if the caller doesn't tell us how much
         * data to expect in a stream since we have to explicitly tell
         * Amazon S3 how much we're sending before we start sending any of
         * it.
         */
        log.warn("No content length specified for stream data.  "
                + "Stream contents will be buffered in memory and could result in " + "out of memory errors.");
    }

    if (progressListenerCallbackExecutor != null) {
        // Wrap the input stream so that read progress is reported to the listener.
        input = new com.amazonaws.event.ProgressReportingInputStream(input,
                progressListenerCallbackExecutor);
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.STARTED_EVENT_CODE);
    }

    if (!input.markSupported()) {
        int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE;
        String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize");
        if (bufferSizeOverride != null) {
            try {
                streamBufferSize = Integer.parseInt(bufferSizeOverride);
            } catch (Exception e) {
                log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride);
            }
        }

        input = new RepeatableInputStream(input, streamBufferSize);
    }

    MD5DigestCalculatingInputStream md5DigestStream = null;
    if (metadata.getContentMD5() == null) {
        /*
         * If the user hasn't set the content MD5, then we don't want to
         * buffer the whole stream in memory just to calculate it. Instead,
         * we can calculate it on the fly and validate it with the returned
         * ETag from the object upload.
         */
        try {
            md5DigestStream = new MD5DigestCalculatingInputStream(input);
            input = md5DigestStream;
        } catch (NoSuchAlgorithmException e) {
            log.warn("No MD5 digest algorithm available.  Unable to calculate "
                    + "checksum and verify data integrity.", e);
        }
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to the "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    request.setContent(input);

    if (putObjectRequest instanceof UpdateObjectRequest) {
        request.addHeader(Headers.RANGE, "bytes=" + ((UpdateObjectRequest) putObjectRequest).getUpdateRange());
    }

    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (AmazonClientException ace) {
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (Exception e) {
            log.warn("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }

    String contentMd5 = metadata.getContentMD5();
    if (md5DigestStream != null) {
        contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
    }

    // Can't verify MD5 on appends/update (yet).
    if (!(putObjectRequest instanceof UpdateObjectRequest)) {
        if (returnedMetadata != null && contentMd5 != null) {
            byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
            byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                fireProgressEvent(progressListenerCallbackExecutor,
                        com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
                throw new AmazonClientException("Unable to verify integrity of data upload.  "
                        + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
                        + "You may need to delete the data stored in Amazon S3.");
            }
        }
    }

    fireProgressEvent(progressListenerCallbackExecutor, com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE);

    return returnedMetadata;
}
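
Note the asymmetry in the final integrity check: the client-side Content-MD5 travels Base64-encoded, while the ETag returned by S3 is hex-encoded, so both are decoded to raw bytes before comparison. The check is skipped for UpdateObjectRequest (and its append subclass) because the ETag of a ranged update no longer corresponds to an MD5 of just the uploaded bytes.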

From source file: com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java

License: Open Source License

protected void populateResponseMetadata(final ObjectStorageDataResponseType reply,
        final ObjectMetadata metadata) {
    reply.setSize(metadata.getContentLength());
    reply.setContentDisposition(metadata.getContentDisposition());
    reply.setContentType(metadata.getContentType());
    reply.setEtag(metadata.getETag());
    reply.setLastModified(metadata.getLastModified());

    if (metadata.getUserMetadata() != null && metadata.getUserMetadata().size() > 0) {
        if (reply.getMetaData() == null)
            reply.setMetaData(new ArrayList<MetaDataEntry>());

        for (String k : metadata.getUserMetadata().keySet()) {
            reply.getMetaData().add(new MetaDataEntry(k, metadata.getUserMetadata().get(k)));
        }
    }

}

From source file: com.github.rholder.esthree.command.GetMultipart.java

License: Apache License

@Override
public Integer call() throws Exception {
    ObjectMetadata om = amazonS3Client.getObjectMetadata(bucket, key);
    contentLength = om.getContentLength();

    // Initialized here; after each chunk completes, this holds the most recent valid digest.
    currentDigest = MessageDigest.getInstance("MD5");
    chunkSize = chunkSize == null ? DEFAULT_CHUNK_SIZE : chunkSize;
    fileParts = Parts.among(contentLength, chunkSize);
    for (Part fp : fileParts) {

        /*
         * We'll need to compute the digest on the full incoming stream for
         * each valid chunk that comes in. Invalid chunks will need to be
         * recomputed and fed through a copy of the MD5 that was valid up
         * until the latest chunk.
         */
        currentDigest = retryingGetWithRange(fp.start, fp.end);
    }

    // TODO fix total content length progress bar
    if (progressListener != null) {
        progressListener.progressChanged(new ProgressEvent(ProgressEventType.TRANSFER_STARTED_EVENT));
    }

    String fullETag = om.getETag();
    if (!fullETag.contains("-")) {
        byte[] expected = BinaryUtils.fromHex(fullETag);
        byte[] current = currentDigest.digest();
        if (!Arrays.equals(expected, current)) {
            throw new AmazonClientException("Unable to verify integrity of data download.  "
                    + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
                    + "The data may be corrupt.");
        }
    } else {
        // TODO log warning that we can't validate the MD5
        if (verbose) {
            System.err.println("\nMD5 does not exist on AWS for file, calculated value: "
                    + BinaryUtils.toHex(currentDigest.digest()));
        }
    }
    // TODO add ability to resume from previously downloaded chunks
    // TODO add rate limiter

    return 0;
}
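
The dash test above works because S3 reports a multipart upload's ETag in the form "<hex>-<partCount>", which is not an MD5 digest of the complete object. A minimal helper sketch of the same test:

// Sketch: a multipart ETag cannot be validated against an MD5 of the whole object.
static boolean isMultipartETag(String etag) {
    return etag != null && etag.contains("-");
}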

From source file: com.openkm.util.backup.RepositoryS3Backup.java

License: Open Source License

/**
 * Performs a recursive repository content export with metadata.
 */
private static ImpExpStats backupHelper(String token, String fldPath, AmazonS3 s3, String bucket,
        boolean metadata, Writer out, InfoDecorator deco)
        throws FileNotFoundException, PathNotFoundException, AccessDeniedException, ParseException,
        NoSuchGroupException, RepositoryException, IOException, DatabaseException {
    log.info("backup({}, {}, {}, {}, {}, {})", new Object[] { token, fldPath, bucket, metadata, out, deco });
    ImpExpStats stats = new ImpExpStats();
    DocumentModule dm = ModuleManager.getDocumentModule();
    FolderModule fm = ModuleManager.getFolderModule();
    MetadataAdapter ma = MetadataAdapter.getInstance(token);
    Gson gson = new Gson();

    for (Iterator<Document> it = dm.getChildren(token, fldPath).iterator(); it.hasNext();) {
        File tmpDoc = null;
        InputStream is = null;
        FileOutputStream fos = null;
        boolean upload = true;

        try {
            Document docChild = it.next();
            String path = docChild.getPath().substring(1);
            ObjectMetadata objMeta = new ObjectMetadata();

            if (Config.REPOSITORY_CONTENT_CHECKSUM) {
                if (exists(s3, bucket, path)) {
                    objMeta = s3.getObjectMetadata(bucket, path);

                    if (docChild.getActualVersion().getChecksum().equals(objMeta.getETag())) {
                        upload = false;
                    }
                }
            }

            if (upload) {
                tmpDoc = FileUtils.createTempFileFromMime(docChild.getMimeType());
                fos = new FileOutputStream(tmpDoc);
                is = dm.getContent(token, docChild.getPath(), false);
                IOUtils.copy(is, fos);
                PutObjectRequest request = new PutObjectRequest(bucket, path, tmpDoc);

                if (metadata) {
                    // Metadata
                    DocumentMetadata dmd = ma.getMetadata(docChild);
                    String json = gson.toJson(dmd);
                    objMeta.addUserMetadata("okm", json);
                }

                request.setMetadata(objMeta);
                s3.putObject(request);
                out.write(deco.print(docChild.getPath(), docChild.getActualVersion().getSize(), null));
                out.flush();
            } else {
                if (metadata) {
                    // Metadata
                    DocumentMetadata dmd = ma.getMetadata(docChild);
                    String json = gson.toJson(dmd);
                    objMeta.addUserMetadata("okm", json);

                    // Update object metadata
                    CopyObjectRequest copyObjReq = new CopyObjectRequest(bucket, path, bucket, path);
                    copyObjReq.setNewObjectMetadata(objMeta);
                    s3.copyObject(copyObjReq);
                }

                log.info("Don't need to upload document {}", docChild.getPath());
            }

            // Stats
            stats.setSize(stats.getSize() + docChild.getActualVersion().getSize());
            stats.setDocuments(stats.getDocuments() + 1);
        } finally {
            IOUtils.closeQuietly(is);
            IOUtils.closeQuietly(fos);
            FileUtils.deleteQuietly(tmpDoc);
        }
    }

    for (Iterator<Folder> it = fm.getChildren(token, fldPath).iterator(); it.hasNext();) {
        InputStream is = null;

        try {
            Folder fldChild = it.next();
            String path = fldChild.getPath().substring(1) + "/";
            is = new ByteArrayInputStream(new byte[0]);
            ObjectMetadata objMeta = new ObjectMetadata();
            objMeta.setContentLength(0);
            PutObjectRequest request = new PutObjectRequest(bucket, path, is, objMeta);

            // Metadata
            if (metadata) {
                FolderMetadata fmd = ma.getMetadata(fldChild);
                String json = gson.toJson(fmd);
                objMeta.addUserMetadata("okm", json);
            }

            request.setMetadata(objMeta);
            s3.putObject(request);

            ImpExpStats tmp = backupHelper(token, fldChild.getPath(), s3, bucket, metadata, out, deco);

            // Stats
            stats.setSize(stats.getSize() + tmp.getSize());
            stats.setDocuments(stats.getDocuments() + tmp.getDocuments());
            stats.setFolders(stats.getFolders() + tmp.getFolders() + 1);
            stats.setOk(stats.isOk() && tmp.isOk());
        } finally {
            IOUtils.closeQuietly(is);
        }
    }

    log.debug("backupHelper: {}", stats);
    return stats;
}
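
Note that the checksum shortcut at the top of the document loop compares the repository's stored checksum directly against the S3 ETag; this only matches when the repository checksum algorithm is MD5 and the object was uploaded in a single part. An object with a multipart ETag would never compare equal and would always be re-uploaded.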

From source file: com.proofpoint.event.collector.combiner.S3StorageHelper.java

License: Apache License

public static StoredObject updateStoredObject(URI location, ObjectMetadata metadata) {
    Preconditions.checkNotNull(location, "location is null");
    Preconditions.checkNotNull(metadata, "metadata is null");

    return new StoredObject(location, metadata.getETag(), metadata.getContentLength(),
            metadata.getLastModified().getTime());
}

From source file: com.proofpoint.event.collector.combiner.S3StorageSystem.java

License: Apache License

private StoredObject createCombinedObjectLarge(CombinedStoredObject combinedObject) {
    URI location = combinedObject.getLocation();
    log.info("starting multipart upload: %s", location);

    String bucket = getS3Bucket(location);
    String key = getS3ObjectKey(location);

    String uploadId = s3Service.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key))
            .getUploadId();

    try {
        List<PartETag> parts = newArrayList();
        int partNumber = 1;
        for (StoredObject newCombinedObjectPart : combinedObject.getSourceParts()) {
            CopyPartResult part = s3Service.copyPart(new CopyPartRequest().withUploadId(uploadId)
                    .withPartNumber(partNumber).withDestinationBucketName(bucket).withDestinationKey(key)
                    .withSourceBucketName(getS3Bucket(newCombinedObjectPart.getLocation()))
                    .withSourceKey(getS3ObjectKey(newCombinedObjectPart.getLocation())));
            parts.add(new PartETag(partNumber, part.getETag()));
            partNumber++;
        }

        String etag = s3Service
                .completeMultipartUpload(new CompleteMultipartUploadRequest(bucket, key, uploadId, parts))
                .getETag();

        ObjectMetadata newObject = s3Service.getObjectMetadata(bucket, key);
        log.info("completed multipart upload: %s", location);

        if (!etag.equals(newObject.getETag())) {
            // this might happen in rare cases due to S3's eventual consistency
            throw new IllegalStateException("completed etag is different from combined object etag");
        }

        return updateStoredObject(location, newObject);
    } catch (AmazonClientException e) {
        try {
            s3Service.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
        } catch (AmazonClientException ignored) {
        }
        throw Throwables.propagate(e);
    }
}

From source file: com.proofpoint.event.collector.combiner.S3StorageSystem.java

License: Apache License

@Override
public StoredObject putObject(final URI location, File source) {
    try {
        log.info("starting upload: %s", location);
        final AtomicLong totalTransferred = new AtomicLong();
        Upload upload = s3TransferManager.upload(getS3Bucket(location), getS3ObjectKey(location), source);
        upload.addProgressListener(new ProgressListener() {
            @Override
            public void progressChanged(ProgressEvent progressEvent) {
                // NOTE: This may be invoked by multiple threads.
                long transferred = totalTransferred.addAndGet(progressEvent.getBytesTransferred());
                log.debug("upload progress: %s: transferred=%d code=%d", location, transferred,
                        progressEvent.getEventCode());
            }
        });
        UploadResult uploadResult = upload.waitForUploadResult();
        ObjectMetadata metadata = s3Service.getObjectMetadata(getS3Bucket(location), getS3ObjectKey(location));
        if (!uploadResult.getETag().equals(metadata.getETag())) {
            // this might happen in rare cases due to S3's eventual consistency
            throw new IllegalStateException("uploaded etag is different from retrieved object etag");
        }
        log.info("completed upload: %s (size=%d bytes)", location, totalTransferred.get());
        return updateStoredObject(location, metadata);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
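
Because UploadResult and the subsequent getObjectMetadata call both report the ETag of the completed upload, this comparison remains valid even when TransferManager switches to a multipart upload and the ETag is no longer a plain MD5.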

From source file: com.tango.BucketSyncer.KeyJobs.S32S3KeyCopyJob.java

License: Apache License

boolean objectChanged(ObjectMetadata metadata) {
    final KeyFingerprint sourceFingerprint = new KeyFingerprint(summary.getSize(), summary.getETag());
    final KeyFingerprint destFingerprint = new KeyFingerprint(metadata.getContentLength(), metadata.getETag());
    return !sourceFingerprint.equals(destFingerprint);
}