Example usage for com.amazonaws.services.s3.model ObjectMetadata setContentMD5


Introduction

On this page you can find example usage of com.amazonaws.services.s3.model ObjectMetadata setContentMD5, drawn from open-source projects.

Prototype

public void setContentMD5(String md5Base64) 

Document

Sets the base64 encoded 128-bit MD5 digest of the associated object (content - not including headers) according to RFC 1864.
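
Before the harvested examples, here is a minimal, self-contained sketch of the typical call pattern, assuming the AWS SDK for Java v1 is on the classpath; the bucket name "my-bucket" and key "my-key" are placeholders, not real resources. It computes the MD5 digest of the payload with the SDK's own Md5Utils and BinaryUtils helpers, base64-encodes it, and sets it on the metadata so S3 can verify the bytes it receives.

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.util.BinaryUtils;
import com.amazonaws.util.Md5Utils;

public class SetContentMD5Sketch {
    public static void main(String[] args) {
        byte[] payload = "hello world".getBytes(StandardCharsets.UTF_8);

        // Base64-encoded 128-bit MD5 digest of the content (RFC 1864).
        String md5Base64 = BinaryUtils.toBase64(Md5Utils.computeMD5Hash(payload));

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentMD5(md5Base64);
        metadata.setContentLength(payload.length);

        // "my-bucket" and "my-key" are placeholders. If the digest does not
        // match the uploaded bytes, S3 rejects the PUT with a BadDigest error.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        s3.putObject(new PutObjectRequest("my-bucket", "my-key",
                new ByteArrayInputStream(payload), metadata));
    }
}

Several of the examples below follow exactly this pattern, computing the hash immediately before the PUT.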

Usage

From source file: S3DataManager.java

License: Open Source License

public UploadToS3Output uploadSourceToS3(AbstractBuild build, Launcher launcher, BuildListener listener)
        throws Exception {
    Validation.checkS3SourceUploaderConfig(projectName, workspace);

    SCM scm = build.getProject().getScm();
    if (scm.getType().equals("hudson.scm.NullSCM")) {
        throw new Exception("Select a valid option in Source Code Management.");
    }
    scm.checkout(build, launcher, workspace, listener, null, null);
    String localfileName = this.projectName + "-" + "source.zip";
    String sourceFilePath = workspace.getRemote();
    String zipFilePath = sourceFilePath.substring(0, sourceFilePath.lastIndexOf("/")) + "/" + localfileName;
    File zipFile = new File(zipFilePath);

    if (!zipFile.getParentFile().exists()) {
        boolean dirMade = zipFile.getParentFile().mkdirs();
        if (!dirMade) {
            throw new Exception("Unable to create directory: " + zipFile.getParentFile().getAbsolutePath());
        }
    }

    ZipOutputStream out = new ZipOutputStream(new FileOutputStream(zipFilePath));
    try {
        zipSource(sourceFilePath, out, sourceFilePath);
    } finally {
        out.close();
    }

    File sourceZipFile = new File(zipFilePath);
    PutObjectRequest putObjectRequest = new PutObjectRequest(s3InputBucket, s3InputKey, sourceZipFile);

    // Add MD5 checksum as S3 Object metadata
    String zipFileMD5;
    try (FileInputStream fis = new FileInputStream(zipFilePath)) {
        zipFileMD5 = new String(org.apache.commons.codec.binary.Base64.encodeBase64(DigestUtils.md5(fis)),
                "UTF-8");
    }
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentMD5(zipFileMD5);
    objectMetadata.setContentLength(sourceZipFile.length());
    putObjectRequest.setMetadata(objectMetadata);

    LoggingHelper.log(listener, "Uploading code to S3 at location " + putObjectRequest.getBucketName() + "/"
            + putObjectRequest.getKey() + ". MD5 checksum is " + zipFileMD5);
    PutObjectResult putObjectResult = s3Client.putObject(putObjectRequest);

    return new UploadToS3Output(putObjectRequest.getBucketName() + "/" + putObjectRequest.getKey(),
            putObjectResult.getVersionId());
}

From source file: alluxio.underfs.s3a.S3ALowLevelOutputStream.java

License: Apache License

/**
 * Initializes multipart upload.
 */
private void initMultiPartUpload() throws IOException {
    // Generate the object metadata by setting server side encryption, md5 checksum,
    // and encoding as octet stream since no assumptions are made about the file type
    ObjectMetadata meta = new ObjectMetadata();
    if (mSseEnabled) {
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    if (mHash != null) {
        meta.setContentMD5(Base64.encodeAsString(mHash.digest()));
    }
    meta.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);

    AmazonClientException lastException;
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(mBucketName, mKey)
            .withObjectMetadata(meta);
    do {
        try {
            mUploadId = mClient.initiateMultipartUpload(initRequest).getUploadId();
            return;
        } catch (AmazonClientException e) {
            lastException = e;
        }
    } while (mRetryPolicy.attempt());
    // This point is only reached if the operation failed more
    // than the allowed retry count
    throw new IOException("Unable to init multipart upload to " + mKey, lastException);
}

From source file: alluxio.underfs.s3a.S3AOutputStream.java

License: Apache License

@Override
public void close() throws IOException {
    if (mClosed) {
        return;
    }
    mLocalOutputStream.close();
    try {
        // Generate the object metadata by setting server side encryption, md5 checksum, the file
        // length, and encoding as octet stream since no assumptions are made about the file type
        ObjectMetadata meta = new ObjectMetadata();
        if (SSE_ENABLED) {
            meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        if (mHash != null) {
            meta.setContentMD5(new String(Base64.encode(mHash.digest())));
        }
        meta.setContentLength(mFile.length());
        // The octet-stream default is a content *type*, not a content
        // encoding (matches the multipart example above).
        meta.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);

        // Generate the put request and wait for the transfer manager to complete the upload, then
        // delete the temporary file on the local machine
        PutObjectRequest putReq = new PutObjectRequest(mBucketName, mKey, mFile).withMetadata(meta);
        mManager.upload(putReq).waitForUploadResult();
        if (!mFile.delete()) {
            LOG.error("Failed to delete temporary file @ {}", mFile.getPath());
        }
    } catch (Exception e) {
        LOG.error("Failed to upload {}. Temporary file @ {}", mKey, mFile.getPath());
        throw new IOException(e);
    }

    // Set the closed flag, close can be retried until mFile.delete is called successfully
    mClosed = true;
}

From source file: alluxio.underfs.s3a.S3AUnderFileSystem.java

License: Apache License

/**
 * Creates a directory flagged file with the key and folder suffix.
 *
 * @param key the key to create a folder
 * @return true if the operation was successful, false otherwise
 */
private boolean mkdirsInternal(String key) {
    try {
        String keyAsFolder = convertToFolderName(stripPrefixIfPresent(key));
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(0);
        meta.setContentMD5(DIR_HASH);
        meta.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
        mClient.putObject(
                new PutObjectRequest(mBucketName, keyAsFolder, new ByteArrayInputStream(new byte[0]), meta));
        return true;
    } catch (AmazonClientException e) {
        LOG.error("Failed to create directory: {}", key, e);
        return false;
    }
}

From source file: com.davidsoergel.s3napback.S3ops.java

License: Apache License

public static void upload(TransferManager tx, String bucket, String filename, int chunkSize)
        throws InterruptedException, IOException {
    //throw new NotImplementedException();

    // break input stream into chunks

    // fully read each chunk into memory before sending, in order to know the size and the md5

    // ** prepare the next chunk while the last is sending; need to deal with multithreading properly
    // ** 4 concurrent streams?

    InputStream in = new BufferedInputStream(System.in);
    int chunkNum = 0;
    while (in.available() > 0) {
        byte[] buf = new byte[chunkSize];
        int bytesRead = in.read(buf);

        // Base64-encoded MD5 of the bytes actually read, as setContentMD5
        // expects (uses com.amazonaws.util.Md5Utils/BinaryUtils and
        // java.util.Arrays).
        String md5 = BinaryUtils.toBase64(Md5Utils.computeMD5Hash(Arrays.copyOf(buf, bytesRead)));

        // presume AWS does its own buffering, no need for BufferedInputStream (?)

        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(bytesRead);
        meta.setContentMD5(md5);

        Upload myUpload = tx.upload(bucket, filename + ":" + chunkNum,
                new ByteArrayInputStream(buf, 0, bytesRead), meta);

        // Poll for progress while the transfer runs...
        while (!myUpload.isDone()) {
            System.out.println("Transfer: " + myUpload.getDescription());
            System.out.println("  - State: " + myUpload.getState());
            System.out.println("  - Progress: " + myUpload.getProgress().getBytesTransfered());
            // Do work while we wait for our upload to complete...
            Thread.sleep(500);
        }
        // ...then block until the result is available (throws on failure).
        UploadResult result = myUpload.waitForUploadResult();

        // Advance the chunk counter so each part gets a distinct key.
        chunkNum++;
    }
}

From source file: com.emc.ecs.sync.util.AwsS3Util.java

License: Open Source License

public static ObjectMetadata s3MetaFromSyncMeta(SyncMetadata syncMeta) {
    ObjectMetadata om = new ObjectMetadata();
    if (syncMeta.getCacheControl() != null)
        om.setCacheControl(syncMeta.getCacheControl());
    if (syncMeta.getContentDisposition() != null)
        om.setContentDisposition(syncMeta.getContentDisposition());
    if (syncMeta.getContentEncoding() != null)
        om.setContentEncoding(syncMeta.getContentEncoding());
    om.setContentLength(syncMeta.getContentLength());
    if (syncMeta.getChecksum() != null && syncMeta.getChecksum().getAlgorithm().equals("MD5"))
        om.setContentMD5(syncMeta.getChecksum().getValue());
    if (syncMeta.getContentType() != null)
        om.setContentType(syncMeta.getContentType());
    if (syncMeta.getHttpExpires() != null)
        om.setHttpExpiresDate(syncMeta.getHttpExpires());
    om.setUserMetadata(formatUserMetadata(syncMeta));
    if (syncMeta.getModificationTime() != null)
        om.setLastModified(syncMeta.getModificationTime());
    return om;
}

From source file: com.emc.vipr.services.s3.ViPRS3Client.java

License: Open Source License

/**
 * Executes a (Subclass of) PutObjectRequest.  In particular, we check for subclasses
 * of the UpdateObjectRequest and inject the value of the Range header.  This version
 * also returns the raw ObjectMetadata for the response so callers can construct
 * their own result objects.
 * @param putObjectRequest the request to execute
 * @return an ObjectMetadata containing the response headers.
 */
protected ObjectMetadata doPut(PutObjectRequest putObjectRequest) {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");

    String bucketName = putObjectRequest.getBucketName();
    String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();
    if (metadata == null)
        metadata = new ObjectMetadata();

    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key, "The key parameter must be specified when uploading an object");

    /*
     * This is compatible with progress listener set by either the legacy
     * method GetObjectRequest#setProgressListener or the new method
     * GetObjectRequest#setGeneralProgressListener.
     */
    com.amazonaws.event.ProgressListener progressListener = putObjectRequest.getGeneralProgressListener();
    ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor
            .wrapListener(progressListener);

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options
    if (putObjectRequest.getFile() != null) {
        File file = putObjectRequest.getFile();

        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());

        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        FileInputStream fileInputStream = null;
        try {
            fileInputStream = new FileInputStream(file);
            byte[] md5Hash = Md5Utils.computeMD5Hash(fileInputStream);
            metadata.setContentMD5(BinaryUtils.toBase64(md5Hash));
        } catch (Exception e) {
            throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
        } finally {
            try {
                fileInputStream.close();
            } catch (Exception e) {
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (FileNotFoundException fnfe) {
            throw new AmazonClientException("Unable to find file to upload", fnfe);
        }
    }

    Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }

    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            input = new ByteArrayInputStream(new byte[0]);
        }
    }

    // Use internal interface to differentiate 0 from unset.
    if (metadata.getRawMetadata().get(Headers.CONTENT_LENGTH) == null) {
        /*
         * There's nothing we can do except for let the HTTP client buffer
         * the input stream contents if the caller doesn't tell us how much
         * data to expect in a stream since we have to explicitly tell
         * Amazon S3 how much we're sending before we start sending any of
         * it.
         */
        log.warn("No content length specified for stream data.  "
                + "Stream contents will be buffered in memory and could result in " + "out of memory errors.");
    }

    if (progressListenerCallbackExecutor != null) {
        // Wrap the stream so transfer progress is reported to the listener.
        input = new com.amazonaws.event.ProgressReportingInputStream(input,
                progressListenerCallbackExecutor);
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.STARTED_EVENT_CODE);
    }

    if (!input.markSupported()) {
        int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE;
        String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize");
        if (bufferSizeOverride != null) {
            try {
                streamBufferSize = Integer.parseInt(bufferSizeOverride);
            } catch (Exception e) {
                log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride);
            }
        }

        input = new RepeatableInputStream(input, streamBufferSize);
    }

    MD5DigestCalculatingInputStream md5DigestStream = null;
    if (metadata.getContentMD5() == null) {
        /*
         * If the user hasn't set the content MD5, then we don't want to
         * buffer the whole stream in memory just to calculate it. Instead,
         * we can calculate it on the fly and validate it with the returned
         * ETag from the object upload.
         */
        try {
            md5DigestStream = new MD5DigestCalculatingInputStream(input);
            input = md5DigestStream;
        } catch (NoSuchAlgorithmException e) {
            log.warn("No MD5 digest algorithm available.  Unable to calculate "
                    + "checksum and verify data integrity.", e);
        }
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to the "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    request.setContent(input);

    if (putObjectRequest instanceof UpdateObjectRequest) {
        request.addHeader(Headers.RANGE, "bytes=" + ((UpdateObjectRequest) putObjectRequest).getUpdateRange());
    }

    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (AmazonClientException ace) {
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (Exception e) {
            log.warn("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }

    String contentMd5 = metadata.getContentMD5();
    if (md5DigestStream != null) {
        contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
    }

    // Can't verify MD5 on appends/update (yet).
    if (!(putObjectRequest instanceof UpdateObjectRequest)) {
        if (returnedMetadata != null && contentMd5 != null) {
            byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
            byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                fireProgressEvent(progressListenerCallbackExecutor,
                        com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
                throw new AmazonClientException("Unable to verify integrity of data upload.  "
                        + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
                        + "You may need to delete the data stored in Amazon S3.");
            }
        }
    }

    fireProgressEvent(progressListenerCallbackExecutor, com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE);

    return returnedMetadata;
}

From source file: com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java

License: Open Source License

protected ObjectMetadata getS3ObjectMetadata(PutObjectType request) {
    ObjectMetadata meta = new ObjectMetadata();
    if (request.getMetaData() != null) {
        for (MetaDataEntry m : request.getMetaData()) {
            meta.addUserMetadata(m.getName(), m.getValue());
        }
    }

    if (!Strings.isNullOrEmpty(request.getContentLength())) {
        meta.setContentLength(Long.parseLong(request.getContentLength()));
    }

    if (!Strings.isNullOrEmpty(request.getContentMD5())) {
        meta.setContentMD5(request.getContentMD5());
    }

    if (!Strings.isNullOrEmpty(request.getContentType())) {
        meta.setContentType(request.getContentType());
    }

    return meta;
}

From source file: com.netflix.exhibitor.core.s3.S3Utils.java

License: Apache License

public static ObjectMetadata simpleUploadFile(S3Client client, byte[] bytes, String bucket, String key)
        throws Exception {
    byte[] md5 = md5(bytes, bytes.length);

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(bytes.length);
    metadata.setLastModified(new Date());
    metadata.setContentMD5(S3Utils.toBase64(md5));
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, new ByteArrayInputStream(bytes),
            metadata);
    PutObjectResult putObjectResult = client.putObject(putObjectRequest);

    if (!putObjectResult.getETag().equals(S3Utils.toHex(md5))) {
        throw new Exception("Unable to match MD5 for config");
    }

    return metadata;
}

From source file: com.netflix.spinnaker.front50.model.S3StorageService.java

License: Apache License

@Override
public <T extends Timestamped> void storeObject(ObjectType objectType, String objectKey, T item) {
    if (readOnlyMode) {
        throw new ReadOnlyModeException();
    }
    try {
        item.setLastModifiedBy(AuthenticatedRequest.getSpinnakerUser().orElse("anonymous"));
        byte[] bytes = objectMapper.writeValueAsBytes(item);

        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(bytes.length);
        objectMetadata.setContentMD5(
                new String(org.apache.commons.codec.binary.Base64.encodeBase64(DigestUtils.md5(bytes))));

        amazonS3.putObject(bucket, buildS3Key(objectType.group, objectKey, objectType.defaultMetadataFilename),
                new ByteArrayInputStream(bytes), objectMetadata);
        writeLastModified(objectType.group);
    } catch (JsonProcessingException e) {
        throw new IllegalStateException(e);
    }
}