Usage examples for com.amazonaws.services.s3.model.PutObjectRequest#getBucketName
public String getBucketName()
From source file:S3DataManager.java
License:Open Source License
public UploadToS3Output uploadSourceToS3(AbstractBuild build, Launcher launcher, BuildListener listener) throws Exception { Validation.checkS3SourceUploaderConfig(projectName, workspace); SCM scm = build.getProject().getScm(); if (scm.getType().equals("hudson.scm.NullSCM")) { throw new Exception("Select a valid option in Source Code Management."); }//from w w w. j av a2 s. co m scm.checkout(build, launcher, workspace, listener, null, null); String localfileName = this.projectName + "-" + "source.zip"; String sourceFilePath = workspace.getRemote(); String zipFilePath = sourceFilePath.substring(0, sourceFilePath.lastIndexOf("/")) + "/" + localfileName; File zipFile = new File(zipFilePath); if (!zipFile.getParentFile().exists()) { boolean dirMade = zipFile.getParentFile().mkdirs(); if (!dirMade) { throw new Exception("Unable to create directory: " + zipFile.getParentFile().getAbsolutePath()); } } ZipOutputStream out = new ZipOutputStream(new FileOutputStream(zipFilePath)); try { zipSource(sourceFilePath, out, sourceFilePath); } finally { out.close(); } File sourceZipFile = new File(zipFilePath); PutObjectRequest putObjectRequest = new PutObjectRequest(s3InputBucket, s3InputKey, sourceZipFile); // Add MD5 checksum as S3 Object metadata String zipFileMD5; try (FileInputStream fis = new FileInputStream(zipFilePath)) { zipFileMD5 = new String(org.apache.commons.codec.binary.Base64.encodeBase64(DigestUtils.md5(fis)), "UTF-8"); } ObjectMetadata objectMetadata = new ObjectMetadata(); objectMetadata.setContentMD5(zipFileMD5); objectMetadata.setContentLength(sourceZipFile.length()); putObjectRequest.setMetadata(objectMetadata); LoggingHelper.log(listener, "Uploading code to S3 at location " + putObjectRequest.getBucketName() + "/" + putObjectRequest.getKey() + ". 
MD5 checksum is " + zipFileMD5); PutObjectResult putObjectResult = s3Client.putObject(putObjectRequest); return new UploadToS3Output(putObjectRequest.getBucketName() + "/" + putObjectRequest.getKey(), putObjectResult.getVersionId()); }
From source file:br.com.ingenieux.mojo.aws.util.BeanstalkerS3Client.java
License:Apache License
@Override public PutObjectResult putObject(PutObjectRequest req) throws AmazonClientException, AmazonServiceException { if (!multipartUpload) { return super.putObject(req); }//from w ww . j a va 2s .c o m final long contentLen = TransferManagerUtils.getContentLength(req); String tempFilename = req.getKey() + ".tmp"; String origFilename = req.getKey(); req.setKey(tempFilename); XProgressListener progressListener = new XProgressListener(); req.setGeneralProgressListener(new ProgressListenerChain(progressListener)); progressListener.setContentLen(contentLen); progressListener.setUpload(transferManager.upload(req)); progressListener.setSilentUpload(silentUpload); try { progressListener.getUpload().waitForCompletion(); } catch (InterruptedException e) { throw new AmazonClientException(e.getMessage(), e); } CopyObjectRequest copyReq = new CopyObjectRequest(req.getBucketName(), tempFilename, req.getBucketName(), origFilename); copyObject(copyReq); deleteObject(new DeleteObjectRequest(req.getBucketName(), tempFilename)); return null; }
From source file:com.emc.vipr.services.s3.ViPRS3Client.java
License:Open Source License
/** * Executes a (Subclass of) PutObjectRequest. In particular, we check for subclasses * of the UpdateObjectRequest and inject the value of the Range header. This version * also returns the raw ObjectMetadata for the response so callers can construct * their own result objects.//from w w w .j a v a 2 s . c o m * @param putObjectRequest the request to execute * @return an ObjectMetadata containing the response headers. */ protected ObjectMetadata doPut(PutObjectRequest putObjectRequest) { assertParameterNotNull(putObjectRequest, "The PutObjectRequest parameter must be specified when uploading an object"); String bucketName = putObjectRequest.getBucketName(); String key = putObjectRequest.getKey(); ObjectMetadata metadata = putObjectRequest.getMetadata(); InputStream input = putObjectRequest.getInputStream(); if (metadata == null) metadata = new ObjectMetadata(); assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object"); assertParameterNotNull(key, "The key parameter must be specified when uploading an object"); /* * This is compatible with progress listener set by either the legacy * method GetObjectRequest#setProgressListener or the new method * GetObjectRequest#setGeneralProgressListener. 
*/ com.amazonaws.event.ProgressListener progressListener = putObjectRequest.getGeneralProgressListener(); ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor .wrapListener(progressListener); // If a file is specified for upload, we need to pull some additional // information from it to auto-configure a few options if (putObjectRequest.getFile() != null) { File file = putObjectRequest.getFile(); // Always set the content length, even if it's already set metadata.setContentLength(file.length()); // Only set the content type if it hasn't already been set if (metadata.getContentType() == null) { metadata.setContentType(Mimetypes.getInstance().getMimetype(file)); } FileInputStream fileInputStream = null; try { fileInputStream = new FileInputStream(file); byte[] md5Hash = Md5Utils.computeMD5Hash(fileInputStream); metadata.setContentMD5(BinaryUtils.toBase64(md5Hash)); } catch (Exception e) { throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e); } finally { try { fileInputStream.close(); } catch (Exception e) { } } try { input = new RepeatableFileInputStream(file); } catch (FileNotFoundException fnfe) { throw new AmazonClientException("Unable to find file to upload", fnfe); } } Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT); if (putObjectRequest.getAccessControlList() != null) { addAclHeaders(request, putObjectRequest.getAccessControlList()); } else if (putObjectRequest.getCannedAcl() != null) { request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString()); } if (putObjectRequest.getStorageClass() != null) { request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass()); } if (putObjectRequest.getRedirectLocation() != null) { request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation()); if (input == null) { input = new ByteArrayInputStream(new byte[0]); } } // Use internal 
interface to differentiate 0 from unset. if (metadata.getRawMetadata().get(Headers.CONTENT_LENGTH) == null) { /* * There's nothing we can do except for let the HTTP client buffer * the input stream contents if the caller doesn't tell us how much * data to expect in a stream since we have to explicitly tell * Amazon S3 how much we're sending before we start sending any of * it. */ log.warn("No content length specified for stream data. " + "Stream contents will be buffered in memory and could result in " + "out of memory errors."); } if (progressListenerCallbackExecutor != null) { com.amazonaws.event.ProgressReportingInputStream progressReportingInputStream = new com.amazonaws.event.ProgressReportingInputStream( input, progressListenerCallbackExecutor); fireProgressEvent(progressListenerCallbackExecutor, com.amazonaws.event.ProgressEvent.STARTED_EVENT_CODE); } if (!input.markSupported()) { int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE; String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize"); if (bufferSizeOverride != null) { try { streamBufferSize = Integer.parseInt(bufferSizeOverride); } catch (Exception e) { log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride); } } input = new RepeatableInputStream(input, streamBufferSize); } MD5DigestCalculatingInputStream md5DigestStream = null; if (metadata.getContentMD5() == null) { /* * If the user hasn't set the content MD5, then we don't want to * buffer the whole stream in memory just to calculate it. Instead, * we can calculate it on the fly and validate it with the returned * ETag from the object upload. */ try { md5DigestStream = new MD5DigestCalculatingInputStream(input); input = md5DigestStream; } catch (NoSuchAlgorithmException e) { log.warn("No MD5 digest algorithm available. 
Unable to calculate " + "checksum and verify data integrity.", e); } } if (metadata.getContentType() == null) { /* * Default to the "application/octet-stream" if the user hasn't * specified a content type. */ metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM); } populateRequestMetadata(request, metadata); request.setContent(input); if (putObjectRequest instanceof UpdateObjectRequest) { request.addHeader(Headers.RANGE, "bytes=" + ((UpdateObjectRequest) putObjectRequest).getUpdateRange()); } ObjectMetadata returnedMetadata = null; try { returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key); } catch (AmazonClientException ace) { fireProgressEvent(progressListenerCallbackExecutor, com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE); throw ace; } finally { try { input.close(); } catch (Exception e) { log.warn("Unable to cleanly close input stream: " + e.getMessage(), e); } } String contentMd5 = metadata.getContentMD5(); if (md5DigestStream != null) { contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest()); } // Can't verify MD5 on appends/update (yet). if (!(putObjectRequest instanceof UpdateObjectRequest)) { if (returnedMetadata != null && contentMd5 != null) { byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5); byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag()); if (!Arrays.equals(clientSideHash, serverSideHash)) { fireProgressEvent(progressListenerCallbackExecutor, com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE); throw new AmazonClientException("Unable to verify integrity of data upload. " + "Client calculated content hash didn't match hash calculated by Amazon S3. " + "You may need to delete the data stored in Amazon S3."); } } } fireProgressEvent(progressListenerCallbackExecutor, com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE); return returnedMetadata; }
From source file:edu.si.services.beans.cameratrap.AmazonS3ClientMock.java
License:Apache License
/**
 * Mock implementation: records the request, captures the object content
 * (from the file if one is given, otherwise from the request's stream),
 * and answers with a canned ETag.
 */
@SuppressWarnings("resource")
@Override
public PutObjectResult putObject(PutObjectRequest putObjectRequest)
        throws AmazonClientException, AmazonServiceException {
    // Remember every request so tests can inspect what was uploaded.
    putObjectRequests.add(putObjectRequest);

    S3Object stored = new S3Object();
    stored.setBucketName(putObjectRequest.getBucketName());
    stored.setKey(putObjectRequest.getKey());

    File uploadFile = putObjectRequest.getFile();
    if (uploadFile == null) {
        stored.setObjectContent(putObjectRequest.getInputStream());
    } else {
        try {
            stored.setObjectContent(new FileInputStream(uploadFile));
        } catch (FileNotFoundException e) {
            throw new AmazonServiceException("Cannot store the file object.", e);
        }
    }
    objects.add(stored);

    PutObjectResult result = new PutObjectResult();
    result.setETag("3a5c8b1ad448bca04584ecb55b836264"); // fixed, canned ETag
    return result;
}
From source file:org.apache.camel.component.aws.s3.AmazonS3ClientMock.java
License:Apache License
/**
 * Mock implementation: records the request, stores the object content from
 * the request's stream, and answers with a canned ETag.
 */
@Override
public PutObjectResult putObject(PutObjectRequest putObjectRequest)
        throws AmazonClientException, AmazonServiceException {
    // Keep the raw request around for test assertions.
    putObjectRequests.add(putObjectRequest);

    S3Object stored = new S3Object();
    stored.setBucketName(putObjectRequest.getBucketName());
    stored.setKey(putObjectRequest.getKey());
    stored.setObjectContent(putObjectRequest.getInputStream());
    objects.add(stored);

    PutObjectResult answer = new PutObjectResult();
    answer.setETag("3a5c8b1ad448bca04584ecb55b836264"); // fixed, canned ETag
    return answer;
}
From source file:org.apache.camel.itest.osgi.aws.AmazonS3ClientMock.java
License:Apache License
@Override public PutObjectResult putObject(PutObjectRequest putObjectRequest) throws AmazonClientException, AmazonServiceException { S3Object s3Object = new S3Object(); s3Object.setBucketName(putObjectRequest.getBucketName()); s3Object.setKey(putObjectRequest.getKey()); s3Object.setObjectContent(putObjectRequest.getInputStream()); objects.add(s3Object);//www . java 2 s . c om PutObjectResult putObjectResult = new PutObjectResult(); putObjectResult.setETag("3a5c8b1ad448bca04584ecb55b836264"); return putObjectResult; }
From source file:org.finra.dm.dao.impl.MockS3OperationsImpl.java
License:Apache License
/**
 * Puts an object into a mock bucket, creating the bucket on demand.
 *
 * @throws IllegalArgumentException when the input stream cannot be read or
 *                                  the request's file does not exist
 */
@Override
public PutObjectResult putObject(PutObjectRequest putObjectRequest, AmazonS3Client s3Client) {
    LOGGER.debug("putObject(): putObjectRequest.getBucketName() = " + putObjectRequest.getBucketName()
            + ", putObjectRequest.getKey() = " + putObjectRequest.getKey());

    String bucketName = putObjectRequest.getBucketName();

    ObjectMetadata objectMetadata = putObjectRequest.getMetadata();
    if (objectMetadata == null) {
        objectMetadata = new ObjectMetadata();
    }

    // Prefer the file's contents when one is attached to the request.
    InputStream contentStream = putObjectRequest.getInputStream();
    File sourceFile = putObjectRequest.getFile();
    if (sourceFile != null) {
        try {
            contentStream = new FileInputStream(sourceFile);
            objectMetadata.setContentLength(sourceFile.length());
        } catch (FileNotFoundException e) {
            throw new IllegalArgumentException("File not found " + sourceFile, e);
        }
    }

    String objectKey = putObjectRequest.getKey();
    byte[] contents;
    try {
        contents = IOUtils.toByteArray(contentStream);
    } catch (IOException e) {
        throw new IllegalArgumentException("Error converting input stream into byte array", e);
    } finally {
        try {
            contentStream.close();
        } catch (IOException e) {
            LOGGER.error("Error closing stream " + contentStream, e);
        }
    }

    MockS3Object storedObject = new MockS3Object();
    storedObject.setKey(objectKey);
    storedObject.setData(contents);
    storedObject.setObjectMetadata(objectMetadata);
    getOrCreateBucket(bucketName).getObjects().put(objectKey, storedObject);

    return new PutObjectResult();
}
From source file:org.finra.dm.dao.impl.MockS3OperationsImpl.java
License:Apache License
/** * Puts an object./*w w w .j av a2 s . c o m*/ */ @Override public Upload upload(PutObjectRequest putObjectRequest, TransferManager transferManager) throws AmazonServiceException, AmazonClientException { LOGGER.debug("upload(): putObjectRequest.getBucketName() = " + putObjectRequest.getBucketName() + ", putObjectRequest.getKey() = " + putObjectRequest.getKey()); putObject(putObjectRequest, (AmazonS3Client) transferManager.getAmazonS3Client()); long contentLength = putObjectRequest.getFile().length(); TransferProgress progress = new TransferProgress(); progress.setTotalBytesToTransfer(contentLength); progress.updateProgress(contentLength); UploadImpl upload = new UploadImpl(null, progress, null, null); upload.setState(TransferState.Completed); return upload; }
From source file:org.finra.herd.dao.impl.MockS3OperationsImpl.java
License:Apache License
/** * {@inheritDoc}/*from w ww . j a va 2 s .c om*/ * <p/> * This implementation creates a new bucket if the bucket does not already exist. */ @Override public PutObjectResult putObject(PutObjectRequest putObjectRequest, AmazonS3 s3Client) { LOGGER.debug("putObject(): putObjectRequest.getBucketName() = " + putObjectRequest.getBucketName() + ", putObjectRequest.getKey() = " + putObjectRequest.getKey()); String s3BucketName = putObjectRequest.getBucketName(); InputStream inputStream = putObjectRequest.getInputStream(); ObjectMetadata metadata = putObjectRequest.getMetadata(); if (metadata == null) { metadata = new ObjectMetadata(); } File file = putObjectRequest.getFile(); if (file != null) { try { inputStream = new FileInputStream(file); metadata.setContentLength(file.length()); } catch (FileNotFoundException e) { throw new IllegalArgumentException("File not found " + file, e); } } String s3ObjectKey = putObjectRequest.getKey(); String s3ObjectVersion = MOCK_S3_BUCKET_NAME_VERSIONING_ENABLED.equals(putObjectRequest.getBucketName()) ? UUID.randomUUID().toString() : null; String s3ObjectKeyVersion = s3ObjectKey + (s3ObjectVersion != null ? s3ObjectVersion : ""); byte[] s3ObjectData; try { s3ObjectData = IOUtils.toByteArray(inputStream); metadata.setContentLength(s3ObjectData.length); } catch (IOException e) { throw new IllegalArgumentException("Error converting input stream into byte array", e); } finally { try { inputStream.close(); } catch (IOException e) { LOGGER.error("Error closing stream " + inputStream, e); } } // Update the Last-Modified header value. This value not being set causes NullPointerException in S3Dao download related unit tests. 
metadata.setLastModified(new Date()); MockS3Bucket mockS3Bucket = getOrCreateBucket(s3BucketName); MockS3Object mockS3Object = new MockS3Object(); mockS3Object.setKey(s3ObjectKey); mockS3Object.setVersion(s3ObjectVersion); mockS3Object.setData(s3ObjectData); mockS3Object.setObjectMetadata(metadata); if (putObjectRequest.getTagging() != null) { mockS3Object.setTags(putObjectRequest.getTagging().getTagSet()); } mockS3Bucket.getObjects().put(s3ObjectKey, mockS3Object); mockS3Bucket.getVersions().put(s3ObjectKeyVersion, mockS3Object); return new PutObjectResult(); }
From source file:org.finra.herd.dao.impl.MockS3OperationsImpl.java
License:Apache License
/**
 * Simulates a TransferManager upload: delegates to {@code putObject} and
 * returns an already-completed Upload whose progress equals the file size.
 */
@Override
public Upload upload(PutObjectRequest putObjectRequest, TransferManager transferManager) {
    LOGGER.debug("upload(): putObjectRequest.getBucketName() = " + putObjectRequest.getBucketName()
            + ", putObjectRequest.getKey() = " + putObjectRequest.getKey());

    putObject(putObjectRequest, transferManager.getAmazonS3Client());

    long bytes = putObjectRequest.getFile().length();
    TransferProgress completedProgress = new TransferProgress();
    completedProgress.setTotalBytesToTransfer(bytes);
    completedProgress.updateProgress(bytes);

    UploadImpl completedUpload = new UploadImpl(null, completedProgress, null, null);
    completedUpload.setState(TransferState.Completed);
    return completedUpload;
}