List of usage examples for the com.amazonaws.services.s3.model CompleteMultipartUploadRequest constructor
public CompleteMultipartUploadRequest(String bucketName, String key, String uploadId,
List<PartETag> partETags)
From source file:alluxio.underfs.s3a.S3ALowLevelOutputStream.java
License:Apache License
/**
 * Completes the multipart upload by sending the accumulated part tags to S3.
 *
 * <p>The completion request is built once and retried on {@link AmazonClientException}
 * for as long as {@code mRetryPolicy.attempt()} allows; the do/while guarantees at
 * least one attempt before the policy is consulted.
 *
 * @throws IOException if the upload could not be completed within the retry budget;
 *         the last S3 failure is attached as the cause
 */
private void completeMultiPartUpload() throws IOException {
    AmazonClientException lastException;
    CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(mBucketName, mKey,
            mUploadId, mTags);
    do {
        try {
            mClient.completeMultipartUpload(completeRequest);
            LOG.debug("Completed multipart upload for key {} and id '{}' with {} partitions.", mKey,
                    mUploadId, mTags.size());
            return;
        } catch (AmazonClientException e) {
            // Remember the most recent failure so it can be surfaced as the cause below.
            lastException = e;
        }
    } while (mRetryPolicy.attempt());
    // This point is only reached if the operation failed more
    // than the allowed retry count
    throw new IOException("Unable to complete multipart upload with id '" + mUploadId + "' to " + mKey,
            lastException);
}
From source file:c3.ops.priam.aws.S3PartUploader.java
License:Apache License
/**
 * Finalizes the multipart upload for the current data part, handing S3 the
 * part ETags collected during the individual part uploads.
 */
public void completeUpload() throws BackupRestoreException {
    String bucket = dataPart.getBucketName();
    String s3Key = dataPart.getS3key();
    CompleteMultipartUploadRequest request =
            new CompleteMultipartUploadRequest(bucket, s3Key, dataPart.getUploadID(), partETags);
    client.completeMultipartUpload(request);
}
From source file:com.altoukhov.svsync.fileviews.S3FileSpace.java
License:Apache License
/**
 * Uploads a large file to S3 as a multipart upload, reading successive parts
 * directly from {@code fileStream}.
 *
 * The upload is initiated with SSE (AES-256) and a user-metadata entry "lmd"
 * holding the file's modification time in epoch millis. Parts of up to 256 MB
 * are sent sequentially; the final part is shrunk to the remaining byte count.
 * On any per-part failure the multipart upload is aborted.
 *
 * @param fileStream source of the file bytes; null yields an immediate false
 * @param file snapshot describing size, relative path and modification time
 * @return true when the upload completed, false on null input or any failure
 */
public boolean writeLargeFile(InputStream fileStream, FileSnapshot file) {
    if (fileStream == null)
        return false;

    try {
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(file.getFileSize());
        meta.getUserMetadata().put("lmd", file.getModifiedTimestamp().toDate().getTime() + "");
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);

        List<PartETag> partTags = new ArrayList<>();
        String fileKey = toAbsoluteFilePath(file.getRelativePath());
        InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, fileKey, meta);
        InitiateMultipartUploadResult result = s3.initiateMultipartUpload(request);

        long contentLength = file.getFileSize();
        long partSize = 256 * 1024 * 1024; // 256 MB per part (S3 minimum is 5 MB except the last part)

        try {
            // Uploading the file, part by part.
            long filePosition = 0;
            for (int i = 1; filePosition < contentLength; i++) {
                // Only the final iteration shrinks partSize (remaining bytes < 256 MB).
                partSize = Math.min(partSize, (contentLength - filePosition));

                // Creating the request for a part upload; the SDK reads exactly
                // partSize bytes from the shared stream for each part.
                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName)
                        .withKey(fileKey).withUploadId(result.getUploadId()).withPartNumber(i)
                        .withInputStream(fileStream).withPartSize(partSize);

                // Upload part and add response to the result list.
                partTags.add(s3.uploadPart(uploadRequest).getPartETag());
                filePosition += partSize;
                System.out.println("Uploaded " + Utils.readableFileSize(filePosition) + " out of "
                        + Utils.readableFileSize(contentLength));
            }
        } catch (Exception e) {
            // Abort so S3 does not keep billing for the orphaned parts.
            System.out.println("UploadPartRequest failed: " + e.getMessage());
            s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, fileKey, result.getUploadId()));
            return false;
        }

        s3.completeMultipartUpload(
                new CompleteMultipartUploadRequest(bucketName, fileKey, result.getUploadId(), partTags));
    } catch (AmazonClientException ex) {
        System.out.println("Upload failed: " + ex.getMessage());
        return false;
    }
    return true;
}
From source file:com.emc.vipr.s3.s3api.java
License:Open Source License
/**
 * Uploads {@code file} to the given ViPR S3 bucket/key as a multipart upload
 * using fixed 1 MB parts, optionally attaching one user-metadata pair.
 *
 * NOTE(review): this method prints the access key AND the secret key to stdout —
 * confirm whether that credential logging is intentional before wider use.
 *
 * @param metaKey/metaValue user-metadata pair; skipped only when BOTH are empty
 * @throws Exception propagated from client creation or any S3 call
 */
public static void CreateLargeObject(String S3_ACCESS_KEY_ID, String S3_SECRET_KEY, String S3_ENDPOINT,
        String S3_ViPR_NAMESPACE, String S3_BUCKET, String key, File file, String metaKey, String metaValue)
        throws Exception {
    System.out.println("Access ID:" + S3_ACCESS_KEY_ID);
    System.out.println("Access secret:" + S3_SECRET_KEY);
    System.out.println("Access URL:" + S3_ENDPOINT);
    System.out.println("Access namespace:" + S3_ViPR_NAMESPACE);
    System.out.println("Access bucket:" + S3_BUCKET);
    System.out.println("Access key:" + key);
    ViPRS3Client s3 = getS3Client(S3_ACCESS_KEY_ID, S3_SECRET_KEY, S3_ENDPOINT, S3_ViPR_NAMESPACE);

    ObjectMetadata objmeta = new ObjectMetadata();
    // Attach the user-metadata pair unless both key and value are empty.
    if (!(metaKey.equals("") && metaValue.equals(""))) {
        objmeta.addUserMetadata(metaKey, metaValue);
    }
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(S3_BUCKET, key)
            .withObjectMetadata(objmeta);
    InitiateMultipartUploadResult initResponse = s3.initiateMultipartUpload(initRequest);

    long partSize = 1 * 1024 * 1024; // Set part size to 1 MB.

    // list of UploadPartResponse objects for each part that is uploaded
    List<PartETag> partETags = new ArrayList<PartETag>();
    long filePosition = 0;
    for (int i = 1; filePosition < file.length(); i++) {
        // get the size of the chunk. Note - the last part can be less than the chunk size
        partSize = Math.min(partSize, (file.length() - filePosition));

        System.out.println(String.format("Sending chunk [%d] starting at position [%d]", i, filePosition));

        // Create request to upload a part.
        UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(S3_BUCKET).withKey(key)
                .withUploadId(initResponse.getUploadId()).withPartNumber(i).withFileOffset(filePosition)
                .withFile(file).withPartSize(partSize);

        // Upload part and add response to our list.
        PartETag eTagPart = s3.uploadPart(uploadRequest).getPartETag();
        partETags.add(eTagPart);

        // set file position to the next part in the file
        filePosition += partSize;
    }

    System.out.println("Waiting for completion of multi-part upload");
    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(S3_BUCKET, key,
            initResponse.getUploadId(), partETags);
    s3.completeMultipartUpload(compRequest);
}
From source file:com.eucalyptus.blockstorage.S3SnapshotTransfer.java
License:Open Source License
private String finalizeMultipartUpload(List<PartETag> partETags) throws SnapshotFinalizeMpuException { CompleteMultipartUploadResult result; try {//from w w w.ja v a 2 s . com LOG.info("Finalizing multipart upload: snapshotId=" + snapshotId + ", bucketName=" + bucketName + ", keyName=" + keyName + ", uploadId=" + uploadId); result = retryAfterRefresh( new Function<CompleteMultipartUploadRequest, CompleteMultipartUploadResult>() { @Override @Nullable public CompleteMultipartUploadResult apply(@Nullable CompleteMultipartUploadRequest arg0) { eucaS3Client.refreshEndpoint(); return eucaS3Client.completeMultipartUpload(arg0); } }, new CompleteMultipartUploadRequest(bucketName, keyName, uploadId, partETags), REFRESH_TOKEN_RETRIES); return result.getETag(); } catch (Exception ex) { LOG.debug("Failed to finalize multipart upload for snapshotId=" + snapshotId + ", bucketName=" + ", keyName=" + keyName, ex); throw new SnapshotFinalizeMpuException( "Failed to initialize multipart upload part after for snapshotId=" + snapshotId + ", bucketName=" + bucketName + ", keyName=" + keyName); } }
From source file:com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java
License:Open Source License
@Override public CompleteMultipartUploadResponseType completeMultipartUpload(CompleteMultipartUploadType request) throws S3Exception { CompleteMultipartUploadResponseType reply = request.getReply(); User requestUser = getRequestUser(request); OsgInternalS3Client internalS3Client = null; String bucketName = request.getBucket(); String key = request.getKey(); String uploadId = request.getUploadId(); List<Part> parts = request.getParts(); List<PartETag> partETags = new ArrayList<>(); for (Part part : parts) { PartETag partETag = new PartETag(part.getPartNumber(), part.getEtag()); partETags.add(partETag);//w w w . j a v a 2 s . co m } CompleteMultipartUploadRequest multipartRequest = new CompleteMultipartUploadRequest(bucketName, key, uploadId, partETags); try { internalS3Client = getS3Client(requestUser); AmazonS3Client s3Client = internalS3Client.getS3Client(); CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(multipartRequest); reply.setEtag(result.getETag()); reply.setBucket(bucketName); reply.setKey(key); reply.setLocation(result.getLocation()); reply.setLastModified(new Date()); } catch (AmazonServiceException e) { LOG.debug("Error from backend", e); throw S3ExceptionMapper.fromAWSJavaSDK(e); } return reply; }
From source file:com.example.jinux.mydemo.s3.Uploader.java
License:Apache License
/**
 * Initiate (or resume) a multipart file upload to Amazon S3.
 *
 * If a cached uploadId exists, previously uploaded part ETags are restored and
 * the upload resumes at the next part boundary; otherwise a fresh multipart
 * upload is initiated. Parts are uploaded sequentially with a progress listener
 * that supports user interrupt/abort, and part ETags are cached after each part
 * so a later run can resume.
 *
 * @return the URL of a successfully uploaded file
 */
public String start() {
    // initialize
    List<PartETag> partETags = new ArrayList<PartETag>();
    final long contentLength = file.length();
    long filePosition = 0;
    int startPartNumber = 1;
    userInterrupted = false;
    userAborted = false;
    bytesUploaded = 0;

    // check if we can resume an incomplete download
    String uploadId = getCachedUploadId();
    Utils.log("start uploading");
    if (uploadId != null) {
        // we can resume the download
        Log.i(TAG, "resuming upload for " + uploadId);
        // get the cached etags
        List<PartETag> cachedEtags = getCachedPartEtags();
        partETags.addAll(cachedEtags);
        // calculate the start position for resume
        startPartNumber = cachedEtags.size() + 1;
        // NOTE(review): if partSize is an int, this multiplication is int arithmetic and
        // can overflow for resumes past 2 GB — confirm partSize's declared type.
        filePosition = (startPartNumber - 1) * partSize;
        bytesUploaded = filePosition;
        Log.i(TAG, "resuming at part " + startPartNumber + " position " + filePosition);
    } else {
        // initiate a new multi part upload
        Log.i(TAG, "initiating new upload");
        Utils.log("the bucket = " + s3bucketName);
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(s3bucketName, s3key);
        configureInitiateRequest(initRequest);
        initRequest.getRequestClientOptions()
                .appendUserAgent("TransferService_multipart/" + VersionInfoUtils.getVersion());
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        uploadId = initResponse.getUploadId();
    }

    // Pre-built abort request, reused by the progress listener on user abort.
    final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(s3bucketName, s3key,
            uploadId);

    for (int k = startPartNumber; filePosition < contentLength; k++) {
        // Only the final part is smaller than partSize.
        long thisPartSize = Math.min(partSize, (contentLength - filePosition));

        Log.i(TAG, "starting file part " + k + " with size " + thisPartSize);

        UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(s3bucketName).withKey(s3key)
                .withUploadId(uploadId).withPartNumber(k).withFileOffset(filePosition).withFile(file)
                .withPartSize(thisPartSize);

        ProgressListener s3progressListener = new ProgressListener() {
            public void progressChanged(ProgressEvent progressEvent) {
                // bail out if user cancelled
                // TODO calling shutdown too brute force?
                if (userInterrupted) {
                    s3Client.shutdown();
                    throw new UploadIterruptedException("User interrupted");
                } else if (userAborted) {
                    // aborted requests cannot be resumed, so clear any cached etags
                    clearProgressCache();
                    s3Client.abortMultipartUpload(abortRequest);
                    s3Client.shutdown();
                }

                bytesUploaded += progressEvent.getBytesTransfered();

                //Log.d(TAG, "bytesUploaded=" + bytesUploaded);

                // broadcast progress
                // NOTE(review): integer division happens before the cast to float, so the
                // percentage is already truncated when Math.round sees it.
                float fpercent = ((bytesUploaded * 100) / contentLength);
                int percent = Math.round(fpercent);
                if (progressListener != null) {
                    progressListener.progressChanged(progressEvent, bytesUploaded, percent);
                }
            }
        };
        uploadRequest.setProgressListener(s3progressListener);

        UploadPartResult result = s3Client.uploadPart(uploadRequest);
        partETags.add(result.getPartETag());

        // cache the part progress for this upload
        if (k == 1) {
            initProgressCache(uploadId);
        }
        // store part etag
        cachePartEtag(result);

        filePosition += thisPartSize;
    }

    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(s3bucketName, s3key,
            uploadId, partETags);

    CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(compRequest);
    bytesUploaded = 0;

    Log.i(TAG, "upload complete for " + uploadId);
    clearProgressCache();
    return result.getLocation();
}
From source file:com.handywedge.binarystore.store.aws.BinaryStoreManagerImpl.java
License:MIT License
/**
 * Uploads the given stream to S3, choosing between a simple put (content under
 * 5 MB) and a chunked multipart upload (5 MB parts buffered via temp files).
 *
 * The stream is probed by copying up to BINARY_PART_SIZE_5MB into memory; if the
 * copy came up short, the buffered bytes are uploaded with putObject. Otherwise a
 * PushbackInputStream drives a part-by-part upload, enforcing a total size cap of
 * BINARY_PART_SIZE_5MB * 1024 (advertised as "5GB"); oversize uploads are aborted
 * (with up to 20 abort/poll cycles) and reported as UPLOAD_TOO_LARGE.
 *
 * @param storage storage descriptor (only logged here)
 * @param binary target bucket/file name/content type
 * @param inStream source bytes; always closed in the finally block
 * @return metadata of the stored binary, via getBinaryInfo
 * @throws StoreException on S3 errors (upload aborted first), I/O errors, or interruption
 */
@Override
public BinaryInfo upload(StorageInfo storage, BinaryInfo binary, InputStream inStream) throws StoreException {
    logger.debug("={}", storage);
    logger.debug("?={}", binary);
    AmazonS3 s3client = getS3Client(binary.getBucketName());

    ObjectMetadata oMetadata = new ObjectMetadata();
    oMetadata.setContentType(binary.getContentType());

    // Initiate the multipart upload up front; it is aborted if the simple-put
    // path is taken and an S3 error occurs, or on oversize/failure below.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(binary.getBucketName(),
            binary.getFileName(), oMetadata);
    InitiateMultipartUploadResult initResponse = s3client.initiateMultipartUpload(initRequest);

    try {
        // Probe: buffer the first (up to) 5 MB to decide simple put vs. multipart.
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        long written = IOUtils.copyLarge(inStream, baos, 0, BINARY_PART_SIZE_5MB);

        byte[] data = baos.toByteArray();
        InputStream awsInputStream = new ByteArrayInputStream(data);

        if (written < BINARY_PART_SIZE_5MB) {
            oMetadata.setContentLength(written);
            s3client.putObject(binary.getBucketName(), binary.getFileName(), awsInputStream, oMetadata);
        } else {
            int firstByte = 0;
            int partNumber = 1;
            boolean isFirstChunck = true;
            boolean overSizeLimit = false;
            List<PartETag> partETags = new ArrayList<PartETag>();
            InputStream firstChunck = new ByteArrayInputStream(data);
            PushbackInputStream chunckableInputStream = new PushbackInputStream(inStream, 1);

            long maxSize = BINARY_PART_SIZE_5MB * 1024;
            String maxSizeStr = "5GB";
            String prefix = MDC.get("requestId");
            while (-1 != (firstByte = chunckableInputStream.read())) {
                long partSize = 0;
                // The probe byte is pushed back so the part copy sees it again.
                chunckableInputStream.unread(firstByte);
                File tempFile = File.createTempFile(prefix.concat("-part").concat(String.valueOf(partNumber)),
                        null);
                tempFile.deleteOnExit();
                try (OutputStream os = new BufferedOutputStream(
                        new FileOutputStream(tempFile.getAbsolutePath()))) {
                    if (isFirstChunck) {
                        // The first part replays the bytes already buffered during the probe.
                        partSize = IOUtils.copyLarge(firstChunck, os, 0, (BINARY_PART_SIZE_5MB));
                        isFirstChunck = false;
                    } else {
                        partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (BINARY_PART_SIZE_5MB));
                    }
                    written += partSize;

                    if (written > maxSize) { // 5GB
                        overSizeLimit = true;
                        logger.warn("OVERSIZED FILE ({}). STARTING ABORT", written);
                        break;
                    }
                }

                // NOTE(review): this FileInputStream is never explicitly closed; it is
                // handed to the SDK via withInputStream — confirm the SDK closes it,
                // otherwise this leaks a descriptor per part.
                FileInputStream chunk = new FileInputStream(tempFile);
                // Peek one byte ahead to decide whether this is the last part.
                Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
                if (!isLastPart) {
                    chunckableInputStream.unread(firstByte);
                }

                oMetadata.setContentLength(partSize);

                UploadPartRequest uploadRequest = new UploadPartRequest()
                        .withBucketName(binary.getBucketName()).withKey(binary.getFileName())
                        .withUploadId(initResponse.getUploadId()).withObjectMetadata(oMetadata)
                        .withInputStream(chunk).withPartSize(partSize).withPartNumber(partNumber)
                        .withLastPart(isLastPart);

                UploadPartResult result = s3client.uploadPart(uploadRequest);
                partETags.add(result.getPartETag());
                partNumber++;
            }

            if (overSizeLimit) {
                ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(
                        binary.getBucketName());
                MultipartUploadListing listResult = s3client.listMultipartUploads(listRequest);

                int timesIterated = 20;
                // loop and abort all the multipart uploads
                while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {
                    s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                            binary.getFileName(), initResponse.getUploadId()));
                    Thread.sleep(1000);
                    timesIterated--;
                    listResult = s3client.listMultipartUploads(listRequest);
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }

                if (timesIterated == 0) {
                    logger.warn("Files parts that couldn't be aborted in 20 seconds are:");
                    Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads()
                            .iterator();
                    while (multipartUploadIterator.hasNext()) {
                        logger.warn(multipartUploadIterator.next().getKey());
                    }
                }
                throw new StoreException(HttpStatus.SC_REQUEST_TOO_LONG, ErrorClassification.UPLOAD_TOO_LARGE,
                        maxSizeStr);
            } else {
                CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                        binary.getBucketName(), binary.getFileName(), initResponse.getUploadId(), partETags);
                CompleteMultipartUploadResult comMPUResult = s3client.completeMultipartUpload(compRequest);
                logger.debug("CompleteMultipartUploadResult={}", comMPUResult);
            }
        }
    } catch (AmazonServiceException ase) {
        // Abort before surfacing the failure so the orphaned parts are released.
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ase,
                binary.toString());
    } catch (AmazonClientException ace) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ace,
                binary.toString());
    } catch (IOException ioe) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, ioe,
                binary.toString());
    } catch (InterruptedException itre) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, itre,
                binary.toString());
    } finally {
        if (inStream != null) {
            try {
                inStream.close();
            } catch (Exception e) {
            }
        }
    }
    return getBinaryInfo(s3client, binary.getBucketName(), binary.getFileName());
}
From source file:com.lithium.flow.filer.S3Filer.java
License:Apache License
/**
 * Returns an OutputStream whose bytes end up at {@code path} in the bucket.
 *
 * Bytes accumulate in an in-memory buffer; each time the buffer reaches
 * partSize it is flushed ("flipped") to a temp file and uploaded asynchronously
 * as the next multipart part. On close: if no part was ever flipped, the whole
 * buffer is uploaded with a single putObject; otherwise the remainder is
 * flipped, all part futures are awaited, and the multipart upload is completed
 * (or aborted if any part failed). The multipart upload itself is initiated
 * lazily on the first flip.
 *
 * @param path file path; the leading character is stripped to form the S3 key
 */
@Override
@Nonnull
public OutputStream writeFile(@Nonnull String path) throws IOException {
    String key = path.substring(1);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    List<Future<PartETag>> futureTags = new ArrayList<>();
    // Lazy so a small file (never flipped) never initiates a multipart upload.
    Lazy<String> uploadId = new Lazy<>(
            () -> s3.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key)).getUploadId());

    return new OutputStream() {
        @Override
        public void write(int b) throws IOException {
            baos.write(b);
            flip(partSize);
        }

        @Override
        public void write(byte[] b) throws IOException {
            baos.write(b);
            flip(partSize);
        }

        @Override
        public void write(byte[] b, int off, int len) throws IOException {
            baos.write(b, off, len);
            flip(partSize);
        }

        @Override
        public void close() throws IOException {
            if (futureTags.size() == 0) {
                // Never exceeded one part: upload the buffer directly.
                InputStream in = new ByteArrayInputStream(baos.toByteArray());
                ObjectMetadata metadata = new ObjectMetadata();
                metadata.setContentLength(baos.size());
                s3.putObject(bucket, key, in, metadata);
            } else {
                // Flush whatever remains (threshold 1 forces any non-empty buffer out).
                flip(1);

                List<PartETag> tags = Lists.newArrayList();
                for (Future<PartETag> futureTag : futureTags) {
                    try {
                        tags.add(futureTag.get());
                    } catch (Exception e) {
                        s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId.get()));
                        throw new IOException("failed to upload: " + path, e);
                    }
                }

                s3.completeMultipartUpload(
                        new CompleteMultipartUploadRequest(bucket, key, uploadId.get(), tags));
            }
        }

        // Spills the buffer to a temp file and schedules it as the next part,
        // but only once the buffer has reached minSize bytes.
        private void flip(long minSize) throws IOException {
            if (baos.size() < minSize) {
                return;
            }

            File file = new File(tempDir, UUID.randomUUID().toString());
            file.deleteOnExit();
            OutputStream out = new FileOutputStream(file);
            out.write(baos.toByteArray());
            out.close();
            baos.reset();

            UploadPartRequest uploadRequest = new UploadPartRequest().withUploadId(uploadId.get())
                    .withBucketName(bucket).withKey(key).withPartNumber(futureTags.size() + 1)
                    .withPartSize(file.length()).withFile(file);

            futureTags.add(service.submit(() -> {
                try {
                    return s3.uploadPart(uploadRequest).getPartETag();
                } finally {
                    // Temp file is only needed until the part upload finishes.
                    file.delete();
                }
            }));
        }
    };
}
From source file:com.netflix.dynomitemanager.sidecore.backup.S3Backup.java
License:Apache License
/** * Uses the Amazon S3 API to upload the AOF/RDB to S3 * Filename: Backup location + DC + Rack + App + Token */// www .j av a 2s . co m @Override public boolean upload(File file, DateTime todayStart) { logger.info("Snapshot backup: sending " + file.length() + " bytes to S3"); /* Key name is comprised of the * backupDir + DC + Rack + token + Date */ String keyName = config.getBackupLocation() + "/" + iid.getInstance().getDatacenter() + "/" + iid.getInstance().getRack() + "/" + iid.getInstance().getToken() + "/" + todayStart.getMillis(); // Get bucket location. logger.info("Key in Bucket: " + keyName); logger.info("S3 Bucket Name:" + config.getBucketName()); AmazonS3Client s3Client = new AmazonS3Client(cred.getAwsCredentialProvider()); try { // Checking if the S3 bucket exists, and if does not, then we create it if (!(s3Client.doesBucketExist(config.getBucketName()))) { logger.error("Bucket with name: " + config.getBucketName() + " does not exist"); return false; } else { logger.info("Uploading data to S3\n"); // Create a list of UploadPartResponse objects. You get one of these for // each part upload. List<PartETag> partETags = new ArrayList<PartETag>(); InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest( config.getBucketName(), keyName); InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest); long contentLength = file.length(); long filePosition = 0; long partSize = this.initPartSize; try { for (int i = 1; filePosition < contentLength; i++) { // Last part can be less than initPartSize (500MB). Adjust part size. partSize = Math.min(partSize, (contentLength - filePosition)); // Create request to upload a part. UploadPartRequest uploadRequest = new UploadPartRequest() .withBucketName(config.getBucketName()).withKey(keyName) .withUploadId(initResponse.getUploadId()).withPartNumber(i) .withFileOffset(filePosition).withFile(file).withPartSize(partSize); // Upload part and add response to our list. 
partETags.add(s3Client.uploadPart(uploadRequest).getPartETag()); filePosition += partSize; } CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest( config.getBucketName(), keyName, initResponse.getUploadId(), partETags); s3Client.completeMultipartUpload(compRequest); } catch (Exception e) { logger.error("Abosting multipart upload due to error"); s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(config.getBucketName(), keyName, initResponse.getUploadId())); } return true; } } catch (AmazonServiceException ase) { logger.error( "AmazonServiceException;" + " request made it to Amazon S3, but was rejected with an error "); logger.error("Error Message: " + ase.getMessage()); logger.error("HTTP Status Code: " + ase.getStatusCode()); logger.error("AWS Error Code: " + ase.getErrorCode()); logger.error("Error Type: " + ase.getErrorType()); logger.error("Request ID: " + ase.getRequestId()); return false; } catch (AmazonClientException ace) { logger.error("AmazonClientException;" + " the client encountered " + "an internal error while trying to " + "communicate with S3, "); logger.error("Error Message: " + ace.getMessage()); return false; } }