List of usage examples for com.amazonaws.services.s3 AmazonS3 abortMultipartUpload
public void abortMultipartUpload(AbortMultipartUploadRequest request) throws SdkClientException, AmazonServiceException;
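A minimal sketch of the call (the bucket and key names below are placeholders, not part of any example on this page): abortMultipartUpload takes the bucket, key and upload ID of an in-progress multipart upload, typically the ID returned by initiateMultipartUpload, and discards every part uploaded so far.

// Minimal sketch; "example-bucket" and "example-key" are placeholder names.
AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();
InitiateMultipartUploadResult init = s3Client
        .initiateMultipartUpload(new InitiateMultipartUploadRequest("example-bucket", "example-key"));
// ... parts would be uploaded here; on failure, release the parts S3 is still holding:
s3Client.abortMultipartUpload(
        new AbortMultipartUploadRequest("example-bucket", "example-key", init.getUploadId()));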
From source file:com.handywedge.binarystore.store.aws.BinaryStoreManagerImpl.java
License:MIT License
@Override
public BinaryInfo upload(StorageInfo storage, BinaryInfo binary, InputStream inStream) throws StoreException {
    logger.debug("storage={}", storage);
    logger.debug("binary={}", binary);

    AmazonS3 s3client = getS3Client(binary.getBucketName());

    ObjectMetadata oMetadata = new ObjectMetadata();
    oMetadata.setContentType(binary.getContentType());

    // Initiate the multipart upload.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(binary.getBucketName(),
            binary.getFileName(), oMetadata);
    InitiateMultipartUploadResult initResponse = s3client.initiateMultipartUpload(initRequest);

    try {
        // Buffer up to the first 5 MB to decide between a single PUT and a multipart upload.
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        long written = IOUtils.copyLarge(inStream, baos, 0, BINARY_PART_SIZE_5MB);

        byte[] data = baos.toByteArray();
        InputStream awsInputStream = new ByteArrayInputStream(data);

        if (written < BINARY_PART_SIZE_5MB) {
            // The whole payload fits in one part; upload it with a plain putObject.
            oMetadata.setContentLength(written);
            s3client.putObject(binary.getBucketName(), binary.getFileName(), awsInputStream, oMetadata);
        } else {
            int firstByte = 0;
            int partNumber = 1;
            boolean isFirstChunck = true;
            boolean overSizeLimit = false;
            List<PartETag> partETags = new ArrayList<PartETag>();
            InputStream firstChunck = new ByteArrayInputStream(data);
            PushbackInputStream chunckableInputStream = new PushbackInputStream(inStream, 1);

            long maxSize = BINARY_PART_SIZE_5MB * 1024; // 5GB
            String maxSizeStr = "5GB";
            String prefix = MDC.get("requestId");

            while (-1 != (firstByte = chunckableInputStream.read())) {
                long partSize = 0;
                chunckableInputStream.unread(firstByte);
                File tempFile = File.createTempFile(prefix.concat("-part").concat(String.valueOf(partNumber)),
                        null);
                tempFile.deleteOnExit();
                try (OutputStream os = new BufferedOutputStream(
                        new FileOutputStream(tempFile.getAbsolutePath()))) {
                    if (isFirstChunck) {
                        partSize = IOUtils.copyLarge(firstChunck, os, 0, (BINARY_PART_SIZE_5MB));
                        isFirstChunck = false;
                    } else {
                        partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (BINARY_PART_SIZE_5MB));
                    }
                    written += partSize;

                    if (written > maxSize) { // 5GB
                        overSizeLimit = true;
                        logger.warn("OVERSIZED FILE ({}). STARTING ABORT", written);
                        break;
                    }
                }

                FileInputStream chunk = new FileInputStream(tempFile);
                Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
                if (!isLastPart) {
                    chunckableInputStream.unread(firstByte);
                }

                oMetadata.setContentLength(partSize);

                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(binary.getBucketName())
                        .withKey(binary.getFileName()).withUploadId(initResponse.getUploadId())
                        .withObjectMetadata(oMetadata).withInputStream(chunk).withPartSize(partSize)
                        .withPartNumber(partNumber).withLastPart(isLastPart);
                UploadPartResult result = s3client.uploadPart(uploadRequest);

                partETags.add(result.getPartETag());
                partNumber++;
            }

            if (overSizeLimit) {
                ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(
                        binary.getBucketName());
                MultipartUploadListing listResult = s3client.listMultipartUploads(listRequest);

                int timesIterated = 20;
                // Loop and abort all the multipart uploads.
                while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {
                    s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                            binary.getFileName(), initResponse.getUploadId()));
                    Thread.sleep(1000);
                    timesIterated--;
                    listResult = s3client.listMultipartUploads(listRequest);
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }

                if (timesIterated == 0) {
                    logger.warn("Files parts that couldn't be aborted in 20 seconds are:");
                    Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads()
                            .iterator();
                    while (multipartUploadIterator.hasNext()) {
                        logger.warn(multipartUploadIterator.next().getKey());
                    }
                }
                throw new StoreException(HttpStatus.SC_REQUEST_TOO_LONG, ErrorClassification.UPLOAD_TOO_LARGE,
                        maxSizeStr);
            } else {
                CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                        binary.getBucketName(), binary.getFileName(), initResponse.getUploadId(), partETags);
                CompleteMultipartUploadResult comMPUResult = s3client.completeMultipartUpload(compRequest);
                logger.debug("CompleteMultipartUploadResult={}", comMPUResult);
            }
        }
    } catch (AmazonServiceException ase) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ase,
                binary.toString());
    } catch (AmazonClientException ace) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ace,
                binary.toString());
    } catch (IOException ioe) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, ioe,
                binary.toString());
    } catch (InterruptedException itre) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, itre,
                binary.toString());
    } finally {
        if (inStream != null) {
            try {
                inStream.close();
            } catch (Exception e) {
            }
        }
    }

    return getBinaryInfo(s3client, binary.getBucketName(), binary.getFileName());
}
From source file:com.netflix.bdp.s3.S3Util.java
License:Apache License
public static void abortCommit(AmazonS3 client, PendingUpload commit) {
    client.abortMultipartUpload(commit.newAbortRequest());
}
From source file:com.netflix.bdp.s3.S3Util.java
License:Apache License
public static PendingUpload multipartUpload(AmazonS3 client, File localFile, String partition, String bucket,
        String key, long uploadPartSize) {
    InitiateMultipartUploadResult initiate = client
            .initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key));
    String uploadId = initiate.getUploadId();

    boolean threw = true;
    try {
        Map<Integer, String> etags = Maps.newLinkedHashMap();

        long offset = 0;
        long numParts = (localFile.length() / uploadPartSize
                + ((localFile.length() % uploadPartSize) > 0 ? 1 : 0));
        Preconditions.checkArgument(numParts > 0, "Cannot upload 0 byte file: " + localFile);

        for (int partNumber = 1; partNumber <= numParts; partNumber += 1) {
            long size = Math.min(localFile.length() - offset, uploadPartSize);
            UploadPartRequest part = new UploadPartRequest().withBucketName(bucket).withKey(key)
                    .withPartNumber(partNumber).withUploadId(uploadId).withFile(localFile)
                    .withFileOffset(offset).withPartSize(size).withLastPart(partNumber == numParts);
            UploadPartResult partResult = client.uploadPart(part);
            PartETag etag = partResult.getPartETag();
            etags.put(etag.getPartNumber(), etag.getETag());
            offset += uploadPartSize;
        }

        PendingUpload pending = new PendingUpload(partition, bucket, key, uploadId, etags);

        threw = false;

        return pending;
    } finally {
        if (threw) {
            try {
                client.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
            } catch (AmazonClientException e) {
                LOG.error("Failed to abort multi-part upload", e);
            }
        }
    }
}
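The two S3Util helpers above appear designed to be used together: multipartUpload produces a PendingUpload, and abortCommit discards it. A hypothetical caller (the variable names and the commit decision below are assumptions, not part of the library) might look like this:

// Hypothetical caller: the upload phase produces a PendingUpload that a later
// commit step either completes or discards via abortCommit, which calls
// AmazonS3#abortMultipartUpload under the hood.
PendingUpload pending = S3Util.multipartUpload(client, localFile, partition, bucket, key, uploadPartSize);
boolean commitSucceeded = false; // assumption: decided by the surrounding commit protocol
if (!commitSucceeded) {
    S3Util.abortCommit(client, pending);
}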
From source file:org.deeplearning4j.aws.s3.uploader.S3Uploader.java
License:Apache License
private void doMultiPart(AmazonS3 s3Client, String bucketName, File file) {
    // Create a list of UploadPartResponse objects. You get one of these
    // for each part upload.
    List<PartETag> partETags = new ArrayList<>();

    // Step 1: Initialize.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, file.getName());
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);

    long contentLength = file.length();
    long partSize = 5242880; // Set part size to 5 MB.

    try {
        // Step 2: Upload parts.
        long filePosition = 0;
        for (int i = 1; filePosition < contentLength; i++) {
            // Last part can be less than 5 MB. Adjust part size.
            partSize = Math.min(partSize, (contentLength - filePosition));

            // Create request to upload a part.
            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName)
                    .withKey(file.getName()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withFileOffset(filePosition).withFile(file).withPartSize(partSize);

            // Upload part and add response to our list.
            partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());

            filePosition += partSize;
        }

        // Step 3: Complete.
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName,
                file.getName(), initResponse.getUploadId(), partETags);

        s3Client.completeMultipartUpload(compRequest);
    } catch (Exception e) {
        s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucketName, file.getName(), initResponse.getUploadId()));
    }
}
From source file:org.finra.herd.dao.impl.S3OperationsImpl.java
License:Apache License
@Override
public void abortMultipartUpload(AbortMultipartUploadRequest abortMultipartUploadRequest, AmazonS3 s3Client) {
    s3Client.abortMultipartUpload(abortMultipartUploadRequest);
}