Usage examples for the com.amazonaws.services.glacier.model.UploadMultipartPartRequest no-argument constructor:
public UploadMultipartPartRequest()
From source file:com.vrane.metaGlacier.Archive.java
private String uploadParts(String uploadId) throws AmazonServiceException, NoSuchAlgorithmException, AmazonClientException, IOException { int filePosition = 0; long currentPosition = 0; byte[] buffer = new byte[(int) granularity]; List<byte[]> binaryChecksums = new LinkedList<>(); final File file = new File(localPath); String contentRange;// ww w .java 2s. c om int read = 0; try (FileInputStream fileToUpload = new FileInputStream(file)) { while (currentPosition < file.length()) { read = fileToUpload.read(buffer, filePosition, buffer.length); if (read == -1) { break; } if (Thread.currentThread().isInterrupted()) { LGR.warning("upload job is interrupted."); return null; } LGR.log(Level.FINE, "reading position {0} for file {1}", new Object[] { currentPosition, localPath }); byte[] bytesRead = Arrays.copyOf(buffer, read); contentRange = String.format("bytes %s-%s/*", currentPosition, currentPosition + read - 1); String checksum = TreeHashGenerator.calculateTreeHash(new ByteArrayInputStream(bytesRead)); byte[] binaryChecksum = BinaryUtils.fromHex(checksum); binaryChecksums.add(binaryChecksum); //Upload part. UploadMultipartPartRequest partRequest = new UploadMultipartPartRequest().withVaultName(vaultName) .withBody(new ByteArrayInputStream(bytesRead)).withChecksum(checksum) .withRange(contentRange).withUploadId(uploadId); try { GlacierFrame.getClient(region).uploadMultipartPart(partRequest); } catch (RequestTimeoutException e) { LGR.log(Level.SEVERE, "Request time out at {0}. Retrying in {1} s", new Object[] { HumanBytes.convert(currentPosition), RETRY_SEC }); LGR.log(Level.SEVERE, null, e); try { Thread.sleep(RETRY_SEC * 1000); } catch (InterruptedException ex) { LGR.log(Level.SEVERE, null, ex); return null; } try { GlacierFrame.getClient(region).uploadMultipartPart(partRequest); } catch (RequestTimeoutException ex) { LGR.log(Level.SEVERE, null, ex); LGR.severe("2nd time out. 
Giving up"); JOptionPane.showMessageDialog(null, "Error uploading"); return null; } } catch (Exception e) { LGR.log(Level.SEVERE, null, e); LGR.severe("Unanticipated error. Giving up."); return null; } if (Thread.currentThread().isInterrupted()) { LGR.warning("upload job is interrupted."); return null; } currentPosition = currentPosition + read; progress_reporter.setFilePosition(currentPosition); } } String checksum = TreeHashGenerator.calculateTreeHash(binaryChecksums); return checksum; }
From source file:de.kopis.glacier.commands.UploadMultipartArchiveCommand.java
License:Open Source License
/**
 * Uploads {@code file} to the given vault in {@code partSize}-byte parts under an
 * existing multipart upload, retrying each part up to five times.
 *
 * <p>Fixes over the previous version: removed a {@code FileInputStream} that was
 * opened and closed without ever being read, and each failed upload attempt is
 * now logged instead of being silently swallowed until the final failure.
 *
 * @param uploadId  the multipart upload id returned by initiate-multipart-upload
 * @param file      the local archive to upload
 * @param vaultName target Glacier vault
 * @param partSize  size of each part in bytes (all parts but the last)
 * @return the whole-archive tree hash computed from the per-part checksums
 * @throws AmazonClientException if a part still fails after five attempts
 * @throws IOException if the file cannot be read
 */
private String uploadParts(String uploadId, File file, final String vaultName, final Long partSize)
        throws AmazonServiceException, NoSuchAlgorithmException, AmazonClientException, IOException {
    String checksum = "";
    long currentPosition = 0;
    List<byte[]> binaryChecksums = new LinkedList<byte[]>();
    int counter = 1;
    int total = (int) Math.ceil(file.length() / (double) partSize);
    while (currentPosition < file.length()) {
        // The last part may be shorter than partSize.
        long length = partSize;
        if (currentPosition + partSize > file.length()) {
            length = file.length() - currentPosition;
        }
        Exception failedException = null;
        boolean completed = false;
        int tries = 0;
        while (!completed && tries < 5) {
            tries++;
            InputStream inputSubStream = newInputSubstream(file, currentPosition, length);
            // mark/reset so the stream can be consumed twice: once for the tree
            // hash, once as the request body.
            inputSubStream.mark(-1);
            checksum = TreeHashGenerator.calculateTreeHash(inputSubStream);
            byte[] binaryChecksum = BinaryUtils.fromHex(checksum);
            inputSubStream.reset();
            String range = "bytes " + currentPosition + "-" + (currentPosition + length - 1) + "/*";
            UploadMultipartPartRequest req = new UploadMultipartPartRequest().withChecksum(checksum)
                    .withBody(inputSubStream).withRange(range).withUploadId(uploadId)
                    .withVaultName(vaultName);
            try {
                UploadMultipartPartResult partResult = client.uploadMultipartPart(req);
                log.info(String.format("Part %d/%d (%s) uploaded, checksum: %s", counter, total, range,
                        partResult.getChecksum()));
                completed = true;
                binaryChecksums.add(binaryChecksum);
            } catch (Exception e) {
                // Log every failed attempt; previously failures were silent until
                // the fifth one surfaced as AmazonClientException below.
                log.info(String.format("Attempt %d/5 for part %d/%d (%s) failed: %s", tries, counter,
                        total, range, e.getMessage()));
                failedException = e;
            } finally {
                if (inputSubStream != null) {
                    try {
                        inputSubStream.close();
                    } catch (IOException ex) {
                        log.debug("Ignore failure in closing the Closeable", ex);
                    }
                }
            }
        }
        if (!completed && failedException != null)
            throw new AmazonClientException("Failed operation", failedException);
        currentPosition += partSize;
        ++counter;
    }
    checksum = TreeHashGenerator.calculateTreeHash(binaryChecksums);
    return checksum;
}
From source file:glacierpipe.GlacierPipe.java
License:Apache License
/**
 * Streams {@code in} to a new Glacier archive in {@code vaultName} as a multipart
 * upload: initiates the upload, buffers and uploads one part at a time (with
 * retries and exponential back-off), then completes the upload and returns the
 * archive location reported by Glacier.
 *
 * @param client      Glacier client used for all service calls
 * @param vaultName   target vault
 * @param archiveDesc archive description stored with the upload
 * @param in          source data; read until exhausted
 * @return the archive location from the complete-multipart-upload result
 * @throws IOException on stream failure or when a part fails {@code maxRetries} times
 */
public String pipe(AmazonGlacierClient client, String vaultName, String archiveDesc, InputStream in)
        throws IOException {
    long currentPosition = 0;
    int partId = 0;
    try {
        byte[] buffer = new byte[4096];
        // Running tree hash over the entire archive, fed via DigestInputStream.
        TreeHashMessageDigest completeHash = new TreeHashMessageDigest(MessageDigest.getInstance("SHA-256"));
        in = new DigestInputStream(in, completeHash);
        /**** Create an upload ID for the current upload ****/
        InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest().withVaultName(vaultName)
                .withArchiveDescription(archiveDesc).withPartSize(Long.toString(partSize));
        InitiateMultipartUploadResult result = client.initiateMultipartUpload(request);
        String uploadId = result.getUploadId();
        this.observer.gotUploadId(uploadId);
        /**** While there are still chunks to process ****/
        do {
            // Per-part tree hash, computed while the part is written into the buffer.
            TreeHashMessageDigest partHash = new TreeHashMessageDigest(MessageDigest.getInstance("SHA-256"));
            // Fill up the part buffer (this.buffer) from the input stream.
            try (OutputStream bufferOut = this.buffer.getOutputStream();
                    OutputStream observedOut = new ObservedOutputStream(bufferOut,
                            new BufferingObserver(this.observer, partId));
                    DigestOutputStream out = new DigestOutputStream(observedOut, partHash);) {
                int read = 0;
                while (this.buffer.getRemaining() > 0 && (read = in.read(buffer, 0,
                        (int) Math.min(this.buffer.getRemaining(), buffer.length))) >= 0) {
                    out.write(buffer, 0, read);
                }
            }
            currentPosition += this.buffer.getLength();
            // If we read zero bytes, we reached the end of the stream. Break.
            if (this.buffer.getLength() == 0) {
                break;
            }
            // Report the Tree Hash of this chunk
            byte[] byteChecksum = partHash.digest();
            String checksum = BinaryUtils.toHex(byteChecksum);
            this.observer.computedTreeHash(partId, byteChecksum);
            // Try to upload this chunk, retrying up to maxRetries times.
            int attempts = 0;
            do {
                try (InputStream bufferIn = this.buffer.getInputStream();
                        // KLUDGE: throttling belongs closer to EntitySerializer.serialize(),
                        // but there was no easy hook. client.uploadMultipartPart() first
                        // computes a SHA-256 over the request body and then calls reset()
                        // on the stream, so throttling is suspended until reset() has been
                        // seen at least once — otherwise the checksum pass would be
                        // throttled too.
                        // NOTE(review): the no-arg setBytesPerSecond() override below looks
                        // like the scrape dropped a parameter from the superclass method
                        // signature — confirm against ThrottledInputStream's source.
                        InputStream throttledIn = this.throttlingStrategy == null ? bufferIn
                                : new ThrottledInputStream(bufferIn, this.throttlingStrategy) {
                                    private long resets = 0;

                                    @Override
                                    public void setBytesPerSecond() {
                                        if (this.resets > 0) {
                                            super.setBytesPerSecond();
                                        }
                                    }

                                    @Override
                                    protected long getMaxRead(long currentTime) {
                                        // Unlimited until the SDK's pre-upload checksum
                                        // pass has finished (signalled by reset()).
                                        return this.resets > 0 ? super.getMaxRead(currentTime) : Long.MAX_VALUE;
                                    }

                                    @Override
                                    public synchronized void reset() throws IOException {
                                        super.reset();
                                        this.resets++;
                                    }
                                };
                        InputStream observedIn = new ObservedInputStream(throttledIn,
                                new UploadObserver(this.observer, partId));) {
                    UploadMultipartPartRequest partRequest = new UploadMultipartPartRequest()
                            .withVaultName(vaultName).withBody(observedIn).withChecksum(checksum)
                            .withRange(String.format("bytes %d-%d/*", currentPosition - this.buffer.getLength(),
                                    currentPosition - 1))
                            .withUploadId(uploadId).withAccountId("-");
                    UploadMultipartPartResult partResult = client.uploadMultipartPart(partRequest);
                    // Verify the service saw the same bytes we hashed locally.
                    if (!Arrays.equals(BinaryUtils.fromHex(partResult.getChecksum()), byteChecksum)) {
                        throw new AmazonClientException("Checksum mismatch");
                    }
                    break; // part uploaded successfully
                } catch (AmazonClientException e) {
                    attempts++;
                    observer.exceptionUploadingPart(partId, e, attempts, attempts < this.maxRetries);
                    if (attempts >= this.maxRetries) {
                        throw new IOException("Failed to upload after " + attempts + " attempts", e);
                    }
                } catch (IOException e) {
                    attempts++;
                    observer.exceptionUploadingPart(partId, e, attempts, attempts < this.maxRetries);
                    if (attempts >= this.maxRetries) {
                        throw new IOException("Failed to upload after " + attempts + " attempts", e);
                    }
                }
                // Exponential back-off (1.5^attempts seconds), capped at 300 s.
                try {
                    long sleepingFor = 1000 * (attempts < 15 ? (long) Math.pow(1.5, attempts) : 300);
                    this.observer.sleepingBeforeRetry(sleepingFor);
                    Thread.sleep(sleepingFor);
                } catch (InterruptedException e) {
                    throw new IOException("Upload interrupted", e);
                }
            } while (true);
            partId++;
        } while (this.buffer.getRemaining() == 0); // a partially-filled buffer means EOF
        byte[] complateHash = completeHash.digest();
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
                .withVaultName(vaultName).withUploadId(uploadId).withChecksum(BinaryUtils.toHex(complateHash))
                .withArchiveSize(Long.toString(currentPosition));
        CompleteMultipartUploadResult compResult = client.completeMultipartUpload(compRequest);
        String location = compResult.getLocation();
        this.observer.done(complateHash, location);
        return location;
    } catch (IOException e) {
        this.observer.fatalException(e);
        throw e;
    } catch (AmazonClientException e) {
        this.observer.fatalException(e);
        throw e;
    } catch (NoSuchAlgorithmException e) {
        // SHA-256 is mandated for every JRE; treat absence as an environment error.
        throw new RuntimeException("SHA-256 not available", e);
    }
}