Example usage for com.amazonaws.services.glacier AmazonGlacierClient completeMultipartUpload

Introduction

On this page you can find example usage of com.amazonaws.services.glacier AmazonGlacierClient completeMultipartUpload, collected from open-source projects.

Prototype

@Override
public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request) 

Document

You call this operation to inform Amazon S3 Glacier (Glacier) that all the archive parts have been uploaded and that Glacier can now assemble the archive from the uploaded parts.
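
For orientation, a minimal sketch of the final call is shown below. The vault name, upload ID, and checksum values are placeholders, and client is assumed to be an AmazonGlacierClient already configured with credentials and an endpoint; in practice the upload ID comes from initiateMultipartUpload, and the checksum is the hex-encoded SHA-256 tree hash of the assembled archive.

import com.amazonaws.services.glacier.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.glacier.model.CompleteMultipartUploadResult;

// Placeholder values for illustration only.
String vaultName = "my-vault";                // assumed vault name
String uploadId = "EXAMPLE-UPLOAD-ID";        // returned by initiateMultipartUpload
String treeHash = "...";                      // hex SHA-256 tree hash of the whole archive
long archiveSize = 8L * 1024 * 1024;          // total bytes uploaded across all parts

CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest()
        .withVaultName(vaultName)
        .withUploadId(uploadId)
        .withChecksum(treeHash)
        .withArchiveSize(Long.toString(archiveSize));

CompleteMultipartUploadResult result = client.completeMultipartUpload(request);
System.out.println("Archive location: " + result.getLocation());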

Usage

From source file:glacierpipe.GlacierPipe.java

License:Apache License

public String pipe(AmazonGlacierClient client, String vaultName, String archiveDesc, InputStream in)
        throws IOException {

    long currentPosition = 0;
    int partId = 0;

    try {
        byte[] buffer = new byte[4096];

        TreeHashMessageDigest completeHash = new TreeHashMessageDigest(MessageDigest.getInstance("SHA-256"));
        in = new DigestInputStream(in, completeHash);

        /**** Create an upload ID for the current upload ****/
        InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest().withVaultName(vaultName)
                .withArchiveDescription(archiveDesc).withPartSize(Long.toString(partSize));

        InitiateMultipartUploadResult result = client.initiateMultipartUpload(request);
        String uploadId = result.getUploadId();

        this.observer.gotUploadId(uploadId);

        /**** While there are still chunks to process ****/
        do {
            TreeHashMessageDigest partHash = new TreeHashMessageDigest(MessageDigest.getInstance("SHA-256"));

            // Fill up the buffer
            try (OutputStream bufferOut = this.buffer.getOutputStream();
                    OutputStream observedOut = new ObservedOutputStream(bufferOut,
                            new BufferingObserver(this.observer, partId));
                    DigestOutputStream out = new DigestOutputStream(observedOut, partHash);) {
                int read = 0;
                while (this.buffer.getRemaining() > 0 && (read = in.read(buffer, 0,
                        (int) Math.min(this.buffer.getRemaining(), buffer.length))) >= 0) {
                    out.write(buffer, 0, read);
                }
            }

            currentPosition += this.buffer.getLength();

            // If we read zero bytes, we reached the end of the stream.  Break.
            if (this.buffer.getLength() == 0) {
                break;
            }

            // Report the Tree Hash of this chunk
            byte[] byteChecksum = partHash.digest();
            String checksum = BinaryUtils.toHex(byteChecksum);
            this.observer.computedTreeHash(partId, byteChecksum);

            // Try to upload this chunk
            int attempts = 0;
            do {
                try (InputStream bufferIn = this.buffer.getInputStream();

                        // KLUDGE: Throttling really belongs closer to EntitySerializer.serialize(), but there
                        // wasn't an easy hook for it.  Throttling on input would work well enough, but
                        // client.uploadMultipartPart() calculates a SHA-256 checksum on the request before it
                        // sends it, then calls reset() on the stream.  Because we know this, don't throttle until
                        // reset() has been called at least once.
                        InputStream throttledIn = this.throttlingStrategy == null ? bufferIn
                                : new ThrottledInputStream(bufferIn, this.throttlingStrategy) {
                                    private long resets = 0;

                                    @Override
                                    public void setBytesPerSecond() {
                                        if (this.resets > 0) {
                                            super.setBytesPerSecond();
                                        }
                                    }

                                    @Override
                                    protected long getMaxRead(long currentTime) {
                                        return this.resets > 0 ? super.getMaxRead(currentTime) : Long.MAX_VALUE;
                                    }

                                    @Override
                                    public synchronized void reset() throws IOException {
                                        super.reset();
                                        this.resets++;
                                    }
                                };

                        InputStream observedIn = new ObservedInputStream(throttledIn,
                                new UploadObserver(this.observer, partId));) {

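                    // Glacier identifies each part by a Content-Range of the form
                    // "bytes <first>-<last>/*", hence the currentPosition bookkeeping.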
                    UploadMultipartPartRequest partRequest = new UploadMultipartPartRequest()
                            .withVaultName(vaultName).withBody(observedIn).withChecksum(checksum)
                            .withRange(String.format("bytes %d-%d/*", currentPosition - this.buffer.getLength(),
                                    currentPosition - 1))
                            .withUploadId(uploadId).withAccountId("-");

                    UploadMultipartPartResult partResult = client.uploadMultipartPart(partRequest);

                    if (!Arrays.equals(BinaryUtils.fromHex(partResult.getChecksum()), byteChecksum)) {
                        throw new AmazonClientException("Checksum mismatch");
                    }

                    break;
                } catch (AmazonClientException e) {
                    attempts++;
                    observer.exceptionUploadingPart(partId, e, attempts, attempts < this.maxRetries);

                    if (attempts >= this.maxRetries) {
                        throw new IOException("Failed to upload after " + attempts + " attempts", e);
                    }
                } catch (IOException e) {
                    attempts++;
                    observer.exceptionUploadingPart(partId, e, attempts, attempts < this.maxRetries);

                    if (attempts >= this.maxRetries) {
                        throw new IOException("Failed to upload after " + attempts + " attempts", e);
                    }
                }

                try {
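                    // Exponential backoff: 1.5^attempts seconds, capped at 300
                    // seconds from the 15th attempt onward.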
                    long sleepingFor = 1000 * (attempts < 15 ? (long) Math.pow(1.5, attempts) : 300);
                    this.observer.sleepingBeforeRetry(sleepingFor);
                    Thread.sleep(sleepingFor);
                } catch (InterruptedException e) {
                    throw new IOException("Upload interrupted", e);
                }
            } while (true);

            partId++;
        } while (this.buffer.getRemaining() == 0);

        byte[] completeHashBytes = completeHash.digest();

        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
                .withVaultName(vaultName).withUploadId(uploadId).withChecksum(BinaryUtils.toHex(completeHashBytes))
                .withArchiveSize(Long.toString(currentPosition));

        CompleteMultipartUploadResult compResult = client.completeMultipartUpload(compRequest);
        String location = compResult.getLocation();

        this.observer.done(completeHashBytes, location);
        return location;

    } catch (IOException e) {
        this.observer.fatalException(e);
        throw e;
    } catch (AmazonClientException e) {
        this.observer.fatalException(e);
        throw e;
    } catch (NoSuchAlgorithmException e) {
        throw new RuntimeException("SHA-256 not available", e);
    }
}
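
A few details of this example are worth noting. Each part's SHA-256 tree hash is computed while the buffer is filled and compared against the checksum Glacier returns from uploadMultipartPart, so corrupted parts are detected and retried; failed parts are retried with exponential backoff up to maxRetries; and the whole-archive tree hash accumulated by the DigestInputStream is what is finally passed to completeMultipartUpload, together with the byte count. This is what lets the pipeline upload a stream of unknown length, supplying the archive size only after the final part has been written.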

From source file:maebackup.MaeBackup.java

License:Open Source License

public static void upload(String lrzname) {
    try {
        System.out.println("Uploading to Glacier...");
        ClientConfiguration config = new ClientConfiguration();
        config.setProtocol(Protocol.HTTPS);
        AmazonGlacierClient client = new AmazonGlacierClient(credentials, config);
        client.setEndpoint(endpoint);

        File file = new File(lrzname);
        String archiveid = "";
        if (file.length() < 5 * 1024 * 1024) {
            System.out.println("File is small, uploading as single chunk");
            String treehash = TreeHashGenerator.calculateTreeHash(file);

            InputStream is = new FileInputStream(file);
            byte[] buffer = new byte[(int) file.length()];
            int bytes = is.read(buffer);
            if (bytes != file.length())
                throw new RuntimeException("Only read " + bytes + " of " + file.length()
                        + " byte file when preparing for upload.");
            is.close(); // the whole file is buffered in memory at this point
            InputStream bais = new ByteArrayInputStream(buffer);

            UploadArchiveRequest request = new UploadArchiveRequest(vaultname, lrzname, treehash, bais);
            UploadArchiveResult result = client.uploadArchive(request);
            archiveid = result.getArchiveId();
        } else {
            long chunks = file.length() / chunksize;
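            // Glacier allows at most 10,000 parts per multipart upload, so keep
            // doubling the chunk size until the part count fits under the limit.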
            while (chunks > 10000) {
                chunksize <<= 1;
                chunks = file.length() / chunksize;
            }
            String chunksizestr = Integer.toString(chunksize);
            System.out.println(
                    "Starting multipart upload: " + chunks + " full chunks of " + chunksizestr + " bytes");

            InitiateMultipartUploadResult imures = client.initiateMultipartUpload(
                    new InitiateMultipartUploadRequest(vaultname, lrzname, chunksizestr));

            String uploadid = imures.getUploadId();
            RandomAccessFile raf = new RandomAccessFile(file, "r");

            byte[] buffer = new byte[chunksize];

            for (long x = 0; x < chunks; x++) {
                try {
                    System.out.println("Uploading chunk " + x + "/" + chunks);

                    raf.seek(x * chunksize);
                    raf.readFully(buffer); // read() may return fewer bytes than requested

                    String parthash = TreeHashGenerator.calculateTreeHash(new ByteArrayInputStream(buffer));
                    String range = "bytes " + (x * chunksize) + "-" + ((x + 1) * chunksize - 1) + "/*";

                    client.uploadMultipartPart(new UploadMultipartPartRequest(vaultname, uploadid, parthash,
                            range, new ByteArrayInputStream(buffer)));
                } catch (Exception e) {
                    e.printStackTrace();
                    System.err.println("Error uploading chunk " + x + ", retrying...");
                    x--;
                }
            }

            if (file.length() > chunks * chunksize) {
                // Retry until the final partial chunk uploads successfully.
                while (true) {
                    try {
                        System.out.println("Uploading final partial chunk");
                        raf.seek(chunks * chunksize);
                        int bytes = raf.read(buffer);

                        String parthash = TreeHashGenerator
                                .calculateTreeHash(new ByteArrayInputStream(buffer, 0, bytes));
                        String range = "bytes " + (chunks * chunksize) + "-" + (file.length() - 1) + "/*";

                        client.uploadMultipartPart(new UploadMultipartPartRequest(vaultname, uploadid, parthash,
                                range, new ByteArrayInputStream(buffer, 0, bytes)));
                        break;
                    } catch (Exception e) {
                        e.printStackTrace();
                        System.err.println("Error uploading final chunk, retrying...");
                    }
                }
            }
            raf.close();

            System.out.println("Completing upload");
            String treehash = TreeHashGenerator.calculateTreeHash(file);
            CompleteMultipartUploadResult result = client
                    .completeMultipartUpload(new CompleteMultipartUploadRequest(vaultname, uploadid,
                            Long.toString(file.length()), treehash));
            archiveid = result.getArchiveId();
        }

        System.out.println("Uploaded " + lrzname + " to Glacier as ID " + archiveid);

        File listfile = new File(cachedir, "archives.lst");
        FileWriter fw = new FileWriter(listfile, true);
        fw.write(archiveid + " " + lrzname + "\n");
        fw.close();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
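
Unlike the streaming example above, this uploader knows the file size in advance: it derives the chunk size from the 10,000-part limit, recomputes the tree hash of the entire file with TreeHashGenerator.calculateTreeHash(file) (the helper class from the SDK's com.amazonaws.services.glacier package), and passes that hash together with the file length to completeMultipartUpload. Its retries are unbounded, which is simpler than the bounded backoff in the first example but can loop indefinitely on a persistent error.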