Example usage for com.amazonaws.services.s3 AmazonS3 getCachedResponseMetadata


Introduction

This page shows example usage of com.amazonaws.services.s3 AmazonS3 getCachedResponseMetadata.

Prototype

public S3ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request);

Document

Gets additional metadata for a previously executed successful request.
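
The sketch below is not taken from the source file in the Usage section; it is a minimal, self-contained illustration of the call, assuming a default client and placeholder bucket and key names. Any AmazonWebServiceRequest that has been executed successfully against the same client can be passed in.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.S3ResponseMetadata;
import com.amazonaws.services.s3.model.GetObjectMetadataRequest;

public class CachedResponseMetadataExample {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Execute any request first; a HEAD-object request is used here as an example.
        // "example-bucket" and "example-key" are placeholders.
        GetObjectMetadataRequest request = new GetObjectMetadataRequest("example-bucket", "example-key");
        s3.getObjectMetadata(request);

        // Look up the metadata the client cached for that request object.
        S3ResponseMetadata metadata = s3.getCachedResponseMetadata(request);
        System.out.println("x-amz-request-id: " + metadata.getRequestId());
        System.out.println("x-amz-id-2: " + metadata.getHostId());
    }
}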

Usage

From source file: c3.ops.priam.aws.S3FileSystem.java

License: Apache License

@Override
public void upload(AbstractBackupPath path, InputStream in) throws BackupRestoreException {
    uploadCount.incrementAndGet();
    AmazonS3 s3Client = getS3Client();
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(config.getBackupPrefix(),
            path.getRemotePath());
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
    DataPart part = new DataPart(config.getBackupPrefix(), path.getRemotePath(), initResponse.getUploadId());
    List<PartETag> partETags = Lists.newArrayList();
    long chunkSize = config.getBackupChunkSize();
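    // Grow the chunk size if the configured size would require more than MAX_CHUNKS parts for this file.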
    if (path.getSize() > 0)
        chunkSize = (path.getSize() / chunkSize >= MAX_CHUNKS) ? (path.getSize() / (MAX_CHUNKS - 1))
                : chunkSize;
    logger.info(String.format("Uploading to %s/%s with chunk size %d", config.getBackupPrefix(),
            path.getRemotePath(), chunkSize));
    try {
        Iterator<byte[]> chunks = compress.compress(in, chunkSize);
        // Upload parts.
        int partNum = 0;
        while (chunks.hasNext()) {
            byte[] chunk = chunks.next();
            rateLimiter.acquire(chunk.length);
            DataPart dp = new DataPart(++partNum, chunk, config.getBackupPrefix(), path.getRemotePath(),
                    initResponse.getUploadId());
            S3PartUploader partUploader = new S3PartUploader(s3Client, dp, partETags);
            executor.submit(partUploader);
            bytesUploaded.addAndGet(chunk.length);
        }
        executor.sleepTillEmpty();
        if (partNum != partETags.size())
            throw new BackupRestoreException("Number of parts(" + partNum
                    + ")  does not match the uploaded parts(" + partETags.size() + ")");
        new S3PartUploader(s3Client, part, partETags).completeUpload();

        if (logger.isDebugEnabled()) {
            final S3ResponseMetadata responseMetadata = s3Client.getCachedResponseMetadata(initRequest);
            final String requestId = responseMetadata.getRequestId(); // "x-amz-request-id" header
            final String hostId = responseMetadata.getHostId(); // "x-amz-id-2" header
            logger.debug("S3 AWS x-amz-request-id[" + requestId + "], and x-amz-id-2[" + hostId + "]");
        }

    } catch (Exception e) {
        new S3PartUploader(s3Client, part, partETags).abortUpload();
        throw new BackupRestoreException("Error uploading file " + path.getFileName(), e);
    } finally {
        IOUtils.closeQuietly(in);
    }
}
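
Note that getCachedResponseMetadata is passed the same initRequest instance that was given to initiateMultipartUpload earlier in the method; that request object is how the client looks up the metadata it cached, so the x-amz-request-id and x-amz-id-2 values logged here belong to the multipart-upload initiation call.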