Example usage for com.amazonaws.services.s3.model UploadPartResult getPartETag

List of usage examples for com.amazonaws.services.s3.model UploadPartResult getPartETag

Introduction

This page collects example usage of com.amazonaws.services.s3.model.UploadPartResult.getPartETag, drawn from open source projects.

Prototype

public PartETag getPartETag() 

Document

Returns an identifier which identifies the upload part by its part number and the entity tag computed from the part's data.
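
To see where the return value fits, here is a minimal sketch of a complete multipart upload: every UploadPartResult contributes one PartETag, and the accumulated list is required by CompleteMultipartUploadRequest. The bucket and key names below are hypothetical.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.*;
import java.io.File;
import java.util.ArrayList;
import java.util.List;

public static void uploadExample(AmazonS3 s3, File file) {
    final String bucket = "example-bucket"; // hypothetical
    final String key = "example-key"; // hypothetical
    final long partSize = 5 * 1024 * 1024; // S3 minimum size for non-final parts

    InitiateMultipartUploadResult init = s3
            .initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key));

    List<PartETag> partETags = new ArrayList<PartETag>();
    long offset = 0;
    for (int partNumber = 1; offset < file.length(); partNumber++) {
        long size = Math.min(partSize, file.length() - offset);
        UploadPartResult result = s3.uploadPart(new UploadPartRequest().withBucketName(bucket)
                .withKey(key).withUploadId(init.getUploadId()).withPartNumber(partNumber)
                .withFile(file).withFileOffset(offset).withPartSize(size));
        // getPartETag() bundles the part number with the ETag computed by S3;
        // both are needed to complete the upload.
        partETags.add(result.getPartETag());
        offset += size;
    }

    s3.completeMultipartUpload(
            new CompleteMultipartUploadRequest(bucket, key, init.getUploadId(), partETags));
}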

Usage

From source file:c3.ops.priam.aws.S3PartUploader.java

License:Apache License

private Void uploadPart() throws AmazonClientException, BackupRestoreException {
    UploadPartRequest req = new UploadPartRequest();
    req.setBucketName(dataPart.getBucketName());
    req.setKey(dataPart.getS3key());
    req.setUploadId(dataPart.getUploadID());
    req.setPartNumber(dataPart.getPartNo());
    req.setPartSize(dataPart.getPartData().length);
    req.setMd5Digest(SystemUtils.toBase64(dataPart.getMd5()));
    req.setInputStream(new ByteArrayInputStream(dataPart.getPartData()));
    UploadPartResult res = client.uploadPart(req);
    PartETag partETag = res.getPartETag();
    if (!partETag.getETag().equals(SystemUtils.toHex(dataPart.getMd5())))
        throw new BackupRestoreException("Unable to match MD5 for part " + dataPart.getPartNo());
    partETags.add(partETag);
    return null;
}

From source file:com.eucalyptus.blockstorage.S3SnapshotTransfer.java

License:Open Source License

private PartETag uploadPart(SnapshotPart part, SnapshotProgressCallback progressCallback)
        throws SnapshotUploadPartException {
    try {
        part = part.updateStateUploading();
    } catch (Exception e) {
        LOG.debug("Failed to update part status in DB. Moving on. " + part);
    }

    try {
        LOG.debug("Uploading " + part);
        UploadPartResult uploadPartResult = retryAfterRefresh(
                new Function<UploadPartRequest, UploadPartResult>() {

                    @Override
                    @Nullable
                    public UploadPartResult apply(@Nullable UploadPartRequest arg0) {
                        eucaS3Client.refreshEndpoint();
                        return eucaS3Client.uploadPart(arg0);
                    }
                },
                new UploadPartRequest().withBucketName(part.getBucketName()).withKey(part.getKeyName())
                        .withUploadId(part.getUploadId()).withPartNumber(part.getPartNumber())
                        .withPartSize(part.getSize()).withFile(new File(part.getFileName())),
                REFRESH_TOKEN_RETRIES);

        progressCallback.update(part.getInputFileBytesRead());

        try {
            part = part.updateStateUploaded(uploadPartResult.getPartETag().getETag());
        } catch (Exception e) {
            LOG.debug("Failed to update part status in DB. Moving on. " + part);
        }
        LOG.debug("Uploaded " + part);
        return uploadPartResult.getPartETag();
    } catch (Exception e) {
        LOG.error("Failed to upload part " + part, e);
        try {
            part = part.updateStateFailed();
        } catch (Exception ie) {
            LOG.debug("Failed to update part status in DB. Moving on. " + part);
        }
        throw new SnapshotUploadPartException("Failed to upload part " + part, e);
    } finally {
        deleteFile(part.getFileName());
    }
}
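
The retryAfterRefresh helper above is internal to S3SnapshotTransfer and not shown on this page. Based on the call shape, a plausible sketch is a bounded retry loop over a Guava Function; the body below is an assumption, not the Eucalyptus implementation.

// Hypothetical sketch of the bounded retry helper used above. The Function's
// apply() refreshes the S3 endpoint itself before reattempting the upload.
private static <F, T> T retryAfterRefresh(Function<F, T> function, F input, int retries)
        throws SnapshotUploadPartException {
    AmazonClientException lastError = null;
    for (int attempt = 0; attempt <= retries; attempt++) {
        try {
            return function.apply(input);
        } catch (AmazonClientException e) {
            lastError = e;
        }
    }
    throw new SnapshotUploadPartException("Retries exhausted", lastError);
}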

From source file:com.example.jinux.mydemo.s3.Uploader.java

License:Apache License

/**
 * Initiate a multipart file upload to Amazon S3
 *
 * @return the URL of a successfully uploaded file
 */
public String start() {

    // initialize
    List<PartETag> partETags = new ArrayList<PartETag>();
    final long contentLength = file.length();
    long filePosition = 0;
    int startPartNumber = 1;

    userInterrupted = false;
    userAborted = false;
    bytesUploaded = 0;

    // check if we can resume an incomplete upload
    String uploadId = getCachedUploadId();
    Utils.log("start uploading");
    if (uploadId != null) {
        // we can resume the upload
        Log.i(TAG, "resuming upload for " + uploadId);

        // get the cached etags
        List<PartETag> cachedEtags = getCachedPartEtags();
        partETags.addAll(cachedEtags);

        // calculate the start position for resume
        startPartNumber = cachedEtags.size() + 1;
        filePosition = (startPartNumber - 1) * partSize;
        bytesUploaded = filePosition;

        Log.i(TAG, "resuming at part " + startPartNumber + " position " + filePosition);

    } else {
        // initiate a new multi part upload
        Log.i(TAG, "initiating new upload");

        Utils.log("the bucket = " + s3bucketName);
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(s3bucketName, s3key);
        configureInitiateRequest(initRequest);
        initRequest.getRequestClientOptions()
                .appendUserAgent("TransferService_multipart/" + VersionInfoUtils.getVersion());
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        uploadId = initResponse.getUploadId();

    }

    final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(s3bucketName, s3key,
            uploadId);

    for (int k = startPartNumber; filePosition < contentLength; k++) {

        long thisPartSize = Math.min(partSize, (contentLength - filePosition));

        Log.i(TAG, "starting file part " + k + " with size " + thisPartSize);

        UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(s3bucketName).withKey(s3key)
                .withUploadId(uploadId).withPartNumber(k).withFileOffset(filePosition).withFile(file)
                .withPartSize(thisPartSize);

        ProgressListener s3progressListener = new ProgressListener() {
            public void progressChanged(ProgressEvent progressEvent) {

                // bail out if user cancelled
                // TODO calling shutdown too brute force?
                if (userInterrupted) {
                    s3Client.shutdown();
                    throw new UploadIterruptedException("User interrupted");
                } else if (userAborted) {
                    // aborted requests cannot be resumed, so clear any cached etags
                    clearProgressCache();
                    s3Client.abortMultipartUpload(abortRequest);
                    s3Client.shutdown();
                }

                bytesUploaded += progressEvent.getBytesTransfered();

                //Log.d(TAG, "bytesUploaded=" + bytesUploaded);

                // broadcast progress
                float fpercent = ((bytesUploaded * 100) / contentLength);
                int percent = Math.round(fpercent);
                if (progressListener != null) {
                    progressListener.progressChanged(progressEvent, bytesUploaded, percent);
                }

            }
        };

        uploadRequest.setProgressListener(s3progressListener);

        UploadPartResult result = s3Client.uploadPart(uploadRequest);

        partETags.add(result.getPartETag());

        // cache the part progress for this upload
        if (k == 1) {
            initProgressCache(uploadId);
        }
        // store part etag
        cachePartEtag(result);

        filePosition += thisPartSize;
    }

    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(s3bucketName, s3key,
            uploadId, partETags);

    CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(compRequest);
    bytesUploaded = 0;

    Log.i(TAG, "upload complete for " + uploadId);

    clearProgressCache();

    return result.getLocation();

}

From source file:com.example.jinux.mydemo.s3.Uploader.java

License:Apache License

private void cachePartEtag(UploadPartResult result) {
    String serialEtag = result.getPartETag().getPartNumber() + PREFS_ETAG_SEP + result.getPartETag().getETag();
    ArrayList<String> etags = SharedPreferencesUtils.getStringArrayPref(prefs, s3key + PREFS_ETAGS);
    etags.add(serialEtag);
    SharedPreferencesUtils.setStringArrayPref(prefs, s3key + PREFS_ETAGS, etags);
}
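
cachePartEtag serializes each part as partNumber + PREFS_ETAG_SEP + eTag in shared preferences. The matching reader used by start(), getCachedPartEtags(), is not shown on this page; a plausible sketch, assuming the same constants and helpers, would be:

// Hypothetical counterpart to cachePartEtag(): rebuild PartETag objects
// from the serialized "partNumber<SEP>eTag" strings stored above.
private List<PartETag> getCachedPartEtags() {
    List<PartETag> etags = new ArrayList<PartETag>();
    for (String serialEtag : SharedPreferencesUtils.getStringArrayPref(prefs, s3key + PREFS_ETAGS)) {
        String[] parts = serialEtag.split(PREFS_ETAG_SEP);
        etags.add(new PartETag(Integer.parseInt(parts[0]), parts[1]));
    }
    return etags;
}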

From source file:com.handywedge.binarystore.store.aws.BinaryStoreManagerImpl.java

License:MIT License

@Override
public BinaryInfo upload(StorageInfo storage, BinaryInfo binary, InputStream inStream) throws StoreException {
    logger.debug("={}", storage);
    logger.debug("?={}", binary);

    AmazonS3 s3client = getS3Client(binary.getBucketName());

    ObjectMetadata oMetadata = new ObjectMetadata();
    oMetadata.setContentType(binary.getContentType());

    // Initiate the multipart upload
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(binary.getBucketName(),
            binary.getFileName(), oMetadata);
    InitiateMultipartUploadResult initResponse = s3client.initiateMultipartUpload(initRequest);

    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        long written = IOUtils.copyLarge(inStream, baos, 0, BINARY_PART_SIZE_5MB);

        byte[] data = baos.toByteArray();
        InputStream awsInputStream = new ByteArrayInputStream(data);

        if (written < BINARY_PART_SIZE_5MB) {
            oMetadata.setContentLength(written);
            s3client.putObject(binary.getBucketName(), binary.getFileName(), awsInputStream, oMetadata);
        } else {
            int firstByte = 0;
            int partNumber = 1;
            boolean isFirstChunck = true;
            boolean overSizeLimit = false;
            List<PartETag> partETags = new ArrayList<PartETag>();
            InputStream firstChunck = new ByteArrayInputStream(data);
            PushbackInputStream chunckableInputStream = new PushbackInputStream(inStream, 1);

            long maxSize = BINARY_PART_SIZE_5MB * 1024;
            String maxSizeStr = "5GB";
            String prefix = MDC.get("requestId");
            while (-1 != (firstByte = chunckableInputStream.read())) {
                long partSize = 0;
                chunckableInputStream.unread(firstByte);
                File tempFile = File.createTempFile(prefix.concat("-part").concat(String.valueOf(partNumber)),
                        null);
                tempFile.deleteOnExit();
                try (OutputStream os = new BufferedOutputStream(
                        new FileOutputStream(tempFile.getAbsolutePath()))) {

                    if (isFirstChunck) {
                        partSize = IOUtils.copyLarge(firstChunck, os, 0, (BINARY_PART_SIZE_5MB));
                        isFirstChunck = false;
                    } else {
                        partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (BINARY_PART_SIZE_5MB));
                    }
                    written += partSize;

                    if (written > maxSize) { // 5GB
                        overSizeLimit = true;
                        logger.warn("OVERSIZED FILE ({}). STARTING ABORT", written);
                        break;
                    }
                }

                FileInputStream chunk = new FileInputStream(tempFile);
                Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
                if (!isLastPart) {
                    chunckableInputStream.unread(firstByte);
                }

                oMetadata.setContentLength(partSize);

                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(binary.getBucketName())
                        .withKey(binary.getFileName()).withUploadId(initResponse.getUploadId())
                        .withObjectMetadata(oMetadata).withInputStream(chunk).withPartSize(partSize)
                        .withPartNumber(partNumber).withLastPart(isLastPart);
                UploadPartResult result = s3client.uploadPart(uploadRequest);
                partETags.add(result.getPartETag());
                partNumber++;
            }

            if (overSizeLimit) {
                ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(
                        binary.getBucketName());
                MultipartUploadListing listResult = s3client.listMultipartUploads(listRequest);

                int timesIterated = 20;
                // loop and abort all the multipart uploads
                while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {
                    s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                            binary.getFileName(), initResponse.getUploadId()));
                    Thread.sleep(1000);
                    timesIterated--;
                    listResult = s3client.listMultipartUploads(listRequest);
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }
                if (timesIterated == 0) {
                    logger.warn("Files parts that couldn't be aborted in 20 seconds are:");
                    Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads()
                            .iterator();
                    while (multipartUploadIterator.hasNext()) {
                        logger.warn(multipartUploadIterator.next().getKey());
                    }
                }
                throw new StoreException(HttpStatus.SC_REQUEST_TOO_LONG, ErrorClassification.UPLOAD_TOO_LARGE,
                        maxSizeStr);
            } else {
                CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                        binary.getBucketName(), binary.getFileName(), initResponse.getUploadId(), partETags);

                CompleteMultipartUploadResult comMPUResult = s3client.completeMultipartUpload(compRequest);
                logger.debug("CompleteMultipartUploadResult={}", comMPUResult);
            }
        }
    } catch (AmazonServiceException ase) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ase,
                binary.toString());
    } catch (AmazonClientException ace) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ace,
                binary.toString());
    } catch (IOException ioe) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, ioe,
                binary.toString());
    } catch (InterruptedException itre) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, itre,
                binary.toString());
    } finally {
        if (inStream != null) {
            try {
                inStream.close();
            } catch (Exception e) {
            }
        }
    }

    return getBinaryInfo(s3client, binary.getBucketName(), binary.getFileName());
}

From source file:com.netflix.bdp.s3.S3Util.java

License:Apache License

public static PendingUpload multipartUpload(AmazonS3 client, File localFile, String partition, String bucket,
        String key, long uploadPartSize) {

    InitiateMultipartUploadResult initiate = client
            .initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key));
    String uploadId = initiate.getUploadId();

    boolean threw = true;
    try {
        Map<Integer, String> etags = Maps.newLinkedHashMap();

        long offset = 0;
        long numParts = (localFile.length() / uploadPartSize
                + ((localFile.length() % uploadPartSize) > 0 ? 1 : 0));

        Preconditions.checkArgument(numParts > 0, "Cannot upload 0 byte file: " + localFile);

        for (int partNumber = 1; partNumber <= numParts; partNumber += 1) {
            long size = Math.min(localFile.length() - offset, uploadPartSize);
            UploadPartRequest part = new UploadPartRequest().withBucketName(bucket).withKey(key)
                    .withPartNumber(partNumber).withUploadId(uploadId).withFile(localFile)
                    .withFileOffset(offset).withPartSize(size).withLastPart(partNumber == numParts);

            UploadPartResult partResult = client.uploadPart(part);
            PartETag etag = partResult.getPartETag();
            etags.put(etag.getPartNumber(), etag.getETag());

            offset += uploadPartSize;
        }

        PendingUpload pending = new PendingUpload(partition, bucket, key, uploadId, etags);

        threw = false;

        return pending;

    } finally {
        if (threw) {
            try {
                client.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
            } catch (AmazonClientException e) {
                LOG.error("Failed to abort multi-part upload", e);
            }
        }
    }
}
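
multipartUpload hands the collected ETags to PendingUpload as a Map<Integer, String> rather than completing immediately; the commit step lives elsewhere in the project. A hedged sketch of what that completion would look like, rebuilding PartETags from the map:

// Hypothetical completion step for the PendingUpload above: convert the
// Map<Integer, String> of part numbers to ETags back into PartETags.
static void completePendingUpload(AmazonS3 client, String bucket, String key, String uploadId,
        Map<Integer, String> etags) {
    List<PartETag> partETags = new ArrayList<PartETag>(etags.size());
    for (Map.Entry<Integer, String> entry : etags.entrySet()) {
        partETags.add(new PartETag(entry.getKey(), entry.getValue()));
    }
    client.completeMultipartUpload(
            new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags));
}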

From source file:com.netflix.exhibitor.core.backup.s3.S3BackupProvider.java

License:Apache License

private PartETag uploadChunk(byte[] buffer, int bytesRead, InitiateMultipartUploadResult initResponse,
        int index) throws Exception {
    byte[] md5 = S3Utils.md5(buffer, bytesRead);

    UploadPartRequest request = new UploadPartRequest();
    request.setBucketName(initResponse.getBucketName());
    request.setKey(initResponse.getKey());
    request.setUploadId(initResponse.getUploadId());
    request.setPartNumber(index);
    request.setPartSize(bytesRead);
    request.setMd5Digest(S3Utils.toBase64(md5));
    request.setInputStream(new ByteArrayInputStream(buffer, 0, bytesRead));

    UploadPartResult response = s3Client.uploadPart(request);
    PartETag partETag = response.getPartETag();
    if (!response.getPartETag().getETag().equals(S3Utils.toHex(md5))) {
        throw new Exception("Unable to match MD5 for part " + index);
    }

    return partETag;
}
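
Like the Priam example at the top of this page, uploadChunk verifies that the returned ETag matches a locally computed MD5 of the part data. A hedged sketch of a caller, assuming a fixed 5 MB chunk size and the initResponse from initiateMultipartUpload:

// Hypothetical caller: fill a fixed-size buffer from the stream and upload
// each chunk in order, collecting the verified PartETags.
private List<PartETag> uploadAllChunks(InputStream in, InitiateMultipartUploadResult initResponse)
        throws Exception {
    List<PartETag> partETags = new ArrayList<PartETag>();
    byte[] buffer = new byte[5 * 1024 * 1024]; // S3 minimum size for non-final parts
    int index = 1;
    while (true) {
        int bytesRead = 0;
        int n;
        while (bytesRead < buffer.length
                && (n = in.read(buffer, bytesRead, buffer.length - bytesRead)) != -1) {
            bytesRead += n;
        }
        if (bytesRead == 0) {
            break; // stream exhausted on a chunk boundary
        }
        partETags.add(uploadChunk(buffer, bytesRead, initResponse, index++));
        if (bytesRead < buffer.length) {
            break; // short read: this was the final chunk
        }
    }
    return partETags;
}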

From source file:com.readystatesoftware.simpl3r.Uploader.java

License:Apache License

/**
 * Initiate a multipart file upload to Amazon S3
 *
 * @return the URL of a successfully uploaded file
 */
public String start() {

    // initialize
    List<PartETag> partETags = new ArrayList<PartETag>();
    final long contentLength = file.length();
    long filePosition = 0;
    int startPartNumber = 1;

    userInterrupted = false;
    userAborted = false;
    bytesUploaded = 0;

    // check if we can resume an incomplete upload
    String uploadId = getCachedUploadId();

    if (uploadId != null) {
        // we can resume the upload
        Log.i(TAG, "resuming upload for " + uploadId);

        // get the cached etags
        List<PartETag> cachedEtags = getCachedPartEtags();
        partETags.addAll(cachedEtags);

        // calculate the start position for resume
        startPartNumber = cachedEtags.size() + 1;
        filePosition = (startPartNumber - 1) * partSize;
        bytesUploaded = filePosition;

        Log.i(TAG, "resuming at part " + startPartNumber + " position " + filePosition);

    } else {
        // initiate a new multi part upload
        Log.i(TAG, "initiating new upload");

        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(s3bucketName, s3key);
        configureInitiateRequest(initRequest);
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        uploadId = initResponse.getUploadId();

    }

    final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(s3bucketName, s3key,
            uploadId);

    for (int k = startPartNumber; filePosition < contentLength; k++) {

        long thisPartSize = Math.min(partSize, (contentLength - filePosition));

        Log.i(TAG, "starting file part " + k + " with size " + thisPartSize);

        UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(s3bucketName).withKey(s3key)
                .withUploadId(uploadId).withPartNumber(k).withFileOffset(filePosition).withFile(file)
                .withPartSize(thisPartSize);

        ProgressListener s3progressListener = new ProgressListener() {
            public void progressChanged(ProgressEvent progressEvent) {

                // bail out if user cancelled
                // TODO calling shutdown too brute force?
                if (userInterrupted) {
                    s3Client.shutdown();
                    throw new UploadIterruptedException("User interrupted");
                } else if (userAborted) {
                    // aborted requests cannot be resumed, so clear any cached etags
                    clearProgressCache();
                    s3Client.abortMultipartUpload(abortRequest);
                    s3Client.shutdown();
                }

                bytesUploaded += progressEvent.getBytesTransfered();

                //Log.d(TAG, "bytesUploaded=" + bytesUploaded);

                // broadcast progress
                float fpercent = ((bytesUploaded * 100) / contentLength);
                int percent = Math.round(fpercent);
                if (progressListener != null) {
                    progressListener.progressChanged(progressEvent, bytesUploaded, percent);
                }

            }
        };

        uploadRequest.setProgressListener(s3progressListener);

        UploadPartResult result = s3Client.uploadPart(uploadRequest);

        partETags.add(result.getPartETag());

        // cache the part progress for this upload
        if (k == 1) {
            initProgressCache(uploadId);
        }
        // store part etag
        cachePartEtag(result);

        filePosition += thisPartSize;
    }

    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(s3bucketName, s3key,
            uploadId, partETags);

    CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(compRequest);
    bytesUploaded = 0;

    Log.i(TAG, "upload complete for " + uploadId);

    clearProgressCache();

    return result.getLocation();

}

From source file:com.travoca.app.utils.amazon.Uploader.java

License:Apache License

/**
 * Initiate a multipart file upload to Amazon S3
 *
 * @return the URL of a successfully uploaded file
 */
public String start() {

    // initialize
    List<PartETag> partETags = new ArrayList<PartETag>();
    final long contentLength = file.length();
    long filePosition = 0;
    int startPartNumber = 1;

    userInterrupted = false;
    userAborted = false;
    bytesUploaded = 0;

    // check if we can resume an incomplete upload
    String uploadId = getCachedUploadId();

    if (uploadId != null) {
        // we can resume the upload
        Log.i(TAG, "resuming upload for " + uploadId);

        // get the cached etags
        List<PartETag> cachedEtags = getCachedPartEtags();
        partETags.addAll(cachedEtags);

        // calculate the start position for resume
        startPartNumber = cachedEtags.size() + 1;
        filePosition = (startPartNumber - 1) * partSize;
        bytesUploaded = filePosition;

        Log.i(TAG, "resuming at part " + startPartNumber + " position " + filePosition);

    } else {
        // initiate a new multi part upload
        Log.i(TAG, "initiating new upload");

        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(s3bucketName, s3key);
        configureInitiateRequest(initRequest);
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        uploadId = initResponse.getUploadId();
        Log.i(TAG, "upload to " + s3bucketName + "/" + s3key + " id=" + uploadId);
    }

    final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(s3bucketName, s3key,
            uploadId);

    for (int k = startPartNumber; filePosition < contentLength; k++) {

        long thisPartSize = Math.min(partSize, (contentLength - filePosition));

        Log.i(TAG, "starting file part " + k + " with size " + thisPartSize);

        UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(s3bucketName).withKey(s3key)
                .withUploadId(uploadId).withPartNumber(k).withFileOffset(filePosition).withFile(file)
                .withPartSize(thisPartSize);

        ProgressListener s3progressListener = new ProgressListener() {
            public void progressChanged(ProgressEvent progressEvent) {

                // bail out if user cancelled
                // TODO calling shutdown too brute force?
                if (userInterrupted) {
                    s3Client.shutdown();
                    throw new UploadIterruptedException("User interrupted");
                } else if (userAborted) {
                    // aborted requests cannot be resumed, so clear any cached etags
                    clearProgressCache();
                    s3Client.abortMultipartUpload(abortRequest);
                    s3Client.shutdown();
                }

                bytesUploaded += progressEvent.getBytesTransfered();

                //Log.d(TAG, "bytesUploaded=" + bytesUploaded);

                // broadcast progress
                float fpercent = ((bytesUploaded * 100) / contentLength);
                int percent = Math.round(fpercent);
                if (progressListener != null) {
                    progressListener.progressChanged(progressEvent, bytesUploaded, percent);
                }

            }
        };

        uploadRequest.setProgressListener(s3progressListener);

        UploadPartResult result = s3Client.uploadPart(uploadRequest);

        partETags.add(result.getPartETag());

        // cache the part progress for this upload
        if (k == 1) {
            initProgressCache(uploadId);
        }
        // store part etag
        cachePartEtag(result);

        filePosition += thisPartSize;
    }

    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(s3bucketName, s3key,
            uploadId, partETags);

    CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(compRequest);
    bytesUploaded = 0;

    Log.i(TAG, "upload complete for " + uploadId);

    clearProgressCache();

    return result.getLocation();

}

From source file:eu.stratosphere.nephele.fs.s3.S3DataOutputStream.java

License:Apache License

private void uploadPartAndFlushBuffer() throws IOException {

    boolean operationSuccessful = false;

    if (this.uploadId == null) {
        this.uploadId = initiateMultipartUpload();
    }

    try {

        if (this.partNumber >= MAX_PART_NUMBER) {
            throw new IOException("Cannot upload any more data: maximum part number reached");
        }

        final InputStream inputStream = new InternalUploadInputStream(this.buf, this.bytesWritten);
        final UploadPartRequest request = new UploadPartRequest();
        request.setBucketName(this.bucket);
        request.setKey(this.object);
        request.setInputStream(inputStream);
        request.setUploadId(this.uploadId);
        request.setPartSize(this.bytesWritten);
        request.setPartNumber(this.partNumber++);

        final UploadPartResult result = this.s3Client.uploadPart(request);
        this.partETags.add(result.getPartETag());

        this.bytesWritten = 0;
        operationSuccessful = true;

    } catch (AmazonServiceException e) {
        throw new IOException(StringUtils.stringifyException(e));
    } finally {
        if (!operationSuccessful) {
            abortUpload();
        }
    }
}
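
uploadPartAndFlushBuffer accumulates PartETags across flushes; completing the upload with that list would happen when the stream is closed. A hedged sketch of that counterpart, assuming the fields used above and the same error-handling style:

// Hypothetical counterpart called from close(): complete the multipart
// upload using the PartETags accumulated by uploadPartAndFlushBuffer().
private void completeUpload() throws IOException {
    try {
        this.s3Client.completeMultipartUpload(new CompleteMultipartUploadRequest(this.bucket,
                this.object, this.uploadId, this.partETags));
    } catch (AmazonServiceException e) {
        abortUpload();
        throw new IOException(StringUtils.stringifyException(e));
    }
}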