Example usage for com.amazonaws.services.s3.transfer Upload waitForCompletion

Introduction

On this page you can find example usages of com.amazonaws.services.s3.transfer Upload waitForCompletion.

Prototype

public void waitForCompletion() throws AmazonClientException, AmazonServiceException, InterruptedException;

Document

Waits for this transfer to complete.
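
For orientation, here is a minimal sketch of the typical calling pattern, assuming the AWS SDK for Java 1.x; the bucket name, object key, and local file path below are placeholders:

import java.io.File;

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

public class WaitForCompletionSketch {

    public static void main(String[] args) {
        // TransferManagerBuilder resolves credentials and region from the default provider chains.
        TransferManager tm = TransferManagerBuilder.standard().build();
        try {
            // upload() returns immediately; the transfer runs on the TransferManager's internal threads.
            Upload upload = tm.upload("example-bucket", "example/key.txt", new File("/tmp/example.txt"));

            // waitForCompletion() blocks the calling thread until the transfer finishes and
            // rethrows any AmazonClientException/AmazonServiceException raised during the upload.
            upload.waitForCompletion();
        } catch (AmazonClientException | InterruptedException e) {
            // InterruptedException is thrown if the waiting thread is interrupted.
            throw new RuntimeException("Upload failed or was interrupted", e);
        } finally {
            tm.shutdownNow(); // by default this also shuts down the underlying AmazonS3 client
        }
    }
}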

Usage

From source file:fr.eurecom.hybris.kvs.drivers.AmazonKvs.java

License:Apache License

public void put(String key, byte[] value) throws IOException {
    try {
        ByteArrayInputStream bais = new ByteArrayInputStream(value);

        ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(value.length);

        PutObjectRequest request = new PutObjectRequest(this.rootContainer, key, bais, om);
        request.setStorageClass(StorageClass.ReducedRedundancy);

        Upload upload = this.tm.upload(request); // NB: asynchronous, returns immediately
        upload.waitForCompletion();
    } catch (AmazonClientException | InterruptedException e) {
        throw new IOException(e);
    }
}

From source file:hu.mta.sztaki.lpds.cloud.entice.imageoptimizer.iaashandler.amazontarget.Storage.java

License:Apache License

/**
 * @param file Local file to upload
 * @param endpoint S3 endpoint URL
 * @param accessKey Access key
 * @param secretKey Secret key
 * @param bucket Bucket name 
 * @param path Key name (path + file name)
 * @throws Exception On any error
 */
public static void upload(File file, String endpoint, String accessKey, String secretKey, String bucket,
        String path) throws Exception {
    AmazonS3Client amazonS3Client = null;
    try {
        AWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey);
        ClientConfiguration clientConfiguration = new ClientConfiguration();
        clientConfiguration.setMaxConnections(MAX_CONNECTIONS);
        clientConfiguration.setMaxErrorRetry(PredefinedRetryPolicies.DEFAULT_MAX_ERROR_RETRY);
        clientConfiguration.setConnectionTimeout(ClientConfiguration.DEFAULT_CONNECTION_TIMEOUT);
        amazonS3Client = new AmazonS3Client(awsCredentials, clientConfiguration);
        S3ClientOptions clientOptions = new S3ClientOptions().withPathStyleAccess(true);
        amazonS3Client.setS3ClientOptions(clientOptions);
        amazonS3Client.setEndpoint(endpoint);
        //         amazonS3Client.putObject(new PutObjectRequest(bucket, path, file)); // up to 5GB
        TransferManager tm = new TransferManager(amazonS3Client); // up to 5TB
        Upload upload = tm.upload(bucket, path, file);
        // while (!upload.isDone()) { upload.getProgress().getBytesTransferred(); Thread.sleep(1000); } // to get progress
        upload.waitForCompletion();
        tm.shutdownNow();
    } catch (AmazonServiceException x) {
        Shrinker.myLogger.info("upload error: " + x.getMessage());
        throw new Exception("upload exception", x);
    } catch (AmazonClientException x) {
        Shrinker.myLogger.info("upload error: " + x.getMessage());
        throw new Exception("upload exception", x);
    } finally {
        if (amazonS3Client != null) {
            try {
                amazonS3Client.shutdown();
            } catch (Exception e) {
            }
        }
    }
}

From source file:jenkins.plugins.itemstorage.s3.Uploads.java

License:Open Source License

private void finishUploading(File file, Upload upload) throws InterruptedException {
    if (upload == null) {
        LOGGER.info("File: " + file.getName() + " already was uploaded");
        return;
    }
    try {
        upload.waitForCompletion();
    } finally {
        closeStream(file, openedStreams.remove(file));
    }
}

From source file:jp.classmethod.aws.gradle.s3.AmazonS3ProgressiveFileUploadTask.java

License:Apache License

@TaskAction
public void upload() throws InterruptedException {
    // to enable conventionMappings feature
    String bucketName = getBucketName();
    String key = getKey();
    File file = getFile();

    if (bucketName == null) {
        throw new GradleException("bucketName is not specified");
    }
    if (key == null) {
        throw new GradleException("key is not specified");
    }
    if (file == null) {
        throw new GradleException("file is not specified");
    }
    if (file.isFile() == false) {
        throw new GradleException("file must be regular file");
    }

    AmazonS3PluginExtension ext = getProject().getExtensions().getByType(AmazonS3PluginExtension.class);
    AmazonS3 s3 = ext.getClient();

    TransferManager s3mgr = TransferManagerBuilder.standard().withS3Client(s3).build();
    getLogger().info("Uploading... s3://{}/{}", bucketName, key);

    Upload upload = s3mgr.upload(
            new PutObjectRequest(getBucketName(), getKey(), getFile()).withMetadata(getObjectMetadata()));
    upload.addProgressListener(new ProgressListener() {

        public void progressChanged(ProgressEvent event) {
            getLogger().info("  {}% uploaded", upload.getProgress().getPercentTransferred());
        }
    });
    upload.waitForCompletion();
    setResourceUrl(s3.getUrl(bucketName, key).toString());
    getLogger().info("Upload completed: {}", getResourceUrl());
}

From source file:n3phele.agent.repohandlers.S3Large.java

License:Open Source License

public Origin put(InputStream input, long length, String encoding) {
    Origin result = new Origin(source + "/" + root + "/" + key, 0, null, null);
    TransferManager tm = null;
    try {
        tm = new TransferManager(this.credentials);
        tm.getAmazonS3Client().setEndpoint(source.toString());

        objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(this.length = length);
        this.encoding = encoding;
        if (encoding != null)
            objectMetadata.setContentType(this.encoding);
        log.info("Output: " + source + "/" + root + "/" + key + " Content-Type: " + encoding + "length: "
                + length);
        Upload upload = tm.upload(root, key, input, objectMetadata);
        upload.waitForCompletion();
        // PutObjectResult object = s3().putObject(root, key, input, objectMetadata);
        result.setLength(length);
        ObjectMetadata od = s3().getObjectMetadata(root, key);
        result.setModified(od.getLastModified());
    } catch (AmazonServiceException e) {
        throw e;
    } catch (AmazonClientException e) {
        throw e;
    } catch (InterruptedException e) {
        throw new AmazonClientException(e.getMessage());
    } finally {
        try {
            input.close();
        } catch (IOException e) {
        }
        try {
            tm.shutdownNow();
        } catch (Exception e) {
        }
        try {
            s3().shutdown();
        } catch (Exception e) {
        }
    }
    return result;

}

From source file:org.alanwilliamson.amazon.s3.BackgroundUploader.java

License:Open Source License

private void uploadFile(Map<String, Object> jobFile) {

    File localFile = new File((String) jobFile.get("localpath"));
    if (!localFile.isFile()) {
        removeJobFile(jobFile);
        callbackCfc(jobFile, false, "local file no longer exists");
        cfEngine.log("AmazonS3Write.BackgroundUploader: file no longer exists=" + localFile.getName());
        return;
    }

    // Setup the object data
    ObjectMetadata omd = new ObjectMetadata();
    if (jobFile.containsKey("metadata"))
        omd.setUserMetadata((Map<String, String>) jobFile.get("metadata"));

    TransferManager tm = null;
    AmazonS3 s3Client = null;
    try {
        AmazonKey amazonKey = (AmazonKey) jobFile.get("amazonkey");
        s3Client = new AmazonBase().getAmazonS3(amazonKey);

        PutObjectRequest por = new PutObjectRequest((String) jobFile.get("bucket"), (String) jobFile.get("key"),
                localFile);
        por.setMetadata(omd);
        por.setStorageClass((StorageClass) jobFile.get("storage"));

        if (jobFile.containsKey("acl"))
            por.setCannedAcl(amazonKey.getAmazonCannedAcl((String) jobFile.get("acl")));

        if (jobFile.containsKey("aes256key"))
            por.setSSECustomerKey(new SSECustomerKey((String) jobFile.get("aes256key")));

        if (jobFile.containsKey("customheaders")) {
            Map<String, String> customheaders = (Map) jobFile.get("customheaders");

            Iterator<String> it = customheaders.keySet().iterator();
            while (it.hasNext()) {
                String k = it.next();
                por.putCustomRequestHeader(k, customheaders.get(k));
            }
        }

        long startTime = System.currentTimeMillis();
        tm = new TransferManager(s3Client);
        Upload upload = tm.upload(por);
        upload.waitForCompletion();

        log(jobFile, "Uploaded; timems=" + (System.currentTimeMillis() - startTime));

        removeJobFile(jobFile);
        callbackCfc(jobFile, true, null);

        if ((Boolean) jobFile.get("deletefile"))
            localFile.delete();

    } catch (Exception e) {
        log(jobFile, "Failed=" + e.getMessage());

        callbackCfc(jobFile, false, e.getMessage());

        int retry = (Integer) jobFile.get("retry");
        int attempt = (Integer) jobFile.get("attempt") + 1;

        if (retry == attempt) {
            removeJobFile(jobFile);
        } else {
            jobFile.put("attempt", attempt);
            jobFile.put("attemptdate", System.currentTimeMillis() + (Long) jobFile.get("retryms"));
            acceptFile(jobFile);
        }

        if (s3Client != null)
            cleanupMultiPartUploads(s3Client, (String) jobFile.get("bucket"));

    } finally {
        if (tm != null)
            tm.shutdownNow(true);
    }

}

From source file:org.kuali.rice.kew.notes.service.impl.AmazonS3AttachmentServiceImpl.java

License:Educational Community License

@Override
public void persistAttachedFileAndSetAttachmentBusinessObjectValue(Attachment attachment) throws Exception {
    if (attachment.getFileLoc() == null) {
        String s3Url = generateS3Url(attachment);
        attachment.setFileLoc(s3Url);
    }
    TransferManager manager = new TransferManager(this.amazonS3);
    ObjectMetadata metadata = new ObjectMetadata();
    if (attachment.getMimeType() != null) {
        metadata.setContentType(attachment.getMimeType());
    }
    if (attachment.getFileName() != null) {
        metadata.setContentDisposition(
                "attachment; filename=" + URLEncoder.encode(attachment.getFileName(), "UTF-8"));
    }
    Upload upload = manager.upload(this.bucketName, parseObjectKey(attachment.getFileLoc()),
            attachment.getAttachedObject(), metadata);
    upload.waitForCompletion();
}

From source file:org.kuali.rice.krad.service.impl.AmazonS3AttachmentServiceImpl.java

License:Educational Community License

/**
 * @see org.kuali.rice.krad.service.AttachmentService#createAttachment(GloballyUnique,
 * String, String, int, java.io.InputStream, String)
 */
@Override
public Attachment createAttachment(GloballyUnique parent, String uploadedFileName, String mimeType,
        int fileSize, InputStream fileContents, String attachmentTypeCode) throws IOException {
    if (parent == null) {
        throw new IllegalArgumentException("invalid (null or uninitialized) document");
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("starting to create attachment for document: " + parent.getObjectId());
    }
    if (StringUtils.isBlank(uploadedFileName)) {
        throw new IllegalArgumentException("invalid (blank) fileName");
    }
    if (StringUtils.isBlank(mimeType)) {
        throw new IllegalArgumentException("invalid (blank) mimeType");
    }
    if (fileSize <= 0) {
        throw new IllegalArgumentException("invalid (non-positive) fileSize");
    }
    if (fileContents == null) {
        throw new IllegalArgumentException("invalid (null) inputStream");
    }

    String uniqueFileNameGuid = UUID.randomUUID().toString();

    TransferManager manager = new TransferManager(this.amazonS3);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType(mimeType);
    metadata.setContentDisposition("attachment; filename=" + URLEncoder.encode(uploadedFileName, "UTF-8"));
    metadata.setContentLength(fileSize);
    Upload upload = manager.upload(this.bucketName, generateObjectKey(uniqueFileNameGuid), fileContents,
            metadata);
    try {
        upload.waitForCompletion();
    } catch (InterruptedException e) {
        throw new IllegalStateException("Failed to upload file to s3", e);
    }

    // create DocumentAttachment
    Attachment attachment = new Attachment();
    attachment.setAttachmentIdentifier(uniqueFileNameGuid);
    attachment.setAttachmentFileName(uploadedFileName);
    attachment.setAttachmentFileSize(new Long(fileSize));
    attachment.setAttachmentMimeTypeCode(mimeType);
    attachment.setAttachmentTypeCode(attachmentTypeCode);

    if (LOG.isDebugEnabled()) {
        LOG.debug("finished creating attachment for document: " + parent.getObjectId());
    }
    return attachment;
}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

private void multiPartUpload(PutObjectRequest req)
        throws AmazonServiceException, AmazonClientException, InterruptedException {
    TransferManager tx = null;
    try {
        if (awsCredentials != null)
            tx = new TransferManager(awsCredentials);
        else
            tx = new TransferManager(new InstanceProfileCredentialsProvider());
        Upload myUpload = tx.upload(req);
        myUpload.waitForCompletion();
    } finally {
        if (tx != null)
            tx.shutdownNow();
    }

}

From source file:org.springframework.integration.aws.s3.core.AmazonS3OperationsImpl.java

License:Apache License

public void putObject(String bucketName, String folder, String objectName, AmazonS3Object s3Object) {
    if (logger.isDebugEnabled()) {
        logger.debug("Putting object to bucket " + bucketName + " and folder " + folder);
        logger.debug("Object Name is " + objectName);
    }

    if (objectName == null)
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "Object Name is Mandatory");

    boolean isTempFile = false;
    File file = s3Object.getFileSource();
    InputStream in = s3Object.getInputStream();

    if (file != null && in != null)
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "File Object and Input Stream in the S3 Object are mutually exclusive");

    if (file == null && in == null)
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "At lease one of File object or Input Stream in the S3 Object are mandatory");

    String key;
    if (folder != null) {
        key = folder.endsWith(PATH_SEPARATOR) ? folder + objectName : folder + PATH_SEPARATOR + objectName;
    } else {
        key = objectName;
    }

    if (in != null) {
        file = getTempFile(in, bucketName, objectName);
        isTempFile = true;
    }

    PutObjectRequest request;
    if (file != null) {
        request = new PutObjectRequest(bucketName, key, file);
        //if the size of the file is greater than the threshold for multipart upload,
        //set the Content-MD5 header for this upload. This header will also come in handy
        //later in inbound-channel-adapter where we can't find the MD5 sum of the
        //multipart upload file from its ETag
        String stringContentMD5 = null;
        try {
            stringContentMD5 = AmazonWSCommonUtils.encodeHex(AmazonWSCommonUtils.getContentsMD5AsBytes(file));
        } catch (UnsupportedEncodingException e) {
            logger.error("Exception while generating the content's MD5 of the file " + file.getAbsolutePath(),
                    e);
        }

        if (stringContentMD5 != null) {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentMD5(stringContentMD5);
            request.withMetadata(metadata);
        }
    } else
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "Unable to get the File handle to upload the file to S3");

    Upload upload;
    try {
        upload = transferManager.upload(request);
    } catch (Exception e) {
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "Encountered Exception while invoking upload on multipart/single thread file, "
                        + "see nested exceptions for more details",
                e);
    }
    //Wait till the upload completes, the call to putObject is synchronous
    try {
        if (logger.isInfoEnabled())
            logger.info("Waiting for Upload to complete");
        upload.waitForCompletion();
        if (logger.isInfoEnabled())
            logger.info("Upload completed");
    } catch (Exception e) {
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "Encountered Exception while uploading the multipart/single thread file, "
                        + "see nested exceptions for more details",
                e);
    }
    if (isTempFile) {
        //Delete the temp file
        if (logger.isDebugEnabled())
            logger.debug("Deleting temp file: " + file.getName());
        file.delete();
    }

    //Now since the object is present on S3, set the AccessControl list on it
    //Please note that it is not possible to set the object ACL with the
    //put object request, and hence both these operations cannot be atomic
    //it is possible the object is uploaded and the ACL not set due to some
    //failure

    AmazonS3ObjectACL acl = s3Object.getObjectACL();
    AccessControlList objectACL = getAccessControlList(bucketName, key, acl);
    if (objectACL != null) {
        if (logger.isInfoEnabled())
            logger.info("Setting Access control list for key " + key);
        try {
            client.setObjectAcl(bucketName, key, objectACL);
        } catch (Exception e) {
            throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                    "Encountered Exception while setting the Object ACL for key , " + key
                            + "see nested exceptions for more details",
                    e);
        }
        if (logger.isDebugEnabled())
            logger.debug("Successfully set the object ACL");
    } else {
        if (logger.isInfoEnabled())
            logger.info("No Object ACL found to be set");
    }
}