Example usage for com.amazonaws.services.s3.transfer TransferManager upload

Introduction

On this page you can find example usage for com.amazonaws.services.s3.transfer TransferManager upload.

Prototype

public Upload upload(final PutObjectRequest putObjectRequest)
        throws AmazonServiceException, AmazonClientException 

Document

Schedules a new transfer to upload data to Amazon S3.
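
For orientation before the examples, here is a minimal, self-contained sketch of the call. It assumes the AWS SDK for Java v1, default-chain credentials, and an existing bucket; the bucket, key, and file names are placeholders, not values from any example below.

import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

import java.io.File;

public class MinimalUploadSketch {
    public static void main(String[] args) throws InterruptedException {
        // Build a TransferManager over a default S3 client (credentials from the default chain).
        TransferManager tm = TransferManagerBuilder.standard()
                .withS3Client(AmazonS3ClientBuilder.defaultClient()).build();
        try {
            // upload() schedules the transfer and returns immediately.
            Upload upload = tm.upload(new PutObjectRequest("example-bucket", "example/key.txt",
                    new File("local-file.txt")));
            // Block until the transfer completes; throws if the upload fails.
            upload.waitForCompletion();
        } finally {
            // Release TransferManager threads; 'false' leaves the underlying client running.
            tm.shutdownNow(false);
        }
    }
}

Note that several of the examples below block with waitForUploadResult(), which also returns the UploadResult, or attach a ProgressListener instead of blocking.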

Usage

From source file:com.mweagle.tereus.aws.S3Resource.java

License:Open Source License

public Optional<String> upload() {
    try {
        DefaultAWSCredentialsProviderChain credentialProviderChain = new DefaultAWSCredentialsProviderChain();
        final TransferManager transferManager = new TransferManager(credentialProviderChain.getCredentials());

        final ObjectMetadata metadata = new ObjectMetadata();
        if (this.inputStreamLength.isPresent()) {
            metadata.setContentLength(this.inputStreamLength.get());
        }
        final PutObjectRequest uploadRequest = new PutObjectRequest(bucketName, keyName, this.inputStream,
                metadata);
        final Upload templateUpload = transferManager.upload(uploadRequest);

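        // upload() only schedules the transfer; block here until it completes.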
        templateUpload.waitForUploadResult();
        this.resourceURL = Optional.of(getS3Path());
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
    return this.resourceURL;
}

From source file:com.shelfmap.simplequery.domain.impl.DefaultBlobReference.java

License:Apache License

@Override
public Upload uploadFrom(InputStream uploadSource, ObjectMetadata metadata) throws BlobOutputException {
    String bucket = resourceInfo.getBucketName();
    String key = resourceInfo.getKey();

    try {
        PutObjectRequest request = new PutObjectRequest(bucket, key, uploadSource, metadata);
        AmazonS3 s3 = getContext().getS3();
        TransferManager transfer = new TransferManager(s3);
        this.lastUpload = transfer.upload(request);
        return this.lastUpload;
    } catch (AmazonServiceException ex) {
        throw new BlobOutputException("a problem occured in Amazon S3.", ex);
    } catch (AmazonClientException ex) {
        throw new BlobOutputException("Client had an problem when uploading data.", ex);
    }
}

From source file:jenkins.plugins.itemstorage.s3.Uploads.java

License:Open Source License

public void startUploading(TransferManager manager, File file, InputStream inputStream, Destination dest,
        ObjectMetadata metadata) throws AmazonServiceException {
    final PutObjectRequest request = new PutObjectRequest(dest.bucketName, dest.objectName, inputStream,
            metadata);

    // Set the buffer size (ReadLimit) equal to the multipart upload size,
    // allowing us to resend data if the connection breaks.
    request.getRequestClientOptions().setReadLimit(MULTIPART_UPLOAD_THRESHOLD);
    manager.getConfiguration().setMultipartUploadThreshold((long) MULTIPART_UPLOAD_THRESHOLD);

    final Upload upload = manager.upload(request);
    startedUploads.put(file, upload);
    openedStreams.put(file, inputStream);
}

From source file:jetbrains.buildServer.codepipeline.CodePipelineBuildListener.java

License:Apache License

private void processJobOutput(@NotNull final AgentRunningBuild build,
        @NotNull final BuildFinishedStatus buildStatus) {
    if (myJobID == null)
        return;

    AWSCommonParams.withAWSClients(build.getSharedConfigParameters(),
            new AWSCommonParams.WithAWSClients<Void, RuntimeException>() {
                @Nullable
                @Override
                public Void run(@NotNull AWSClients clients) throws RuntimeException {
                    AWSCodePipelineClient codePipelineClient = null;
                    try {
                        codePipelineClient = clients.createCodePipeLineClient();
                        if (build.isBuildFailingOnServer()) {
                            publishJobFailure(codePipelineClient, build, "Build failed");
                        } else if (BuildFinishedStatus.INTERRUPTED == buildStatus) {
                            publishJobFailure(codePipelineClient, build, "Build interrupted");
                        } else {
                            final Map<String, String> params = build.getSharedConfigParameters();
                            final JobData jobData = getJobData(codePipelineClient, params);

                            final List<Artifact> outputArtifacts = jobData.getOutputArtifacts();
                            if (outputArtifacts.isEmpty()) {
                                LOG.debug(msgForBuild(
                                        "No output artifacts expected for the job with ID: " + myJobID, build));
                            } else {
                                final File artifactOutputFolder = new File(
                                        params.get(ARTIFACT_OUTPUT_FOLDER_CONFIG_PARAM));

                                S3Util.withTransferManager(
                                        getArtifactS3Client(jobData.getArtifactCredentials(), params),
                                        new S3Util.WithTransferManager<Upload>() {
                                            @NotNull
                                            @Override
                                            public Collection<Upload> run(
                                                    @NotNull final TransferManager manager) throws Throwable {
                                                return CollectionsUtil.convertCollection(outputArtifacts,
                                                        new Converter<Upload, Artifact>() {
                                                            @Override
                                                            public Upload createFrom(
                                                                    @NotNull Artifact artifact) {
                                                                final File buildArtifact = getBuildArtifact(
                                                                        artifact,
                                                                        jobData.getPipelineContext()
                                                                                .getPipelineName(),
                                                                        artifactOutputFolder, build);
                                                                final S3ArtifactLocation s3Location = artifact
                                                                        .getLocation().getS3Location();

                                                                build.getBuildLogger().message(
                                                                        "Uploading job output artifact "
                                                                                + s3Location.getObjectKey()
                                                                                + " from " + buildArtifact
                                                                                        .getAbsolutePath());
                                                                return manager.upload(new PutObjectRequest(
                                                                        s3Location.getBucketName(),
                                                                        s3Location.getObjectKey(),
                                                                        buildArtifact)
                                                                                .withSSEAwsKeyManagementParams(
                                                                                        getSSEAwsKeyManagementParams(
                                                                                                jobData.getEncryptionKey())));
                                                            }
                                                        });
                                            }
                                        });

                                publishJobSuccess(codePipelineClient, build);
                            }
                        }
                    } catch (Throwable e) {
                        failOnException(codePipelineClient, build, e);
                    }
                    return null;
                }
            });
}

From source file:jp.classmethod.aws.gradle.s3.AmazonS3ProgressiveFileUploadTask.java

License:Apache License

@TaskAction
public void upload() throws InterruptedException {
    // to enable conventionMappings feature
    String bucketName = getBucketName();
    String key = getKey();
    File file = getFile();

    if (bucketName == null) {
        throw new GradleException("bucketName is not specified");
    }
    if (key == null) {
        throw new GradleException("key is not specified");
    }
    if (file == null) {
        throw new GradleException("file is not specified");
    }
    if (!file.isFile()) {
        throw new GradleException("file must be a regular file");
    }

    AmazonS3PluginExtension ext = getProject().getExtensions().getByType(AmazonS3PluginExtension.class);
    AmazonS3 s3 = ext.getClient();

    TransferManager s3mgr = TransferManagerBuilder.standard().withS3Client(s3).build();
    getLogger().info("Uploading... s3://{}/{}", bucketName, key);

    Upload upload = s3mgr.upload(
            new PutObjectRequest(getBucketName(), getKey(), getFile()).withMetadata(getObjectMetadata()));
    upload.addProgressListener(new ProgressListener() {

        public void progressChanged(ProgressEvent event) {
            getLogger().info("  {}% uploaded", upload.getProgress().getPercentTransferred());
        }
    });
    upload.waitForCompletion();
    setResourceUrl(s3.getUrl(bucketName, key).toString());
    getLogger().info("Upload completed: {}", getResourceUrl());
}

From source file:msv_upload_tool.FXMLDocumentController.java

private void uploadObject() {

    final Long max = file.length();

    task = new Task<Void>() {
        @Override
        protected Void call() {

            boolean doLoop = true;
            long total = 0;

            while (doLoop) {

                lock.readLock().lock();

                try {
                    total = totalBytes;
                } finally {
                    lock.readLock().unlock();
                }

                updateProgress(total, max);
                if (total == max)
                    doLoop = false;

                try {
                    Thread.sleep(50); // poll roughly every 50 ms
                } catch (InterruptedException ex) {
                    Thread.currentThread().interrupt();
                }

            }

            updateProgress(-1, max);

            // Returning normally transitions the Task to SUCCEEDED;
            // calling succeeded() manually is unnecessary.
            return null;

        }
    };

    uploadProgress.progressProperty().bind(task.progressProperty());
    task.setOnSucceeded(new EventHandler<WorkerStateEvent>() {

        @Override
        public void handle(WorkerStateEvent event) {

            label.setText("");

            label2.setText("");
            button.setDisable(true);
            button2.setDisable(false);

        }

    });

    Thread th = new Thread(task);

    th.setDaemon(true);

    //disable the buttons
    button.setDisable(true);
    button2.setDisable(true);

    th.start();

    String existingBucketName = "mstargeneralfiles";
    String keyName = "duh/" + file.getName();
    String filePath = file.getAbsolutePath();

    TransferManager tm = new TransferManager(new ProfileCredentialsProvider());

    // For more advanced uploads, you can create a request object 
    // and supply additional request parameters (ex: progress listeners,
    // canned ACLs, etc.)
    PutObjectRequest request = new PutObjectRequest(existingBucketName, keyName, new File(filePath));

    // You can ask the upload for its progress, or you can 
    // add a ProgressListener to your request to receive notifications 
    // when bytes are transferred.
    request.setGeneralProgressListener(new ProgressListener() {

        @Override
        public void progressChanged(ProgressEvent progressEvent) {

            System.out.println(progressEvent.toString());

            lock.writeLock().lock();

            try {
                totalBytes += progressEvent.getBytesTransferred();
            } finally {
                lock.writeLock().unlock();
            }

        }
    });

    // TransferManager processes all transfers asynchronously, 
    // so this call will return immediately.
    Upload upload = tm.upload(request);

}

From source file:org.alanwilliamson.amazon.s3.BackgroundUploader.java

License:Open Source License

private void uploadFile(Map<String, Object> jobFile) {

    File localFile = new File((String) jobFile.get("localpath"));
    if (!localFile.isFile()) {
        removeJobFile(jobFile);
        callbackCfc(jobFile, false, "local file no longer exists");
        cfEngine.log("AmazonS3Write.BackgroundUploader: file no longer exists=" + localFile.getName());
        return;
    }

    // Setup the object data
    ObjectMetadata omd = new ObjectMetadata();
    if (jobFile.containsKey("metadata"))
        omd.setUserMetadata((Map<String, String>) jobFile.get("metadata"));

    TransferManager tm = null;
    AmazonS3 s3Client = null;
    try {
        AmazonKey amazonKey = (AmazonKey) jobFile.get("amazonkey");
        s3Client = new AmazonBase().getAmazonS3(amazonKey);

        PutObjectRequest por = new PutObjectRequest((String) jobFile.get("bucket"), (String) jobFile.get("key"),
                localFile);
        por.setMetadata(omd);
        por.setStorageClass((StorageClass) jobFile.get("storage"));

        if (jobFile.containsKey("acl"))
            por.setCannedAcl(amazonKey.getAmazonCannedAcl((String) jobFile.get("acl")));

        if (jobFile.containsKey("aes256key"))
            por.setSSECustomerKey(new SSECustomerKey((String) jobFile.get("aes256key")));

        if (jobFile.containsKey("customheaders")) {
            Map<String, String> customheaders = (Map) jobFile.get("customheaders");

            Iterator<String> it = customheaders.keySet().iterator();
            while (it.hasNext()) {
                String k = it.next();
                por.putCustomRequestHeader(k, customheaders.get(k));
            }
        }

        long startTime = System.currentTimeMillis();
        tm = new TransferManager(s3Client);
        Upload upload = tm.upload(por);
        upload.waitForCompletion();

        log(jobFile, "Uploaded; timems=" + (System.currentTimeMillis() - startTime));

        removeJobFile(jobFile);
        callbackCfc(jobFile, true, null);

        if ((Boolean) jobFile.get("deletefile"))
            localFile.delete();

    } catch (Exception e) {
        log(jobFile, "Failed=" + e.getMessage());

        callbackCfc(jobFile, false, e.getMessage());

        int retry = (Integer) jobFile.get("retry");
        int attempt = (Integer) jobFile.get("attempt") + 1;

        if (retry == attempt) {
            removeJobFile(jobFile);
        } else {
            jobFile.put("attempt", attempt);
            jobFile.put("attemptdate", System.currentTimeMillis() + (Long) jobFile.get("retryms"));
            acceptFile(jobFile);
        }

        if (s3Client != null)
            cleanupMultiPartUploads(s3Client, (String) jobFile.get("bucket"));

    } finally {
        if (tm != null)
            tm.shutdownNow(true);
    }

}

From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java

License:Apache License

/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5. Sadly this doesn't seem to be
 * used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException {
    String key = pathToKey(dst);

    if (!overwrite && exists(dst)) {
        throw new IOException(dst + " already exists");
    }

    LOG.info("Copying local file from " + src + " to " + dst);

    // Since we have a local file, we don't need to stream into a temporary file
    LocalFileSystem local = getLocal(getConf());
    File srcfile = local.pathToFile(src);

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMinimumUploadPartSize(partSize);
    transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);

    TransferManager transfers = new TransferManager(s3);
    transfers.setConfiguration(transferConfiguration);

    final ObjectMetadata om = new ObjectMetadata();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }

    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setMetadata(om);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            }
        }
    };

    Upload up = transfers.upload(putObjectRequest);
    up.addProgressListener(progressListener);
    try {
        up.waitForUploadResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    } finally {
        transfers.shutdownNow(false);
    }

    // This will delete unnecessary fake parent directories
    finishedWrite(key);

    if (delSrc) {
        local.delete(src, false);
    }
}

From source file:org.apache.hadoop.fs.s3a.S3AOutputStream.java

License:Apache License

@Override
public synchronized void close() throws IOException {
    if (closed) {
        return;
    }

    backupStream.close();
    LOG.info("OutputStream for key '" + key + "' closed. Now beginning upload");
    LOG.info("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold);

    try {
        TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
        transferConfiguration.setMinimumUploadPartSize(partSize);
        transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);

        TransferManager transfers = new TransferManager(client);
        transfers.setConfiguration(transferConfiguration);

        final ObjectMetadata om = new ObjectMetadata();
        if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
            om.setServerSideEncryption(serverSideEncryptionAlgorithm);
        }

        PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
        putObjectRequest.setCannedAcl(cannedACL);
        putObjectRequest.setMetadata(om);

        Upload upload = transfers.upload(putObjectRequest);

        ProgressableProgressListener listener = new ProgressableProgressListener(upload, progress, statistics);
        upload.addProgressListener(listener);

        upload.waitForUploadResult();

        long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
        if (statistics != null && delta != 0) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("S3A write delta changed after finished: " + delta + " bytes");
            }
            statistics.incrementBytesWritten(delta);
        }

        // This will delete unnecessary fake parent directories
        fs.finishedWrite(key);
    } catch (InterruptedException e) {
        throw new IOException(e);
    } finally {
        if (!backupFile.delete()) {
            LOG.warn("Could not delete temporary s3a file: " + backupFile);
        }
        super.close();
        closed = true;
    }

    LOG.info("OutputStream for key '" + key + "' upload complete");
}

From source file:org.finra.dm.dao.impl.S3OperationsImpl.java

License:Apache License

/**
 * Implementation delegates to {@link TransferManager#upload(PutObjectRequest)}.
 */
@Override
public Upload upload(PutObjectRequest putObjectRequest, TransferManager transferManager)
        throws AmazonServiceException, AmazonClientException {
    return transferManager.upload(putObjectRequest);
}