Example usage for com.amazonaws.services.s3.transfer Upload waitForUploadResult

Introduction

This page shows example usages of com.amazonaws.services.s3.transfer Upload.waitForUploadResult.

Prototype

public UploadResult waitForUploadResult()
        throws AmazonClientException, AmazonServiceException, InterruptedException;

Document

Waits for this upload to complete and returns the result of this upload.
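
The call blocks the calling thread until the transfer finishes and rethrows any failure, so the usual pattern is to submit the upload to a TransferManager and then wait on the returned Upload. A minimal sketch (the bucket name, key, and local file path below are placeholders):

TransferManager tm = new TransferManager(new DefaultAWSCredentialsProviderChain());
try {
    // upload() returns immediately; the transfer runs on the manager's threads
    Upload upload = tm.upload("my-bucket", "my/key", new File("/tmp/payload.bin"));
    // Blocks until the transfer completes; failures surface as
    // AmazonClientException/AmazonServiceException
    UploadResult result = upload.waitForUploadResult();
    System.out.println("Uploaded, ETag: " + result.getETag());
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
} finally {
    tm.shutdownNow(false); // shut down the manager but keep the S3 client usable
}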

Usage

From source file:ch.entwine.weblounge.maven.S3DeployMojo.java

License:Open Source License

/**
 * {@inheritDoc}
 * 
 * @see org.apache.maven.plugin.Mojo#execute()
 */
public void execute() throws MojoExecutionException, MojoFailureException {

    // Setup AWS S3 client
    AWSCredentials credentials = new BasicAWSCredentials(awsAccessKey, awsSecretKey);
    AmazonS3Client uploadClient = new AmazonS3Client(credentials);
    TransferManager transfers = new TransferManager(credentials);

    // Make sure key prefix does not start with a slash but has one at the
    // end
    if (keyPrefix.startsWith("/"))
        keyPrefix = keyPrefix.substring(1);
    if (!keyPrefix.endsWith("/"))
        keyPrefix = keyPrefix + "/";

    // Keep track of how much data has been transferred
    long totalBytesTransferred = 0L;
    int items = 0;
    Queue<Upload> uploads = new LinkedBlockingQueue<Upload>();

    try {
        // Check if S3 bucket exists
        getLog().debug("Checking whether bucket " + bucket + " exists");
        if (!uploadClient.doesBucketExist(bucket)) {
            getLog().error("Desired bucket '" + bucket + "' does not exist!");
            return;
        }

        getLog().debug("Collecting files to transfer from " + resources.getDirectory());
        List<File> res = getResources();
        for (File file : res) {
            // Make path of resource relative to resources directory
            String filename = file.getName();
            String extension = FilenameUtils.getExtension(filename);
            String path = file.getPath().substring(resources.getDirectory().length());
            String key = concat("/", keyPrefix, path).substring(1);

            // Delete old file version in bucket
            getLog().debug("Removing existing object at " + key);
            uploadClient.deleteObject(bucket, key);

            // Setup meta data
            ObjectMetadata meta = new ObjectMetadata();
            meta.setCacheControl("public, max-age=" + String.valueOf(valid * 3600));

            FileInputStream fis = null;
            GZIPOutputStream gzipos = null;
            final File fileToUpload;

            if (gzip && ("js".equals(extension) || "css".equals(extension))) {
                try {
                    fis = new FileInputStream(file);
                    File gzFile = File.createTempFile(file.getName(), null);
                    gzipos = new GZIPOutputStream(new FileOutputStream(gzFile));
                    IOUtils.copy(fis, gzipos);
                    fileToUpload = gzFile;
                    meta.setContentEncoding("gzip");
                    if ("js".equals(extension))
                        meta.setContentType("text/javascript");
                    if ("css".equals(extension))
                        meta.setContentType("text/css");
                } catch (FileNotFoundException e) {
                    getLog().error(e);
                    continue;
                } catch (IOException e) {
                    getLog().error(e);
                    continue;
                } finally {
                    IOUtils.closeQuietly(fis);
                    IOUtils.closeQuietly(gzipos);
                }
            } else {
                fileToUpload = file;
            }

            // Check whether an earlier upload already failed before starting the next one
            if (erroneousUpload != null)
                break;

            // Create put object request
            long bytesToTransfer = fileToUpload.length();
            totalBytesTransferred += bytesToTransfer;
            PutObjectRequest request = new PutObjectRequest(bucket, key, fileToUpload);
            request.setProgressListener(new UploadListener(credentials, bucket, key, bytesToTransfer));
            request.setMetadata(meta);

            // Schedule put object request
            getLog().info(
                    "Uploading " + key + " (" + FileUtils.byteCountToDisplaySize(bytesToTransfer) + ")");
            Upload upload = transfers.upload(request);
            uploads.add(upload);
            items++;
        }
    } catch (AmazonServiceException e) {
        getLog().error("Uploading resources failed: " + e.getMessage());
    } catch (AmazonClientException e) {
        getLog().error("Uploading resources failed: " + e.getMessage());
    }

    // Wait for uploads to be finished
    String currentUpload = null;
    try {
        Thread.sleep(1000);
        getLog().info("Waiting for " + uploads.size() + " uploads to finish...");
        while (!uploads.isEmpty()) {
            Upload upload = uploads.poll();
            currentUpload = upload.getDescription().substring("Uploading to ".length());
            if (TransferState.InProgress.equals(upload.getState()))
                getLog().debug("Waiting for upload " + currentUpload + " to finish");
            upload.waitForUploadResult();
        }
    } catch (AmazonServiceException e) {
        throw new MojoExecutionException("Error while uploading " + currentUpload);
    } catch (AmazonClientException e) {
        throw new MojoExecutionException("Error while uploading " + currentUpload);
    } catch (InterruptedException e) {
        getLog().debug("Interrupted while waiting for upload to finish");
    }

    // Check for errors that happened outside of the actual uploading
    if (erroneousUpload != null) {
        throw new MojoExecutionException("Error while uploading " + erroneousUpload);
    }

    getLog().info("Deployed " + items + " files ("
            + FileUtils.byteCountToDisplaySize((int) totalBytesTransferred) + ") to s3://" + bucket);
}

From source file:com.davidsoergel.s3napback.S3ops.java

License:Apache License

public static void upload(TransferManager tx, String bucket, String filename, int chunkSize)
        throws InterruptedException, IOException {

    // break input stream into chunks

    // fully read each chunk into memory before sending, in order to know the size and the md5

    // ** prepare the next chunk while the last is sending; need to deal with multithreading properly
    // ** 4 concurrent streams?

    InputStream in = new BufferedInputStream(System.in);
    int chunkNum = 0;
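    // Note: available() is only a heuristic for "more data to read"; a stricter
    // loop would read until read() returns -1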
    while (in.available() > 0) {
        byte[] buf = new byte[chunkSize];
        int bytesRead = in.read(buf);

        // Content-MD5 must be the Base64-encoded MD5 digest of the bytes actually read
        String md5;
        try {
            java.security.MessageDigest digest = java.security.MessageDigest.getInstance("MD5");
            digest.update(buf, 0, bytesRead);
            md5 = java.util.Base64.getEncoder().encodeToString(digest.digest());
        } catch (java.security.NoSuchAlgorithmException e) {
            throw new IllegalStateException("MD5 digest not available", e);
        }

        // presume AWS does its own buffering, no need for BufferedInputStream (?)

        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(bytesRead);
        meta.setContentMD5(md5);

        Upload myUpload = tx.upload(bucket, filename + ":" + chunkNum,
                new ByteArrayInputStream(buf, 0, bytesRead), meta);
        chunkNum++;

        // waitForUploadResult() blocks until the transfer completes, so poll for
        // progress first and collect the result once the upload is done
        while (!myUpload.isDone()) {
            System.out.println("Transfer: " + myUpload.getDescription());
            System.out.println("  - State: " + myUpload.getState());
            System.out.println("  - Progress: " + myUpload.getProgress().getBytesTransferred());
            // Do work while we wait for our upload to complete...
            Thread.sleep(500);
        }
        UploadResult result = myUpload.waitForUploadResult();
    }
}

From source file:com.emc.ecs.sync.target.S3Target.java

License:Open Source License

protected void putObject(SyncObject obj, String targetKey) {
    ObjectMetadata om = AwsS3Util.s3MetaFromSyncMeta(obj.getMetadata());
    if (obj.isDirectory())
        om.setContentType(AwsS3Util.TYPE_DIRECTORY);

    PutObjectRequest req;
    if (obj.isDirectory()) {
        req = new PutObjectRequest(bucketName, targetKey, new ByteArrayInputStream(new byte[0]), om);
    } else if (obj instanceof FileSyncObject) {
        req = new PutObjectRequest(bucketName, targetKey, ((FileSyncObject) obj).getRawSourceIdentifier());
    } else {
        req = new PutObjectRequest(bucketName, targetKey, obj.getInputStream(), om);
    }

    if (includeAcl)
        req.setAccessControlList(AwsS3Util.s3AclFromSyncAcl(obj.getMetadata().getAcl(), ignoreInvalidAcls));

    // xfer manager will figure out if MPU is needed (based on threshold), do the MPU if necessary,
    // and abort if it fails
    TransferManagerConfiguration xferConfig = new TransferManagerConfiguration();
    xferConfig.setMultipartUploadThreshold((long) mpuThresholdMB * 1024 * 1024);
    xferConfig.setMinimumUploadPartSize((long) mpuPartSizeMB * 1024 * 1024);
    TransferManager xferManager = new TransferManager(s3, Executors.newFixedThreadPool(mpuThreadCount));
    xferManager.setConfiguration(xferConfig);

    Upload upload = xferManager.upload(req);
    try {
        log.debug("Wrote {}, etag: {}", targetKey, upload.waitForUploadResult().getETag());
    } catch (InterruptedException e) {
        throw new RuntimeException("upload thread was interrupted", e);
    } finally {
        // make sure bytes read is accurate if we bypassed the counting stream
        if (obj instanceof FileSyncObject) {
            try {
                ((FileSyncObject) obj).setOverrideBytesRead(upload.getProgress().getBytesTransferred());
            } catch (Throwable t) {
                log.warn("could not get bytes transferred from upload", t);
            }
        }
    }
}

From source file:com.ibm.stocator.fs.cos.COSAPIClient.java

License:Apache License

@Override
public FSDataOutputStream createObject(String objName, String contentType, Map<String, String> metadata,
        Statistics statistics) throws IOException {
    try {
        String objNameWithoutBucket = objName;
        if (objName.startsWith(mBucket + "/")) {
            objNameWithoutBucket = objName.substring(mBucket.length() + 1);
        }
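        // When block upload is enabled, stream the object in parts through a block
        // output stream; otherwise fall back to a single PUT via the transfer manager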
        if (blockUploadEnabled) {
            return new FSDataOutputStream(new COSBlockOutputStream(this, objNameWithoutBucket,
                    new SemaphoredDelegatingExecutor(threadPoolExecutor, blockOutputActiveBlocks, true),
                    partSize, blockFactory, contentType, new WriteOperationHelper(objNameWithoutBucket),
                    metadata), null);
        }

        if (!contentType.equals(Constants.APPLICATION_DIRECTORY)) {
            return new FSDataOutputStream(
                    new COSOutputStream(mBucket, objName, mClient, contentType, metadata, transfers, this),
                    statistics);
        } else {
            final InputStream im = new InputStream() {
                @Override
                public int read() throws IOException {
                    return -1;
                }
            };
            final ObjectMetadata om = new ObjectMetadata();
            om.setContentLength(0L);
            om.setContentType(contentType);
            om.setUserMetadata(metadata);
            // Remove the bucket name prefix from key path
            if (objName.startsWith(mBucket + "/")) {
                objName = objName.substring(mBucket.length() + 1);
            }
            /*
            if (!objName.endsWith("/")) {
              objName = objName + "/";
            }*/
            LOG.debug("bucket: {}, key {}", mBucket, objName);
            PutObjectRequest putObjectRequest = new PutObjectRequest(mBucket, objName, im, om);
            Upload upload = transfers.upload(putObjectRequest);
            upload.waitForUploadResult();
            OutputStream fakeStream = new OutputStream() {

                @Override
                public void write(int b) throws IOException {
                }

                @Override
                public void close() throws IOException {
                    super.close();
                }
            };
            return new FSDataOutputStream(fakeStream, statistics);
        }
    } catch (InterruptedException e) {
        throw new InterruptedIOException("Interrupted creating " + objName);
    } catch (IOException e) {
        LOG.error(e.getMessage());
        throw e;
    }
}

From source file:com.ibm.stocator.fs.cos.COSOutputStream.java

License:Apache License

@Override
public void close() throws IOException {
    if (closed.getAndSet(true)) {
        return;
    }
    mBackupOutputStream.close();
    LOG.debug("OutputStream for key '{}' closed. Now beginning upload", mKey);
    try {
        final ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(mBackupFile.length());
        om.setContentType(mContentType);
        om.setUserMetadata(mMetadata);

        PutObjectRequest putObjectRequest = new PutObjectRequest(mBucketName, mKey, mBackupFile);
        putObjectRequest.setMetadata(om);

        Upload upload = transfers.upload(putObjectRequest);

        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException(e.toString()).initCause(e);
    } catch (AmazonClientException e) {
        throw new IOException(String.format("saving output %s %s", mKey, e));
    } finally {
        if (!mBackupFile.delete()) {
            LOG.warn("Could not delete temporary cos file: {}", mBackupOutputStream);
        }
        super.close();
    }
    LOG.debug("OutputStream for key '{}' upload complete", mKey);
}

From source file:com.mweagle.tereus.aws.S3Resource.java

License:Open Source License

public Optional<String> upload() {
    try {
        DefaultAWSCredentialsProviderChain credentialProviderChain = new DefaultAWSCredentialsProviderChain();
        final TransferManager transferManager = new TransferManager(credentialProviderChain.getCredentials());

        final ObjectMetadata metadata = new ObjectMetadata();
        if (this.inputStreamLength.isPresent()) {
            metadata.setContentLength(this.inputStreamLength.get());
        }
        final PutObjectRequest uploadRequest = new PutObjectRequest(bucketName, keyName, this.inputStream,
                metadata);
        final Upload templateUpload = transferManager.upload(uploadRequest);

        templateUpload.waitForUploadResult();
        this.resourceURL = Optional.of(getS3Path());
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
    return this.resourceURL;
}

From source file:com.proofpoint.event.collector.combiner.S3StorageSystem.java

License:Apache License

@Override
public StoredObject putObject(final URI location, File source) {
    try {
        log.info("starting upload: %s", location);
        final AtomicLong totalTransferred = new AtomicLong();
        Upload upload = s3TransferManager.upload(getS3Bucket(location), getS3ObjectKey(location), source);
        upload.addProgressListener(new ProgressListener() {
            @Override
            public void progressChanged(ProgressEvent progressEvent) {
                // NOTE: This may be invoked by multiple threads.
                long transferred = totalTransferred.addAndGet(progressEvent.getBytesTransferred());
                log.debug("upload progress: %s: transferred=%d code=%d", location, transferred,
                        progressEvent.getEventCode());
            }
        });
        UploadResult uploadResult = upload.waitForUploadResult();
        ObjectMetadata metadata = s3Service.getObjectMetadata(getS3Bucket(location), getS3ObjectKey(location));
        if (!uploadResult.getETag().equals(metadata.getETag())) {
            // this might happen in rare cases due to S3's eventual consistency
            throw new IllegalStateException("uploaded etag is different from retrieved object etag");
        }
        log.info("completed upload: %s (size=%d bytes)", location, totalTransferred.get());
        return updateStoredObject(location, metadata);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}

From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java

License:Apache License

/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5. Sadly this doesn't seem to be
 * used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException {
    String key = pathToKey(dst);

    if (!overwrite && exists(dst)) {
        throw new IOException(dst + " already exists");
    }

    LOG.info("Copying local file from " + src + " to " + dst);

    // Since we have a local file, we don't need to stream into a temporary file
    LocalFileSystem local = getLocal(getConf());
    File srcfile = local.pathToFile(src);

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMinimumUploadPartSize(partSize);
    transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);

    TransferManager transfers = new TransferManager(s3);
    transfers.setConfiguration(transferConfiguration);

    final ObjectMetadata om = new ObjectMetadata();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }

    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setMetadata(om);

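    // Count each completed multipart part as a write operation in the FS statistics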
    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            }
        }
    };

    Upload up = transfers.upload(putObjectRequest);
    up.addProgressListener(progressListener);
    try {
        up.waitForUploadResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    } finally {
        transfers.shutdownNow(false);
    }

    // This will delete unnecessary fake parent directories
    finishedWrite(key);

    if (delSrc) {
        local.delete(src, false);
    }
}

From source file:org.apache.hadoop.fs.s3a.S3AOutputStream.java

License:Apache License

@Override
public synchronized void close() throws IOException {
    if (closed) {
        return;
    }

    backupStream.close();
    LOG.info("OutputStream for key '" + key + "' closed. Now beginning upload");
    LOG.info("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold);

    try {
        TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
        transferConfiguration.setMinimumUploadPartSize(partSize);
        transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);

        TransferManager transfers = new TransferManager(client);
        transfers.setConfiguration(transferConfiguration);

        final ObjectMetadata om = new ObjectMetadata();
        if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
            om.setServerSideEncryption(serverSideEncryptionAlgorithm);
        }

        PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
        putObjectRequest.setCannedAcl(cannedACL);
        putObjectRequest.setMetadata(om);

        Upload upload = transfers.upload(putObjectRequest);

        ProgressableProgressListener listener = new ProgressableProgressListener(upload, progress, statistics);
        upload.addProgressListener(listener);

        upload.waitForUploadResult();

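        // Pick up any bytes reported between the listener's last callback and completion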
        long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
        if (statistics != null && delta != 0) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("S3A write delta changed after finished: " + delta + " bytes");
            }
            statistics.incrementBytesWritten(delta);
        }

        // This will delete unnecessary fake parent directories
        fs.finishedWrite(key);
    } catch (InterruptedException e) {
        throw new IOException(e);
    } finally {
        if (!backupFile.delete()) {
            LOG.warn("Could not delete temporary s3a file: " + backupFile);
        }
        super.close();
        closed = true;
    }

    LOG.info("OutputStream for key '" + key + "' upload complete");
}

From source file:org.apache.hadoop.fs.s3r.S3RFileSystem.java

License:Apache License

/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException {
    String key = pathToKey(dst);

    if (!overwrite && exists(dst)) {
        throw new IOException(dst + " already exists");
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Copying local file from " + src + " to " + dst);
    }

    // Since we have a local file, we don't need to stream into a temporary file
    LocalFileSystem local = getLocal(getConf());
    File srcfile = local.pathToFile(src);

    final ObjectMetadata om = new ObjectMetadata();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setMetadata(om);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            default:
                break;
            }
        }
    };

    Upload up = transfers.upload(putObjectRequest);
    up.addProgressListener(progressListener);
    try {
        up.waitForUploadResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    }

    // This will delete unnecessary fake parent directories
    finishedWrite(key);

    if (delSrc) {
        local.delete(src, false);
    }
}