Example usage for com.amazonaws.services.s3.transfer TransferManager TransferManager

Introduction

On this page you can find example usage for the com.amazonaws.services.s3.transfer.TransferManager constructor.

Prototype

protected TransferManager(TransferManagerBuilder builder) 

Document

Constructor for use by classes that need to extend the TransferManager.
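
Below is a minimal sketch, assuming the AWS SDK for Java 1.x, of how a subclass might reach this protected constructor; the LoggingTransferManager name and its create helper are hypothetical and only illustrate the pattern.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;

// Hypothetical subclass: the protected constructor is only reachable through inheritance.
public class LoggingTransferManager extends TransferManager {

    protected LoggingTransferManager(TransferManagerBuilder builder) {
        super(builder);
    }

    // Convenience factory: configure the builder, then hand it to the protected super constructor.
    public static LoggingTransferManager create(AmazonS3 s3) {
        return new LoggingTransferManager(TransferManagerBuilder.standard().withS3Client(s3));
    }
}

In ordinary application code, instances are usually obtained via TransferManagerBuilder.standard().build() rather than a constructor call, since the direct constructors used in several of the examples below have been deprecated in recent 1.x releases in favor of the builder.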

Usage

From source file: br.org.fiscal65.amazonaws.AWSTransferModel.java

License: Open Source License

protected TransferManager getTransferManager() {
    if (mManager == null) {
        mManager = new TransferManager(AWSUtil.getCredProvider(mContext));
    }
    return mManager;
}

From source file: br.puc_rio.ele.lvc.interimage.common.udf.ROIStorage.java

License: Apache License

/**
  * Method invoked on every tuple during foreach evaluation.
  * @param input tuple<br>
  * first column is assumed to have the geometry<br>
  * second column is assumed to have the class name<br>
  * third column is assumed to have the output path
  * @exception java.io.IOException
  * @return true if successful, false otherwise
  */
@Override
public Boolean exec(Tuple input) throws IOException {
    if (input == null || input.size() < 3)
        return null;

    try {

        Object objGeometry = input.get(0);
        Geometry geometry = _geometryParser.parseGeometry(objGeometry);
        String className = DataType.toString(input.get(1));
        String path = DataType.toString(input.get(2));

        AWSCredentials credentials = new BasicAWSCredentials(_accessKey, _secretKey);
        AmazonS3 conn = new AmazonS3Client(credentials);
        conn.setEndpoint("https://s3.amazonaws.com");

        /*File temp = File.createTempFile(className, ".wkt");
                
         // Delete temp file when program exits.
         temp.deleteOnExit();
                     
         BufferedWriter out = new BufferedWriter(new FileWriter(temp));
         out.write(new WKTWriter().write(geometry));
         out.close();*/

        /*
                
        File temp = File.createTempFile(className, ".wkt.snappy");
                   
        temp.deleteOnExit();*/

        String geom = new WKTWriter().write(geometry);

        ByteArrayOutputStream out = new ByteArrayOutputStream();

        OutputStream snappyOut = new SnappyOutputStream(out);
        snappyOut.write(geom.getBytes());
        snappyOut.close();

        /*PutObjectRequest putObjectRequest = new PutObjectRequest(_bucket, path + className + ".wkt.snappy", temp);
        putObjectRequest.withCannedAcl(CannedAccessControlList.PublicRead); // public for all*/

        PutObjectRequest putObjectRequest = new PutObjectRequest(_bucket, path + className + ".wkts",
                new ByteArrayInputStream(out.toByteArray()), new ObjectMetadata());
        putObjectRequest.withCannedAcl(CannedAccessControlList.PublicRead); // public for all

        TransferManager tx = new TransferManager(credentials);
        tx.upload(putObjectRequest);

        return true;

    } catch (Exception e) {
        throw new IOException("Caught exception processing input row ", e);
    }
}

From source file: br.puc_rio.ele.lvc.interimage.core.datamanager.AWSSource.java

License: Apache License

public AWSSource(String accessKey, String secretKey, String bucket) {
    _accessKey = accessKey;
    _secretKey = secretKey;
    _bucket = bucket;

    AWSCredentials credentials = new BasicAWSCredentials(_accessKey, _secretKey);

    ClientConfiguration conf = new ClientConfiguration();

    conf.setConnectionTimeout(0);
    conf.setSocketTimeout(0);

    // pass the client configuration so the timeout settings actually take effect
    AmazonS3 conn = new AmazonS3Client(credentials, conf);
    conn.setEndpoint("https://s3.amazonaws.com");

    _manager = new TransferManager(conn);
}

From source file: ch.entwine.weblounge.maven.S3DeployMojo.java

License: Open Source License

/**
 *
 * {@inheritDoc}
 * 
 * @see org.apache.maven.plugin.Mojo#execute()
 */
public void execute() throws MojoExecutionException, MojoFailureException {

    // Setup AWS S3 client
    AWSCredentials credentials = new BasicAWSCredentials(awsAccessKey, awsSecretKey);
    AmazonS3Client uploadClient = new AmazonS3Client(credentials);
    TransferManager transfers = new TransferManager(credentials);

    // Make sure key prefix does not start with a slash but has one at the
    // end
    if (keyPrefix.startsWith("/"))
        keyPrefix = keyPrefix.substring(1);
    if (!keyPrefix.endsWith("/"))
        keyPrefix = keyPrefix + "/";

    // Keep track of how much data has been transferred
    long totalBytesTransferred = 0L;
    int items = 0;
    Queue<Upload> uploads = new LinkedBlockingQueue<Upload>();

    try {
        // Check if S3 bucket exists
        getLog().debug("Checking whether bucket " + bucket + " exists");
        if (!uploadClient.doesBucketExist(bucket)) {
            getLog().error("Desired bucket '" + bucket + "' does not exist!");
            return;
        }

        getLog().debug("Collecting files to transfer from " + resources.getDirectory());
        List<File> res = getResources();
        for (File file : res) {
            // Make path of resource relative to resources directory
            String filename = file.getName();
            String extension = FilenameUtils.getExtension(filename);
            String path = file.getPath().substring(resources.getDirectory().length());
            String key = concat("/", keyPrefix, path).substring(1);

            // Delete old file version in bucket
            getLog().debug("Removing existing object at " + key);
            uploadClient.deleteObject(bucket, key);

            // Setup meta data
            ObjectMetadata meta = new ObjectMetadata();
            meta.setCacheControl("public, max-age=" + String.valueOf(valid * 3600));

            FileInputStream fis = null;
            GZIPOutputStream gzipos = null;
            final File fileToUpload;

            if (gzip && ("js".equals(extension) || "css".equals(extension))) {
                try {
                    fis = new FileInputStream(file);
                    File gzFile = File.createTempFile(file.getName(), null);
                    gzipos = new GZIPOutputStream(new FileOutputStream(gzFile));
                    IOUtils.copy(fis, gzipos);
                    fileToUpload = gzFile;
                    meta.setContentEncoding("gzip");
                    if ("js".equals(extension))
                        meta.setContentType("text/javascript");
                    if ("css".equals(extension))
                        meta.setContentType("text/css");
                } catch (FileNotFoundException e) {
                    getLog().error(e);
                    continue;
                } catch (IOException e) {
                    getLog().error(e);
                    continue;
                } finally {
                    IOUtils.closeQuietly(fis);
                    IOUtils.closeQuietly(gzipos);
                }
            } else {
                fileToUpload = file;
            }

            // Do a random check for existing errors before starting the next upload
            if (erroneousUpload != null)
                break;

            // Create put object request
            long bytesToTransfer = fileToUpload.length();
            totalBytesTransferred += bytesToTransfer;
            PutObjectRequest request = new PutObjectRequest(bucket, key, fileToUpload);
            request.setProgressListener(new UploadListener(credentials, bucket, key, bytesToTransfer));
            request.setMetadata(meta);

            // Schedule put object request
            getLog().info(
                    "Uploading " + key + " (" + FileUtils.byteCountToDisplaySize((int) bytesToTransfer) + ")");
            Upload upload = transfers.upload(request);
            uploads.add(upload);
            items++;
        }
    } catch (AmazonServiceException e) {
        getLog().error("Uploading resources failed: " + e.getMessage());
    } catch (AmazonClientException e) {
        getLog().error("Uploading resources failed: " + e.getMessage());
    }

    // Wait for uploads to be finished
    String currentUpload = null;
    try {
        Thread.sleep(1000);
        getLog().info("Waiting for " + uploads.size() + " uploads to finish...");
        while (!uploads.isEmpty()) {
            Upload upload = uploads.poll();
            currentUpload = upload.getDescription().substring("Uploading to ".length());
            if (TransferState.InProgress.equals(upload.getState()))
                getLog().debug("Waiting for upload " + currentUpload + " to finish");
            upload.waitForUploadResult();
        }
    } catch (AmazonServiceException e) {
        throw new MojoExecutionException("Error while uploading " + currentUpload);
    } catch (AmazonClientException e) {
        throw new MojoExecutionException("Error while uploading " + currentUpload);
    } catch (InterruptedException e) {
        getLog().debug("Interrupted while waiting for upload to finish");
    }

    // Check for errors that happened outside of the actual uploading
    if (erroneousUpload != null) {
        throw new MojoExecutionException("Error while uploading " + erroneousUpload);
    }

    getLog().info("Deployed " + items + " files ("
            + FileUtils.byteCountToDisplaySize((int) totalBytesTransferred) + ") to s3://" + bucket);
}

From source file: cloudExplorer.Put.java

License: Open Source License

public void run() {
    try {
        AWSCredentials credentials = new BasicAWSCredentials(access_key, secret_key);
        AmazonS3 s3Client = new AmazonS3Client(credentials,
                new ClientConfiguration().withSignerOverride("S3SignerType"));
        s3Client.setEndpoint(endpoint);
        TransferManager tx = new TransferManager(s3Client);
        File file = new File(what);
        PutObjectRequest putRequest;
        if (!rrs) {
            putRequest = new PutObjectRequest(bucket, ObjectKey, file);
        } else {
            putRequest = new PutObjectRequest(bucket, ObjectKey, file)
                    .withStorageClass(StorageClass.ReducedRedundancy);
        }
        MimetypesFileTypeMap mimeTypesMap = new MimetypesFileTypeMap();
        String mimeType = mimeTypesMap.getContentType(file);
        ObjectMetadata objectMetadata = new ObjectMetadata();
        if (encrypt) {
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        if ((ObjectKey.contains(".html")) || ObjectKey.contains(".txt")) {
            objectMetadata.setContentType("text/html");
        } else {
            objectMetadata.setContentType(mimeType);
        }
        long t1 = System.currentTimeMillis();
        putRequest.setMetadata(objectMetadata);
        Upload myUpload = tx.upload(putRequest);
        myUpload.waitForCompletion();
        tx.shutdownNow();
        long t2 = System.currentTimeMillis();
        long diff = t2 - t1;

        if (!mainFrame.perf) {
            if (terminal) {
                System.out.print("\nUploaded object: " + ObjectKey + " in " + diff / 1000 + " second(s).\n");
            } else {
                mainFrame.jTextArea1
                        .append("\nUploaded object: " + ObjectKey + " in " + diff / 1000 + " second(s).");
            }
        }
    } catch (AmazonServiceException ase) {
        if (NewJFrame.gui) {
            mainFrame.jTextArea1.append("\n\nError Message:    " + ase.getMessage());
            mainFrame.jTextArea1.append("\nHTTP Status Code: " + ase.getStatusCode());
            mainFrame.jTextArea1.append("\nAWS Error Code:   " + ase.getErrorCode());
            mainFrame.jTextArea1.append("\nError Type:       " + ase.getErrorType());
            mainFrame.jTextArea1.append("\nRequest ID:       " + ase.getRequestId());
            calibrate();
        } else {
            System.out.print("\n\nError Message:    " + ase.getMessage());
            System.out.print("\nHTTP Status Code: " + ase.getStatusCode());
            System.out.print("\nAWS Error Code:   " + ase.getErrorCode());
            System.out.print("\nError Type:       " + ase.getErrorType());
            System.out.print("\nRequest ID:       " + ase.getRequestId());
        }
    } catch (Exception put) {
        // any other exception is silently ignored; calibrate() still runs below
    }

    calibrate();
}

From source file: com.bye.project.S3TransferProgressSample.java

License: Open Source License

public static void main(String[] args) throws Exception {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * TransferManager manages a pool of threads, so we create a
     * single instance and share it throughout our application.
     */
    AmazonS3 s3 = new AmazonS3Client(credentials = new PropertiesCredentials(
            S3TransferProgressSample.class.getResourceAsStream("AwsCredentials.properties")));
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);
    tx = new TransferManager(s3);

    bucketName = "s3-upload-sdk-sample-" + credentials.getAWSAccessKeyId().toLowerCase();

    new S3TransferProgressSample();
}

From source file: com.cloudbees.plugins.binarydeployer.s3.S3Repository.java

License: Open Source License

@Override
protected void deploy(List<Binary> binaries, Run run) throws IOException {
    log.debug("Will deploy files to S3::" + bucketName);
    AWSCredentialsImpl credentials = CredentialsProvider.findCredentialById(credentialsId,
            AWSCredentialsImpl.class, run, Lists.<DomainRequirement>newArrayList());

    TransferManager transferManager = new TransferManager(credentials);
    for (Binary binary : binaries) {
        transferManager.upload(prepareUpload(binary.getFile(), binary.getName()));
    }
}

From source file: com.emc.vipr.s3.sample._08_CreateLargeObject.java

License: Open Source License

public static void main(String[] args) throws Exception {
    // create the ViPR S3 Client
    ViPRS3Client s3 = ViPRS3Factory.getS3Client();

    // retrieve object key/value from user
    System.out.println("Enter the object key:");
    String key = new BufferedReader(new InputStreamReader(System.in)).readLine();
    System.out.println("Enter the file location (C:\\Users\\vandrk\\EMC\\NameSpaceList.zip) :");
    String filePath = new BufferedReader(new InputStreamReader(System.in)).readLine();

    TransferManager manager = new TransferManager(s3);
    manager.upload(ViPRS3Factory.S3_BUCKET, key, new File(filePath)).waitForUploadResult();

    // print bucket key/value and content for validation
    System.out.println(String.format("completed multi-part upload for object [%s/%s] with file path: [%s]",
            ViPRS3Factory.S3_BUCKET, key, filePath));
}

From source file: com.facebook.presto.rakam.S3BackupStore.java

License: Apache License

@Override
public void restoreShard(java.util.UUID uuid, java.io.File target) {
    try {
        new TransferManager(s3Client).download(config.getS3Bucket(), uuid.toString(), target)
                .waitForCompletion();
    } catch (InterruptedException e) {
        throw Throwables.propagate(e);
    }
}