Example usage for com.amazonaws.services.s3 AmazonS3 copyObject

List of usage examples for com.amazonaws.services.s3 AmazonS3 copyObject

Introduction

On this page you can find example usage for com.amazonaws.services.s3 AmazonS3 copyObject.

Prototype

public CopyObjectResult copyObject(CopyObjectRequest copyObjectRequest)
        throws SdkClientException, AmazonServiceException;

Document

Copies a source object to a new destination in Amazon S3.
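
Below is a minimal, self-contained sketch of a standalone copyObject call. It assumes the default credential and region provider chain; the class name, bucket names and keys are placeholders rather than values taken from the examples that follow.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CopyObjectRequest;
import com.amazonaws.services.s3.model.CopyObjectResult;

public class CopyObjectExample {
    public static void main(String[] args) {
        // Build a client using the default credential and region provider chain.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Placeholder bucket and key names; replace with your own.
        CopyObjectRequest request = new CopyObjectRequest("source-bucket", "source/key.txt",
                "destination-bucket", "destination/key.txt");

        // Perform the copy and inspect the result returned by S3.
        CopyObjectResult result = s3.copyObject(request);
        System.out.println("Copied object, ETag: " + result.getETag());
    }
}

The usage examples below show the same call in larger contexts: rewriting an object's metadata by copying it onto itself, copying or renaming objects between buckets, and updating Content-Encoding and Cache-Control headers across a bucket.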

Usage

From source file:com.openkm.util.backup.RepositoryS3Backup.java

License:Open Source License

/**
 * Performs a recursive repository content export with metadata
 */
private static ImpExpStats backupHelper(String token, String fldPath, AmazonS3 s3, String bucket,
        boolean metadata, Writer out, InfoDecorator deco)
        throws FileNotFoundException, PathNotFoundException, AccessDeniedException, ParseException,
        NoSuchGroupException, RepositoryException, IOException, DatabaseException {
    log.info("backup({}, {}, {}, {}, {}, {})", new Object[] { token, fldPath, bucket, metadata, out, deco });
    ImpExpStats stats = new ImpExpStats();
    DocumentModule dm = ModuleManager.getDocumentModule();
    FolderModule fm = ModuleManager.getFolderModule();
    MetadataAdapter ma = MetadataAdapter.getInstance(token);
    Gson gson = new Gson();

    for (Iterator<Document> it = dm.getChildren(token, fldPath).iterator(); it.hasNext();) {
        File tmpDoc = null;
        InputStream is = null;
        FileOutputStream fos = null;
        boolean upload = true;

        try {
            Document docChild = it.next();
            String path = docChild.getPath().substring(1);
            ObjectMetadata objMeta = new ObjectMetadata();

            if (Config.REPOSITORY_CONTENT_CHECKSUM) {
                if (exists(s3, bucket, path)) {
                    objMeta = s3.getObjectMetadata(bucket, path);

                    if (docChild.getActualVersion().getChecksum().equals(objMeta.getETag())) {
                        upload = false;
                    }
                }
            }

            if (upload) {
                tmpDoc = FileUtils.createTempFileFromMime(docChild.getMimeType());
                fos = new FileOutputStream(tmpDoc);
                is = dm.getContent(token, docChild.getPath(), false);
                IOUtils.copy(is, fos);
                PutObjectRequest request = new PutObjectRequest(bucket, path, tmpDoc);

                if (metadata) {
                    // Metadata
                    DocumentMetadata dmd = ma.getMetadata(docChild);
                    String json = gson.toJson(dmd);
                    objMeta.addUserMetadata("okm", json);
                }

                request.setMetadata(objMeta);
                s3.putObject(request);
                out.write(deco.print(docChild.getPath(), docChild.getActualVersion().getSize(), null));
                out.flush();
            } else {
                if (metadata) {
                    // Metadata
                    DocumentMetadata dmd = ma.getMetadata(docChild);
                    String json = gson.toJson(dmd);
                    objMeta.addUserMetadata("okm", json);

                    // Update object metadata: S3 metadata is immutable, so copy the
                    // object onto itself with the new metadata attached
                    CopyObjectRequest copyObjReq = new CopyObjectRequest(bucket, path, bucket, path);
                    copyObjReq.setNewObjectMetadata(objMeta);
                    s3.copyObject(copyObjReq);
                }

                log.info("Don't need to upload document {}", docChild.getPath());
            }

            // Stats
            stats.setSize(stats.getSize() + docChild.getActualVersion().getSize());
            stats.setDocuments(stats.getDocuments() + 1);
        } finally {
            IOUtils.closeQuietly(is);
            IOUtils.closeQuietly(fos);
            FileUtils.deleteQuietly(tmpDoc);
        }
    }

    for (Iterator<Folder> it = fm.getChildren(token, fldPath).iterator(); it.hasNext();) {
        InputStream is = null;

        try {
            Folder fldChild = it.next();
            String path = fldChild.getPath().substring(1) + "/";
            is = new ByteArrayInputStream(new byte[0]);
            ObjectMetadata objMeta = new ObjectMetadata();
            objMeta.setContentLength(0);
            PutObjectRequest request = new PutObjectRequest(bucket, path, is, objMeta);

            // Metadata
            if (metadata) {
                FolderMetadata fmd = ma.getMetadata(fldChild);
                String json = gson.toJson(fmd);
                objMeta.addUserMetadata("okm", json);
            }

            request.setMetadata(objMeta);
            s3.putObject(request);

            ImpExpStats tmp = backupHelper(token, fldChild.getPath(), s3, bucket, metadata, out, deco);

            // Stats
            stats.setSize(stats.getSize() + tmp.getSize());
            stats.setDocuments(stats.getDocuments() + tmp.getDocuments());
            stats.setFolders(stats.getFolders() + tmp.getFolders() + 1);
            stats.setOk(stats.isOk() && tmp.isOk());
        } finally {
            IOUtils.closeQuietly(is);
        }
    }

    log.debug("backupHelper: {}", stats);
    return stats;
}

From source file:io.konig.camel.aws.s3.DeleteObjectProducer.java

License:Apache License

private void copyObject(AmazonS3 s3Client, Exchange exchange) {
    String bucketNameDestination;
    String destinationKey;
    String sourceKey;
    String bucketName;
    String versionId;

    bucketName = exchange.getIn().getHeader(S3Constants.BUCKET_NAME, String.class);
    if (ObjectHelper.isEmpty(bucketName)) {
        bucketName = getConfiguration().getBucketName();
    }
    sourceKey = exchange.getIn().getHeader(S3Constants.KEY, String.class);
    destinationKey = exchange.getIn().getHeader(S3Constants.DESTINATION_KEY, String.class);
    bucketNameDestination = exchange.getIn().getHeader(S3Constants.BUCKET_DESTINATION_NAME, String.class);
    versionId = exchange.getIn().getHeader(S3Constants.VERSION_ID, String.class);

    if (ObjectHelper.isEmpty(bucketName)) {
        throw new IllegalArgumentException("Bucket Name must be specified for copyObject Operation");
    }
    if (ObjectHelper.isEmpty(bucketNameDestination)) {
        throw new IllegalArgumentException(
                "Bucket Name Destination must be specified for copyObject Operation");
    }
    if (ObjectHelper.isEmpty(sourceKey)) {
        throw new IllegalArgumentException("Source Key must be specified for copyObject Operation");
    }
    if (ObjectHelper.isEmpty(destinationKey)) {
        throw new IllegalArgumentException("Destination Key must be specified for copyObject Operation");
    }
    CopyObjectRequest copyObjectRequest;
    if (ObjectHelper.isEmpty(versionId)) {
        copyObjectRequest = new CopyObjectRequest(bucketName, sourceKey, bucketNameDestination, destinationKey);
    } else {
        copyObjectRequest = new CopyObjectRequest(bucketName, sourceKey, versionId, bucketNameDestination,
                destinationKey);
    }

    if (getConfiguration().isUseAwsKMS()) {
        SSEAwsKeyManagementParams keyManagementParams;
        if (ObjectHelper.isNotEmpty(getConfiguration().getAwsKMSKeyId())) {
            keyManagementParams = new SSEAwsKeyManagementParams(getConfiguration().getAwsKMSKeyId());
        } else {
            keyManagementParams = new SSEAwsKeyManagementParams();
        }
        copyObjectRequest.setSSEAwsKeyManagementParams(keyManagementParams);
    }

    CopyObjectResult copyObjectResult = s3Client.copyObject(copyObjectRequest);

    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, copyObjectResult.getETag());
    if (copyObjectResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, copyObjectResult.getVersionId());
    }
}

From source file:org.alanwilliamson.amazon.s3.Copy.java

License:Open Source License

public cfData execute(cfSession _session, cfArgStructData argStruct) throws cfmRunTimeException {

    AmazonKey amazonKey = getAmazonKey(_session, argStruct);
    AmazonS3 s3Client = getAmazonS3(amazonKey);

    String srcbucket = getNamedStringParam(argStruct, "srcbucket", null);
    String srckey = getNamedStringParam(argStruct, "srckey", null);
    String srcaes256key = getNamedStringParam(argStruct, "srcaes256key", null);

    String destbucket = getNamedStringParam(argStruct, "destbucket", null);
    String deskey = getNamedStringParam(argStruct, "destkey", null);
    String destaes256key = getNamedStringParam(argStruct, "destaes256key", null);
    String deststorageclass = getNamedStringParam(argStruct, "deststorageclass", null);
    String destacl = getNamedStringParam(argStruct, "destacl", null);

    if (srckey != null && srckey.charAt(0) == '/')
        srckey = srckey.substring(1);

    if (deskey != null && deskey.charAt(0) == '/')
        deskey = deskey.substring(1);

    CopyObjectRequest cor = new CopyObjectRequest(srcbucket, srckey, destbucket, deskey);

    if (srcaes256key != null && !srcaes256key.isEmpty())
        cor.setSourceSSECustomerKey(new SSECustomerKey(srcaes256key));

    if (destaes256key != null && !destaes256key.isEmpty())
        cor.setDestinationSSECustomerKey(new SSECustomerKey(destaes256key));

    if (deststorageclass != null && !deststorageclass.isEmpty())
        cor.setStorageClass(amazonKey.getAmazonStorageClass(deststorageclass));

    if (destacl != null && !destacl.isEmpty())
        cor.setCannedAccessControlList(amazonKey.getAmazonCannedAcl(destacl));

    try {
        s3Client.copyObject(cor);
        return cfBooleanData.TRUE;
    } catch (Exception e) {
        throwException(_session, "AmazonS3: " + e.getMessage());
        return cfBooleanData.FALSE;
    }
}

From source file:org.alanwilliamson.amazon.s3.Rename.java

License:Open Source License

public cfData execute(cfSession _session, cfArgStructData argStruct) throws cfmRunTimeException {
    AmazonKey amazonKey = getAmazonKey(_session, argStruct);
    AmazonS3 s3Client = getAmazonS3(amazonKey);

    String bucket = getNamedStringParam(argStruct, "bucket", null);
    String srckey = getNamedStringParam(argStruct, "srckey", null);
    String deskey = getNamedStringParam(argStruct, "destkey", null);
    String aes256key = getNamedStringParam(argStruct, "aes256key", null);

    if (srckey != null && srckey.charAt(0) == '/')
        srckey = srckey.substring(1);

    if (deskey != null && deskey.charAt(0) == '/')
        deskey = deskey.substring(1);

    CopyObjectRequest cor = new CopyObjectRequest(bucket, srckey, bucket, deskey);

    if (aes256key != null && !aes256key.isEmpty()) {
        cor.setSourceSSECustomerKey(new SSECustomerKey(aes256key));
        cor.setDestinationSSECustomerKey(new SSECustomerKey(aes256key));
    }

    try {
        s3Client.copyObject(cor);
        s3Client.deleteObject(new DeleteObjectRequest(bucket, srckey));
        return cfBooleanData.TRUE;
    } catch (Exception e) {
        throwException(_session, "AmazonS3: " + e.getMessage());
        return cfBooleanData.FALSE;
    }
}

From source file:rg.ent.S3SampleB.java

License:Open Source License

public static void copyFileBtweenBuckets() {

    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (/Users/john/.aws/credentials), and is in valid format.", e);
    }

    String file_name = "640_262098533.JPG";
    String source_bucket = "royaltygroupimages";
    String dest_bucket = "royaltygroupupload";

    AmazonS3 s3 = new AmazonS3Client(credentials);
    CopyObjectRequest cor = new CopyObjectRequest(source_bucket, file_name, dest_bucket, file_name);
    s3.copyObject(cor);
    s3.setObjectAcl(dest_bucket, file_name, CannedAccessControlList.PublicRead);

    //S3Object[] filteredObjects = s3Service.listObjects("sourceBucket", "appData/", null);
    //for(S3Object object: filteredObjects ){
    //    s3Service.copyObject("sourceBucket", "newAppData/" + object.getKey().substring(object.getKey().indexOf("/"), "destBucket", object, false);
    //}
}

From source file:squash.deployment.lambdas.utils.TransferUtils.java

License:Apache License

/**
 * Adds gzip content-encoding metadata to S3 objects.
 *
 * <p>Adds gzip content-encoding metadata to S3 objects. All objects
 *    beneath the specified prefix (i.e. folder) will have the
 *    metadata added. When the bucket serves objects it will then
 *    add a suitable Content-Encoding header.
 *
 *    @param bucketName the bucket to apply the metadata to.
 *    @param prefix prefix within the bucket, beneath which to apply the metadata.
 *    @param logger a CloudwatchLogs logger.
 */
public static void addGzipContentEncodingMetadata(String bucketName, Optional<String> prefix,
        LambdaLogger logger) {

    // To add new metadata, we must copy each object to itself.
    ListObjectsRequest listObjectsRequest;
    if (prefix.isPresent()) {
        logger.log("Setting gzip content encoding metadata on bucket: " + bucketName + " and prefix: "
                + prefix.get());
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(prefix.get());
    } else {
        logger.log("Setting gzip content encoding metadata on bucket: " + bucketName);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
    }

    ObjectListing objectListing;
    AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
    do {
        objectListing = client.listObjects(listObjectsRequest);
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            String key = objectSummary.getKey();
            logger.log("Setting metadata for S3 object: " + key);
            // We must specify ALL metadata - not just the one we're adding.
            ObjectMetadata objectMetadata = client.getObjectMetadata(bucketName, key);
            objectMetadata.setContentEncoding("gzip");
            CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucketName, key, bucketName, key)
                    .withNewObjectMetadata(objectMetadata)
                    .withCannedAccessControlList(CannedAccessControlList.PublicRead);
            client.copyObject(copyObjectRequest);
            logger.log("Set metadata for S3 object: " + key);
        }
        listObjectsRequest.setMarker(objectListing.getNextMarker());
    } while (objectListing.isTruncated());
    logger.log("Set gzip content encoding metadata on bucket");
}

From source file:squash.deployment.lambdas.utils.TransferUtils.java

License:Apache License

/**
 * Adds cache-control header to S3 objects.
 *
 * <p>Adds cache-control header to S3 objects. All objects
 *    beneath the specified prefix (i.e. folder), and with the
 *    specified extension will have the header added. When the
 *    bucket serves objects it will then add a suitable
 *    Cache-Control header.
 *
 *    @param headerValue value of the cache-control header
 *    @param bucketName the bucket to apply the header to.
 *    @param prefix prefix within the bucket, beneath which to apply the header.
 *    @param extension file extension to apply header to
 *    @param logger a CloudwatchLogs logger.
 */
public static void addCacheControlHeader(String headerValue, String bucketName, Optional<String> prefix,
        String extension, LambdaLogger logger) {

    // To add new metadata, we must copy each object to itself.
    ListObjectsRequest listObjectsRequest;
    if (prefix.isPresent()) {
        logger.log("Setting cache-control metadata: " + headerValue + ", on bucket: " + bucketName
                + " and prefix: " + prefix.get() + " and extension: " + extension);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(prefix.get());
    } else {
        logger.log("Setting cache-control metadata: " + headerValue + ", on bucket: " + bucketName
                + " and extension: " + extension);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
    }

    ObjectListing objectListing;
    AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
    do {
        objectListing = client.listObjects(listObjectsRequest);
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            String key = objectSummary.getKey();
            if (!key.endsWith(extension)) {
                continue;
            }
            logger.log("Setting metadata for S3 object: " + key);
            // We must specify ALL metadata - not just the one we're adding.
            ObjectMetadata objectMetadata = client.getObjectMetadata(bucketName, key);
            objectMetadata.setCacheControl(headerValue);
            CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucketName, key, bucketName, key)
                    .withNewObjectMetadata(objectMetadata)
                    .withCannedAccessControlList(CannedAccessControlList.PublicRead);
            client.copyObject(copyObjectRequest);
            logger.log("Set metadata for S3 object: " + key);
        }
        listObjectsRequest.setMarker(objectListing.getNextMarker());
    } while (objectListing.isTruncated());
    logger.log("Set cache-control metadata on bucket");
}
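
The two helpers above both rewrite metadata by copying each object onto itself. As a usage illustration only, the sketch below shows one way they might be invoked together; the handler class, bucket name, prefix and cache lifetime are hypothetical placeholders, not part of the original source.

import java.util.Optional;

import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.LambdaLogger;

import squash.deployment.lambdas.utils.TransferUtils;

// Hypothetical caller; assumes the TransferUtils class above is on the classpath.
public class WebsiteMetadataHandler {
    public void handleRequest(Object input, Context context) {
        LambdaLogger logger = context.getLogger();

        // Placeholder bucket and prefix; replace with the bucket serving your static assets.
        String bucketName = "my-website-bucket";
        Optional<String> prefix = Optional.of("app");

        // Mark pre-gzipped assets so S3 serves them with a Content-Encoding: gzip header.
        TransferUtils.addGzipContentEncodingMetadata(bucketName, prefix, logger);

        // Ask browsers to cache .js assets for one year.
        TransferUtils.addCacheControlHeader("max-age=31536000", bucketName, prefix, ".js", logger);
    }
}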