Example usage for com.amazonaws.services.s3.transfer TransferManagerBuilder defaultTransferManager

Introduction

On this page you can find example usage for com.amazonaws.services.s3.transfer TransferManagerBuilder defaultTransferManager.

Prototype

public static TransferManager defaultTransferManager() 
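
The default transfer manager is built with default client and thread-pool settings, so the only lifecycle concern left to the caller is shutting it down. A minimal sketch (bucket, key, and file names are placeholders):

TransferManager tx = TransferManagerBuilder.defaultTransferManager();
try {
    // waitForCompletion() blocks until the transfer finishes or fails
    tx.upload("my-bucket", "path/key.txt", new File("/tmp/key.txt")).waitForCompletion();
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
} finally {
    // Also shuts down the underlying AmazonS3 client created by the builder
    tx.shutdownNow();
}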

Usage

From source file: org.apache.flink.streaming.tests.util.s3.S3UtilProgram.java

License: Apache License

private static void downloadFile(ParameterTool params) {
    final String bucket = params.getRequired("bucket");
    final String s3file = params.getRequired("s3file");
    final String localFile = params.getRequired("localFile");
    TransferManager tx = TransferManagerBuilder.defaultTransferManager();
    try {
        tx.download(bucket, s3file, new File(localFile)).waitForCompletion();
    } catch (InterruptedException e) {
        System.out.println("Transfer interrupted");
    } finally {
        tx.shutdownNow();
    }
}
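
Note that this helper swallows the InterruptedException; in long-running code you would normally restore the interrupt flag with Thread.currentThread().interrupt() before returning. The no-argument shutdownNow() also shuts down the underlying AmazonS3 client, which is appropriate here since that client was created by defaultTransferManager() and is not shared.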

From source file: org.apache.flink.streaming.tests.util.s3.S3UtilProgram.java

License: Apache License

private static void downloadByFullPathAndFileNamePrefix(ParameterTool params) {
    final String bucket = params.getRequired("bucket");
    final String s3prefix = params.getRequired("s3prefix");
    final String localFolder = params.getRequired("localFolder");
    final String s3filePrefix = params.get("s3filePrefix", "");
    TransferManager tx = TransferManagerBuilder.defaultTransferManager();
    Predicate<String> keyPredicate = getKeyFilterByFileNamePrefix(s3filePrefix);
    KeyFilter keyFilter = s3filePrefix.isEmpty() ? KeyFilter.INCLUDE_ALL
            : objectSummary -> keyPredicate.test(objectSummary.getKey());
    try {
        tx.downloadDirectory(bucket, s3prefix, new File(localFolder), keyFilter).waitForCompletion();
    } catch (InterruptedException e) {
        System.out.println("Transfer interrupted");
    } finally {
        tx.shutdownNow();
    }
}
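
The getKeyFilterByFileNamePrefix helper is not shown on this page. A plausible sketch, assuming it matches the prefix against the file-name portion of each key (everything after the last '/'):

private static Predicate<String> getKeyFilterByFileNamePrefix(String fileNamePrefix) {
    // Accept a key when the part after the last '/' starts with the given prefix
    return key -> key.substring(key.lastIndexOf('/') + 1).startsWith(fileNamePrefix);
}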

From source file: squash.deployment.lambdas.AngularjsAppCustomResourceLambda.java

License: Apache License

void deleteAngularjsApp(String websiteBucket, LambdaLogger logger) {
    logger.log("Removing AngularjsApp content from website versioned S3 bucket");

    // We need to delete every version of every key
    ListVersionsRequest listVersionsRequest = new ListVersionsRequest().withBucketName(websiteBucket);
    VersionListing versionListing;

    AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
    do {
        versionListing = client.listVersions(listVersionsRequest);
        versionListing.getVersionSummaries().stream().filter(k -> (k.getKey().startsWith("app"))).forEach(k -> {
            logger.log("About to delete version: " + k.getVersionId() + " of AngularjsApp page: " + k.getKey());
            DeleteVersionRequest deleteVersionRequest = new DeleteVersionRequest(websiteBucket, k.getKey(),
                    k.getVersionId());
            client.deleteVersion(deleteVersionRequest);
            logger.log("Successfully deleted version: " + k.getVersionId() + " of AngularjsApp page: "
                    + k.getKey());
        });

        listVersionsRequest.setKeyMarker(versionListing.getNextKeyMarker());
    } while (versionListing.isTruncated());
    logger.log("Finished removing AngularjsApp content from website S3 bucket");
}
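
Building a whole TransferManager just to borrow its S3 client, as done above, works but leaves the manager and its thread pool without a matching shutdownNow() call. Where only the client is needed, constructing it directly is cheaper; a sketch:

AmazonS3 client = AmazonS3ClientBuilder.defaultClient();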

From source file: squash.deployment.lambdas.ApiGatewayCustomResourceLambda.java

License: Apache License

void constructApiAndUploadSdk(String restApiId, AmazonApiGateway apiGatewayClient, String region,
        String validDatesGETLambdaURI, String bookingsGETLambdaURI, String bookingsPUTDELETELambdaURI,
        String bookingRulesGETLambdaURI, String bookingRuleOrExclusionPUTDELETELambdaURI,
        String bookingsApiGatewayInvocationRole, String stageName, LambdaLogger logger) throws Exception {
    // Create the API's resources
    logger.log("Creating API resources");
    String validDates = createTopLevelResourceOnApi("validdates", restApiId, apiGatewayClient, logger).getId();
    String bookings = createTopLevelResourceOnApi("bookings", restApiId, apiGatewayClient, logger).getId();
    String bookingRules = createTopLevelResourceOnApi("bookingrules", restApiId, apiGatewayClient, logger)
            .getId();
    String reservationForm = createTopLevelResourceOnApi("reservationform", restApiId, apiGatewayClient, logger)
            .getId();
    String cancellationForm = createTopLevelResourceOnApi("cancellationform", restApiId, apiGatewayClient,
            logger).getId();

    // Create the API's methods
    logger.log("Creating API methods");
    Map<String, String> extraParameters = new HashMap<>();

    String revvingSuffix = System.getenv("RevvingSuffix");

    // Methods on the validdates resource
    logger.log("Creating methods on validdates resource");
    extraParameters.put("ValidDatesGETLambdaURI", validDatesGETLambdaURI);
    extraParameters.put("BookingsGETLambdaURI", bookingsGETLambdaURI);
    extraParameters.put("BookingsPUTDELETELambdaURI", bookingsPUTDELETELambdaURI);
    extraParameters.put("BookingRulesGETLambdaURI", bookingRulesGETLambdaURI);
    extraParameters.put("BookingRulesPUTDELETELambdaURI", bookingRuleOrExclusionPUTDELETELambdaURI);
    extraParameters.put("BookingsApiGatewayInvocationRole", bookingsApiGatewayInvocationRole);
    createMethodOnResource("ValidDatesGET", validDates, restApiId, extraParameters, apiGatewayClient,
            revvingSuffix, region, logger);
    createMethodOnResource("ValidDatesOPTIONS", validDates, restApiId, extraParameters, apiGatewayClient,
            revvingSuffix, region, logger);

    // Methods on the bookings resource
    logger.log("Creating methods on bookings resource");
    createMethodOnResource("BookingsGET", bookings, restApiId, extraParameters, apiGatewayClient, revvingSuffix,
            region, logger);
    createMethodOnResource("BookingsDELETE", bookings, restApiId, extraParameters, apiGatewayClient,
            revvingSuffix, region, logger);
    createMethodOnResource("BookingsPUT", bookings, restApiId, extraParameters, apiGatewayClient, revvingSuffix,
            region, logger);
    createMethodOnResource("BookingsPOST", bookings, restApiId, extraParameters, apiGatewayClient,
            revvingSuffix, region, logger);
    createMethodOnResource("BookingsOPTIONS", bookings, restApiId, extraParameters, apiGatewayClient,
            revvingSuffix, region, logger);

    // Methods on the bookingrules resource
    logger.log("Creating methods on bookingrules resource");
    createMethodOnResource("BookingrulesGET", bookingRules, restApiId, extraParameters, apiGatewayClient,
            revvingSuffix, region, logger);
    createMethodOnResource("BookingrulesDELETE", bookingRules, restApiId, extraParameters, apiGatewayClient,
            revvingSuffix, region, logger);
    createMethodOnResource("BookingrulesPUT", bookingRules, restApiId, extraParameters, apiGatewayClient,
            revvingSuffix, region, logger);
    createMethodOnResource("BookingrulesOPTIONS", bookingRules, restApiId, extraParameters, apiGatewayClient,
            revvingSuffix, region, logger);

    // Methods on the reservationform resource
    logger.log("Creating methods on reservationform resource");
    createMethodOnResource("ReservationformGET", reservationForm, restApiId, extraParameters, apiGatewayClient,
            revvingSuffix, region, logger);
    createMethodOnResource("ReservationformOPTIONS", reservationForm, restApiId, extraParameters,
            apiGatewayClient, revvingSuffix, region, logger);

    // Methods on the cancellationform resource
    logger.log("Creating methods on cancellationform resource");
    createMethodOnResource("CancellationformGET", cancellationForm, restApiId, extraParameters,
            apiGatewayClient, revvingSuffix, region, logger);
    createMethodOnResource("CancellationformOPTIONS", cancellationForm, restApiId, extraParameters,
            apiGatewayClient, revvingSuffix, region, logger);

    // Deploy the api to a stage (with default throttling settings)
    logger.log("Deploying API to stage: " + stageName);
    CreateDeploymentRequest createDeploymentRequest = new CreateDeploymentRequest();
    createDeploymentRequest.setCacheClusterEnabled(false);
    createDeploymentRequest.setDescription("A deployment of the Squash api");
    createDeploymentRequest.setStageDescription("A stage for the Squash api");
    createDeploymentRequest.setStageName(stageName);
    createDeploymentRequest.setRestApiId(restApiId);
    CreateDeploymentResult createDeploymentResult = apiGatewayClient.createDeployment(createDeploymentRequest);
    logger.log("Deployed to stage with ID: " + createDeploymentResult.getId());

    // FIXME
    // Throttle all methods on this stage - does not seem to work yet?
    // logger.log("Throttling all of stage's methods");
    // GetStagesRequest getStagesRequest = new GetStagesRequest();
    // getStagesRequest.setRestApiId(restApiId);
    // GetStagesResult getStagesResult =
    // apiGatewayClient.getStages(getStagesRequest);
    // List<Stage> stages = getStagesResult.getItem();
    // Stage stage = stages.stream().filter(s ->
    // s.getStageName().equals(stageName)).findFirst().get();
    // MethodSetting methodSetting = new MethodSetting();
    // methodSetting.setThrottlingBurstLimit(10);
    // methodSetting.setThrottlingRateLimit(1.0);
    // stage.addMethodSettingsEntry("*/*", methodSetting); // Adds to all
    // methods
    // logger.log("Throttling completed");

    // Download javascript sdk and upload it to the S3 bucket serving the
    // squash site
    logger.log("Downloading Javascript SDK");
    GetSdkRequest getSdkRequest = new GetSdkRequest();
    getSdkRequest.setRestApiId(restApiId);
    getSdkRequest.setStageName(stageName);
    getSdkRequest.setSdkType("JavaScript");
    // The parameters map is only used for Android SDKs, but the call crashes
    // if the map is empty - so set a dummy entry
    Map<String, String> paramsMap = new HashMap<>();
    paramsMap.put("GroupID", "Dummy");
    getSdkRequest.setParameters(paramsMap);
    GetSdkResult getSdkResult = apiGatewayClient.getSdk(getSdkRequest);

    // Copy the sdk to S3 via AWS lambda's temporary file system
    logger.log("Copying Javascript SDK to S3");
    try {
        logger.log("Saving SDK to lambda's temporary file system");
        ByteBuffer sdkBuffer = getSdkResult.getBody().asReadOnlyBuffer();
        try (FileOutputStream fileOutputStream = new FileOutputStream("/tmp/sdk.zip")) {
            try (WritableByteChannel channel = Channels.newChannel(fileOutputStream)) {
                channel.write(sdkBuffer);
            }
        }
        // Unzip the sdk
        logger.log("SDK saved. Now unzipping");
        String outputFolder = "/tmp/extractedSdk";
        ZipFile zipFile = new ZipFile("/tmp/sdk.zip");
        try {
            Enumeration<? extends ZipEntry> entries = zipFile.entries();
            while (entries.hasMoreElements()) {
                ZipEntry entry = entries.nextElement();
                logger.log("Unzipping next entry: " + entry.getName());
                File entryDestination = new File(outputFolder, entry.getName());
                if (entry.isDirectory()) {
                    entryDestination.mkdirs();
                } else {
                    entryDestination.getParentFile().mkdirs();
                    // Use try-with-resources so both streams are closed even if the copy fails
                    try (InputStream in = zipFile.getInputStream(entry);
                            OutputStream out = new FileOutputStream(entryDestination)) {
                        IOUtils.copy(in, out);
                    }
                }
            }
        } finally {
            zipFile.close();
        }
        logger.log("SDK unzipped.");

        // GZIP all the sdk files individually
        logger.log("Gzip-ing sdk files to enable serving gzip-ed from S3");
        FileUtils.gzip(Arrays.asList(new File(outputFolder)), Collections.emptyList(), logger);
        logger.log("Gzip-ed sdk files to enable serving gzip-ed from S3");

        // Rev the files by appending the revving suffix to their names - for caching
        File sdkFolder = new File("/tmp/extractedSdk/apiGateway-js-sdk");
        FileUtils.appendRevvingSuffix(revvingSuffix, sdkFolder.toPath(), logger);

        // Upload the sdk from the temporary filesystem to S3.
        logger.log("Uploading unzipped Javascript SDK to S3 bucket: " + squashWebsiteBucket);
        TransferUtils.waitForS3Transfer(TransferManagerBuilder.defaultTransferManager()
                .uploadDirectory(squashWebsiteBucket, "", sdkFolder, true), logger);
        logger.log("Uploaded sdk successfully to S3");

        // Add gzip content-encoding metadata to zip-ed files
        logger.log("Updating gzip metadata on Javascript SDK in S3 bucket");
        TransferUtils.addGzipContentEncodingMetadata(squashWebsiteBucket, Optional.empty(), logger);
        logger.log("Updated gzip metadata on Javascript SDK in S3 bucket");

        // Add cache-control metadata to zip-ed files. js files will have
        // 1-year cache validity, since they are rev-ved.
        logger.log("Updating cache-control metadata on Javascript SDK in S3 bucket");
        TransferUtils.addCacheControlHeader("max-age=31536000", squashWebsiteBucket, Optional.empty(), ".js",
                logger);
        logger.log("Updated cache-control metadata on Javascript SDK in S3 bucket");

        logger.log("Setting public read permission on uploaded sdk");
        TransferUtils.setPublicReadPermissionsOnBucket(squashWebsiteBucket, Optional.empty(), logger);
        logger.log("Finished setting public read permissions on uploaded sdk");
    } catch (Exception e) {
        logger.log("Exception caught whilst copying Javascript SDK to S3: " + e.getMessage());
        throw e;
    }
}

From source file: squash.deployment.lambdas.NoScriptAppCustomResourceLambda.java

License: Apache License

/**
 * Implementation for the AWS Lambda function backing the NoScriptApp custom resource.
 *
 * <p>This lambda requires the following environment variables:
 * <ul>
 *    <li>SimpleDBDomainName - name of the simpleDB domain for bookings and booking rules.</li>
 *    <li>WebsiteBucket - name of S3 bucket serving the booking website.</li>
 *    <li>ApiGatewayBaseUrl - base Url of the ApiGateway Api.</li>
 *    <li>Region - the AWS region in which the Cloudformation stack is created.</li>
 *    <li>Revision - integer incremented to force stack updates to update this resource.</li>
 * </ul>
 * 
 * <p>On success, it returns the following output to Cloudformation:
 * <ul>
 *    <li>WebsiteURL - Url of the website's first booking page.</li>
 * </ul>
 *
 * @param request request parameters as provided by the CloudFormation service
 * @param context context as provided by the CloudFormation service
 */
@Override
public Object handleRequest(Map<String, Object> request, Context context) {

    LambdaLogger logger = context.getLogger();
    logger.log("Starting NoScriptApp custom resource handleRequest");

    // Handle standard request parameters
    Map<String, String> standardRequestParameters = LambdaInputLogger.logStandardRequestParameters(request,
            logger);
    String requestType = standardRequestParameters.get("RequestType");

    // Handle required environment variables
    logger.log("Logging environment variables required by custom resource request");

    String simpleDBDomainName = System.getenv("SimpleDBDomainName");
    String websiteBucket = System.getenv("WebsiteBucket");
    String apiGatewayBaseUrl = System.getenv("ApiGatewayBaseUrl");
    String region = System.getenv("AWS_REGION");
    String revision = System.getenv("Revision");

    // Log out our required environment variables
    logger.log("SimpleDBDomainName: " + simpleDBDomainName);
    logger.log("WebsiteBucket: " + websiteBucket);
    logger.log("ApiGatewayBaseUrl: " + apiGatewayBaseUrl);
    logger.log("Region: " + region);
    logger.log("Revision: " + revision);

    // API calls below can sometimes give access denied errors during stack
    // creation which I think is because required new roles have not yet propagated
    // across AWS. We sleep here to allow time for this propagation.
    try {
        Thread.sleep(10000);
    } catch (InterruptedException e) {
        logger.log("Sleep to allow new roles to propagate has been interrupted.");
    }

    // Prepare our response to be sent in the finally block
    CloudFormationResponder cloudFormationResponder = new CloudFormationResponder(standardRequestParameters,
            "DummyPhysicalResourceId");
    // Initialise failure response, which will be changed on success
    String responseStatus = "FAILED";

    String websiteURL = null;
    try {
        cloudFormationResponder.initialise();

        if (requestType.equals("Create") || requestType.equals("Update")) {

            // Upload 21 initial bookings pages and index page to the S3 bucket
            UpdateBookingsLambdaRequest updateBookingsLambdaRequest = new UpdateBookingsLambdaRequest();
            UpdateBookingsLambda updateBookingsLambda = new UpdateBookingsLambda();
            UpdateBookingsLambdaResponse updateBookingsLambdaResponse = updateBookingsLambda
                    .updateBookings(updateBookingsLambdaRequest, context);
            String firstDate = updateBookingsLambdaResponse.getCurrentDate();

            websiteURL = "http://" + websiteBucket + ".s3-website-" + region + ".amazonaws.com?selectedDate="
                    + firstDate + ".html";

        } else if (requestType.equals("Delete")) {
            logger.log("Delete request - so removing bookings pages from website versioned S3 bucket");

            // We need to delete every version of every key before the bucket itself
            // can be deleted
            ListVersionsRequest listVersionsRequest = new ListVersionsRequest().withBucketName(websiteBucket);
            VersionListing versionListing;

            AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
            do {
                versionListing = client.listVersions(listVersionsRequest);
                versionListing.getVersionSummaries().stream().filter(
                        // Maybe a bit slack, but '20' is to include e.g. 2015-10-04.html
                        k -> (k.getKey().startsWith("20") || k.getKey().equals("today.html"))).forEach(k -> {
                            logger.log("About to delete version: " + k.getVersionId() + " of booking page: "
                                    + k.getKey());
                            DeleteVersionRequest deleteVersionRequest = new DeleteVersionRequest(websiteBucket,
                                    k.getKey(), k.getVersionId());
                            client.deleteVersion(deleteVersionRequest);
                            logger.log("Successfully deleted version: " + k.getVersionId()
                                    + " of booking page: " + k.getKey());
                        });

                listVersionsRequest.setKeyMarker(versionListing.getNextKeyMarker());
            } while (versionListing.isTruncated());
            logger.log("Finished removing bookings pages from website S3 bucket");
        }

        responseStatus = "SUCCESS";
        return null;
    } catch (AmazonServiceException ase) {
        ExceptionUtils.logAmazonServiceException(ase, logger);
        return null;
    } catch (AmazonClientException ace) {
        ExceptionUtils.logAmazonClientException(ace, logger);
        return null;
    } catch (Exception e) {
        logger.log("Exception caught in NoScriptApp Lambda: " + e.getMessage());
        return null;
    } finally {
        // Send response to CloudFormation
        cloudFormationResponder.addKeyValueOutputsPair("WebsiteURL", websiteURL);
        cloudFormationResponder.sendResponse(responseStatus, logger);
    }
}

From source file: squash.deployment.lambdas.utils.S3TransferManager.java

License: Apache License

public S3TransferManager() {
    transferManager = TransferManagerBuilder.defaultTransferManager();
}
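
When the defaults are not appropriate (custom credentials, region, or retry policy), the builder's standard form accepts a preconfigured client. A sketch, with the region chosen purely as an example:

AmazonS3 s3 = AmazonS3ClientBuilder.standard().withRegion("eu-west-1").build();
TransferManager transferManager = TransferManagerBuilder.standard().withS3Client(s3).build();

With this form, shutdownNow(false) shuts down the manager's threads while leaving the shared client open for other callers.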

From source file: squash.deployment.lambdas.utils.TransferUtils.java

License: Apache License

/**
 * Sets public read permissions on content within an S3 bucket.
 *
 * <p>Web content served from an S3 bucket must have public read permissions.
 * 
 *    @param bucketName the bucket to apply the permissions to.
 *    @param prefix prefix within the bucket, beneath which to apply the permissions.
 *    @param logger a CloudwatchLogs logger.
 */
public static void setPublicReadPermissionsOnBucket(String bucketName, Optional<String> prefix,
        LambdaLogger logger) {
    // Ensure newly uploaded content has public read permission
    ListObjectsRequest listObjectsRequest;
    if (prefix.isPresent()) {
        logger.log("Setting public read permission on bucket: " + bucketName + " and prefix: " + prefix.get());
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(prefix.get());
    } else {
        logger.log("Setting public read permission on bucket: " + bucketName);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
    }

    ObjectListing objectListing;
    AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
    do {
        objectListing = client.listObjects(listObjectsRequest);
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            logger.log("Setting permissions for S3 object: " + objectSummary.getKey());
            client.setObjectAcl(bucketName, objectSummary.getKey(), CannedAccessControlList.PublicRead);
        }
        listObjectsRequest.setMarker(objectListing.getNextMarker());
    } while (objectListing.isTruncated());
    logger.log("Finished setting public read permissions");
}
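
Note that this approach relies on object ACLs: on buckets with S3 Block Public Access enabled or ACLs disabled (the default for new buckets since April 2023), the setObjectAcl calls will be rejected, and a bucket policy would be needed instead.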

From source file: squash.deployment.lambdas.utils.TransferUtils.java

License: Apache License

/**
 * Adds gzip content-encoding metadata to S3 objects.
 *
 * <p>Adds gzip content-encoding metadata to S3 objects. All objects
 *    beneath the specified prefix (i.e. folder) will have the
 *    metadata added. When the bucket serves objects it will then
 *    add a suitable Content-Encoding header.
 *
 *    @param bucketName the bucket to apply the metadata to.
 *    @param prefix prefix within the bucket, beneath which to apply the metadata.
 *    @param logger a CloudwatchLogs logger.
 */
public static void addGzipContentEncodingMetadata(String bucketName, Optional<String> prefix,
        LambdaLogger logger) {

    // To add new metadata, we must copy each object to itself.
    ListObjectsRequest listObjectsRequest;
    if (prefix.isPresent()) {
        logger.log("Setting gzip content encoding metadata on bucket: " + bucketName + " and prefix: "
                + prefix.get());
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(prefix.get());
    } else {
        logger.log("Setting gzip content encoding metadata on bucket: " + bucketName);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
    }

    ObjectListing objectListing;
    AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
    do {
        objectListing = client.listObjects(listObjectsRequest);
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            String key = objectSummary.getKey();
            logger.log("Setting metadata for S3 object: " + key);
            // We must specify ALL metadata - not just the one we're adding.
            ObjectMetadata objectMetadata = client.getObjectMetadata(bucketName, key);
            objectMetadata.setContentEncoding("gzip");
            CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucketName, key, bucketName, key)
                    .withNewObjectMetadata(objectMetadata)
                    .withCannedAccessControlList(CannedAccessControlList.PublicRead);
            client.copyObject(copyObjectRequest);
            logger.log("Set metadata for S3 object: " + key);
        }
        listObjectsRequest.setMarker(objectListing.getNextMarker());
    } while (objectListing.isTruncated());
    logger.log("Set gzip content encoding metadata on bucket");
}

From source file: squash.deployment.lambdas.utils.TransferUtils.java

License: Apache License

/**
 * Adds cache-control header to S3 objects.
 *
 * <p>Adds cache-control header to S3 objects. All objects
 *    beneath the specified prefix (i.e. folder), and with the
 *    specified extension will have the header added. When the
 *    bucket serves objects it will then add a suitable
 *    Cache-Control header.
 *
 *    @param headerValue value of the cache-control header
 *    @param bucketName the bucket to apply the header to.
 *    @param prefix prefix within the bucket, beneath which to apply the header.
 *    @param extension file extension to apply header to
 *    @param logger a CloudwatchLogs logger.
 */
public static void addCacheControlHeader(String headerValue, String bucketName, Optional<String> prefix,
        String extension, LambdaLogger logger) {

    // To add new metadata, we must copy each object to itself.
    ListObjectsRequest listObjectsRequest;
    if (prefix.isPresent()) {
        logger.log("Setting cache-control metadata: " + headerValue + ", on bucket: " + bucketName
                + " and prefix: " + prefix.get() + " and extension: " + extension);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(prefix.get());
    } else {
        logger.log("Setting cache-control metadata: " + headerValue + ", on bucket: " + bucketName
                + " and extension: " + extension);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
    }

    ObjectListing objectListing;
    AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
    do {
        objectListing = client.listObjects(listObjectsRequest);
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            String key = objectSummary.getKey();
            if (!key.endsWith(extension)) {
                continue;
            }
            logger.log("Setting metadata for S3 object: " + key);
            // We must specify ALL metadata - not just the one we're adding.
            ObjectMetadata objectMetadata = client.getObjectMetadata(bucketName, key);
            objectMetadata.setCacheControl(headerValue);
            CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucketName, key, bucketName, key)
                    .withNewObjectMetadata(objectMetadata)
                    .withCannedAccessControlList(CannedAccessControlList.PublicRead);
            client.copyObject(copyObjectRequest);
            logger.log("Set metadata for S3 object: " + key);
        }
        listObjectsRequest.setMarker(objectListing.getNextMarker());
    } while (objectListing.isTruncated());
    logger.log("Set cache-control metadata on bucket");
}