Example usage for com.amazonaws.services.s3.model StorageClass Glacier

Introduction

On this page you can find example usage for com.amazonaws.services.s3.model StorageClass.Glacier.

Prototype

StorageClass Glacier

Document

The Amazon Glacier storage class.
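
As a minimal orientation sketch (not taken from the examples below; the client setup, bucket, and key are placeholder assumptions), the constant is typically compared against an object's storage class, which the SDK reports as a raw string:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.StorageClass;

public class GlacierStorageClassCheck {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // The storage class comes back as a raw string and may be null for
        // STANDARD objects, so guard before converting it to the enum.
        ObjectMetadata metadata = s3.getObjectMetadata("example-bucket", "example-key");
        String rawStorageClass = metadata.getStorageClass();
        boolean archivedInGlacier = rawStorageClass != null
                && StorageClass.Glacier.equals(StorageClass.fromValue(rawStorageClass));
        System.out.println("Archived in Glacier: " + archivedInGlacier);
    }
}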

Usage

From source file: cloudExplorer.BucketTransitionGlacier.java

License: Open Source License

public void run() {
    AWSCredentials credentials = new BasicAWSCredentials(access_key, secret_key);
    AmazonS3 s3Client = new AmazonS3Client(credentials,
            new ClientConfiguration().withSignerOverride("S3SignerType"));
    s3Client.setEndpoint(endpoint);
    int converted_days = 0;
    if (!disabled) {
        converted_days = Integer.parseInt(days);
    }

    Transition transToArchive = new Transition().withDays(converted_days)
            .withStorageClass(StorageClass.Glacier);

    BucketLifecycleConfiguration.Rule ruleArchiveAndExpire = null;
    if (!disabled) {
        ruleArchiveAndExpire = new BucketLifecycleConfiguration.Rule().withPrefix(prefix)
                .withTransition(transToArchive)
                // .withExpirationInDays(converted_days + 1)
                .withStatus(BucketLifecycleConfiguration.ENABLED.toString());
    } else {
        ruleArchiveAndExpire = new BucketLifecycleConfiguration.Rule().withPrefix(prefix)
                .withTransition(transToArchive)
                //.withExpirationInDays(100)
                .withStatus(BucketLifecycleConfiguration.DISABLED.toString());
    }
    List<BucketLifecycleConfiguration.Rule> rules = new ArrayList<BucketLifecycleConfiguration.Rule>();
    rules.add(ruleArchiveAndExpire);

    try {
        BucketLifecycleConfiguration configuration = new BucketLifecycleConfiguration().withRules(rules);
        s3Client.setBucketLifecycleConfiguration(bucket, configuration);
    } catch (Exception e) {
        mainFrame.jTextArea1.append("\n" + e.getMessage());
    }
    if (!disabled) {
        mainFrame.jTextArea1.append("\nSent request to set bucket life cycle to tier to Glacier after: "
                + converted_days + " day(s). Please observe for any errors.");
    } else {
        mainFrame.jTextArea1
                .append("\nSent request to disable the bucket life cycle. Please observe for any errors.");
    }
    calibrate();
}

From source file: edu.iit.s3bucket.S3Bucket.java

public void setRules() {
    Transition transToArchive = new Transition().withDays(365).withStorageClass(StorageClass.Glacier);

    BucketLifecycleConfiguration.Rule ruleArchiveAndExpire = new BucketLifecycleConfiguration.Rule()
            .withId("Archive and delete rule").withTransition(transToArchive).withExpirationInDays(3650)
            .withStatus(BucketLifecycleConfiguration.ENABLED.toString());

    List<BucketLifecycleConfiguration.Rule> rules = new ArrayList<BucketLifecycleConfiguration.Rule>();
    rules.add(ruleArchiveAndExpire);

    BucketLifecycleConfiguration configuration = new BucketLifecycleConfiguration().withRules(rules);

    // Save configuration.
    s3client.setBucketLifecycleConfiguration(this.bucketname, configuration);
}
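
As a hedged follow-up sketch (reusing the s3client and bucketname fields assumed by setRules() above), the configuration can be read back to confirm the Glacier transition rule was stored:

// Read back the lifecycle configuration to verify the rule was saved.
// getBucketLifecycleConfiguration returns null when no configuration exists.
BucketLifecycleConfiguration stored = s3client.getBucketLifecycleConfiguration(this.bucketname);
if (stored != null) {
    for (BucketLifecycleConfiguration.Rule rule : stored.getRules()) {
        System.out.println(rule.getId() + ": " + rule.getStatus());
    }
}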

From source file: io.druid.storage.s3.S3DataSegmentMover.java

License: Apache License

/**
 * Copies an object and then checks, via a separate API call, that the object is present at the target location.
 * If it is not, an exception is thrown and the object is not deleted from the old location. This "paranoid"
 * check was added after S3 was observed to report a successful move even though the object could not be found
 * at the target location.
 */
private void selfCheckingMove(String s3Bucket, String targetS3Bucket, String s3Path, String targetS3Path,
        String copyMsg) throws IOException, SegmentLoadingException {
    if (s3Bucket.equals(targetS3Bucket) && s3Path.equals(targetS3Path)) {
        log.info("No need to move file[s3://%s/%s] onto itself", s3Bucket, s3Path);
        return;
    }
    if (s3Client.doesObjectExist(s3Bucket, s3Path)) {
        final ListObjectsV2Result listResult = s3Client.listObjectsV2(
                new ListObjectsV2Request().withBucketName(s3Bucket).withPrefix(s3Path).withMaxKeys(1));
        if (listResult.getKeyCount() == 0) {
            // should never happen
            throw new ISE("Unable to list object [s3://%s/%s]", s3Bucket, s3Path);
        }
        final S3ObjectSummary objectSummary = listResult.getObjectSummaries().get(0);
        if (objectSummary.getStorageClass() != null
                && StorageClass.fromValue(StringUtils.toUpperCase(objectSummary.getStorageClass()))
                        .equals(StorageClass.Glacier)) {
            throw new AmazonServiceException(StringUtils.format(
                    "Cannot move file[s3://%s/%s] of storage class glacier, skipping.", s3Bucket, s3Path));
        } else {
            log.info("Moving file %s", copyMsg);
            final CopyObjectRequest copyRequest = new CopyObjectRequest(s3Bucket, s3Path, targetS3Bucket,
                    targetS3Path);
            if (!config.getDisableAcl()) {
                copyRequest
                        .setAccessControlList(S3Utils.grantFullControlToBucketOwner(s3Client, targetS3Bucket));
            }
            s3Client.copyObject(copyRequest);
            if (!s3Client.doesObjectExist(targetS3Bucket, targetS3Path)) {
                throw new IOE(
                        "After copy was reported as successful the file doesn't exist in the target location [%s]",
                        copyMsg);
            }
            deleteWithRetriesSilent(s3Bucket, s3Path);
            log.debug("Finished moving file %s", copyMsg);
        }
    } else {
        // ensure object exists in target location
        if (s3Client.doesObjectExist(targetS3Bucket, targetS3Path)) {
            log.info("Not moving file [s3://%s/%s], already present in target location [s3://%s/%s]", s3Bucket,
                    s3Path, targetS3Bucket, targetS3Path);
        } else {
            throw new SegmentLoadingException(
                    "Unable to move file %s, not present in either source or target location", copyMsg);
        }
    }
}

From source file: org.apache.druid.storage.s3.S3DataSegmentMover.java

License: Apache License

/**
 * Copies an object and then checks, via a separate API call, that the object is present at the target location.
 * If it is not, an exception is thrown and the object is not deleted from the old location. This "paranoid"
 * check was added after S3 was observed to report a successful move even though the object could not be found
 * at the target location.
 */
private void selfCheckingMove(String s3Bucket, String targetS3Bucket, String s3Path, String targetS3Path,
        String copyMsg) throws IOException, SegmentLoadingException {
    if (s3Bucket.equals(targetS3Bucket) && s3Path.equals(targetS3Path)) {
        log.info("No need to move file[s3://%s/%s] onto itself", s3Bucket, s3Path);
        return;
    }
    if (s3Client.doesObjectExist(s3Bucket, s3Path)) {
        final ListObjectsV2Result listResult = s3Client.listObjectsV2(
                new ListObjectsV2Request().withBucketName(s3Bucket).withPrefix(s3Path).withMaxKeys(1));
        // Use getObjectSummaries().size() instead of getKeyCount() because, in some cases,
        // getObjectSummaries() has been observed to return data even though the reported
        // key count is still zero.
        if (listResult.getObjectSummaries().size() == 0) {
            // should never happen
            throw new ISE("Unable to list object [s3://%s/%s]", s3Bucket, s3Path);
        }
        final S3ObjectSummary objectSummary = listResult.getObjectSummaries().get(0);
        if (objectSummary.getStorageClass() != null
                && StorageClass.fromValue(StringUtils.toUpperCase(objectSummary.getStorageClass()))
                        .equals(StorageClass.Glacier)) {
            throw new AmazonServiceException(StringUtils.format(
                    "Cannot move file[s3://%s/%s] of storage class glacier, skipping.", s3Bucket, s3Path));
        } else {
            log.info("Moving file %s", copyMsg);
            final CopyObjectRequest copyRequest = new CopyObjectRequest(s3Bucket, s3Path, targetS3Bucket,
                    targetS3Path);
            if (!config.getDisableAcl()) {
                copyRequest
                        .setAccessControlList(S3Utils.grantFullControlToBucketOwner(s3Client, targetS3Bucket));
            }
            s3Client.copyObject(copyRequest);
            if (!s3Client.doesObjectExist(targetS3Bucket, targetS3Path)) {
                throw new IOE(
                        "After copy was reported as successful the file doesn't exist in the target location [%s]",
                        copyMsg);
            }
            deleteWithRetriesSilent(s3Bucket, s3Path);
            log.debug("Finished moving file %s", copyMsg);
        }
    } else {
        // ensure object exists in target location
        if (s3Client.doesObjectExist(targetS3Bucket, targetS3Path)) {
            log.info("Not moving file [s3://%s/%s], already present in target location [s3://%s/%s]", s3Bucket,
                    s3Path, targetS3Bucket, targetS3Path);
        } else {
            throw new SegmentLoadingException(
                    "Unable to move file %s, not present in either source or target location", copyMsg);
        }
    }
}

From source file: org.duracloud.glacierstorage.GlacierStorageProvider.java

License: Apache License

@Override
protected StoragePolicy getStoragePolicy() {
    return new StoragePolicy(StorageClass.Glacier, 0);
}

From source file: org.elasticsearch.repositories.s3.S3BlobStore.java

License: Apache License

public static StorageClass initStorageClass(String storageClass) {
    if (storageClass == null || storageClass.equals("")) {
        return StorageClass.Standard;
    }

    try {
        StorageClass _storageClass = StorageClass.fromValue(storageClass.toUpperCase(Locale.ENGLISH));
        if (_storageClass.equals(StorageClass.Glacier)) {
            throw new BlobStoreException("Glacier storage class is not supported");
        }

        return _storageClass;
    } catch (IllegalArgumentException illegalArgumentException) {
        throw new BlobStoreException("`" + storageClass + "` is not a valid S3 Storage Class.");
    }
}
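
A brief usage sketch for initStorageClass (the call sites are hypothetical; the accepted strings are the S3 storage class names understood by StorageClass.fromValue, matched case-insensitively):

StorageClass defaulted = initStorageClass(null);           // null or "" falls back to StorageClass.Standard
StorageClass infrequent = initStorageClass("standard_ia"); // parsed case-insensitively to StandardInfrequentAccess
initStorageClass("glacier");                               // throws BlobStoreException: Glacier is rejected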

From source file: org.finra.herd.dao.impl.MockS3OperationsImpl.java

License: Apache License

@Override
public void restoreObject(RestoreObjectRequest requestRestore, AmazonS3 s3Client) {
    if (requestRestore.getKey().endsWith(MockAwsOperationsHelper.AMAZON_THROTTLING_EXCEPTION)) {
        AmazonServiceException throttlingException = new AmazonServiceException("test throttling exception");
        throttlingException.setErrorCode("ThrottlingException");
        throw throttlingException;
    } else if (MOCK_S3_BUCKET_NAME_NO_SUCH_BUCKET_EXCEPTION.equals(requestRestore.getBucketName())) {
        AmazonServiceException amazonServiceException = new AmazonServiceException(
                S3Operations.ERROR_CODE_NO_SUCH_BUCKET);
        amazonServiceException.setStatusCode(404);
        throw amazonServiceException;
    } else if (MOCK_S3_BUCKET_NAME_ACCESS_DENIED.equals(requestRestore.getBucketName())) {
        AmazonServiceException amazonServiceException = new AmazonServiceException(
                S3Operations.ERROR_CODE_ACCESS_DENIED);
        amazonServiceException.setStatusCode(403);
        throw amazonServiceException;
    } else if (MOCK_S3_BUCKET_NAME_INTERNAL_ERROR.equals(requestRestore.getBucketName())
            || requestRestore.getKey().endsWith(MOCK_S3_FILE_NAME_SERVICE_EXCEPTION)) {
        throw new AmazonServiceException(S3Operations.ERROR_CODE_INTERNAL_ERROR);
    } else {
        MockS3Bucket mockS3Bucket = getOrCreateBucket(requestRestore.getBucketName());
        MockS3Object mockS3Object = mockS3Bucket.getObjects().get(requestRestore.getKey());

        if (mockS3Object == null) {
            AmazonServiceException amazonServiceException = new AmazonServiceException(
                    S3Operations.ERROR_CODE_NO_SUCH_KEY);
            amazonServiceException.setStatusCode(404);
            throw amazonServiceException;
        }

        // Get object metadata.
        ObjectMetadata objectMetadata = mockS3Object.getObjectMetadata();

        // Fail if the object is not in Glacier.
        if (!StorageClass.Glacier.toString().equals(objectMetadata.getStorageClass())) {
            AmazonServiceException amazonServiceException = new AmazonServiceException(
                    "object is not in Glacier");
            throw amazonServiceException;
        }

        // Fail if the object is already being restored.
        if (objectMetadata.getOngoingRestore()) {
            AmazonServiceException amazonServiceException = new AmazonServiceException(
                    "object is already being restored");
            throw amazonServiceException;
        }

        // Update the object metadata to indicate that there is an ongoing restore request.
        objectMetadata.setOngoingRestore(true);
    }
}
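
For contrast with the mock above, a hedged sketch of the real request it simulates (bucket name, key, and retention period are placeholders; the Glacier retrieval tier parameter is optional):

// Initiate a Glacier restore that keeps the temporary restored copy
// available for 7 days, using the standard retrieval tier.
RestoreObjectRequest restoreRequest = new RestoreObjectRequest("example-bucket", "archived-key", 7)
        .withGlacierJobParameters(new GlacierJobParameters().withTier("Standard"));
s3Client.restoreObject(restoreRequest);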

From source file: org.finra.herd.service.impl.BusinessObjectDataFinalizeRestoreHelperServiceImpl.java

License: Apache License

/**
 * Executes S3 specific steps for the business object data finalize restore.
 *
 * @param businessObjectDataRestoreDto the DTO that holds various parameters needed to perform a business object data restore
 */
protected void executeS3SpecificStepsImpl(BusinessObjectDataRestoreDto businessObjectDataRestoreDto) {
    // Create an S3 file transfer parameters DTO to access the S3 bucket.
    // Since the S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = storageHelper
            .getS3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(businessObjectDataRestoreDto.getS3BucketName());
    s3FileTransferRequestParamsDto.setS3Endpoint(businessObjectDataRestoreDto.getS3Endpoint());
    s3FileTransferRequestParamsDto
            .setS3KeyPrefix(StringUtils.appendIfMissing(businessObjectDataRestoreDto.getS3KeyPrefix(), "/"));

    // Get actual S3 files by selecting all S3 keys matching the S3 key prefix from the S3 bucket.
    // When listing S3 files, we ignore 0 byte objects that represent S3 directories.
    List<S3ObjectSummary> actualS3Files = s3Service.listDirectory(s3FileTransferRequestParamsDto, true);

    // Validate existence and file size of the S3 files.
    storageFileHelper.validateRegisteredS3Files(businessObjectDataRestoreDto.getStorageFiles(), actualS3Files,
            businessObjectDataRestoreDto.getStorageName(),
            businessObjectDataRestoreDto.getBusinessObjectDataKey());

    // Build a list of files to check for restore status by selecting only objects that are currently archived in Glacier (have the Glacier storage class).
    List<S3ObjectSummary> glacierS3Files = new ArrayList<>();
    for (S3ObjectSummary s3ObjectSummary : actualS3Files) {
        if (StorageClass.Glacier.toString().equals(s3ObjectSummary.getStorageClass())) {
            glacierS3Files.add(s3ObjectSummary);
        }
    }

    // Validate that all Glacier storage class S3 files are now restored.
    s3FileTransferRequestParamsDto.setFiles(storageFileHelper
            .getFiles(storageFileHelper.createStorageFilesFromS3ObjectSummaries(glacierS3Files)));
    s3Service.validateGlacierS3FilesRestored(s3FileTransferRequestParamsDto);
}

From source file: org.finra.herd.service.impl.BusinessObjectDataInitiateRestoreHelperServiceImpl.java

License: Apache License

/**
 * Executes S3 specific steps for the initiation of a business object data restore request. The method also updates the specified DTO.
 *
 * @param businessObjectDataRestoreDto the DTO that holds various parameters needed to perform a business object data restore
 */
protected void executeS3SpecificStepsImpl(BusinessObjectDataRestoreDto businessObjectDataRestoreDto) {
    try {
        // Create an S3 file transfer parameters DTO to access the S3 bucket.
        // Since the S3 key prefix represents a directory, we add a trailing '/' character to it.
        S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = storageHelper
                .getS3FileTransferRequestParamsDto();
        s3FileTransferRequestParamsDto.setS3BucketName(businessObjectDataRestoreDto.getS3BucketName());
        s3FileTransferRequestParamsDto.setS3Endpoint(businessObjectDataRestoreDto.getS3Endpoint());
        s3FileTransferRequestParamsDto.setS3KeyPrefix(
                StringUtils.appendIfMissing(businessObjectDataRestoreDto.getS3KeyPrefix(), "/"));

        // Get a list of S3 files matching the S3 key prefix. When listing S3 files, we ignore 0 byte objects that represent S3 directories.
        List<S3ObjectSummary> actualS3Files = s3Service.listDirectory(s3FileTransferRequestParamsDto, true);

        // Validate existence and file size of the S3 files.
        storageFileHelper.validateRegisteredS3Files(businessObjectDataRestoreDto.getStorageFiles(),
                actualS3Files, businessObjectDataRestoreDto.getStorageName(),
                businessObjectDataRestoreDto.getBusinessObjectDataKey());

        // Validate that all files to be restored are currently archived in Glacier (have the Glacier storage class).
        // Fail on any S3 file that does not have the Glacier storage class. This can happen when a request to restore
        // business object data is posted after the business object data archiving transition is executed (the relative
        // S3 objects get tagged), but before AWS actually transitions the S3 files to Glacier (changes the S3 object
        // storage class to Glacier).
        for (S3ObjectSummary s3ObjectSummary : actualS3Files) {
            if (!StringUtils.equals(s3ObjectSummary.getStorageClass(), StorageClass.Glacier.toString())) {
                throw new IllegalArgumentException(String.format(
                        "S3 file \"%s\" is not archived (found %s storage class when expecting %s). S3 Bucket Name: \"%s\"",
                        s3ObjectSummary.getKey(), s3ObjectSummary.getStorageClass(),
                        StorageClass.Glacier.toString(), s3FileTransferRequestParamsDto.getS3BucketName()));
            }
        }

        // Set a list of files to restore.
        s3FileTransferRequestParamsDto.setFiles(storageFileHelper
                .getFiles(storageFileHelper.createStorageFilesFromS3ObjectSummaries(actualS3Files)));

        // Initiate restore requests for the list of objects in the Glacier bucket.
        // TODO: Make "expirationInDays" value configurable with default value set to 99 years (36135 days).
        s3Service.restoreObjects(s3FileTransferRequestParamsDto, 36135,
                businessObjectDataRestoreDto.getArchiveRetrievalOption());
    } catch (RuntimeException e) {
        // Log the exception.
        LOGGER.error(
                "Failed to initiate a restore request for the business object data. businessObjectDataKey={}",
                jsonHelper.objectToJson(businessObjectDataRestoreDto.getBusinessObjectDataKey()), e);

        // Update the DTO with the caught exception.
        businessObjectDataRestoreDto.setException(e);
    }
}

From source file: org.finra.herd.service.impl.ExpireRestoredBusinessObjectDataHelperServiceImpl.java

License: Apache License

/**
 * Executes S3 specific steps required to expire business object data.
 *
 * @param businessObjectDataRestoreDto the DTO that holds various parameters needed to expire business object data
 */
protected void executeS3SpecificStepsImpl(BusinessObjectDataRestoreDto businessObjectDataRestoreDto) {
    // Create an S3 file transfer parameters DTO to access the S3 bucket.
    // Since the S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = storageHelper
            .getS3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3Endpoint(businessObjectDataRestoreDto.getS3Endpoint());
    s3FileTransferRequestParamsDto.setS3BucketName(businessObjectDataRestoreDto.getS3BucketName());
    s3FileTransferRequestParamsDto
            .setS3KeyPrefix(StringUtils.appendIfMissing(businessObjectDataRestoreDto.getS3KeyPrefix(), "/"));

    // Get a list of S3 files matching the S3 key prefix. When listing S3 files, we ignore 0 byte objects that represent S3 directories.
    List<S3ObjectSummary> actualS3Files = s3Service.listDirectory(s3FileTransferRequestParamsDto, true);

    // Validate existence and file size of the S3 files.
    storageFileHelper.validateRegisteredS3Files(businessObjectDataRestoreDto.getStorageFiles(), actualS3Files,
            businessObjectDataRestoreDto.getStorageName(),
            businessObjectDataRestoreDto.getBusinessObjectDataKey());

    // Build a list of files to expire by selecting only objects that have the Glacier storage class.
    List<S3ObjectSummary> glacierS3Files = new ArrayList<>();
    for (S3ObjectSummary s3ObjectSummary : actualS3Files) {
        if (StorageClass.Glacier.toString().equals(s3ObjectSummary.getStorageClass())) {
            glacierS3Files.add(s3ObjectSummary);
        }
    }

    // Set a list of files to expire.
    s3FileTransferRequestParamsDto.setFiles(storageFileHelper
            .getFiles(storageFileHelper.createStorageFilesFromS3ObjectSummaries(glacierS3Files)));

    // To expire the restored S3 objects, initiate restore requests with expiration set to 1 day.
    s3Service.restoreObjects(s3FileTransferRequestParamsDto, 1, null);
}