Example usage for com.amazonaws.services.s3.model StorageClass fromValue

Introduction

This page collects example usages of com.amazonaws.services.s3.model.StorageClass.fromValue.

Prototype

public static StorageClass fromValue(String s3StorageClassString) throws IllegalArgumentException 

Document

Returns the Amazon S3 StorageClass enumeration value representing the specified Amazon S3 StorageClass ID string.
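
Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them share: parse a configured string with fromValue and fall back to StorageClass.Standard when the value is not a valid storage class ID string. The StorageClassExample class and the resolve helper are hypothetical names used only for illustration; only StorageClass, its constants, and fromValue come from the AWS SDK for Java (v1).

import com.amazonaws.services.s3.model.StorageClass;

public class StorageClassExample {

    // Resolve a configured storage class string, falling back to STANDARD
    // when the value is not a valid Amazon S3 storage class ID.
    static StorageClass resolve(String value) {
        if (value == null || value.isEmpty()) {
            return StorageClass.Standard;
        }
        try {
            // fromValue expects the S3 ID string, e.g. "STANDARD" or "GLACIER"
            return StorageClass.fromValue(value);
        } catch (IllegalArgumentException e) {
            return StorageClass.Standard;
        }
    }

    public static void main(String[] args) {
        System.out.println(resolve("GLACIER"));     // resolves to StorageClass.Glacier
        System.out.println(resolve("not-a-class")); // falls back to StorageClass.Standard
    }
}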

Usage

From source file: com.liferay.portal.store.s3.S3Store.java

License: Open Source License

@Activate
protected void activate(Map<String, Object> properties) {
    _s3StoreConfiguration = ConfigurableUtil.createConfigurable(S3StoreConfiguration.class, properties);

    _awsCredentialsProvider = getAWSCredentialsProvider();

    _amazonS3 = getAmazonS3(_awsCredentialsProvider);

    _bucketName = _s3StoreConfiguration.bucketName();
    _transferManager = getTransferManager(_amazonS3);

    try {
        _storageClass = StorageClass.fromValue(_s3StoreConfiguration.s3StorageClass());
    } catch (IllegalArgumentException iae) {
        _storageClass = StorageClass.Standard;

        if (_log.isWarnEnabled()) {
            _log.warn(_s3StoreConfiguration.s3StorageClass() + " is not a valid value for the storage class",
                    iae);
        }
    }
}

From source file: com.upplication.s3fs.util.S3UploadRequest.java

License: Open Source License

public S3UploadRequest setStorageClass(String storageClass) {
    if (storageClass == null)
        return this;

    try {
        setStorageClass(StorageClass.fromValue(storageClass));
    } catch (IllegalArgumentException e) {
        log.warn("Not a valid AWS S3 storage class: `{}` -- Using default", storageClass);
    }
    return this;
}

From source file: gov.cdc.sdp.cbr.aphl.AphlS3Producer.java

License: Apache License

public void processMultiPart(final Exchange exchange) throws Exception {
    File filePayload = null;
    Object obj = exchange.getIn().getMandatoryBody();
    // Need to check if the message body is WrappedFile
    if (obj instanceof WrappedFile) {
        obj = ((WrappedFile<?>) obj).getFile();
    }
    if (obj instanceof File) {
        filePayload = (File) obj;
    } else {
        LOG.error("aphl-s3: MultiPart upload requires a File input.");
        throw new InvalidArgumentException("aphl-s3: MultiPart upload requires a File input.");
    }

    ObjectMetadata objectMetadata = determineMetadata(exchange);
    if (objectMetadata.getContentLength() == 0) {
        objectMetadata.setContentLength(filePayload.length());
    }

    final String keyName = determineKey(exchange);
    final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(
            getConfiguration().getBucketName(), keyName, objectMetadata);

    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        initRequest.setStorageClass(StorageClass.fromValue(storageClass));
    }

    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        initRequest.setCannedACL(objectAcl);
    }

    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if cannedAcl and acl are both specified, the last one set
        // wins; refer to PutObjectRequest#setAccessControlList for details
        initRequest.setAccessControlList(acl);
    }

    LOG.trace("Initiating multipart upload ...");

    final InitiateMultipartUploadResult initResponse = getEndpoint().getS3Client()
            .initiateMultipartUpload(initRequest);
    final long contentLength = objectMetadata.getContentLength();
    final List<PartETag> partETags = new ArrayList<PartETag>();
    long partSize = getConfiguration().getPartSize();
    CompleteMultipartUploadResult uploadResult = null;

    long filePosition = 0;

    try {
        for (int part = 1; filePosition < contentLength; part++) {
            partSize = Math.min(partSize, contentLength - filePosition);

            UploadPartRequest uploadRequest = new UploadPartRequest()
                    .withBucketName(getConfiguration().getBucketName()).withKey(keyName)
                    .withUploadId(initResponse.getUploadId()).withPartNumber(part).withFileOffset(filePosition)
                    .withFile(filePayload).withPartSize(partSize);

            partETags.add(getEndpoint().getS3Client().uploadPart(uploadRequest).getPartETag());

            filePosition += partSize;
        }
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                getConfiguration().getBucketName(), keyName, initResponse.getUploadId(), partETags);

        uploadResult = getEndpoint().getS3Client().completeMultipartUpload(compRequest);

    } catch (Exception exception) {
        LOG.error("Multi-part upload failed, aborting", exception);
        getEndpoint().getS3Client().abortMultipartUpload(new AbortMultipartUploadRequest(
                getConfiguration().getBucketName(), keyName, initResponse.getUploadId()));
        throw exception;
    }

    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, uploadResult.getETag());
    if (uploadResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, uploadResult.getVersionId());
    }

    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        FileUtil.deleteFile(filePayload);
    }
}

From source file: io.druid.storage.s3.S3DataSegmentMover.java

License: Apache License

/**
 * Copies an object and then checks, via a separate API call, that the object is present at the target
 * location. If it is not, an exception is thrown and the object is not deleted from the old location.
 * This "paranoid" check was added after it was observed that S3 may report a successful move even though
 * the object is not found at the target location.
 */
private void selfCheckingMove(String s3Bucket, String targetS3Bucket, String s3Path, String targetS3Path,
        String copyMsg) throws IOException, SegmentLoadingException {
    if (s3Bucket.equals(targetS3Bucket) && s3Path.equals(targetS3Path)) {
        log.info("No need to move file[s3://%s/%s] onto itself", s3Bucket, s3Path);
        return;
    }
    if (s3Client.doesObjectExist(s3Bucket, s3Path)) {
        final ListObjectsV2Result listResult = s3Client.listObjectsV2(
                new ListObjectsV2Request().withBucketName(s3Bucket).withPrefix(s3Path).withMaxKeys(1));
        if (listResult.getKeyCount() == 0) {
            // should never happen
            throw new ISE("Unable to list object [s3://%s/%s]", s3Bucket, s3Path);
        }
        final S3ObjectSummary objectSummary = listResult.getObjectSummaries().get(0);
        if (objectSummary.getStorageClass() != null
                && StorageClass.fromValue(StringUtils.toUpperCase(objectSummary.getStorageClass()))
                        .equals(StorageClass.Glacier)) {
            throw new AmazonServiceException(StringUtils.format(
                    "Cannot move file[s3://%s/%s] of storage class glacier, skipping.", s3Bucket, s3Path));
        } else {
            log.info("Moving file %s", copyMsg);
            final CopyObjectRequest copyRequest = new CopyObjectRequest(s3Bucket, s3Path, targetS3Bucket,
                    targetS3Path);
            if (!config.getDisableAcl()) {
                copyRequest
                        .setAccessControlList(S3Utils.grantFullControlToBucketOwner(s3Client, targetS3Bucket));
            }
            s3Client.copyObject(copyRequest);
            if (!s3Client.doesObjectExist(targetS3Bucket, targetS3Path)) {
                throw new IOE(
                        "After copy was reported as successful the file doesn't exist in the target location [%s]",
                        copyMsg);
            }
            deleteWithRetriesSilent(s3Bucket, s3Path);
            log.debug("Finished moving file %s", copyMsg);
        }
    } else {
        // ensure object exists in target location
        if (s3Client.doesObjectExist(targetS3Bucket, targetS3Path)) {
            log.info("Not moving file [s3://%s/%s], already present in target location [s3://%s/%s]", s3Bucket,
                    s3Path, targetS3Bucket, targetS3Path);
        } else {
            throw new SegmentLoadingException(
                    "Unable to move file %s, not present in either source or target location", copyMsg);
        }
    }
}

From source file: io.konig.camel.aws.s3.DeleteObjectProducer.java

License: Apache License

public void processMultiPart(final Exchange exchange) throws Exception {
    File filePayload = null;
    Object obj = exchange.getIn().getMandatoryBody();
    // Need to check if the message body is WrappedFile
    if (obj instanceof WrappedFile) {
        obj = ((WrappedFile<?>) obj).getFile();
    }
    if (obj instanceof File) {
        filePayload = (File) obj;
    } else {
        throw new InvalidArgumentException("aws-s3: MultiPart upload requires a File input.");
    }

    ObjectMetadata objectMetadata = determineMetadata(exchange);
    if (objectMetadata.getContentLength() == 0) {
        objectMetadata.setContentLength(filePayload.length());
    }

    final String keyName = determineKey(exchange);
    final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(
            getConfiguration().getBucketName(), keyName, objectMetadata);

    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        initRequest.setStorageClass(StorageClass.fromValue(storageClass));
    }

    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        initRequest.setCannedACL(objectAcl);
    }

    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if cannedAcl and acl are both specified, the last one set
        // wins; refer to PutObjectRequest#setAccessControlList for details
        initRequest.setAccessControlList(acl);
    }

    if (getConfiguration().isUseAwsKMS()) {
        SSEAwsKeyManagementParams keyManagementParams;
        if (ObjectHelper.isNotEmpty(getConfiguration().getAwsKMSKeyId())) {
            keyManagementParams = new SSEAwsKeyManagementParams(getConfiguration().getAwsKMSKeyId());
        } else {
            keyManagementParams = new SSEAwsKeyManagementParams();
        }
        initRequest.setSSEAwsKeyManagementParams(keyManagementParams);
    }

    LOG.trace("Initiating multipart upload [{}] from exchange [{}]...", initRequest, exchange);

    final InitiateMultipartUploadResult initResponse = getEndpoint().getS3Client()
            .initiateMultipartUpload(initRequest);
    final long contentLength = objectMetadata.getContentLength();
    final List<PartETag> partETags = new ArrayList<PartETag>();
    long partSize = getConfiguration().getPartSize();
    CompleteMultipartUploadResult uploadResult = null;

    long filePosition = 0;

    try {
        for (int part = 1; filePosition < contentLength; part++) {
            partSize = Math.min(partSize, contentLength - filePosition);

            UploadPartRequest uploadRequest = new UploadPartRequest()
                    .withBucketName(getConfiguration().getBucketName()).withKey(keyName)
                    .withUploadId(initResponse.getUploadId()).withPartNumber(part).withFileOffset(filePosition)
                    .withFile(filePayload).withPartSize(partSize);

            LOG.trace("Uploading part [{}] for {}", part, keyName);
            partETags.add(getEndpoint().getS3Client().uploadPart(uploadRequest).getPartETag());

            filePosition += partSize;
        }
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                getConfiguration().getBucketName(), keyName, initResponse.getUploadId(), partETags);

        uploadResult = getEndpoint().getS3Client().completeMultipartUpload(compRequest);

    } catch (Exception e) {
        getEndpoint().getS3Client().abortMultipartUpload(new AbortMultipartUploadRequest(
                getConfiguration().getBucketName(), keyName, initResponse.getUploadId()));
        throw e;
    }

    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, uploadResult.getETag());
    if (uploadResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, uploadResult.getVersionId());
    }

    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        FileUtil.deleteFile(filePayload);
    }
}

From source file: org.apache.druid.storage.s3.S3DataSegmentMover.java

License: Apache License

/**
 * Copies an object and then checks, via a separate API call, that the object is present at the target
 * location. If it is not, an exception is thrown and the object is not deleted from the old location.
 * This "paranoid" check was added after it was observed that S3 may report a successful move even though
 * the object is not found at the target location.
 */
private void selfCheckingMove(String s3Bucket, String targetS3Bucket, String s3Path, String targetS3Path,
        String copyMsg) throws IOException, SegmentLoadingException {
    if (s3Bucket.equals(targetS3Bucket) && s3Path.equals(targetS3Path)) {
        log.info("No need to move file[s3://%s/%s] onto itself", s3Bucket, s3Path);
        return;
    }
    if (s3Client.doesObjectExist(s3Bucket, s3Path)) {
        final ListObjectsV2Result listResult = s3Client.listObjectsV2(
                new ListObjectsV2Request().withBucketName(s3Bucket).withPrefix(s3Path).withMaxKeys(1));
        // Use getObjectSummaries().size() instead of getKeyCount(): in some
        // cases getObjectSummaries() returns data while the key count is
        // still zero.
        if (listResult.getObjectSummaries().size() == 0) {
            // should never happen
            throw new ISE("Unable to list object [s3://%s/%s]", s3Bucket, s3Path);
        }
        final S3ObjectSummary objectSummary = listResult.getObjectSummaries().get(0);
        if (objectSummary.getStorageClass() != null
                && StorageClass.fromValue(StringUtils.toUpperCase(objectSummary.getStorageClass()))
                        .equals(StorageClass.Glacier)) {
            throw new AmazonServiceException(StringUtils.format(
                    "Cannot move file[s3://%s/%s] of storage class glacier, skipping.", s3Bucket, s3Path));
        } else {
            log.info("Moving file %s", copyMsg);
            final CopyObjectRequest copyRequest = new CopyObjectRequest(s3Bucket, s3Path, targetS3Bucket,
                    targetS3Path);
            if (!config.getDisableAcl()) {
                copyRequest
                        .setAccessControlList(S3Utils.grantFullControlToBucketOwner(s3Client, targetS3Bucket));
            }
            s3Client.copyObject(copyRequest);
            if (!s3Client.doesObjectExist(targetS3Bucket, targetS3Path)) {
                throw new IOE(
                        "After copy was reported as successful the file doesn't exist in the target location [%s]",
                        copyMsg);
            }
            deleteWithRetriesSilent(s3Bucket, s3Path);
            log.debug("Finished moving file %s", copyMsg);
        }
    } else {
        // ensure object exists in target location
        if (s3Client.doesObjectExist(targetS3Bucket, targetS3Path)) {
            log.info("Not moving file [s3://%s/%s], already present in target location [s3://%s/%s]", s3Bucket,
                    s3Path, targetS3Bucket, targetS3Path);
        } else {
            throw new SegmentLoadingException(
                    "Unable to move file %s, not present in either source or target location", copyMsg);
        }
    }
}

From source file: org.duracloud.s3task.storage.SetStoragePolicyTaskRunner.java

License: Apache License

@Override
public String performTask(String taskParameters) {
    SetStoragePolicyTaskParameters taskParams = SetStoragePolicyTaskParameters.deserialize(taskParameters);

    // Get parameters
    String spaceId = taskParams.getSpaceId();
    int daysToTransition = taskParams.getDaysToTransition();
    StorageClass storageClass;
    try {
        storageClass = StorageClass.fromValue(taskParams.getStorageClass());
    } catch (IllegalArgumentException e) {
        throw new RuntimeException(
                "Cannot set storage policy due to invalid " + "storage class. The valid storage class "
                        + "options are: " + Arrays.asList(StorageClass.values()));
    }

    // Will throw if bucket does not exist
    String bucketName = unwrappedS3Provider.getBucketName(spaceId);

    // Set bucket lifecycle policy
    StoragePolicy storagePolicy = new StoragePolicy(storageClass, daysToTransition);
    unwrappedS3Provider.setSpaceLifecycle(bucketName, storagePolicy.getBucketLifecycleConfig());

    return "Successfully set storage policy on space " + spaceId + " to move content to " + storageClass.name()
            + " after " + daysToTransition + " days";
}

From source file: org.elasticsearch.repositories.s3.S3BlobStore.java

License: Apache License

public static StorageClass initStorageClass(String storageClass) {
    if (storageClass == null || storageClass.equals("")) {
        return StorageClass.Standard;
    }

    try {
        StorageClass _storageClass = StorageClass.fromValue(storageClass.toUpperCase(Locale.ENGLISH));
        if (_storageClass.equals(StorageClass.Glacier)) {
            throw new BlobStoreException("Glacier storage class is not supported");
        }

        return _storageClass;
    } catch (IllegalArgumentException illegalArgumentException) {
        throw new BlobStoreException("`" + storageClass + "` is not a valid S3 Storage Class.");
    }
}