Example usage for com.amazonaws.services.s3.model StorageClass Standard

List of usage examples for com.amazonaws.services.s3.model StorageClass Standard

Introduction

On this page you can find example usage for com.amazonaws.services.s3.model StorageClass Standard.

Prototype

StorageClass Standard

To view the source code for com.amazonaws.services.s3.model StorageClass Standard, click the Source Link.

Document

The default Amazon S3 storage class.

Usage

From source file:com.liferay.portal.store.s3.S3Store.java

License:Open Source License

@Activate
protected void activate(Map<String, Object> properties) {
    // Bind the component configuration and build the S3 client plumbing.
    _s3StoreConfiguration = ConfigurableUtil.createConfigurable(S3StoreConfiguration.class, properties);

    _awsCredentialsProvider = getAWSCredentialsProvider();
    _amazonS3 = getAmazonS3(_awsCredentialsProvider);
    _bucketName = _s3StoreConfiguration.bucketName();
    _transferManager = getTransferManager(_amazonS3);

    try {
        // StorageClass.fromValue throws on unrecognized names.
        _storageClass = StorageClass.fromValue(_s3StoreConfiguration.s3StorageClass());
    } catch (IllegalArgumentException iae) {
        // Fall back to the S3 default rather than failing activation.
        _storageClass = StorageClass.Standard;

        if (_log.isWarnEnabled()) {
            _log.warn(_s3StoreConfiguration.s3StorageClass() + " is not a valid value for the storage class",
                    iae);
        }
    }
}

From source file:com.universal.storage.UniversalS3Storage.java

License:Open Source License

/**
 * This method uploads a file with a length greater than PART_SIZE (5Mb).
 * /*ww w . ja  va  2s .c  o  m*/
 * @param file to be stored within the storage.
 * @param path is the path for this new file within the root.
 * @throws UniversalIOException when a specific IO error occurs.
 */
private void uploadFile(File file, String path) throws UniversalIOException {
    // Create a list of UploadPartResponse objects. You get one of these
    // for each part upload.
    List<PartETag> partETags = new ArrayList<PartETag>();

    // Step 1: Initialize.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(this.settings.getRoot(),
            file.getName());
    InitiateMultipartUploadResult initResponse = this.s3client.initiateMultipartUpload(initRequest);

    long contentLength = file.length();
    long partSize = PART_SIZE; // Set part size to 5 MB.

    ObjectMetadata objectMetadata = new ObjectMetadata();
    if (this.settings.getEncryption()) {
        objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }

    List<Tag> tags = new ArrayList<Tag>();
    for (String key : this.settings.getTags().keySet()) {
        tags.add(new Tag(key, this.settings.getTags().get(key)));
    }

    try {
        this.triggerOnStoreFileListeners();
        // Step 2: Upload parts.
        long filePosition = 0;
        for (int i = 1; filePosition < contentLength; i++) {
            // Last part can be less than 5 MB. Adjust part size.
            partSize = Math.min(partSize, (contentLength - filePosition));

            // Create request to upload a part.
            UploadPartRequest uploadRequest = new UploadPartRequest()
                    .withBucketName(this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path)))
                    .withKey(file.getName()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withFileOffset(filePosition).withFile(file).withObjectMetadata(objectMetadata)
                    .withPartSize(partSize);

            // Upload part and add response to our list.
            partETags.add(this.s3client.uploadPart(uploadRequest).getPartETag());

            filePosition += partSize;
        }

        // Step 3: Complete.
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path)), file.getName(),
                initResponse.getUploadId(), partETags);

        CompleteMultipartUploadResult result = this.s3client.completeMultipartUpload(compRequest);

        StorageClass storageClass = getStorageClass();
        if (storageClass != StorageClass.Standard) {
            CopyObjectRequest copyObjectRequest = new CopyObjectRequest(this.settings.getRoot(), file.getName(),
                    this.settings.getRoot(), file.getName()).withStorageClass(storageClass);

            this.s3client.copyObject(copyObjectRequest);
        }

        if (!tags.isEmpty()) {
            this.s3client.setObjectTagging(new SetObjectTaggingRequest(this.settings.getRoot(), file.getName(),
                    new ObjectTagging(tags)));
        }

        this.triggerOnFileStoredListeners(new UniversalStorageData(file.getName(),
                PREFIX_S3_URL + (this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path))) + "/"
                        + file.getName(),
                result.getVersionId(), this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path))));
    } catch (Exception e) {
        this.s3client.abortMultipartUpload(new AbortMultipartUploadRequest(this.settings.getRoot(),
                file.getName(), initResponse.getUploadId()));

        UniversalIOException error = new UniversalIOException(e.getMessage());
        this.triggerOnErrorListeners(error);
        throw error;
    }
}

From source file:com.universal.storage.UniversalS3Storage.java

License:Open Source License

/**
 * Maps the storage class name from the settings onto the StorageClass enum.
 * Unknown or unset values resolve to the S3 default, Standard.
 */
private StorageClass getStorageClass() {
    String configuredClass = this.settings.getStorageClass();

    if ("REDUCED_REDUNDANCY".equals(configuredClass)) {
        return StorageClass.ReducedRedundancy;
    }

    if ("STANDARD_IA".equals(configuredClass)) {
        return StorageClass.StandardInfrequentAccess;
    }

    // "STANDARD" and every other value map to the default class.
    return StorageClass.Standard;
}

From source file:eu.stratosphere.nephele.fs.s3.S3DataOutputStream.java

License:Apache License

/**
 * {@inheritDoc}
 *
 * <p>Flushes buffered data to S3 and finalizes the upload: a single
 * PutObject for small writes, or completion of the multipart upload
 * otherwise.  A failed multipart completion is aborted so the parts do
 * not linger in the bucket.
 */
@Override
public void close() throws IOException {

    if (this.uploadId == null) {
        // This is not a multipart upload

        // No data has been written; do not create an empty object.
        if (this.bytesWritten == 0) {
            return;
        }

        final InputStream is = new InternalUploadInputStream(this.buf, this.bytesWritten);
        final ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(this.bytesWritten);

        final PutObjectRequest por = new PutObjectRequest(this.bucket, this.object, is, om);
        // Reduced redundancy trades durability for cost; Standard is the S3 default.
        if (this.useRRS) {
            por.setStorageClass(StorageClass.ReducedRedundancy);
        } else {
            por.setStorageClass(StorageClass.Standard);
        }

        try {
            this.s3Client.putObject(por);
        } catch (AmazonServiceException e) {
            // Keep the stringified message, but also preserve the original
            // exception as the cause so the stack trace is not lost.
            throw new IOException(StringUtils.stringifyException(e), e);
        }

        this.bytesWritten = 0;

    } else {

        // Multipart upload: flush the remaining buffered bytes as a last part.
        if (this.bytesWritten > 0) {
            uploadPartAndFlushBuffer();
        }

        boolean operationSuccessful = false;
        try {
            final CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(this.bucket,
                    this.object, this.uploadId, this.partETags);
            this.s3Client.completeMultipartUpload(request);

            operationSuccessful = true;

        } catch (AmazonServiceException e) {
            throw new IOException(StringUtils.stringifyException(e), e);
        } finally {
            // Abort on any failure so the incomplete upload is cleaned up.
            if (!operationSuccessful) {
                abortUpload();
            }
        }
    }
}

From source file:eu.stratosphere.nephele.fs.s3.S3DataOutputStream.java

License:Apache License

/**
 * Starts a multipart upload for this stream's bucket/object and returns
 * the upload id S3 assigned to it.  On failure the upload is aborted.
 *
 * @return the upload id of the newly initiated multipart upload.
 * @throws IOException if the S3 request fails.
 */
private String initiateMultipartUpload() throws IOException {

    boolean operationSuccessful = false;
    final InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(this.bucket, this.object);
    // Reduced redundancy trades durability for cost; Standard is the S3 default.
    if (this.useRRS) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    } else {
        request.setStorageClass(StorageClass.Standard);
    }

    try {

        final InitiateMultipartUploadResult result = this.s3Client.initiateMultipartUpload(request);
        operationSuccessful = true;
        return result.getUploadId();

    } catch (AmazonServiceException e) {
        // Keep the stringified message, but also preserve the original
        // exception as the cause so the stack trace is not lost.
        throw new IOException(StringUtils.stringifyException(e), e);
    } finally {
        // Abort on any failure so the half-initiated upload is cleaned up.
        if (!operationSuccessful) {
            abortUpload();
        }
    }
}

From source file:eu.stratosphere.runtime.fs.s3.S3DataOutputStream.java

License:Apache License

/**
 * Flushes buffered data to S3 and finalizes the upload: a single
 * PutObject for small writes, or completion of the multipart upload
 * otherwise.  A failed multipart completion is aborted so the parts do
 * not linger in the bucket.
 */
@Override
public void close() throws IOException {

    if (this.uploadId == null) {
        // This is not a multipart upload

        // No data has been written; do not create an empty object.
        if (this.bytesWritten == 0) {
            return;
        }

        final InputStream is = new InternalUploadInputStream(this.buf, this.bytesWritten);
        final ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(this.bytesWritten);

        final PutObjectRequest por = new PutObjectRequest(this.bucket, this.object, is, om);
        // Reduced redundancy trades durability for cost; Standard is the S3 default.
        if (this.useRRS) {
            por.setStorageClass(StorageClass.ReducedRedundancy);
        } else {
            por.setStorageClass(StorageClass.Standard);
        }

        try {
            this.s3Client.putObject(por);
        } catch (AmazonServiceException e) {
            // Keep the stringified message, but also preserve the original
            // exception as the cause so the stack trace is not lost.
            throw new IOException(StringUtils.stringifyException(e), e);
        }

        this.bytesWritten = 0;

    } else {

        // Multipart upload: flush the remaining buffered bytes as a last part.
        if (this.bytesWritten > 0) {
            uploadPartAndFlushBuffer();
        }

        boolean operationSuccessful = false;
        try {
            final CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(this.bucket,
                    this.object, this.uploadId, this.partETags);
            this.s3Client.completeMultipartUpload(request);

            operationSuccessful = true;

        } catch (AmazonServiceException e) {
            throw new IOException(StringUtils.stringifyException(e), e);
        } finally {
            // Abort on any failure so the incomplete upload is cleaned up.
            if (!operationSuccessful) {
                abortUpload();
            }
        }
    }
}

From source file:org.alanwilliamson.amazon.AmazonKey.java

License:Open Source License

/**
 * Resolves a caller-supplied storage directive to an S3 storage class.
 *
 * @param storage the requested storage class name; may be null.
 * @return ReducedRedundancy when the name starts with "reduced"
 *         (case-insensitively); Standard for null, "standard", and
 *         everything else.
 */
public StorageClass getAmazonStorageClass(String storage) {
    if (storage == null) {
        return StorageClass.Standard;
    }

    // regionMatches(true, ...) is a locale-independent, allocation-free
    // case-insensitive prefix test; the original toLowerCase() without a
    // Locale depends on the JVM default locale.
    if (storage.regionMatches(true, 0, "reduced", 0, 7)) {
        return StorageClass.ReducedRedundancy;
    }

    // "standard" and any unrecognized value both map to the default class.
    return StorageClass.Standard;
}

From source file:org.elasticsearch.repositories.s3.S3BlobStore.java

License:Apache License

/**
 * Parses the configured storage class name into a StorageClass value.
 * A null or empty name yields the S3 default, Standard; Glacier and
 * unknown names are rejected.
 */
public static StorageClass initStorageClass(String storageClass) {
    // Unset configuration falls back to the S3 default.
    if (storageClass == null || storageClass.isEmpty()) {
        return StorageClass.Standard;
    }

    try {
        StorageClass parsedStorageClass = StorageClass.fromValue(storageClass.toUpperCase(Locale.ENGLISH));

        if (parsedStorageClass.equals(StorageClass.Glacier)) {
            throw new BlobStoreException("Glacier storage class is not supported");
        }

        return parsedStorageClass;
    } catch (IllegalArgumentException illegalArgumentException) {
        throw new BlobStoreException("`" + storageClass + "` is not a valid S3 Storage Class.");
    }
}