Example usage for com.amazonaws.services.s3.model PutObjectRequest setStorageClass

List of usage examples for com.amazonaws.services.s3.model PutObjectRequest setStorageClass

Introduction

On this page you can find example usage of com.amazonaws.services.s3.model PutObjectRequest setStorageClass.

Prototype

public void setStorageClass(String storageClass) 

Source Link

Document

Sets the optional Amazon S3 storage class to use when storing the new object.

Usage

From source file:com.ALC.SC2BOAserver.aws.S3StorageManager.java

License:Open Source License

/**
 * Stores a given item on S3/*  ww  w .j  a  v a 2 s. com*/
 * @param obj the data to be stored
 * @param reducedRedundancy whether or not to use reduced redundancy storage
 * @param acl a canned access control list indicating what permissions to store this object with (can be null to leave it set to default)
 */
public void store(SC2BOAStorageObject obj, boolean reducedRedundancy, CannedAccessControlList acl) {
    // Make sure the bucket exists before we try to use it
    checkForAndCreateBucket(obj.getBucketName());

    ObjectMetadata omd = new ObjectMetadata();
    omd.setContentType(obj.getMimeType());
    omd.setContentLength(obj.getData().length);

    ByteArrayInputStream is = new ByteArrayInputStream(obj.getData());
    PutObjectRequest request = new PutObjectRequest(obj.getBucketName(), obj.getStoragePath(), is, omd);

    // Check if reduced redundancy is enabled
    if (reducedRedundancy) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    }

    s3Client.putObject(request);

    // If we have an ACL set access permissions for the the data on S3
    if (acl != null) {
        s3Client.setObjectAcl(obj.getBucketName(), obj.getStoragePath(), acl);
    }

}

From source file:com.amazon.aws.samplecode.travellog.aws.S3StorageManager.java

License:Open Source License

/**
 * Stores a given item on S3/*from   w w w .j ava 2 s  . c  o m*/
 * @param obj the data to be stored
 * @param reducedRedundancy whether or not to use reduced redundancy storage
 * @param acl a canned access control list indicating what permissions to store this object with (can be null to leave it set to default)
 */
public void store(TravelLogStorageObject obj, boolean reducedRedundancy, CannedAccessControlList acl) {
    //Make sure the bucket exists before we try to use it
    checkForAndCreateBucket(obj.getBucketName());

    ObjectMetadata omd = new ObjectMetadata();
    omd.setContentType(obj.getMimeType());
    omd.setContentLength(obj.getData().length);

    ByteArrayInputStream is = new ByteArrayInputStream(obj.getData());
    PutObjectRequest request = new PutObjectRequest(obj.getBucketName(), obj.getStoragePath(), is, omd);

    //Check if reduced redundancy is enabled
    if (reducedRedundancy) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    }

    s3client.putObject(request);

    //If we have an ACL set access permissions for the the data on S3
    if (acl != null) {
        s3client.setObjectAcl(obj.getBucketName(), obj.getStoragePath(), acl);
    }

}

From source file:com.awscrud.aws.S3StorageManager.java

License:Open Source License

/**
 * Writes the given object to S3, creating the bucket if necessary.
 *
 * @param obj the data to be stored
 * @param reducedRedundancy whether or not to use reduced redundancy storage
 * @param acl a canned access control list indicating what permissions to store
 *            this object with (may be null to keep the default permissions)
 */
public void store(AwscrudStorageObject obj, boolean reducedRedundancy, CannedAccessControlList acl) {
    // The bucket must exist before we can put anything into it.
    checkForAndCreateBucket(obj.getBucketName());

    final ObjectMetadata meta = new ObjectMetadata();
    meta.setContentType(obj.getMimeType());
    meta.setContentLength(obj.getData().length);

    final ByteArrayInputStream input = new ByteArrayInputStream(obj.getData());
    final PutObjectRequest putRequest =
            new PutObjectRequest(obj.getBucketName(), obj.getStoragePath(), input, meta);

    // Use reduced redundancy storage only when explicitly requested.
    if (reducedRedundancy) {
        putRequest.setStorageClass(StorageClass.ReducedRedundancy);
    }

    s3client.putObject(putRequest);

    // Apply access permissions when a canned ACL was supplied.
    if (acl != null) {
        s3client.setObjectAcl(obj.getBucketName(), obj.getStoragePath(), acl);
    }
}

From source file:com.erudika.para.storage.AWSFileStore.java

License:Apache License

/**
 * Stores the given stream in the configured S3 bucket under a
 * timestamp-prefixed key and returns its public URL, or null when the
 * input is invalid or exceeds the configured size limit.
 */
@Override
public String store(String path, InputStream data) {
    // Normalize away a leading slash so the key is relative to the bucket root.
    if (StringUtils.startsWith(path, "/")) {
        path = path.substring(1);
    }
    if (StringUtils.isBlank(path) || data == null) {
        return null;
    }
    // Upload size cap in megabytes, configurable via "para.s3.max_filesize_mb".
    int maxFileSizeMBytes = Config.getConfigInt("para.s3.max_filesize_mb", 10);
    try {
        // NOTE(review): InputStream.available() is only an estimate of the bytes
        // readable without blocking, not the total stream length — this size
        // check may under-report for network-backed streams; confirm callers
        // pass in-memory streams.
        if (data.available() > 0 && data.available() <= (maxFileSizeMBytes * 1024 * 1024)) {
            ObjectMetadata om = new ObjectMetadata();
            om.setCacheControl("max-age=15552000, must-revalidate"); // 180 days
            if (path.endsWith(".gz")) {
                // Serve pre-gzipped content with a gzip Content-Encoding and
                // strip the ".gz" suffix from the stored key.
                om.setContentEncoding("gzip");
                path = path.substring(0, path.length() - 3);
            }
            // Prefix the current time so every upload gets a unique key.
            path = System.currentTimeMillis() + "." + path;
            PutObjectRequest por = new PutObjectRequest(bucket, path, data, om);
            por.setCannedAcl(CannedAccessControlList.PublicRead);
            por.setStorageClass(StorageClass.ReducedRedundancy);
            s3.putObject(por);
            // Build the externally visible URL for the stored object.
            return Utils.formatMessage(baseUrl, Config.AWS_REGION, bucket, path);
        }
    } catch (IOException e) {
        logger.error(null, e);
    } finally {
        // Always close the caller's stream, even on failure.
        try {
            data.close();
        } catch (IOException ex) {
            logger.error(null, ex);
        }
    }
    return null;
}

From source file:com.kittypad.music.game.util.S3StorageManager.java

License:Open Source License

/**
 * Stores a given item on S3/*www  .  ja va  2s  . c om*/
 * @param obj the data to be stored
 * @param reducedRedundancy whether or not to use reduced redundancy storage
 * @param acl a canned access control list indicating what permissions to store this object with (can be null to leave it set to default)
 */
public void store(MusicItem obj, byte[] data, boolean reducedRedundancy, CannedAccessControlList acl) {
    // Make sure the bucket exists before we try to use it
    //checkForAndCreateBucket(this.bucketName);
    String key = obj.getUUID() + obj.getMusicName() + "." + obj.getType();
    ObjectMetadata omd = new ObjectMetadata();
    omd.setContentType(mimeType);
    omd.setContentLength(obj.getSize());
    ByteArrayInputStream is = new ByteArrayInputStream(data);
    PutObjectRequest request = new PutObjectRequest(bucketName, key, is, omd);
    // Check if reduced redundancy is enabled
    if (reducedRedundancy) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    }
    s3client.putObject(request);
    // If we have an ACL set access permissions for the the data on S3
    if (acl != null) {
        s3client.setObjectAcl(bucketName, key, acl);
    }
}

From source file:com.tfnsnproject.util.S3StorageManager.java

License:Open Source License

/**
 * Stores a given item on S3/*  ww w.ja  v a2  s  . co  m*/
 *
 * @param obj               the data to be stored
 * @param reducedRedundancy whether or not to use reduced redundancy storage
 * @param acl               a canned access control list indicating what permissions to store this object with (can be null to leave it set to default)
 */
public void store(S3StorageObject obj, boolean reducedRedundancy, CannedAccessControlList acl) {
    // Make sure the bucket exists before we try to use it
    checkForAndCreateBucket(obj.getBucketName());

    ObjectMetadata omd = new ObjectMetadata();
    omd.setContentType(obj.getMimeType());
    omd.setContentLength(obj.getData().length);

    ByteArrayInputStream is = new ByteArrayInputStream(obj.getData());
    PutObjectRequest request = new PutObjectRequest(obj.getBucketName(), obj.getStoragePath(), is, omd);

    // Check if reduced redundancy is enabled
    if (reducedRedundancy) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    }

    s3client.putObject(request);

    // If we have an ACL set access permissions for the the data on S3
    if (acl != null) {
        s3client.setObjectAcl(obj.getBucketName(), obj.getStoragePath(), acl);
    }

}

From source file:com.universal.storage.UniversalS3Storage.java

License:Open Source License

/**
 * This method uploads a file with a length lesser than PART_SIZE (5Mb).
 *
 * @param file to be stored within the storage.
 * @param path is the path for this new file within the root.
 * @throws UniversalIOException when a specific IO error occurs.
 */
private void uploadTinyFile(File file, String path) throws UniversalIOException {
    try {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        // Request server-side AES-256 encryption when enabled in the settings.
        if (this.settings.getEncryption()) {
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }

        // Convert the configured tag map into S3 Tag objects.
        List<Tag> tags = new ArrayList<Tag>();
        for (String key : this.settings.getTags().keySet()) {
            tags.add(new Tag(key, this.settings.getTags().get(key)));
        }

        // NOTE(review): the first PutObjectRequest argument is the bucket name;
        // it is built from root + path here, so path segments become part of the
        // bucket identifier rather than the object key — confirm this matches
        // the intended storage layout.
        PutObjectRequest request = new PutObjectRequest(
                this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path)), file.getName(), file);
        request.setMetadata(objectMetadata);
        request.setTagging(new ObjectTagging(tags));
        request.setStorageClass(getStorageClass());
        // Notify listeners before the upload starts.
        this.triggerOnStoreFileListeners();

        PutObjectResult result = this.s3client.putObject(request);

        // Notify listeners of the stored file, its URL and version id.
        this.triggerOnFileStoredListeners(new UniversalStorageData(file.getName(),
                PREFIX_S3_URL + (this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path))) + "/"
                        + file.getName(),
                result.getVersionId(), this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path))));
    } catch (Exception e) {
        // NOTE(review): only the message is preserved — the original exception's
        // cause chain is dropped; consider passing `e` as the cause if
        // UniversalIOException supports it.
        UniversalIOException error = new UniversalIOException(e.getMessage());
        this.triggerOnErrorListeners(error);
        throw error;
    }
}

From source file:com.upplication.s3fs.S3OutputStream.java

License:Open Source License

/**
 * Stores the given buffer using a single-part upload process
 *
 * @param contentLength/*www  . j a v a2s  .co m*/
 * @param content
 * @throws IOException
 */
private void putObject(final InputStream content, final long contentLength, byte[] checksum)
        throws IOException {

    final ObjectMetadata meta = metadata.clone();
    meta.setContentLength(contentLength);
    meta.setContentMD5(Base64.encodeAsString(checksum));

    final PutObjectRequest request = new PutObjectRequest(objectId.getBucket(), objectId.getKey(), content,
            meta);

    if (storageClass != null) {
        request.setStorageClass(storageClass);
    }

    try {
        s3.putObject(request);
    } catch (final AmazonClientException e) {
        throw new IOException("Failed to put data into Amazon S3 object", e);
    }
}

From source file:eu.stratosphere.nephele.fs.s3.S3DataOutputStream.java

License:Apache License

/**
 * {@inheritDoc}
 *
 * Flushes any buffered data to S3. For small writes (no multipart upload
 * was started) the buffer is sent as one object; otherwise the final part
 * is uploaded and the multipart upload is completed, aborting on failure.
 */
@Override
public void close() throws IOException {

    if (this.uploadId == null) {
        // This is not a multipart upload

        // No data has been written
        if (this.bytesWritten == 0) {
            return;
        }

        // Upload the in-memory buffer as a single S3 object.
        final InputStream is = new InternalUploadInputStream(this.buf, this.bytesWritten);
        final ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(this.bytesWritten);

        final PutObjectRequest por = new PutObjectRequest(this.bucket, this.object, is, om);
        // Storage class follows the configured reduced-redundancy flag.
        if (this.useRRS) {
            por.setStorageClass(StorageClass.ReducedRedundancy);
        } else {
            por.setStorageClass(StorageClass.Standard);
        }

        try {
            this.s3Client.putObject(por);
        } catch (AmazonServiceException e) {
            // Convert AWS failures to IOException per the OutputStream contract.
            throw new IOException(StringUtils.stringifyException(e));
        }

        // Reset the buffer accounting so close() is effectively idempotent.
        this.bytesWritten = 0;

    } else {

        // Flush any remaining buffered bytes as the final part.
        if (this.bytesWritten > 0) {
            uploadPartAndFlushBuffer();
        }

        boolean operationSuccessful = false;
        try {
            final CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(this.bucket,
                    this.object, this.uploadId, this.partETags);
            this.s3Client.completeMultipartUpload(request);

            operationSuccessful = true;

        } catch (AmazonServiceException e) {
            throw new IOException(StringUtils.stringifyException(e));
        } finally {
            // Abort the multipart upload on failure so S3 does not keep
            // charging for orphaned parts.
            if (!operationSuccessful) {
                abortUpload();
            }
        }
    }
}

From source file:eu.stratosphere.runtime.fs.s3.S3DataOutputStream.java

License:Apache License

/**
 * Flushes buffered data to S3 and finishes the upload. A small write (no
 * multipart upload in progress) is sent as a single object; otherwise the
 * last buffered part is uploaded and the multipart upload is completed,
 * aborting it if completion fails.
 */
@Override
public void close() throws IOException {

    if (this.uploadId == null) {
        // This is not a multipart upload

        // No data has been written
        if (this.bytesWritten == 0) {
            return;
        }

        // Send the buffered bytes as one object.
        final InputStream is = new InternalUploadInputStream(this.buf, this.bytesWritten);
        final ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(this.bytesWritten);

        final PutObjectRequest por = new PutObjectRequest(this.bucket, this.object, is, om);
        // Pick the storage class according to the reduced-redundancy flag.
        if (this.useRRS) {
            por.setStorageClass(StorageClass.ReducedRedundancy);
        } else {
            por.setStorageClass(StorageClass.Standard);
        }

        try {
            this.s3Client.putObject(por);
        } catch (AmazonServiceException e) {
            // Surface AWS failures as IOException per the OutputStream contract.
            throw new IOException(StringUtils.stringifyException(e));
        }

        // Reset the write counter so a repeated close() is a no-op.
        this.bytesWritten = 0;

    } else {

        // Upload any remaining buffered bytes as the final part.
        if (this.bytesWritten > 0) {
            uploadPartAndFlushBuffer();
        }

        boolean operationSuccessful = false;
        try {
            final CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(this.bucket,
                    this.object, this.uploadId, this.partETags);
            this.s3Client.completeMultipartUpload(request);

            operationSuccessful = true;

        } catch (AmazonServiceException e) {
            throw new IOException(StringUtils.stringifyException(e));
        } finally {
            // Abort on failure so incomplete parts are not left behind in S3.
            if (!operationSuccessful) {
                abortUpload();
            }
        }
    }
}