Example usage for com.amazonaws.services.s3.model S3Object getKey

List of usage examples for com.amazonaws.services.s3.model S3Object getKey

Introduction

In this page you can find the example usage for com.amazonaws.services.s3.model S3Object getKey.

Prototype

public String getKey() 

Source Link

Document

Gets the key under which this object is stored.

Usage

From source file:com.digitaslbi.helios.utils.S3Helper.java

/**
 * Downloads the object stored under {@code key} from the configured bucket
 * and returns it as a {@code File} DTO whose content is Base64-encoded.
 *
 * @param key S3 object key to download
 * @return populated {@code File}, or {@code null} if any AWS/IO error occurred
 */
public static File getObject(String key) {
    connect();

    try {
        log.info("[S3Helper][getObject] Downloading an object");

        // try-with-resources closes the S3Object (and its content stream) so the
        // underlying HTTP connection is released even if reading fails.
        try (S3Object s3object = s3Client
                .getObject(new GetObjectRequest(S3Properties.getInstance().getBucketName(), key))) {
            byte[] contentBytes = IOUtils.toByteArray(s3object.getObjectContent());

            log.info("Content-Type: " + s3object.getObjectMetadata().getContentType());

            File aux = new File();
            aux.setPath(s3object.getKey());
            aux.setIsFile(true);
            // encodeBase64String already returns a String; the previous
            // new String(...) wrapper was a redundant copy.
            aux.setContent(Base64.encodeBase64String(contentBytes));

            return aux;
        }
    } catch (AmazonServiceException ase) {
        // Pass the exception to the logger so the stack trace is preserved.
        log.error(
                "[S3Helper][getObject] Caught an AmazonServiceException, which" + " means your request made it "
                        + "to Amazon S3, but was rejected with an error response" + " for some reason.",
                ase);
        log.error("Error Message:    " + ase.getMessage());
        log.error("HTTP Status Code: " + ase.getStatusCode());
        log.error("AWS Error Code:   " + ase.getErrorCode());
        log.error("Error Type:       " + ase.getErrorType());
        log.error("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        log.error("[S3Helper][getObject] Caught an AmazonClientException, which means"
                + " the client encountered " + "an internal error while trying to " + "communicate with S3, "
                + "such as not being able to access the network.", ace);
        log.error("Error Message: " + ace.getMessage());
    } catch (IOException e) {
        log.error("[S3Helper][getObject] Error: " + e, e);
    }

    return null;
}

From source file:com.emc.vipr.s3.sample._02_ReadObject.java

License:Open Source License

/**
 * Sample entry point: prompts for an object key, reads the first line of
 * the object from the demo bucket, and prints it for validation.
 */
public static void main(String[] args) throws Exception {
    // create the ViPR S3 Client
    ViPRS3Client s3 = ViPRS3Factory.getS3Client();

    // retrieve the key value from user
    System.out.println("Enter the object key:");
    String key = new BufferedReader(new InputStreamReader(System.in)).readLine();

    // read the object from the demo bucket; try-with-resources closes the
    // object's content stream (releasing the HTTP connection), which the
    // original sample leaked
    try (S3Object object = s3.getObject(ViPRS3Factory.S3_BUCKET, key)) {
        // convert the first line of the object to a text string
        // NOTE(review): no charset given, so the platform default is used —
        // confirm the demo content is ASCII/UTF-8 compatible
        BufferedReader reader = new BufferedReader(new InputStreamReader(object.getObjectContent()));
        String content = reader.readLine();

        // print object key/value and content for validation
        System.out.println(
                String.format("object [%s/%s] content: [%s]", object.getBucketName(), object.getKey(), content));
    }
}

From source file:com.ge.predix.sample.blobstore.repository.BlobstoreService.java

License:Apache License

/**
 * Adds a new Blob to the binded bucket in the Object Store
 *
 * @param obj S3Object to be added
 * @throws Exception
 */
public void put(S3Object obj) throws Exception {
    if (obj == null) {
        log.error("put(): Empty file provided");
        throw new Exception("File is null");
    }
    // NOTE(review): this stream is never explicitly closed here — verify the
    // caller (or the SDK) releases it.
    InputStream is = obj.getObjectContent();

    List<PartETag> partETags = new ArrayList<>();

    // A multipart upload is always initiated up front; for small payloads it
    // is aborted below in favor of a single PutObject call.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, obj.getKey());
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
    try {

        int i = 1; // next S3 part number (part numbers are 1-based)
        int currentPartSize = 0;
        ByteArrayOutputStream tempBuffer = new ByteArrayOutputStream();
        int byteValue;
        // Copy byte-at-a-time into an in-memory buffer, flushing a part to S3
        // every time the buffer reaches exactly 50 MiB. The == test is safe
        // because the buffer grows by one byte per iteration.
        // NOTE(review): single-byte reads are slow for large blobs; a chunked
        // read would be faster — left unchanged here.
        while ((byteValue = is.read()) != -1) {
            tempBuffer.write(byteValue);
            currentPartSize = tempBuffer.size();
            if (currentPartSize == (50 * 1024 * 1024)) //make this a const
            {
                byte[] b = tempBuffer.toByteArray();
                ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

                UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                        .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i++)
                        .withInputStream(byteStream).withPartSize(currentPartSize);
                partETags.add(s3Client.uploadPart(uploadPartRequest).getPartETag());

                tempBuffer.reset();
            }
        }
        // currentPartSize now holds the size of the final (possibly only)
        // buffered chunk that has not yet been uploaded.
        log.info("currentPartSize: " + currentPartSize);
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(currentPartSize);
        obj.setObjectMetadata(objectMetadata);

        // Small object: no full 50 MiB part was ever uploaded (i == 1) and the
        // remainder is below S3's 5 MiB multipart minimum, so abort the
        // multipart upload and store the whole payload with one PutObject.
        if (i == 1 && currentPartSize < (5 * 1024 * 1024)) // make this a const
        {
            s3Client.abortMultipartUpload(
                    new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));

            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);
            objectMetadata.setContentType(getContentType(b));
            obj.setObjectMetadata(objectMetadata);

            PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, obj.getKey(), byteStream,
                    obj.getObjectMetadata());
            s3Client.putObject(putObjectRequest);
            return;
        }

        // Upload whatever remains in the buffer as the final part. S3 permits
        // the last part of a multipart upload to be smaller than 5 MiB.
        if (currentPartSize > 0 && currentPartSize <= (50 * 1024 * 1024)) // make this a const
        {
            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

            log.info("currentPartSize: " + currentPartSize);
            log.info("byteArray: " + b);

            UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withInputStream(byteStream).withPartSize(currentPartSize);
            partETags.add(s3Client.uploadPart(uploadPartRequest).getPartETag());
        }
    } catch (Exception e) {
        // Abort so S3 does not keep billing for orphaned parts, then rethrow.
        log.error("put(): Exception occurred in put(): " + e.getMessage());
        s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));
        throw e;
    }
    // Stitch all uploaded parts together into the final object.
    CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
            .withBucketName(bucket).withPartETags(partETags).withUploadId(initResponse.getUploadId())
            .withKey(obj.getKey());

    s3Client.completeMultipartUpload(completeMultipartUploadRequest);
}

From source file:com.ge.predix.solsvc.blobstore.bootstrap.BlobstoreClientImpl.java

License:Apache License

/**
 * Adds a new Blob to the binded bucket in the Object Store
 *
 * @param obj S3Object to be added
 */
@Override
public String saveBlob(S3Object obj) {
    if (obj == null) {
        this.log.error("put(): Empty file provided"); //$NON-NLS-1$
        throw new RuntimeException("File is null"); //$NON-NLS-1$
    }
    List<PartETag> partETags = new ArrayList<>();
    String bucket = this.blobstoreConfig.getBucketName();
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, obj.getKey());
    InitiateMultipartUploadResult initResponse = this.s3Client.initiateMultipartUpload(initRequest);
    try (InputStream is = obj.getObjectContent();) {

        int i = 1;
        int currentPartSize = 0;
        ByteArrayOutputStream tempBuffer = new ByteArrayOutputStream();
        int byteValue;
        while ((byteValue = is.read()) != -1) {
            tempBuffer.write(byteValue);
            currentPartSize = tempBuffer.size();
            if (currentPartSize == (50 * 1024 * 1024)) //make this a const
            {
                byte[] b = tempBuffer.toByteArray();
                ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

                UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                        .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i++)
                        .withInputStream(byteStream).withPartSize(currentPartSize);
                partETags.add(this.s3Client.uploadPart(uploadPartRequest).getPartETag());

                tempBuffer.reset();
            }
        }
        this.log.info("currentPartSize: " + currentPartSize); //$NON-NLS-1$
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(currentPartSize);
        if (this.enableSSE) {
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        obj.setObjectMetadata(objectMetadata);

        if (i == 1 && currentPartSize < (5 * 1024 * 1024)) // make this a const
        {
            this.s3Client.abortMultipartUpload(
                    new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));

            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);
            objectMetadata.setContentType(getContentType(b));
            if (this.enableSSE) {
                objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            }
            obj.setObjectMetadata(objectMetadata);

            PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, obj.getKey(), byteStream,
                    obj.getObjectMetadata());
            this.s3Client.putObject(putObjectRequest);

            ObjectMetadata meta = this.s3Client.getObjectMetadata(bucket, obj.getKey());
            Map<String, Object> headers = meta.getRawMetadata();
            for (Map.Entry<String, Object> entry : headers.entrySet()) {
                this.log.info("Object Metadata -- " + entry.getKey() + ": " + entry.getValue().toString()); //$NON-NLS-1$ //$NON-NLS-2$
            }

            return initResponse.getUploadId();
        }

        if (currentPartSize > 0 && currentPartSize <= (50 * 1024 * 1024)) // make this a const
        {
            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

            this.log.info("currentPartSize: " + currentPartSize); //$NON-NLS-1$
            this.log.info("byteArray: " + b); //$NON-NLS-1$

            UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withInputStream(byteStream).withPartSize(currentPartSize);
            partETags.add(this.s3Client.uploadPart(uploadPartRequest).getPartETag());
        }

        CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
                .withBucketName(bucket).withPartETags(partETags).withUploadId(initResponse.getUploadId())
                .withKey(obj.getKey());

        this.s3Client.completeMultipartUpload(completeMultipartUploadRequest);
        return initResponse.getUploadId();
    } catch (Exception e) {
        this.log.error("put(): Exception occurred in put(): " + e.getMessage()); //$NON-NLS-1$
        this.s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));
        throw new RuntimeException("put(): Exception occurred in put(): ", e); //$NON-NLS-1$
    }
}

From source file:com.netflix.exhibitor.core.backup.s3.MockS3Client.java

License:Apache License

/**
 * Builds a mock client pre-seeded with an optional uploaded object and a
 * fixed listing result.
 */
public MockS3Client(S3Object object, ObjectListing listing) {
    this.listing = listing;
    if (object == null) {
        return;
    }
    // Shallow copy of the seed object: key and metadata are duplicated, but
    // the content stream is shared with the caller's instance.
    S3Object stored = new S3Object();
    stored.setKey(object.getKey());
    stored.setObjectMetadata(object.getObjectMetadata());
    stored.setObjectContent(object.getObjectContent());
    uploads.put(object.getKey(), stored);
}

From source file:com.nike.cerberus.operation.core.EnableConfigReplicationOperation.java

License:Apache License

/**
 * Re-writes each existing non-consul config object in place so that the
 * newly enabled replication picks it up.
 */
private void touchCurrentFiles() {
    final String bucketName = environmentMetadata.getBucketName();
    logger.info("Touching config files that already exist so they are replicated.");

    s3Client.listObjects(bucketName).getObjectSummaries().forEach(summary -> {
        if (StringUtils.startsWith(summary.getKey(), "consul")) {
            return; // consul-prefixed keys are intentionally left alone
        }
        logger.debug("Touching {}.", summary.getKey());
        // Re-put the object with its own content and metadata: a no-op write
        // that marks it for replication.
        final S3Object object = s3Client.getObject(bucketName, summary.getKey());
        s3Client.putObject(bucketName, object.getKey(), object.getObjectContent(),
                object.getObjectMetadata());
    });
}

From source file:com.smoketurner.pipeline.application.core.AmazonS3Downloader.java

License:Apache License

/**
 * Retrieves a file from S3
 *
 * @param record
 *            S3 event notification record to download
 * @return S3 object
 * @throws AmazonS3ConstraintException
 *             if the etag constraints weren't met
 * @throws AmazonS3ZeroSizeException
 *             if the file size of the object is zero
 */
public S3Object fetch(@Nonnull final S3EventNotificationRecord record)
        throws AmazonS3ConstraintException, AmazonS3ZeroSizeException {
    final AmazonS3Object object = converter.convert(Objects.requireNonNull(record));

    // Constrain the GET by version id and ETag when the notification supplied
    // them, so we fetch exactly the object revision that triggered the event.
    final GetObjectRequest request = new GetObjectRequest(object.getBucketName(), object.getKey());
    object.getVersionId().ifPresent(request::setVersionId);
    object.getETag().ifPresent(etag -> request.setMatchingETagConstraints(Collections.singletonList(etag)));

    LOGGER.debug("Fetching key: {}/{}", object.getBucketName(), object.getKey());

    final S3Object download;
    try {
        download = s3.getObject(request);
    } catch (AmazonServiceException e) {
        // Request reached S3 but was rejected; log and propagate unchanged.
        LOGGER.error("Service error while fetching object from S3", e);
        throw e;
    } catch (AmazonClientException e) {
        // Client-side failure (e.g. network); log and propagate unchanged.
        LOGGER.error("Client error while fetching object from S3", e);
        throw e;
    }

    // getObject returns null (rather than throwing) when the ETag constraint
    // was not satisfied, i.e. the object changed since the notification.
    if (download == null) {
        LOGGER.error("eTag from object did not match for key: {}/{}", object.getBucketName(), object.getKey());
        throw new AmazonS3ConstraintException(object.getKey());
    }

    final long contentLength = download.getObjectMetadata().getContentLength();
    if (contentLength < 1) {
        // Close the zero-byte download before bailing so the HTTP connection
        // is returned to the pool.
        try {
            download.close();
        } catch (IOException e) {
            LOGGER.error(String.format("Failed to close S3 stream for key: %s/%s", download.getBucketName(),
                    download.getKey()), e);
        }

        LOGGER.debug("Object size is zero for key: {}/{}", download.getBucketName(), download.getKey());
        throw new AmazonS3ZeroSizeException(object.getKey());
    }

    LOGGER.debug("Streaming key ({} bytes): {}/{}", contentLength, download.getBucketName(), download.getKey());

    // Caller takes ownership of the returned object and must close it.
    return download;
}

From source file:com.smoketurner.pipeline.application.core.AmazonS3Downloader.java

License:Apache License

/**
 * Determine whether the object is gzipped or not by inspecting the
 * ContentEncoding object property or whether the key ends in .gz
 *
 * @param object
 *            S3 object to inspect
 * @return true if the file is gzipped, otherwise false
 */
public static boolean isGZipped(@Nullable final S3Object object) {
    if (object == null) {
        return false;
    }

    // The Content-Encoding header is authoritative; check it first.
    final String encoding = Strings.nullToEmpty(object.getObjectMetadata().getContentEncoding());
    if (GZIP_ENCODING.equalsIgnoreCase(encoding.trim())) {
        return true;
    }

    // Fall back to the key's extension. Locale.ROOT avoids locale-dependent
    // case mapping (e.g. Turkish dotless-i), where a default-locale
    // toLowerCase() could make an upper-case ".GZ" suffix fail to match.
    final String key = Strings.nullToEmpty(object.getKey());
    return key.trim().toLowerCase(java.util.Locale.ROOT).endsWith(GZIP_EXTENSION);
}

From source file:com.smoketurner.pipeline.application.core.MessageProcessor.java

License:Apache License

/**
 * Process an S3 event notification record by streaming object in
 * {@link #streamObject}
 * 
 * @param record
 *            S3 event notification record
 * @return true if the record was fully processed, otherwise false
 */
private boolean processRecord(@Nonnull final S3EventNotificationRecord record) {
    LOGGER.trace("Event Record: {}", record);

    // Download the object the notification refers to. Returning true means
    // "done with this record" (even on per-record failures); returning false
    // aborts processing of the remaining records.
    final S3Object s3Object;
    try {
        s3Object = s3.fetch(record);
    } catch (AmazonS3ConstraintException | AmazonS3ZeroSizeException e) {
        LOGGER.error("Unable to download file from S3, skipping to next record", e);
        return true;
    } catch (AmazonS3Exception e) {
        if (e.getStatusCode() == 404) {
            LOGGER.warn("File does not exist in S3, skipping to next record", e);
            return true;
        }
        LOGGER.error("Amazon S3 exception, skipping remaining records", e);
        return false;
    } catch (Exception e) {
        LOGGER.error("Failed to download file from S3, skipping remaining records", e);
        return false;
    }

    // Stream the object line by line, broadcasting each event.
    final int broadcastCount;
    try {
        broadcastCount = streamObject(s3Object);
    } catch (IOException e) {
        LOGGER.error(String.format("Error streaming key: %s/%s", s3Object.getBucketName(), s3Object.getKey()),
                e);
        return false;
    }

    eventCounts.update(broadcastCount);

    LOGGER.debug("Broadcast {} events from key: {}/{}", broadcastCount, s3Object.getBucketName(),
            s3Object.getKey());
    return true;
}

From source file:com.smoketurner.pipeline.application.core.MessageProcessor.java

License:Apache License

/**
 * Stream an {@link S3Object} object and process each line with the
 * processor.
 * 
 * @param object
 *            S3Object to download and process
 * @return number of events processed
 * @throws IOException
 *             if unable to stream the object
 */
private int streamObject(@Nonnull final S3Object object) throws IOException {

    final AtomicInteger eventCount = new AtomicInteger(0);
    // try-with-resources closes the S3 content stream on every exit path.
    try (S3ObjectInputStream input = object.getObjectContent()) {

        // Decode gzip-compressed objects (detected via Content-Encoding or a
        // .gz key suffix); otherwise read the stream as plain UTF-8 text.
        final BufferedReader reader;
        if (AmazonS3Downloader.isGZipped(object)) {
            reader = new BufferedReader(
                    new InputStreamReader(new StreamingGZIPInputStream(input), StandardCharsets.UTF_8));
        } else {
            reader = new BufferedReader(new InputStreamReader(input, StandardCharsets.UTF_8));
        }

        // failed will be true if we did not successfully broadcast all
        // of the events because of no consumers. anyMatch short-circuits on
        // the first line for which broadcaster.test(...) returns true, so
        // reading stops immediately on failure; peek counts the lines that
        // were actually consumed from the stream.
        final boolean failed = reader.lines().peek(event -> eventCount.incrementAndGet())
                .anyMatch(broadcaster::test);

        if (failed) {
            // abort the current S3 download instead of draining the remaining
            // bytes just to close the connection cleanly
            input.abort();
            LOGGER.error("Partial events broadcast ({} sent) from key: {}/{}", eventCount.get(),
                    object.getBucketName(), object.getKey());
            throw new IOException("aborting download");
        }
    }
    return eventCount.get();
}