Example usage for com.amazonaws.services.s3.model ObjectMetadata setHeader

List of usage examples for com.amazonaws.services.s3.model ObjectMetadata setHeader

Introduction

In this page you can find the example usage for com.amazonaws.services.s3.model ObjectMetadata setHeader.

Prototype

public void setHeader(String key, Object value) 

Source Link

Document

For internal use only.

Usage

From source file:com.digitalpebble.stormcrawler.aws.s3.S3Cacher.java

License:Apache License

@Override
public void execute(Tuple tuple) {
    // Stores the binary content of a fetched page on S3, then passes the
    // tuple downstream. The tuple is acked on every code path.

    byte[] content = tuple.getBinaryByField("content");
    String url = tuple.getStringByField("url");
    final Metadata metadata = (Metadata) tuple.getValueByField("metadata");

    // Nothing worth caching for this URL
    byte[] contentToCache = getContentToCache(metadata, content, url);
    if (contentToCache == null) {
        LOG.info("{} had no data to cache", url);
        emitAndAck(tuple, url, content, metadata);
        return;
    }

    // Already in the cache - no need to store it again
    if (!shouldOverwrite(metadata)) {
        eventCounter.scope("already_in_cache").incr();
        emitAndAck(tuple, url, content, metadata);
        return;
    }

    // Normalise the URL into a cache key
    String key = "";
    try {
        key = URLEncoder.encode(url, "UTF-8");
    } catch (UnsupportedEncodingException e) {
        // cannot happen - UTF-8 support is mandated by the JVM spec
    }

    // S3 limits object keys to 1024 bytes; the configured prefix counts
    // towards that limit, so check the full key, not just the encoded URL
    // (URLEncoder output is ASCII, so chars == bytes here).
    String fullKey = getKeyPrefix() + key;
    if (fullKey.length() >= 1024) {
        LOG.info("Key too large : {}", fullKey);
        eventCounter.scope("key_too_large").incr();
        emitAndAck(tuple, url, content, metadata);
        return;
    }

    ByteArrayInputStream input = new ByteArrayInputStream(contentToCache);

    ObjectMetadata md = new ObjectMetadata();
    md.setContentLength(contentToCache.length);
    md.setHeader("x-amz-storage-class", "STANDARD_IA");

    try {
        client.putObject(bucketName, fullKey, input, md);
        eventCounter.scope("cached").incr();
        // TODO inspect the PutObjectResult (ETag, versionId) if needed
    } catch (AmazonS3Exception exception) {
        LOG.error("AmazonS3Exception while storing {}", url, exception);
        eventCounter.scope("s3_exception").incr();
    } finally {
        try {
            input.close();
        } catch (IOException e) {
            LOG.error("Error while closing ByteArrayInputStream", e);
        }
    }

    emitAndAck(tuple, url, content, metadata);
}

/** Emits the tuple downstream and acks it; every exit path of execute() uses this. */
private void emitAndAck(Tuple tuple, String url, byte[] content, Metadata metadata) {
    _collector.emit(tuple, new Values(url, content, metadata));
    // ack it no matter what
    _collector.ack(tuple);
}

From source file:com.metamug.mtg.s3.uploader.S3Uploader.java

/**
 * Uploads the given stream to S3 with a public-read ACL and an Expires
 * header six months in the future, and returns the public URL.
 *
 * @param inputStream content to store (not closed by this method)
 * @param fileSize    exact byte length of the stream, required by S3
 * @param URI         object key within the bucket
 * @return the public URL of the stored object
 */
public static String upload(InputStream inputStream, long fileSize, String URI) {
    ObjectMetadata objectMetaData = new ObjectMetadata();
    objectMetaData.setContentLength(fileSize);
    objectMetaData.setCacheControl("public");

    Calendar c = Calendar.getInstance();
    c.add(Calendar.MONTH, 6);
    // HTTP dates must be rendered in English and in GMT regardless of the
    // JVM default locale/timezone, e.g. "Thu, 21 Mar 2042 08:16:32 GMT".
    SimpleDateFormat httpDate = new SimpleDateFormat("EEE, d MMM yyyy HH:mm:ss zzz", Locale.US);
    httpDate.setTimeZone(TimeZone.getTimeZone("GMT"));
    objectMetaData.setHeader("Expires", httpDate.format(c.getTime()));

    s3Client.putObject(new PutObjectRequest(AWS_S3_BUCKET, URI, inputStream, objectMetaData)
            .withCannedAcl(CannedAccessControlList.PublicRead));

    return "http://metamug.net/" + URI;
}

From source file:com.netflix.hollow.example.producer.infrastructure.S3Publisher.java

License:Apache License

@Override
public void publishSnapshot(File snapshotFile, long stateVersion) {
    // Uploads a full snapshot blob and records the destination state version.
    String objectName = getS3ObjectName(blobNamespace, "snapshot", stateVersion);

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.addUserMetadata("to_state", String.valueOf(stateVersion));
    // setContentLength is the dedicated API for the Content-Length header
    metadata.setContentLength(snapshotFile.length());

    uploadFile(snapshotFile, objectName, metadata);

    /// now we update the snapshot index
    updateSnapshotIndex(stateVersion);
}

From source file:com.netflix.hollow.example.producer.infrastructure.S3Publisher.java

License:Apache License

@Override
public void publishDelta(File deltaFile, long previousVersion, long currentVersion) {
    // Uploads a forward-delta blob, named after the state it transitions FROM.
    String objectName = getS3ObjectName(blobNamespace, "delta", previousVersion);

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.addUserMetadata("from_state", String.valueOf(previousVersion));
    metadata.addUserMetadata("to_state", String.valueOf(currentVersion));
    // setContentLength is the dedicated API for the Content-Length header
    metadata.setContentLength(deltaFile.length());

    uploadFile(deltaFile, objectName, metadata);
}

From source file:com.netflix.hollow.example.producer.infrastructure.S3Publisher.java

License:Apache License

@Override
public void publishReverseDelta(File reverseDeltaFile, long previousVersion, long currentVersion) {
    // Uploads a reverse-delta blob, named after the state it transitions FROM
    // (the current version, since it walks backwards to previousVersion).
    String objectName = getS3ObjectName(blobNamespace, "reversedelta", currentVersion);

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.addUserMetadata("from_state", String.valueOf(currentVersion));
    metadata.addUserMetadata("to_state", String.valueOf(previousVersion));
    // setContentLength is the dedicated API for the Content-Length header
    metadata.setContentLength(reverseDeltaFile.length());

    uploadFile(reverseDeltaFile, objectName, metadata);
}

From source file:com.netflix.hollow.example.producer.infrastructure.S3Publisher.java

License:Apache License

/**
 * Write a list of all of the state versions to S3.
 * @param newVersion//from w  w  w.j  a va2s  . co m
 */
private synchronized void updateSnapshotIndex(Long newVersion) {
    /// insert the new version into the list
    int idx = Collections.binarySearch(snapshotIndex, newVersion);
    int insertionPoint = Math.abs(idx) - 1;
    snapshotIndex.add(insertionPoint, newVersion);

    /// build a binary representation of the list -- gap encoded variable-length integers
    byte[] idxBytes = buidGapEncodedVarIntSnapshotIndex();

    /// indicate the Content-Length
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader("Content-Length", (long) idxBytes.length);

    /// upload the new file content.
    try (InputStream is = new ByteArrayInputStream(idxBytes)) {
        Upload upload = s3TransferManager.upload(bucketName, getSnapshotIndexObjectName(blobNamespace), is,
                metadata);

        upload.waitForCompletion();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file:com.nike.cerberus.operation.dashboard.DashboardMetaDataProvider.java

License:Apache License

@Override
public void provideObjectMetadata(File file, ObjectMetadata metadata) {
    // Dashboard assets are never cached by browsers or intermediaries.
    metadata.setHeader(CACHE_CONTROL, "private, no-cache, no-store, proxy-revalidate, no-transform");
    // HTML pages additionally get an explicit UTF-8 content type.
    boolean isHtmlPage = file.getName().endsWith(".html");
    if (isHtmlPage) {
        metadata.setHeader(CONTENT_TYPE, "text/html; charset=UTF-8");
    }
}

From source file:com.streamsets.pipeline.lib.aws.s3.S3Accessor.java

License:Apache License

/**
 * Returns a builder producing the ObjectMetadata that carries the configured
 * server-side-encryption headers, or null when no encryption is configured.
 */
public EncryptionMetadataBuilder createEncryptionMetadataBuilder() {
    return () -> {
        if (sseConfigs == null) {
            return null;
        }
        ObjectMetadata encryptionMetadata;
        switch (sseConfigs.getEncryption()) {
        case NONE:
            encryptionMetadata = null;
            break;
        case S3:
            // S3-managed keys: algorithm header only
            encryptionMetadata = new ObjectMetadata();
            encryptionMetadata.setSSEAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
            break;
        case KMS:
            // KMS-managed key: algorithm, key id and encryption context headers
            encryptionMetadata = new ObjectMetadata();
            encryptionMetadata.setSSEAlgorithm(SSEAlgorithm.KMS.getAlgorithm());
            encryptionMetadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID,
                    sseConfigs.getKmsKeyId().get());
            encryptionMetadata.setHeader("x-amz-server-side-encryption-context",
                    sseConfigs.getEncryptionContext().entrySet().stream().collect(
                            Collectors.toMap(e -> e.getKey(), e -> Caller.call(() -> e.getValue().get()))));
            break;
        case CUSTOMER:
            // Customer-provided key: algorithm, key and key-MD5 headers
            encryptionMetadata = new ObjectMetadata();
            encryptionMetadata.setSSECustomerAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
            encryptionMetadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
                    sseConfigs.getCustomerKey().get());
            encryptionMetadata.setHeader(Headers.COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
                    sseConfigs.getCustomerKeyMd5().get());
            break;
        default:
            throw new IllegalArgumentException(
                    String.format("Invalid encryption option '%s'", sseConfigs.getEncryption()));
        }
        return encryptionMetadata;
    };
}

From source file:com.streamsets.pipeline.stage.destination.s3.FileHelper.java

License:Apache License

/**
 * Builds the ObjectMetadata carrying the configured server-side-encryption
 * headers, or returns null when SSE is disabled.
 *
 * @return metadata with SSE headers, or null if SSE is not in use
 * @throws StageException if credential resolution fails
 */
protected ObjectMetadata getObjectMetadata() throws StageException {
    ObjectMetadata metadata = null;
    if (s3TargetConfigBean.sseConfig.useSSE) {
        metadata = new ObjectMetadata();
        switch (s3TargetConfigBean.sseConfig.encryption) {
        case S3:
            metadata.setSSEAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
            break;
        case KMS:
            metadata.setSSEAlgorithm(SSEAlgorithm.KMS.getAlgorithm());
            metadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID,
                    s3TargetConfigBean.sseConfig.kmsKeyId.get());
            if (!s3TargetConfigBean.sseConfig.encryptionContext.isEmpty()) {
                metadata.setHeader("x-amz-server-side-encryption-context",
                        s3TargetConfigBean.sseConfig.resolveEncryptionContext());
            }
            break;
        case CUSTOMER:
            metadata.setSSECustomerAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
            metadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
                    s3TargetConfigBean.sseConfig.customerKey.get());
            metadata.setHeader(Headers.COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
                    s3TargetConfigBean.sseConfig.customerKeyMd5.get());
            break;
        default:
            // Utils.format uses '{}' placeholders; without one the offending
            // value was silently dropped from the message.
            throw new IllegalStateException(
                    Utils.format("Unknown encryption option: '{}'", s3TargetConfigBean.sseConfig.encryption));
        }
    }
    return metadata;
}

From source file:eu.openg.aws.s3.internal.FakeS3Object.java

License:Apache License

/** Normalizes metadata for the fake object: serializes the user metadata
 *  in place and advertises byte-range support the way real S3 does. */
private void updateMetadata(ObjectMetadata metadata) {
    metadata.setUserMetadata(serializeUserMetadata(metadata.getUserMetadata()));
    metadata.setHeader("Accept-Ranges", "bytes");
}