List of usage examples for com.amazonaws.services.s3.model SSEAlgorithm KMS
SSEAlgorithm KMS
To view the full source code for com.amazonaws.services.s3.model SSEAlgorithm KMS, click the Source Link below each example.
From source file:com.streamsets.pipeline.lib.aws.s3.S3Accessor.java
License:Apache License
/**
 * Creates an {@link EncryptionMetadataBuilder} whose {@code build()} produces the
 * {@link ObjectMetadata} matching the configured server-side-encryption mode.
 *
 * <p>The builder returns {@code null} when there is no SSE configuration or when the
 * configured encryption is {@code NONE}; callers treat a {@code null} metadata as
 * "no encryption headers".
 */
public EncryptionMetadataBuilder createEncryptionMetadataBuilder() {
    return () -> {
        // No SSE configuration at all: nothing to build.
        if (sseConfigs == null) {
            return null;
        }
        ObjectMetadata objectMetadata;
        switch (sseConfigs.getEncryption()) {
        case NONE:
            objectMetadata = null;
            break;
        case S3:
            // SSE-S3: S3-managed keys, AES-256.
            objectMetadata = new ObjectMetadata();
            objectMetadata.setSSEAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
            break;
        case KMS:
            // SSE-KMS: key id plus an encryption-context header whose values are
            // resolved lazily via Caller.call (they are credential suppliers).
            objectMetadata = new ObjectMetadata();
            objectMetadata.setSSEAlgorithm(SSEAlgorithm.KMS.getAlgorithm());
            objectMetadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID, sseConfigs.getKmsKeyId().get());
            objectMetadata.setHeader("x-amz-server-side-encryption-context",
                sseConfigs.getEncryptionContext().entrySet().stream()
                    .collect(Collectors.toMap(Map.Entry::getKey, e -> Caller.call(() -> e.getValue().get()))));
            break;
        case CUSTOMER:
            // SSE-C: customer-provided key and its MD5 sent as request headers.
            objectMetadata = new ObjectMetadata();
            objectMetadata.setSSECustomerAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
            objectMetadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY, sseConfigs.getCustomerKey().get());
            objectMetadata.setHeader(Headers.COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
                sseConfigs.getCustomerKeyMd5().get());
            break;
        default:
            throw new IllegalArgumentException(
                String.format("Invalid encryption option '%s'", sseConfigs.getEncryption()));
        }
        return objectMetadata;
    };
}
From source file:com.streamsets.pipeline.stage.destination.s3.FileHelper.java
License:Apache License
protected ObjectMetadata getObjectMetadata() throws StageException { ObjectMetadata metadata = null;//from ww w . j a va 2s.c o m if (s3TargetConfigBean.sseConfig.useSSE) { metadata = new ObjectMetadata(); switch (s3TargetConfigBean.sseConfig.encryption) { case S3: metadata.setSSEAlgorithm(SSEAlgorithm.AES256.getAlgorithm()); break; case KMS: metadata.setSSEAlgorithm(SSEAlgorithm.KMS.getAlgorithm()); metadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID, s3TargetConfigBean.sseConfig.kmsKeyId.get()); if (!s3TargetConfigBean.sseConfig.encryptionContext.isEmpty()) { metadata.setHeader("x-amz-server-side-encryption-context", s3TargetConfigBean.sseConfig.resolveEncryptionContext()); } break; case CUSTOMER: metadata.setSSECustomerAlgorithm(SSEAlgorithm.AES256.getAlgorithm()); metadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY, s3TargetConfigBean.sseConfig.customerKey.get()); metadata.setHeader(Headers.COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5, s3TargetConfigBean.sseConfig.customerKeyMd5.get()); break; default: throw new IllegalStateException( Utils.format("Unknown encryption option: ", s3TargetConfigBean.sseConfig.encryption)); } } return metadata; }
From source file:io.confluent.connect.s3.S3SinkConnectorConfig.java
License:Open Source License
/**
 * Builds the {@link ConfigDef} for the S3 sink connector: starts from the storage
 * sink base definition and registers every S3-specific key (bucket/region, part
 * sizing, credentials, SSE, ACL, proxy, retry and format options) in the "S3" group.
 *
 * <p>NOTE(review): ordering matters — {@code ++orderInGroup} assigns each option's
 * display position, so the define() calls must not be reordered.
 */
public static ConfigDef newConfigDef() {
    ConfigDef configDef = StorageSinkConnectorConfig.newConfigDef(FORMAT_CLASS_RECOMMENDER,
            AVRO_COMPRESSION_RECOMMENDER);
    {
        final String group = "S3";
        int orderInGroup = 0;
        configDef.define(S3_BUCKET_CONFIG, Type.STRING, Importance.HIGH, "The S3 Bucket.", group, ++orderInGroup,
                Width.LONG, "S3 Bucket");
        configDef.define(REGION_CONFIG, Type.STRING, REGION_DEFAULT, new RegionValidator(), Importance.MEDIUM,
                "The AWS region to be used the connector.", group, ++orderInGroup, Width.LONG, "AWS region",
                new RegionRecommender());
        configDef.define(PART_SIZE_CONFIG, Type.INT, PART_SIZE_DEFAULT, new PartRange(), Importance.HIGH,
                "The Part Size in S3 Multi-part Uploads.", group, ++orderInGroup, Width.LONG, "S3 Part Size");
        configDef.define(CREDENTIALS_PROVIDER_CLASS_CONFIG, Type.CLASS, CREDENTIALS_PROVIDER_CLASS_DEFAULT,
                new CredentialsProviderValidator(), Importance.LOW,
                "Credentials provider or provider chain to use for authentication to AWS. By default "
                        + "the connector uses 'DefaultAWSCredentialsProviderChain'.",
                group, ++orderInGroup, Width.LONG, "AWS Credentials Provider Class");
        // Valid SSE algorithm values: every SDK-known algorithm plus "" meaning "no SSE".
        List<String> validSsea = new ArrayList<>(SSEAlgorithm.values().length + 1);
        validSsea.add("");
        for (SSEAlgorithm algo : SSEAlgorithm.values()) {
            validSsea.add(algo.toString());
        }
        configDef.define(SSEA_CONFIG, Type.STRING, SSEA_DEFAULT,
                ConfigDef.ValidString.in(validSsea.toArray(new String[validSsea.size()])), Importance.LOW,
                "The S3 Server Side Encryption Algorithm.", group, ++orderInGroup, Width.LONG,
                "S3 Server Side Encryption Algorithm", new SseAlgorithmRecommender());
        configDef.define(SSE_CUSTOMER_KEY, Type.PASSWORD, SSE_CUSTOMER_KEY_DEFAULT, Importance.LOW,
                "The S3 Server Side Encryption Customer-Provided Key (SSE-C).", group, ++orderInGroup, Width.LONG,
                "S3 Server Side Encryption Customer-Provided Key (SSE-C)");
        configDef.define(SSE_KMS_KEY_ID_CONFIG, Type.STRING, SSE_KMS_KEY_ID_DEFAULT, Importance.LOW,
                "The name of the AWS Key Management Service (AWS-KMS) key to be used for server side "
                        + "encryption of the S3 objects. No encryption is used when no key is provided, but"
                        + " it is enabled when '" + SSEAlgorithm.KMS + "' is specified as encryption "
                        + "algorithm with a valid key name.",
                group, ++orderInGroup, Width.LONG, "S3 Server Side Encryption Key", new SseKmsKeyIdRecommender());
        configDef.define(ACL_CANNED_CONFIG, Type.STRING, ACL_CANNED_DEFAULT, new CannedAclValidator(),
                Importance.LOW, "An S3 canned ACL header value to apply when writing objects.", group,
                ++orderInGroup, Width.LONG, "S3 Canned ACL");
        configDef.define(WAN_MODE_CONFIG, Type.BOOLEAN, WAN_MODE_DEFAULT, Importance.MEDIUM,
                "Use S3 accelerated endpoint.", group, ++orderInGroup, Width.LONG,
                "S3 accelerated endpoint enabled");
        configDef.define(COMPRESSION_TYPE_CONFIG, Type.STRING, COMPRESSION_TYPE_DEFAULT,
                new CompressionTypeValidator(), Importance.LOW,
                "Compression type for file written to S3. "
                        + "Applied when using JsonFormat or ByteArrayFormat. "
                        + "Available values: none, gzip.",
                group, ++orderInGroup, Width.LONG, "Compression type");
        configDef.define(S3_PART_RETRIES_CONFIG, Type.INT, S3_PART_RETRIES_DEFAULT, atLeast(0),
                Importance.MEDIUM,
                "Maximum number of retry attempts for failed requests. Zero means no retries. "
                        + "The actual number of attempts is determined by the S3 client based on multiple "
                        + "factors, including, but not limited to - "
                        + "the value of this parameter, type of exception occurred, "
                        + "throttling settings of the underlying S3 client, etc.",
                group, ++orderInGroup, Width.LONG, "S3 Part Upload Retries");
        configDef.define(S3_RETRY_BACKOFF_CONFIG, Type.LONG, S3_RETRY_BACKOFF_DEFAULT, atLeast(0L),
                Importance.LOW,
                "How long to wait in milliseconds before attempting the first retry "
                        + "of a failed S3 request. Upon a failure, this connector may wait up to twice as "
                        + "long as the previous wait, up to the maximum number of retries. "
                        + "This avoids retrying in a tight loop under failure scenarios.",
                group, ++orderInGroup, Width.SHORT, "Retry Backoff (ms)");
        configDef.define(FORMAT_BYTEARRAY_EXTENSION_CONFIG, Type.STRING, FORMAT_BYTEARRAY_EXTENSION_DEFAULT,
                Importance.LOW,
                String.format("Output file extension for ByteArrayFormat. Defaults to '%s'",
                        FORMAT_BYTEARRAY_EXTENSION_DEFAULT),
                group, ++orderInGroup, Width.LONG, "Output file extension for ByteArrayFormat");
        configDef.define(FORMAT_BYTEARRAY_LINE_SEPARATOR_CONFIG, Type.STRING,
                // Because ConfigKey automatically trims strings, we cannot set
                // the default here and instead inject null;
                // the default is applied in getFormatByteArrayLineSeparator().
                null, Importance.LOW,
                "String inserted between records for ByteArrayFormat. "
                        + "Defaults to 'System.lineSeparator()' "
                        + "and may contain escape sequences like '\\n'. "
                        + "An input record that contains the line separator will look like "
                        + "multiple records in the output S3 object.",
                group, ++orderInGroup, Width.LONG, "Line separator ByteArrayFormat");
        configDef.define(S3_PROXY_URL_CONFIG, Type.STRING, S3_PROXY_URL_DEFAULT, Importance.LOW,
                "S3 Proxy settings encoded in URL syntax. This property is meant to be used only if you"
                        + " need to access S3 through a proxy.",
                group, ++orderInGroup, Width.LONG, "S3 Proxy Settings");
        configDef.define(S3_PROXY_USER_CONFIG, Type.STRING, S3_PROXY_USER_DEFAULT, Importance.LOW,
                "S3 Proxy User. This property is meant to be used only if you"
                        + " need to access S3 through a proxy. Using ``" + S3_PROXY_USER_CONFIG
                        + "`` instead of embedding the username and password in ``" + S3_PROXY_URL_CONFIG
                        + "`` allows the password to be hidden in the logs.",
                group, ++orderInGroup, Width.LONG, "S3 Proxy User");
        configDef.define(S3_PROXY_PASS_CONFIG, Type.PASSWORD, S3_PROXY_PASS_DEFAULT, Importance.LOW,
                "S3 Proxy Password. This property is meant to be used only if you"
                        + " need to access S3 through a proxy. Using ``" + S3_PROXY_PASS_CONFIG
                        + "`` instead of embedding the username and password in ``" + S3_PROXY_URL_CONFIG
                        + "`` allows the password to be hidden in the logs.",
                group, ++orderInGroup, Width.LONG, "S3 Proxy Password");
        configDef.define(HEADERS_USE_EXPECT_CONTINUE_CONFIG, Type.BOOLEAN, HEADERS_USE_EXPECT_CONTINUE_DEFAULT,
                Importance.LOW,
                "Enable/disable use of the HTTP/1.1 handshake using EXPECT: 100-CONTINUE during "
                        + "multi-part upload. If true, the client will wait for a 100 (CONTINUE) response "
                        + "before sending the request body. Else, the client uploads the entire request "
                        + "body without checking if the server is willing to accept the request.",
                group, ++orderInGroup, Width.SHORT, "S3 HTTP Send Uses Expect Continue");
    }
    return configDef;
}
From source file:io.confluent.connect.s3.storage.S3OutputStream.java
License:Open Source License
private MultipartUpload newMultipartUpload() throws IOException { InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, key, newObjectMetadata()).withCannedACL(cannedAcl); if (SSEAlgorithm.KMS.toString().equalsIgnoreCase(ssea) && StringUtils.isNotBlank(sseKmsKeyId)) { initRequest.setSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(sseKmsKeyId)); } else if (sseCustomerKey != null) { initRequest.setSSECustomerKey(sseCustomerKey); }//from www . ja va 2 s .c o m try { return new MultipartUpload(s3.initiateMultipartUpload(initRequest).getUploadId()); } catch (AmazonClientException e) { // TODO: elaborate on the exception interpretation. If this is an AmazonServiceException, // there's more info to be extracted. throw new IOException("Unable to initiate MultipartUpload: " + e, e); } }
From source file:org.finra.herd.dao.impl.S3DaoImpl.java
License:Apache License
/** * Prepares the object metadata for server side encryption and reduced redundancy storage. * * @param params the parameters./*from www .ja va2 s . c o m*/ * @param metadata the metadata to prepare. */ private void prepareMetadata(final S3FileTransferRequestParamsDto params, ObjectMetadata metadata) { // Set the server side encryption if (params.getKmsKeyId() != null) { /* * TODO Use proper way to set KMS once AWS provides a way. * We are modifying the raw headers directly since TransferManager's uploadFileList operation does not provide a way to set a KMS key ID. * This would normally cause some issues when uploading where an MD5 checksum validation exception will be thrown, even though the object is * correctly uploaded. * To get around this, a system property defined at * com.amazonaws.services.s3.internal.SkipMd5CheckStrategy.DISABLE_PUT_OBJECT_MD5_VALIDATION_PROPERTY must be set. */ metadata.setSSEAlgorithm(SSEAlgorithm.KMS.getAlgorithm()); metadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID, params.getKmsKeyId().trim()); } else { metadata.setSSEAlgorithm(SSEAlgorithm.AES256.getAlgorithm()); } // If specified, set the metadata to use RRS. if (Boolean.TRUE.equals(params.isUseRrs())) { // TODO: For upload File, we can set RRS on the putObjectRequest. For uploadDirectory, this is the only // way to do it. However, setHeader() is flagged as For Internal Use Only metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.ReducedRedundancy.toString()); } }
From source file:org.icgc.dcc.storage.client.transport.StorageService.java
License:Open Source License
/**
 * Uploads one part of a multipart upload to the pre-signed URL carried by {@code part},
 * retrying on transient failures via the configured {@code retry} template.
 *
 * <p>Failure handling: a checksum mismatch during {@code finalizeUploadPart} is converted
 * to a {@link RetryableException}; {@code NotResumableException}/{@code NotRetryableException}
 * from the transfer itself abort immediately; any other throwable resets the channel and
 * retries.
 *
 * @param channel source of the part's bytes (also provides length and MD5).
 * @param part part descriptor including the pre-signed upload URL and part number.
 * @param objectId id of the object being uploaded.
 * @param uploadId id of the multipart upload session.
 * @throws IOException if the part cannot be uploaded after retries are exhausted.
 */
public void uploadPart(DataChannel channel, Part part, String objectId, String uploadId) throws IOException {
    retry.execute(new RetryCallback<Void, IOException>() {
        @Override
        public Void doWithRetry(RetryContext ctx) throws IOException {
            log.debug("Upload Part URL: {}", part.getUrl());
            // Streams the channel's bytes into the PUT request body with an explicit
            // Content-Length header.
            final RequestCallback callback = request -> {
                HttpHeaders requestHeader = request.getHeaders();
                requestHeader.setContentLength(channel.getLength());
                try (OutputStream os = request.getBody()) {
                    channel.writeTo(os);
                }
            };
            final ResponseExtractor<HttpHeaders> headersExtractor = response -> response.getHeaders();
            try {
                HttpHeaders headers = dataTemplate.execute(new URI(part.getUrl()), HttpMethod.PUT, callback,
                        headersExtractor);
                try {
                    finalizeUploadPart(objectId, uploadId, part.getPartNumber(), channel.getMd5(),
                            cleanUpETag(headers.getETag()), disableChecksum(headers));
                } catch (NotRetryableException e) {
                    // A checksum mismatch on finalize is treated as transient: retry the part.
                    log.warn("Checksum failed for part #{}, MD5={}, ETAG={} : {}", part, channel.getMd5(),
                            headers.getETag(), e);
                    throw new RetryableException(e);
                }
            } catch (NotResumableException | NotRetryableException e) {
                // Terminal failure: propagate so the retry template stops.
                log.error("Could not proceed. Failed to send part for part number: {}", part.getPartNumber(), e);
                throw e;
            } catch (Throwable e) {
                // Any other failure: rewind the channel so the next attempt re-sends
                // the part from the beginning, then retry.
                log.warn("Failed to send part for part #{} : {}", part.getPartNumber(), e);
                channel.reset();
                throw new RetryableException(e);
            }
            return null;
        }

        // MD5/ETag comparison is skipped when the server reports SSE-KMS, since the
        // ETag of a KMS-encrypted object is not the plaintext MD5.
        private boolean disableChecksum(HttpHeaders headers) {
            List<String> encryption = headers.get(Headers.SERVER_SIDE_ENCRYPTION);
            if (encryption != null && !encryption.isEmpty()) {
                return encryption.contains(SSEAlgorithm.KMS.getAlgorithm());
            }
            return false;
        }
    });
}