Example usage for com.amazonaws.services.kinesis AmazonKinesisClient putRecords

Introduction

On this page, you can find example usage for com.amazonaws.services.kinesis AmazonKinesisClient putRecords.

Prototype

@Override
public PutRecordsResult putRecords(PutRecordsRequest request) 

Document

Writes multiple data records into a Kinesis data stream in a single call (also referred to as a PutRecords request).
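
The call sends a whole batch in one request, and it is not atomic: individual entries can fail while the request as a whole succeeds. A minimal, self-contained sketch of a single batched write is shown below; the stream name, payloads, and bare client construction are illustrative placeholders, not taken from the examples on this page:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

import com.amazonaws.services.kinesis.AmazonKinesisClient;
import com.amazonaws.services.kinesis.model.PutRecordsRequest;
import com.amazonaws.services.kinesis.model.PutRecordsRequestEntry;
import com.amazonaws.services.kinesis.model.PutRecordsResult;

public class PutRecordsSketch {
    public static void main(String[] args) {
        // Placeholder client construction; real code should configure
        // credentials and region, as the examples below do.
        AmazonKinesisClient client = new AmazonKinesisClient();

        List<PutRecordsRequestEntry> entries = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            entries.add(new PutRecordsRequestEntry()
                    .withData(ByteBuffer.wrap(("record-" + i).getBytes(StandardCharsets.UTF_8)))
                    .withPartitionKey(Integer.toString(i)));
        }

        PutRecordsResult result = client.putRecords(new PutRecordsRequest()
                .withStreamName("my-stream") // placeholder stream name
                .withRecords(entries));

        // PutRecords is not atomic: individual entries can fail even when the
        // call itself succeeds, so always check getFailedRecordCount().
        System.out.println("Failed records: " + result.getFailedRecordCount());
    }
}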

Usage

From source file: org.apache.beam.sdk.io.kinesis.KinesisUploader.java

License: Apache License

public static void uploadAll(List<String> data, KinesisTestOptions options) {
    AmazonKinesisClient client = new AmazonKinesisClient(new StaticCredentialsProvider(
            new BasicAWSCredentials(options.getAwsAccessKey(), options.getAwsSecretKey())))
                    .withRegion(Regions.fromName(options.getAwsKinesisRegion()));

    List<List<String>> partitions = Lists.partition(data, MAX_NUMBER_OF_RECORDS_IN_BATCH);

    for (List<String> partition : partitions) {
        List<PutRecordsRequestEntry> allRecords = newArrayList();
        for (String row : partition) {
            allRecords.add(new PutRecordsRequestEntry()
                    .withData(ByteBuffer.wrap(row.getBytes(Charsets.UTF_8)))
                    .withPartitionKey(Integer.toString(row.hashCode())));
        }

        PutRecordsResult result;
        do {
            result = client.putRecords(new PutRecordsRequest().withStreamName(options.getAwsKinesisStream())
                    .withRecords(allRecords));
            List<PutRecordsRequestEntry> failedRecords = newArrayList();
            int i = 0;
            for (PutRecordsResultEntry row : result.getRecords()) {
                if (row.getErrorCode() != null) {
                    failedRecords.add(allRecords.get(i));
                }
                ++i;
            }
            // Retry only the entries that failed on the previous attempt.
            allRecords = failedRecords;
        } while (result.getFailedRecordCount() > 0);
    }
}
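
The loop above resubmits only the failed entries, but it retries immediately. Kinesis commonly rejects individual records with the ProvisionedThroughputExceededException error code when a shard is saturated, so in practice a short pause between attempts is useful. A sketch of the same retry loop with exponential backoff; client, options, and allRecords mirror the example above, and the backoff bounds are illustrative assumptions:

int attempt = 0;
PutRecordsResult result;
do {
    if (attempt > 0) {
        try {
            // Back off 1s, 2s, 4s, ... capped at 10s before each retry.
            Thread.sleep(Math.min(1000L << (attempt - 1), 10_000L));
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
        }
    }
    result = client.putRecords(new PutRecordsRequest()
            .withStreamName(options.getAwsKinesisStream())
            .withRecords(allRecords));

    // Collect the entries that failed in this attempt.
    List<PutRecordsRequestEntry> failedRecords = new ArrayList<>();
    for (int i = 0; i < result.getRecords().size(); i++) {
        if (result.getRecords().get(i).getErrorCode() != null) {
            failedRecords.add(allRecords.get(i));
        }
    }
    allRecords = failedRecords;
    attempt++;
} while (result.getFailedRecordCount() > 0);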

From source file: org.apache.nifi.processors.aws.kinesis.stream.PutKinesisStream.java

License: Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {

    final int batchSize = context.getProperty(BATCH_SIZE).asInteger();
    final long maxBufferSizeBytes = context.getProperty(MAX_MESSAGE_BUFFER_SIZE_MB).asDataSize(DataUnit.B)
            .longValue();

    List<FlowFile> flowFiles = filterMessagesByMaxSize(session, batchSize, maxBufferSizeBytes,
            AWS_KINESIS_ERROR_MESSAGE);

    HashMap<String, List<FlowFile>> hashFlowFiles = new HashMap<>();
    HashMap<String, List<PutRecordsRequestEntry>> recordHash = new HashMap<>();

    final AmazonKinesisClient client = getClient();

    try {

        List<FlowFile> failedFlowFiles = new ArrayList<>();
        List<FlowFile> successfulFlowFiles = new ArrayList<>();

        // Prepare batch of records
        for (int i = 0; i < flowFiles.size(); i++) {
            FlowFile flowFile = flowFiles.get(i);

            String streamName = context.getProperty(KINESIS_STREAM_NAME)
                    .evaluateAttributeExpressions(flowFile).getValue();

            final ByteArrayOutputStream baos = new ByteArrayOutputStream();
            session.exportTo(flowFile, baos);
            PutRecordsRequestEntry record = new PutRecordsRequestEntry()
                    .withData(ByteBuffer.wrap(baos.toByteArray()));

            String partitionKey = context.getProperty(PutKinesisStream.KINESIS_PARTITION_KEY)
                    .evaluateAttributeExpressions(flowFile).getValue();

            if (!StringUtils.isBlank(partitionKey)) {
                record.setPartitionKey(partitionKey);
            } else {
                // Fall back to a random partition key when none is configured.
                record.setPartitionKey(Integer.toString(randomParitionKeyGenerator.nextInt()));
            }

            if (!recordHash.containsKey(streamName)) {
                recordHash.put(streamName, new ArrayList<>());
            }
            if (!hashFlowFiles.containsKey(streamName)) {
                hashFlowFiles.put(streamName, new ArrayList<>());
            }

            hashFlowFiles.get(streamName).add(flowFile);
            recordHash.get(streamName).add(record);
        }

        for (Map.Entry<String, List<PutRecordsRequestEntry>> entryRecord : recordHash.entrySet()) {
            String streamName = entryRecord.getKey();
            List<PutRecordsRequestEntry> records = entryRecord.getValue();

            if (!records.isEmpty()) {

                PutRecordsRequest putRecordRequest = new PutRecordsRequest();
                putRecordRequest.setStreamName(streamName);
                putRecordRequest.setRecords(records);
                PutRecordsResult results = client.putRecords(putRecordRequest);

                List<PutRecordsResultEntry> responseEntries = results.getRecords();
                for (int i = 0; i < responseEntries.size(); i++) {
                    PutRecordsResultEntry entry = responseEntries.get(i);
                    FlowFile flowFile = hashFlowFiles.get(streamName).get(i);

                    Map<String, String> attributes = new HashMap<>();
                    attributes.put(AWS_KINESIS_SHARD_ID, entry.getShardId());
                    attributes.put(AWS_KINESIS_SEQUENCE_NUMBER, entry.getSequenceNumber());

                    if (!StringUtils.isBlank(entry.getErrorCode())) {
                        attributes.put(AWS_KINESIS_ERROR_CODE, entry.getErrorCode());
                        attributes.put(AWS_KINESIS_ERROR_MESSAGE, entry.getErrorMessage());
                        flowFile = session.putAllAttributes(flowFile, attributes);
                        failedFlowFiles.add(flowFile);
                    } else {
                        flowFile = session.putAllAttributes(flowFile, attributes);
                        successfulFlowFiles.add(flowFile);
                    }
                }
            }
            // records is the same list instance as recordHash.get(streamName),
            // so one clear() empties both references.
            records.clear();
        }

        if (!failedFlowFiles.isEmpty()) {
            session.transfer(failedFlowFiles, REL_FAILURE);
            getLogger().error("Failed to publish records to Kinesis: {}", new Object[] { failedFlowFiles });
        }
        if (!successfulFlowFiles.isEmpty()) {
            session.transfer(successfulFlowFiles, REL_SUCCESS);
            getLogger().debug("Successfully published records to Kinesis: {}",
                    new Object[] { successfulFlowFiles });
        }

    } catch (final Exception exception) {
        getLogger().error("Failed to publish records to Kinesis due to exception {}; flowfiles: {}",
                new Object[] { exception, flowFiles });
        session.transfer(flowFiles, REL_FAILURE);
        context.yield();
    }
}
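
Both examples chunk their input before calling putRecords: the Beam test helper partitions by MAX_NUMBER_OF_RECORDS_IN_BATCH, and the NiFi processor filters flowfiles by batch size and buffer size. This matters because the service caps each PutRecords request at 500 records (and 5 MB in total, with each record at most 1 MB). A minimal chunking sketch; the method name and identifiers are illustrative, not from either example:

// Sketch: split a prepared entry list into batches that respect the
// 500-record-per-request PutRecords service limit.
void putInBatches(AmazonKinesisClient client, String streamName,
        List<PutRecordsRequestEntry> entries) {
    final int maxRecordsPerRequest = 500; // service limit per PutRecords call
    for (int start = 0; start < entries.size(); start += maxRecordsPerRequest) {
        List<PutRecordsRequestEntry> batch = entries.subList(start,
                Math.min(start + maxRecordsPerRequest, entries.size()));
        PutRecordsResult result = client.putRecords(new PutRecordsRequest()
                .withStreamName(streamName)
                .withRecords(batch));
        // Check result.getFailedRecordCount() and retry failed entries,
        // as the examples above do.
    }
}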