Example usage for the com.amazonaws.services.kinesis.model PutRecordRequest constructor

Introduction

On this page you can find usage examples for the com.amazonaws.services.kinesis.model.PutRecordRequest constructor, PutRecordRequest().

Prototype

PutRecordRequest()
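
A minimal sketch of how this no-argument constructor is typically used with the AWS SDK for Java 1.x; the client construction, stream name, and payload below are illustrative placeholders, not part of any example on this page:

import java.nio.ByteBuffer;

import com.amazonaws.services.kinesis.AmazonKinesis;
import com.amazonaws.services.kinesis.AmazonKinesisClientBuilder;
import com.amazonaws.services.kinesis.model.PutRecordRequest;
import com.amazonaws.services.kinesis.model.PutRecordResult;

public class PutRecordRequestSketch {
    public static void main(String[] args) {
        // The client picks up credentials and region from the default provider chains.
        AmazonKinesis kinesis = AmazonKinesisClientBuilder.standard().build();

        // Construct the request, then set the required fields:
        // stream name, data blob, and partition key.
        PutRecordRequest request = new PutRecordRequest();
        request.setStreamName("my-example-stream"); // placeholder stream name
        request.setData(ByteBuffer.wrap("hello".getBytes()));
        request.setPartitionKey("partitionKey-1");

        PutRecordResult result = kinesis.putRecord(request);
        System.out.println("ShardId: " + result.getShardId()
                + ", SequenceNumber: " + result.getSequenceNumber());
    }
}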

Usage

From source file:AmazonKinesisSample.java

License:Open Source License

public static void main(String[] args) throws Exception {
    init();

    final String myStreamName = "myFirstStream";
    final Integer myStreamSize = 1;

    // Create a stream. The number of shards determines the provisioned throughput.

    CreateStreamRequest createStreamRequest = new CreateStreamRequest();
    createStreamRequest.setStreamName(myStreamName);
    createStreamRequest.setShardCount(myStreamSize);

    kinesisClient.createStream(createStreamRequest);
    // The stream is now being created.
    LOG.info("Creating Stream : " + myStreamName);
    waitForStreamToBecomeAvailable(myStreamName);

    // list all of my streams
    ListStreamsRequest listStreamsRequest = new ListStreamsRequest();
    listStreamsRequest.setLimit(10);
    ListStreamsResult listStreamsResult = kinesisClient.listStreams(listStreamsRequest);
    List<String> streamNames = listStreamsResult.getStreamNames();
    while (listStreamsResult.isHasMoreStreams()) {
        if (streamNames.size() > 0) {
            listStreamsRequest.setExclusiveStartStreamName(streamNames.get(streamNames.size() - 1));
        }

        listStreamsResult = kinesisClient.listStreams(listStreamsRequest);
        streamNames.addAll(listStreamsResult.getStreamNames());

    }
    LOG.info("Printing my list of streams : ");

    // print all of my streams.
    if (!streamNames.isEmpty()) {
        System.out.println("List of my streams: ");
    }
    for (int i = 0; i < streamNames.size(); i++) {
        System.out.println(streamNames.get(i));
    }

    LOG.info("Putting records in stream : " + myStreamName);
    // Write 10 records to the stream
    for (int j = 0; j < 10; j++) {
        PutRecordRequest putRecordRequest = new PutRecordRequest();
        putRecordRequest.setStreamName(myStreamName);
        putRecordRequest.setData(ByteBuffer.wrap(String.format("testData-%d", j).getBytes()));
        putRecordRequest.setPartitionKey(String.format("partitionKey-%d", j));
        PutRecordResult putRecordResult = kinesisClient.putRecord(putRecordRequest);
        System.out.println("Successfully putrecord, partition key : " + putRecordRequest.getPartitionKey()
                + ", ShardID : " + putRecordResult.getShardId());
    }

    // Delete the stream.
    LOG.info("Deleting stream : " + myStreamName);
    DeleteStreamRequest deleteStreamRequest = new DeleteStreamRequest();
    deleteStreamRequest.setStreamName(myStreamName);

    kinesisClient.deleteStream(deleteStreamRequest);
    // The stream is now being deleted.
    LOG.info("Stream is now being deleted : " + myStreamName);
}
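
The same request can also be built with the SDK's fluent with* setters, which return the request instance and allow chaining. A brief variant of the loop body above, reusing kinesisClient and myStreamName from this example (the partition key and payload are placeholders):

        PutRecordRequest putRecordRequest = new PutRecordRequest()
                .withStreamName(myStreamName)
                .withPartitionKey("partitionKey-0")
                .withData(ByteBuffer.wrap("testData-0".getBytes()));
        PutRecordResult putRecordResult = kinesisClient.putRecord(putRecordRequest);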

From source file:kinesisAlertAnalysis.java

License:Open Source License

public static void main(String[] args) throws Exception {

    init();

    final String myStreamName = "alertsStream";
    final Integer myStreamSize = 1;

    /*
     * The ProfileCredentialsProvider will return your [jreilly]
     * credential profile by reading from the credentials file located at
     * (/Users/johnreilly/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider("jreilly").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (/Users/johnreilly/.aws/credentials), and is in valid format.", e);
    }

    // Describe the stream and check if it exists.
    DescribeStreamRequest describeStreamRequest = new DescribeStreamRequest().withStreamName(myStreamName);
    try {
        StreamDescription streamDescription = kinesis.describeStream(describeStreamRequest)
                .getStreamDescription();
        System.out.printf("Stream %s has a status of %s.\n", myStreamName, streamDescription.getStreamStatus());

        if ("DELETING".equals(streamDescription.getStreamStatus())) {
            System.out.println("Stream is being deleted. This sample will now exit.");
            System.exit(0);
        }

        // Wait for the stream to become active if it is not yet ACTIVE.
        if (!"ACTIVE".equals(streamDescription.getStreamStatus())) {
            waitForStreamToBecomeAvailable(myStreamName);
        }
    } catch (ResourceNotFoundException ex) {
        System.out.printf("Stream %s does not exist. Creating it now.\n", myStreamName);

        // Create a stream. The number of shards determines the provisioned throughput.
        CreateStreamRequest createStreamRequest = new CreateStreamRequest();
        createStreamRequest.setStreamName(myStreamName);
        createStreamRequest.setShardCount(myStreamSize);
        kinesis.createStream(createStreamRequest);
        // The stream is now being created. Wait for it to become active.
        waitForStreamToBecomeAvailable(myStreamName);
    }

    // List all of my streams.
    ListStreamsRequest listStreamsRequest = new ListStreamsRequest();
    listStreamsRequest.setLimit(10);
    ListStreamsResult listStreamsResult = kinesis.listStreams(listStreamsRequest);
    List<String> streamNames = listStreamsResult.getStreamNames();
    while (listStreamsResult.isHasMoreStreams()) {
        if (streamNames.size() > 0) {
            listStreamsRequest.setExclusiveStartStreamName(streamNames.get(streamNames.size() - 1));
        }

        listStreamsResult = kinesis.listStreams(listStreamsRequest);
        streamNames.addAll(listStreamsResult.getStreamNames());
    }
    // Print all of my streams.
    System.out.println("List of my streams: ");
    for (int i = 0; i < streamNames.size(); i++) {
        System.out.println("\t- " + streamNames.get(i));
    }

    AmazonSQS sqs = new AmazonSQSClient(credentials);
    Region usEast1 = Region.getRegion(Regions.US_EAST_1);
    sqs.setRegion(usEast1);

    System.out.println("");
    System.out.println("===========================================");
    System.out.println("Getting Started with sqsAlertCache");
    System.out.println("===========================================\n");

    try {

        String thisQueue = "alertCache";
        String nextQueue = "alertReceive";

        // Receive messages
        System.out.println("Receiving messages from " + thisQueue + ".");
        ReceiveMessageRequest receiveMessageRequest = new ReceiveMessageRequest(thisQueue);
        List<Message> messages = sqs.receiveMessage(receiveMessageRequest).getMessages();
        System.out.println("Message count for " + thisQueue + ": " + messages.size() + "\n");

        for (Message message : messages) {

            System.out.println("  Message");
            System.out.println("    MessageId:     " + message.getMessageId());
            System.out.println("    ReceiptHandle: " + message.getReceiptHandle());
            System.out.println("    MD5OfBody:     " + message.getMD5OfBody());
            System.out.println("    Body:          " + message.getBody());
            for (Entry<String, String> entry : message.getAttributes().entrySet()) {
                System.out.println("  Attribute");
                System.out.println("    Name:  " + entry.getKey());
                System.out.println("    Value: " + entry.getValue());
            }
            System.out.println();

            // Write record to the stream
            long createTime = System.currentTimeMillis();
            PutRecordRequest putRecordRequest = new PutRecordRequest();
            putRecordRequest.setStreamName(myStreamName);
            putRecordRequest.setData(ByteBuffer.wrap(String.format(message.getBody(), createTime).getBytes()));
            putRecordRequest.setPartitionKey(String.format("partitionKey-%d", createTime));
            PutRecordResult putRecordResult = kinesis.putRecord(putRecordRequest);
            System.out.printf(
                    "Successfully put record, partition key : %s, ShardID : %s, SequenceNumber : %s.\n",
                    putRecordRequest.getPartitionKey(), putRecordResult.getShardId(),
                    putRecordResult.getSequenceNumber());

            // then send the message to the next queue
            System.out.println("Sending messages to next queue.");
            sqs.sendMessage(new SendMessageRequest(nextQueue, message.getBody()));

            // delete the message from this queue after forwarding it
            System.out.println("Deleting message from this queue.\n");
            String messageReceiptHandle = message.getReceiptHandle();
            sqs.deleteMessage(new DeleteMessageRequest(thisQueue, messageReceiptHandle));
        }

    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon SQS, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with SQS, such as not "
                + "being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:sqsAlertStream.java

License:Open Source License

public static void main(String[] args) throws Exception {

    // get credentials
    String user = "jreilly";
    AWSCredentials credentials = whgHelper.getCred(user);

    // use credentials to set access to SQS
    AmazonSQS sqs = whgHelper.setQueueAccess(credentials);

    // define queue that messages will be retrieved from
    String thisQueue = "alertStream";
    String nextQueue = "alertErrorHandling";

    // set access to stream instance
    kinesis = new AmazonKinesisClient(credentials);

    final String streamName = "alertsStream";
    final Integer streamSize = 1;

    while (true) {

        // pull list of current messages (up to 10) in the queue
        List<Message> messages = whgHelper.getMessagesFromQueue(thisQueue, sqs);
        System.out.println("Count of messages in " + thisQueue + ": " + messages.size());

        try {

            for (Message message : messages) {

                whgHelper.printMessage(message);
                for (Entry<String, String> entry : message.getAttributes().entrySet()) {
                    whgHelper.printMessageEntry(entry);
                }

                // Write record to the stream
                long createTime = System.currentTimeMillis();
                PutRecordRequest putRecordRequest = new PutRecordRequest();
                putRecordRequest.setStreamName(streamName);
                putRecordRequest
                        .setData(ByteBuffer.wrap(String.format(message.getBody(), createTime).getBytes()));
                putRecordRequest.setPartitionKey(String.format("partitionKey-%d", createTime));
                PutRecordResult putRecordResult = kinesis.putRecord(putRecordRequest);
                System.out.printf(
                        "Successfully put record, partition key : %s, ShardID : %s, SequenceNumber : %s.\n",
                        putRecordRequest.getPartitionKey(), putRecordResult.getShardId(),
                        putRecordResult.getSequenceNumber());

                // then send the message to the next queue
                System.out.println("Sending messages to next queue.");
                sqs.sendMessage(new SendMessageRequest(nextQueue, message.getBody()));

                // delete the message from this queue after forwarding it
                System.out.println("Deleting message from this queue.\n");
                String messageReceiptHandle = message.getReceiptHandle();
                sqs.deleteMessage(new DeleteMessageRequest(thisQueue, messageReceiptHandle));
            }
            Thread.sleep(20000); // pause for 20 seconds before polling the queue again

        } catch (AmazonServiceException ase) {
            whgHelper.errorMessagesAse(ase);
        } catch (AmazonClientException ace) {
            whgHelper.errorMessagesAce(ace);
        }
    }
}

From source file:AmazonKinesisCreate.java

License:Open Source License

public static void main(String[] args) throws Exception {
    init();

    final String myStreamName = "philsteststream";
    final Integer myStreamSize = 1;

    // Create a stream. The number of shards determines the provisioned throughput.

    CreateStreamRequest createStreamRequest = new CreateStreamRequest();
    createStreamRequest.setStreamName(myStreamName);
    createStreamRequest.setShardCount(myStreamSize);

    // pt
    kinesisClient.createStream(createStreamRequest);

    // The stream is now being created.
    LOG.info("Creating Stream : " + myStreamName);
    waitForStreamToBecomeAvailable(myStreamName);

    // list all of my streams
    ListStreamsRequest listStreamsRequest = new ListStreamsRequest();
    listStreamsRequest.setLimit(10);
    ListStreamsResult listStreamsResult = kinesisClient.listStreams(listStreamsRequest);
    List<String> streamNames = listStreamsResult.getStreamNames();
    while (listStreamsResult.isHasMoreStreams()) {
        if (streamNames.size() > 0) {
            listStreamsRequest.setExclusiveStartStreamName(streamNames.get(streamNames.size() - 1));
        }

        listStreamsResult = kinesisClient.listStreams(listStreamsRequest);
        streamNames.addAll(listStreamsResult.getStreamNames());

    }
    LOG.info("Printing my list of streams : ");

    // print all of my streams.
    if (!streamNames.isEmpty()) {
        System.out.println("List of my streams: ");
    }
    for (int i = 0; i < streamNames.size(); i++) {
        System.out.println(streamNames.get(i));
    }

    LOG.info("Putting records in stream : " + myStreamName);
    // Write 10 records to the stream
    for (int j = 0; j < 10; j++) {

        try {
            PutRecordRequest putRecordRequest = new PutRecordRequest();
            putRecordRequest.setStreamName(myStreamName);
            putRecordRequest.setData(ByteBuffer.wrap(String.format("testData-%d", j).getBytes()));
            putRecordRequest.setPartitionKey(String.format("partitionKey-%d", j));
            PutRecordResult putRecordResult = kinesisClient.putRecord(putRecordRequest);
            System.out.println("Successfully putrecord, partition key : " + putRecordRequest.getPartitionKey()
                    + ", ShardID : " + putRecordResult.getShardId());
            Thread.sleep(1000);

        } catch (Exception e) {
            e.printStackTrace();
        }

    }

    // Delete the stream.

    /*
    LOG.info("Deleting stream : " + myStreamName);
    DeleteStreamRequest deleteStreamRequest = new DeleteStreamRequest();
    deleteStreamRequest.setStreamName(myStreamName);
            
    kinesisClient.deleteStream(deleteStreamRequest);
    // The stream is now being deleted.
    LOG.info("Stream is now being deleted : " + myStreamName);
            
    LOG.info("Streaming completed" + myStreamName);
    */

}

From source file:KinesisStreamDataProducer.java

License:Open Source License

public static void main(String[] args) throws Exception {
    init();

    final String myStreamName = "sparkStream";

    logger.info("Putting records in stream : " + myStreamName);
    // Write records to the stream
    for (int j = 0; j < 100; j++) {
        PutRecordRequest putRecordRequest = new PutRecordRequest();
        putRecordRequest.setStreamName(myStreamName);
        putRecordRequest.setData(
                ByteBuffer.wrap(String.format("testData-%d testData-%d testData-%d", j, j, j).getBytes()));
        putRecordRequest.setPartitionKey(String.format("partitionKey-%d", j));
        PutRecordResult putRecordResult = kinesisClient.putRecord(putRecordRequest);
        System.out.println("Successfully putrecord, partition key : " + putRecordRequest.getPartitionKey()
                + ", ShardID : " + putRecordResult.getShardId());
    }
}

From source file:AmazonKinesisRecordProducerSample.java

License:Open Source License

public static void main(String[] args) throws Exception {
    init();

    final String myStreamName = AmazonKinesisApplicationSample.SAMPLE_APPLICATION_STREAM_NAME;
    final Integer myStreamSize = 1;

    // Describe the stream and check if it exists.
    DescribeStreamRequest describeStreamRequest = new DescribeStreamRequest().withStreamName(myStreamName);
    try {
        StreamDescription streamDescription = kinesis.describeStream(describeStreamRequest)
                .getStreamDescription();
        System.out.printf("Stream %s has a status of %s.\n", myStreamName, streamDescription.getStreamStatus());

        if ("DELETING".equals(streamDescription.getStreamStatus())) {
            System.out.println("Stream is being deleted. This sample will now exit.");
            System.exit(0);
        }

        // Wait for the stream to become active if it is not yet ACTIVE.
        if (!"ACTIVE".equals(streamDescription.getStreamStatus())) {
            waitForStreamToBecomeAvailable(myStreamName);
        }
    } catch (ResourceNotFoundException ex) {
        System.out.printf("Stream %s does not exist. Creating it now.\n", myStreamName);

        // Create a stream. The number of shards determines the provisioned throughput.
        CreateStreamRequest createStreamRequest = new CreateStreamRequest();
        createStreamRequest.setStreamName(myStreamName);
        createStreamRequest.setShardCount(myStreamSize);
        kinesis.createStream(createStreamRequest);
        // The stream is now being created. Wait for it to become active.
        waitForStreamToBecomeAvailable(myStreamName);
    }

    // List all of my streams.
    ListStreamsRequest listStreamsRequest = new ListStreamsRequest();
    listStreamsRequest.setLimit(10);
    ListStreamsResult listStreamsResult = kinesis.listStreams(listStreamsRequest);
    List<String> streamNames = listStreamsResult.getStreamNames();
    while (listStreamsResult.isHasMoreStreams()) {
        if (streamNames.size() > 0) {
            listStreamsRequest.setExclusiveStartStreamName(streamNames.get(streamNames.size() - 1));
        }

        listStreamsResult = kinesis.listStreams(listStreamsRequest);
        streamNames.addAll(listStreamsResult.getStreamNames());
    }
    // Print all of my streams.
    System.out.println("List of my streams: ");
    for (int i = 0; i < streamNames.size(); i++) {
        System.out.println("\t- " + streamNames.get(i));
    }

    System.out.printf("Putting records in stream : %s until this application is stopped...\n", myStreamName);
    System.out.println("Press CTRL-C to stop.");
    // Write records to the stream until this program is aborted.
    while (true) {
        long createTime = System.currentTimeMillis();
        PutRecordRequest putRecordRequest = new PutRecordRequest();
        putRecordRequest.setStreamName(myStreamName);
        putRecordRequest.setData(ByteBuffer.wrap(String.format("testData-%d", createTime).getBytes()));
        putRecordRequest.setPartitionKey(String.format("partitionKey-%d", createTime));
        PutRecordResult putRecordResult = kinesis.putRecord(putRecordRequest);
        System.out.printf("Successfully put record, partition key : %s, ShardID : %s, SequenceNumber : %s.\n",
                putRecordRequest.getPartitionKey(), putRecordResult.getShardId(),
                putRecordResult.getSequenceNumber());
    }
}

From source file:com.alertlogic.aws.analytics.poc.RecordKinesisPutter.java

License:Open Source License

/**
 * Send a single record to Amazon Kinesis using PutRecord.
 */
private void sendRecord() {
    Record record = recordFactory.create();
    byte[] bytes;
    try {
        bytes = JSON.writeValueAsBytes(record);
    } catch (IOException e) {
        LOG.warn("Skipping record. Unable to serialize: '" + record + "'", e);
        return;
    }

    PutRecordRequest putRecord = new PutRecordRequest();
    putRecord.setStreamName(streamName);
    // We use the resource as the partition key so we can accurately calculate totals for a given resource
    putRecord.setPartitionKey(record.getField("resource"));
    putRecord.setData(ByteBuffer.wrap(bytes));
    // Order is not important for this application so we do not send a SequenceNumberForOrdering
    putRecord.setSequenceNumberForOrdering(null);

    try {
        kinesis.putRecord(putRecord);
    } catch (ProvisionedThroughputExceededException ex) {
        if (LOG.isDebugEnabled()) {
            LOG.debug(String.format("Thread %s's Throughput exceeded. Waiting 10ms",
                    Thread.currentThread().getName()));
        }
        try {
            Thread.sleep(10);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    } catch (AmazonClientException ex) {
        LOG.warn("Error sending record to Amazon Kinesis.", ex);
    }
}
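
The fixed 10 ms pause above is the sample's minimal throttle handling. A hedged sketch of an alternative that retries the same request with exponential backoff, reusing the kinesis client and LOG logger from this class (the method name, retry count, and base delay are illustrative, not part of the original sample):

    private void sendWithBackoff(PutRecordRequest putRecord) {
        long delayMillis = 10; // initial backoff, doubled after each throttle
        for (int attempt = 0; attempt < 5; attempt++) {
            try {
                kinesis.putRecord(putRecord);
                return; // success
            } catch (ProvisionedThroughputExceededException ex) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Throughput exceeded. Waiting " + delayMillis + "ms before retrying.");
                }
                try {
                    Thread.sleep(delayMillis);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
                delayMillis *= 2;
            } catch (AmazonClientException ex) {
                LOG.warn("Error sending record to Amazon Kinesis.", ex);
                return;
            }
        }
        LOG.warn("Giving up on record after repeated throttling.");
    }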

From source file:com.alertlogic.aws.kinesis.test1.producer.HttpReferrerKinesisPutter.java

License:Open Source License

/**
 * Send a single pair to Amazon Kinesis using PutRecord.
 */
private void sendPair() {
    HttpReferrerPair pair = referrerFactory.create();
    byte[] bytes;
    try {
        bytes = JSON.writeValueAsBytes(pair);
    } catch (IOException e) {
        LOG.warn("Skipping pair. Unable to serialize: '" + pair + "'", e);
        return;
    }

    PutRecordRequest putRecord = new PutRecordRequest();
    putRecord.setStreamName(streamName);
    // We use the resource as the partition key so we can accurately calculate totals for a given resource
    putRecord.setPartitionKey(pair.getResource());
    putRecord.setData(ByteBuffer.wrap(bytes));
    // Order is not important for this application so we do not send a SequenceNumberForOrdering
    putRecord.setSequenceNumberForOrdering(null);

    try {
        kinesis.putRecord(putRecord);
    } catch (ProvisionedThroughputExceededException ex) {
        if (LOG.isDebugEnabled()) {
            LOG.debug(String.format("Thread %s's Throughput exceeded. Waiting 10ms",
                    Thread.currentThread().getName()));
        }
        try {
            Thread.sleep(10);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    } catch (AmazonClientException ex) {
        LOG.warn("Error sending record to Amazon Kinesis.", ex);
    }
}

From source file:com.boundary.aws.kinesis.Sample.java

License:Open Source License

public static void main(String[] args) throws Exception {
    init();

    final String myStreamName = "boundary-test-stream";
    final Integer myStreamSize = 1;

    // Create a stream. The number of shards determines the provisioned
    // throughput.

    CreateStreamRequest createStreamRequest = new CreateStreamRequest();
    createStreamRequest.setStreamName(myStreamName);
    createStreamRequest.setShardCount(myStreamSize);

    kinesisClient.createStream(createStreamRequest);
    // The stream is now being created.
    LOG.info("Creating Stream : " + myStreamName);
    waitForStreamToBecomeAvailable(myStreamName);

    // list all of my streams
    ListStreamsRequest listStreamsRequest = new ListStreamsRequest();
    listStreamsRequest.setLimit(10);
    ListStreamsResult listStreamsResult = kinesisClient.listStreams(listStreamsRequest);
    List<String> streamNames = listStreamsResult.getStreamNames();
    while (listStreamsResult.isHasMoreStreams()) {
        if (streamNames.size() > 0) {
            listStreamsRequest.setExclusiveStartStreamName(streamNames.get(streamNames.size() - 1));
        }

        listStreamsResult = kinesisClient.listStreams(listStreamsRequest);
        streamNames.addAll(listStreamsResult.getStreamNames());

    }
    LOG.info("Printing my list of streams : ");

    // print all of my streams.
    if (!streamNames.isEmpty()) {
        System.out.println("List of my streams: ");
    }
    for (int i = 0; i < streamNames.size(); i++) {
        System.out.println(streamNames.get(i));
    }

    LOG.info("Putting records in stream : " + myStreamName);
    // Write 100 records to the stream
    for (int j = 0; j < 100; j++) {
        PutRecordRequest putRecordRequest = new PutRecordRequest();
        putRecordRequest.setStreamName(myStreamName);
        putRecordRequest.setData(ByteBuffer.wrap(String.format("testData-%d", j).getBytes()));
        putRecordRequest.setPartitionKey(String.format("partitionKey-%d", j));
        PutRecordResult putRecordResult = kinesisClient.putRecord(putRecordRequest);
        System.out.println("Successfully putrecord, partition key : " + putRecordRequest.getPartitionKey()
                + ", ShardID : " + putRecordResult.getShardId());
    }

    // Delete the stream.
    LOG.info("Deleting stream : " + myStreamName);
    DeleteStreamRequest deleteStreamRequest = new DeleteStreamRequest();
    deleteStreamRequest.setStreamName(myStreamName);

    kinesisClient.deleteStream(deleteStreamRequest);
    // The stream is now being deleted.
    LOG.info("Stream is now being deleted : " + myStreamName);
}

From source file:com.calamp.services.kinesis.events.writer.CalAmpEventWriter.java

License:Open Source License

/**
 * Uses the Kinesis client to send the event to the given stream.
 *
 * @param event the CalAmpEvent to send
 * @param kinesisClient Amazon Kinesis client
 * @param streamName Name of stream
 */
public static void sendEvent(CalAmpEvent event, AmazonKinesis kinesisClient, String streamName) {
    byte[] bytes = event.toJsonAsBytes();
    // The bytes could be null if there is an issue with the JSON serialization by the Jackson JSON library.
    if (bytes == null) {
        LOG.warn("Could not get JSON bytes for stock trade");
        return;
    }

    LOG.info("Putting trade: " + event.toString());
    PutRecordRequest putRecord = new PutRecordRequest();
    putRecord.setStreamName(CalAmpParameters.unorderdStreamName);
    putRecord.setPartitionKey(String.valueOf(event.getMachineId()));
    putRecord.setData(ByteBuffer.wrap(bytes));

    // This is needed to guarantee FIFO ordering per partition key.
    if (prevSeqNum != null) {
        putRecord.setSequenceNumberForOrdering(prevSeqNum);
    }
    try {
        PutRecordResult res = kinesisClient.putRecord(putRecord);
        prevSeqNum = res.getSequenceNumber();
        Utils.lazyLog(putRecord, CalAmpParameters.writeLogName);
    } catch (AmazonClientException ex) {
        LOG.warn("Error sending record to Amazon Kinesis.", ex);
    }
}
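
Note on SequenceNumberForOrdering: when each put passes the sequence number returned by the previous PutRecord call for the same partition key, as this example does, Kinesis assigns strictly increasing sequence numbers to those records. If the field is left unset, as in the earlier producer examples, records from the same client are only coarsely ordered by arrival time.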