Example usage for com.amazonaws.services.kinesis.model.Record.getSequenceNumber()

List of usage examples for com.amazonaws.services.kinesis.model.Record.getSequenceNumber()

Introduction

On this page you can find example usage of com.amazonaws.services.kinesis.model.Record.getSequenceNumber().

Prototype


public String getSequenceNumber() 

Document

The unique identifier of the record within its shard.
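
Before the examples, here is a minimal, self-contained sketch (not taken from the sources below) of how the value returned by getSequenceNumber() is typically used: remember the sequence number of the last record processed, then resume reading the shard with an AFTER_SEQUENCE_NUMBER shard iterator. The stream name, shard ID, and the client created via AmazonKinesisClientBuilder are placeholder assumptions.

import com.amazonaws.services.kinesis.AmazonKinesis;
import com.amazonaws.services.kinesis.AmazonKinesisClientBuilder;
import com.amazonaws.services.kinesis.model.GetRecordsRequest;
import com.amazonaws.services.kinesis.model.GetRecordsResult;
import com.amazonaws.services.kinesis.model.GetShardIteratorRequest;
import com.amazonaws.services.kinesis.model.Record;

public class SequenceNumberSketch {
    public static void main(String[] args) {
        // Placeholder client, stream, and shard; replace with your own values.
        AmazonKinesis kinesisClient = AmazonKinesisClientBuilder.defaultClient();
        String streamName = "my-stream";
        String shardId = "shardId-000000000000";

        // Start reading at the oldest available record in the shard.
        GetShardIteratorRequest iteratorRequest = new GetShardIteratorRequest().withStreamName(streamName)
                .withShardId(shardId).withShardIteratorType("TRIM_HORIZON");
        String shardIterator = kinesisClient.getShardIterator(iteratorRequest).getShardIterator();

        // Read one batch and remember the sequence number of the last record seen.
        GetRecordsResult result = kinesisClient
                .getRecords(new GetRecordsRequest().withShardIterator(shardIterator).withLimit(100));
        String lastSequenceNumber = null;
        for (Record record : result.getRecords()) {
            lastSequenceNumber = record.getSequenceNumber();
            System.out.println("Read record with sequence number " + lastSequenceNumber);
        }

        // Later, resume reading just after the last record that was processed.
        if (lastSequenceNumber != null) {
            GetShardIteratorRequest resumeRequest = new GetShardIteratorRequest().withStreamName(streamName)
                    .withShardId(shardId).withShardIteratorType("AFTER_SEQUENCE_NUMBER")
                    .withStartingSequenceNumber(lastSequenceNumber);
            String resumeIterator = kinesisClient.getShardIterator(resumeRequest).getShardIterator();
            System.out.println("Resume iterator: " + resumeIterator);
        }
    }
}

The examples that follow show the same method used in custom consumers and in KCL record processors.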

Usage

From source file: SampleKinesisRecordScheme.java

License: Open Source License

@Override
public List<Object> deserialize(Record record) {
    final List<Object> l = new ArrayList<>();
    l.add(record.getPartitionKey());
    l.add(record.getSequenceNumber());
    l.add(record.getData().array());
    return l;
}

From source file: AmazonKinesisGet.java

License: Open Source License

public static void main(String[] args) throws Exception {
    init();

    final String myStreamName = "philsteststream";
    final Integer myStreamSize = 1;

    // list all of my streams
    ListStreamsRequest listStreamsRequest = new ListStreamsRequest();
    listStreamsRequest.setLimit(10);
    ListStreamsResult listStreamsResult = kinesisClient.listStreams(listStreamsRequest);
    List<String> streamNames = listStreamsResult.getStreamNames();
    while (listStreamsResult.isHasMoreStreams()) {
        if (streamNames.size() > 0) {
            listStreamsRequest.setExclusiveStartStreamName(streamNames.get(streamNames.size() - 1));
        }

        listStreamsResult = kinesisClient.listStreams(listStreamsRequest);

        streamNames.addAll(listStreamsResult.getStreamNames());

    }
    LOG.info("Printing my list of streams : ");

    // print all of my streams.
    if (!streamNames.isEmpty()) {
        System.out.println("List of my streams: ");
    }
    for (int i = 0; i < streamNames.size(); i++) {
        System.out.println(streamNames.get(i));
    }

    //System.out.println(streamNames.get(0));
    String myownstream = streamNames.get(0);

    // Retrieve the Shards from a Stream
    DescribeStreamRequest describeStreamRequest = new DescribeStreamRequest();
    describeStreamRequest.setStreamName(myownstream);
    DescribeStreamResult describeStreamResult;
    List<Shard> shards = new ArrayList<>();
    String lastShardId = null;

    do {
        describeStreamRequest.setExclusiveStartShardId(lastShardId);
        describeStreamResult = kinesisClient.describeStream(describeStreamRequest);
        shards.addAll(describeStreamResult.getStreamDescription().getShards());
        if (shards.size() > 0) {
            lastShardId = shards.get(shards.size() - 1).getShardId();
        }
    } while (describeStreamResult.getStreamDescription().getHasMoreShards());

    // Get Data from the Shards in a Stream
    // Hard-coded to use only 1 shard
    String shardIterator;
    GetShardIteratorRequest getShardIteratorRequest = new GetShardIteratorRequest();
    getShardIteratorRequest.setStreamName(myownstream);
    //get(0) shows hardcoded to 1 stream
    getShardIteratorRequest.setShardId(shards.get(0).getShardId());
    // using TRIM_HORIZON but could use alternatives
    getShardIteratorRequest.setShardIteratorType("TRIM_HORIZON");

    GetShardIteratorResult getShardIteratorResult = kinesisClient.getShardIterator(getShardIteratorRequest);
    shardIterator = getShardIteratorResult.getShardIterator();

    // Continuously read data records from shard.
    List<Record> records;

    while (true) {
        // Create new GetRecordsRequest with existing shardIterator.
        // Set maximum records to return to 1000.

        GetRecordsRequest getRecordsRequest = new GetRecordsRequest();
        getRecordsRequest.setShardIterator(shardIterator);
        getRecordsRequest.setLimit(1000);

        GetRecordsResult result = kinesisClient.getRecords(getRecordsRequest);

        // Put result into record list. Result may be empty.
        records = result.getRecords();

        // Print records
        for (Record record : records) {
            ByteBuffer byteBuffer = record.getData();
            System.out.println(String.format("Seq No: %s - %s", record.getSequenceNumber(),
                    new String(byteBuffer.array())));
        }

        try {
            Thread.sleep(1000);
        } catch (InterruptedException exception) {
            throw new RuntimeException(exception);
        }

        shardIterator = result.getNextShardIterator();
    }

}

From source file: AmazonKinesisApplicationSampleRecordProcessor.java

License: Open Source License

/**
 * Process a single record.
 * 
 * @param record The record to be processed.
 */
private void processSingleRecord(Record record) {
    // TODO Add your own record processing logic here

    String data = null;
    try {
        // For this app, we interpret the payload as UTF-8 chars.
        data = decoder.decode(record.getData()).toString();
        // Assume this record came from AmazonKinesisSample and log its age.
        long recordCreateTime = Long.parseLong(data.substring("testData-".length()));
        long ageOfRecordInMillis = System.currentTimeMillis() - recordCreateTime;

        LOG.info(record.getSequenceNumber() + ", " + record.getPartitionKey() + ", " + data + ", Created "
                + ageOfRecordInMillis + " milliseconds ago.");
    } catch (NumberFormatException e) {
        LOG.info("Record does not match sample record format. Ignoring record with data; " + data);
    } catch (CharacterCodingException e) {
        LOG.error("Malformed data: " + data, e);
    }
}

From source file: SampleRecordProcessor.java

License: Open Source License

/** Process records performing retries as needed. Skip "poison pill" records.
 * @param records
 */
private void processRecordsWithRetries(List<Record> records) {
    for (Record record : records) {
        boolean processedSuccessfully = false;
        String data = null;
        for (int i = 0; i < NUM_RETRIES; i++) {
            try {
                // For this app, we interpret the payload as UTF-8 chars.
                data = decoder.decode(record.getData()).toString();
                LOG.info(record.getSequenceNumber() + ", " + record.getPartitionKey() + ", " + data);
                //
                // Logic to process record goes here.
                //
                processedSuccessfully = true;
                break;
            } catch (CharacterCodingException e) {
                LOG.error("Malformed data: " + data, e);
                break;
            } catch (Throwable t) {
                LOG.warn("Caught throwable while processing record " + record, t);
            }

            // backoff if we encounter an exception.
            try {
                Thread.sleep(BACKOFF_TIME_IN_MILLIS);
            } catch (InterruptedException e) {
                LOG.debug("Interrupted sleep", e);
            }
        }

        if (!processedSuccessfully) {
            LOG.error("Couldn't process record " + record + ". Skipping the record.");
        }
    }
}

From source file: com.alertlogic.aws.analytics.poc.RecordProcessor.java

License: Open Source License

@Override
public void processRecords(List<Record> records, IRecordProcessorCheckpointer checkpointer) {
    for (Record r : records) {
        // Deserialize each record as a UTF-8 encoded JSON String of the type provided
        T record;
        try {
            record = JSON.readValue(r.getData().array(), recordType);
        } catch (IOException e) {
            LOG.warn("Skipping record. Unable to parse record into Record. Partition Key: "
                    + r.getPartitionKey() + ". Sequence Number: " + r.getSequenceNumber(), e);
            continue;
        }
        // Increment the counter for the new record. This is synchronized because there is another thread reading from
        // the counter to compute running totals every interval.
        synchronized (counter) {
            counter.increment(record);
        }
    }

    // Checkpoint if it's time to!
    if (checkpointTimer.isTimeUp()) {
        // Obtain a lock on the counter to prevent additional counts from being calculated while checkpointing.
        synchronized (counter) {
            checkpoint(checkpointer);
            resetCheckpointAlarm();
        }
    }
}

From source file: com.alertlogic.aws.kinesis.test1.kcl.CountingRecordProcessor.java

License: Open Source License

@Override
public void processRecords(List<Record> records, IRecordProcessorCheckpointer checkpointer) {
    for (Record r : records) {
        // Deserialize each record as a UTF-8 encoded JSON String of the type provided
        T pair;
        try {
            pair = JSON.readValue(r.getData().array(), recordType);
        } catch (IOException e) {
            LOG.warn("Skipping record. Unable to parse record into HttpReferrerPair. Partition Key: "
                    + r.getPartitionKey() + ". Sequence Number: " + r.getSequenceNumber(), e);
            continue;
        }
        // Increment the counter for the new pair. This is synchronized because there is another thread reading from
        // the counter to compute running totals every interval.
        synchronized (counter) {
            counter.increment(pair);
        }
    }

    // Checkpoint if it's time to!
    if (checkpointTimer.isTimeUp()) {
        // Obtain a lock on the counter to prevent additional counts from being calculated while checkpointing.
        synchronized (counter) {
            checkpoint(checkpointer);
            resetCheckpointAlarm();
        }
    }
}

From source file: com.datatorrent.contrib.kinesis.KinesisConsumer.java

License: Open Source License

/**
 * This method is called in the activate method of the operator
 */
public void start() {
    isAlive = true;
    int realNumStream = simpleConsumerThreads.size();
    if (realNumStream == 0)
        return;

    consumerThreadExecutor = Executors.newFixedThreadPool(realNumStream);
    for (final Shard shd : simpleConsumerThreads) {
        consumerThreadExecutor.submit(new Runnable() {
            @Override
            public void run() {
                logger.debug("Thread " + Thread.currentThread().getName() + " start consuming Records...");
                while (isAlive) {
                    Shard shard = shd;
                    try {
                        List<Record> records = KinesisUtil.getInstance().getRecords(streamName, recordsLimit,
                                shard, getIteratorType(shard.getShardId()),
                                shardPosition.get(shard.getShardId()));

                        if (records == null || records.isEmpty()) {
                            if (shard.getSequenceNumberRange().getEndingSequenceNumber() != null) {
                                closedShards.add(shard);
                                break;
                            }
                            try {
                                Thread.sleep(recordsCheckInterval);
                            } catch (Exception e) {
                                throw new RuntimeException(e);
                            }
                        } else {
                            String seqNo = "";
                            for (Record rc : records) {
                                seqNo = rc.getSequenceNumber();
                                putRecord(shd.getShardId(), rc);
                            }
                            shardPosition.put(shard.getShardId(), seqNo);
                        }
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }
                logger.debug("Thread " + Thread.currentThread().getName() + " stop consuming Records...");
            }
        });
    }
}

From source file: com.datatorrent.contrib.kinesis.KinesisTestConsumer.java

License: Open Source License

@Override
public void run() {
    DescribeStreamRequest describeRequest = new DescribeStreamRequest();
    describeRequest.setStreamName(streamName);

    DescribeStreamResult describeResponse = client.describeStream(describeRequest);
    final List<Shard> shards = describeResponse.getStreamDescription().getShards();
    logger.debug("Inside consumer::run receiveCount= {}", receiveCount);
    while (isAlive) {
        Shard shId = shards.get(0);
        GetShardIteratorRequest iteratorRequest = new GetShardIteratorRequest();
        iteratorRequest.setStreamName(streamName);
        iteratorRequest.setShardId(shId.getShardId());

        iteratorRequest.setShardIteratorType("TRIM_HORIZON");
        GetShardIteratorResult iteratorResponse = client.getShardIterator(iteratorRequest);
        String iterator = iteratorResponse.getShardIterator();

        GetRecordsRequest getRequest = new GetRecordsRequest();
        getRequest.setLimit(1000);
        getRequest.setShardIterator(iterator);
        //call "get" operation and get everything in this shard range
        GetRecordsResult getResponse = client.getRecords(getRequest);
        //get reference to next iterator for this shard
        //retrieve records
        List<Record> records = getResponse.getRecords();
        if (records == null || records.isEmpty()) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
        } else {
            String seqNo = "";
            for (Record rc : records) {
                if (latch != null) {
                    latch.countDown();
                }
                seqNo = rc.getSequenceNumber();
                if (getData(rc).equals(KinesisOperatorTestBase.END_TUPLE))
                    break;
                holdingBuffer.add(rc);
                receiveCount++;
                logger.debug("Consuming {}, receiveCount= {}", getData(rc), receiveCount);
            }
        }
    }
    logger.debug("DONE consuming");
}

From source file: com.facebook.presto.kinesis.util.MockKinesisClient.java

License: Apache License

protected ShardIterator getNextShardIterator(ShardIterator previousIter, ArrayList<Record> records) {
    ShardIterator newIter = null;
    if (records.size() == 0) {
        newIter = previousIter;
    } else {
        Record rec = records.get(records.size() - 1);
        int lastSeq = Integer.valueOf(rec.getSequenceNumber());
        newIter = new ShardIterator(previousIter.streamId, previousIter.shardIndex, lastSeq + 1);
    }

    return newIter;
}

From source file: com.hortonworks.streamline.streams.runtime.storm.spout.KinesisRecordToTupleMapper.java

License: Apache License

@Override
public List<Object> getTuple(Record record) {
    CharsetDecoder decoder = Charset.forName("UTF-8").newDecoder();
    List<Object> tuple = new ArrayList<>();
    tuple.add(record.getPartitionKey());
    tuple.add(record.getSequenceNumber());
    try {
        String data = decoder.decode(record.getData()).toString();
        tuple.add(data);
    } catch (CharacterCodingException e) {
        e.printStackTrace();
        LOG.warn("Exception occured. Emitting tuple with empty string data", e);
        tuple.add("");
    }
    return tuple;
}