Example usage for com.amazonaws.services.kinesis.model ShardIteratorType LATEST

List of usage examples for com.amazonaws.services.kinesis.model ShardIteratorType LATEST

Introduction

On this page you can find example usage for com.amazonaws.services.kinesis.model ShardIteratorType LATEST.

Prototype

ShardIteratorType LATEST

To view the source code for com.amazonaws.services.kinesis.model ShardIteratorType LATEST, click the Source Link below.

Click Source Link

Usage

From source file:com.datatorrent.contrib.kinesis.KinesisConsumer.java

License:Open Source License

/**
 * Determines the shard iterator type to use for the given shard.
 *
 * @param shardId id of the shard being consumed
 * @return {@code AFTER_SEQUENCE_NUMBER} when a position is already tracked for the
 *         shard; otherwise {@code TRIM_HORIZON} when the configured initial offset
 *         is "earliest" (case-insensitive), else {@code LATEST}
 */
public ShardIteratorType getIteratorType(String shardId) {
    // A tracked position means we resume right after the last processed record.
    if (shardPosition.containsKey(shardId)) {
        return ShardIteratorType.AFTER_SEQUENCE_NUMBER;
    }
    // No saved position: fall back to the configured starting point.
    if (initialOffset.equalsIgnoreCase("earliest")) {
        return ShardIteratorType.TRIM_HORIZON;
    }
    return ShardIteratorType.LATEST;
}

From source file:com.trulia.stail.Stail.java

License:Apache License

/**
 * Requests a shard iterator for the given stream and shard.
 *
 * @param client Kinesis client used to issue the request
 * @param stream name of the Kinesis stream
 * @param shard  shard to iterate over
 * @param start  optional ISO-8601 duration (e.g. "PT1H") to look back from now;
 *               when null or empty, iteration starts at LATEST
 * @return the shard iterator string returned by Kinesis
 */
private static String getShardIterator(AmazonKinesis client, String stream, Shard shard, String start) {
    GetShardIteratorRequest request = new GetShardIteratorRequest();
    request.setStreamName(stream);
    request.setShardId(shard.getShardId());

    if (Strings.isNullOrEmpty(start)) {
        // No lookback requested: read only records produced from now on.
        request.setShardIteratorType(ShardIteratorType.LATEST);
    } else {
        // Start reading at a wall-clock point "start" duration in the past.
        request.setShardIteratorType(ShardIteratorType.AT_TIMESTAMP);
        long lookbackMillis = Duration.parse(start).toMillis();
        request.setTimestamp(new Date(System.currentTimeMillis() - lookbackMillis));
    }

    return client.getShardIterator(request).getShardIterator();
}

From source file:org.apache.druid.indexing.kinesis.KinesisRecordSupplier.java

License:Apache License

/**
 * Moves every given partition to the LATEST position so subsequent reads
 * only see newly arriving records.
 *
 * @param partitions partitions to reposition
 * @throws InterruptedException if interrupted while resetting the fetch state
 */
@Override
public void seekToLatest(Set<StreamPartition<String>> partitions) throws InterruptedException {
    checkIfClosed();
    filterBufferAndResetFetchRunnable(partitions);
    for (StreamPartition<String> partition : partitions) {
        seekInternal(partition, null, ShardIteratorType.LATEST);
    }
}

From source file:org.apache.druid.indexing.kinesis.KinesisRecordSupplier.java

License:Apache License

/**
 * Looks up the most recent sequence number for the given partition.
 *
 * @param partition partition whose latest sequence number is requested
 * @return the latest sequence number, or null when none is available
 */
@Nullable
@Override
public String getLatestSequenceNumber(StreamPartition<String> partition) {
    checkIfClosed();
    // LATEST positions the lookup at the newest record in the shard.
    final ShardIteratorType iteratorType = ShardIteratorType.LATEST;
    return getSequenceNumberInternal(partition, iteratorType);
}

From source file:org.apache.flink.streaming.connectors.kinesis.internals.ShardConsumer.java

License:Apache License

/**
 * Consume loop for a single Kinesis shard.
 *
 * <p>First derives the initial shard iterator from {@code lastSequenceNum}:
 * <ul>
 *   <li>LATEST sentinel: start at the shard tip, or stop immediately if the shard
 *       is already closed (no further records will ever arrive);</li>
 *   <li>EARLIEST sentinel: start at TRIM_HORIZON (oldest retained record);</li>
 *   <li>SHARD_ENDING sentinel: nothing left to read — iterator stays null;</li>
 *   <li>AT_TIMESTAMP sentinel: start at {@code initTimestamp};</li>
 *   <li>otherwise: resume from a restored concrete sequence number, with special
 *       handling for aggregated records (see inline comments).</li>
 * </ul>
 * It then loops: fetch a batch, deaggregate, emit each record (updating state),
 * and advance the iterator until the shard ends or the consumer stops.
 * Any throwable is forwarded to the owning fetcher rather than rethrown.
 */
@SuppressWarnings("unchecked")
@Override
public void run() {
    String nextShardItr;

    try {
        // before infinitely looping, we set the initial nextShardItr appropriately

        if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM.get())) {
            // if the shard is already closed, there will be no latest next record to get for this shard
            if (subscribedShard.isClosed()) {
                nextShardItr = null;
            } else {
                nextShardItr = kinesis.getShardIterator(subscribedShard, ShardIteratorType.LATEST.toString(),
                        null);
            }
        } else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get())) {
            nextShardItr = kinesis.getShardIterator(subscribedShard, ShardIteratorType.TRIM_HORIZON.toString(),
                    null);
        } else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get())) {
            nextShardItr = null;
        } else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM.get())) {
            nextShardItr = kinesis.getShardIterator(subscribedShard, ShardIteratorType.AT_TIMESTAMP.toString(),
                    initTimestamp);
        } else {
            // we will be starting from an actual sequence number (due to restore from failure).
            // if the last sequence number refers to an aggregated record, we need to clean up any dangling sub-records
            // from the last aggregated record; otherwise, we can simply start iterating from the record right after.

            if (lastSequenceNum.isAggregated()) {
                // AT_SEQUENCE_NUMBER (not AFTER) so we re-read the aggregated record
                // that contains our last-processed sub-record.
                String itrForLastAggregatedRecord = kinesis.getShardIterator(subscribedShard,
                        ShardIteratorType.AT_SEQUENCE_NUMBER.toString(), lastSequenceNum.getSequenceNumber());

                // get only the last aggregated record
                GetRecordsResult getRecordsResult = getRecords(itrForLastAggregatedRecord, 1);

                List<UserRecord> fetchedRecords = deaggregateRecords(getRecordsResult.getRecords(),
                        subscribedShard.getShard().getHashKeyRange().getStartingHashKey(),
                        subscribedShard.getShard().getHashKeyRange().getEndingHashKey());

                long lastSubSequenceNum = lastSequenceNum.getSubSequenceNumber();
                for (UserRecord record : fetchedRecords) {
                    // we have found a dangling sub-record if it has a larger subsequence number
                    // than our last sequence number; if so, collect the record and update state
                    if (record.getSubSequenceNumber() > lastSubSequenceNum) {
                        deserializeRecordForCollectionAndUpdateState(record);
                    }
                }

                // set the nextShardItr so we can continue iterating in the next while loop
                nextShardItr = getRecordsResult.getNextShardIterator();
            } else {
                // the last record was non-aggregated, so we can simply start from the next record
                nextShardItr = kinesis.getShardIterator(subscribedShard,
                        ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(),
                        lastSequenceNum.getSequenceNumber());
            }
        }

        while (isRunning()) {
            if (nextShardItr == null) {
                // a null iterator marks the end of a (closed) shard; record that
                // in fetcher state so restores don't try to re-read this shard
                fetcherRef.updateState(subscribedShardStateIndex,
                        SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get());

                // we can close this consumer thread once we've reached the end of the subscribed shard
                break;
            } else {
                // optional throttle between GetRecords calls
                if (fetchIntervalMillis != 0) {
                    Thread.sleep(fetchIntervalMillis);
                }

                GetRecordsResult getRecordsResult = getRecords(nextShardItr, maxNumberOfRecordsPerFetch);

                // each of the Kinesis records may be aggregated, so we must deaggregate them before proceeding
                List<UserRecord> fetchedRecords = deaggregateRecords(getRecordsResult.getRecords(),
                        subscribedShard.getShard().getHashKeyRange().getStartingHashKey(),
                        subscribedShard.getShard().getHashKeyRange().getEndingHashKey());

                for (UserRecord record : fetchedRecords) {
                    deserializeRecordForCollectionAndUpdateState(record);
                }

                nextShardItr = getRecordsResult.getNextShardIterator();
            }
        }
    } catch (Throwable t) {
        // forward any failure (including InterruptedException) to the fetcher,
        // which owns error handling and shutdown for all shard consumers
        fetcherRef.stopWithError(t);
    }
}

From source file:org.apache.flink.streaming.connectors.kinesis.internals.ShardConsumerThread.java

License:Apache License

/**
 * Consume loop for a single assigned Kinesis shard.
 *
 * <p>First derives the initial shard iterator from {@code lastSequenceNum}:
 * LATEST sentinel starts at the shard tip (or stops if the shard is closed);
 * EARLIEST sentinel starts at TRIM_HORIZON; SHARD_ENDING sentinel means nothing
 * is left to read; any other value is a concrete sequence number restored from
 * state, with special handling for aggregated records (see inline comments).
 * It then loops: fetch a batch, deaggregate, collect each record (updating
 * state), and advance the iterator until the shard ends or {@code running}
 * becomes false. Any throwable is forwarded to the owner rather than rethrown.
 */
@SuppressWarnings("unchecked")
@Override
public void run() {
    String nextShardItr;

    try {
        // before infinitely looping, we set the initial nextShardItr appropriately

        if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM.get())) {
            // if the shard is already closed, there will be no latest next record to get for this shard
            if (assignedShard.isClosed()) {
                nextShardItr = null;
            } else {
                nextShardItr = kinesisProxy.getShardIterator(assignedShard, ShardIteratorType.LATEST.toString(),
                        null);
            }
        } else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get())) {
            nextShardItr = kinesisProxy.getShardIterator(assignedShard,
                    ShardIteratorType.TRIM_HORIZON.toString(), null);
        } else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get())) {
            nextShardItr = null;
        } else {
            // we will be starting from an actual sequence number (due to restore from failure).
            // if the last sequence number refers to an aggregated record, we need to clean up any dangling sub-records
            // from the last aggregated record; otherwise, we can simply start iterating from the record right after.

            if (lastSequenceNum.isAggregated()) {
                // AT_SEQUENCE_NUMBER (not AFTER) so we re-read the aggregated record
                // that contains our last-processed sub-record.
                String itrForLastAggregatedRecord = kinesisProxy.getShardIterator(assignedShard,
                        ShardIteratorType.AT_SEQUENCE_NUMBER.toString(), lastSequenceNum.getSequenceNumber());

                // get only the last aggregated record
                GetRecordsResult getRecordsResult = kinesisProxy.getRecords(itrForLastAggregatedRecord, 1);

                List<UserRecord> fetchedRecords = deaggregateRecords(getRecordsResult.getRecords(),
                        assignedShard.getStartingHashKey(), assignedShard.getEndingHashKey());

                long lastSubSequenceNum = lastSequenceNum.getSubSequenceNumber();
                for (UserRecord record : fetchedRecords) {
                    // we have found a dangling sub-record if it has a larger subsequence number
                    // than our last sequence number; if so, collect the record and update state
                    if (record.getSubSequenceNumber() > lastSubSequenceNum) {
                        collectRecordAndUpdateState(record);
                    }
                }

                // set the nextShardItr so we can continue iterating in the next while loop
                nextShardItr = getRecordsResult.getNextShardIterator();
            } else {
                // the last record was non-aggregated, so we can simply start from the next record
                nextShardItr = kinesisProxy.getShardIterator(assignedShard,
                        ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(),
                        lastSequenceNum.getSequenceNumber());
            }
        }

        while (running) {
            if (nextShardItr == null) {
                // a null iterator marks the end of a (closed) shard; record the
                // sentinel under the checkpoint lock so snapshots stay consistent
                synchronized (sourceContext.getCheckpointLock()) {
                    seqNoState.put(assignedShard,
                            SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get());
                }

                break;
            } else {
                GetRecordsResult getRecordsResult = kinesisProxy.getRecords(nextShardItr,
                        maxNumberOfRecordsPerFetch);

                // each of the Kinesis records may be aggregated, so we must deaggregate them before proceeding
                List<UserRecord> fetchedRecords = deaggregateRecords(getRecordsResult.getRecords(),
                        assignedShard.getStartingHashKey(), assignedShard.getEndingHashKey());

                for (UserRecord record : fetchedRecords) {
                    collectRecordAndUpdateState(record);
                }

                nextShardItr = getRecordsResult.getNextShardIterator();
            }
        }
    } catch (Throwable t) {
        // forward any failure to the owning thread, which handles shutdown
        ownerRef.stopWithError(t);
    }
}

From source file:org.apache.storm.kinesis.spout.Config.java

License:Apache License

/**
 * Validates the spout configuration, failing fast on the first problem found.
 *
 * <p>Checks, in order: a non-empty stream name; a supported iterator type
 * (AT_TIMESTAMP, LATEST or TRIM_HORIZON — sequence-number based types are
 * rejected); a timestamp when AT_TIMESTAMP is used; and that the mapper,
 * retry handler, ZK info, connection info and uncommitted-record limit are set.
 *
 * @throws IllegalArgumentException if any configuration value is missing or invalid
 */
private void validate() {
    if (streamName == null || streamName.isEmpty()) {
        throw new IllegalArgumentException("streamName is required and cannot be of length 0.");
    }
    // Sequence-number iterator types are not supported by this spout.
    final boolean unsupportedIteratorType = shardIteratorType == null
            || shardIteratorType.equals(ShardIteratorType.AFTER_SEQUENCE_NUMBER)
            || shardIteratorType.equals(ShardIteratorType.AT_SEQUENCE_NUMBER);
    if (unsupportedIteratorType) {
        throw new IllegalArgumentException(
                "shardIteratorType has to be one of the " + ShardIteratorType.AT_TIMESTAMP + ","
                        + ShardIteratorType.LATEST + "," + ShardIteratorType.TRIM_HORIZON);
    }
    // AT_TIMESTAMP is only meaningful together with an explicit timestamp.
    if (timestamp == null && shardIteratorType.equals(ShardIteratorType.AT_TIMESTAMP)) {
        throw new IllegalArgumentException(
                "timestamp must be provided if shardIteratorType is " + ShardIteratorType.AT_TIMESTAMP);
    }
    if (recordToTupleMapper == null) {
        throw new IllegalArgumentException("recordToTupleMapper cannot be null");
    }
    if (failedMessageRetryHandler == null) {
        throw new IllegalArgumentException("failedMessageRetryHandler cannot be null");
    }
    if (zkInfo == null) {
        throw new IllegalArgumentException("zkInfo cannot be null");
    }
    if (kinesisConnectionInfo == null) {
        throw new IllegalArgumentException("kinesisConnectionInfo cannot be null");
    }
    if (maxUncommittedRecords == null || maxUncommittedRecords < 1) {
        throw new IllegalArgumentException("maxUncommittedRecords has to be a positive integer");
    }
}

From source file:org.springframework.integration.aws.inbound.kinesis.KinesisShardOffset.java

License:Apache License

/**
 * Creates an offset positioned at LATEST for the given stream and shard, so
 * consumption starts with records arriving after the iterator is obtained.
 *
 * @param stream the Kinesis stream name
 * @param shard  the shard id within the stream
 * @return a new {@code KinesisShardOffset} with iterator type LATEST
 */
public static KinesisShardOffset latest(String stream, String shard) {
    KinesisShardOffset offset = new KinesisShardOffset(ShardIteratorType.LATEST);
    offset.stream = stream;
    offset.shard = shard;
    return offset;
}