Example usage for com.amazonaws.services.kinesis.model ShardIteratorType TRIM_HORIZON

List of usage examples for com.amazonaws.services.kinesis.model ShardIteratorType TRIM_HORIZON

Introduction

On this page you can find example usage for com.amazonaws.services.kinesis.model ShardIteratorType TRIM_HORIZON.

Prototype

ShardIteratorType TRIM_HORIZON

To view the source code for com.amazonaws.services.kinesis.model ShardIteratorType TRIM_HORIZON, click the Source Link below.

Click Source Link

Usage

From source file:com.datatorrent.contrib.kinesis.KinesisConsumer.java

License:Open Source License

/**
 * Determines which shard iterator type should be used to start (or resume)
 * reading the given shard.
 *
 * @param shardId id of the shard whose iterator type is requested
 * @return {@code AFTER_SEQUENCE_NUMBER} when a position is already tracked
 *         for the shard; otherwise {@code TRIM_HORIZON} when the configured
 *         initial offset is "earliest", or {@code LATEST} for any other value
 */
public ShardIteratorType getIteratorType(String shardId) {
    if (shardPosition.containsKey(shardId)) {
        // A checkpointed position exists for this shard, so resume right after it.
        return ShardIteratorType.AFTER_SEQUENCE_NUMBER;
    }
    if (initialOffset.equalsIgnoreCase("earliest")) {
        return ShardIteratorType.TRIM_HORIZON;
    }
    return ShardIteratorType.LATEST;
}

From source file:com.netflix.spectator.tdigest.KinesisTDigestReader.java

License:Apache License

/**
 * Create a new instance that reads from the beginning of the shard. The iterator type is
 * set to {@code TRIM_HORIZON}.
 *
 * @param registry
 *     Registry for creating metrics.
 * @param client
 *     Client for interacting with the Kinesis service.
 * @param stream
 *     Name of the stream to read from.
 * @param shard
 *     Id of the shard to consume.
 */
public KinesisTDigestReader(Registry registry, AmazonKinesisClient client, String stream, String shard) {
    // Delegates to the request-based constructor with TRIM_HORIZON so reading
    // starts at the oldest untrimmed record available in the shard.
    this(registry, client, new GetShardIteratorRequest().withStreamName(stream).withShardId(shard)
            .withShardIteratorType(ShardIteratorType.TRIM_HORIZON));
}

From source file:com.trulia.stail.Stail.java

License:Apache License

/**
 * Obtains a shard iterator positioned at the oldest untrimmed record of the
 * given shard ({@code TRIM_HORIZON}).
 *
 * @param client Kinesis client used to issue the request
 * @param stream name of the stream the shard belongs to
 * @param shard  shard to obtain an iterator for
 * @return the shard iterator token returned by Kinesis
 */
private static String getOldestShardIterator(AmazonKinesis client, String stream, Shard shard) {
    GetShardIteratorRequest request = new GetShardIteratorRequest()
            .withStreamName(stream)
            .withShardId(shard.getShardId())
            .withShardIteratorType(ShardIteratorType.TRIM_HORIZON);
    return client.getShardIterator(request).getShardIterator();
}

From source file:dbtucker.connect.kinesis.KinesisSourceTask.java

License:Apache License

/**
 * Builds a {@link GetShardIteratorRequest} for the given shard.
 *
 * @param shardId    id of the shard to read
 * @param streamName name of the stream the shard belongs to
 * @param seqNum     last committed sequence number, or {@code null} if none
 * @return a request starting after {@code seqNum} when one is known, otherwise
 *         starting at {@code TRIM_HORIZON} (the oldest available record)
 */
private GetShardIteratorRequest getShardIteratorRequest(String shardId, String streamName, String seqNum) {
    GetShardIteratorRequest request = new GetShardIteratorRequest()
            .withShardId(shardId)
            .withStreamName(streamName);
    if (seqNum == null) {
        // No checkpoint yet: begin from the earliest record in the shard.
        return request.withShardIteratorType(ShardIteratorType.TRIM_HORIZON);
    }
    // Resume immediately after the last processed sequence number.
    return request.withShardIteratorType(ShardIteratorType.AFTER_SEQUENCE_NUMBER)
            .withStartingSequenceNumber(seqNum);
}

From source file:org.apache.beam.sdk.io.kinesis.AmazonKinesisMock.java

License:Apache License

/**
 * Mock implementation: only {@code TRIM_HORIZON} is supported. The returned
 * iterator encodes the shard id and a starting record index of 0 as
 * {@code "<shardId>:0"}.
 *
 * @throws RuntimeException for any iterator type other than TRIM_HORIZON
 */
@Override
public GetShardIteratorResult getShardIterator(GetShardIteratorRequest getShardIteratorRequest) {
    ShardIteratorType requestedType = ShardIteratorType
            .fromValue(getShardIteratorRequest.getShardIteratorType());
    if (requestedType != ShardIteratorType.TRIM_HORIZON) {
        throw new RuntimeException("Not implemented");
    }
    // Encode "shardId:recordIndex", starting at the first record.
    String shardIterator = String.format("%s:%s", getShardIteratorRequest.getShardId(), 0);
    return new GetShardIteratorResult().withShardIterator(shardIterator);
}

From source file:org.apache.druid.indexing.kinesis.KinesisRecordSupplier.java

License:Apache License

/**
 * Repositions every given partition to the earliest available record
 * ({@code TRIM_HORIZON}).
 *
 * @param partitions partitions to reposition
 * @throws InterruptedException if interrupted while resetting fetch state
 */
@Override
public void seekToEarliest(Set<StreamPartition<String>> partitions) throws InterruptedException {
    // Fail fast if the supplier has already been closed.
    checkIfClosed();
    filterBufferAndResetFetchRunnable(partitions);
    for (StreamPartition<String> partition : partitions) {
        seekInternal(partition, null, ShardIteratorType.TRIM_HORIZON);
    }
}

From source file:org.apache.druid.indexing.kinesis.KinesisRecordSupplier.java

License:Apache License

/**
 * Looks up the earliest available sequence number for the given partition.
 *
 * @param partition partition to query
 * @return the earliest sequence number, or {@code null} if none is available
 */
@Nullable
@Override
public String getEarliestSequenceNumber(StreamPartition<String> partition) {
    // Fail fast if the supplier has already been closed.
    checkIfClosed();
    // TRIM_HORIZON points at the oldest untrimmed record in the shard.
    final ShardIteratorType earliest = ShardIteratorType.TRIM_HORIZON;
    return getSequenceNumberInternal(partition, earliest);
}

From source file:org.apache.flink.streaming.connectors.kinesis.internals.ShardConsumer.java

License:Apache License

@SuppressWarnings("unchecked")
@Override
public void run() {
    // Consume loop for a single Kinesis shard: derive the starting shard
    // iterator from the last checkpointed sequence number, then repeatedly
    // fetch, deaggregate, and emit records until the shard ends or the
    // consumer is stopped. Any failure is forwarded to the owning fetcher.
    String nextShardItr;

    try {
        // before infinitely looping, we set the initial nextShardItr appropriately

        if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM.get())) {
            // if the shard is already closed, there will be no latest next record to get for this shard
            if (subscribedShard.isClosed()) {
                nextShardItr = null;
            } else {
                nextShardItr = kinesis.getShardIterator(subscribedShard, ShardIteratorType.LATEST.toString(),
                        null);
            }
        } else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get())) {
            // start from the oldest untrimmed record in the shard
            nextShardItr = kinesis.getShardIterator(subscribedShard, ShardIteratorType.TRIM_HORIZON.toString(),
                    null);
        } else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get())) {
            // shard was already fully consumed on a previous run; nothing left to read
            nextShardItr = null;
        } else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM.get())) {
            nextShardItr = kinesis.getShardIterator(subscribedShard, ShardIteratorType.AT_TIMESTAMP.toString(),
                    initTimestamp);
        } else {
            // we will be starting from an actual sequence number (due to restore from failure).
            // if the last sequence number refers to an aggregated record, we need to clean up any dangling sub-records
            // from the last aggregated record; otherwise, we can simply start iterating from the record right after.

            if (lastSequenceNum.isAggregated()) {
                String itrForLastAggregatedRecord = kinesis.getShardIterator(subscribedShard,
                        ShardIteratorType.AT_SEQUENCE_NUMBER.toString(), lastSequenceNum.getSequenceNumber());

                // get only the last aggregated record
                GetRecordsResult getRecordsResult = getRecords(itrForLastAggregatedRecord, 1);

                List<UserRecord> fetchedRecords = deaggregateRecords(getRecordsResult.getRecords(),
                        subscribedShard.getShard().getHashKeyRange().getStartingHashKey(),
                        subscribedShard.getShard().getHashKeyRange().getEndingHashKey());

                long lastSubSequenceNum = lastSequenceNum.getSubSequenceNumber();
                for (UserRecord record : fetchedRecords) {
                    // we have found a dangling sub-record if it has a larger subsequence number
                    // than our last sequence number; if so, collect the record and update state
                    if (record.getSubSequenceNumber() > lastSubSequenceNum) {
                        deserializeRecordForCollectionAndUpdateState(record);
                    }
                }

                // set the nextShardItr so we can continue iterating in the next while loop
                nextShardItr = getRecordsResult.getNextShardIterator();
            } else {
                // the last record was non-aggregated, so we can simply start from the next record
                nextShardItr = kinesis.getShardIterator(subscribedShard,
                        ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(),
                        lastSequenceNum.getSequenceNumber());
            }
        }

        while (isRunning()) {
            if (nextShardItr == null) {
                // A null iterator means the (closed) shard is exhausted; record the
                // sentinel state so checkpoints reflect the shard ending.
                fetcherRef.updateState(subscribedShardStateIndex,
                        SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get());

                // we can close this consumer thread once we've reached the end of the subscribed shard
                break;
            } else {
                // optional throttle between fetches to stay under shard read limits
                if (fetchIntervalMillis != 0) {
                    Thread.sleep(fetchIntervalMillis);
                }

                GetRecordsResult getRecordsResult = getRecords(nextShardItr, maxNumberOfRecordsPerFetch);

                // each of the Kinesis records may be aggregated, so we must deaggregate them before proceeding
                List<UserRecord> fetchedRecords = deaggregateRecords(getRecordsResult.getRecords(),
                        subscribedShard.getShard().getHashKeyRange().getStartingHashKey(),
                        subscribedShard.getShard().getHashKeyRange().getEndingHashKey());

                for (UserRecord record : fetchedRecords) {
                    deserializeRecordForCollectionAndUpdateState(record);
                }

                nextShardItr = getRecordsResult.getNextShardIterator();
            }
        }
    } catch (Throwable t) {
        // Surface any failure to the owning fetcher rather than dying silently.
        fetcherRef.stopWithError(t);
    }
}

From source file:org.apache.flink.streaming.connectors.kinesis.internals.ShardConsumerThread.java

License:Apache License

@SuppressWarnings("unchecked")
@Override
public void run() {
    // Consume loop for the shard assigned to this thread: pick the initial
    // shard iterator from the last checkpointed sequence number, then fetch,
    // deaggregate, and collect records until the shard is exhausted or the
    // thread is stopped. Any failure is forwarded to the owner.
    String nextShardItr;

    try {
        // before infinitely looping, we set the initial nextShardItr appropriately

        if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM.get())) {
            // if the shard is already closed, there will be no latest next record to get for this shard
            if (assignedShard.isClosed()) {
                nextShardItr = null;
            } else {
                nextShardItr = kinesisProxy.getShardIterator(assignedShard, ShardIteratorType.LATEST.toString(),
                        null);
            }
        } else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get())) {
            // start from the oldest untrimmed record in the shard
            nextShardItr = kinesisProxy.getShardIterator(assignedShard,
                    ShardIteratorType.TRIM_HORIZON.toString(), null);
        } else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get())) {
            // shard was already fully consumed on a previous run; nothing left to read
            nextShardItr = null;
        } else {
            // we will be starting from an actual sequence number (due to restore from failure).
            // if the last sequence number refers to an aggregated record, we need to clean up any dangling sub-records
            // from the last aggregated record; otherwise, we can simply start iterating from the record right after.

            if (lastSequenceNum.isAggregated()) {
                String itrForLastAggregatedRecord = kinesisProxy.getShardIterator(assignedShard,
                        ShardIteratorType.AT_SEQUENCE_NUMBER.toString(), lastSequenceNum.getSequenceNumber());

                // get only the last aggregated record
                GetRecordsResult getRecordsResult = kinesisProxy.getRecords(itrForLastAggregatedRecord, 1);

                List<UserRecord> fetchedRecords = deaggregateRecords(getRecordsResult.getRecords(),
                        assignedShard.getStartingHashKey(), assignedShard.getEndingHashKey());

                long lastSubSequenceNum = lastSequenceNum.getSubSequenceNumber();
                for (UserRecord record : fetchedRecords) {
                    // we have found a dangling sub-record if it has a larger subsequence number
                    // than our last sequence number; if so, collect the record and update state
                    if (record.getSubSequenceNumber() > lastSubSequenceNum) {
                        collectRecordAndUpdateState(record);
                    }
                }

                // set the nextShardItr so we can continue iterating in the next while loop
                nextShardItr = getRecordsResult.getNextShardIterator();
            } else {
                // the last record was non-aggregated, so we can simply start from the next record
                nextShardItr = kinesisProxy.getShardIterator(assignedShard,
                        ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(),
                        lastSequenceNum.getSequenceNumber());
            }
        }

        while (running) {
            if (nextShardItr == null) {
                // A null iterator means the (closed) shard is exhausted; record the
                // sentinel under the checkpoint lock so checkpoints see a
                // consistent view of the shard's final state.
                synchronized (sourceContext.getCheckpointLock()) {
                    seqNoState.put(assignedShard,
                            SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get());
                }

                break;
            } else {
                GetRecordsResult getRecordsResult = kinesisProxy.getRecords(nextShardItr,
                        maxNumberOfRecordsPerFetch);

                // each of the Kinesis records may be aggregated, so we must deaggregate them before proceeding
                List<UserRecord> fetchedRecords = deaggregateRecords(getRecordsResult.getRecords(),
                        assignedShard.getStartingHashKey(), assignedShard.getEndingHashKey());

                for (UserRecord record : fetchedRecords) {
                    collectRecordAndUpdateState(record);
                }

                nextShardItr = getRecordsResult.getNextShardIterator();
            }
        }
    } catch (Throwable t) {
        // Surface any failure to the owner rather than dying silently.
        ownerRef.stopWithError(t);
    }
}

From source file:org.apache.storm.kinesis.spout.Config.java

License:Apache License

/**
 * Validates the spout configuration, failing fast with a descriptive
 * {@link IllegalArgumentException} on the first invalid field. Checks are
 * performed in a fixed order, so earlier failures mask later ones.
 */
private void validate() {
    requireArg(streamName != null && streamName.length() >= 1,
            "streamName is required and cannot be of length 0.");
    // AFTER_SEQUENCE_NUMBER / AT_SEQUENCE_NUMBER require per-shard state this
    // spout does not accept via config, so only the three types below are valid.
    requireArg(shardIteratorType != null
                    && !shardIteratorType.equals(ShardIteratorType.AFTER_SEQUENCE_NUMBER)
                    && !shardIteratorType.equals(ShardIteratorType.AT_SEQUENCE_NUMBER),
            "shardIteratorType has to be one of the " + ShardIteratorType.AT_TIMESTAMP + ","
                    + ShardIteratorType.LATEST + "," + ShardIteratorType.TRIM_HORIZON);
    requireArg(!(shardIteratorType.equals(ShardIteratorType.AT_TIMESTAMP) && timestamp == null),
            "timestamp must be provided if shardIteratorType is " + ShardIteratorType.AT_TIMESTAMP);
    requireArg(recordToTupleMapper != null, "recordToTupleMapper cannot be null");
    requireArg(failedMessageRetryHandler != null, "failedMessageRetryHandler cannot be null");
    requireArg(zkInfo != null, "zkInfo cannot be null");
    requireArg(kinesisConnectionInfo != null, "kinesisConnectionInfo cannot be null");
    requireArg(maxUncommittedRecords != null && maxUncommittedRecords >= 1,
            "maxUncommittedRecords has to be a positive integer");
}

/**
 * Throws an {@link IllegalArgumentException} with {@code message} unless
 * {@code valid} is true.
 */
private static void requireArg(boolean valid, String message) {
    if (!valid) {
        throw new IllegalArgumentException(message);
    }
}