Example usage for com.amazonaws.services.kinesis.model ShardIteratorType AT_SEQUENCE_NUMBER

List of usage examples for com.amazonaws.services.kinesis.model ShardIteratorType AT_SEQUENCE_NUMBER

Introduction

This page lists example usages of com.amazonaws.services.kinesis.model ShardIteratorType AT_SEQUENCE_NUMBER.

Prototype

ShardIteratorType AT_SEQUENCE_NUMBER

To view the source code for com.amazonaws.services.kinesis.model ShardIteratorType AT_SEQUENCE_NUMBER, follow the source link for each example.
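
Before the project examples, here is a minimal, self-contained sketch of how AT_SEQUENCE_NUMBER is typically used with GetShardIteratorRequest and GetRecordsRequest. The stream name, shard id, and sequence number are placeholder values, and the client is built with the default credential and region providers; adapt them to your own setup.

import com.amazonaws.services.kinesis.AmazonKinesis;
import com.amazonaws.services.kinesis.AmazonKinesisClientBuilder;
import com.amazonaws.services.kinesis.model.GetRecordsRequest;
import com.amazonaws.services.kinesis.model.GetRecordsResult;
import com.amazonaws.services.kinesis.model.GetShardIteratorRequest;
import com.amazonaws.services.kinesis.model.GetShardIteratorResult;
import com.amazonaws.services.kinesis.model.Record;
import com.amazonaws.services.kinesis.model.ShardIteratorType;

public class AtSequenceNumberExample {
    public static void main(String[] args) {
        // Placeholder values for this sketch
        String streamName = "my-stream";
        String shardId = "shardId-000000000000";
        String sequenceNumber = "49590338271490256608559692538361571095921575989136588898";

        AmazonKinesis client = AmazonKinesisClientBuilder.defaultClient();

        // AT_SEQUENCE_NUMBER positions the iterator exactly at the given sequence number,
        // so the record with that sequence number is the first one returned
        GetShardIteratorRequest iteratorRequest = new GetShardIteratorRequest()
                .withStreamName(streamName)
                .withShardId(shardId)
                .withShardIteratorType(ShardIteratorType.AT_SEQUENCE_NUMBER)
                .withStartingSequenceNumber(sequenceNumber);
        GetShardIteratorResult iteratorResult = client.getShardIterator(iteratorRequest);

        // Read a single batch of records starting at that position
        GetRecordsRequest recordsRequest = new GetRecordsRequest()
                .withShardIterator(iteratorResult.getShardIterator())
                .withLimit(100);
        GetRecordsResult recordsResult = client.getRecords(recordsRequest);
        for (Record record : recordsResult.getRecords()) {
            System.out.println(record.getSequenceNumber());
        }
    }
}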

Usage

From source file:com.trulia.stail.Stail.java

License:Apache License

private static String getShardIteratorAtSequenceNumber(AmazonKinesis client, String stream, Shard shard,
        String sequenceNumber) {
    GetShardIteratorRequest getShardIteratorRequest = new GetShardIteratorRequest();
    getShardIteratorRequest.setStreamName(stream);
    getShardIteratorRequest.setShardId(shard.getShardId());

    getShardIteratorRequest.setShardIteratorType(ShardIteratorType.AT_SEQUENCE_NUMBER);
    getShardIteratorRequest.setStartingSequenceNumber(sequenceNumber);

    GetShardIteratorResult getShardIteratorResult = client.getShardIterator(getShardIteratorRequest);
    return getShardIteratorResult.getShardIterator();
}

From source file:org.apache.apex.malhar.contrib.kinesis.AbstractKinesisInputOperator.java

License:Apache License

protected void replay(long windowId) {
    try {
        @SuppressWarnings("unchecked")
        Map<String, MutablePair<String, Integer>> recoveredData = (Map<String, MutablePair<String, Integer>>) windowDataManager
                .retrieve(windowId);
        if (recoveredData == null) {
            return;
        }
        for (Map.Entry<String, MutablePair<String, Integer>> rc : recoveredData.entrySet()) {
            logger.debug("Replaying the windowId: {}", windowId);
            logger.debug("ShardId: " + rc.getKey() + " , Start Sequence Id: " + rc.getValue().getLeft()
                    + " , No Of Records: " + rc.getValue().getRight());
            try {
                List<Record> records = KinesisUtil.getInstance().getRecords(consumer.streamName,
                        rc.getValue().getRight(), rc.getKey(), ShardIteratorType.AT_SEQUENCE_NUMBER,
                        rc.getValue().getLeft());
                for (Record record : records) {
                    emitTuple(new Pair<String, Record>(rc.getKey(), record));
                    shardPosition.put(rc.getKey(), record.getSequenceNumber());
                }
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }

        /*
         * Set the shard positions and start the consumer if the last recovery windowId
         * matches the current completed windowId.
         */
        if (windowId == windowDataManager.getLargestCompletedWindow()) {
            // Set the shard positions to the consumer
            Map<String, String> statsData = new HashMap<String, String>(getConsumer().getShardPosition());
            statsData.putAll(shardPosition);
            getConsumer().resetShardPositions(statsData);
            consumer.start();
        }
    } catch (IOException e) {
        throw new RuntimeException("replay", e);
    }
}

From source file:org.apache.apex.malhar.contrib.kinesis.KinesisUtil.java

License:Apache License

/**
 * Get the records from the particular shard
 * @param streamName Name of the stream from which the records are to be accessed
 * @param recordsLimit Number of records to return from the shard
 * @param shId Shard Id of the shard
 * @param iteratorType Shard iterator type
 * @param seqNo Record sequence number
 * @return the list of records from the given shard
 * @throws AmazonClientException
 */
public List<Record> getRecords(String streamName, Integer recordsLimit, String shId,
        ShardIteratorType iteratorType, String seqNo) throws AmazonClientException {
    assert client != null : "Illegal client";
    try {
        // Create the GetShardIteratorRequest instance and set the streamName, shardId and iteratorType on it
        GetShardIteratorRequest iteratorRequest = new GetShardIteratorRequest();
        iteratorRequest.setStreamName(streamName);
        iteratorRequest.setShardId(shId);
        iteratorRequest.setShardIteratorType(iteratorType);

        // If the iteratorType is AFTER_SEQUENCE_NUMBER or AT_SEQUENCE_NUMBER, set the starting sequence number on the iteratorRequest
        if (ShardIteratorType.AFTER_SEQUENCE_NUMBER.equals(iteratorType)
                || ShardIteratorType.AT_SEQUENCE_NUMBER.equals(iteratorType)) {
            iteratorRequest.setStartingSequenceNumber(seqNo);
        }
        // Get the Response from the getShardIterator service method & get the shardIterator from that response
        GetShardIteratorResult iteratorResponse = client.getShardIterator(iteratorRequest);
        // The shard iterator specifies the position in the shard from which to start reading records
        String iterator = iteratorResponse.getShardIterator();

        // Create the GetRecordsRequest instance and set the recordsLimit and iterator
        GetRecordsRequest getRequest = new GetRecordsRequest();
        getRequest.setLimit(recordsLimit);
        getRequest.setShardIterator(iterator);

        // Get the Response from the getRecords service method and get the data records from that response.
        GetRecordsResult getResponse = client.getRecords(getRequest);
        return getResponse.getRecords();
    } catch (AmazonClientException e) {
        throw new RuntimeException(e);
    }
}

From source file:org.apache.druid.indexing.kinesis.KinesisRecordSupplier.java

License:Apache License

@Override
public void seek(StreamPartition<String> partition, String sequenceNumber) throws InterruptedException {
    checkIfClosed();
    filterBufferAndResetFetchRunnable(ImmutableSet.of(partition));
    seekInternal(partition, sequenceNumber, ShardIteratorType.AT_SEQUENCE_NUMBER);
}

From source file:org.apache.flink.streaming.connectors.kinesis.internals.ShardConsumer.java

License:Apache License

@SuppressWarnings("unchecked")
@Override
public void run() {
    String nextShardItr;

    try {
        // before infinitely looping, we set the initial nextShardItr appropriately

        if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM.get())) {
            // if the shard is already closed, there will be no latest next record to get for this shard
            if (subscribedShard.isClosed()) {
                nextShardItr = null;
            } else {
                nextShardItr = kinesis.getShardIterator(subscribedShard, ShardIteratorType.LATEST.toString(),
                        null);
            }
        } else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get())) {
            nextShardItr = kinesis.getShardIterator(subscribedShard, ShardIteratorType.TRIM_HORIZON.toString(),
                    null);
        } else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get())) {
            nextShardItr = null;
        } else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM.get())) {
            nextShardItr = kinesis.getShardIterator(subscribedShard, ShardIteratorType.AT_TIMESTAMP.toString(),
                    initTimestamp);
        } else {
            // we will be starting from an actual sequence number (due to restore from failure).
            // if the last sequence number refers to an aggregated record, we need to clean up any dangling sub-records
            // from the last aggregated record; otherwise, we can simply start iterating from the record right after.

            if (lastSequenceNum.isAggregated()) {
                String itrForLastAggregatedRecord = kinesis.getShardIterator(subscribedShard,
                        ShardIteratorType.AT_SEQUENCE_NUMBER.toString(), lastSequenceNum.getSequenceNumber());

                // get only the last aggregated record
                GetRecordsResult getRecordsResult = getRecords(itrForLastAggregatedRecord, 1);

                List<UserRecord> fetchedRecords = deaggregateRecords(getRecordsResult.getRecords(),
                        subscribedShard.getShard().getHashKeyRange().getStartingHashKey(),
                        subscribedShard.getShard().getHashKeyRange().getEndingHashKey());

                long lastSubSequenceNum = lastSequenceNum.getSubSequenceNumber();
                for (UserRecord record : fetchedRecords) {
                    // we have found a dangling sub-record if it has a larger subsequence number
                    // than our last sequence number; if so, collect the record and update state
                    if (record.getSubSequenceNumber() > lastSubSequenceNum) {
                        deserializeRecordForCollectionAndUpdateState(record);
                    }
                }

                // set the nextShardItr so we can continue iterating in the next while loop
                nextShardItr = getRecordsResult.getNextShardIterator();
            } else {
                // the last record was non-aggregated, so we can simply start from the next record
                nextShardItr = kinesis.getShardIterator(subscribedShard,
                        ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(),
                        lastSequenceNum.getSequenceNumber());
            }
        }

        while (isRunning()) {
            if (nextShardItr == null) {
                fetcherRef.updateState(subscribedShardStateIndex,
                        SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get());

                // we can close this consumer thread once we've reached the end of the subscribed shard
                break;
            } else {
                if (fetchIntervalMillis != 0) {
                    Thread.sleep(fetchIntervalMillis);
                }

                GetRecordsResult getRecordsResult = getRecords(nextShardItr, maxNumberOfRecordsPerFetch);

                // each of the Kinesis records may be aggregated, so we must deaggregate them before proceeding
                List<UserRecord> fetchedRecords = deaggregateRecords(getRecordsResult.getRecords(),
                        subscribedShard.getShard().getHashKeyRange().getStartingHashKey(),
                        subscribedShard.getShard().getHashKeyRange().getEndingHashKey());

                for (UserRecord record : fetchedRecords) {
                    deserializeRecordForCollectionAndUpdateState(record);
                }

                nextShardItr = getRecordsResult.getNextShardIterator();
            }
        }
    } catch (Throwable t) {
        fetcherRef.stopWithError(t);
    }
}

From source file:org.apache.flink.streaming.connectors.kinesis.internals.ShardConsumerThread.java

License:Apache License

@SuppressWarnings("unchecked")
@Override
public void run() {
    String nextShardItr;

    try {
        // before infinitely looping, we set the initial nextShardItr appropriately

        if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM.get())) {
            // if the shard is already closed, there will be no latest next record to get for this shard
            if (assignedShard.isClosed()) {
                nextShardItr = null;
            } else {
                nextShardItr = kinesisProxy.getShardIterator(assignedShard, ShardIteratorType.LATEST.toString(),
                        null);
            }
        } else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get())) {
            nextShardItr = kinesisProxy.getShardIterator(assignedShard,
                    ShardIteratorType.TRIM_HORIZON.toString(), null);
        } else if (lastSequenceNum.equals(SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get())) {
            nextShardItr = null;
        } else {
            // we will be starting from an actual sequence number (due to restore from failure).
            // if the last sequence number refers to an aggregated record, we need to clean up any dangling sub-records
            // from the last aggregated record; otherwise, we can simply start iterating from the record right after.

            if (lastSequenceNum.isAggregated()) {
                String itrForLastAggregatedRecord = kinesisProxy.getShardIterator(assignedShard,
                        ShardIteratorType.AT_SEQUENCE_NUMBER.toString(), lastSequenceNum.getSequenceNumber());

                // get only the last aggregated record
                GetRecordsResult getRecordsResult = kinesisProxy.getRecords(itrForLastAggregatedRecord, 1);

                List<UserRecord> fetchedRecords = deaggregateRecords(getRecordsResult.getRecords(),
                        assignedShard.getStartingHashKey(), assignedShard.getEndingHashKey());

                long lastSubSequenceNum = lastSequenceNum.getSubSequenceNumber();
                for (UserRecord record : fetchedRecords) {
                    // we have found a dangling sub-record if it has a larger subsequence number
                    // than our last sequence number; if so, collect the record and update state
                    if (record.getSubSequenceNumber() > lastSubSequenceNum) {
                        collectRecordAndUpdateState(record);
                    }
                }

                // set the nextShardItr so we can continue iterating in the next while loop
                nextShardItr = getRecordsResult.getNextShardIterator();
            } else {
                // the last record was non-aggregated, so we can simply start from the next record
                nextShardItr = kinesisProxy.getShardIterator(assignedShard,
                        ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(),
                        lastSequenceNum.getSequenceNumber());
            }
        }

        while (running) {
            if (nextShardItr == null) {
                synchronized (sourceContext.getCheckpointLock()) {
                    seqNoState.put(assignedShard,
                            SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get());
                }

                break;
            } else {
                GetRecordsResult getRecordsResult = kinesisProxy.getRecords(nextShardItr,
                        maxNumberOfRecordsPerFetch);

                // each of the Kinesis records may be aggregated, so we must deaggregate them before proceeding
                List<UserRecord> fetchedRecords = deaggregateRecords(getRecordsResult.getRecords(),
                        assignedShard.getStartingHashKey(), assignedShard.getEndingHashKey());

                for (UserRecord record : fetchedRecords) {
                    collectRecordAndUpdateState(record);
                }

                nextShardItr = getRecordsResult.getNextShardIterator();
            }
        }
    } catch (Throwable t) {
        ownerRef.stopWithError(t);
    }
}

From source file:org.apache.storm.kinesis.spout.Config.java

License:Apache License

private void validate() {
    if (streamName == null || streamName.length() < 1) {
        throw new IllegalArgumentException("streamName is required and cannot be of length 0.");
    }
    if (shardIteratorType == null || shardIteratorType.equals(ShardIteratorType.AFTER_SEQUENCE_NUMBER)
            || shardIteratorType.equals(ShardIteratorType.AT_SEQUENCE_NUMBER)) {
        throw new IllegalArgumentException(
                "shardIteratorType has to be one of the " + ShardIteratorType.AT_TIMESTAMP + ","
                        + ShardIteratorType.LATEST + "," + ShardIteratorType.TRIM_HORIZON);
    }
    if (shardIteratorType.equals(ShardIteratorType.AT_TIMESTAMP) && timestamp == null) {
        throw new IllegalArgumentException(
                "timestamp must be provided if shardIteratorType is " + ShardIteratorType.AT_TIMESTAMP);
    }
    if (recordToTupleMapper == null) {
        throw new IllegalArgumentException("recordToTupleMapper cannot be null");
    }
    if (failedMessageRetryHandler == null) {
        throw new IllegalArgumentException("failedMessageRetryHandler cannot be null");
    }
    if (zkInfo == null) {
        throw new IllegalArgumentException("zkInfo cannot be null");
    }
    if (kinesisConnectionInfo == null) {
        throw new IllegalArgumentException("kinesisConnectionInfo cannot be null");
    }
    if (maxUncommittedRecords == null || maxUncommittedRecords < 1) {
        throw new IllegalArgumentException("maxUncommittedRecords has to be a positive integer");
    }
}

From source file:org.apache.storm.kinesis.spout.KinesisConnection.java

License:Apache License

String getShardIterator(String stream, String shardId, ShardIteratorType shardIteratorType,
        String sequenceNumber, Date timestamp) {
    String shardIterator = "";
    try {
        GetShardIteratorRequest getShardIteratorRequest = new GetShardIteratorRequest();
        getShardIteratorRequest.setStreamName(stream);
        getShardIteratorRequest.setShardId(shardId);
        getShardIteratorRequest.setShardIteratorType(shardIteratorType);
        if (shardIteratorType.equals(ShardIteratorType.AFTER_SEQUENCE_NUMBER)
                || shardIteratorType.equals(ShardIteratorType.AT_SEQUENCE_NUMBER)) {
            getShardIteratorRequest.setStartingSequenceNumber(sequenceNumber);
        } else if (shardIteratorType.equals(ShardIteratorType.AT_TIMESTAMP)) {
            getShardIteratorRequest.setTimestamp(timestamp);
        }
        GetShardIteratorResult getShardIteratorResult = kinesisClient.getShardIterator(getShardIteratorRequest);
        if (getShardIteratorResult != null) {
            shardIterator = getShardIteratorResult.getShardIterator();
        }
    } catch (Exception e) {
        LOG.warn(
                "Exception occured while getting shardIterator for shard " + shardId + " shardIteratorType "
                        + shardIteratorType + " sequence number " + sequenceNumber + " timestamp " + timestamp,
                e);
    }
    LOG.warn("Returning shardIterator " + shardIterator + " for shardId " + shardId + " shardIteratorType "
            + shardIteratorType + " sequenceNumber " + sequenceNumber + " timestamp" + timestamp);
    return shardIterator;
}

From source file:org.apache.storm.kinesis.spout.KinesisRecordsManager.java

License:Apache License

private void refreshShardIteratorForFailedRecord(KinesisMessageId kinesisMessageId) {
    String shardIterator = null;
    // Set the shard iterator for last fetched sequence number to start from correct position in shard
    shardIterator = kinesisConnection.getShardIterator(kinesisConfig.getStreamName(),
            kinesisMessageId.getShardId(), ShardIteratorType.AT_SEQUENCE_NUMBER,
            kinesisMessageId.getSequenceNumber(), null);
    if (shardIterator != null && !shardIterator.isEmpty()) {
        LOG.warn("Refreshing shard iterator for failed records for message " + kinesisMessageId
                + " with shardIterator " + shardIterator);
        shardIteratorPerFailedMessage.put(kinesisMessageId, shardIterator);
    }
}

From source file:org.springframework.integration.aws.inbound.kinesis.KinesisShardOffset.java

License:Apache License

public static KinesisShardOffset atSequenceNumber(String stream, String shard, String sequenceNumber) {
    KinesisShardOffset kinesisShardOffset = new KinesisShardOffset(ShardIteratorType.AT_SEQUENCE_NUMBER);
    kinesisShardOffset.stream = stream;
    kinesisShardOffset.shard = shard;
    kinesisShardOffset.sequenceNumber = sequenceNumber;
    return kinesisShardOffset;
}