Example usage for com.amazonaws.services.kinesis AmazonKinesisClient AmazonKinesisClient

List of usage examples for com.amazonaws.services.kinesis AmazonKinesisClient AmazonKinesisClient

Introduction

On this page you can find example usage of the com.amazonaws.services.kinesis AmazonKinesisClient AmazonKinesisClient constructor.

Prototype

@Deprecated
public AmazonKinesisClient() 

Source Link

Document

Constructs a new client to invoke service methods on Kinesis.

Usage

From source file:com.mh2c.LogGenerator.java

License:Apache License

/**
 * Generates log lines and sends them to Kinesis, throttled to roughly
 * {@code recsPerSecond} records per second.
 *
 * @param streamName Kinesis stream name
 * @param recsPerSecond number of records to send each second
 * @param numRecords total number of records to send
 * @throws InterruptedException if the inter-batch sleep is interrupted
 */
public void generate(String streamName, int recsPerSecond, int numRecords) throws InterruptedException {

    // NOTE(review): the no-arg constructor is deprecated in the AWS SDK;
    // prefer AmazonKinesisClientBuilder when the SDK version in use allows it.
    AmazonKinesisClient client = new AmazonKinesisClient();
    try {
        // Ceiling division: number of one-second passes needed to emit all records.
        int numPasses = (numRecords + recsPerSecond - 1) / recsPerSecond;
        int recordsLeft = numRecords;
        for (int i = 0; i < numPasses; i++) {
            int numToGenerate = Math.min(recordsLeft, recsPerSecond);
            for (int j = 0; j < numToGenerate; j++) {
                String logLine = generateLogLine();

                PutRecordRequest request = new PutRecordRequest().withStreamName(streamName)
                        .withPartitionKey(PARTITION_KEY)
                        .withData(ByteBuffer.wrap(logLine.getBytes(StandardCharsets.UTF_8)));
                PutRecordResult result = client.putRecord(request);
                System.out.println(
                        String.format("Wrote to shard %s as %s", result.getShardId(), result.getSequenceNumber()));
            }

            recordsLeft -= numToGenerate;
            // Sleep only between passes, not after the final one.
            if (recordsLeft > 0) {
                Thread.sleep(1000L);
            }
        }
    } finally {
        // Fix: release the client's underlying HTTP connection pool
        // (previously leaked — the client was never shut down).
        client.shutdown();
    }
}

From source file:com.mh2c.LogProcessor.java

License:Apache License

/**
 * Processes a Kinesis stream with Spark Streaming: reads records, anonymizes
 * their IP addresses, categorizes their user agents, and writes the results
 * out as Hadoop text files under the given directory prefix.
 *
 * @param streamName Kinesis stream name
 * @param region AWS region housing Kinesis stream
 * @param batchInterval streaming batch interval, in milliseconds
 * @param hadoopDir directory prefix in Hadoop where files are written
 * @throws InterruptedException if processing is interrupted
 */
public void process(String streamName, String region, int batchInterval, String hadoopDir)
        throws InterruptedException {

    String kinesisEndpoint = String.format("https://kinesis.%s.amazonaws.com/", region);

    // NOTE(review): the no-arg constructor is deprecated in the AWS SDK;
    // prefer AmazonKinesisClientBuilder when the SDK version in use allows it.
    AmazonKinesisClient client = new AmazonKinesisClient();
    client.setEndpoint(kinesisEndpoint);

    // Validates that the stream exists and is reachable before starting Spark.
    // (The shard count returned here was previously stored in an unused local.)
    client.describeStream(streamName).getStreamDescription().getShards();
    // Fix: release the client's HTTP resources — it is not needed past this point.
    client.shutdown();

    SparkConf conf = new SparkConf().setAppName(APP_NAME);
    JavaStreamingContext ctx = new JavaStreamingContext(conf, new Duration(batchInterval));

    JavaDStream<byte[]> kinesisStream = KinesisUtils.createStream(ctx, APP_NAME, streamName, kinesisEndpoint,
            region, InitialPositionInStream.LATEST, new Duration(batchInterval),
            StorageLevel.MEMORY_AND_DISK_2());

    // Make more DStreams: decode bytes, then apply the two record transformations.
    JavaDStream<ApacheLogRecord> processedRecords = kinesisStream
            .map(line -> new ApacheLogRecord(new String(line, StandardCharsets.UTF_8)))
            .map(record -> record.withIpAddress(anonymizeIpAddress(record.getIpAddress())))
            .map(record -> record.withUserAgent(categorizeUserAgent(record.getUserAgent())));

    // Only pair streams can be written as Hadoop files, so key each record
    // with a random UUID.
    JavaPairDStream<String, ApacheLogRecord> markedRecords = processedRecords.transformToPair(
            recordRdd -> recordRdd.mapToPair(record -> new Tuple2<>(UUID.randomUUID().toString(), record)));

    // Write out to Hadoop
    markedRecords.print();
    markedRecords.saveAsHadoopFiles(hadoopDir, "txt", Text.class, Text.class, TextOutputFormat.class);

    ctx.start();
    try {
        ctx.awaitTermination();
    } catch (InterruptedException e) {
        // Fix: restore the interrupt flag instead of swallowing it, so callers
        // can still observe that this thread was interrupted.
        Thread.currentThread().interrupt();
        System.out.println("Streaming stopped");
        return;
    }
}

From source file:com.netflix.spectator.tdigest.TDigestModule.java

License:Apache License

@Provides
@Singleton
private TDigestWriter providesWriter(Registry registry, TDigestConfig config) {
    // Build a Kinesis client pointed at the configured endpoint and hand it
    // to the Kinesis-backed digest writer.
    final AmazonKinesisClient kinesis = new AmazonKinesisClient();
    kinesis.setEndpoint(config.getEndpoint());
    return new KinesisTDigestWriter(registry, kinesis, config);
}

From source file:dbtucker.connect.kinesis.KinesisSinkTask.java

License:Apache License

@Override
public void start(Map<String, String> map) {
    // Parse the connector configuration first; it drives everything below.
    config = new KinesisSinkConnectorConfig(map);
    remainingRetries = config.getMaxRetries();

    // Region-scoped Kinesis client used for all puts performed by this task.
    client = new AmazonKinesisClient();
    client.configureRegion(config.getRegionId());

    log.debug("Task launched with client {}", client.toString());
}

From source file:dbtucker.connect.kinesis.KinesisSourceConnector.java

License:Apache License

@Override
public void start(Map<String, String> map) {
    config = new KinesisSourceConnectorConfig(map);
    streamShards = new HashMap<>();

    List<String> streamNames;
    final Set<String> ignoredStreams = new HashSet<>();
    final Set<String> consumedStreams = new HashSet<>();

    // NOTE(review): the no-arg constructor is deprecated in the AWS SDK;
    // prefer AmazonKinesisClientBuilder when the SDK version in use allows it.
    final AmazonKinesisClient client = new AmazonKinesisClient();
    client.configureRegion(config.getRegionId());

    ListStreamsResult listResult;
    ListStreamsRequest lsr = new ListStreamsRequest();
    lsr.setLimit(32);

    // Page through all streams in the region, 32 names at a time.
    String lastEvaluatedStreamName = null;
    do {
        lsr.setExclusiveStartStreamName(lastEvaluatedStreamName);
        listResult = client.listStreams(lsr);

        streamNames = listResult.getStreamNames();
        for (String streamName : streamNames) {
            if (isFilteredOut(streamName)) {
                ignoredStreams.add(streamName);
                continue;
            }

            final DescribeStreamResult streamDesc = client.describeStream(streamName);

            // A stream mid-deletion cannot be consumed; skip it with a warning.
            if (streamDesc.getStreamDescription().getStreamStatus().equals(StreamStatus.DELETING.toString())) {
                log.warn("Stream '{}' is being deleted and cannot be consumed", streamName);
                ignoredStreams.add(streamName);
                continue;
            }

            // Record every shard of the consumable stream for later task assignment.
            for (Shard shard : streamDesc.getStreamDescription().getShards()) {
                streamShards.put(shard, streamDesc);
            }

            consumedStreams.add(streamName);
        }

        // The last name on this page is the exclusive start key for the next page.
        if (!streamNames.isEmpty()) {
            lastEvaluatedStreamName = streamNames.get(streamNames.size() - 1);
        }

    } while (listResult.getHasMoreStreams());

    log.info("Streams to ingest: {}", consumedStreams);
    log.info("Streams to ignore: {}", ignoredStreams);

    client.shutdown();

    if (consumedStreams.isEmpty()) {
        throw new ConnectException("No matching Kinesis Streams found.  Exiting connector");
    }
}

/**
 * Decides whether a stream should be skipped, per the prefix/blacklist/whitelist
 * configuration.
 *
 * Fix: the original condition read {@code (blacklist == null || blacklist.contains(...))
 * && (whitelist == null || !whitelist.contains(...))}, which ignored EVERY stream when
 * neither list was configured. The intended semantics implemented here: a stream is
 * ignored iff it is blacklisted, or a whitelist exists and does not include it.
 *
 * @param streamName candidate stream name
 * @return true if the stream must be ignored
 */
private boolean isFilteredOut(String streamName) {
    if (config.getStreamsPrefix() != null) {
        // Prefix mode: only streams carrying the prefix are eligible, and the
        // blacklist can still veto them (matches the original prefix branch).
        if (!streamName.startsWith(config.getStreamsPrefix())) {
            return true;
        }
        return config.getStreamsBlacklist() != null && config.getStreamsBlacklist().contains(streamName);
    }
    // No prefix configured: blacklist always wins...
    if (config.getStreamsBlacklist() != null && config.getStreamsBlacklist().contains(streamName)) {
        return true;
    }
    // ...and when a whitelist exists, the stream must appear on it.
    return config.getStreamsWhitelist() != null && !config.getStreamsWhitelist().contains(streamName);
}

From source file:dbtucker.connect.kinesis.KinesisSourceTask.java

License:Apache License

@Override
public void start(Map<String, String> props) {
    // Task configuration supplies the region and the shard assignment.
    config = new KinesisSourceTaskConfig(props);

    // Region-scoped Kinesis client used by this task.
    client = new AmazonKinesisClient();
    client.configureRegion(config.getRegionId());

    // Copy the assigned shard list, size the iterator map to match, and
    // begin at the first assigned shard.
    assignedShards = new ArrayList<>(config.getShards());
    shardIterators = new HashMap<>(assignedShards.size());
    currentShardIdx = 0;

    log.info("start: KinesisSourceTaskConfiguration values: {}", config.originalsStrings());
    log.info("start: {} shards assigned to task {}", assignedShards.size(), this.toString());
}

From source file:gov.pnnl.cloud.producer.kinesis.ProducerClient.java

License:Open Source License

/**
 * Creates a producer client backed by a fixed-size thread pool.
 *
 * @param name The name of the client, used for debugging purposes
 * @param streamName The name of the stream to send data to
 * @param threads The number of threads to put in the pool
 * @param region The region that the kinesis stream is in
 * @param stats collection used to record producer statistics
 */
public ProducerClient(String name, String streamName, int threads, Region region, StatisticsCollection stats) {

    // Plain field assignments first.
    this.name = name;
    this.streamName = streamName;
    this.threads = threads;
    this.stats = stats;
    this.canRun = new AtomicBoolean(true);

    // Unbounded queue of pending events to send.
    eventsQueue = new LinkedBlockingQueue<>();

    // Region-scoped Kinesis client shared by the worker threads.
    kinesisClient = new AmazonKinesisClient();
    kinesisClient.setRegion(region);
}