Example usage for com.amazonaws.services.kinesis AmazonKinesis setRegion

List of usage examples for com.amazonaws.services.kinesis AmazonKinesis setRegion

Introduction

On this page you can find example usages of the com.amazonaws.services.kinesis AmazonKinesis setRegion method.

Prototype

@Deprecated
void setRegion(Region region);

Document

An alternative to AmazonKinesis#setEndpoint(String); sets the regional endpoint for this client's service calls. The method is marked @Deprecated, and newer AWS SDK for Java 1.x releases typically supply the region through the client builder instead (see the sketch below).
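
As a minimal, self-contained sketch (not taken from any of the samples below; the class name, region, and credentials provider are placeholder choices), the deprecated call and the builder-based alternative look roughly like this:

import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.kinesis.AmazonKinesis;
import com.amazonaws.services.kinesis.AmazonKinesisClient;
import com.amazonaws.services.kinesis.AmazonKinesisClientBuilder;

public class SetRegionSketch {
    public static void main(String[] args) {
        // Deprecated style, as used in the examples below: build the client, then pin its region.
        AmazonKinesis kinesis = new AmazonKinesisClient(new DefaultAWSCredentialsProviderChain());
        kinesis.setRegion(Region.getRegion(Regions.US_EAST_1)); // US_EAST_1 is a placeholder region

        // Builder style, which newer AWS SDK for Java 1.x releases favor over setRegion.
        AmazonKinesis kinesisFromBuilder = AmazonKinesisClientBuilder.standard()
                .withRegion(Regions.US_EAST_1)
                .build();
    }
}

All of the examples below follow the first pattern, usually applying the same Region to a companion AmazonDynamoDBClient as well.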

Usage

From source file: com.alertlogic.aws.analytics.poc.DeleteResources.java

License: Open Source License

public static void main(String[] args) {
    if (args.length != 4) {
        System.err.println("Usage: " + DeleteResources.class.getSimpleName()
                + " <application name> <stream name> <DynamoDB table name> <region>");
        System.exit(1);
    }

    String applicationName = args[0];
    String streamName = args[1];
    String countsTableName = args[2];
    Region region = Utils.parseRegion(args[3]);

    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    ClientConfiguration clientConfig = Utils.configureUserAgentForSample(new ClientConfiguration());
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, clientConfig);
    kinesis.setRegion(region);
    AmazonDynamoDB dynamoDB = new AmazonDynamoDBClient(credentialsProvider, clientConfig);
    dynamoDB.setRegion(region);

    StreamUtils streamUtils = new StreamUtils(kinesis);
    DynamoDBUtils dynamoDBUtils = new DynamoDBUtils(dynamoDB);

    LOG.info("Removing Amazon Kinesis and DynamoDB resources used by the sample application...");

    streamUtils.deleteStream(streamName);
    // The Kinesis Client Library creates a table to manage shard leases and uses the application name for its name.
    dynamoDBUtils.deleteTable(applicationName);
    dynamoDBUtils.deleteTable(countsTableName);
}

From source file: com.alertlogic.aws.kinesis.test1.StreamProcessor.java

License: Open Source License

/**
 * Start the Kinesis Client application.
 *
 * @param args Expecting 4 arguments: Application name to use for the Kinesis Client Application, Stream name to
 *        read from, DynamoDB table name to persist counts into, and the AWS region in which these resources
 *        exist or should be created.
 */
public static void main(String[] args) throws UnknownHostException {
    if (args.length != 4) {
        System.err.println("Usage: " + StreamProcessor.class.getSimpleName()
                + " <application name> <stream name> <DynamoDB table name> <region>");
        System.exit(1);
    }

    String applicationName = args[0];
    String streamName = args[1];
    String countsTableName = args[2];
    Region region = SampleUtils.parseRegion(args[3]);

    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    ClientConfiguration clientConfig = SampleUtils.configureUserAgentForSample(new ClientConfiguration());
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, clientConfig);
    kinesis.setRegion(region);
    AmazonDynamoDB dynamoDB = new AmazonDynamoDBClient(credentialsProvider, clientConfig);
    dynamoDB.setRegion(region);

    // Creates a stream to write to, if it doesn't exist
    StreamUtils streamUtils = new StreamUtils(kinesis);
    streamUtils.createStreamIfNotExists(streamName, 2);
    LOG.info(String.format("%s stream is ready for use", streamName));

    DynamoDBUtils dynamoDBUtils = new DynamoDBUtils(dynamoDB);
    dynamoDBUtils.createCountTableIfNotExists(countsTableName);
    LOG.info(String.format("%s DynamoDB table is ready for use", countsTableName));

    String workerId = String.valueOf(UUID.randomUUID());
    LOG.info(String.format("Using working id: %s", workerId));
    KinesisClientLibConfiguration kclConfig = new KinesisClientLibConfiguration(applicationName, streamName,
            credentialsProvider, workerId);
    kclConfig.withCommonClientConfig(clientConfig);
    kclConfig.withRegionName(region.getName());
    kclConfig.withInitialPositionInStream(InitialPositionInStream.LATEST);

    // Persist counts to DynamoDB
    DynamoDBCountPersister persister = new DynamoDBCountPersister(
            dynamoDBUtils.createMapperForTable(countsTableName));

    IRecordProcessorFactory recordProcessor = new CountingRecordProcessorFactory<HttpReferrerPair>(
            HttpReferrerPair.class, persister, COMPUTE_RANGE_FOR_COUNTS_IN_MILLIS, COMPUTE_INTERVAL_IN_MILLIS);

    Worker worker = new Worker(recordProcessor, kclConfig);

    int exitCode = 0;
    try {
        worker.run();
    } catch (Throwable t) {
        LOG.error("Caught throwable while processing data.", t);
        exitCode = 1;
    }
    System.exit(exitCode);
}

From source file: com.alertlogic.aws.kinesis.test1.StreamWriter.java

License: Open Source License

/**
 * Start a number of threads and send randomly generated {@link HttpReferrerPair}s to a Kinesis Stream until the
 * program is terminated.
 *
 * @param args Expecting 3 arguments: the number of threads to use to send data to Kinesis, the name of the
 *        stream to send records to, and the AWS region in which these resources exist or should be created.
 * @throws InterruptedException If this application is interrupted while sending records to Kinesis.
 */
public static void main(String[] args) throws InterruptedException {
    if (args.length != 3) {
        System.err.println(
                "Usage: " + StreamWriter.class.getSimpleName() + " <number of threads> <stream name> <region>");
        System.exit(1);
    }

    int numberOfThreads = Integer.parseInt(args[0]);
    String streamName = args[1];
    Region region = SampleUtils.parseRegion(args[2]);

    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    ClientConfiguration clientConfig = SampleUtils.configureUserAgentForSample(new ClientConfiguration());
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, clientConfig);
    kinesis.setRegion(region);

    // The more resources we declare the higher write IOPS we need on our DynamoDB table.
    // We write a record for each resource every interval.
    // If interval = 500ms, resource count = 7 we need: (1000/500 * 7) = 14 write IOPS minimum.
    List<String> resources = new ArrayList<>();
    resources.add("/index.html");

    // These are the possible referrers to use when generating pairs
    List<String> referrers = new ArrayList<>();
    referrers.add("http://www.amazon.com");
    referrers.add("http://www.google.com");
    referrers.add("http://www.yahoo.com");
    referrers.add("http://www.bing.com");
    referrers.add("http://www.stackoverflow.com");
    referrers.add("http://www.reddit.com");

    HttpReferrerPairFactory pairFactory = new HttpReferrerPairFactory(resources, referrers);

    // Creates a stream to write to with 2 shards if it doesn't exist
    StreamUtils streamUtils = new StreamUtils(kinesis);
    streamUtils.createStreamIfNotExists(streamName, 2);
    LOG.info(String.format("%s stream is ready for use", streamName));

    final HttpReferrerKinesisPutter putter = new HttpReferrerKinesisPutter(pairFactory, kinesis, streamName);

    ExecutorService es = Executors.newCachedThreadPool();

    Runnable pairSender = new Runnable() {
        @Override
        public void run() {
            try {
                putter.sendPairsIndefinitely(DELAY_BETWEEN_RECORDS_IN_MILLIS, TimeUnit.MILLISECONDS);
            } catch (Exception ex) {
                LOG.warn(
                        "Thread encountered an error while sending records. Records will no longer be put by this thread.",
                        ex);
            }
        }
    };

    for (int i = 0; i < numberOfThreads; i++) {
        es.submit(pairSender);
    }

    LOG.info(String.format("Sending pairs with a %dms delay between records with %d thread(s).",
            DELAY_BETWEEN_RECORDS_IN_MILLIS, numberOfThreads));

    es.shutdown();
    es.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
}

From source file: com.alertlogic.aws.kinesis.test1.utils.DeleteSampleResources.java

License: Open Source License

public static void main(String[] args) {
    if (args.length != 4) {
        System.err.println("Usage: " + DeleteSampleResources.class.getSimpleName()
                + " <application name> <stream name> <DynamoDB table name> <region>");
        System.exit(1);
    }

    String applicationName = args[0];
    String streamName = args[1];
    String countsTableName = args[2];
    Region region = SampleUtils.parseRegion(args[3]);

    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    ClientConfiguration clientConfig = SampleUtils.configureUserAgentForSample(new ClientConfiguration());
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, clientConfig);
    kinesis.setRegion(region);
    AmazonDynamoDB dynamoDB = new AmazonDynamoDBClient(credentialsProvider, clientConfig);
    dynamoDB.setRegion(region);

    StreamUtils streamUtils = new StreamUtils(kinesis);
    DynamoDBUtils dynamoDBUtils = new DynamoDBUtils(dynamoDB);

    LOG.info("Removing Amazon Kinesis and DynamoDB resources used by the sample application...");

    streamUtils.deleteStream(streamName);
    // The Kinesis Client Library creates a table to manage shard leases and uses the application name for its name.
    dynamoDBUtils.deleteTable(applicationName);
    dynamoDBUtils.deleteTable(countsTableName);
}

From source file: com.calamp.services.kinesis.events.processor.CalAmpEventProcessor.java

License: Open Source License

public static void main(String[] args) throws Exception {
    checkUsage(args);
    //String applicationName = args[0];
    //String streamName = args[1];
    //Region region = RegionUtils.getRegion(args[2]);
    boolean isUnordered = Boolean.valueOf(args[3]);
    String applicationName = isUnordered ? CalAmpParameters.sortAppName : CalAmpParameters.consumeAppName;
    String streamName = isUnordered ? CalAmpParameters.unorderdStreamName : CalAmpParameters.orderedStreamName;
    Region region = RegionUtils.getRegion(CalAmpParameters.regionName);

    if (region == null) {
        System.err.println(CalAmpParameters.regionName + " is not a valid AWS region.");
        System.exit(1);
    }

    setLogLevels();
    AWSCredentialsProvider credentialsProvider = CredentialUtils.getCredentialsProvider();
    ClientConfiguration cc = ConfigurationUtils.getClientConfigWithUserAgent(true);
    AmazonKinesis kinesisClient = new AmazonKinesisClient(credentialsProvider, cc);
    kinesisClient.setRegion(region);

    //Utils.kinesisClient = kinesisClient;

    String workerId = String.valueOf(UUID.randomUUID());
    KinesisClientLibConfiguration kclConfig = new KinesisClientLibConfiguration(applicationName, streamName,
            credentialsProvider, workerId).withRegionName(region.getName()).withCommonClientConfig(cc)
                    .withMaxRecords(com.calamp.services.kinesis.events.utils.CalAmpParameters.maxRecPerPoll)
                    .withIdleTimeBetweenReadsInMillis(
                            com.calamp.services.kinesis.events.utils.CalAmpParameters.pollDelayMillis)
                    .withCallProcessRecordsEvenForEmptyRecordList(CalAmpParameters.alwaysPoll)
                    .withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);

    IRecordProcessorFactory processorFactory = new RecordProcessorFactory(isUnordered);

    // Create the KCL worker with the stock trade record processor factory
    Worker worker = new Worker(processorFactory, kclConfig);

    int exitCode = 0;
    try {
        worker.run();
    } catch (Throwable t) {
        LOG.error("Caught throwable while processing data.", t);
        exitCode = 1;
    }
    System.exit(exitCode);
}

From source file: com.calamp.services.kinesis.events.writer.CalAmpEventWriter.java

License: Open Source License

public static void main(String[] args) throws Exception {
    checkUsage(args);
    String streamName = CalAmpParameters.unorderdStreamName; //args[0];
    String regionName = CalAmpParameters.regionName;
    Region region = RegionUtils.getRegion(regionName);
    if (region == null) {
        System.err.println(regionName + " is not a valid AWS region.");
        System.exit(1);
    }
    AWSCredentials credentials = CredentialUtils.getCredentialsProvider().getCredentials();

    ClientConfiguration ccuo = ConfigurationUtils.getClientConfigWithUserAgent(true);
    AmazonKinesis kinesisClient = new AmazonKinesisClient(credentials, ccuo);
    kinesisClient.setRegion(region);

    // Validate that the stream exists and is active
    Utils.validateStream(kinesisClient, streamName);

    int numToGen = 50000;
    String filePath = "kinesis-rand-events.in";

    //genRandEventsToFile( filePath, numToGen );
    List<CalAmpEvent> buffer = readEventsFromFile(filePath);
    Utils.initLazyLog(CalAmpParameters.writeLogName, "Producer Send Start");

    Utils.putByParts(buffer, CalAmpParameters.unorderdStreamName, kinesisClient, CalAmpParameters.writeLogName);
    //Utils.putObo(buffer, CalAmpParameters.unorderdStreamName, kinesisClient, CalAmpParameters.writeLogName);

    //runningLoop(new RandomEventSender(kinesisClient, filePath, CalAmpParameters.pollDelayMillis));
    System.out.println("Writer Done");
}

From source file: com.haskins.cloudtrailviewer.dialog.resourcedetail.detailpanels.KinesisStreamDetail.java

License: Open Source License

@Override
public String retrieveDetails(ResourceDetailRequest detailRequest) {

    String response = null;

    try {

        AmazonKinesis client = new AmazonKinesisClient(credentials);
        client.setRegion(Region.getRegion(Regions.fromName(detailRequest.getRegion())));

        DescribeStreamRequest request = new DescribeStreamRequest();
        request.setStreamName(detailRequest.getResourceName());

        DescribeStreamResult result = client.describeStream(request);
        buildUI(result);

    } catch (IllegalArgumentException | AmazonClientException e) {
        response = e.getMessage();
        LOGGER.log(Level.WARNING, "Problem retrieving Kinesis details from AWS", e);
    }

    return response;
}

From source file: com.innoq.hagmans.bachelor.TemperatureConsumer.java

License: Open Source License

public static void main(String[] args) throws InterruptedException {
    if (args.length == 2) {
        streamName = args[0];
        db_name = args[1];
    }

    // Initialize Utils
    KinesisClientLibConfiguration config = new KinesisClientLibConfiguration(db_name, streamName,
            new DefaultAWSCredentialsProviderChain(), "KinesisProducerLibSampleConsumer")
                    .withRegionName(TemperatureProducer.REGION)
                    .withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);

    Region region = RegionUtils.getRegion(TemperatureProducer.REGION);
    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    AmazonDynamoDB amazonDynamoDB = new AmazonDynamoDBClient(credentialsProvider, new ClientConfiguration());
    AmazonDynamoDBClient client = new AmazonDynamoDBClient(credentialsProvider);
    client.setRegion(region);
    DynamoDB dynamoDB = new DynamoDB(client);
    amazonDynamoDB.setRegion(region);
    DynamoDBUtils dbUtils = new DynamoDBUtils(dynamoDB, amazonDynamoDB, client);
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, new ClientConfiguration());
    kinesis.setRegion(region);
    StreamUtils streamUtils = new StreamUtils(kinesis);
    try {
        if (!streamUtils.isActive(kinesis.describeStream(streamName))) {
            log.info("Stream is not active. Waiting for Stream to become active....");
            streamUtils.waitForStreamToBecomeActive(streamName);
        }
    } catch (ResourceNotFoundException e) {
        log.info("Stream is not created right now. Waiting for stream to get created and become active....");
        streamUtils.waitForStreamToBecomeActive(streamName);
    }
    dbUtils.deleteTable(db_name);
    dbUtils.createTemperatureTableIfNotExists(tableName);

    Thread.sleep(1000);

    final TemperatureConsumer consumer = new TemperatureConsumer();

    new Worker.Builder().recordProcessorFactory(consumer).config(config).build().run();
}

From source file: com.innoq.hagmans.bachelor.TemperatureProducer.java

License: Open Source License

public static void main(String[] args) throws Exception {

    if (args.length == 4) {
        streamName = args[0];
        sensorName = args[1];
        secondsToRun = Integer.parseInt(args[2]);
        recordsPerSecond = Integer.parseInt(args[3]);
    }

    // Create a new stream if it doesn't already exists
    Region region = RegionUtils.getRegion(REGION);
    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, new ClientConfiguration());
    kinesis.setRegion(region);
    StreamUtils streamUtils = new StreamUtils(kinesis);
    streamUtils.createStream(streamName, NUMBER_OF_SHARDS);

    final KinesisProducer producer = getKinesisProducer();

    // The monotonically increasing sequence number we will put in the data
    // of each record
    final AtomicLong sequenceNumber = new AtomicLong(0);

    // The number of records that have finished (either successfully put, or
    // failed)
    final AtomicLong completed = new AtomicLong(0);

    // KinesisProducer.addUserRecord is asynchronous. A callback can be used
    // to receive the results.
    final FutureCallback<UserRecordResult> callback = new FutureCallback<UserRecordResult>() {
        @Override
        public void onFailure(Throwable t) {
            // We don't expect any failures during this sample. If it
            // happens, we will log the first one and exit.
            if (t instanceof UserRecordFailedException) {
                Attempt last = Iterables.getLast(((UserRecordFailedException) t).getResult().getAttempts());
                log.error(String.format("Record failed to put - %s : %s", last.getErrorCode(),
                        last.getErrorMessage()));
            }
            log.error("Exception during put", t);
            System.exit(1);
        }

        @Override
        public void onSuccess(UserRecordResult result) {
            temperature = Utils.getNextTemperature(temperature);
            completed.getAndIncrement();
        }
    };

    // The lines within run() are the essence of the KPL API.
    final Runnable putOneRecord = new Runnable() {
        @Override
        public void run() {
            ByteBuffer data = Utils.generateData(temperature, sensorName, DATA_SIZE);
            // TIMESTAMP is our partition key
            ListenableFuture<UserRecordResult> f = producer.addUserRecord(streamName, TIMESTAMP,
                    Utils.randomExplicitHashKey(), data);
            Futures.addCallback(f, callback);
        }
    };

    // This gives us progress updates
    EXECUTOR.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            long put = sequenceNumber.get();
            long total = recordsPerSecond * secondsToRun;
            double putPercent = 100.0 * put / total;
            long done = completed.get();
            double donePercent = 100.0 * done / total;
            log.info(String.format("Put %d of %d so far (%.2f %%), %d have completed (%.2f %%)", put, total,
                    putPercent, done, donePercent));
        }
    }, 1, 1, TimeUnit.SECONDS);

    // Kick off the puts
    log.info(String.format("Starting puts... will run for %d seconds at %d records per second", secondsToRun,
            recordsPerSecond));
    executeAtTargetRate(EXECUTOR, putOneRecord, sequenceNumber, secondsToRun, recordsPerSecond);

    // Wait for puts to finish. After this statement returns, we have
    // finished all calls to putRecord, but the records may still be
    // in-flight. We will additionally wait for all records to actually
    // finish later.
    EXECUTOR.awaitTermination(secondsToRun + 1, TimeUnit.SECONDS);

    // If you need to shutdown your application, call flushSync() first to
    // send any buffered records. This method will block until all records
    // have finished (either success or fail). There are also asynchronous
    // flush methods available.
    //
    // Records are also automatically flushed by the KPL after a while based
    // on the time limit set with Configuration.setRecordMaxBufferedTime()
    log.info("Waiting for remaining puts to finish...");
    producer.flushSync();
    log.info("All records complete.");

    // This kills the child process and shuts down the threads managing it.
    producer.destroy();
    log.info("Finished.");
}

From source file: com.kinesis.datavis.utils.DeleteSampleResources.java

License: Open Source License

public static void main(String[] args) {
    if (args.length != 4) {
        System.err.println("Usage: " + DeleteSampleResources.class.getSimpleName()
                + " <application name> <stream name> <DynamoDB table name> <region>");
        System.exit(1);
    }

    String applicationName = args[0];
    String streamName = args[1];
    String countsTableName = args[2];
    Region region = AppUtils.parseRegion(args[3]);

    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();

    ClientConfiguration clientConfig = AppUtils.configureUserAgentForSample(new ClientConfiguration());

    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, clientConfig);
    kinesis.setRegion(region);

    AmazonDynamoDB dynamoDB = new AmazonDynamoDBClient(credentialsProvider, clientConfig);
    dynamoDB.setRegion(region);

    StreamUtils streamUtils = new StreamUtils(kinesis);
    DynamoDBUtils dynamoDBUtils = new DynamoDBUtils(dynamoDB);

    LOG.info("Removing Amazon Kinesis and DynamoDB resources used by the sample application...");

    streamUtils.deleteStream(streamName);

    // The Kinesis Client Library creates a table to manage shard leases and uses the application name for its name.
    dynamoDBUtils.deleteTable(applicationName);
    dynamoDBUtils.deleteTable(countsTableName);
}