List of usage examples for com.amazonaws.services.kinesis AmazonKinesisClientBuilder standard
public static AmazonKinesisClientBuilder standard()
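standard() returns a fresh builder wired to the default SDK client configuration and the default credential-provider chain; the examples below all start from it and then customize credentials, region, or endpoint before calling build(). A minimal sketch for orientation (not taken from any of the projects below; it assumes default credentials are available and uses us-east-1 purely as an example region):

import com.amazonaws.regions.Regions;
import com.amazonaws.services.kinesis.AmazonKinesis;
import com.amazonaws.services.kinesis.AmazonKinesisClientBuilder;

public class KinesisClientExample {
    public static void main(String[] args) {
        // Build a client with the default credential chain; the region is an illustrative choice.
        AmazonKinesis kinesis = AmazonKinesisClientBuilder.standard()
                .withRegion(Regions.US_EAST_1)
                .build();
        try {
            // Simple smoke test: list the stream names visible to this account.
            kinesis.listStreams().getStreamNames().forEach(System.out::println);
        } finally {
            kinesis.shutdown();
        }
    }
}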
From source file:com.trulia.stail.Stail.java
License:Apache License
public static void main(String[] args) {
    final Stail stail = new Stail();

    JCommander jct = new JCommander(stail);
    jct.setProgramName("stail");
    try {
        jct.parse(args);

        AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
        if (stail.profile != null) {
            credentialsProvider = new ProfileCredentialsProvider(stail.profile);
        }

        if (stail.role != null) {
            credentialsProvider = new STSAssumeRoleSessionCredentialsProvider.Builder(stail.role, "stail")
                    .withStsClient(AWSSecurityTokenServiceClientBuilder.standard()
                            .withCredentials(credentialsProvider).build())
                    .build();
        }

        AmazonKinesis client = AmazonKinesisClientBuilder.standard()
                .withRegion(stail.region)
                .withCredentials(credentialsProvider)
                .build();

        // prepare the initial shard iterators at the LATEST position
        Map<Shard, String> shardIterators = getShardIterators(client, stail.stream, stail.start);

        IRecordProcessor processor = stail.json ? new JSONRecordProcessor() : new RawRecordProcessor();

        Map<Shard, RateLimiter> rateLimiters = new HashMap<>();
        shardIterators.keySet()
                .forEach(shard -> rateLimiters.put(shard, RateLimiter.create(MAX_SHARD_THROUGHPUT)));

        long end = Strings.isNullOrEmpty(stail.duration) ? Long.MAX_VALUE
                : System.currentTimeMillis() + Duration.parse(stail.duration).toMillis();

        Set<String> reshardedShards = new HashSet<>();
        Map<Shard, String> sequenceNumbers = new HashMap<>();

        while (System.currentTimeMillis() < end) {
            if (!reshardedShards.isEmpty()) {
                // get the new list of shards
                List<Shard> shards = getShards(client, stail.stream);
                for (Shard shard : shards) {
                    if (!Strings.isNullOrEmpty(shard.getParentShardId())
                            && reshardedShards.contains(shard.getParentShardId())) {
                        // the old shard was split, so we need to consume this new shard from the beginning
                        shardIterators.put(shard, getOldestShardIterator(client, stail.stream, shard));
                    } else if (!Strings.isNullOrEmpty(shard.getAdjacentParentShardId())
                            && reshardedShards.contains(shard.getAdjacentParentShardId())) {
                        // the old shards were merged into a new shard
                        shardIterators.put(shard, getOldestShardIterator(client, stail.stream, shard));
                    }
                }
                reshardedShards.clear();
            }

            for (Shard shard : Lists.newArrayList(shardIterators.keySet())) {
                String shardIterator = shardIterators.remove(shard);

                GetRecordsRequest getRecordsRequest = new GetRecordsRequest();
                getRecordsRequest.setShardIterator(shardIterator);
                getRecordsRequest.setLimit(BATCH_SIZE);

                try {
                    GetRecordsResult getRecordsResult = client.getRecords(getRecordsRequest);
                    List<Record> records = getRecordsResult.getRecords();
                    processor.processRecords(records, null);

                    shardIterator = getRecordsResult.getNextShardIterator();

                    if (records.size() <= 0) {
                        // nothing on the stream yet, so lets wait a bit to see if something appears
                        TimeUnit.SECONDS.sleep(1);
                    } else {
                        int bytesRead = records.stream().map(record -> record.getData().position())
                                .reduce((_1, _2) -> _1 + _2).get();

                        sequenceNumbers.put(shard, records.get(records.size() - 1).getSequenceNumber());

                        // optionally sleep if we have hit the limit for this shard
                        rateLimiters.get(shard).acquire(bytesRead);
                    }

                    if (!Strings.isNullOrEmpty(shardIterator)) {
                        shardIterators.put(shard, shardIterator);
                    } else {
                        reshardedShards.add(shard.getShardId());
                    }
                } catch (ProvisionedThroughputExceededException e) {
                    logger.warn("tripped the max throughput. Backing off: {}", e.getMessage());
                    TimeUnit.SECONDS.sleep(6); // we tripped the max throughput. Back off

                    // add the original iterator back into the map so we can try it again
                    shardIterators.put(shard, shardIterator);
                } catch (ExpiredIteratorException e) {
                    logger.debug("Iterator expired", e);

                    String sequenceNumber = sequenceNumbers.get(shard);
                    if (sequenceNumber == null) {
                        logger.warn("No previously known sequence number for {}. Moving to LATEST",
                                shard.getShardId());
                        shardIterators.put(shard, getShardIterator(client, stail.stream, shard, null));
                    } else {
                        shardIterators.put(shard,
                                getShardIteratorAtSequenceNumber(client, stail.stream, shard, sequenceNumber));
                    }
                }
            }
        }
    } catch (ParameterException e) {
        jct.usage();
        System.exit(1);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        System.exit(2);
    }
}
From source file:org.apache.beam.sdk.io.kinesis.BasicKinesisProvider.java
License:Apache License
@Override
public AmazonKinesis getKinesisClient() {
    AmazonKinesisClientBuilder clientBuilder = AmazonKinesisClientBuilder.standard()
            .withCredentials(getCredentialsProvider());
    if (serviceEndpoint == null) {
        clientBuilder.withRegion(region);
    } else {
        clientBuilder.withEndpointConfiguration(
                new AwsClientBuilder.EndpointConfiguration(serviceEndpoint, region.getName()));
    }
    return clientBuilder.build();
}
From source file:org.apache.druid.indexing.kinesis.KinesisRecordSupplier.java
License:Apache License
public static AmazonKinesis getAmazonKinesisClient(String endpoint, AWSCredentialsConfig awsCredentialsConfig,
        String awsAssumedRoleArn, String awsExternalId) {
    AWSCredentialsProvider awsCredentialsProvider = AWSCredentialsUtils
            .defaultAWSCredentialsProviderChain(awsCredentialsConfig);

    if (awsAssumedRoleArn != null) {
        log.info("Assuming role [%s] with externalId [%s]", awsAssumedRoleArn, awsExternalId);

        STSAssumeRoleSessionCredentialsProvider.Builder builder = new STSAssumeRoleSessionCredentialsProvider.Builder(
                awsAssumedRoleArn, StringUtils.format("druid-kinesis-%s", UUID.randomUUID().toString()))
                        .withStsClient(AWSSecurityTokenServiceClientBuilder.standard()
                                .withCredentials(awsCredentialsProvider).build());

        if (awsExternalId != null) {
            builder.withExternalId(awsExternalId);
        }

        awsCredentialsProvider = builder.build();
    }

    return AmazonKinesisClientBuilder.standard()
            .withCredentials(awsCredentialsProvider)
            .withClientConfiguration(new ClientConfiguration())
            .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint,
                    AwsHostNameUtils.parseRegion(endpoint, null)))
            .build();
}
From source file:org.apache.samza.system.kinesis.KinesisSystemAdmin.java
License:Apache License
private SystemStreamMetadata createSystemStreamMetadata(String stream) {
    LOG.info("create stream metadata for stream {} based on aws stream", stream);

    Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> metadata = new HashMap<>();
    AmazonKinesisClient client = null;

    try {
        ClientConfiguration clientConfig = kConfig.getAWSClientConfig(system);
        AmazonKinesisClientBuilder builder = AmazonKinesisClientBuilder.standard()
                .withCredentials(kConfig.credentialsProviderForStream(system, stream))
                .withClientConfiguration(clientConfig);
        builder.setRegion(kConfig.getRegion(system, stream).getName());

        client = (AmazonKinesisClient) builder.build();
        StreamDescription desc = client.describeStream(stream).getStreamDescription();
        IntStream.range(0, desc.getShards().size())
                .forEach(i -> metadata.put(new Partition(i), SYSTEM_STREAM_PARTITION_METADATA));
    } catch (Exception e) {
        String errMsg = "couldn't load metadata for stream " + stream;
        LOG.error(errMsg, e);
        throw new SamzaException(errMsg, e);
    } finally {
        if (client != null) {
            client.shutdown();
        }
    }

    return new SystemStreamMetadata(stream, metadata);
}
From source file:org.lendingclub.mercator.aws.KinesisScanner.java
License:Apache License
@Override
protected AmazonKinesisClient createClient() {
    return (AmazonKinesisClient) builder.configure(AmazonKinesisClientBuilder.standard()).build();
}
From source file:org.wildfly.camel.test.common.aws.KinesisUtils.java
License:Apache License
public static AmazonKinesisClient createKinesisClient() {
    BasicCredentialsProvider credentials = BasicCredentialsProvider.standard();
    AmazonKinesisClient client = !credentials.isValid() ? null
            : (AmazonKinesisClient) AmazonKinesisClientBuilder.standard()
                    .withCredentials(credentials)
                    .withRegion("eu-west-1")
                    .build();
    return client;
}