Example usage for the com.amazonaws.services.kinesisfirehose AmazonKinesisFirehoseClient constructor

List of usage examples for the com.amazonaws.services.kinesisfirehose AmazonKinesisFirehoseClient constructor

Introduction

On this page you can find example usage for the com.amazonaws.services.kinesisfirehose AmazonKinesisFirehoseClient constructor.

Prototype

AmazonKinesisFirehoseClient(AwsSyncClientParams clientParams) 

Source Link

Document

Constructs a new client to invoke service methods on Firehose using the specified parameters.
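
In current 1.x SDK versions this constructor is normally not called directly; application code typically obtains a client through AmazonKinesisFirehoseClientBuilder, which assembles the AwsSyncClientParams internally and passes them to this constructor. A minimal sketch of that pattern (the region string is a placeholder):

import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehose;
import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehoseClientBuilder;

public class FirehoseClientSketch {
    public static void main(String[] args) {
        // The builder constructs the AwsSyncClientParams under the hood and
        // hands them to the AmazonKinesisFirehoseClient constructor shown above.
        AmazonKinesisFirehose client = AmazonKinesisFirehoseClientBuilder.standard()
                .withRegion("us-east-1") // placeholder region
                .build();

        System.out.println("Client created: " + client);
        client.shutdown();
    }
}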

Usage

From source file:AbstractAmazonKinesisFirehoseDelivery.java

License:Open Source License

/**
 * Method to initialize the clients using the specified AWSCredentials.
 *
 * @throws Exception if the credentials cannot be loaded
 */
protected static void initClients() throws Exception {
    /*
     * The ProfileCredentialsProvider will return your [default] credential
     * profile by reading from the credentials file located at
     * (~/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (~/.aws/credentials), and is in valid format.", e);
    }

    // S3 client
    s3Client = new AmazonS3Client(credentials);
    Region s3Region = RegionUtils.getRegion(s3RegionName);
    s3Client.setRegion(s3Region);

    // Firehose client
    firehoseClient = new AmazonKinesisFirehoseClient(credentials);
    firehoseClient.setRegion(RegionUtils.getRegion(firehoseRegion));

    // IAM client
    iamClient = new AmazonIdentityManagementClient(credentials);
    iamClient.setRegion(RegionUtils.getRegion(iamRegion));
}
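
Once initClients() has run, the constructed Firehose client can be used to deliver data. The following is a minimal sketch of a single PutRecord call; the delivery stream name ("my-delivery-stream") is a placeholder and the client is passed in by the caller:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehose;
import com.amazonaws.services.kinesisfirehose.model.PutRecordRequest;
import com.amazonaws.services.kinesisfirehose.model.PutRecordResult;
import com.amazonaws.services.kinesisfirehose.model.Record;

protected static void putSampleRecord(AmazonKinesisFirehose firehoseClient) {
    // Wrap the payload bytes in a Firehose Record
    Record record = new Record()
            .withData(ByteBuffer.wrap("sample payload\n".getBytes(StandardCharsets.UTF_8)));

    PutRecordRequest request = new PutRecordRequest()
            .withDeliveryStreamName("my-delivery-stream") // placeholder stream name
            .withRecord(record);

    PutRecordResult result = firehoseClient.putRecord(request);
    System.out.println("Record accepted with id: " + result.getRecordId());
}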

From source file:com.nextdoor.bender.ipc.firehose.FirehoseTransportFactory.java

License:Apache License

@Override
public void setConf(AbstractConfig config) {
    this.config = (FirehoseTransportConfig) config;
    this.serializer = new FirehoseTransportSerializer(this.config.getAppendNewline());
    this.client = new AmazonKinesisFirehoseClient(new ClientConfiguration().withGzip(true));

    if (this.config.getRegion() != null) {
        this.client.withRegion(this.config.getRegion());
    }
}
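
The same gzip-enabled configuration can also be expressed with the client builder, the pattern recommended for newer 1.x SDK releases. This is only a sketch under that assumption, with the region supplied by the caller:

import com.amazonaws.ClientConfiguration;
import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehose;
import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehoseClientBuilder;

public class GzipFirehoseClientSketch {
    public static AmazonKinesisFirehose buildClient(String region) {
        // Enable gzip on the HTTP layer, as in FirehoseTransportFactory above
        ClientConfiguration clientConfig = new ClientConfiguration().withGzip(true);

        AmazonKinesisFirehoseClientBuilder builder = AmazonKinesisFirehoseClientBuilder.standard()
                .withClientConfiguration(clientConfig);

        if (region != null) {
            builder.withRegion(region); // e.g. "us-west-2" (placeholder)
        }
        return builder.build();
    }
}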

From source file:com.streamsets.pipeline.stage.destination.kinesis.FirehoseTarget.java

License:Apache License

@Override
protected List<ConfigIssue> init() {
    List<ConfigIssue> issues = super.init();
    errorRecordHandler = new DefaultErrorRecordHandler(getContext());

    if (issues.isEmpty()) {
        conf.init(getContext(), issues);
        generatorFactory = conf.dataFormatConfig.getDataGeneratorFactory();
        firehoseClient = new AmazonKinesisFirehoseClient(AWSUtil.getCredentialsProvider(conf.awsConfig));
        firehoseClient.configureRegion(conf.region);
    }

    return issues;
}
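
Where no stage-specific credential helper such as AWSUtil is available, the same kind of client can be built from a standard credentials provider. A minimal sketch assuming DefaultAWSCredentialsProviderChain and a caller-supplied region:

import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehose;
import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehoseClientBuilder;

public class ProviderFirehoseClientSketch {
    public static AmazonKinesisFirehose buildClient(String region) {
        // Resolves credentials from environment variables, system properties,
        // the shared credentials file, or instance metadata, in that order.
        AWSCredentialsProvider provider = new DefaultAWSCredentialsProviderChain();

        return AmazonKinesisFirehoseClientBuilder.standard()
                .withCredentials(provider)
                .withRegion(region) // placeholder, e.g. "eu-west-1"
                .build();
    }
}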

From source file:org.voltdb.exportclient.KinesisFirehoseExportClient.java

License:Open Source License

@Override
public void configure(Properties config) throws Exception {
    String regionName = config.getProperty("region", "").trim();
    if (regionName.isEmpty()) {
        throw new IllegalArgumentException("KinesisFirehoseExportClient: must provide a region");
    }
    m_region = RegionUtils.getRegion(regionName);

    m_streamName = config.getProperty("stream.name", "").trim();
    if (m_streamName.isEmpty()) {
        throw new IllegalArgumentException("KinesisFirehoseExportClient: must provide a stream.name");
    }

    m_accessKey = config.getProperty("access.key", "").trim();
    if (m_accessKey.isEmpty()) {
        throw new IllegalArgumentException("KinesisFirehoseExportClient: must provide an access.key");
    }
    m_secretKey = config.getProperty("secret.key", "").trim();
    if (m_secretKey.isEmpty()) {
        throw new IllegalArgumentException("KinesisFirehoseExportClient: must provide a secret.key");
    }

    m_timeZone = TimeZone.getTimeZone(config.getProperty("timezone", VoltDB.REAL_DEFAULT_TIMEZONE.getID()));

    m_recordSeparator = config.getProperty(RECORD_SEPARATOR, "\n");

    config.setProperty(ROW_LENGTH_LIMIT,
            config.getProperty(ROW_LENGTH_LIMIT, Integer.toString(1024000 - m_recordSeparator.length())));

    m_backOffCap = Integer.parseInt(config.getProperty(BACKOFF_CAP, "1000"));
    // minimum interval between putRecordBatch API calls;
    // for small records (row length < 1KB): records/s is the bottleneck
    // for large records (row length > 1KB): data throughput is the bottleneck
    // for the original limit (5000 records/s divided by 500 records per call = 10 calls),
    // the interval is 1000 ms / 10 = 100 ms
    m_streamLimit = Integer.parseInt(config.getProperty(STREAM_LIMIT, "5000"));
    m_backOffBase = Math.max(2, 1000 / (m_streamLimit / BATCH_NUMBER_LIMIT));

    // concurrent AWS clients = number of export tables writing to this stream * number of VoltDB partitions
    m_concurrentWriter = Integer.parseInt(config.getProperty(CONCURRENT_WRITER, "0"));
    m_backOffStrategy = config.getProperty(BACKOFF_TYPE, "equal");

    m_firehoseClient = new AmazonKinesisFirehoseClient(new BasicAWSCredentials(m_accessKey, m_secretKey));
    m_firehoseClient.setRegion(m_region);
    m_backOff = BackOffFactory.getBackOff(m_backOffStrategy, m_backOffBase, m_backOffCap);
    m_sink = new FirehoseSink(m_streamName, m_firehoseClient, m_concurrentWriter, m_backOff);
    m_batchMode = Boolean.parseBoolean(config.getProperty(BATCH_MODE, "true"));
    m_batchSize = Math.min(BATCH_NUMBER_LIMIT, Integer.parseInt(config.getProperty(BATCH_SIZE, "200")));
}
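
The batching and back-off configured above ultimately drive putRecordBatch calls against the delivery stream. The following is a minimal sketch of what a single batch call could look like; the client, stream name, and rows are supplied by the caller, and the retry/back-off handling itself is left out:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehose;
import com.amazonaws.services.kinesisfirehose.model.PutRecordBatchRequest;
import com.amazonaws.services.kinesisfirehose.model.PutRecordBatchResult;
import com.amazonaws.services.kinesisfirehose.model.Record;

public class PutRecordBatchSketch {
    public static void sendBatch(AmazonKinesisFirehose client, String streamName, List<String> rows) {
        // Convert each row into a Firehose Record, appending the record separator
        List<Record> records = new ArrayList<>();
        for (String row : rows) {
            records.add(new Record()
                    .withData(ByteBuffer.wrap((row + "\n").getBytes(StandardCharsets.UTF_8))));
        }

        PutRecordBatchRequest request = new PutRecordBatchRequest()
                .withDeliveryStreamName(streamName)
                .withRecords(records);

        PutRecordBatchResult result = client.putRecordBatch(request);

        // A non-zero failed count means some records were throttled or rejected
        // and should be retried, typically with a back-off like the one above.
        if (result.getFailedPutCount() > 0) {
            System.out.println(result.getFailedPutCount() + " records failed and should be retried");
        }
    }
}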