Example usage for com.amazonaws.services.kinesisfirehose.model Record Record

Introduction

On this page you can find usage examples for the Record() constructor of com.amazonaws.services.kinesisfirehose.model.Record.

Prototype

Record()

Usage
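
Before the sourced examples below, here is a minimal, self-contained sketch of the typical pattern. The client construction and the delivery stream name "my-delivery-stream" are placeholder assumptions, not taken from any of the sources that follow.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehose;
import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehoseClientBuilder;
import com.amazonaws.services.kinesisfirehose.model.PutRecordRequest;
import com.amazonaws.services.kinesisfirehose.model.Record;

public class RecordExample {
    public static void main(String[] args) {
        AmazonKinesisFirehose firehose = AmazonKinesisFirehoseClientBuilder.defaultClient();
        // Build a Record with the no-argument constructor and attach the payload.
        Record record = new Record()
                .withData(ByteBuffer.wrap("hello\n".getBytes(StandardCharsets.UTF_8)));
        firehose.putRecord(new PutRecordRequest()
                .withDeliveryStreamName("my-delivery-stream") // placeholder, not a real stream
                .withRecord(record));
    }
}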

From source file:AbstractAmazonKinesisFirehoseDelivery.java

License:Open Source License

/**
 * Method to create the record object for given data.
 *
 * @param data the content data
 * @return the Record object
 */
private static Record createRecord(String data) {
    return new Record().withData(ByteBuffer.wrap(data.getBytes()));
}
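
Note that data.getBytes() encodes with the platform default charset. A variant that pins the charset explicitly (a sketch, not part of the original source) uses java.nio.charset.StandardCharsets:

private static Record createRecord(String data) {
    // Encode with an explicit charset to avoid platform-dependent behavior.
    return new Record().withData(ByteBuffer.wrap(data.getBytes(StandardCharsets.UTF_8)));
}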

From source file:ch.bbv.application.lambda.KinesisToFirehose.java

public void kinesisHandler(KinesisEvent event, Context context) {
    logger = context.getLogger();
    setup();
    for (KinesisEvent.KinesisEventRecord rec : event.getRecords()) {
        logger.log("Got message ");
        String msg = new String(rec.getKinesis().getData().array()) + "\n";
        Record deliveryStreamRecord = new Record().withData(ByteBuffer.wrap(msg.getBytes()));

        PutRecordRequest putRecordRequest = new PutRecordRequest().withDeliveryStreamName(deliveryStreamName)
                .withRecord(deliveryStreamRecord);

        logger.log("Putting message");
        firehoseClient.putRecord(putRecordRequest);
        logger.log("Successful Put");
    }
}
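
Calling putRecord once per incoming message costs one service round trip each. A batched variant of the same handler (a sketch reusing the snippet's deliveryStreamName, firehoseClient, and logger fields; PutRecordBatch accepts at most 500 records per call):

public void kinesisHandlerBatched(KinesisEvent event, Context context) {
    logger = context.getLogger();
    setup();
    List<Record> records = new ArrayList<>();
    for (KinesisEvent.KinesisEventRecord rec : event.getRecords()) {
        String msg = new String(rec.getKinesis().getData().array()) + "\n";
        records.add(new Record().withData(ByteBuffer.wrap(msg.getBytes())));
    }
    if (!records.isEmpty()) {
        // Assumes the Lambda event carries at most 500 records (the
        // PutRecordBatch limit); larger events would need to be chunked.
        PutRecordBatchRequest batch = new PutRecordBatchRequest()
                .withDeliveryStreamName(deliveryStreamName)
                .withRecords(records);
        firehoseClient.putRecordBatch(batch);
        logger.log("Put " + records.size() + " records as one batch");
    }
}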

From source file:ch.bbv.application.lambda.KinesisToFirehose.java

private void putSampleMessages() {
    setup();
    for (int i = 0; i < 20000; i++) {
        String message = "{\"timestamp\":\"" + new Date().getTime() + "\"}";
        Record record = new Record().withData(ByteBuffer.wrap(message.getBytes()));
        PutRecordRequest putRecordInHoseRequest = new PutRecordRequest()
                .withDeliveryStreamName(deliveryStreamName).withRecord(record);

        PutRecordResult res = firehoseClient.putRecord(putRecordInHoseRequest);
        logIt(res.toString());
    }
}
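
A tight loop of 20,000 single puts can exceed the delivery stream's throughput limits, which the service signals with a ServiceUnavailableException. A retry wrapper with exponential backoff (a sketch, not from the source; it reuses the snippet's firehoseClient field):

private PutRecordResult putWithBackoff(PutRecordRequest request) throws InterruptedException {
    long backoffMillis = 100;
    while (true) {
        try {
            return firehoseClient.putRecord(request);
        } catch (ServiceUnavailableException e) {
            // Throttled: back off exponentially, capped at five seconds, then retry.
            Thread.sleep(backoffMillis);
            backoffMillis = Math.min(backoffMillis * 2, 5000);
        }
    }
}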

From source file:com.amazon.kinesis.streaming.agent.tailing.FirehoseSender.java

License:Open Source License

@Override
protected BufferSendResult<FirehoseRecord> attemptSend(RecordBuffer<FirehoseRecord> buffer) {
    activeBatchPutCalls.incrementAndGet();
    IMetricsScope metrics = agentContext.beginScope();
    metrics.addDimension(Metrics.DESTINATION_DIMENSION, "DeliveryStream:" + getDestination());
    try {
        BufferSendResult<FirehoseRecord> sendResult = null;
        List<Record> requestRecords = new ArrayList<>();
        for (FirehoseRecord data : buffer) {
            Record record = new Record();
            record.setData(data.data());
            requestRecords.add(record);
        }
        PutRecordBatchRequest request = new PutRecordBatchRequest();
        request.setRecords(requestRecords);
        request.setDeliveryStreamName(getDestination());
        PutRecordBatchResult result = null;
        Stopwatch timer = Stopwatch.createStarted();
        totalBatchPutCalls.incrementAndGet();
        try {
            logger.trace("{}: Sending buffer {} to firehose {}...", flow.getId(), buffer, getDestination());
            metrics.addCount(RECORDS_ATTEMPTED_METRIC, requestRecords.size());
            result = agentContext.getFirehoseClient().putRecordBatch(request);
            metrics.addCount(SERVICE_ERRORS_METRIC, 0);
        } catch (AmazonServiceException e) {
            metrics.addCount(SERVICE_ERRORS_METRIC, 1);
            totalBatchPutServiceErrors.incrementAndGet();
            throw e;
        } catch (Exception e) {
            metrics.addCount(SERVICE_ERRORS_METRIC, 1);
            totalBatchPutOtherErrors.incrementAndGet();
            throw e;
        } finally {
            totalBatchPutLatency.addAndGet(timer.elapsed(TimeUnit.MILLISECONDS));
        }
        if (sendResult == null) {
            List<Integer> sentRecords = new ArrayList<>(requestRecords.size());
            Multiset<String> errors = HashMultiset.<String>create();
            int index = 0;
            long totalBytesSent = 0;
            for (PutRecordBatchResponseEntry responseEntry : result.getRequestResponses()) {
                Record record = requestRecords.get(index);
                if (responseEntry.getErrorCode() == null) {
                    sentRecords.add(index);
                    totalBytesSent += record.getData().limit();
                } else {
                    logger.trace("{}:{} Record {} returned error code {}: {}", flow.getId(), buffer, index,
                            responseEntry.getErrorCode(), responseEntry.getErrorMessage());
                    errors.add(responseEntry.getErrorCode());
                }
                ++index;
            }
            if (sentRecords.size() == requestRecords.size()) {
                sendResult = BufferSendResult.succeeded(buffer);
            } else {
                buffer = buffer.remove(sentRecords);
                sendResult = BufferSendResult.succeeded_partially(buffer, requestRecords.size());
            }
            metrics.addData(BYTES_SENT_METRIC, totalBytesSent, StandardUnit.Bytes);
            int failedRecordCount = requestRecords.size() - sentRecords.size();
            metrics.addCount(RECORD_ERRORS_METRIC, failedRecordCount);
            logger.debug("{}:{} Records sent firehose {}: {}. Failed records: {}", flow.getId(), buffer,
                    getDestination(), sentRecords.size(), failedRecordCount);
            totalRecordsAttempted.addAndGet(requestRecords.size());
            totalRecordsSent.addAndGet(sentRecords.size());
            totalRecordsFailed.addAndGet(failedRecordCount);

            if (logger.isDebugEnabled() && !errors.isEmpty()) {
                synchronized (totalErrors) {
                    StringBuilder strErrors = new StringBuilder();
                    for (Multiset.Entry<String> err : errors.entrySet()) {
                        AtomicLong counter = totalErrors.get(err.getElement());
                        if (counter == null)
                            totalErrors.put(err.getElement(), counter = new AtomicLong());
                        counter.addAndGet(err.getCount());
                        if (strErrors.length() > 0)
                            strErrors.append(", ");
                        strErrors.append(err.getElement()).append(": ").append(err.getCount());
                    }
                    logger.debug("{}:{} Errors from firehose {}: {}", flow.getId(), buffer,
                            flow.getDestination(), strErrors.toString());
                }
            }
        }
        return sendResult;
    } finally {
        metrics.commit();
        activeBatchPutCalls.decrementAndGet();
    }
}
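
The per-entry loop above is the general pattern for partial failures: putRecordBatch can succeed as a call while individual entries fail, and the request and response lists are index-aligned. A compact helper that collects just the failed records for a re-send (a sketch, assuming the same SDK types):

private static List<Record> failedRecords(List<Record> sent, PutRecordBatchResult result) {
    List<Record> failed = new ArrayList<>();
    if (result.getFailedPutCount() > 0) {
        List<PutRecordBatchResponseEntry> responses = result.getRequestResponses();
        for (int i = 0; i < responses.size(); i++) {
            // Entries with a non-null error code were rejected by the service.
            if (responses.get(i).getErrorCode() != null) {
                failed.add(sent.get(i));
            }
        }
    }
    return failed;
}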

From source file:com.gu.logback.appender.kinesis.FirehoseAppender.java

License:Open Source License

@Override
protected void putMessage(String message) throws Exception {
    ByteBuffer data = ByteBuffer.wrap(message.getBytes(getEncoding()));
    getClient().putRecordAsync(new PutRecordRequest().withDeliveryStreamName(getStreamName())
            .withRecord(new Record().withData(data)), asyncCallHandler);
}
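
Note that putRecordAsync with an AsyncHandler is only defined on the asynchronous client interface, so getClient() here must return an AmazonKinesisFirehoseAsync. One way to build such a client (a sketch; the default settings are an assumption):

AmazonKinesisFirehoseAsync client = AmazonKinesisFirehoseAsyncClientBuilder.defaultClient();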

From source file:com.nextdoor.bender.ipc.firehose.FirehoseTransportBufferBatch.java

License:Apache License

@Override
public boolean add(InternalEvent ievent) throws IllegalStateException, IOException {
    byte[] record = serializer.serialize(ievent);

    /*
     * Restrict size of individual record
     */
    if (record.length > MAX_RECORD_SIZE) {
        throw new IOException(
                "serialized event is " + record.length + " larger than max of " + MAX_RECORD_SIZE);
    }

    /*
     * Write record if there's room in buffer
     */
    if (dataRecords.size() >= MAX_RECORDS) {
        logger.trace("hit record index max");
        throw new IllegalStateException("reached max payload size");
    } else {
        if (cos.getByteCount() + record.length < MAX_RECORD_SIZE) {
            cos.write(record);
            return true;
        }

        /*
         * If current record is full then flush buffer to a Firehose Record and create a new buffer
         */
        logger.trace("creating new datarecord");
        ByteBuffer data = ByteBuffer.wrap(baos.toByteArray());
        this.dataRecords.add(new Record().withData(data));
        baos.reset();
        cos.resetByteCount();
        cos.resetCount();

        /*
         * If we hit the max number of Firehose Records (4) then notify IPC service that this buffer
         * needs to be sent.
         */
        if (dataRecords.size() >= MAX_RECORDS) {
            logger.trace("hit record index max");
            throw new IllegalStateException("reached max payload size");
        }

        /*
         * Otherwise write the record to the empty internal buffer
         */
        cos.write(record);
    }

    return true;
}

From source file:com.nextdoor.bender.ipc.firehose.FirehoseTransportBufferBatch.java

License:Apache License

@Override
public void close() {
    if (this.cos.getByteCount() != 0 && this.dataRecords.size() < MAX_RECORDS) {
        logger.trace("flushing remainder of buffer");
        ByteBuffer data = ByteBuffer.wrap(baos.toByteArray());
        this.dataRecords.add(new Record().withData(data));
    }

    try {
        this.baos.close();
    } catch (IOException e) {
        // Ignored: ByteArrayOutputStream.close() is a no-op and cannot fail.
    }
}

From source file:com.nextdoor.bender.ipc.firehose.FirehoseTransportBufferSimple.java

License:Apache License

@Override
public boolean add(InternalEvent ievent) throws IllegalStateException, IOException {
    if (dataRecords.size() >= MAX_RECORDS) {
        logger.trace("hit record index max");
        throw new IllegalStateException("reached max payload size");
    }

    byte[] record = this.serializer.serialize(ievent);

    /*
     * Restrict size of individual record
     */
    if (record.length > MAX_RECORD_SIZE) {
        throw new IOException(
                "serialized event is " + record.length + " larger than max of " + MAX_RECORD_SIZE);
    }

    ByteBuffer data = ByteBuffer.wrap(record);
    dataRecords.add(new Record().withData(data));

    return true;
}

From source file:com.tcl.gateway.firehose.log4j.FirehoseAppender.java

License:Open Source License

/**
 * This method is called whenever logging happens via logger.log(..) API
 * calls. This appender accepts log events immediately as long as the buffer
 * is not full (as per the user configuration). The call blocks if the
 * internal buffer is full, until the internal threads free up space by
 * publishing some of the records.
 *
 * If there is any error in parsing log events, those log events are
 * dropped.
 */
@Override
public void append(LoggingEvent logEvent) {
    if (initializationFailed) {
        error("Check the configuration and whether the configured stream " + deliveryStreamName
                + " exists and is active. Failed to initialize kinesis log4j appender: " + name);
        return;
    }
    try {

        if (request == null) {
            request = new PutRecordBatchRequest();
        }

        String message = layout.format(logEvent);
        ByteBuffer data = ByteBuffer.wrap(message.getBytes(encoding));

        request.withRecords(new Record().withData(data));

        // Flush the accumulated batch asynchronously once it reaches the configured size.
        if (request.getRecords().size() >= batchSize) {
            firehoseClient.putRecordBatchAsync(request, asyncCallHander);
        }
    } catch (Exception e) {
        LOGGER.error("Failed to schedule log entry for publishing into Kinesis stream: " + deliveryStreamName);
        errorHandler.error(
                "Failed to schedule log entry for publishing into Kinesis stream: " + deliveryStreamName, e,
                ErrorCode.WRITE_FAILURE, logEvent);
    }
}

From source file:org.apache.nifi.processors.aws.kinesis.firehose.PutKinesisFirehose.java

License:Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {

    final int batchSize = context.getProperty(BATCH_SIZE).asInteger();
    final long maxBufferSizeBytes = context.getProperty(MAX_MESSAGE_BUFFER_SIZE_MB).asDataSize(DataUnit.B)
            .longValue();

    List<FlowFile> flowFiles = filterMessagesByMaxSize(session, batchSize, maxBufferSizeBytes,
            AWS_KINESIS_FIREHOSE_ERROR_MESSAGE);
    HashMap<String, List<FlowFile>> hashFlowFiles = new HashMap<>();
    HashMap<String, List<Record>> recordHash = new HashMap<String, List<Record>>();

    final AmazonKinesisFirehoseClient client = getClient();

    try {
        List<FlowFile> failedFlowFiles = new ArrayList<>();
        List<FlowFile> successfulFlowFiles = new ArrayList<>();

        // Prepare batch of records
        for (int i = 0; i < flowFiles.size(); i++) {
            FlowFile flowFile = flowFiles.get(i);

            final String firehoseStreamName = context.getProperty(KINESIS_FIREHOSE_DELIVERY_STREAM_NAME)
                    .evaluateAttributeExpressions(flowFile).getValue();

            final ByteArrayOutputStream baos = new ByteArrayOutputStream();
            session.exportTo(flowFile, baos);

            if (recordHash.containsKey(firehoseStreamName) == false) {
                recordHash.put(firehoseStreamName, new ArrayList<>());
            }

            if (hashFlowFiles.containsKey(firehoseStreamName) == false) {
                hashFlowFiles.put(firehoseStreamName, new ArrayList<>());
            }

            hashFlowFiles.get(firehoseStreamName).add(flowFile);
            recordHash.get(firehoseStreamName).add(new Record().withData(ByteBuffer.wrap(baos.toByteArray())));
        }

        for (Map.Entry<String, List<Record>> entryRecord : recordHash.entrySet()) {
            String streamName = entryRecord.getKey();
            List<Record> records = entryRecord.getValue();

            if (records.size() > 0) {
                // Send the batch
                PutRecordBatchRequest putRecordBatchRequest = new PutRecordBatchRequest();
                putRecordBatchRequest.setDeliveryStreamName(streamName);
                putRecordBatchRequest.setRecords(records);
                PutRecordBatchResult results = client.putRecordBatch(putRecordBatchRequest);

                // Separate out the successful and failed flow files
                List<PutRecordBatchResponseEntry> responseEntries = results.getRequestResponses();
                for (int i = 0; i < responseEntries.size(); i++) {

                    PutRecordBatchResponseEntry entry = responseEntries.get(i);
                    FlowFile flowFile = hashFlowFiles.get(streamName).get(i);

                    Map<String, String> attributes = new HashMap<>();
                    attributes.put(AWS_KINESIS_FIREHOSE_RECORD_ID, entry.getRecordId());
                    flowFile = session.putAttribute(flowFile, AWS_KINESIS_FIREHOSE_RECORD_ID,
                            entry.getRecordId());
                    if (StringUtils.isBlank(entry.getErrorCode()) == false) {
                        attributes.put(AWS_KINESIS_FIREHOSE_ERROR_CODE, entry.getErrorCode());
                        attributes.put(AWS_KINESIS_FIREHOSE_ERROR_MESSAGE, entry.getErrorMessage());
                        flowFile = session.putAllAttributes(flowFile, attributes);
                        failedFlowFiles.add(flowFile);
                    } else {
                        flowFile = session.putAllAttributes(flowFile, attributes);
                        successfulFlowFiles.add(flowFile);
                    }
                }
                recordHash.get(streamName).clear();
                records.clear();
            }
        }

        if (failedFlowFiles.size() > 0) {
            session.transfer(failedFlowFiles, REL_FAILURE);
            getLogger().error("Failed to publish to kinesis firehose {}", new Object[] { failedFlowFiles });
        }
        if (successfulFlowFiles.size() > 0) {
            session.transfer(successfulFlowFiles, REL_SUCCESS);
            getLogger().info("Successfully published to kinesis firehose {}",
                    new Object[] { successfulFlowFiles });
        }

    } catch (final Exception exception) {
        getLogger().error("Failed to publish to kinesis firehose {} with exception {}",
                new Object[] { flowFiles, exception });
        session.transfer(flowFiles, REL_FAILURE);
        context.yield();
    }
}