Example usage for com.amazonaws.services.kinesisfirehose.model PutRecordBatchRequest PutRecordBatchRequest

Introduction

On this page you can find example usage for the com.amazonaws.services.kinesisfirehose.model.PutRecordBatchRequest constructor.

Prototype

public PutRecordBatchRequest()
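
Before the full examples below, here is a minimal, self-contained sketch of constructing and sending a PutRecordBatchRequest. The client builder call and the stream name are illustrative placeholders, not taken from any of the sources on this page.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehose;
import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehoseClientBuilder;
import com.amazonaws.services.kinesisfirehose.model.PutRecordBatchRequest;
import com.amazonaws.services.kinesisfirehose.model.PutRecordBatchResult;
import com.amazonaws.services.kinesisfirehose.model.Record;

public class PutRecordBatchExample {
    public static void main(String[] args) {
        // Placeholder: uses the default credential/region provider chain.
        AmazonKinesisFirehose client = AmazonKinesisFirehoseClientBuilder.defaultClient();

        // Each record carries its payload as a ByteBuffer.
        Record record = new Record()
                .withData(ByteBuffer.wrap("hello firehose\n".getBytes(StandardCharsets.UTF_8)));

        // A single request may carry at most 500 records.
        PutRecordBatchRequest request = new PutRecordBatchRequest()
                .withDeliveryStreamName("my-delivery-stream") // placeholder name
                .withRecords(Arrays.asList(record));

        PutRecordBatchResult result = client.putRecordBatch(request);
        System.out.println("Failed puts: " + result.getFailedPutCount());
    }
}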

Usage

From source file: AbstractAmazonKinesisFirehoseDelivery.java

License: Open Source License

/**
 * Method to perform PutRecordBatch operation with the given record list.
 *
 * @param recordList the collection of records
 * @return the output of PutRecordBatch
 */
private static PutRecordBatchResult putRecordBatch(List<Record> recordList) {
    PutRecordBatchRequest putRecordBatchRequest = new PutRecordBatchRequest();
    putRecordBatchRequest.setDeliveryStreamName(deliveryStreamName);
    putRecordBatchRequest.setRecords(recordList);

    // A single PutRecordBatch request can contain at most 500 records.
    return firehoseClient.putRecordBatch(putRecordBatchRequest);
}
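
Note that this helper returns the result without inspecting it, and a PutRecordBatch call can succeed as a whole while individual records fail. A hedged sketch (not part of the original source) of retrying just the failed records, reusing the helper above:

private static void putRecordBatchWithRetry(List<Record> recordList) {
    List<Record> pending = recordList;
    for (int attempt = 0; attempt < 3 && !pending.isEmpty(); attempt++) {
        PutRecordBatchResult result = putRecordBatch(pending);
        if (result.getFailedPutCount() == 0) {
            return;
        }
        // Responses are positional: entry i describes record i, and a
        // non-null error code marks a record that must be resent.
        List<Record> failed = new ArrayList<>();
        List<PutRecordBatchResponseEntry> responses = result.getRequestResponses();
        for (int i = 0; i < responses.size(); i++) {
            if (responses.get(i).getErrorCode() != null) {
                failed.add(pending.get(i));
            }
        }
        pending = failed;
    }
}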

From source file: com.amazon.kinesis.streaming.agent.tailing.FirehoseSender.java

License: Open Source License

@Override
protected BufferSendResult<FirehoseRecord> attemptSend(RecordBuffer<FirehoseRecord> buffer) {
    activeBatchPutCalls.incrementAndGet();
    IMetricsScope metrics = agentContext.beginScope();
    metrics.addDimension(Metrics.DESTINATION_DIMENSION, "DeliveryStream:" + getDestination());
    try {
        BufferSendResult<FirehoseRecord> sendResult = null;
        List<Record> requestRecords = new ArrayList<>();
        for (FirehoseRecord data : buffer) {
            Record record = new Record();
            record.setData(data.data());
            requestRecords.add(record);
        }
        PutRecordBatchRequest request = new PutRecordBatchRequest();
        request.setRecords(requestRecords);
        request.setDeliveryStreamName(getDestination());
        PutRecordBatchResult result = null;
        Stopwatch timer = Stopwatch.createStarted();
        totalBatchPutCalls.incrementAndGet();
        try {
            logger.trace("{}: Sending buffer {} to firehose {}...", flow.getId(), buffer, getDestination());
            metrics.addCount(RECORDS_ATTEMPTED_METRIC, requestRecords.size());
            result = agentContext.getFirehoseClient().putRecordBatch(request);
            metrics.addCount(SERVICE_ERRORS_METRIC, 0);
        } catch (AmazonServiceException e) {
            metrics.addCount(SERVICE_ERRORS_METRIC, 1);
            totalBatchPutServiceErrors.incrementAndGet();
            throw e;
        } catch (Exception e) {
            metrics.addCount(SERVICE_ERRORS_METRIC, 1);
            totalBatchPutOtherErrors.incrementAndGet();
            throw e;
        } finally {
            totalBatchPutLatency.addAndGet(timer.elapsed(TimeUnit.MILLISECONDS));
        }
        if (sendResult == null) {
            List<Integer> sentRecords = new ArrayList<>(requestRecords.size());
            Multiset<String> errors = HashMultiset.<String>create();
            int index = 0;
            long totalBytesSent = 0;
            for (PutRecordBatchResponseEntry responseEntry : result.getRequestResponses()) {
                Record record = requestRecords.get(index);
                if (responseEntry.getErrorCode() == null) {
                    sentRecords.add(index);
                    totalBytesSent += record.getData().limit();
                } else {
                    logger.trace("{}:{} Record {} returned error code {}: {}", flow.getId(), buffer, index,
                            responseEntry.getErrorCode(), responseEntry.getErrorMessage());
                    errors.add(responseEntry.getErrorCode());
                }
                ++index;
            }
            if (sentRecords.size() == requestRecords.size()) {
                sendResult = BufferSendResult.succeeded(buffer);
            } else {
                buffer = buffer.remove(sentRecords);
                sendResult = BufferSendResult.succeeded_partially(buffer, requestRecords.size());
            }
            metrics.addData(BYTES_SENT_METRIC, totalBytesSent, StandardUnit.Bytes);
            int failedRecordCount = requestRecords.size() - sentRecords.size();
            metrics.addCount(RECORD_ERRORS_METRIC, failedRecordCount);
            logger.debug("{}:{} Records sent firehose {}: {}. Failed records: {}", flow.getId(), buffer,
                    getDestination(), sentRecords.size(), failedRecordCount);
            totalRecordsAttempted.addAndGet(requestRecords.size());
            totalRecordsSent.addAndGet(sentRecords.size());
            totalRecordsFailed.addAndGet(failedRecordCount);

            if (logger.isDebugEnabled() && !errors.isEmpty()) {
                synchronized (totalErrors) {
                    StringBuilder strErrors = new StringBuilder();
                    for (Multiset.Entry<String> err : errors.entrySet()) {
                        AtomicLong counter = totalErrors.get(err.getElement());
                        if (counter == null)
                            totalErrors.put(err.getElement(), counter = new AtomicLong());
                        counter.addAndGet(err.getCount());
                        if (strErrors.length() > 0)
                            strErrors.append(", ");
                        strErrors.append(err.getElement()).append(": ").append(err.getCount());
                    }
                    logger.debug("{}:{} Errors from firehose {}: {}", flow.getId(), buffer,
                            flow.getDestination(), strErrors.toString());
                }
            }
        }
        return sendResult;
    } finally {
        metrics.commit();
        activeBatchPutCalls.decrementAndGet();
    }
}
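
This sender separates transport failures from per-record failures: the HTTP call can succeed while individual PutRecordBatchResponseEntry items carry error codes, so the buffer is trimmed to just the failed records and reported as succeeded_partially rather than retried wholesale.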

From source file: com.nextdoor.bender.ipc.firehose.FirehoseTransport.java

License: Apache License

public void sendBatch(TransportBuffer buffer) {
    FirehoseTransportBuffer tb = (FirehoseTransportBuffer) buffer;

    /*
     * Create batch put request with given records
     */
    PutRecordBatchRequest batch = new PutRecordBatchRequest().withDeliveryStreamName(this.deliveryStreamName)
            .withRecords(tb.getInternalBuffer());

    /*
     * Put records
     */
    client.putRecordBatch(batch);
}
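
sendBatch discards the PutRecordBatchResult, so partial failures within the batch go unnoticed here. A variant along these lines (an illustrative sketch, not part of the Bender source) would at least surface the failure count:

public void sendBatchChecked(TransportBuffer buffer) {
    FirehoseTransportBuffer tb = (FirehoseTransportBuffer) buffer;

    PutRecordBatchRequest batch = new PutRecordBatchRequest()
            .withDeliveryStreamName(this.deliveryStreamName)
            .withRecords(tb.getInternalBuffer());

    PutRecordBatchResult result = client.putRecordBatch(batch);
    if (result.getFailedPutCount() > 0) {
        // Hypothetical handling; a real transport would re-queue or raise.
        throw new RuntimeException(result.getFailedPutCount() + " records failed to put");
    }
}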

From source file: com.streamsets.pipeline.stage.destination.kinesis.FirehoseTarget.java

License: Apache License

private void flush(List<com.amazonaws.services.kinesisfirehose.model.Record> records, List<Record> sdcRecords)
        throws StageException {

    if (records.isEmpty()) {
        return;
    }

    PutRecordBatchRequest batchRequest = new PutRecordBatchRequest().withDeliveryStreamName(conf.streamName)
            .withRecords(records);
    PutRecordBatchResult result = firehoseClient.putRecordBatch(batchRequest);
    int numFailed = result.getFailedPutCount();
    if (numFailed != 0) {
        List<PutRecordBatchResponseEntry> responses = result.getRequestResponses();
        for (int i = 0; i < responses.size(); i++) {
            PutRecordBatchResponseEntry response = responses.get(i);
            if (response.getErrorCode() != null) {
                errorRecordHandler.onError(new OnRecordErrorException(sdcRecords.get(i), Errors.KINESIS_05,
                        sdcRecords.get(i), response.getErrorMessage()));
            }
        }
    }

    recordCounter += records.size();

    records.clear();
    sdcRecords.clear();
}
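
The parallel sdcRecords list matters here: Firehose reports failures positionally through getRequestResponses(), so keeping the source records in the same order as the sent records is what lets each error be attributed to the right one.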

From source file: com.tcl.gateway.firehose.log4j.FirehoseAppender.java

License: Open Source License

/**
 * This method is called whenever logging happens via logger.log(..) API
 * calls. This appender takes in log events instantly as long as the buffer
 * is not full (as per user configuration). If the internal buffer is full,
 * this call blocks until the internal threads free up space by publishing
 * some of the records.
 *
 * If there is any error in parsing log events, those log events are
 * dropped.
 */
@Override
public void append(LoggingEvent logEvent) {
    if (initializationFailed) {
        error("Check the configuration and whether the configured stream " + deliveryStreamName
                + " exists and is active. Failed to initialize kinesis log4j appender: " + name);
        return;
    }
    try {

        if (request == null) {
            request = new PutRecordBatchRequest();
        }

        String message = layout.format(logEvent);
        ByteBuffer data = ByteBuffer.wrap(message.getBytes(encoding));

        request.withRecords(new Record().withData(data));

        if (request.getRecords().size() >= batchSize) {
            firehoseClient.putRecordBatchAsync(request, asyncCallHander);
        }
    } catch (Exception e) {
        LOGGER.error("Failed to schedule log entry for publishing into Kinesis stream: " + deliveryStreamName);
        errorHandler.error(
                "Failed to schedule log entry for publishing into Kinesis stream: " + deliveryStreamName, e,
                ErrorCode.WRITE_FAILURE, logEvent);
    }
}
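
One subtlety in this appender: records accumulate on a single shared request, so the request must be replaced once a batch has been submitted, or later appends would resend it. A hedged sketch of such a flush (assuming the same fields as above; whether the real appender resets the request inside asyncCallHander is not shown in this excerpt):

private void flushIfFull() {
    if (request != null && request.getRecords().size() >= batchSize) {
        PutRecordBatchRequest toSend = request;
        // Start a fresh request so new log events do not pile onto the
        // batch that is already in flight. deliveryStreamName is assumed
        // to be configured on the appender.
        request = new PutRecordBatchRequest().withDeliveryStreamName(deliveryStreamName);
        firehoseClient.putRecordBatchAsync(toSend, asyncCallHander);
    }
}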

From source file: org.apache.nifi.processors.aws.kinesis.firehose.PutKinesisFirehose.java

License: Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {

    final int batchSize = context.getProperty(BATCH_SIZE).asInteger();
    final long maxBufferSizeBytes = context.getProperty(MAX_MESSAGE_BUFFER_SIZE_MB).asDataSize(DataUnit.B)
            .longValue();

    List<FlowFile> flowFiles = filterMessagesByMaxSize(session, batchSize, maxBufferSizeBytes,
            AWS_KINESIS_FIREHOSE_ERROR_MESSAGE);
    HashMap<String, List<FlowFile>> hashFlowFiles = new HashMap<>();
    HashMap<String, List<Record>> recordHash = new HashMap<String, List<Record>>();

    final AmazonKinesisFirehoseClient client = getClient();

    try {
        List<FlowFile> failedFlowFiles = new ArrayList<>();
        List<FlowFile> successfulFlowFiles = new ArrayList<>();

        // Prepare batch of records
        for (int i = 0; i < flowFiles.size(); i++) {
            FlowFile flowFile = flowFiles.get(i);

            final String firehoseStreamName = context.getProperty(KINESIS_FIREHOSE_DELIVERY_STREAM_NAME)
                    .evaluateAttributeExpressions(flowFile).getValue();

            final ByteArrayOutputStream baos = new ByteArrayOutputStream();
            session.exportTo(flowFile, baos);

            if (recordHash.containsKey(firehoseStreamName) == false) {
                recordHash.put(firehoseStreamName, new ArrayList<>());
            }

            if (hashFlowFiles.containsKey(firehoseStreamName) == false) {
                hashFlowFiles.put(firehoseStreamName, new ArrayList<>());
            }

            hashFlowFiles.get(firehoseStreamName).add(flowFile);
            recordHash.get(firehoseStreamName).add(new Record().withData(ByteBuffer.wrap(baos.toByteArray())));
        }

        for (Map.Entry<String, List<Record>> entryRecord : recordHash.entrySet()) {
            String streamName = entryRecord.getKey();
            List<Record> records = entryRecord.getValue();

            if (records.size() > 0) {
                // Send the batch
                PutRecordBatchRequest putRecordBatchRequest = new PutRecordBatchRequest();
                putRecordBatchRequest.setDeliveryStreamName(streamName);
                putRecordBatchRequest.setRecords(records);
                PutRecordBatchResult results = client.putRecordBatch(putRecordBatchRequest);

                // Separate out the successful and failed flow files
                List<PutRecordBatchResponseEntry> responseEntries = results.getRequestResponses();
                for (int i = 0; i < responseEntries.size(); i++) {

                    PutRecordBatchResponseEntry entry = responseEntries.get(i);
                    FlowFile flowFile = hashFlowFiles.get(streamName).get(i);

                    Map<String, String> attributes = new HashMap<>();
                    attributes.put(AWS_KINESIS_FIREHOSE_RECORD_ID, entry.getRecordId());
                    flowFile = session.putAttribute(flowFile, AWS_KINESIS_FIREHOSE_RECORD_ID,
                            entry.getRecordId());
                    if (StringUtils.isBlank(entry.getErrorCode()) == false) {
                        attributes.put(AWS_KINESIS_FIREHOSE_ERROR_CODE, entry.getErrorCode());
                        attributes.put(AWS_KINESIS_FIREHOSE_ERROR_MESSAGE, entry.getErrorMessage());
                        flowFile = session.putAllAttributes(flowFile, attributes);
                        failedFlowFiles.add(flowFile);
                    } else {
                        flowFile = session.putAllAttributes(flowFile, attributes);
                        successfulFlowFiles.add(flowFile);
                    }
                }
                recordHash.get(streamName).clear();
                records.clear();
            }
        }

        if (failedFlowFiles.size() > 0) {
            session.transfer(failedFlowFiles, REL_FAILURE);
            getLogger().error("Failed to publish to kinesis firehose {}", new Object[] { failedFlowFiles });
        }
        if (successfulFlowFiles.size() > 0) {
            session.transfer(successfulFlowFiles, REL_SUCCESS);
            getLogger().info("Successfully published to kinesis firehose {}",
                    new Object[] { successfulFlowFiles });
        }

    } catch (final Exception exception) {
        getLogger().error("Failed to publish to kinesis firehose {} with exception {}",
                new Object[] { flowFiles, exception });
        session.transfer(flowFiles, REL_FAILURE);
        context.yield();
    }
}
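
Grouping records into per-stream maps is necessary because each PutRecordBatchRequest addresses exactly one delivery stream; flow files bound for different streams cannot share a batch, even within a single onTrigger invocation.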

From source file: org.voltdb.exportclient.FirehoseSink.java

License: Open Source License

ListenableFuture<?> asWriteTask(List<Record> recordsList) {
    final int hashed = ThreadLocalRandom.current().nextInt(m_concurrentWriters);
    if (m_executors.get(hashed).isShutdown()) {
        return Futures
                .immediateFailedFuture(new FirehoseExportException("Firehose sink executor is shut down"));
    }
    return m_executors.get(hashed).submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            PutRecordBatchRequest batchRequest = new PutRecordBatchRequest()
                    .withDeliveryStreamName(m_streamName).withRecords(recordsList);
            applyBackPressure();
            PutRecordBatchResult res = m_client.putRecordBatch(batchRequest);
            if (res.getFailedPutCount() > 0) {
                setBackPressure(true);
                String msg = "%d Firehose records failed";
                LOG.warn(msg, res.getFailedPutCount());
                throw new FirehoseExportException(msg, res.getFailedPutCount());
            }
            setBackPressure(false);
            return null;
        }
    });
}
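
The backpressure calls bracket the put: a batch with getFailedPutCount() > 0 turns backpressure on, slowing subsequent writers until a later batch completes cleanly and turns it off.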

From source file: org.voltdb.exportclient.FirehoseSink.java

License: Open Source License

public void syncWrite(Queue<List<Record>> records) {

    for (List<Record> recordsList : records) {
        int retry = MAX_RETRY;
        while (retry > 0) {
            try {
                PutRecordBatchRequest batchRequest = new PutRecordBatchRequest()
                        .withDeliveryStreamName(m_streamName).withRecords(recordsList);
                PutRecordBatchResult res = m_client.putRecordBatch(batchRequest);
                if (res.getFailedPutCount() > 0) {
                    String msg = "Records failed with the batch: %d, retry: #%d";
                    if (retry == 1) {
                        throw new FirehoseExportException(msg, res.getFailedPutCount(),
                                (MAX_RETRY - retry + 1));
                    } else {
                        LOG.warn(msg, res.getFailedPutCount(), (MAX_RETRY - retry + 1));
                        backoffSleep(retry);
                    }
                } else {
                    recordsList.clear();
                    break;
                }
            } catch (ServiceUnavailableException e) {
                if (retry == 1) {
                    throw new FirehoseExportException("Failed to send record batch", e, true);
                } else {
                    LOG.warn("Failed to send record batch: %s. Retry #%d", e.getErrorMessage(),
                            (MAX_RETRY - retry + 1));
                    backoffSleep(retry);
                }
            }
            retry--;
        }
    }
}
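
backoffSleep is referenced but not shown in this excerpt. A plausible shape for it (a hypothetical sketch, not the VoltDB source) is an exponential delay keyed to the remaining retry budget:

// Hypothetical: fewer retries left means more failures so far, so wait longer.
private void backoffSleep(int retriesLeft) {
    long millis = 100L << (MAX_RETRY - retriesLeft); // 100 ms, 200 ms, 400 ms, ...
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}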