Example usage for com.amazonaws.services.kinesisfirehose.model PutRecordBatchResponseEntry getErrorCode

Introduction

On this page you can find example usage of com.amazonaws.services.kinesisfirehose.model.PutRecordBatchResponseEntry.getErrorCode().

Prototype


public String getErrorCode() 

Document

The error code for an individual record result.
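Below is a minimal sketch of the common pattern, assuming an AWS SDK for Java v1 client built with AmazonKinesisFirehoseClientBuilder and a hypothetical delivery stream named "example-stream": a null error code marks a delivered record, while a non-null code marks one that failed and should be collected for retry.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehose;
import com.amazonaws.services.kinesisfirehose.AmazonKinesisFirehoseClientBuilder;
import com.amazonaws.services.kinesisfirehose.model.PutRecordBatchRequest;
import com.amazonaws.services.kinesisfirehose.model.PutRecordBatchResponseEntry;
import com.amazonaws.services.kinesisfirehose.model.PutRecordBatchResult;
import com.amazonaws.services.kinesisfirehose.model.Record;

public class GetErrorCodeSketch {
    public static void main(String[] args) {
        AmazonKinesisFirehose client = AmazonKinesisFirehoseClientBuilder.defaultClient();

        List<Record> records = new ArrayList<>();
        records.add(new Record().withData(ByteBuffer.wrap("hello\n".getBytes(StandardCharsets.UTF_8))));

        PutRecordBatchResult result = client.putRecordBatch(new PutRecordBatchRequest()
                .withDeliveryStreamName("example-stream") // hypothetical stream name
                .withRecords(records));

        // Response entries line up index-for-index with the request records.
        List<Record> failed = new ArrayList<>();
        List<PutRecordBatchResponseEntry> entries = result.getRequestResponses();
        for (int i = 0; i < entries.size(); i++) {
            PutRecordBatchResponseEntry entry = entries.get(i);
            if (entry.getErrorCode() != null) {
                // Non-null error code: this record was not delivered.
                System.err.println(entry.getErrorCode() + ": " + entry.getErrorMessage());
                failed.add(records.get(i));
            }
        }
        // "failed" now holds only the undelivered records, ready to be resent.
    }
}

PutRecordBatch can succeed partially (see getFailedPutCount() on the result), so each of the examples below branches on this null check to separate delivered records from failed ones.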

Usage

From source file: com.amazon.kinesis.streaming.agent.tailing.FirehoseSender.java

License: Open Source License

@Override
protected BufferSendResult<FirehoseRecord> attemptSend(RecordBuffer<FirehoseRecord> buffer) {
    activeBatchPutCalls.incrementAndGet();
    IMetricsScope metrics = agentContext.beginScope();
    metrics.addDimension(Metrics.DESTINATION_DIMENSION, "DeliveryStream:" + getDestination());
    try {
        BufferSendResult<FirehoseRecord> sendResult = null;
        List<Record> requestRecords = new ArrayList<>();
        for (FirehoseRecord data : buffer) {
            Record record = new Record();
            record.setData(data.data());
            requestRecords.add(record);
        }
        PutRecordBatchRequest request = new PutRecordBatchRequest();
        request.setRecords(requestRecords);
        request.setDeliveryStreamName(getDestination());
        PutRecordBatchResult result = null;
        Stopwatch timer = Stopwatch.createStarted();
        totalBatchPutCalls.incrementAndGet();
        try {
            logger.trace("{}: Sending buffer {} to firehose {}...", flow.getId(), buffer, getDestination());
            metrics.addCount(RECORDS_ATTEMPTED_METRIC, requestRecords.size());
            result = agentContext.getFirehoseClient().putRecordBatch(request);
            metrics.addCount(SERVICE_ERRORS_METRIC, 0);
        } catch (AmazonServiceException e) {
            metrics.addCount(SERVICE_ERRORS_METRIC, 1);
            totalBatchPutServiceErrors.incrementAndGet();
            throw e;
        } catch (Exception e) {
            metrics.addCount(SERVICE_ERRORS_METRIC, 1);
            totalBatchPutOtherErrors.incrementAndGet();
            throw e;
        } finally {
            totalBatchPutLatency.addAndGet(timer.elapsed(TimeUnit.MILLISECONDS));
        }
        if (sendResult == null) {
            List<Integer> sentRecords = new ArrayList<>(requestRecords.size());
            Multiset<String> errors = HashMultiset.<String>create();
            int index = 0;
            long totalBytesSent = 0;
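            // Walk the per-record responses: a null error code means the record was accepted by Firehose.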
            for (PutRecordBatchResponseEntry responseEntry : result.getRequestResponses()) {
                Record record = requestRecords.get(index);
                if (responseEntry.getErrorCode() == null) {
                    sentRecords.add(index);
                    totalBytesSent += record.getData().limit();
                } else {
                    logger.trace("{}:{} Record {} returned error code {}: {}", flow.getId(), buffer, index,
                            responseEntry.getErrorCode(), responseEntry.getErrorMessage());
                    errors.add(responseEntry.getErrorCode());
                }
                ++index;
            }
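            // All records delivered means full success; otherwise strip the delivered ones so only failures remain for retry.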
            if (sentRecords.size() == requestRecords.size()) {
                sendResult = BufferSendResult.succeeded(buffer);
            } else {
                buffer = buffer.remove(sentRecords);
                sendResult = BufferSendResult.succeeded_partially(buffer, requestRecords.size());
            }
            metrics.addData(BYTES_SENT_METRIC, totalBytesSent, StandardUnit.Bytes);
            int failedRecordCount = requestRecords.size() - sentRecords.size();
            metrics.addCount(RECORD_ERRORS_METRIC, failedRecordCount);
            logger.debug("{}:{} Records sent firehose {}: {}. Failed records: {}", flow.getId(), buffer,
                    getDestination(), sentRecords.size(), failedRecordCount);
            totalRecordsAttempted.addAndGet(requestRecords.size());
            totalRecordsSent.addAndGet(sentRecords.size());
            totalRecordsFailed.addAndGet(failedRecordCount);

            if (logger.isDebugEnabled() && !errors.isEmpty()) {
                synchronized (totalErrors) {
                    StringBuilder strErrors = new StringBuilder();
                    for (Multiset.Entry<String> err : errors.entrySet()) {
                        AtomicLong counter = totalErrors.get(err.getElement());
                        if (counter == null)
                            totalErrors.put(err.getElement(), counter = new AtomicLong());
                        counter.addAndGet(err.getCount());
                        if (strErrors.length() > 0)
                            strErrors.append(", ");
                        strErrors.append(err.getElement()).append(": ").append(err.getCount());
                    }
                    logger.debug("{}:{} Errors from firehose {}: {}", flow.getId(), buffer,
                            flow.getDestination(), strErrors.toString());
                }
            }
        }
        return sendResult;
    } finally {
        metrics.commit();
        activeBatchPutCalls.decrementAndGet();
    }
}

From source file: com.streamsets.pipeline.stage.destination.kinesis.FirehoseTarget.java

License: Apache License

private void flush(List<com.amazonaws.services.kinesisfirehose.model.Record> records, List<Record> sdcRecords)
        throws StageException {

    if (records.isEmpty()) {
        return;
    }

    PutRecordBatchRequest batchRequest = new PutRecordBatchRequest().withDeliveryStreamName(conf.streamName)
            .withRecords(records);
    PutRecordBatchResult result = firehoseClient.putRecordBatch(batchRequest);
    int numFailed = result.getFailedPutCount();
    if (numFailed != 0) {
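        // Response entries line up index-for-index with the request records, so i maps back to sdcRecords.get(i).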
        List<PutRecordBatchResponseEntry> responses = result.getRequestResponses();
        for (int i = 0; i < responses.size(); i++) {
            PutRecordBatchResponseEntry response = responses.get(i);
            if (response.getErrorCode() != null) {
                errorRecordHandler.onError(new OnRecordErrorException(sdcRecords.get(i), Errors.KINESIS_05,
                        sdcRecords.get(i), response.getErrorMessage()));
            }
        }
    }

    recordCounter += records.size();

    records.clear();
    sdcRecords.clear();
}

From source file: org.apache.nifi.processors.aws.kinesis.firehose.PutKinesisFirehose.java

License: Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {

    final int batchSize = context.getProperty(BATCH_SIZE).asInteger();
    final long maxBufferSizeBytes = context.getProperty(MAX_MESSAGE_BUFFER_SIZE_MB).asDataSize(DataUnit.B)
            .longValue();

    List<FlowFile> flowFiles = filterMessagesByMaxSize(session, batchSize, maxBufferSizeBytes,
            AWS_KINESIS_FIREHOSE_ERROR_MESSAGE);
    HashMap<String, List<FlowFile>> hashFlowFiles = new HashMap<>();
    HashMap<String, List<Record>> recordHash = new HashMap<String, List<Record>>();

    final AmazonKinesisFirehoseClient client = getClient();

    try {
        List<FlowFile> failedFlowFiles = new ArrayList<>();
        List<FlowFile> successfulFlowFiles = new ArrayList<>();

        // Prepare batch of records
        for (int i = 0; i < flowFiles.size(); i++) {
            FlowFile flowFile = flowFiles.get(i);

            final String firehoseStreamName = context.getProperty(KINESIS_FIREHOSE_DELIVERY_STREAM_NAME)
                    .evaluateAttributeExpressions(flowFile).getValue();

            final ByteArrayOutputStream baos = new ByteArrayOutputStream();
            session.exportTo(flowFile, baos);

            if (recordHash.containsKey(firehoseStreamName) == false) {
                recordHash.put(firehoseStreamName, new ArrayList<>());
            }

            if (hashFlowFiles.containsKey(firehoseStreamName) == false) {
                hashFlowFiles.put(firehoseStreamName, new ArrayList<>());
            }

            hashFlowFiles.get(firehoseStreamName).add(flowFile);
            recordHash.get(firehoseStreamName).add(new Record().withData(ByteBuffer.wrap(baos.toByteArray())));
        }

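        // Send one PutRecordBatch per delivery stream, then map each response entry back to its flow file by position.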
        for (Map.Entry<String, List<Record>> entryRecord : recordHash.entrySet()) {
            String streamName = entryRecord.getKey();
            List<Record> records = entryRecord.getValue();

            if (records.size() > 0) {
                // Send the batch
                PutRecordBatchRequest putRecordBatchRequest = new PutRecordBatchRequest();
                putRecordBatchRequest.setDeliveryStreamName(streamName);
                putRecordBatchRequest.setRecords(records);
                PutRecordBatchResult results = client.putRecordBatch(putRecordBatchRequest);

                // Separate out the successful and failed flow files
                List<PutRecordBatchResponseEntry> responseEntries = results.getRequestResponses();
                for (int i = 0; i < responseEntries.size(); i++) {

                    PutRecordBatchResponseEntry entry = responseEntries.get(i);
                    FlowFile flowFile = hashFlowFiles.get(streamName).get(i);

                    Map<String, String> attributes = new HashMap<>();
                    attributes.put(AWS_KINESIS_FIREHOSE_RECORD_ID, entry.getRecordId());
                    flowFile = session.putAttribute(flowFile, AWS_KINESIS_FIREHOSE_RECORD_ID,
                            entry.getRecordId());
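                    // getErrorCode() returns null for delivered records; isBlank(...) treats that null as success too.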
                    if (StringUtils.isBlank(entry.getErrorCode()) == false) {
                        attributes.put(AWS_KINESIS_FIREHOSE_ERROR_CODE, entry.getErrorCode());
                        attributes.put(AWS_KINESIS_FIREHOSE_ERROR_MESSAGE, entry.getErrorMessage());
                        flowFile = session.putAllAttributes(flowFile, attributes);
                        failedFlowFiles.add(flowFile);
                    } else {
                        flowFile = session.putAllAttributes(flowFile, attributes);
                        successfulFlowFiles.add(flowFile);
                    }
                }
                recordHash.get(streamName).clear();
                records.clear();
            }
        }

        if (failedFlowFiles.size() > 0) {
            session.transfer(failedFlowFiles, REL_FAILURE);
            getLogger().error("Failed to publish to kinesis firehose {}", new Object[] { failedFlowFiles });
        }
        if (successfulFlowFiles.size() > 0) {
            session.transfer(successfulFlowFiles, REL_SUCCESS);
            getLogger().info("Successfully published to kinesis firehose {}",
                    new Object[] { successfulFlowFiles });
        }

    } catch (final Exception exception) {
        getLogger().error("Failed to publish to kinesis firehose {} with exception {}",
                new Object[] { flowFiles, exception });
        session.transfer(flowFiles, REL_FAILURE);
        context.yield();
    }
}