Example usage for com.amazonaws.services.kinesis.model PutRecordsResultEntry getErrorMessage

List of usage examples for com.amazonaws.services.kinesis.model PutRecordsResultEntry getErrorMessage

Introduction

On this page you can find example usage for com.amazonaws.services.kinesis.model PutRecordsResultEntry getErrorMessage.

Prototype


public String getErrorMessage() 

Document

The error message for an individual record result.
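A minimal, self-contained sketch of the typical call pattern: send a batch with PutRecords, then use getErrorCode()/getErrorMessage() on each result entry to report per-record failures. The client and request are assumed to be configured elsewhere, and the class and method names here are illustrative only.

import java.util.List;

import com.amazonaws.services.kinesis.AmazonKinesis;
import com.amazonaws.services.kinesis.model.PutRecordsRequest;
import com.amazonaws.services.kinesis.model.PutRecordsResult;
import com.amazonaws.services.kinesis.model.PutRecordsResultEntry;

public class PutRecordsErrorCheck {
    /**
     * Logs the error code and message for every failed entry in a
     * PutRecords response. Entries whose getErrorCode() is null succeeded.
     */
    static void logFailures(AmazonKinesis kinesisClient, PutRecordsRequest request) {
        PutRecordsResult result = kinesisClient.putRecords(request);
        if (result.getFailedRecordCount() > 0) {
            List<PutRecordsResultEntry> entries = result.getRecords();
            for (PutRecordsResultEntry entry : entries) {
                if (entry.getErrorCode() != null) {
                    // getErrorMessage() carries the human-readable detail for
                    // this record's failure, e.g. a throttling explanation.
                    System.err.println(entry.getErrorCode() + ": " + entry.getErrorMessage());
                }
            }
        }
    }
}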

Usage

From source file:com.amazon.kinesis.streaming.agent.tailing.KinesisSender.java

License:Open Source License

@Override
protected BufferSendResult<KinesisRecord> attemptSend(RecordBuffer<KinesisRecord> buffer) {
    activePutRecordsCalls.incrementAndGet();
    IMetricsScope metrics = agentContext.beginScope();
    metrics.addDimension(Metrics.DESTINATION_DIMENSION, "KinesisStream:" + getDestination());
    try {
        BufferSendResult<KinesisRecord> sendResult = null;
        List<PutRecordsRequestEntry> requestRecords = new ArrayList<>();
        for (KinesisRecord data : buffer) {
            PutRecordsRequestEntry record = new PutRecordsRequestEntry();
            record.setData(data.data());
            record.setPartitionKey(data.partitionKey());
            requestRecords.add(record);
        }
        PutRecordsRequest request = new PutRecordsRequest();
        request.setStreamName(getDestination());
        request.setRecords(requestRecords);
        PutRecordsResult result = null;
        Stopwatch timer = Stopwatch.createStarted();
        totalPutRecordsCalls.incrementAndGet();
        try {
            logger.trace("{}: Sending buffer {} to kinesis stream {}...", flow.getId(), buffer,
                    getDestination());
            metrics.addCount(RECORDS_ATTEMPTED_METRIC, requestRecords.size());
            result = agentContext.getKinesisClient().putRecords(request);
            metrics.addCount(SERVICE_ERRORS_METRIC, 0);
        } catch (AmazonServiceException e) {
            metrics.addCount(SERVICE_ERRORS_METRIC, 1);
            totalPutRecordsServiceErrors.incrementAndGet();
            throw e;
        } catch (Exception e) {
            metrics.addCount(SERVICE_ERRORS_METRIC, 1);
            totalPutRecordsOtherErrors.incrementAndGet();
            throw e;
        } finally {
            totalPutRecordsLatency.addAndGet(timer.elapsed(TimeUnit.MILLISECONDS));
        }
        if (sendResult == null) {
            List<Integer> sentRecords = new ArrayList<>(requestRecords.size());
            Multiset<String> errors = HashMultiset.<String>create();
            int index = 0;
            long totalBytesSent = 0;
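            // PutRecords result entries are positional: entry i describes the
            // i-th request entry, so request and response are matched by index.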
            for (final PutRecordsResultEntry responseEntry : result.getRecords()) {
                final PutRecordsRequestEntry record = requestRecords.get(index);
                if (responseEntry.getErrorCode() == null) {
                    sentRecords.add(index);
                    totalBytesSent += record.getData().limit();
                } else {
                    logger.trace("{}:{} Record {} returned error code {}: {}", flow.getId(), buffer, index,
                            responseEntry.getErrorCode(), responseEntry.getErrorMessage());
                    errors.add(responseEntry.getErrorCode());
                }
                ++index;
            }
            if (sentRecords.size() == requestRecords.size()) {
                sendResult = BufferSendResult.succeeded(buffer);
            } else {
                buffer = buffer.remove(sentRecords);
                sendResult = BufferSendResult.succeeded_partially(buffer, requestRecords.size());
            }
            metrics.addData(BYTES_SENT_METRIC, totalBytesSent, StandardUnit.Bytes);
            int failedRecordCount = requestRecords.size() - sentRecords.size();
            metrics.addCount(RECORD_ERRORS_METRIC, failedRecordCount);
            logger.debug("{}:{} Records sent to kinesis stream {}: {}. Failed records: {}", flow.getId(),
                    buffer, getDestination(), sentRecords.size(), failedRecordCount);
            totalRecordsAttempted.addAndGet(requestRecords.size());
            totalRecordsSent.addAndGet(sentRecords.size());
            totalRecordsFailed.addAndGet(failedRecordCount);

            if (logger.isDebugEnabled() && !errors.isEmpty()) {
                synchronized (totalErrors) {
                    StringBuilder strErrors = new StringBuilder();
                    for (Multiset.Entry<String> err : errors.entrySet()) {
                        AtomicLong counter = totalErrors.get(err.getElement());
                        if (counter == null)
                            totalErrors.put(err.getElement(), counter = new AtomicLong());
                        counter.addAndGet(err.getCount());
                        if (strErrors.length() > 0)
                            strErrors.append(", ");
                        strErrors.append(err.getElement()).append(": ").append(err.getCount());
                    }
                    logger.debug("{}:{} Errors from kinesis stream {}: {}", flow.getId(), buffer,
                            flow.getDestination(), strErrors.toString());
                }
            }
        }
        return sendResult;
    } finally {
        metrics.commit();
        activePutRecordsCalls.decrementAndGet();
    }
}
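
Note the partial-failure handling above: the indices of successfully sent records are collected, removed from the buffer, and the remainder is reported through succeeded_partially so that only the failed records are retried on the next attempt.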

From source file:com.streamsets.pipeline.stage.destination.kinesis.KinesisTarget.java

License:Apache License

private void processBulkPut(List<Record> records) throws StageException {
    PutRecordsRequest request = new PutRecordsRequest();
    request.setStreamName(streamName);

    List<PutRecordsRequestEntry> requestEntries = new ArrayList<>();

    int i = 0;
    for (Record record : records) {
        final PutRecordsRequestEntry entry = new PutRecordsRequestEntry();

        ByteArrayOutputStream bytes = new ByteArrayOutputStream(1024 * records.size());
        try {
            DataGenerator generator = generatorFactory.getGenerator(bytes);
            generator.write(record);
            generator.close();

            entry.setData(ByteBuffer.wrap(bytes.toByteArray()));
            entry.setPartitionKey(getPartitionKey(i));

            requestEntries.add(entry);
            ++i;
        } catch (IOException e) {
            handleFailedRecord(record, "Failed to serialize record");
        }
    }

    request.setRecords(requestEntries);
    try {
        PutRecordsResult result = kinesisClient.putRecords(request);

        final Integer failedRecordCount = result.getFailedRecordCount();
        if (failedRecordCount > 0) {
            List<PutRecordsResultEntry> resultEntries = result.getRecords();
            i = 0;
            for (PutRecordsResultEntry resultEntry : resultEntries) {
                final String errorCode = resultEntry.getErrorCode();
                if (null != errorCode) {
                    switch (errorCode) {
                    case "ProvisionedThroughputExceededException":
                    case "InternalFailure":
                        // Records are processed in the order you submit them,
                        // so this will align with the initial record batch
                        handleFailedRecord(records.get(i), errorCode + ":" + resultEntry.getErrorMessage());
                        break;
                    default:
                        validateSuccessfulRecord(records.get(i), resultEntry);
                        break;
                    }
                } else {
                    validateSuccessfulRecord(records.get(i), resultEntry);
                }
                ++i;
            }
        }
    } catch (AmazonClientException e) {
        // Unrecoverable exception -- invalidate the entire batch
        LOG.debug("Exception while putting records", e);
        for (Record record : records) {
            handleFailedRecord(record, "Batch failed due to Amazon service exception: " + e.getMessage());
        }
    }
}
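
The target above routes throttled records to its error handling. A common alternative is to resubmit only the entries that failed; the sketch below illustrates that pattern (the class, method names, and the fixed backoff are assumptions, not part of the SDK or of the source above).

import java.util.ArrayList;
import java.util.List;

import com.amazonaws.services.kinesis.AmazonKinesis;
import com.amazonaws.services.kinesis.model.PutRecordsRequest;
import com.amazonaws.services.kinesis.model.PutRecordsRequestEntry;
import com.amazonaws.services.kinesis.model.PutRecordsResult;
import com.amazonaws.services.kinesis.model.PutRecordsResultEntry;

public class PutRecordsRetry {
    /**
     * Resubmits only the entries that failed, up to maxAttempts times.
     * Relies on result entries being returned in the same order as the
     * request entries.
     */
    static void putWithRetry(AmazonKinesis client, String streamName,
            List<PutRecordsRequestEntry> entries, int maxAttempts) throws InterruptedException {
        List<PutRecordsRequestEntry> pending = entries;
        for (int attempt = 1; attempt <= maxAttempts && !pending.isEmpty(); attempt++) {
            PutRecordsResult result = client.putRecords(
                    new PutRecordsRequest().withStreamName(streamName).withRecords(pending));
            if (result.getFailedRecordCount() == 0) {
                return;
            }
            List<PutRecordsRequestEntry> failed = new ArrayList<>();
            List<PutRecordsResultEntry> results = result.getRecords();
            for (int i = 0; i < results.size(); i++) {
                if (results.get(i).getErrorCode() != null) {
                    failed.add(pending.get(i)); // keep only the failed entries
                }
            }
            pending = failed;
            Thread.sleep(100L * attempt); // crude linear backoff between attempts
        }
    }
}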

From source file:org.apache.nifi.processors.aws.kinesis.stream.PutKinesisStream.java

License:Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {

    final int batchSize = context.getProperty(BATCH_SIZE).asInteger();
    final long maxBufferSizeBytes = context.getProperty(MAX_MESSAGE_BUFFER_SIZE_MB).asDataSize(DataUnit.B)
            .longValue();

    List<FlowFile> flowFiles = filterMessagesByMaxSize(session, batchSize, maxBufferSizeBytes,
            AWS_KINESIS_ERROR_MESSAGE);

    HashMap<String, List<FlowFile>> hashFlowFiles = new HashMap<>();
    HashMap<String, List<PutRecordsRequestEntry>> recordHash = new HashMap<String, List<PutRecordsRequestEntry>>();

    final AmazonKinesisClient client = getClient();

    try {

        List<FlowFile> failedFlowFiles = new ArrayList<>();
        List<FlowFile> successfulFlowFiles = new ArrayList<>();

        // Prepare batch of records
        for (int i = 0; i < flowFiles.size(); i++) {
            FlowFile flowFile = flowFiles.get(i);

            String streamName = context.getProperty(KINESIS_STREAM_NAME).evaluateAttributeExpressions(flowFile)
                    .getValue();

            final ByteArrayOutputStream baos = new ByteArrayOutputStream();
            session.exportTo(flowFile, baos);
            PutRecordsRequestEntry record = new PutRecordsRequestEntry()
                    .withData(ByteBuffer.wrap(baos.toByteArray()));

            String partitionKey = context.getProperty(PutKinesisStream.KINESIS_PARTITION_KEY)
                    .evaluateAttributeExpressions(flowFiles.get(i)).getValue();

            if (StringUtils.isBlank(partitionKey) == false) {
                record.setPartitionKey(partitionKey);
            } else {
                record.setPartitionKey(Integer.toString(randomParitionKeyGenerator.nextInt()));
            }

            if (recordHash.containsKey(streamName) == false) {
                recordHash.put(streamName, new ArrayList<>());
            }
            if (hashFlowFiles.containsKey(streamName) == false) {
                hashFlowFiles.put(streamName, new ArrayList<>());
            }

            hashFlowFiles.get(streamName).add(flowFile);
            recordHash.get(streamName).add(record);
        }

        for (Map.Entry<String, List<PutRecordsRequestEntry>> entryRecord : recordHash.entrySet()) {
            String streamName = entryRecord.getKey();
            List<PutRecordsRequestEntry> records = entryRecord.getValue();

            if (records.size() > 0) {

                PutRecordsRequest putRecordRequest = new PutRecordsRequest();
                putRecordRequest.setStreamName(streamName);
                putRecordRequest.setRecords(records);
                PutRecordsResult results = client.putRecords(putRecordRequest);

                List<PutRecordsResultEntry> responseEntries = results.getRecords();
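                // Result entries align positionally with the request entries,
                // so the FlowFile that produced entry i is looked up by index i.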
                for (int i = 0; i < responseEntries.size(); i++) {
                    PutRecordsResultEntry entry = responseEntries.get(i);
                    FlowFile flowFile = hashFlowFiles.get(streamName).get(i);

                    Map<String, String> attributes = new HashMap<>();
                    attributes.put(AWS_KINESIS_SHARD_ID, entry.getShardId());
                    attributes.put(AWS_KINESIS_SEQUENCE_NUMBER, entry.getSequenceNumber());

                    if (StringUtils.isBlank(entry.getErrorCode()) == false) {
                        attributes.put(AWS_KINESIS_ERROR_CODE, entry.getErrorCode());
                        attributes.put(AWS_KINESIS_ERROR_MESSAGE, entry.getErrorMessage());
                        flowFile = session.putAllAttributes(flowFile, attributes);
                        failedFlowFiles.add(flowFile);
                    } else {
                        flowFile = session.putAllAttributes(flowFile, attributes);
                        successfulFlowFiles.add(flowFile);
                    }
                }
            }
            recordHash.get(streamName).clear();
            records.clear();
        }

        if (failedFlowFiles.size() > 0) {
            session.transfer(failedFlowFiles, REL_FAILURE);
            getLogger().error("Failed to publish to kinesis records {}", new Object[] { failedFlowFiles });
        }
        if (successfulFlowFiles.size() > 0) {
            session.transfer(successfulFlowFiles, REL_SUCCESS);
            getLogger().debug("Successfully published to kinesis records {}",
                    new Object[] { successfulFlowFiles });
        }

    } catch (final Exception exception) {
        getLogger().error("Failed to publish due to exception {} flowfiles {} ",
                new Object[] { exception, flowFiles });
        session.transfer(flowFiles, REL_FAILURE);
        context.yield();
    }
}
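
The batch-size property above matters because PutRecords accepts at most 500 records and 5 MB of data per call (with a 1 MB cap per record). A minimal chunking sketch under those limits, with illustrative names and assuming each entry already fits the per-record cap:

import java.util.List;

import com.amazonaws.services.kinesis.AmazonKinesis;
import com.amazonaws.services.kinesis.model.PutRecordsRequest;
import com.amazonaws.services.kinesis.model.PutRecordsRequestEntry;

public class PutRecordsChunker {
    private static final int MAX_RECORDS_PER_CALL = 500; // PutRecords API limit

    /** Splits a large batch into API-sized chunks and sends each one. */
    static void sendInChunks(AmazonKinesis client, String streamName,
            List<PutRecordsRequestEntry> entries) {
        for (int start = 0; start < entries.size(); start += MAX_RECORDS_PER_CALL) {
            int end = Math.min(start + MAX_RECORDS_PER_CALL, entries.size());
            client.putRecords(new PutRecordsRequest()
                    .withStreamName(streamName)
                    .withRecords(entries.subList(start, end)));
        }
    }
}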