Example usage for com.amazonaws.services.dynamodbv2.model BatchWriteItemResult getUnprocessedItems

Introduction

This page shows example usages of com.amazonaws.services.dynamodbv2.model BatchWriteItemResult getUnprocessedItems.

Prototype


public java.util.Map<String, java.util.List<WriteRequest>> getUnprocessedItems() 

Document

A map of tables and requests against those tables that were not processed.
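
The pattern across the examples below is the same: DynamoDB may process only part of a batch, so the map returned by getUnprocessedItems should be resubmitted until it is empty, pausing between attempts. A minimal sketch of that loop, assuming an AmazonDynamoDB client and the usual com.amazonaws.services.dynamodbv2.model imports (the method name and backoff values are illustrative; production code should also cap the number of retries, as the examples below do):

private void writeWithRetry(final AmazonDynamoDB client, final String tableName,
        final List<WriteRequest> writes) throws InterruptedException {
    final Map<String, List<WriteRequest>> requestItems = new HashMap<>();
    requestItems.put(tableName, writes);
    BatchWriteItemResult result = client.batchWriteItem(
            new BatchWriteItemRequest().withRequestItems(requestItems));
    long backoffMs = 100; // illustrative starting backoff
    Map<String, List<WriteRequest>> unprocessed = result.getUnprocessedItems();
    while (unprocessed != null && !unprocessed.isEmpty()) {
        Thread.sleep(backoffMs); // give throttled write capacity time to recover
        backoffMs = Math.min(backoffMs * 2, 5000); // cap the exponential backoff
        // Resubmit only the requests DynamoDB reported as unprocessed
        result = client.batchWriteItem(new BatchWriteItemRequest().withRequestItems(unprocessed));
        unprocessed = result.getUnprocessedItems();
    }
}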

Usage

From source file:com.clicktravel.infrastructure.persistence.aws.dynamodb.DynamoDbTemplate.java

License:Apache License

/**
 * This method removes items that were not processed in the batch write. The result of the batch write tells us
 * which PutRequests were not processed, and from this we use our maps to find the items they belonged to.
 * Those items are then removed from the results so that their versions are not updated.
 * @param itemsWritten - the successfully written items
 * @param itemVersions - the map of version to items, so we know which versions belong to which items
 * @param itemPutRequests - the map of put requests to items, so we know which put request relates to which item
 * @param itemResult - the result of the batch write, we use this to get the unprocessed items
 */
private <T extends Item> void removeUnprocessedItems(final List<T> itemsWritten,
        final Map<T, Long> itemVersions, final Map<PutRequest, T> itemPutRequests,
        final BatchWriteItemResult itemResult) {
    if (itemResult != null && itemResult.getUnprocessedItems() != null) {
        for (final String tableName : itemResult.getUnprocessedItems().keySet()) {
            for (final WriteRequest writeRequest : itemResult.getUnprocessedItems().get(tableName)) {
                itemVersions.remove(itemPutRequests.get(writeRequest.getPutRequest()));
                itemPutRequests.remove(writeRequest.getPutRequest());
            }
        }

        itemsWritten.addAll(itemPutRequests.values());
    }

}

From source file:com.dell.doradus.db.s3.DynamoDBService2.java

License:Apache License

private void commitPartial(List<WriteRequest> list) {
    Timer t = new Timer();
    Map<String, List<WriteRequest>> map = new HashMap<>();
    map.put(getTenant().getName(), list);
    BatchWriteItemResult result = m_client.batchWriteItem(new BatchWriteItemRequest(map));
    int retry = 0;
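    // Resubmit only the items DynamoDB left unprocessed, backing off between attempts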
    while (result.getUnprocessedItems().size() > 0) {
        if (retry == RETRY_SLEEPS.length)
            throw new RuntimeException("All retries failed");
        m_logger.debug("Committing {} unprocessed items, retry: {}", result.getUnprocessedItems().size(),
                retry + 1);
        try {
            Thread.sleep(RETRY_SLEEPS[retry++]);
        } catch (InterruptedException e) {
            // Restore the interrupt status instead of swallowing the exception
            Thread.currentThread().interrupt();
            throw new RuntimeException("Interrupted while retrying batch write", e);
        }
        result = m_client.batchWriteItem(new BatchWriteItemRequest(result.getUnprocessedItems()));
    }
    m_logger.debug("Committed {} writes in {}", list.size(), t);
    list.clear();
}

From source file:com.erudika.para.persistence.AWSDynamoDAO.java

License:Apache License

private void batchWrite(Map<String, List<WriteRequest>> items) {
    if (items == null || items.isEmpty()) {
        return;
    }
    try {
        BatchWriteItemResult result = client().batchWriteItem(new BatchWriteItemRequest()
                .withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL).withRequestItems(items));
        if (result == null) {
            return;
        }
        logger.debug("batchWrite(): total {}, cc {}", items.size(), result.getConsumedCapacity());

        if (result.getUnprocessedItems() != null && !result.getUnprocessedItems().isEmpty()) {
            Thread.sleep(1000);
            logger.warn("UNPROCESSED {0}", result.getUnprocessedItems().size());
            batchWrite(result.getUnprocessedItems());
        }
    } catch (Exception e) {
        logger.error(null, e);
    }
}

From source file:com.mortardata.pig.storage.DynamoDBStorage.java

License:Apache License

private void submitBatchWriteItemRequest() {
    long capacityConsumed = 0;

    List<WriteRequest> writeRequests = Lists.newArrayListWithCapacity(DYNAMO_MAX_ITEMS_IN_BATCH_WRITE_REQUEST);

    // fill up the queue (pass in the floor of current capacity to be conservative)
    long bytesToWrite = this.queue.drainTo(writeRequests, (long) this.currentWriteCapacity,
            (long) this.maxWriteCapacity);

    int numWriteRequests = writeRequests.size();
    // nothing to do
    if (numWriteRequests == 0) {
        return;
    }

    // send the data over
    Map<String, List<WriteRequest>> unprocessedItems = new HashMap<String, List<WriteRequest>>(1);
    unprocessedItems.put(this.tableName, writeRequests);
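    // The full batch starts out as "unprocessed"; each attempt shrinks this map to what is left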

    try {
        for (int currentRetry = 0; currentRetry < this.maxNumRetriesPerBatchWrite; currentRetry += 1) {

            if (currentRetry > 0) {
                reportCounter(DYNAMO_COUNTER_RETRIES, 1);
            }

            BatchWriteItemRequest request = new BatchWriteItemRequest().withRequestItems(unprocessedItems);
            BatchWriteItemResult result = this.dynamo.batchWriteItem(request);
            unprocessedItems = result.getUnprocessedItems();

            // track capacity used
            capacityConsumed += getConsumedCapacity(result);

            if (unprocessedItems.isEmpty()) {

                reportCounter(DYNAMO_COUNTER_CONSUMED_CAPACITY, capacityConsumed);
                reportCounter(DYNAMO_COUNTER_BYTES_WRITTEN, bytesToWrite);

                // reduce capacity
                this.currentWriteCapacity -= capacityConsumed;

                //log.debug("Successfully sent " + numWriteRequests +
                //        " records to dynamo, using write capacity: " + capacityConsumed +
                //        ", new available capacity: " + this.currentWriteCapacity);

                // success
                break;
            } else {
                long retryMs = getRetryMs(currentRetry);
                log.info("Pausing " + retryMs + " ms before retrying write for "
                        + unprocessedItems.get(this.tableName).size() + " items to Dynamo.  Retries so far: "
                        + currentRetry);
                try {
                    Thread.sleep(retryMs);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
            }
        }
        if (!unprocessedItems.isEmpty()) {
            throw new RuntimeException(
                    "Out of retries trying to add items to DynamoDB table. Unprocessed items: "
                            + unprocessedItems);
        }

    } catch (AmazonServiceException e) {
        log.error("Error during storing. E.g. validation error.", e);
    }

    // track bytes and records written
    reportCounter(DYNAMO_COUNTER_RECORDS_WRITTEN, numWriteRequests);
}

From source file:dynamok.sink.DynamoDbSinkTask.java

License:Apache License

@Override
public void put(Collection<SinkRecord> records) {
    if (records.isEmpty())
        return;

    try {
        if (records.size() == 1 || config.batchSize == 1) {
            for (final SinkRecord record : records) {
                client.putItem(tableName(record), toPutRequest(record).getItem());
            }
        } else {
            final Iterator<SinkRecord> recordIterator = records.iterator();
            while (recordIterator.hasNext()) {
                final Map<String, List<WriteRequest>> writesByTable = toWritesByTable(recordIterator);
                final BatchWriteItemResult batchWriteResponse = client
                        .batchWriteItem(new BatchWriteItemRequest(writesByTable));
                if (!batchWriteResponse.getUnprocessedItems().isEmpty()) {
                    throw new UnprocessedItemsException(batchWriteResponse.getUnprocessedItems());
                }
            }
        }
    } catch (LimitExceededException | ProvisionedThroughputExceededException e) {
        log.debug("Write failed with Limit/Throughput Exceeded exception; backing off");
        context.timeout(config.retryBackoffMs);
        throw new RetriableException(e);
    } catch (AmazonDynamoDBException | UnprocessedItemsException e) {
        log.warn("Write failed, remainingRetries={}", 0, remainingRetries, e);
        if (remainingRetries == 0) {
            throw new ConnectException(e);
        } else {
            remainingRetries--;
            context.timeout(config.retryBackoffMs);
            throw new RetriableException(e);
        }
    }

    remainingRetries = config.maxRetries;
}

From source file:jp.xet.uncommons.spring.DynamoPersistentTokenRepository.java

License:Apache License

@Override
public void removeUserTokens(String username) {
    if (logger.isTraceEnabled()) {
        logger.trace("Remove token: username={}", username);
    }

    try {
        Condition cond = new Condition().withComparisonOperator(ComparisonOperator.EQ)
                .withAttributeValueList(new AttributeValue(username));

        ScanRequest scanRequest = new ScanRequest().withTableName(persistentLoginTable)
                .withAttributesToGet(SERIES).withScanFilter(Collections.singletonMap(USERNAME, cond));
        ScanResult result = dynamoDb.scan(scanRequest);

        List<WriteRequest> writeRequests = Lists.newArrayListWithCapacity(result.getCount());
        for (Map<String, AttributeValue> item : result.getItems()) {
            DeleteRequest deleteRequest = new DeleteRequest()
                    .withKey(Collections.singletonMap(SERIES, item.get(SERIES)));
            writeRequests.add(new WriteRequest().withDeleteRequest(deleteRequest));
        }

        Map<String, List<WriteRequest>> requestItems = Maps.newHashMapWithExpectedSize(0);
        requestItems.put(persistentLoginTable, writeRequests);

        BatchWriteItemResult batchItemResult;
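        // Keep resubmitting the unprocessed items until the result reports none left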
        do {
            BatchWriteItemRequest batchWriteItemRequest = new BatchWriteItemRequest()
                    .withRequestItems(requestItems);
            batchItemResult = dynamoDb.batchWriteItem(batchWriteItemRequest);
            requestItems = batchItemResult.getUnprocessedItems();
            if (logger.isDebugEnabled()) {
                logger.debug("Token removed: {}", batchItemResult);
            }
        } while (batchItemResult.getUnprocessedItems().size() > 0);
    } catch (Exception e) {
        logger.error("unknown exception", e);
    }
}

From source file:org.apache.hadoop.dynamodb.DynamoDBClient.java

License:Open Source License

/**
 * @param roomNeeded number of bytes that writeBatch MUST make room for
 */
private BatchWriteItemResult writeBatch(Reporter reporter, final int roomNeeded) {
    final BatchWriteItemRequest batchWriteItemRequest = new BatchWriteItemRequest()
            .withRequestItems(writeBatchMap).withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL);

    RetryResult<BatchWriteItemResult> retryResult = getRetryDriver()
            .runWithRetry(new Callable<BatchWriteItemResult>() {
                @Override
                public BatchWriteItemResult call() throws UnsupportedEncodingException, InterruptedException {
                    pauseExponentially(batchWriteRetries);
                    BatchWriteItemResult result = dynamoDB.batchWriteItem(batchWriteItemRequest);

                    Map<String, List<WriteRequest>> unprocessedItems = result.getUnprocessedItems();
                    if (unprocessedItems == null || unprocessedItems.isEmpty()) {
                        batchWriteRetries = 0;
                    } else {
                        batchWriteRetries++;

                        int unprocessedItemCount = 0;
                        for (List<WriteRequest> unprocessedWriteRequests : unprocessedItems.values()) {
                            unprocessedItemCount += unprocessedWriteRequests.size();

                            int batchSizeBytes = 0;
                            for (WriteRequest request : unprocessedWriteRequests) {
                                batchSizeBytes += DynamoDBUtil
                                        .getItemSizeBytes(request.getPutRequest().getItem());
                            }

                            long maxItemsPerBatch = config.getLong(MAX_ITEMS_PER_BATCH,
                                    DEFAULT_MAX_ITEMS_PER_BATCH);
                            long maxBatchSize = config.getLong(MAX_BATCH_SIZE, DEFAULT_MAX_BATCH_SIZE);

                            if (unprocessedWriteRequests.size() >= maxItemsPerBatch
                                    || (maxBatchSize - batchSizeBytes) < roomNeeded) {
                                throw new AmazonClientException("Full list of write requests not processed");
                            }
                        }

                        double consumed = 0.0;
                        for (ConsumedCapacity consumedCapacity : result.getConsumedCapacity()) {
                            consumed += consumedCapacity.getCapacityUnits();
                        }

                        int batchSize = 0;
                        for (List<WriteRequest> writeRequests : batchWriteItemRequest.getRequestItems()
                                .values()) {
                            batchSize += writeRequests.size();
                        }

                        log.debug("BatchWriteItem attempted " + batchSize + " items, consumed " + consumed + " "
                                + "wcu, left unprocessed " + unprocessedItemCount + " items," + " " + "now at "
                                + "" + batchWriteRetries + " retries");
                    }
                    return result;
                }
            }, reporter, PrintCounter.DynamoDBWriteThrottle);

    writeBatchMap.clear();
    writeBatchMapSizeBytes = 0;

    // If some items failed to go through, add them back to the writeBatchMap
    Map<String, List<WriteRequest>> unprocessedItems = retryResult.result.getUnprocessedItems();
    for (Entry<String, List<WriteRequest>> entry : unprocessedItems.entrySet()) {
        String key = entry.getKey();
        List<WriteRequest> requests = entry.getValue();
        for (WriteRequest request : requests) {
            writeBatchMapSizeBytes += DynamoDBUtil.getItemSizeBytes(request.getPutRequest().getItem());
        }
        writeBatchMap.put(key, requests);
    }
    return retryResult.result;
}

From source file:org.apache.hadoop.dynamodb.write.AbstractDynamoDBRecordWriter.java

License:Open Source License

@Override
public void write(K key, V value) throws IOException {
    if (value == null) {
        throw new RuntimeException("Null record encoutered. At least the key columns must be " + "specified.");
    }

    verifyInterval();
    if (progressable != null) {
        progressable.progress();
    }

    DynamoDBItemWritable item = convertValueToDynamoDBItem(key, value);
    BatchWriteItemResult result = client.putBatch(tableName, item.getItem(),
            permissibleWritesPerSecond - writesPerSecond, reporter);

    batchSize++;
    totalItemsWritten++;

    if (result != null) {
        for (ConsumedCapacity consumedCapacity : result.getConsumedCapacity()) {
            double consumedUnits = consumedCapacity.getCapacityUnits();
            totalIOPSConsumed += consumedUnits;
        }

        int unprocessedItems = 0;
        for (List<WriteRequest> requests : result.getUnprocessedItems().values()) {
            unprocessedItems += requests.size();
        }
        writesPerSecond += batchSize - unprocessedItems;
        batchSize = unprocessedItems;
    }
}

From source file:org.apache.nifi.processors.aws.dynamodb.AbstractWriteDynamoDBProcessor.java

License:Apache License

/**
 * Helper method to handle unprocessed items
 * @param session process session
 * @param keysToFlowFileMap map of DynamoDB primary keys to flow files
 * @param table dynamodb table
 * @param hashKeyName the hash key name
 * @param hashKeyValueType the hash key value type
 * @param rangeKeyName the range key name
 * @param rangeKeyValueType the range key value type
 * @param outcome the write outcome
 */
protected void handleUnprocessedItems(final ProcessSession session, Map<ItemKeys, FlowFile> keysToFlowFileMap,
        final String table, final String hashKeyName, final String hashKeyValueType, final String rangeKeyName,
        final String rangeKeyValueType, BatchWriteItemOutcome outcome) {
    BatchWriteItemResult result = outcome.getBatchWriteItemResult();

    // Handle unprocessed items
    List<WriteRequest> unprocessedItems = result.getUnprocessedItems().get(table);
    if (unprocessedItems != null && unprocessedItems.size() > 0) {
        for (WriteRequest request : unprocessedItems) {
            Map<String, AttributeValue> item = getRequestItem(request);
            Object hashKeyValue = getValue(item, hashKeyName, hashKeyValueType);
            Object rangeKeyValue = getValue(item, rangeKeyName, rangeKeyValueType);

            sendUnprocessedToUnprocessedRelationship(session, keysToFlowFileMap, hashKeyValue, rangeKeyValue);
        }
    }
}