Example usage for com.amazonaws.services.dynamodbv2.model BatchWriteItemResult getConsumedCapacity

Introduction

This page collects usage examples for com.amazonaws.services.dynamodbv2.model BatchWriteItemResult.getConsumedCapacity().

Prototype


public java.util.List<ConsumedCapacity> getConsumedCapacity() 

Document

The capacity units consumed by the entire BatchWriteItem operation.
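
Note that this list is populated only when the request sets ReturnConsumedCapacity; otherwise the method returns null. A minimal sketch of opting in and reading the result (the client and requestItems variables are hypothetical placeholders, not taken from the examples below):

// Assumes an already-configured AmazonDynamoDB `client` and a prepared
// Map<String, List<WriteRequest>> `requestItems` (hypothetical names).
BatchWriteItemRequest request = new BatchWriteItemRequest()
        .withRequestItems(requestItems)
        .withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL);
BatchWriteItemResult result = client.batchWriteItem(request);
// getConsumedCapacity() is null unless ReturnConsumedCapacity was specified.
if (result.getConsumedCapacity() != null) {
    for (ConsumedCapacity cc : result.getConsumedCapacity()) {
        System.out.printf("table=%s consumed=%.1f WCU%n", cc.getTableName(), cc.getCapacityUnits());
    }
}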

Usage

From source file:com.amazon.janusgraph.diskstorage.dynamodb.DynamoDbDelegate.java

License:Open Source License

public BatchWriteItemResult batchWriteItem(final BatchWriteItemRequest batchRequest) throws BackendException {
    int count = 0;
    for (Entry<String, List<WriteRequest>> entry : batchRequest.getRequestItems().entrySet()) {
        final String tableName = entry.getKey();
        final List<WriteRequest> requests = entry.getValue();
        count += requests.size();
        if (count > BATCH_WRITE_MAX_NUMBER_OF_ITEMS) {
            throw new IllegalArgumentException("cant have more than 25 requests in a batchwrite");
        }
        for (final WriteRequest request : requests) {
            if ((request.getPutRequest() != null) == (request.getDeleteRequest() != null)) {
                throw new IllegalArgumentException(
                        "Exactly one of PutRequest or DeleteRequest must be set in each WriteRequest in a batch write operation");
            }
            final int wcu;
            final String apiName;
            if (request.getPutRequest() != null) {
                apiName = PUT_ITEM;
                final int bytes = calculateItemSizeInBytes(request.getPutRequest().getItem());
                wcu = computeWcu(bytes);
            } else { //deleterequest
                apiName = DELETE_ITEM;
                wcu = estimateCapacityUnits(apiName, tableName);
            }
            timedWriteThrottle(apiName, tableName, wcu);
        }
    }

    BatchWriteItemResult result;
    setUserAgent(batchRequest);
    final Timer.Context apiTimerContext = getTimerContext(BATCH_WRITE_ITEM, null /*tableName*/);
    try {
        result = client.batchWriteItem(batchRequest);
    } catch (Exception e) {
        throw processDynamoDbApiException(e, BATCH_WRITE_ITEM, null /*tableName*/);
    } finally {
        apiTimerContext.stop();
    }
    if (result.getConsumedCapacity() != null) {
        for (ConsumedCapacity ccu : result.getConsumedCapacity()) {
            meterConsumedCapacity(BATCH_WRITE_ITEM, ccu);
        }
    }
    return result;
}

From source file:com.erudika.para.persistence.AWSDynamoDAO.java

License:Apache License

private void batchWrite(Map<String, List<WriteRequest>> items) {
    if (items == null || items.isEmpty()) {
        return;
    }
    try {
        BatchWriteItemResult result = client().batchWriteItem(new BatchWriteItemRequest()
                .withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL).withRequestItems(items));
        if (result == null) {
            return;
        }
        logger.debug("batchWrite(): total {}, cc {}", items.size(), result.getConsumedCapacity());

        if (result.getUnprocessedItems() != null && !result.getUnprocessedItems().isEmpty()) {
            Thread.sleep(1000);
            logger.warn("UNPROCESSED {0}", result.getUnprocessedItems().size());
            batchWrite(result.getUnprocessedItems());
        }
    } catch (Exception e) {
        logger.error(null, e);
    }
}
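
The example above retries unprocessed items after a fixed one-second pause; AWS guidance for BatchWriteItem is to retry with exponential backoff instead. A hedged sketch of that variant, reusing the client() accessor from the example above (MAX_RETRIES is a hypothetical constant):

private void batchWriteWithBackoff(Map<String, List<WriteRequest>> items) throws InterruptedException {
    int attempt = 0;
    while (items != null && !items.isEmpty() && attempt < MAX_RETRIES) {
        if (attempt > 0) {
            // Exponential backoff: 100 ms, 200 ms, 400 ms, ...
            Thread.sleep(100L << (attempt - 1));
        }
        BatchWriteItemResult result = client().batchWriteItem(new BatchWriteItemRequest()
                .withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL).withRequestItems(items));
        // Whatever DynamoDB could not process becomes the next attempt's input.
        items = result.getUnprocessedItems();
        attempt++;
    }
}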

From source file:com.intuit.tank.persistence.databases.AmazonDynamoDatabaseDocApi.java

License:Open Source License

private void addItemsToTable(String tableName, final BatchWriteItemRequest request) {

    boolean shouldRetry;
    int retries = 0;

    do {
        shouldRetry = false;
        try {
            BatchWriteItemResult result = dynamoDb.batchWriteItem(request);
            if (result != null) {
                try {
                    List<ConsumedCapacity> consumedCapacity = result.getConsumedCapacity();
                    for (ConsumedCapacity cap : consumedCapacity) {
                        logger.info(cap.getCapacityUnits());
                    }
                } catch (Exception e) {
                    // ignore this
                }
            }
        } catch (AmazonServiceException e) {
            if (e instanceof ProvisionedThroughputExceededException) {
                try {
                    DynamoDB db = new DynamoDB(dynamoDb);
                    Table table = db.getTable(tableName);
                    ProvisionedThroughputDescription oldThroughput = table.getDescription()
                            .getProvisionedThroughput();
                    logger.info("ProvisionedThroughputExceeded throughput = " + oldThroughput);
                    ProvisionedThroughput newThroughput = new ProvisionedThroughput()
                            .withReadCapacityUnits(
                                    table.getDescription().getProvisionedThroughput().getReadCapacityUnits())
                            .withWriteCapacityUnits(getIncreasedThroughput(
                                    table.getDescription().getProvisionedThroughput().getWriteCapacityUnits()));
                    // Compare the capacity values directly; the description and request
                    // objects are different types and would never compare equal.
                    if (!newThroughput.getWriteCapacityUnits().equals(oldThroughput.getWriteCapacityUnits())) {
                        logger.info("Updating throughput to " + newThroughput);
                        table.updateTable(newThroughput);
                        table.waitForActive();
                    }
                } catch (Exception e1) {
                    logger.error("Error increasing capacity: " + e, e);
                }
            }
            int status = e.getStatusCode();
            if (status == HttpStatus.SC_INTERNAL_SERVER_ERROR || status == HttpStatus.SC_SERVICE_UNAVAILABLE) {
                shouldRetry = true;
                long delay = (long) (Math.random() * (Math.pow(4, retries++) * 100L));
                try {
                    Thread.sleep(delay);
                } catch (InterruptedException iex) {
                    logger.error("Caught InterruptedException exception", iex);
                }
            } else {
                logger.error("Error writing to DB: " + e.getMessage());
                throw new RuntimeException(e);
            }
        }
    } while (shouldRetry && retries < MAX_NUMBER_OF_RETRIES);

}

From source file:com.mortardata.pig.storage.DynamoDBStorage.java

License:Apache License

private long getConsumedCapacity(BatchWriteItemResult result) {
    double consumedCapacity = 0;
    List<ConsumedCapacity> consumedCapacityList = result.getConsumedCapacity();
    if (consumedCapacityList != null) {
        for (ConsumedCapacity capacity : consumedCapacityList) {
            consumedCapacity += capacity.getCapacityUnits();
        }
    }
    return (long) consumedCapacity;
}
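
On Java 8 and later, the same null-safe summation can be expressed as a stream reduction; this is just an equivalent sketch, not the project's code:

private long getConsumedCapacity(BatchWriteItemResult result) {
    List<ConsumedCapacity> consumedCapacityList = result.getConsumedCapacity();
    if (consumedCapacityList == null) {
        return 0L;
    }
    // Sum the per-table capacity units, then truncate toward zero as above.
    return (long) consumedCapacityList.stream().mapToDouble(ConsumedCapacity::getCapacityUnits).sum();
}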

From source file:com.rapid7.diskstorage.dynamodb.DynamoDBDelegate.java

License:Open Source License

public BatchWriteItemResult batchWriteItem(BatchWriteItemRequest batchRequest) throws BackendException {
    int count = 0;
    for (Entry<String, java.util.List<WriteRequest>> entry : batchRequest.getRequestItems().entrySet()) {
        final String tableName = entry.getKey();
        final List<WriteRequest> requests = entry.getValue();
        count += requests.size();
        if (count > 25) {
            throw new IllegalArgumentException("cannot have more than 25 requests in a batch write");
        }
        for (WriteRequest request : requests) {
            if (!(request.getPutRequest() != null ^ request.getDeleteRequest() != null)) {
                throw new IllegalArgumentException(
                        "Exactly one of PutRequest or DeleteRequest must be set in each WriteRequest in a batch write operation");
            }
            final int wcu;
            final String apiName;
            if (request.getPutRequest() != null) {
                apiName = PUT_ITEM;
                final int bytes = calculateItemSizeInBytes(request.getPutRequest().getItem());
                wcu = computeWcu(bytes);
            } else { //deleterequest
                apiName = DELETE_ITEM;
                wcu = estimateCapacityUnits(apiName, tableName);
            }
            timedWriteThrottle(apiName, tableName, wcu);
        }
    }

    BatchWriteItemResult result;
    setUserAgent(batchRequest);
    final Timer.Context apiTimerContext = getTimerContext(BATCH_WRITE_ITEM, null /*tableName*/);
    try {
        result = client.batchWriteItem(batchRequest);
    } catch (Exception e) {
        throw processDynamoDBAPIException(e, BATCH_WRITE_ITEM, null /*tableName*/);
    } finally {
        apiTimerContext.stop();
    }
    if (result.getConsumedCapacity() != null) {
        for (ConsumedCapacity ccu : result.getConsumedCapacity()) {
            meterConsumedCapacity(BATCH_WRITE_ITEM, ccu);
        }
    }
    return result;
}

From source file:org.apache.hadoop.dynamodb.DynamoDBClient.java

License:Open Source License

/**
 * @param roomNeeded number of bytes that writeBatch MUST make room for
 */
private BatchWriteItemResult writeBatch(Reporter reporter, final int roomNeeded) {
    final BatchWriteItemRequest batchWriteItemRequest = new BatchWriteItemRequest()
            .withRequestItems(writeBatchMap).withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL);

    RetryResult<BatchWriteItemResult> retryResult = getRetryDriver()
            .runWithRetry(new Callable<BatchWriteItemResult>() {
                @Override
                public BatchWriteItemResult call() throws UnsupportedEncodingException, InterruptedException {
                    pauseExponentially(batchWriteRetries);
                    BatchWriteItemResult result = dynamoDB.batchWriteItem(batchWriteItemRequest);

                    Map<String, List<WriteRequest>> unprocessedItems = result.getUnprocessedItems();
                    if (unprocessedItems == null || unprocessedItems.isEmpty()) {
                        batchWriteRetries = 0;
                    } else {
                        batchWriteRetries++;

                        int unprocessedItemCount = 0;
                        for (List<WriteRequest> unprocessedWriteRequests : unprocessedItems.values()) {
                            unprocessedItemCount += unprocessedWriteRequests.size();

                            int batchSizeBytes = 0;
                            for (WriteRequest request : unprocessedWriteRequests) {
                                batchSizeBytes += DynamoDBUtil
                                        .getItemSizeBytes(request.getPutRequest().getItem());
                            }

                            long maxItemsPerBatch = config.getLong(MAX_ITEMS_PER_BATCH,
                                    DEFAULT_MAX_ITEMS_PER_BATCH);
                            long maxBatchSize = config.getLong(MAX_BATCH_SIZE, DEFAULT_MAX_BATCH_SIZE);

                            if (unprocessedWriteRequests.size() >= maxItemsPerBatch
                                    || (maxBatchSize - batchSizeBytes) < roomNeeded) {
                                throw new AmazonClientException("Full list of write requests not processed");
                            }
                        }

                        double consumed = 0.0;
                        for (ConsumedCapacity consumedCapacity : result.getConsumedCapacity()) {
                            consumed += consumedCapacity.getCapacityUnits();
                        }

                        int batchSize = 0;
                        for (List<WriteRequest> writeRequests : batchWriteItemRequest.getRequestItems()
                                .values()) {
                            batchSize += writeRequests.size();
                        }

                        log.debug("BatchWriteItem attempted " + batchSize + " items, consumed " + consumed + " "
                                + "wcu, left unprocessed " + unprocessedItemCount + " items," + " " + "now at "
                                + "" + batchWriteRetries + " retries");
                    }
                    return result;
                }
            }, reporter, PrintCounter.DynamoDBWriteThrottle);

    writeBatchMap.clear();
    writeBatchMapSizeBytes = 0;

    // If some items failed to go through, add them back to the writeBatchMap
    Map<String, List<WriteRequest>> unprocessedItems = retryResult.result.getUnprocessedItems();
    for (Entry<String, List<WriteRequest>> entry : unprocessedItems.entrySet()) {
        String key = entry.getKey();
        List<WriteRequest> requests = entry.getValue();
        for (WriteRequest request : requests) {
            writeBatchMapSizeBytes += DynamoDBUtil.getItemSizeBytes(request.getPutRequest().getItem());
        }
        writeBatchMap.put(key, requests);
    }
    return retryResult.result;
}

From source file:org.apache.hadoop.dynamodb.write.AbstractDynamoDBRecordWriter.java

License:Open Source License

@Override
public void write(K key, V value) throws IOException {
    if (value == null) {
        throw new RuntimeException("Null record encoutered. At least the key columns must be " + "specified.");
    }//  w w w . jav  a2  s  .c o  m

    verifyInterval();
    if (progressable != null) {
        progressable.progress();
    }

    DynamoDBItemWritable item = convertValueToDynamoDBItem(key, value);
    BatchWriteItemResult result = client.putBatch(tableName, item.getItem(),
            permissibleWritesPerSecond - writesPerSecond, reporter);

    batchSize++;
    totalItemsWritten++;

    if (result != null) {
        for (ConsumedCapacity consumedCapacity : result.getConsumedCapacity()) {
            double consumedUnits = consumedCapacity.getCapacityUnits();
            totalIOPSConsumed += consumedUnits;
        }

        int unprocessedItems = 0;
        for (List<WriteRequest> requests : result.getUnprocessedItems().values()) {
            unprocessedItems += requests.size();
        }
        writesPerSecond += batchSize - unprocessedItems;
        batchSize = unprocessedItems;
    }
}