Example usage for com.amazonaws.services.dynamodbv2.model ConsumedCapacity getCapacityUnits


Introduction

This page lists example usages of com.amazonaws.services.dynamodbv2.model.ConsumedCapacity#getCapacityUnits, collected from open-source projects.

Prototype


public Double getCapacityUnits() 

Document

The total number of capacity units consumed by the operation.
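Note that the value is only populated when the request opts into capacity reporting: if ReturnConsumedCapacity is not set (or is NONE), the result's getConsumedCapacity() returns null. Below is a minimal, self-contained sketch of a typical call site; the client construction, table name ("ExampleTable"), key attribute ("id"), and item value are hypothetical placeholders, not taken from the examples that follow.

import java.util.Collections;

import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
import com.amazonaws.services.dynamodbv2.model.AttributeValue;
import com.amazonaws.services.dynamodbv2.model.ConsumedCapacity;
import com.amazonaws.services.dynamodbv2.model.GetItemRequest;
import com.amazonaws.services.dynamodbv2.model.GetItemResult;
import com.amazonaws.services.dynamodbv2.model.ReturnConsumedCapacity;

public class ConsumedCapacityExample {
    public static void main(String[] args) {
        // Hypothetical client and table; adjust region/credentials as needed.
        AmazonDynamoDB client = AmazonDynamoDBClientBuilder.defaultClient();

        GetItemRequest request = new GetItemRequest()
                .withTableName("ExampleTable")
                .withKey(Collections.singletonMap("id", new AttributeValue("item-1")))
                // Ask DynamoDB to report the capacity consumed by this call.
                .withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL);

        GetItemResult result = client.getItem(request);

        ConsumedCapacity capacity = result.getConsumedCapacity();
        if (capacity != null) {
            // getCapacityUnits() returns the total units consumed by the operation.
            System.out.println(capacity.getTableName() + " consumed "
                    + capacity.getCapacityUnits() + " capacity units");
        }
    }
}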

Usage

From source file:com.amazon.janusgraph.diskstorage.dynamodb.DynamoDbDelegate.java

License:Open Source License

private void meterConsumedCapacity(final String apiName, final ConsumedCapacity ccu) {
    if (ccu != null) {
        getConsumedCapacityMeter(apiName, ccu.getTableName()).mark(Math.round(ccu.getCapacityUnits()));
    }
}

From source file:com.amazon.janusgraph.diskstorage.dynamodb.QueryWorker.java

License:Open Source License

@Override
public QueryResultWrapper next() throws BackendException {
    final Query backoff = new ExponentialBackoff.Query(request, delegate, permitsToConsume);
    final QueryResult result = backoff.runWithBackoff();
    final ConsumedCapacity consumedCapacity = result.getConsumedCapacity();
    if (null != consumedCapacity) {
        permitsToConsume = Math.max((int) (consumedCapacity.getCapacityUnits() - 1.0), 1);
        totalCapacityUnits += consumedCapacity.getCapacityUnits();
    }

    if (result.getLastEvaluatedKey() != null && !result.getLastEvaluatedKey().isEmpty()) {
        request.setExclusiveStartKey(result.getLastEvaluatedKey());
    } else {
        markComplete();
    }
    // update the returned count
    returnedCount += result.getCount();

    // update the scanned count
    scannedCount += result.getScannedCount();

    // add the returned items to finalItemList
    finalItemList.addAll(result.getItems());
    return new QueryResultWrapper(titanKey, result);
}

From source file:com.intuit.tank.persistence.databases.AmazonDynamoDatabaseDocApi.java

License:Open Source License

private void addItemsToTable(String tableName, final BatchWriteItemRequest request) {

    boolean shouldRetry;
    int retries = 0;

    do {
        shouldRetry = false;
        try {
            BatchWriteItemResult result = dynamoDb.batchWriteItem(request);
            if (result != null) {
                try {
                    List<ConsumedCapacity> consumedCapacity = result.getConsumedCapacity();
                    for (ConsumedCapacity cap : consumedCapacity) {
                        logger.info(cap.getCapacityUnits());
                    }
                } catch (Exception e) {
                    // ignore this
                }
            }
        } catch (AmazonServiceException e) {
            if (e instanceof ProvisionedThroughputExceededException) {
                try {
                    DynamoDB db = new DynamoDB(dynamoDb);
                    Table table = db.getTable(tableName);
                    ProvisionedThroughputDescription oldThroughput = table.getDescription()
                            .getProvisionedThroughput();
                    logger.info("ProvisionedThroughputExceeded throughput = " + oldThroughput);
                    ProvisionedThroughput newThroughput = new ProvisionedThroughput()
                            .withReadCapacityUnits(
                                    table.getDescription().getProvisionedThroughput().getReadCapacityUnits())
                            .withWriteCapacityUnits(getIncreasedThroughput(
                                    table.getDescription().getProvisionedThroughput().getReadCapacityUnits()));
                    if (!oldThroughput.equals(newThroughput)) {
                        logger.info("Updating throughput to " + newThroughput);
                        table.updateTable(newThroughput);
                        table.waitForActive();
                    }
                } catch (Exception e1) {
                    logger.error("Error increasing capacity: " + e, e);
                }
            }
            int status = e.getStatusCode();
            if (status == HttpStatus.SC_INTERNAL_SERVER_ERROR || status == HttpStatus.SC_SERVICE_UNAVAILABLE) {
                shouldRetry = true;
                long delay = (long) (Math.random() * (Math.pow(4, retries++) * 100L));
                try {
                    Thread.sleep(delay);
                } catch (InterruptedException iex) {
                    logger.error("Caught InterruptedException exception", iex);
                }
            } else {
                logger.error("Error writing to DB: " + e.getMessage());
                throw new RuntimeException(e);
            }
        }
    } while (shouldRetry && retries < MAX_NUMBER_OF_RETRIES);

}

From source file:com.mortardata.pig.storage.DynamoDBStorage.java

License:Apache License

private long getConsumedCapacity(BatchWriteItemResult result) {
    double consumedCapacity = 0;
    List<ConsumedCapacity> consumedCapacityList = result.getConsumedCapacity();
    if (consumedCapacityList != null) {
        for (ConsumedCapacity capacity : consumedCapacityList) {
            consumedCapacity += capacity.getCapacityUnits();
        }
    }
    return new Double(consumedCapacity).longValue();
}

From source file:com.rapid7.diskstorage.dynamodb.DynamoDBDelegate.java

License:Open Source License

public final void meterConsumedCapacity(String apiName, ConsumedCapacity ccu) {
    if (ccu != null) {
        getConsumedCapacityMeter(apiName, ccu.getTableName()).mark(Math.round(ccu.getCapacityUnits()));
    }
}

From source file:com.rapid7.diskstorage.dynamodb.QueryWorker.java

License:Open Source License

@Override
public QueryResultWrapper next() throws BackendException {
    ExponentialBackoff.Query backoff = new ExponentialBackoff.Query(request, delegate, permitsToConsume);
    QueryResult result = backoff.runWithBackoff();
    ConsumedCapacity consumedCapacity = result.getConsumedCapacity();
    if (null != consumedCapacity) {
        permitsToConsume = Math.max((int) (consumedCapacity.getCapacityUnits() - 1.0), 1);
        totalCapacityUnits += consumedCapacity.getCapacityUnits();
    }

    if (result.getLastEvaluatedKey() != null && !result.getLastEvaluatedKey().isEmpty()) {
        request.setExclusiveStartKey(result.getLastEvaluatedKey());
    } else {
        markComplete();
    }
    // update the returned count
    returnedCount += result.getCount();

    // update the scanned count
    scannedCount += result.getScannedCount();

    // add the returned items
    items.addAll(result.getItems());
    return new QueryResultWrapper(titanKey, result);
}

From source file:org.apache.hadoop.dynamodb.DynamoDBClient.java

License:Open Source License

/**
 * @param roomNeeded number of bytes that writeBatch MUST make room for
 */
private BatchWriteItemResult writeBatch(Reporter reporter, final int roomNeeded) {
    final BatchWriteItemRequest batchWriteItemRequest = new BatchWriteItemRequest()
            .withRequestItems(writeBatchMap).withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL);

    RetryResult<BatchWriteItemResult> retryResult = getRetryDriver()
            .runWithRetry(new Callable<BatchWriteItemResult>() {
                @Override
                public BatchWriteItemResult call() throws UnsupportedEncodingException, InterruptedException {
                    pauseExponentially(batchWriteRetries);
                    BatchWriteItemResult result = dynamoDB.batchWriteItem(batchWriteItemRequest);

                    Map<String, List<WriteRequest>> unprocessedItems = result.getUnprocessedItems();
                    if (unprocessedItems == null || unprocessedItems.isEmpty()) {
                        batchWriteRetries = 0;
                    } else {
                        batchWriteRetries++;

                        int unprocessedItemCount = 0;
                        for (List<WriteRequest> unprocessedWriteRequests : unprocessedItems.values()) {
                            unprocessedItemCount += unprocessedWriteRequests.size();

                            int batchSizeBytes = 0;
                            for (WriteRequest request : unprocessedWriteRequests) {
                                batchSizeBytes += DynamoDBUtil
                                        .getItemSizeBytes(request.getPutRequest().getItem());
                            }

                            long maxItemsPerBatch = config.getLong(MAX_ITEMS_PER_BATCH,
                                    DEFAULT_MAX_ITEMS_PER_BATCH);
                            long maxBatchSize = config.getLong(MAX_BATCH_SIZE, DEFAULT_MAX_BATCH_SIZE);

                            if (unprocessedWriteRequests.size() >= maxItemsPerBatch
                                    || (maxBatchSize - batchSizeBytes) < roomNeeded) {
                                throw new AmazonClientException("Full list of write requests not processed");
                            }
                        }

                        double consumed = 0.0;
                        for (ConsumedCapacity consumedCapacity : result.getConsumedCapacity()) {
                            consumed = consumedCapacity.getCapacityUnits();
                        }

                        int batchSize = 0;
                        for (List<WriteRequest> writeRequests : batchWriteItemRequest.getRequestItems()
                                .values()) {
                            batchSize += writeRequests.size();
                        }

                        log.debug("BatchWriteItem attempted " + batchSize + " items, consumed " + consumed + " "
                                + "wcu, left unprocessed " + unprocessedItemCount + " items," + " " + "now at "
                                + "" + batchWriteRetries + " retries");
                    }
                    return result;
                }
            }, reporter, PrintCounter.DynamoDBWriteThrottle);

    writeBatchMap.clear();
    writeBatchMapSizeBytes = 0;

    // If some items failed to go through, add them back to the writeBatchMap
    Map<String, List<WriteRequest>> unprocessedItems = retryResult.result.getUnprocessedItems();
    for (Entry<String, List<WriteRequest>> entry : unprocessedItems.entrySet()) {
        String key = entry.getKey();
        List<WriteRequest> requests = entry.getValue();
        for (WriteRequest request : requests) {
            writeBatchMapSizeBytes += DynamoDBUtil.getItemSizeBytes(request.getPutRequest().getItem());
        }
        writeBatchMap.put(key, requests);
    }
    return retryResult.result;
}

From source file:org.apache.hadoop.dynamodb.write.AbstractDynamoDBRecordWriter.java

License:Open Source License

@Override
public void write(K key, V value) throws IOException {
    if (value == null) {
        throw new RuntimeException("Null record encountered. At least the key columns must be specified.");
    }

    verifyInterval();
    if (progressable != null) {
        progressable.progress();
    }

    DynamoDBItemWritable item = convertValueToDynamoDBItem(key, value);
    BatchWriteItemResult result = client.putBatch(tableName, item.getItem(),
            permissibleWritesPerSecond - writesPerSecond, reporter);

    batchSize++;
    totalItemsWritten++;

    if (result != null) {
        for (ConsumedCapacity consumedCapacity : result.getConsumedCapacity()) {
            double consumedUnits = consumedCapacity.getCapacityUnits();
            totalIOPSConsumed += consumedUnits;
        }

        int unprocessedItems = 0;
        for (List<WriteRequest> requests : result.getUnprocessedItems().values()) {
            unprocessedItems += requests.size();
        }
        writesPerSecond += batchSize - unprocessedItems;
        batchSize = unprocessedItems;
    }
}

From source file:org.xmlsh.aws.util.AWSDDBCommand.java

License:BSD License

protected void writeConsumedCapacity(ConsumedCapacity consumedCapacity) throws XMLStreamException {
    if (consumedCapacity == null)
        return;
    startElement("consumed-capacity");
    attribute("total-units", consumedCapacity.getCapacityUnits());
    startElement("table");
    writeCapacity("table-name", consumedCapacity.getTableName(), consumedCapacity.getTable());
    endElement();
    startElement("local-secondary-indexes");
    for (Entry<String, Capacity> ce : consumedCapacity.getLocalSecondaryIndexes().entrySet())
        writeCapacity("index-name", ce.getKey(), ce.getValue());
    endElement();

    startElement("global-secondary-indexes");
    for (Entry<String, Capacity> ce : consumedCapacity.getGlobalSecondaryIndexes().entrySet())
        writeCapacity("index-name", ce.getKey(), ce.getValue());
    endElement();
    endElement();

}