List of usage examples for com.amazonaws.services.dynamodbv2.model WriteRequest getPutRequest
public PutRequest getPutRequest()
A request to perform a PutItem operation.
From source file:com.amazon.janusgraph.diskstorage.dynamodb.DynamoDbDelegate.java
License:Open Source License
public BatchWriteItemResult batchWriteItem(final BatchWriteItemRequest batchRequest) throws BackendException { int count = 0; for (Entry<String, List<WriteRequest>> entry : batchRequest.getRequestItems().entrySet()) { final String tableName = entry.getKey(); final List<WriteRequest> requests = entry.getValue(); count += requests.size();/* w ww .j a va 2s.c o m*/ if (count > BATCH_WRITE_MAX_NUMBER_OF_ITEMS) { throw new IllegalArgumentException("cant have more than 25 requests in a batchwrite"); } for (final WriteRequest request : requests) { if ((request.getPutRequest() != null) == (request.getDeleteRequest() != null)) { throw new IllegalArgumentException( "Exactly one of PutRequest or DeleteRequest must be set in each WriteRequest in a batch write operation"); } final int wcu; final String apiName; if (request.getPutRequest() != null) { apiName = PUT_ITEM; final int bytes = calculateItemSizeInBytes(request.getPutRequest().getItem()); wcu = computeWcu(bytes); } else { //deleterequest apiName = DELETE_ITEM; wcu = estimateCapacityUnits(apiName, tableName); } timedWriteThrottle(apiName, tableName, wcu); } } BatchWriteItemResult result; setUserAgent(batchRequest); final Timer.Context apiTimerContext = getTimerContext(BATCH_WRITE_ITEM, null /*tableName*/); try { result = client.batchWriteItem(batchRequest); } catch (Exception e) { throw processDynamoDbApiException(e, BATCH_WRITE_ITEM, null /*tableName*/); } finally { apiTimerContext.stop(); } if (result.getConsumedCapacity() != null) { for (ConsumedCapacity ccu : result.getConsumedCapacity()) { meterConsumedCapacity(BATCH_WRITE_ITEM, ccu); } } return result; }
From source file:com.clicktravel.infrastructure.persistence.aws.dynamodb.DynamoDbTemplate.java
License:Apache License
/** * This method removes items which did not process in the batch write. The results of the batch write tell us which * PutRequests were not processed, and from this we can use our maps to find out which items these belonged to. * These items can then be removed from the results and their versions won't be updated. * @param itemsWritten - the successfully written items * @param itemVersions - the map of version to items, so we know which versions belong to which items * @param itemPutRequests - the map of put requests to items, so we know which put request relates to which item * @param itemResult - the result of the batch write, we use this to get the unprocessed items *///from ww w.jav a 2 s . com private <T extends Item> void removeUnprocessedItems(final List<T> itemsWritten, final Map<T, Long> itemVersions, final Map<PutRequest, T> itemPutRequests, final BatchWriteItemResult itemResult) { if (itemResult != null && itemResult.getUnprocessedItems() != null) { for (final String tableName : itemResult.getUnprocessedItems().keySet()) { for (final WriteRequest writeRequest : itemResult.getUnprocessedItems().get(tableName)) { itemVersions.remove(itemPutRequests.get(writeRequest.getPutRequest())); itemPutRequests.remove(writeRequest.getPutRequest()); } } itemsWritten.addAll(itemPutRequests.values()); } }
From source file:com.rapid7.diskstorage.dynamodb.DynamoDBDelegate.java
License:Open Source License
public BatchWriteItemResult batchWriteItem(BatchWriteItemRequest batchRequest) throws BackendException { int count = 0; for (Entry<String, java.util.List<WriteRequest>> entry : batchRequest.getRequestItems().entrySet()) { final String tableName = entry.getKey(); final List<WriteRequest> requests = entry.getValue(); count += requests.size();//w w w .j a v a 2 s .co m if (count > 25) { throw new IllegalArgumentException("cant have more than 25 requests in a batchwrite"); } for (WriteRequest request : requests) { if (!(request.getPutRequest() != null ^ request.getDeleteRequest() != null)) { throw new IllegalArgumentException( "Exactly one of PutRequest or DeleteRequest must be set in each WriteRequest in a batch write operation"); } final int wcu; final String apiName; if (request.getPutRequest() != null) { apiName = PUT_ITEM; final int bytes = calculateItemSizeInBytes(request.getPutRequest().getItem()); wcu = computeWcu(bytes); } else { //deleterequest apiName = DELETE_ITEM; wcu = estimateCapacityUnits(apiName, tableName); } timedWriteThrottle(apiName, tableName, wcu); } } BatchWriteItemResult result; setUserAgent(batchRequest); final Timer.Context apiTimerContext = getTimerContext(BATCH_WRITE_ITEM, null /*tableName*/); try { result = client.batchWriteItem(batchRequest); } catch (Exception e) { throw processDynamoDBAPIException(e, BATCH_WRITE_ITEM, null /*tableName*/); } finally { apiTimerContext.stop(); } if (result.getConsumedCapacity() != null) { for (ConsumedCapacity ccu : result.getConsumedCapacity()) { meterConsumedCapacity(BATCH_WRITE_ITEM, ccu); } } return result; }
From source file:org.apache.hadoop.dynamodb.DynamoDBClient.java
License:Open Source License
/** * @param roomNeeded number of bytes that writeBatch MUST make room for *///from w w w. ja va 2 s . c o m private BatchWriteItemResult writeBatch(Reporter reporter, final int roomNeeded) { final BatchWriteItemRequest batchWriteItemRequest = new BatchWriteItemRequest() .withRequestItems(writeBatchMap).withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL); RetryResult<BatchWriteItemResult> retryResult = getRetryDriver() .runWithRetry(new Callable<BatchWriteItemResult>() { @Override public BatchWriteItemResult call() throws UnsupportedEncodingException, InterruptedException { pauseExponentially(batchWriteRetries); BatchWriteItemResult result = dynamoDB.batchWriteItem(batchWriteItemRequest); Map<String, List<WriteRequest>> unprocessedItems = result.getUnprocessedItems(); if (unprocessedItems == null || unprocessedItems.isEmpty()) { batchWriteRetries = 0; } else { batchWriteRetries++; int unprocessedItemCount = 0; for (List<WriteRequest> unprocessedWriteRequests : unprocessedItems.values()) { unprocessedItemCount += unprocessedWriteRequests.size(); int batchSizeBytes = 0; for (WriteRequest request : unprocessedWriteRequests) { batchSizeBytes += DynamoDBUtil .getItemSizeBytes(request.getPutRequest().getItem()); } long maxItemsPerBatch = config.getLong(MAX_ITEMS_PER_BATCH, DEFAULT_MAX_ITEMS_PER_BATCH); long maxBatchSize = config.getLong(MAX_BATCH_SIZE, DEFAULT_MAX_BATCH_SIZE); if (unprocessedWriteRequests.size() >= maxItemsPerBatch || (maxBatchSize - batchSizeBytes) < roomNeeded) { throw new AmazonClientException("Full list of write requests not processed"); } } double consumed = 0.0; for (ConsumedCapacity consumedCapacity : result.getConsumedCapacity()) { consumed = consumedCapacity.getCapacityUnits(); } int batchSize = 0; for (List<WriteRequest> writeRequests : batchWriteItemRequest.getRequestItems() .values()) { batchSize += writeRequests.size(); } log.debug("BatchWriteItem attempted " + batchSize + " items, consumed " + consumed + " " + "wcu, left unprocessed " 
+ unprocessedItemCount + " items," + " " + "now at " + "" + batchWriteRetries + " retries"); } return result; } }, reporter, PrintCounter.DynamoDBWriteThrottle); writeBatchMap.clear(); writeBatchMapSizeBytes = 0; // If some items failed to go through, add them back to the writeBatchMap Map<String, List<WriteRequest>> unprocessedItems = retryResult.result.getUnprocessedItems(); for (Entry<String, List<WriteRequest>> entry : unprocessedItems.entrySet()) { String key = entry.getKey(); List<WriteRequest> requests = entry.getValue(); for (WriteRequest request : requests) { writeBatchMapSizeBytes += DynamoDBUtil.getItemSizeBytes(request.getPutRequest().getItem()); } writeBatchMap.put(key, requests); } return retryResult.result; }
From source file:org.apache.nifi.processors.aws.dynamodb.PutDynamoDB.java
License:Apache License
/**
 * {@inheritDoc}
 *
 * <p>Returns the attribute map of the put request carried by the given write request.
 */
protected Map<String, AttributeValue> getRequestItem(WriteRequest writeRequest) {
    final PutRequest putRequest = writeRequest.getPutRequest();
    return putRequest.getItem();
}