Example usage for com.amazonaws.services.dynamodbv2.model BatchWriteItemRequest BatchWriteItemRequest

Introduction

On this page you can find example usage for the com.amazonaws.services.dynamodbv2.model.BatchWriteItemRequest default constructor, BatchWriteItemRequest().

Prototype

public BatchWriteItemRequest() 

Document

Default constructor for BatchWriteItemRequest object.
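
The examples below share a common pattern: create the request with the default constructor (or the fluent withRequestItems form), set the map of table name to write requests, and resubmit anything DynamoDB leaves unprocessed. The following is a minimal sketch of that pattern; the table name, item attributes, and client setup are placeholders for illustration, not taken from the sources below, and the imports come from com.amazonaws.services.dynamodbv2 and java.util.

AmazonDynamoDB client = AmazonDynamoDBClientBuilder.defaultClient();

Map<String, AttributeValue> item = new HashMap<String, AttributeValue>();
item.put("id", new AttributeValue().withS("example-id")); // hypothetical key attribute

List<WriteRequest> writes = new ArrayList<WriteRequest>();
writes.add(new WriteRequest().withPutRequest(new PutRequest().withItem(item)));

Map<String, List<WriteRequest>> requestItems = new HashMap<String, List<WriteRequest>>();
requestItems.put("example-table", writes); // hypothetical table name

// Initialize with the default constructor, then set the request items.
BatchWriteItemRequest request = new BatchWriteItemRequest();
request.setRequestItems(requestItems);

// BatchWriteItem can return unprocessed items; resubmit until the map is empty.
// Production code should add exponential backoff between retries.
BatchWriteItemResult result = client.batchWriteItem(request);
while (!result.getUnprocessedItems().isEmpty()) {
    result = client.batchWriteItem(new BatchWriteItemRequest()
            .withRequestItems(result.getUnprocessedItems()));
}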

Usage

From source file:amazon.dynamodb.config.DynamoDBManager.java

License:Open Source License

public BatchWritePointResult batchWritePoints(List<PutPointRequest> putPointRequests) {
    BatchWriteItemRequest batchItemRequest = new BatchWriteItemRequest();
    List<WriteRequest> writeRequests = new ArrayList<WriteRequest>();
    for (PutPointRequest putPointRequest : putPointRequests) {
        long geohash = S2Manager.generateGeohash(putPointRequest.getGeoPoint());
        long hashKey = S2Manager.generateHashKey(geohash, config.getHashKeyLength());
        String geoJson = GeoJsonMapper.stringFromGeoObject(putPointRequest.getGeoPoint());

        PutRequest putRequest = putPointRequest.getPutRequest();
        AttributeValue hashKeyValue = new AttributeValue().withN(String.valueOf(hashKey));
        putRequest.getItem().put(config.getHashKeyAttributeName(), hashKeyValue);
        putRequest.getItem().put(config.getRangeKeyAttributeName(), putPointRequest.getRangeKeyValue());
        AttributeValue geohashValue = new AttributeValue().withN(Long.toString(geohash));
        putRequest.getItem().put(config.getGeohashAttributeName(), geohashValue);
        AttributeValue geoJsonValue = new AttributeValue().withS(geoJson);
        putRequest.getItem().put(config.getGeoJsonAttributeName(), geoJsonValue);

        WriteRequest writeRequest = new WriteRequest(putRequest);
        writeRequests.add(writeRequest);
    }
    Map<String, List<WriteRequest>> requestItems = new HashMap<String, List<WriteRequest>>();
    requestItems.put(config.getTableName(), writeRequests);
    batchItemRequest.setRequestItems(requestItems);
    BatchWriteItemResult batchWriteItemResult = config.getDynamoDBClient().batchWriteItem(batchItemRequest);
    BatchWritePointResult batchWritePointResult = new BatchWritePointResult(batchWriteItemResult);
    return batchWritePointResult;
}

From source file:com.clicktravel.infrastructure.persistence.aws.dynamodb.DynamoDbTemplate.java

License:Apache License

/**
 * Turns the items into DynamoDB PutRequests so they can be batch written. These are then bound inside a
 * BatchWriteItemRequest, which allows us to batch write them into DynamoDB. Any requests in the batch that
 * fail to write are removed from the results, then each successfully written item has its version set
 * accordingly. Maps are used to keep track of which versions belong to which items, and of which PutRequest
 * object relates to which item, so we can remove unsuccessful writes. This method throws an
 * IllegalArgumentException if the item being batch written has unique constraints. It does not implement
 * row-level locking; you will need to implement your own locking to ensure consistency.
 */
@Override
public <T extends Item> List<T> batchWrite(final List<T> items, final Class<T> itemClass)
        throws IllegalArgumentException, PersistenceResourceFailureException {
    final ItemConfiguration itemConfiguration = getItemConfiguration(itemClass);
    final List<T> itemsWritten = new ArrayList<T>();
    final Map<T, Long> itemVersions = new HashMap<T, Long>();
    final Map<String, List<WriteRequest>> requestItems = new HashMap<String, List<WriteRequest>>();
    final Map<PutRequest, T> itemPutRequests = new HashMap<PutRequest, T>();

    if (!itemConfiguration.uniqueConstraints().isEmpty()) {
        throw new IllegalArgumentException("Cannot perform batch write for item of type " + itemClass);
    }

    createRequestItems(itemConfiguration, itemVersions, requestItems, items, itemPutRequests);

    final BatchWriteItemRequest itemRequest = new BatchWriteItemRequest().withRequestItems(requestItems);

    try {
        final BatchWriteItemResult itemResult = amazonDynamoDbClient.batchWriteItem(itemRequest);
        removeUnprocessedItems(itemsWritten, itemVersions, itemPutRequests, itemResult);
    } catch (final AmazonServiceException amazonServiceException) {
        throw new PersistenceResourceFailureException("Failed to do Dynamo DB batch write",
                amazonServiceException);
    }

    // Any items that were successfully processed need their versions set.
    for (final T item : itemsWritten) {
        item.setVersion(itemVersions.get(item));
    }

    return itemsWritten;
}

From source file:com.erudika.para.persistence.AWSDynamoDAO.java

License:Apache License

private void batchWrite(Map<String, List<WriteRequest>> items) {
    if (items == null || items.isEmpty()) {
        return;
    }
    try {
        BatchWriteItemResult result = client().batchWriteItem(new BatchWriteItemRequest()
                .withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL).withRequestItems(items));
        if (result == null) {
            return;
        }
        logger.debug("batchWrite(): total {}, cc {}", items.size(), result.getConsumedCapacity());

        if (result.getUnprocessedItems() != null && !result.getUnprocessedItems().isEmpty()) {
            Thread.sleep(1000);
            logger.warn("UNPROCESSED {0}", result.getUnprocessedItems().size());
            batchWrite(result.getUnprocessedItems());
        }
    } catch (Exception e) {
        logger.error(null, e);
    }
}

From source file:com.intuit.tank.persistence.databases.AmazonDynamoDatabaseDocApi.java

License:Open Source License

/**
 * @param tableName the table to send the batch to
 * @param requests the write requests, sent in batches of at most BATCH_SIZE
 */
private void sendBatch(final String tableName, List<WriteRequest> requests) {
    int numBatches = (int) Math.ceil(requests.size() / (BATCH_SIZE * 1D));
    for (int i = 0; i < numBatches; i++) {
        Map<String, List<WriteRequest>> requestItems = new HashMap<String, List<WriteRequest>>();
        List<WriteRequest> batch = requests.subList(i * BATCH_SIZE,
                Math.min(i * BATCH_SIZE + BATCH_SIZE, requests.size()));
        requestItems.put(tableName, batch);
        addItemsToTable(tableName, new BatchWriteItemRequest().withRequestItems(requestItems));
    }
}

From source file:com.mortardata.pig.storage.DynamoDBStorage.java

License:Apache License

private void submitBatchWriteItemRequest() {
    long capacityConsumed = 0;

    List<WriteRequest> writeRequests = Lists.newArrayListWithCapacity(DYNAMO_MAX_ITEMS_IN_BATCH_WRITE_REQUEST);

    // fill up the queue (pass in the floor of current capacity to be conservative)
    long bytesToWrite = this.queue.drainTo(writeRequests, (long) this.currentWriteCapacity,
            (long) this.maxWriteCapacity);

    int numWriteRequests = writeRequests.size();
    // nothing to do
    if (numWriteRequests == 0) {
        return;
    }

    // send the data over
    Map<String, List<WriteRequest>> unprocessedItems = new HashMap<String, List<WriteRequest>>(1);
    unprocessedItems.put(this.tableName, writeRequests);

    try {
        for (int currentRetry = 0; currentRetry < this.maxNumRetriesPerBatchWrite; currentRetry += 1) {

            if (currentRetry > 0) {
                reportCounter(DYNAMO_COUNTER_RETRIES, 1);
            }

            BatchWriteItemRequest request = new BatchWriteItemRequest().withRequestItems(unprocessedItems);
            BatchWriteItemResult result = this.dynamo.batchWriteItem(request);
            unprocessedItems = result.getUnprocessedItems();

            // track capacity used
            capacityConsumed += getConsumedCapacity(result);

            if (unprocessedItems.isEmpty()) {

                reportCounter(DYNAMO_COUNTER_CONSUMED_CAPACITY, capacityConsumed);
                reportCounter(DYNAMO_COUNTER_BYTES_WRITTEN, bytesToWrite);

                // reduce capacity
                this.currentWriteCapacity -= capacityConsumed;

                //log.debug("Successfully sent " + numWriteRequests +
                //        " records to dynamo, using write capacity: " + capacityConsumed +
                //        ", new available capacity: " + this.currentWriteCapacity);

                // success
                break;
            } else {
                long retryMs = getRetryMs(currentRetry);
                log.info("Pausing " + retryMs + " ms before retrying write for "
                        + unprocessedItems.get(this.tableName).size() + " items to Dynamo.  Retries so far: "
                        + currentRetry);
                try {
                    Thread.sleep(retryMs);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
            }
        }
        if (!unprocessedItems.isEmpty()) {
            throw new RuntimeException(
                    "Out of retries trying to add items to DynamoDB table. Unprocessed items: "
                            + unprocessedItems);
        }

    } catch (AmazonServiceException e) {
        log.error("Error during storing. E.g. validation error.", e);
    }

    // track bytes and records written
    reportCounter(DYNAMO_COUNTER_RECORDS_WRITTEN, numWriteRequests);
}

From source file:com.netflix.config.sources.DynamoDbIntegrationTestHelper.java

License:Apache License

static void addElements(AmazonDynamoDB dbClient, String tableName) {
    Map<String, List<WriteRequest>> requestMap = new HashMap<String, List<WriteRequest>>(1);
    List<WriteRequest> writeList = new ArrayList<WriteRequest>(3);

    Map<String, AttributeValue> item1 = new HashMap<String, AttributeValue>(1);
    item1.put(DynamoDbConfigurationSource.defaultKeyAttribute, new AttributeValue().withS("test1"));
    item1.put(DynamoDbConfigurationSource.defaultValueAttribute, new AttributeValue().withS("val1"));
    writeList.add(new WriteRequest().withPutRequest(new PutRequest().withItem(item1)));

    HashMap<String, AttributeValue> item2 = new HashMap<String, AttributeValue>(1);
    item2.put(DynamoDbConfigurationSource.defaultKeyAttribute, new AttributeValue().withS("test2"));
    item2.put(DynamoDbConfigurationSource.defaultValueAttribute, new AttributeValue().withS("val2"));
    writeList.add(new WriteRequest().withPutRequest(new PutRequest().withItem(item2)));

    HashMap<String, AttributeValue> item3 = new HashMap<String, AttributeValue>(1);
    item3.put(DynamoDbConfigurationSource.defaultKeyAttribute, new AttributeValue().withS("test3"));
    item3.put(DynamoDbConfigurationSource.defaultValueAttribute, new AttributeValue().withS("val3"));
    writeList.add(new WriteRequest().withPutRequest(new PutRequest().withItem(item3)));

    requestMap.put(tableName, writeList);

    BatchWriteItemRequest request = new BatchWriteItemRequest().withRequestItems(requestMap);
    dbClient.batchWriteItem(request);
}

From source file:jp.xet.uncommons.spring.DynamoPersistentTokenRepository.java

License:Apache License

@Override
public void removeUserTokens(String username) {
    if (logger.isTraceEnabled()) {
        logger.trace("Remove token: username={}", username);
    }

    try {
        Condition cond = new Condition().withComparisonOperator(ComparisonOperator.EQ)
                .withAttributeValueList(new AttributeValue(username));

        ScanRequest scanRequest = new ScanRequest().withTableName(persistentLoginTable)
                .withAttributesToGet(SERIES).withScanFilter(Collections.singletonMap(USERNAME, cond));
        ScanResult result = dynamoDb.scan(scanRequest);

        List<WriteRequest> writeRequests = Lists.newArrayListWithCapacity(result.getCount());
        for (Map<String, AttributeValue> item : result.getItems()) {
            DeleteRequest deleteRequest = new DeleteRequest()
                    .withKey(Collections.singletonMap(SERIES, item.get(SERIES)));
            writeRequests.add(new WriteRequest().withDeleteRequest(deleteRequest));
        }

        Map<String, List<WriteRequest>> requestItems = Maps.newHashMapWithExpectedSize(0);
        requestItems.put(persistentLoginTable, writeRequests);

        BatchWriteItemResult batchItemResult;
        do {
            BatchWriteItemRequest batchWriteItemRequest = new BatchWriteItemRequest()
                    .withRequestItems(requestItems);
            batchItemResult = dynamoDb.batchWriteItem(batchWriteItemRequest);
            requestItems = batchItemResult.getUnprocessedItems();
            if (logger.isDebugEnabled()) {
                logger.debug("Token removed: {}", batchItemResult);
            }
        } while (batchItemResult.getUnprocessedItems().size() > 0);
    } catch (Exception e) {
        logger.error("unknown exception", e);
    }
}

From source file:org.apache.beam.sdk.io.aws.dynamodb.DynamoDBIOTestHelper.java

License:Apache License

static BatchWriteItemRequest generateBatchWriteItemRequest(String tableName, int numOfItems) {
    BatchWriteItemRequest batchWriteItemRequest = new BatchWriteItemRequest();
    batchWriteItemRequest.addRequestItemsEntry(tableName, generateWriteRequests(numOfItems));
    return batchWriteItemRequest;
}

From source file:org.apache.hadoop.dynamodb.DynamoDBClient.java

License:Open Source License

/**
 * @param roomNeeded number of bytes that writeBatch MUST make room for
 */
private BatchWriteItemResult writeBatch(Reporter reporter, final int roomNeeded) {
    final BatchWriteItemRequest batchWriteItemRequest = new BatchWriteItemRequest()
            .withRequestItems(writeBatchMap).withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL);

    RetryResult<BatchWriteItemResult> retryResult = getRetryDriver()
            .runWithRetry(new Callable<BatchWriteItemResult>() {
                @Override
                public BatchWriteItemResult call() throws UnsupportedEncodingException, InterruptedException {
                    pauseExponentially(batchWriteRetries);
                    BatchWriteItemResult result = dynamoDB.batchWriteItem(batchWriteItemRequest);

                    Map<String, List<WriteRequest>> unprocessedItems = result.getUnprocessedItems();
                    if (unprocessedItems == null || unprocessedItems.isEmpty()) {
                        batchWriteRetries = 0;
                    } else {
                        batchWriteRetries++;

                        int unprocessedItemCount = 0;
                        for (List<WriteRequest> unprocessedWriteRequests : unprocessedItems.values()) {
                            unprocessedItemCount += unprocessedWriteRequests.size();

                            int batchSizeBytes = 0;
                            for (WriteRequest request : unprocessedWriteRequests) {
                                batchSizeBytes += DynamoDBUtil
                                        .getItemSizeBytes(request.getPutRequest().getItem());
                            }

                            long maxItemsPerBatch = config.getLong(MAX_ITEMS_PER_BATCH,
                                    DEFAULT_MAX_ITEMS_PER_BATCH);
                            long maxBatchSize = config.getLong(MAX_BATCH_SIZE, DEFAULT_MAX_BATCH_SIZE);

                            if (unprocessedWriteRequests.size() >= maxItemsPerBatch
                                    || (maxBatchSize - batchSizeBytes) < roomNeeded) {
                                throw new AmazonClientException("Full list of write requests not processed");
                            }
                        }

                        double consumed = 0.0;
                        for (ConsumedCapacity consumedCapacity : result.getConsumedCapacity()) {
                            consumed = consumedCapacity.getCapacityUnits();
                        }

                        int batchSize = 0;
                        for (List<WriteRequest> writeRequests : batchWriteItemRequest.getRequestItems()
                                .values()) {
                            batchSize += writeRequests.size();
                        }

                        log.debug("BatchWriteItem attempted " + batchSize + " items, consumed " + consumed + " "
                                + "wcu, left unprocessed " + unprocessedItemCount + " items," + " " + "now at "
                                + "" + batchWriteRetries + " retries");
                    }
                    return result;
                }
            }, reporter, PrintCounter.DynamoDBWriteThrottle);

    writeBatchMap.clear();
    writeBatchMapSizeBytes = 0;

    // If some items failed to go through, add them back to the writeBatchMap
    Map<String, List<WriteRequest>> unprocessedItems = retryResult.result.getUnprocessedItems();
    for (Entry<String, List<WriteRequest>> entry : unprocessedItems.entrySet()) {
        String key = entry.getKey();
        List<WriteRequest> requests = entry.getValue();
        for (WriteRequest request : requests) {
            writeBatchMapSizeBytes += DynamoDBUtil.getItemSizeBytes(request.getPutRequest().getItem());
        }
        writeBatchMap.put(key, requests);
    }
    return retryResult.result;
}

From source file:org.kairosdb.datastore.dynamodb.WriteBuffer.java

License:Apache License

@Override
public void run() {
    while (!m_exit) {
        try {
            Thread.sleep(m_writeDelay);
        } catch (InterruptedException ignored) {
        }

        LinkedList<WriteRequest> pending = null;

        if (m_bufferCount != 0) {
            m_mutatorLock.lock();
            try {
                m_writeStats.saveWriteSize(m_bufferCount);

                pending = m_writeBuffer;
                m_writeBuffer = new LinkedList<WriteRequest>();
                m_bufferCount = 0;
                m_lockCondition.signalAll();
            } finally {
                m_mutatorLock.unlock();
            }
        }

        try {
            if (pending != null)
                m_client.batchWriteItem(new BatchWriteItemRequest().addRequestItemsEntry(m_tableName, pending));

            pending = null;
        } catch (Exception e) {
            logger.error("Error sending data to DynamoDB", e);

            m_maxBufferSize = m_maxBufferSize * 3 / 4;

            logger.error("Reducing write buffer size to " + m_maxBufferSize + ".  "
                    + "You need to increase your dynamodb capacity or change the "
                    + "kairosdb.datastore.dynamodb.write_buffer_max_size property.");
        }

        // If the batch failed we will retry it without changing the buffer size.
        while (pending != null) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException ignored) {
            }

            try {
                m_client.batchWriteItem(new BatchWriteItemRequest().addRequestItemsEntry(m_tableName, pending));
                pending = null;
            } catch (Exception e) {
                logger.error("Error resending data to DynamoDB", e);
            }
        }
    }
}