Example usage for com.amazonaws.services.dynamodbv2.model QueryRequest setExclusiveStartKey

List of usage examples for com.amazonaws.services.dynamodbv2.model QueryRequest setExclusiveStartKey

Introduction

In this page you can find the example usage for com.amazonaws.services.dynamodbv2.model QueryRequest setExclusiveStartKey.

Prototype


public void setExclusiveStartKey(java.util.Map<String, AttributeValue> exclusiveStartKey) 

Source Link

Document

The primary key of the first item that this operation will evaluate.

Usage

From source file:com.numenta.taurus.service.TaurusClient.java

License:Open Source License

/**
 * Get list of tweets for the given metric filtered by the given time range returning the
 * results as they become available asynchronously.
 *
 * @param metricName The metric name to retrieve the tweets from (required)
 * @param from       The start time (aggregated) inclusive; may be {@code null} for open start.
 * @param to         The end time (aggregated) inclusive; may be {@code null} for open end.
 * @param callback   Callback for asynchronous call. It will be called on every {@link Tweet}.
 *                   Returning {@code false} from the callback cancels the operation,
 *                   including any remaining unfetched pages.
 * @throws ObjectNotFoundException if {@code metricName} is null
 * @throws IOException if the underlying AWS client fails
 */
public void getTweets(String metricName, Date from, Date to, DataCallback<Tweet> callback)
        throws GrokException, IOException {
    if (metricName == null) {
        throw new ObjectNotFoundException("Cannot get tweets without metric name");
    }

    final TaurusDataFactory dataFactory = TaurusApplication.getInstance().getDataFactory();
    // Timestamps are stored in DynamoDB as ISO-8601 strings in UTC
    final SimpleDateFormat timestampFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss", Locale.US);
    timestampFormat.setTimeZone(TimeZone.getTimeZone("UTC"));

    // Key conditions
    Map<String, Condition> keyConditions = new HashMap<>();

    // metric_name = metricName (hash key of the index)
    Condition modelIdCond = new Condition().withComparisonOperator(ComparisonOperator.EQ)
            .withAttributeValueList(new AttributeValue(metricName));
    keyConditions.put("metric_name", modelIdCond);

    Condition timestampCondition;
    if (from != null && to != null) {
        // from <= timestamp <= to (BETWEEN is inclusive on both ends)
        timestampCondition = new Condition().withComparisonOperator(ComparisonOperator.BETWEEN)
                .withAttributeValueList(new AttributeValue().withS(timestampFormat.format(from)),
                        new AttributeValue().withS(timestampFormat.format(to)));
        keyConditions.put("agg_ts", timestampCondition);
    } else if (from != null) {
        // timestamp >= from. GE (not GT) keeps the documented inclusive semantics,
        // consistent with the BETWEEN branch above.
        timestampCondition = new Condition().withComparisonOperator(ComparisonOperator.GE)
                .withAttributeValueList(new AttributeValue().withS(timestampFormat.format(from)));
        keyConditions.put("agg_ts", timestampCondition);
    } else if (to != null) {
        // timestamp <= to. LE (not LT) for the same inclusive semantics.
        timestampCondition = new Condition().withComparisonOperator(ComparisonOperator.LE)
                .withAttributeValueList(new AttributeValue().withS(timestampFormat.format(to)));
        keyConditions.put("agg_ts", timestampCondition);
    }

    // Prepare query request
    QueryRequest query = new QueryRequest().withTableName(TWEETS_TABLE)
            .withAttributesToGet("tweet_uid", "userid", "text", "username", "agg_ts", "created_at",
                    "retweet_count")
            .withKeyConditions(keyConditions).withScanIndexForward(false)
            .withIndexName("taurus.metric_data-metric_name_index");

    QueryResult result;
    String tweetId;
    String userId;
    String userName;
    String text;
    Date created;
    Date aggregated;
    AttributeValue retweet;
    int retweetCount;
    Map<String, AttributeValue> lastKey;
    boolean canceled = false;
    try {
        do {
            // Get one page of results
            result = _awsClient.query(query);
            for (Map<String, AttributeValue> item : result.getItems()) {
                tweetId = item.get("tweet_uid").getS();
                userId = item.get("userid").getS();
                text = item.get("text").getS();
                userName = item.get("username").getS();
                aggregated = DataUtils.parseGrokDate(item.get("agg_ts").getS());
                created = DataUtils.parseGrokDate(item.get("created_at").getS());

                // "retweet_count" is optional
                retweet = item.get("retweet_count");
                if (retweet != null && retweet.getN() != null) {
                    retweetCount = Integer.parseInt(retweet.getN());
                } else {
                    retweetCount = 0;
                }
                if (!callback.onData(dataFactory.createTweet(tweetId, aggregated, created, userId, userName,
                        text, retweetCount))) {
                    // Canceled by the user: remember it so we also stop fetching pages
                    canceled = true;
                    break;
                }
            }
            // Continue with the next page until exhausted or canceled
            lastKey = result.getLastEvaluatedKey();
            query.setExclusiveStartKey(lastKey);
        } while (!canceled && lastKey != null);
    } catch (AmazonClientException e) {
        // Wraps Amazon's unchecked exception as IOException
        throw new IOException(e);
    }
}

From source file:com.numenta.taurus.service.TaurusClient.java

License:Open Source License

/**
 * Get Metric values only from DynamoDB/* w  w w . j  a v  a  2s. co m*/
 *
 * @param modelId   The model to get the data from
 * @param from      The starting timestamp
 * @param to        The ending timestamp
 * @param ascending Specifies ascending (true) or descending (false)
 * @param callback  User defined callback to receive data
 */
public void getMetricValues(@NonNull String modelId, @NonNull Date from, @NonNull Date to, boolean ascending,
        @NonNull MetricValuesCallback callback) throws GrokException, IOException {

    // Get metric from cache
    ConcurrentSkipListMap<Long, CachedMetricValue> cache = _cachedMetricValues.get(modelId);
    if (cache == null) {
        cache = new ConcurrentSkipListMap<>();
        ConcurrentSkipListMap<Long, CachedMetricValue> oldValues = _cachedMetricValues.putIfAbsent(modelId,
                cache);
        if (oldValues != null) {
            // Found old cached values
            cache = oldValues;
        }
    }

    // Try to get metric values from cache
    ConcurrentNavigableMap<Long, CachedMetricValue> cached = cache.subMap(from.getTime(), true, to.getTime(),
            true);
    if (!cached.isEmpty()) {
        Log.d(TAG, "from=" + from.getTime() + ", firstKey=" + cache.firstKey());
        Log.d(TAG, "to=" + to.getTime() + ", lastKey=" + cache.lastKey());
        // Check if we found the values in the cache
        if (!cached.isEmpty()) {
            // Return cached values sorted based on "ascending" order
            Set<Map.Entry<Long, CachedMetricValue>> values;
            if (ascending) {
                values = cached.entrySet();
            } else {
                values = cached.descendingMap().entrySet();
            }
            for (Map.Entry<Long, CachedMetricValue> metricValue : values) {
                if (!callback.onData(modelId, metricValue.getKey(), metricValue.getValue().value,
                        metricValue.getValue().anomaly)) {
                    // Canceled by the user
                    break;
                }
            }
            return;
        }
    }
    final SimpleDateFormat timestampFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss", Locale.US);
    timestampFormat.setTimeZone(TimeZone.getTimeZone("UTC"));

    // Key conditions
    Map<String, Condition> keyConditions = new HashMap<>();

    // uid = modelId
    keyConditions.put("uid", new Condition().withComparisonOperator(ComparisonOperator.EQ)
            .withAttributeValueList(new AttributeValue(modelId)));

    // timestamp >= from and timestamp <=to
    Condition timestampCondition = new Condition().withComparisonOperator(ComparisonOperator.BETWEEN);
    if (from.compareTo(to) <= 0) {
        timestampCondition.withAttributeValueList(new AttributeValue().withS(timestampFormat.format(from)),
                new AttributeValue().withS(timestampFormat.format(to)));
    } else {
        // FIXME This should not happen.
        Log.e(TAG, "TaurusClient#getMetricValues: 'from date' should not be greater than 'to date");
        timestampCondition.withAttributeValueList(new AttributeValue().withS(timestampFormat.format(to)),
                new AttributeValue().withS(timestampFormat.format(from)));

    }
    keyConditions.put("timestamp", timestampCondition);

    // Prepare query request
    QueryRequest query = new QueryRequest().withTableName(METRIC_DATA_TABLE)
            .withAttributesToGet("timestamp", "metric_value", "anomaly_score").withKeyConditions(keyConditions)
            .withScanIndexForward(ascending);

    QueryResult result;
    Map<String, AttributeValue> lastKey;
    try {
        do {
            long timestamp;
            // Get results
            result = _awsClient.query(query);
            for (Map<String, AttributeValue> item : result.getItems()) {
                CachedMetricValue metricValue = new CachedMetricValue();
                timestamp = DataUtils.parseGrokDate(item.get("timestamp").getS()).getTime();
                metricValue.value = Float.parseFloat(item.get("metric_value").getN());
                metricValue.anomaly = Float.parseFloat(item.get("anomaly_score").getN());
                cache.put(timestamp, metricValue);
                if (!callback.onData(modelId, timestamp, metricValue.value, metricValue.anomaly)) {
                    // Canceled by the user
                    break;
                }
            }
            // Make sure to get all pages
            lastKey = result.getLastEvaluatedKey();
            query.setExclusiveStartKey(lastKey);
        } while (lastKey != null);
    } catch (AmazonClientException e) {
        // Wraps Amazon's unchecked exception as IOException
        throw new IOException(e);
    }
}

From source file:com.numenta.taurus.service.TaurusClient.java

License:Open Source License

/**
 * Get hourly aggregated data for all instances for a single day for the given time range
 *
 * @param date      The date to get the data from
 * @param fromHour  The start hour (0-23)
 * @param toHour    The end hour (0-23), inclusive
 * @param ascending Specifies ascending (true) or descending (false)
 * @param callback  User defined callback to receive instance data. Returning {@code false}
 *                  from the callback cancels the operation, including any remaining
 *                  unfetched pages.
 * @throws IOException if the underlying AWS client fails
 */
public void getAllInstanceDataForDate(@NonNull Date date, int fromHour, int toHour, boolean ascending,
        @NonNull DataCallback<InstanceData> callback) throws GrokException, IOException {

    Map<String, Condition> keyConditions = new HashMap<>();

    // Use "date" as hash key (stored as "yyyy-MM-dd" in UTC)
    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd", Locale.US);
    dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
    keyConditions.put("date", new Condition().withComparisonOperator(ComparisonOperator.EQ)
            .withAttributeValueList(new AttributeValue(dateFormat.format(date))));

    // Hours are stored zero-padded to two digits so string comparison matches numeric order
    String start = fromHour > 9 ? Integer.toString(fromHour) : "0" + fromHour;
    if (fromHour == toHour) {
        // One single hour
        keyConditions.put("hour", new Condition().withComparisonOperator(ComparisonOperator.EQ)
                .withAttributeValueList(new AttributeValue(start)));
    } else {
        // Use "hour" as range key
        String end = toHour > 9 ? Integer.toString(toHour) : "0" + toHour;
        keyConditions.put("hour", new Condition().withComparisonOperator(ComparisonOperator.BETWEEN)
                .withAttributeValueList(new AttributeValue(start), new AttributeValue(end)));
    }

    // Prepare query request
    QueryRequest query = new QueryRequest().withTableName(INSTANCE_DATA_HOURLY_TABLE)
            .withAttributesToGet("instance_id", "date_hour", "anomaly_score").withKeyConditions(keyConditions)
            .withScanIndexForward(ascending).withIndexName("taurus.instance_data_hourly-date_hour_index");

    Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
    QueryResult result;
    String instanceId;
    float anomalyScore;
    float score;
    Map<String, AttributeValue> scores;
    Map<String, AttributeValue> lastKey;
    Matcher match;
    EnumSet<MetricType> metricMask;
    boolean canceled = false;
    TaurusDataFactory dataFactory = TaurusApplication.getInstance().getDataFactory();
    try {
        do {
            // Get one page of results from DynamoDB
            result = _awsClient.query(query);
            for (Map<String, AttributeValue> item : result.getItems()) {
                // Convert "date_hour" to java milliseconds time; skip rows that don't match
                match = DATE_HOUR_FORMAT_REGEX.matcher(item.get("date_hour").getS());
                if (match.matches()) {
                    calendar.clear();
                    calendar.set(Integer.parseInt(match.group(1)), Integer.parseInt(match.group(2)) - 1,
                            Integer.parseInt(match.group(3)), Integer.parseInt(match.group(4)), 0, 0);
                    instanceId = item.get("instance_id").getS();

                    // Get max anomaly scores across metric types.
                    // NOTE(review): the threshold test uses Math.abs(score) but the running max
                    // uses the raw (possibly negative) score — confirm this asymmetry is intended.
                    scores = item.get("anomaly_score").getM();
                    anomalyScore = 0;
                    double scaledScore;
                    metricMask = EnumSet.noneOf(MetricType.class);
                    for (Map.Entry<String, AttributeValue> entry : scores.entrySet()) {
                        score = Float.parseFloat(entry.getValue().getN());
                        scaledScore = DataUtils.logScale(Math.abs(score));
                        if (scaledScore >= TaurusApplication.getYellowBarFloor()) {
                            metricMask.add(MetricType.valueOf(entry.getKey()));
                        }
                        anomalyScore = Math.max(score, anomalyScore);
                    }

                    if (!callback.onData(dataFactory.createInstanceData(instanceId, AggregationType.Day,
                            calendar.getTimeInMillis(), anomalyScore, metricMask))) {
                        // Canceled by the user: remember it so we also stop fetching pages
                        canceled = true;
                        break;
                    }
                }
            }
            // Continue with the next page until exhausted or canceled
            lastKey = result.getLastEvaluatedKey();
            query.setExclusiveStartKey(lastKey);
        } while (!canceled && lastKey != null);
    } catch (AmazonClientException e) {
        // Wraps Amazon's unchecked exception as IOException
        throw new IOException(e);
    }
}