Example usage for com.google.common.util.concurrent AtomicLongMap incrementAndGet

Introduction

On this page you can find example usages of com.google.common.util.concurrent AtomicLongMap.incrementAndGet, drawn from open-source projects.

Prototype

public long incrementAndGet(K key) 

Document

Increments by one the value currently associated with key, and returns the new value.
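
Before the real-world examples below, here is a minimal, self-contained sketch of the method's semantics. All calls are part of Guava's public AtomicLongMap API; the class name and keys are illustrative only.

import com.google.common.util.concurrent.AtomicLongMap;

public class IncrementAndGetSketch {
    public static void main(String[] args) {
        // Absent keys behave as if they were mapped to zero.
        AtomicLongMap<String> counts = AtomicLongMap.create();

        // Atomically adds one to the key's value and returns the new value.
        long first = counts.incrementAndGet("apple"); // 1
        long second = counts.incrementAndGet("apple"); // 2
        counts.incrementAndGet("banana"); // 1

        System.out.println(first + ", " + second); // 1, 2
        System.out.println(counts.get("apple")); // 2
        System.out.println(counts.sum()); // 3, the total across all keys
    }
}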

Usage

From source file: co.mitro.core.util.RpcLogReader.java

public static void main(String[] args) throws IOException {
    // Count RPC calls per endpoint and track transaction spans across the logs.
    AtomicLongMap<String> counter = AtomicLongMap.<String>create();
    Map<String, Span> txnLength = new HashMap<>();
    Span duration = new Span();

    for (int i = 0; i < args.length; ++i) {
        String filename = args[i];
        System.err.println("Reading file: " + filename);
        JsonRecordReader rr = JsonRecordReader.MakeFromFilename(filename);
        JsonRecordReader.JsonLog log;
        try {
            while (null != (log = rr.readJson())) {
                counter.incrementAndGet(log.metadata.endpoint);
                duration.addTime(log.metadata.timestamp);
                if (log.metadata.endpoint.endsWith("BeginTransaction")
                        || log.payload.implicitBeginTransaction) {
                    txnLength.put((String) ((Map) log.metadata.response).get("transactionId"),
                            new Span(log.metadata.timestamp));
                } else if (!Strings.isNullOrEmpty(log.payload.transactionId)) {
                    txnLength.get(log.payload.transactionId).addTime(log.metadata.timestamp);
                }
            }
        } catch (EOFException e) {
            System.err.println("unexpected end of file; skipping");
        }
    }
    System.out.println("total duration: " + duration.duration());
    for (String k : counter.asMap().keySet()) {
        System.out.println(k + ": " + counter.get(k));
    }
    List<Long> times = new ArrayList<>();

    for (Span s : txnLength.values()) {
        times.add(s.duration());
    }
    Collections.sort(times);
    double meanTime = 0;
    for (Long l : times) {
        meanTime += l;
    }

    // times and txnLength have the same size, since times was built from txnLength's values.
    meanTime /= txnLength.size();
    double stdDev = 0;
    for (Long l : times) {
        stdDev += Math.pow((l - meanTime), 2);
    }
    stdDev /= txnLength.size();
    stdDev = Math.pow(stdDev, 0.5);

    // percentiles
    long PERCENTILES = 10;
    for (int i = 0; i <= PERCENTILES; i += 1) {
        System.out.println("percentile " + i * PERCENTILES + ": "
                + times.get((int) ((times.size() - 1) * i / PERCENTILES)));
    }

    System.out.println("write txns:");
    System.out.println("num: " + txnLength.size() + ", mean:" + meanTime + ", stddev:" + stdDev);

}

From source file: com.cloudera.oryx.app.mllib.rdf.RDFUpdate.java

/**
 * @param trainPointData data to run down trees
 * @param model random decision forest model to count on
 * @return maps of node IDs to the count of training examples that reached that node, one
 *  per tree in the model
 * @see #predictorExampleCounts(JavaRDD,RandomForestModel)
 */
private static List<Map<Integer, Long>> treeNodeExampleCounts(JavaRDD<LabeledPoint> trainPointData,
        final RandomForestModel model) {
    List<AtomicLongMap<Integer>> maps = trainPointData
            .mapPartitions(new FlatMapFunction<Iterator<LabeledPoint>, List<AtomicLongMap<Integer>>>() {
                @Override
                public Iterable<List<AtomicLongMap<Integer>>> call(Iterator<LabeledPoint> data) {
                    DecisionTreeModel[] trees = model.trees();
                    int numTrees = trees.length;
                    List<AtomicLongMap<Integer>> treeNodeIDCounts = new ArrayList<>(numTrees);
                    for (int i = 0; i < numTrees; i++) {
                        treeNodeIDCounts.add(AtomicLongMap.<Integer>create());
                    }
                    while (data.hasNext()) {
                        LabeledPoint datum = data.next();
                        double[] featureVector = datum.features().toArray();
                        for (int i = 0; i < trees.length; i++) {
                            DecisionTreeModel tree = trees[i];
                            AtomicLongMap<Integer> nodeIDCount = treeNodeIDCounts.get(i);
                            org.apache.spark.mllib.tree.model.Node node = tree.topNode();
                            // This logic cloned from Node.predict:
                            while (!node.isLeaf()) {
                                // Count node ID
                                nodeIDCount.incrementAndGet(node.id());
                                Split split = node.split().get();
                                int featureIndex = split.feature();
                                node = nextNode(featureVector, node, split, featureIndex);
                            }
                            nodeIDCount.incrementAndGet(node.id());
                        }
                    }
                    return Collections.singleton(treeNodeIDCounts);
                }
            })
            .reduce(new Function2<List<AtomicLongMap<Integer>>, List<AtomicLongMap<Integer>>, List<AtomicLongMap<Integer>>>() {
                @Override
                public List<AtomicLongMap<Integer>> call(List<AtomicLongMap<Integer>> a,
                        List<AtomicLongMap<Integer>> b) {
                    Preconditions.checkArgument(a.size() == b.size());
                    for (int i = 0; i < a.size(); i++) {
                        merge(a.get(i), b.get(i));
                    }
                    return a;
                }
            });

    List<Map<Integer, Long>> result = new ArrayList<>(maps.size());
    for (AtomicLongMap<Integer> map : maps) {
        result.add(map.asMap());
    }
    return result;
}
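
Both RDFUpdate methods descend each tree with a nextNode helper that is not part of this excerpt. As a rough reconstruction only, here is a sketch of what such a helper might look like against Spark 1.x's MLlib tree API, assuming an import of org.apache.spark.mllib.tree.configuration.FeatureType (the actual Oryx implementation may differ):

private static org.apache.spark.mllib.tree.model.Node nextNode(double[] featureVector,
        org.apache.spark.mllib.tree.model.Node node, Split split, int featureIndex) {
    double featureValue = featureVector[featureIndex];
    if (split.featureType().equals(FeatureType.Continuous())) {
        // Continuous feature: go left when the value is at or below the threshold.
        return featureValue <= split.threshold() ? node.leftNode().get() : node.rightNode().get();
    }
    // Categorical feature: go left when the category is in the split's left set.
    return split.categories().contains(featureValue) ? node.leftNode().get() : node.rightNode().get();
}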

From source file: com.cloudera.oryx.app.mllib.rdf.RDFUpdate.java

/**
 * @param trainPointData data to run down trees
 * @param model random decision forest model to count on
 * @return map of predictor index to the number of training examples that reached a
 *  node whose decision is based on that feature. The index is among predictors, not all
 *  features, since there are fewer predictors than features. That is, the index will
 *  match the one used in the {@link RandomForestModel}.
 */
private static Map<Integer, Long> predictorExampleCounts(JavaRDD<LabeledPoint> trainPointData,
        final RandomForestModel model) {
    return trainPointData.mapPartitions(new FlatMapFunction<Iterator<LabeledPoint>, AtomicLongMap<Integer>>() {
        @Override
        public Iterable<AtomicLongMap<Integer>> call(Iterator<LabeledPoint> data) {
            AtomicLongMap<Integer> featureIndexCount = AtomicLongMap.create();
            while (data.hasNext()) {
                LabeledPoint datum = data.next();
                double[] featureVector = datum.features().toArray();
                for (DecisionTreeModel tree : model.trees()) {
                    org.apache.spark.mllib.tree.model.Node node = tree.topNode();
                    // This logic cloned from Node.predict:
                    while (!node.isLeaf()) {
                        Split split = node.split().get();
                        int featureIndex = split.feature();
                        // Count feature
                        featureIndexCount.incrementAndGet(featureIndex);
                        node = nextNode(featureVector, node, split, featureIndex);
                    }
                }
            }
            return Collections.singleton(featureIndexCount);
        }
    }).reduce(new Function2<AtomicLongMap<Integer>, AtomicLongMap<Integer>, AtomicLongMap<Integer>>() {
        @Override
        public AtomicLongMap<Integer> call(AtomicLongMap<Integer> a, AtomicLongMap<Integer> b) {
            return merge(a, b);
        }
    }).asMap();
}
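
Both RDFUpdate examples also reduce their per-partition maps with a merge helper that the excerpt does not show. A minimal sketch of how such a helper could be written, assuming it folds the second map into the first with AtomicLongMap.addAndGet (the body here is an assumption; only the call sites above are from the source):

private static <K> AtomicLongMap<K> merge(AtomicLongMap<K> a, AtomicLongMap<K> b) {
    // addAndGet atomically adds the delta to the value for the key, treating
    // absent keys as zero, so this accumulates b's counts into a.
    for (Map.Entry<K, Long> entry : b.asMap().entrySet()) {
        a.addAndGet(entry.getKey(), entry.getValue());
    }
    return a;
}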