Example usage for com.google.common.collect Multiset entrySet

List of usage examples for com.google.common.collect Multiset entrySet

Introduction

On this page you can find example usage of com.google.common.collect Multiset entrySet.

Prototype

Set<Entry<E>> entrySet();

Document

Returns a view of the contents of this multiset, grouped into Multiset.Entry instances, each providing an element of the multiset and the count of that element.
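
A minimal, self-contained sketch (not taken from any of the projects below) showing how entrySet() exposes each distinct element together with its count:

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class MultisetEntrySetExample {
    public static void main(String[] args) {
        // Count occurrences of each word.
        Multiset<String> words = HashMultiset.create();
        words.add("apple");
        words.add("banana");
        words.add("apple");

        // entrySet() yields one Multiset.Entry per distinct element,
        // pairing the element with its count.
        for (Multiset.Entry<String> entry : words.entrySet()) {
            System.out.println(entry.getElement() + ": " + entry.getCount());
        }
        // Prints, in unspecified order: "apple: 2" and "banana: 1".
    }
}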

Usage

From source file:com.dssmp.agent.tailing.FirehoseSender.java

@Override
protected BufferSendResult<FirehoseRecord> attemptSend(RecordBuffer<FirehoseRecord> buffer) {
    activeBatchPutCalls.incrementAndGet();
    IMetricsScope metrics = agentContext.beginScope();
    metrics.addDimension(Metrics.DESTINATION_DIMENSION, "DeliveryStream:" + getDestination());
    try {
        BufferSendResult<FirehoseRecord> sendResult = null;
        List<Record> requestRecords = new ArrayList<>();
        for (FirehoseRecord data : buffer) {
            Record record = new Record();
            record.setData(data.data());
            requestRecords.add(record);
        }
        PutRecordBatchRequest request = new PutRecordBatchRequest();
        request.setRecords(requestRecords);
        request.setDeliveryStreamName(getDestination());
        PutRecordBatchResult result = null;
        Stopwatch timer = Stopwatch.createStarted();
        totalBatchPutCalls.incrementAndGet();
        try {
            logger.trace("{}: Sending buffer {} to firehose {}...", flow.getId(), buffer, getDestination());
            metrics.addCount(RECORDS_ATTEMPTED_METRIC, requestRecords.size());
            result = agentContext.getFirehoseClient().putRecordBatch(request);
            metrics.addCount(SERVICE_ERRORS_METRIC, 0);
        } catch (AmazonServiceException e) {
            metrics.addCount(SERVICE_ERRORS_METRIC, 1);
            totalBatchPutServiceErrors.incrementAndGet();
            throw e;
        } catch (Exception e) {
            metrics.addCount(SERVICE_ERRORS_METRIC, 1);
            totalBatchPutOtherErrors.incrementAndGet();
            throw e;
        } finally {
            totalBatchPutLatency.addAndGet(timer.elapsed(TimeUnit.MILLISECONDS));
        }
        if (sendResult == null) {
            List<Integer> sentRecords = new ArrayList<>(requestRecords.size());
            Multiset<String> errors = HashMultiset.<String>create();
            int index = 0;
            long totalBytesSent = 0;
            for (PutRecordBatchResponseEntry responseEntry : result.getRequestResponses()) {
                Record record = requestRecords.get(index);
                if (responseEntry.getErrorCode() == null) {
                    sentRecords.add(index);
                    totalBytesSent += record.getData().limit();
                } else {
                    logger.trace("{}:{} Record {} returned error code {}: {}", flow.getId(), buffer, index,
                            responseEntry.getErrorCode(), responseEntry.getErrorMessage());
                    errors.add(responseEntry.getErrorCode());
                }
                ++index;
            }
            if (sentRecords.size() == requestRecords.size()) {
                sendResult = BufferSendResult.succeeded(buffer);
            } else {
                buffer = buffer.remove(sentRecords);
                sendResult = BufferSendResult.succeeded_partially(buffer, requestRecords.size());
            }
            metrics.addData(BYTES_SENT_METRIC, totalBytesSent, StandardUnit.Bytes);
            int failedRecordCount = requestRecords.size() - sentRecords.size();
            metrics.addCount(RECORD_ERRORS_METRIC, failedRecordCount);
            logger.debug("{}:{} Records sent firehose {}: {}. Failed records: {}", flow.getId(), buffer,
                    getDestination(), sentRecords.size(), failedRecordCount);
            totalRecordsAttempted.addAndGet(requestRecords.size());
            totalRecordsSent.addAndGet(sentRecords.size());
            totalRecordsFailed.addAndGet(failedRecordCount);

            if (logger.isDebugEnabled() && !errors.isEmpty()) {
                synchronized (totalErrors) {
                    StringBuilder strErrors = new StringBuilder();
                    for (Multiset.Entry<String> err : errors.entrySet()) {
                        AtomicLong counter = totalErrors.get(err.getElement());
                        if (counter == null)
                            totalErrors.put(err.getElement(), counter = new AtomicLong());
                        counter.addAndGet(err.getCount());
                        if (strErrors.length() > 0)
                            strErrors.append(", ");
                        strErrors.append(err.getElement()).append(": ").append(err.getCount());
                    }
                    logger.debug("{}:{} Errors from firehose {}: {}", flow.getId(), buffer,
                            flow.getDestination(), strErrors.toString());
                }
            }
        }
        return sendResult;
    } finally {
        metrics.commit();
        activeBatchPutCalls.decrementAndGet();
    }
}

From source file:com.amazon.kinesis.streaming.agent.tailing.KinesisSender.java

@Override
protected BufferSendResult<KinesisRecord> attemptSend(RecordBuffer<KinesisRecord> buffer) {
    activePutRecordsCalls.incrementAndGet();
    IMetricsScope metrics = agentContext.beginScope();
    metrics.addDimension(Metrics.DESTINATION_DIMENSION, "KinesisStream:" + getDestination());
    try {
        BufferSendResult<KinesisRecord> sendResult = null;
        List<PutRecordsRequestEntry> requestRecords = new ArrayList<>();
        for (KinesisRecord data : buffer) {
            PutRecordsRequestEntry record = new PutRecordsRequestEntry();
            record.setData(data.data());
            record.setPartitionKey(data.partitionKey());
            requestRecords.add(record);
        }
        PutRecordsRequest request = new PutRecordsRequest();
        request.setStreamName(getDestination());
        request.setRecords(requestRecords);
        PutRecordsResult result = null;
        Stopwatch timer = Stopwatch.createStarted();
        totalPutRecordsCalls.incrementAndGet();
        try {
            logger.trace("{}: Sending buffer {} to kinesis stream {}...", flow.getId(), buffer,
                    getDestination());
            metrics.addCount(RECORDS_ATTEMPTED_METRIC, requestRecords.size());
            result = agentContext.getKinesisClient().putRecords(request);
            metrics.addCount(SERVICE_ERRORS_METRIC, 0);
        } catch (AmazonServiceException e) {
            metrics.addCount(SERVICE_ERRORS_METRIC, 1);
            totalPutRecordsServiceErrors.incrementAndGet();
            throw e;
        } catch (Exception e) {
            metrics.addCount(SERVICE_ERRORS_METRIC, 1);
            totalPutRecordsOtherErrors.incrementAndGet();
            throw e;
        } finally {
            totalPutRecordsLatency.addAndGet(timer.elapsed(TimeUnit.MILLISECONDS));
        }
        if (sendResult == null) {
            List<Integer> sentRecords = new ArrayList<>(requestRecords.size());
            Multiset<String> errors = HashMultiset.<String>create();
            int index = 0;
            long totalBytesSent = 0;
            for (final PutRecordsResultEntry responseEntry : result.getRecords()) {
                final PutRecordsRequestEntry record = requestRecords.get(index);
                if (responseEntry.getErrorCode() == null) {
                    sentRecords.add(index);
                    totalBytesSent += record.getData().limit();
                } else {
                    logger.trace("{}:{} Record {} returned error code {}: {}", flow.getId(), buffer, index,
                            responseEntry.getErrorCode(), responseEntry.getErrorMessage());
                    errors.add(responseEntry.getErrorCode());
                }
                ++index;
            }
            if (sentRecords.size() == requestRecords.size()) {
                sendResult = BufferSendResult.succeeded(buffer);
            } else {
                buffer = buffer.remove(sentRecords);
                sendResult = BufferSendResult.succeeded_partially(buffer, requestRecords.size());
            }
            metrics.addData(BYTES_SENT_METRIC, totalBytesSent, StandardUnit.Bytes);
            int failedRecordCount = requestRecords.size() - sentRecords.size();
            metrics.addCount(RECORD_ERRORS_METRIC, failedRecordCount);
            logger.debug("{}:{} Records sent to kinesis stream {}: {}. Failed records: {}", flow.getId(),
                    buffer, getDestination(), sentRecords.size(), failedRecordCount);
            totalRecordsAttempted.addAndGet(requestRecords.size());
            totalRecordsSent.addAndGet(sentRecords.size());
            totalRecordsFailed.addAndGet(failedRecordCount);

            if (logger.isDebugEnabled() && !errors.isEmpty()) {
                synchronized (totalErrors) {
                    StringBuilder strErrors = new StringBuilder();
                    for (Multiset.Entry<String> err : errors.entrySet()) {
                        AtomicLong counter = totalErrors.get(err.getElement());
                        if (counter == null)
                            totalErrors.put(err.getElement(), counter = new AtomicLong());
                        counter.addAndGet(err.getCount());
                        if (strErrors.length() > 0)
                            strErrors.append(", ");
                        strErrors.append(err.getElement()).append(": ").append(err.getCount());
                    }
                    logger.debug("{}:{} Errors from kinesis stream {}: {}", flow.getId(), buffer,
                            flow.getDestination(), strErrors.toString());
                }
            }
        }
        return sendResult;
    } finally {
        metrics.commit();
        activePutRecordsCalls.decrementAndGet();
    }
}
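
The Firehose and Kinesis senders above share the same idiom: tally per-error-code counts in a HashMultiset while scanning the batch response, then walk entrySet() once to build a single aggregated log line. A stripped-down sketch of that idiom, using hypothetical error codes:

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class ErrorTallyExample {
    public static void main(String[] args) {
        // One add() per failed record, keyed by its error code.
        Multiset<String> errors = HashMultiset.create();
        errors.add("ServiceUnavailableException");
        errors.add("ThrottlingException");
        errors.add("ThrottlingException");

        // One entry per distinct code; getCount() aggregates the duplicates.
        StringBuilder summary = new StringBuilder();
        for (Multiset.Entry<String> err : errors.entrySet()) {
            if (summary.length() > 0) {
                summary.append(", ");
            }
            summary.append(err.getElement()).append(": ").append(err.getCount());
        }
        // e.g. ServiceUnavailableException: 1, ThrottlingException: 2
        System.out.println(summary);
    }
}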

From source file:org.dllearner.algorithms.qtl.experiments.PRConvergenceExperiment.java

private RDFResourceTree applyBaseLine(ExamplesWrapper examples, Baseline baselineApproach) {
    logger.info("Computing baseline...");
    Collection<RDFResourceTree> posExamples = examples.posExamplesMapping.values();
    Collection<RDFResourceTree> negExamples = examples.negExamplesMapping.values();

    RDFResourceTree solution = null;

    switch (baselineApproach) {
    case RANDOM:// 1.
        String query = "SELECT ?cls WHERE {?cls a owl:Class .} ORDER BY RAND() LIMIT 1";
        QueryExecution qe = qef.createQueryExecution(query);
        ResultSet rs = qe.execSelect();
        if (rs.hasNext()) {
            QuerySolution qs = rs.next();
            Resource cls = qs.getResource("cls");
            solution = new RDFResourceTree();
            solution.addChild(new RDFResourceTree(cls.asNode()), RDF.type.asNode());
        }
        break;
    case MOST_POPULAR_TYPE_IN_KB:// 2.
        query = "SELECT ?cls WHERE {?cls a owl:Class . ?s a ?cls .} ORDER BY DESC(COUNT(?s)) LIMIT 1";
        qe = qef.createQueryExecution(query);
        rs = qe.execSelect();
        if (rs.hasNext()) {
            QuerySolution qs = rs.next();
            Resource cls = qs.getResource("cls");
            solution = new RDFResourceTree();
            solution.addChild(new RDFResourceTree(cls.asNode()), RDF.type.asNode());
        }
        break;
    case MOST_FREQUENT_TYPE_IN_EXAMPLES:// 3.
        Multiset<Node> types = HashMultiset.create();
        for (RDFResourceTree ex : posExamples) {
            List<RDFResourceTree> children = ex.getChildren(RDF.type.asNode());
            for (RDFResourceTree child : children) {
                types.add(child.getData());
            }
        }
        Node mostFrequentType = Ordering.natural().onResultOf(new Function<Multiset.Entry<Node>, Integer>() {
            @Override
            public Integer apply(Multiset.Entry<Node> entry) {
                return entry.getCount();
            }
        }).max(types.entrySet()).getElement();
        solution = new RDFResourceTree();
        solution.addChild(new RDFResourceTree(mostFrequentType), RDF.type.asNode());
        break;
    case MOST_FREQUENT_EDGE_IN_EXAMPLES:// 4.
        Multiset<Pair<Node, Node>> pairs = HashMultiset.create();
        for (RDFResourceTree ex : posExamples) {
            SortedSet<Node> edges = ex.getEdges();
            for (Node edge : edges) {
                List<RDFResourceTree> children = ex.getChildren(edge);
                for (RDFResourceTree child : children) {
                    pairs.add(new Pair<>(edge, child.getData()));
                }
            }
        }
        Pair<Node, Node> mostFrequentPair = Ordering.natural()
                .onResultOf(new Function<Multiset.Entry<Pair<Node, Node>>, Integer>() {
                    @Override
                    public Integer apply(Multiset.Entry<Pair<Node, Node>> entry) {
                        return entry.getCount();
                    }
                }).max(pairs.entrySet()).getElement();
        solution = new RDFResourceTree();
        solution.addChild(new RDFResourceTree(mostFrequentPair.getValue()), mostFrequentPair.getKey());
        break;
    case MOST_INFORMATIVE_EDGE_IN_EXAMPLES:
        // get all p-o in pos examples
        Multiset<Pair<Node, Node>> edgeObjectPairs = HashMultiset.create();
        for (RDFResourceTree ex : posExamples) {
            SortedSet<Node> edges = ex.getEdges();
            for (Node edge : edges) {
                List<RDFResourceTree> children = ex.getChildren(edge);
                for (RDFResourceTree child : children) {
                    edgeObjectPairs.add(new Pair<>(edge, child.getData()));
                }
            }
        }

        double bestAccuracy = -1;
        solution = new RDFResourceTree();

        for (Pair<Node, Node> pair : edgeObjectPairs.elementSet()) {
            Node edge = pair.getKey();
            Node childValue = pair.getValue();

            // compute accuracy
            int tp = edgeObjectPairs.count(pair);
            int fn = posExamples.size() - tp;
            int fp = 0;
            for (RDFResourceTree ex : negExamples) { // compute false positives
                List<RDFResourceTree> children = ex.getChildren(edge);
                if (children != null) {
                    for (RDFResourceTree child : children) {
                        if (child.getData().equals(childValue)) {
                            fp++;
                            break;
                        }
                    }
                }
            }
            int tn = negExamples.size() - fp;

            double accuracy = Heuristics.getPredictiveAccuracy(posExamples.size(), negExamples.size(), tp, tn,
                    1.0);
            // update best solution
            if (accuracy >= bestAccuracy) {
                solution = new RDFResourceTree();
                solution.addChild(new RDFResourceTree(childValue), edge);
                bestAccuracy = accuracy;
            }
        }
        break;
    case LGG:
        LGGGenerator lggGenerator = new LGGGeneratorSimple();
        solution = lggGenerator.getLGG(Lists.newArrayList(posExamples));
        break;
    default:
        break;
    }
    logger.info("Baseline solution:\n" + owlRenderer.render(QueryTreeUtils.toOWLClassExpression(solution)));

    return solution;
}
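
Both this and the QTLEvaluation example further below find the highest-count entry by running Ordering.natural().onResultOf(...) over entrySet(), anonymous-class boilerplate that predates Java 8. A minimal sketch of two more compact alternatives (assuming Java 8+, and Guava 11+ for Multisets.copyHighestCountFirst):

import java.util.Collections;
import java.util.Comparator;
import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;
import com.google.common.collect.Multisets;

public class MostFrequentElementExample {
    public static void main(String[] args) {
        Multiset<String> types = HashMultiset.create();
        Collections.addAll(types, "Person", "Place", "Person");

        // Option 1: Collections.max over entrySet() with a count comparator.
        String mostFrequent = Collections.max(types.entrySet(),
                Comparator.comparingInt((Multiset.Entry<String> e) -> e.getCount())).getElement();

        // Option 2: copyHighestCountFirst orders entries by descending count,
        // so the first entry holds the most frequent element.
        String alsoMostFrequent = Multisets.copyHighestCountFirst(types)
                .entrySet().iterator().next().getElement();

        System.out.println(mostFrequent + " " + alsoMostFrequent); // Person Person
    }
}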

From source file:com.google.googlejavaformat.java.JavaInputAstVisitor.java

/** Returns true if {@code atLeastM} of the expressions in the given column are the same kind. */
private static boolean expressionsAreParallel(List<List<ExpressionTree>> rows, int column, int atLeastM) {
    Multiset<Tree.Kind> nodeTypes = HashMultiset.create();
    for (List<? extends ExpressionTree> row : rows) {
        if (column >= row.size()) {
            continue;
        }
        nodeTypes.add(row.get(column).getKind());
    }
    for (Multiset.Entry<Tree.Kind> nodeType : nodeTypes.entrySet()) {
        if (nodeType.getCount() >= atLeastM) {
            return true;
        }
    }
    return false;
}

From source file:org.dllearner.algorithms.qtl.experiments.QTLEvaluation.java

private RDFResourceTree applyBaseLine(ExamplesWrapper examples, Baseline baselineApproach) {
    Collection<RDFResourceTree> posExamples = examples.posExamplesMapping.values();
    Collection<RDFResourceTree> negExamples = examples.negExamplesMapping.values();

    switch (baselineApproach) {
    case RANDOM:// 1.
        String query = "SELECT ?cls WHERE {?cls a owl:Class .} ORDER BY RAND() LIMIT 1";
        QueryExecution qe = qef.createQueryExecution(query);
        ResultSet rs = qe.execSelect();
        if (rs.hasNext()) {
            QuerySolution qs = rs.next();
            Resource cls = qs.getResource("cls");
            RDFResourceTree solution = new RDFResourceTree();
            solution.addChild(new RDFResourceTree(cls.asNode()), RDF.type.asNode());
            return solution;
        } // note: with no break, execution falls through to the next case when the query returns no row
    case MOST_POPULAR_TYPE_IN_KB:// 2.
        query = "SELECT ?cls WHERE {?cls a owl:Class . ?s a ?cls .} ORDER BY DESC(COUNT(?s)) LIMIT 1";
        qe = qef.createQueryExecution(query);
        rs = qe.execSelect();
        if (rs.hasNext()) {
            QuerySolution qs = rs.next();
            Resource cls = qs.getResource("cls");
            RDFResourceTree solution = new RDFResourceTree();
            solution.addChild(new RDFResourceTree(cls.asNode()), RDF.type.asNode());
            return solution;
        }
    case MOST_FREQUENT_TYPE_IN_EXAMPLES:// 3.
        Multiset<Node> types = HashMultiset.create();
        for (RDFResourceTree ex : posExamples) {
            List<RDFResourceTree> children = ex.getChildren(RDF.type.asNode());
            for (RDFResourceTree child : children) {
                types.add(child.getData());
            }
        }
        Node mostFrequentType = Ordering.natural().onResultOf(new Function<Multiset.Entry<Node>, Integer>() {
            @Override
            public Integer apply(Multiset.Entry<Node> entry) {
                return entry.getCount();
            }
        }).max(types.entrySet()).getElement();
        RDFResourceTree solution = new RDFResourceTree();
        solution.addChild(new RDFResourceTree(mostFrequentType), RDF.type.asNode());
        return solution;
    case MOST_FREQUENT_EDGE_IN_EXAMPLES:// 4.
    {
        Multiset<Pair<Node, Node>> pairs = HashMultiset.create();
        for (RDFResourceTree ex : posExamples) {
            SortedSet<Node> edges = ex.getEdges();
            for (Node edge : edges) {
                List<RDFResourceTree> children = ex.getChildren(edge);
                for (RDFResourceTree child : children) {
                    pairs.add(new Pair<>(edge, child.getData()));
                }
            }
        }
        Pair<Node, Node> mostFrequentPair = Ordering.natural()
                .onResultOf(new Function<Multiset.Entry<Pair<Node, Node>>, Integer>() {
                    @Override
                    public Integer apply(Multiset.Entry<Pair<Node, Node>> entry) {
                        return entry.getCount();
                    }
                }).max(pairs.entrySet()).getElement();
        solution = new RDFResourceTree();
        solution.addChild(new RDFResourceTree(mostFrequentPair.getValue()), mostFrequentPair.getKey());
        return solution;
    }
    case MOST_INFORMATIVE_EDGE_IN_EXAMPLES:
        // get all p-o in pos examples
        Multiset<Pair<Node, Node>> edgeObjectPairs = HashMultiset.create();
        for (RDFResourceTree ex : posExamples) {
            SortedSet<Node> edges = ex.getEdges();
            for (Node edge : edges) {
                List<RDFResourceTree> children = ex.getChildren(edge);
                for (RDFResourceTree child : children) {
                    edgeObjectPairs.add(new Pair<>(edge, child.getData()));
                }
            }
        }

        double bestAccuracy = -1;
        solution = new RDFResourceTree();

        for (Pair<Node, Node> pair : edgeObjectPairs.elementSet()) {
            Node edge = pair.getKey();
            Node childValue = pair.getValue();

            // compute accuracy
            int tp = edgeObjectPairs.count(pair);
            int fn = posExamples.size() - tp;
            int fp = 0;
            for (RDFResourceTree ex : negExamples) { // compute false positives
                List<RDFResourceTree> children = ex.getChildren(edge);
                if (children != null) {
                    for (RDFResourceTree child : children) {
                        if (child.getData().equals(childValue)) {
                            fp++;
                            break;
                        }
                    }
                }
            }
            int tn = negExamples.size() - fp;

            double accuracy = Heuristics.getPredictiveAccuracy(posExamples.size(), negExamples.size(), tp, tn,
                    1.0);
            // update best solution
            if (accuracy >= bestAccuracy) {
                solution = new RDFResourceTree();
                solution.addChild(new RDFResourceTree(childValue), edge);
                bestAccuracy = accuracy;
            }
        }
        return solution;
    case LGG:
        LGGGenerator lggGenerator = new LGGGeneratorSimple();
        RDFResourceTree lgg = lggGenerator.getLGG(Lists.newArrayList(posExamples));
        return lgg;

    default:
        break;

    }
    return null;
}

From source file:tufts.vue.ds.DataTree.java

private void addMissingRowsToMap(final LWMap map) {
    // todo: we'll want to merge some of this code w/DropHandler code, as
    // this is somewhat of a special case of doing a drop
    final List<DataRow> newRows = new ArrayList();

    for (DataNode n : mAllRowsNode.getChildren()) {
        if (!n.isMapPresent()) {
            //Log.debug("ADDING TO MAP: " + n);
            newRows.add(n.getRow());
        }
    }

    final List<LWComponent> newRowNodes = DataAction.makeRowNodes(mSchema, newRows);

    Multiset<LWComponent> targetsUsed = null;
    List<LWLink> linksAdded = Collections.EMPTY_LIST;

    try {
        final Object[] result = DataAction.addDataLinksForNodes(map, newRowNodes, (Field) null);
        targetsUsed = (Multiset) result[0];
        linksAdded = (List) result[1];
    } catch (Throwable t) {
        Log.error("problem creating links on " + map + " for new nodes: " + Util.tags(newRowNodes), t);
    }

    if (DEBUG.Enabled && targetsUsed != null) {
        final Set entries = targetsUsed.entrySet();
        Log.debug("TARGETS USED: " + targetsUsed.size() + " / " + entries.size());
        Util.dump(entries);
    }

    if (newRowNodes.size() > 0) {

        // we cannot run setXYByClustering before adding to the map w/out refactoring projectNodes
        // (or for that matter, centroidCluster, which also uses projectNodes).  E.g. -- we
        // can't use this as an initial fallback/failsafe.
        //tufts.vue.VueUtil.setXYByClustering(map, nodes);

        //-----------------------------------------------------------------------------
        // add all the "missing" / newly-arrived rows to the map
        //-----------------------------------------------------------------------------
        map.getOrCreateLayer("New Data Nodes").addChildren(newRowNodes);

        // PROBLEM/BUG: the above add to a special layer appears to be failing (to
        // the user) sometimes and the nodes wind up in the same layer as the
        // relating nodes -- this is when ArrangeAction.clusterLinked is then used
        // below.  It does some reparenting which it needs to do in case nodes had
        // been collected as children, but in some cases, it doesn't need doing and
        // ends up just pulling the nodes right back out of the "New Data Nodes"
        // layer after we just moved them there.
        // -----------------------------------------------------------------------------

        if (newRowNodes.size() > NEW_ROW_NODE_MAP_REORG_THRESHOLD) {

            if (targetsUsed.size() > 0) { // Note: won't currently trigger for cross-schema joins, as targetsUsed aren't reported

                //-------------------------------------------------------
                // RE-CLUSTER THE ENTIRE MAP
                //-------------------------------------------------------

                // If there was more than one value-node link per row-node created (e.g.,
                // multiple sets of value nodes are already on the map), prioritizing the
                // targets with the most links first spreads the nodes out the most, as the
                // targets with the fewest links would at least be guaranteed to get some of
                // the row nodes.  Using the push-method in this case would be far too slow --
                // we'd have to push based on every row node.

                final List<Multiset.Entry<LWComponent>> ordered = ByDecreasingFrequency
                        .sortedCopy(targetsUsed.entrySet());

                for (Multiset.Entry<LWComponent> e : ordered) {
                    tufts.vue.Actions.ArrangeAction.clusterLinked(e.getElement());
                }

                // note: if we wished, we could also decide here
                // what to cluster on based on what targets are
                // selected (currently have the selection bit set)

            } else {
                // fallback: randomly layout anything that isn't first XY clustered:
                tufts.vue.LayoutAction.random.act(tufts.vue.VueUtil.setXYByClustering(newRowNodes));
            }
        } else {
            //-------------------------------------------------------
            // Centroid cluster
            //-------------------------------------------------------
            DataAction.centroidCluster(map, newRowNodes, true);
            //-------------------------------------------------------
        }

        VUE.getSelection().setTo(newRowNodes);
    }

    map.getUndoManager().mark("Add New Data Nodes");
}

From source file:org.onebusaway.nyc.vehicle_tracking.impl.inference.ParticleFactoryImpl.java

@Override
public Multiset<Particle> createParticles(double timestamp, Observation obs) throws ParticleFilterException {

    final Set<BlockStateObservation> potentialBlocks = _blocksFromObservationService
            .determinePotentialBlockStatesForObservation(obs);

    final Multiset<Particle> particles = HashMultiset.create();

    double normOffset = Double.NEGATIVE_INFINITY;
    for (int i = 0; i < _initialNumberOfParticles; ++i) {
        final CategoricalDist<Particle> transitionProb = new CategoricalDist<Particle>();

        for (final BlockStateObservation blockState : potentialBlocks) {
            final SensorModelResult transProb = new SensorModelResult("transition");
            final double inMotionSample = threadLocalRng.get().nextDouble();
            final boolean vehicleNotMoved = inMotionSample < 0.5;
            final MotionState motionState = _motionModel.updateMotionState(obs, vehicleNotMoved);

            BlockStateObservation sampledBlockState;
            if (blockState != null) {
                /*
                 * Sample a distance along the block using the snapped observation
                 * results as priors.
                 */
                if (blockState.isSnapped()) {
                    sampledBlockState = blockState;
                } else {
                    sampledBlockState = _blockStateSamplingStrategy
                            .samplePriorScheduleState(blockState.getBlockState().getBlockInstance(), obs);
                }
            } else {
                sampledBlockState = null;
            }
            final JourneyState journeyState = _journeyStateTransitionModel.getJourneyState(sampledBlockState,
                    null, obs, vehicleNotMoved);

            final VehicleState state = vehicleState(motionState, sampledBlockState, journeyState, obs);
            final Context context = new Context(null, state, obs);

            transProb.addResultAsAnd(_motionModel.getEdgeLikelihood().likelihood(context));
            transProb.addResultAsAnd(_motionModel.getGpsLikelihood().likelihood(context));
            transProb.addResultAsAnd(_motionModel.getSchedLikelihood().likelihood(context));
            transProb.addResultAsAnd(_motionModel.dscLikelihood.likelihood(context));
            transProb.addResultAsAnd(_motionModel.runLikelihood.likelihood(context));
            transProb.addResultAsAnd(_motionModel.runTransitionLikelihood.likelihood(context));
            transProb.addResultAsAnd(_motionModel.nullStateLikelihood.likelihood(context));
            transProb.addResultAsAnd(_motionModel.nullLocationLikelihood.likelihood(context));

            final Particle newParticle = new Particle(timestamp, null, 0.0, state);
            newParticle.setResult(transProb);

            transitionProb.logPut(transProb.getLogProbability(), newParticle);
        }

        final Particle newSample;
        if (transitionProb.canSample()) {
            newSample = transitionProb.sample();
            newSample.setLogWeight(newSample.getResult().getLogProbability());
            particles.add(newSample);
        } else {
            final double inMotionSample = ParticleFactoryImpl.getThreadLocalRng().get().nextDouble();
            final boolean vehicleNotMoved = inMotionSample < 0.5;
            final MotionState motionState = _motionModel.updateMotionState(obs, vehicleNotMoved);
            final JourneyState journeyState = _journeyStateTransitionModel.getJourneyState(null, null, obs,
                    vehicleNotMoved);
            final VehicleState nullState = new VehicleState(motionState, null, journeyState, null, obs);
            final Context context = new Context(null, nullState, obs);
            final SensorModelResult priorProb = new SensorModelResult("prior creation");
            priorProb.addResultAsAnd(_motionModel.getEdgeLikelihood().likelihood(context));
            priorProb.addResultAsAnd(_motionModel.getGpsLikelihood().likelihood(context));
            priorProb.addResultAsAnd(_motionModel.getSchedLikelihood().likelihood(context));
            priorProb.addResultAsAnd(_motionModel.dscLikelihood.likelihood(context));
            priorProb.addResultAsAnd(_motionModel.runLikelihood.likelihood(context));
            priorProb.addResultAsAnd(_motionModel.runTransitionLikelihood.likelihood(context));
            priorProb.addResultAsAnd(_motionModel.nullStateLikelihood.likelihood(context));
            priorProb.addResultAsAnd(_motionModel.nullLocationLikelihood.likelihood(context));

            newSample = new Particle(timestamp, null, 0.0, nullState);
            newSample.setResult(priorProb);
            particles.add(newSample);
            newSample.setLogWeight(newSample.getResult().getLogProbability());
        }

        normOffset = LogMath.add(newSample.getLogWeight(), normOffset);
    }

    /*
     * Normalize
     */
    for (final Entry<Particle> p : particles.entrySet()) {
        p.getElement()
                .setLogNormedWeight(p.getElement().getLogWeight() + FastMath.log(p.getCount()) - normOffset);
    }

    return particles;
}

From source file:com.zimbra.cs.account.ProvUtil.java

/**
 * Convert an array of the form:
 *
 * a1 v1 a2 v2 a2 v3
 *
 * to a map of the form:
 *
 * a1 -> v1 a2 -> [v2, v3]
 *
 * For a binary attribute, the argument following the attribute name is treated as a file path, and the value
 * for the attribute will be the base64-encoded content of that file.
 */
private Map<String, Object> keyValueArrayToMultiMap(String[] args, int offset, boolean isCreateCmd)
        throws IOException, ServiceException {
    AttributeManager attrMgr = AttributeManager.getInstance();

    Map<String, Object> attrs = new HashMap<String, Object>();

    String safeguarded_attrs_prop = LC.get("zmprov_safeguarded_attrs");
    Set<String> safeguarded_attrs = safeguarded_attrs_prop == null ? Sets.<String>newHashSet()
            : Sets.newHashSet(safeguarded_attrs_prop.toLowerCase().split(","));
    Multiset<String> multiValAttrsToCheck = HashMultiset.create();

    for (int i = offset; i < args.length; i += 2) {
        String n = args[i];
        if (i + 1 >= args.length) {
            throw new IllegalArgumentException("not enough arguments");
        }
        String v = args[i + 1];
        String attrName = n;
        if (n.charAt(0) == '+' || n.charAt(0) == '-') {
            attrName = attrName.substring(1);
        } else if (safeguarded_attrs.contains(attrName.toLowerCase()) && attrMgr.isMultiValued(attrName)) {
            multiValAttrsToCheck.add(attrName.toLowerCase());
        }
        if (needsBinaryIO(attrMgr, attrName) && v.length() > 0) {
            File file = new File(v);
            byte[] bytes = ByteUtil.getContent(file);
            v = ByteUtil.encodeLDAPBase64(bytes);
        }
        StringUtil.addToMultiMap(attrs, n, v);
    }

    if (!allowMultiValuedAttrReplacement && !isCreateCmd) {
        for (Multiset.Entry<String> entry : multiValAttrsToCheck.entrySet()) {
            if (entry.getCount() == 1) {
                // If multiple values are being assigned to an attr as part of the same command
                // then we don't consider it an unsafe replacement
                printError("error: cannot replace multi-valued attr value unless -r is specified");
                System.exit(2);
            }
        }
    }

    return attrs;
}