Example usage for com.google.common.collect Multiset iterator

Introduction

On this page you can find usage examples for com.google.common.collect Multiset.iterator().

Prototype

@Override
Iterator<E> iterator();

Document

Elements that occur multiple times in the multiset will appear multiple times in this iterator, though not necessarily sequentially.
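
As a minimal sketch of this behavior (assuming a HashMultiset; not taken from any of the projects below), an element added twice is yielded twice by the iterator:

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class MultisetIteratorDemo {
    public static void main(String[] args) {
        Multiset<String> multiset = HashMultiset.create();
        multiset.add("a");
        multiset.add("b", 2); // two occurrences of "b"

        // The enhanced for loop uses Multiset.iterator(), which yields each
        // occurrence; "b" therefore appears twice, though iteration order is
        // unspecified for HashMultiset.
        for (String element : multiset) {
            System.out.println(element + " (count=" + multiset.count(element) + ")");
        }
    }
}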

Usage

From source file:com.github.fhirschmann.clozegen.lib.util.MultisetUtils.java

/**
 * Merges two multisets.
 *
 * @param <E> the type of the elements of both multisets
 * @param multiset1 multiset to merge
 * @param multiset2 multiset to merge
 * @return a new merged multiset
 */
public static <E> Multiset<E> mergeMultiSets(final Multiset<E> multiset1, final Multiset<E> multiset2) {
    final Multiset<E> multiset = LinkedHashMultiset.create(multiset1);
    Iterators.addAll(multiset, multiset2.iterator());
    return multiset;
}
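
Because Iterators.addAll consumes multiset2.iterator(), which yields every occurrence, the counts in the merged result are the sums of the counts of the two inputs. A hypothetical call (names invented for illustration) might look like this:

Multiset<String> first = HashMultiset.create(ImmutableList.of("x", "x", "y"));
Multiset<String> second = HashMultiset.create(ImmutableList.of("x", "z"));
Multiset<String> merged = MultisetUtils.mergeMultiSets(first, second);
// merged.count("x") == 3, merged.count("y") == 1, merged.count("z") == 1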

From source file:org.apache.lucene.benchmark.quality.mc.IntrinsicEvaluator.java

static void prune(Map<String, Multiset<String>> map, int minTF) {

    for (Map.Entry<String, Multiset<String>> entry : map.entrySet()) {
        Multiset<String> set = entry.getValue();

        Iterator<String> iterator = set.iterator();

        while (iterator.hasNext()) {

            String string = iterator.next();
            int count = set.count(string);

            if (count < minTF)
                iterator.remove();

        }
    }
}
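
Removing through the occurrence iterator works here because every occurrence of a low-frequency term is visited and removed one at a time. As an alternative (a sketch, not from the original project), the same pruning can be written against the elementSet() view, where removal drops all occurrences of an element at once:

static void prune(Map<String, Multiset<String>> map, int minTF) {
    for (Multiset<String> set : map.values()) {
        // Removing from elementSet() removes every occurrence of the element.
        set.elementSet().removeIf(s -> set.count(s) < minTF);
    }
}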

From source file:org.onebusaway.nyc.vehicle_tracking.impl.particlefilter.ParticleFilter.java

/**
 * Low variance sampler. Follows Thrun's example in Probabilistic Robotics.
 * @throws ParticleFilterException
 */
public static Multiset<Particle> lowVarianceSampler(Multiset<Particle> particles, double M)
        throws BadProbabilityParticleFilterException {
    Preconditions.checkArgument(particles.size() > 0);
    Preconditions.checkArgument(M > 0);

    final Multiset<Particle> resampled = HashMultiset.create((int) M);
    final double r = ParticleFactoryImpl.getLocalRng().nextDouble() / M;
    final Iterator<Particle> pIter = particles.iterator();
    Particle p = pIter.next();
    // The multiset iterator yields each particle once per occurrence, so the
    // per-occurrence log-weight is the particle's weight divided by its count.
    double c = p.getLogNormedWeight() - FastMath.log(particles.count(p));
    for (int m = 0; m < M; ++m) {
        final double U = FastMath.log(r + m / M);
        while (U > c && pIter.hasNext()) {
            p = pIter.next();
            c = LogMath.add(p.getLogNormedWeight() - FastMath.log(particles.count(p)), c);
        }
        resampled.add(p);
    }

    if (resampled.size() != M)
        throw new BadProbabilityParticleFilterException("low variance sampler did not return a valid sample");

    return resampled;
}

From source file:tufts.vue.VueApplet.java

@SuppressWarnings("unchecked")
public static void addLinksToMap(final String content) {
    AccessController.doPrivileged(new PrivilegedAction() {

        public Object run() {
            //        Reader reader = openReader();
            javax.xml.parsers.DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
            factory.setIgnoringElementContentWhitespace(true);
            factory.setIgnoringComments(true);
            //factory.setCoalescing(true);
            factory.setValidating(false);
            // We don't use is.setEncoding(), as openReader will already have handled that
            //      is.setCharacterStream(reader);
            InputStream is;
            try {
                is = new java.io.ByteArrayInputStream(content.getBytes("UTF-8"));
                final org.w3c.dom.Document doc = factory.newDocumentBuilder().parse((InputStream) is);
                NodeList nodeLst = doc.getElementsByTagName("link");

                //build multimap of links
                Multimap<String, String> map = Multimaps.newArrayListMultimap();
                for (int s = 0; s < nodeLst.getLength(); s++) {

                    Node fstNode = nodeLst.item(s);

                    if (fstNode.getNodeType() == Node.ELEMENT_NODE) {
                        NamedNodeMap atts = fstNode.getAttributes();

                        Node n = atts.item(0);
                        Node val = atts.item(1);

                        map.put(n.getNodeValue(), val.getNodeValue());
                    }

                }

                java.util.Collection<LWComponent> comps = VUE.getActiveMap().getAllDescendents();
                java.util.Iterator<LWComponent> iter = comps.iterator();
                //build map of data row nodes
                HashMap<String, LWNode> dataRowNodes = new HashMap<String, LWNode>();
                while (iter.hasNext()) {
                    LWComponent comp = iter.next();
                    if (comp.isDataRowNode()) {
                        String fromId = comp.getDataValue("id");
                        dataRowNodes.put(fromId, (LWNode) comp);
                    }
                }

                //draw links
                Multiset<String> keys = map.keys();
                java.util.Iterator<String> linkIterator = keys.iterator();

                while (linkIterator.hasNext()) {
                    String fromId = linkIterator.next();

                    Collection<String> toIds = map.get(fromId);
                    java.util.Iterator<String> toIdIterator = toIds.iterator();
                    while (toIdIterator.hasNext()) {
                        String tid = toIdIterator.next();
                        LWNode fromNode = dataRowNodes.get(fromId);
                        LWNode toNode = dataRowNodes.get(tid);

                        if (fromNode != null && toNode != null) {
                            LWLink link = new LWLink(fromNode, toNode);
                            VUE.getActiveMap().add(link);
                        }
                    }
                }

            } catch (UnsupportedEncodingException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            } catch (SAXException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            } catch (IOException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            } catch (ParserConfigurationException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
            return null;
        };
    });

}

From source file:bots.mctsbot.ai.bots.bot.gametree.rollout.BucketRollOut.java

private double[] calcDeadCardWeights(Multiset<Integer> ranks, Multiset<Integer> deadRanks) {
    Iterator<Integer> iter = ranks.iterator();
    double[] deadCardWeights = new double[nbBuckets];
    for (int bucket = 0; bucket < nbBuckets; bucket++) {
        double nbDead = 0;
        for (int j = 0; j < nbSamplesPerBucket; j++) {
            int rank = iter.next();
            double count = ranks.count(rank);
            double deadCount = deadRanks.count(rank);
            nbDead += deadCount / count;
        }
        deadCardWeights[bucket] = ((nbSamplesPerBucket - nbDead) / nbSamplesPerBucket);
    }
    return deadCardWeights;
}

From source file:bots.mctsbot.ai.bots.bot.gametree.rollout.BucketRollOut.java

private WinDistribution[] calcWinDistributions(int botRank, Multiset<Integer> ranks,
        Multiset<Integer> deadRanks) {
    Iterator<Integer> iter = ranks.iterator();
    WinDistribution[] winProbs = new WinDistribution[10];
    for (int bucket = 0; bucket < nbBuckets; bucket++) {
        double winWeight = 0;
        double drawWeight = 0;
        double loseWeight = 0;
        for (int j = 0; j < nbSamplesPerBucket; j++) {
            int rank = iter.next();
            double weight = 1 - deadRanks.count(rank) / ranks.count(rank);
            if (rank < botRank) {
                winWeight += weight;
            } else if (rank > botRank) {
                loseWeight += weight;
            } else {
                drawWeight += weight;
            }
        }
        double nbSamples = winWeight + drawWeight + loseWeight;
        if (nbSamples == 0)
            nbSamples = 1;
        winProbs[bucket] = new WinDistribution(winWeight / nbSamples, drawWeight / nbSamples,
                loseWeight / nbSamples);
    }
    return winProbs;
}

From source file:fabric.worker.transaction.DeadlockDetectorThread.java

/**
 * Resolves deadlocks by aborting transactions.
 *
 * @param cycles
 *          the set of deadlocks, represented by the logs of transactions
 *          involved in waits-for cycles.
 */
private void resolveDeadlocks(Set<Set<Log>> cycles) {
    // Turn the set of cycles into a map from top-level TIDs to sorted multisets
    // of transaction logs. The multisets are sorted by transaction depth, outer
    // transactions first.
    LongKeyMap<Multiset<Log>> logsByTopLevelTid = new LongKeyHashMap<Multiset<Log>>();
    for (Set<Log> cycle : cycles) {
        for (Log log : cycle) {
            long topLevelTid = log.getTid().topTid;
            Multiset<Log> logs = logsByTopLevelTid.get(topLevelTid);
            if (logs == null) {
                logs = TreeMultiset.create(LOG_COMPARATOR);
                logsByTopLevelTid.put(topLevelTid, logs);
            }

            logs.add(log);
        }
    }

    // Abort transactions to break up cycles. Transactions involved in more
    // cycles are aborted first.
    while (!cycles.isEmpty()) {
        // Figure out which top-level transaction(s) are involved in the largest
        // number of deadlocks.
        int curMax = 0;
        LongSet abortCandidates = new LongHashSet();
        for (LongKeyMap.Entry<Multiset<Log>> entry : logsByTopLevelTid.entrySet()) {
            int curSize = entry.getValue().size();
            if (curMax > curSize)
                continue;

            if (curMax < curSize) {
                curMax = curSize;
                abortCandidates.clear();
            }

            abortCandidates.add(entry.getKey());
        }

        // Figure out which transaction to abort. (Pick the newest one.)
        Log toAbort = null;
        Multiset<Log> abortSet = null;
        for (LongIterator it = abortCandidates.iterator(); it.hasNext();) {
            long curTopLevelTid = it.next();
            Multiset<Log> curCandidateSet = logsByTopLevelTid.get(curTopLevelTid);
            Log curCandidate = curCandidateSet.iterator().next();

            if (toAbort == null || toAbort.startTime < curCandidate.startTime) {
                toAbort = curCandidate;
                abortSet = curCandidateSet;
            }
        }

        // Abort the transaction.
        WORKER_DEADLOCK_LOGGER.log(Level.FINE, "Aborting {0}", toAbort);
        toAbort.flagRetry();

        // Fix up our data structures to reflect the aborted transaction.
        for (Iterator<Set<Log>> cycleIt = cycles.iterator(); cycleIt.hasNext();) {
            Set<Log> cycle = cycleIt.next();

            // Check if the cycle has a transaction that was aborted.
            if (!haveCommonElements(cycle, abortSet.elementSet()))
                continue;

            // Cycle was broken, so remove from the set of cycles.
            cycleIt.remove();

            // Fix up logsByTopLevelTid.
            for (Log log : cycle) {
                long topLevelTid = log.getTid().topTid;
                Multiset<Log> logs = logsByTopLevelTid.get(topLevelTid);
                logs.remove(log);
                if (logs.isEmpty()) {
                    logsByTopLevelTid.remove(topLevelTid);
                }
            }
        }
    }
}

From source file:qa.qcri.nadeef.core.pipeline.EquivalentClass.java

/**
 * {@inheritDoc}
 */
@Override
public Collection<Fix> decide(Collection<Fix> fixes) {
    List<HashSet<Cell>> clusters = Lists.newArrayList();
    // a map between a cell and the cluster it belongs to.
    HashMap<Cell, HashSet<Cell>> clusterMap = Maps.newHashMap();
    HashMap<Cell, String> assignMap = Maps.newHashMap();
    // a map between cell and fix, used for getting the original vid.
    HashMap<Cell, Fix> fixMap = Maps.newHashMap();

    // Clustering all the fixes.
    int count = 0;
    for (Fix fix : fixes) {
        Cell leftCell = fix.getLeft();
        fixMap.put(leftCell, fix);

        if (fix.isRightConstant()) {
            // TODO: do a statistic on the assign count.
            assignMap.put(leftCell, fix.getRightValue());
            continue;
        }

        Cell rightCell = fix.getRight();
        fixMap.put(rightCell, fix);
        if (assignMap.containsKey(leftCell)) {
            assignMap.remove(leftCell);
        }

        if (assignMap.containsKey(rightCell)) {
            assignMap.remove(rightCell);
        }

        HashSet<Cell> leftCluster = null;
        HashSet<Cell> rightCluster = null;

        // when the left column is already in a cluster
        if (clusterMap.containsKey(leftCell)) {
            leftCluster = clusterMap.get(leftCell);
            if (!leftCluster.contains(rightCell)) {
                // union of two cluster of cell sets.
                if (clusterMap.containsKey(rightCell)) {
                    rightCluster = clusterMap.get(rightCell);
                    for (Cell cell : rightCluster) {
                        leftCluster.add(cell);
                        clusterMap.put(cell, leftCluster);
                    }

                    rightCluster.clear();
                    clusters.remove(rightCluster);
                } else {
                    clusterMap.put(rightCell, leftCluster);
                    leftCluster.add(rightCell);
                }
            }
        } else if (clusterMap.containsKey(rightCell)) {
            // when the right column is already in the cluster
            rightCluster = clusterMap.get(rightCell);
            if (!rightCluster.contains(leftCell)) {
                // union of two cluster of cell sets.
                if (clusterMap.containsKey(leftCell)) {
                    leftCluster = clusterMap.get(leftCell);
                    for (Cell cell : leftCluster) {
                        rightCluster.add(cell);
                        clusterMap.put(cell, rightCluster);
                    }

                    // Mirror the other branch: clearing avoids removing from
                    // the set while iterating over it.
                    leftCluster.clear();

                    clusters.remove(leftCluster);
                } else {
                    clusterMap.put(leftCell, rightCluster);
                    rightCluster.add(leftCell);
                }
            }
        } else {
            // both left and right are not in any of the cluster
            // create a new cluster of containing both.
            HashSet<Cell> cluster = Sets.newHashSet();
            cluster.add(leftCell);
            cluster.add(rightCell);
            clusterMap.put(leftCell, cluster);
            clusterMap.put(rightCell, cluster);
            clusters.add(cluster);
        }
    }

    // start to count each cluster and decide the final fix based on
    // percentage.
    List<Fix> result = Lists.newArrayList();
    // for final execution of all the fixes, we use 0 as default as the fix id.
    Fix.Builder fixBuilder = new Fix.Builder();
    count = 0;
    for (HashSet<Cell> cluster : clusters) {
        Multiset<Object> countSet = HashMultiset.create();
        for (Cell cell : cluster) {
            countSet.add(cell.getValue());
        }

        countSet = Multisets.copyHighestCountFirst(countSet);
        Object value = countSet.iterator().next();
        for (Cell cell : cluster) {
            if (cell.getValue().equals(value)) {
                // skip the correct value.
                continue;
            }
            Fix originalFix = fixMap.get(cell);
            Fix newFix = fixBuilder.vid(originalFix.getVid()).left(cell).right(value.toString()).build();
            result.add(newFix);
        }
        count++;
    }

    // collect the remaining constant assign fix.
    Set<Map.Entry<Cell, String>> entries = assignMap.entrySet();
    for (Map.Entry<Cell, String> entry : entries) {
        Fix newFix = fixBuilder.left(entry.getKey()).right(entry.getValue()).build();
        result.add(newFix);
    }

    setPercentage(1.0f);
    return result;
}
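
The call countSet.iterator().next() after Multisets.copyHighestCountFirst works because the copy is an ImmutableMultiset whose iteration order puts the highest-count element first, so the first element returned by the iterator is (one of) the most frequent values in the cluster. A minimal sketch of the same pattern, with invented values:

Multiset<String> votes = HashMultiset.create(ImmutableList.of("a", "b", "b"));
String mostCommon = Multisets.copyHighestCountFirst(votes).iterator().next(); // "b"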

From source file:com.mycompany.wolf.Room.java

private void assignRoles() {
    Multiset<String> roleCounts = HashMultiset.create(roleCounts());
    Map<String, String> roleMap = new HashMap();
    competeRoles.values().stream().filter(c -> roleCounts.remove(c.role)).forEach(c -> {
        roleMap.put(c.playerId, c.role);
    });
    List<String> restPlayerId = sessions.stream().map(s -> getPlayerId(s)).filter(s -> !roleMap.containsKey(s))
            .collect(Collectors.toList());
    Collections.shuffle(restPlayerId);
    Iterator<String> restRoleIt = roleCounts.iterator();
    Iterator<String> restPlayerIdIt = restPlayerId.iterator();
    for (; restRoleIt.hasNext();) {
        String role = restRoleIt.next();
        String playerId = restPlayerIdIt.next();
        roleMap.put(playerId, role);
    }
    sessions.stream().forEach(s -> {
        s.getUserProperties().put("role", roleMap.get(getPlayerId(s)));
    });

    List<ImmutableMap<String, String>> assignedRoles = roleMap.entrySet().stream()
            .map(entry -> ImmutableMap.of("playerId", entry.getKey(), "role", entry.getValue()))
            .collect(Collectors.toCollection(LinkedList::new));
    Map<String, Object> assignRoles = ImmutableMap.of("code", "assignRoles", "properties", assignedRoles);
    String jsonText = JsonUtils.toString(assignRoles);
    sessions.stream().forEach(s -> {
        s.getAsyncRemote().sendText(jsonText);
    });
}

From source file:visualizer.corpus.zip.InvertedZipCorpus.java

/**
 * Method that creates the ngrams.
 * 
 * @param filename
 * @return
 * @throws IOException
 */
private List<Ngram> getNgramsFromFile(String filename) throws IOException {
    String filecontent = corpus.getFullContent(filename);
    List<Ngram> ngrams = new ArrayList<Ngram>();

    if (filecontent == null || StringUtil.isEmpty(filecontent)) {
        return ngrams;
    }

    Multiset<String> bag = HashMultiset.create();
    TermExtractor<String> termExtractor = new RegExpTermExtractor(filecontent);

    StringCircularBuffer[] buffers = new StringCircularBuffer[nrGrams - 1];
    for (int i = 2; i <= nrGrams; i++) {
        buffers[i - 2] = new StringCircularBuffer(i);
        buffers[i - 2].setSeparator(Corpus.NGRAM_SEPARATOR);
    }

    String term;
    while ((term = termExtractor.next()) != null) {
        if (!StringUtil.isEmpty(term)) {
            bag.add(term);
            for (StringCircularBuffer buffer : buffers) {
                String ngram = buffer.add(term);
                if (ngram != null) {
                    bag.add(ngram);
                }
            }
        }
    }
    for (StringCircularBuffer buffer : buffers) {
        String leftover = buffer.reset();
        if (leftover != null) {
            bag.add(leftover);
        }
    }

    Iterator<String> i = bag.iterator();
    while (i.hasNext()) {
        String ngramText = i.next();
        Ngram ngram = new Ngram(ngramText);
        ngram.setFrequency(bag.count(ngramText));
        ngrams.add(ngram);
    }

    return ngrams;
}
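
Note that bag.iterator() yields each term once per occurrence, so the loop above constructs one Ngram per occurrence of a term, each carrying the same frequency. If one Ngram per distinct term is what is intended, an alternative (a sketch, not from the original project) iterates the entry view instead:

for (Multiset.Entry<String> entry : bag.entrySet()) {
    Ngram ngram = new Ngram(entry.getElement());
    ngram.setFrequency(entry.getCount());
    ngrams.add(ngram);
}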