Example usage for com.google.common.collect Multiset count

List of usage examples for com.google.common.collect Multiset count

Introduction

On this page you can find example usage for com.google.common.collect Multiset count.

Prototype

int count(@Nullable Object element);

Document

Returns the number of occurrences of an element in this multiset (the count of the element).
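
Before the project examples below, here is a minimal self-contained sketch of that behavior (illustrative only; it assumes Guava is on the classpath, and the class and variable names are made up for this page):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class MultisetCountExample {
    public static void main(String[] args) {
        Multiset<String> words = HashMultiset.create();
        words.add("apple");
        words.add("apple");
        words.add("banana");

        // count(element) returns how many times the element occurs in the multiset
        System.out.println(words.count("apple"));  // 2
        System.out.println(words.count("banana")); // 1

        // an element that has never been added (including null) simply has a count of 0
        System.out.println(words.count("cherry")); // 0
        System.out.println(words.count(null));     // 0
    }
}

Because count never throws for absent elements and just returns 0, it is safe to compare counts directly against expected totals, which is the pattern most of the examples below rely on.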

Usage

From source file:br.com.bluesoft.pronto.model.Ticket.java

public boolean isEmAndamento() {
    if (temFilhos()) {
        final Multiset<Integer> contadorDeStatus = HashMultiset.create();
        final int quantidadeDeFilhos = filhos.size();
        for (final Ticket filho : filhos) {
            contadorDeStatus.add(filho.getKanbanStatus().getKanbanStatusKey());
        }

        return contadorDeStatus.count(KanbanStatus.DONE) != quantidadeDeFilhos
                && contadorDeStatus.count(KanbanStatus.TO_DO) != quantidadeDeFilhos;

    } else {
        return !isDone() && !isToDo();
    }

}

From source file:com.facebook.presto.raptor.storage.BucketBalancer.java

private static Multimap<String, BucketAssignment> computeAssignmentChanges(ClusterState clusterState) {
    Multimap<String, BucketAssignment> sourceToAllocationChanges = HashMultimap.create();

    Map<String, Long> allocationBytes = new HashMap<>(clusterState.getAssignedBytes());
    Set<String> activeNodes = clusterState.getActiveNodes();

    for (Distribution distribution : clusterState.getDistributionAssignments().keySet()) {
        // number of buckets in this distribution assigned to a node
        Multiset<String> allocationCounts = HashMultiset.create();
        Collection<BucketAssignment> distributionAssignments = clusterState.getDistributionAssignments()
                .get(distribution);
        distributionAssignments.stream().map(BucketAssignment::getNodeIdentifier)
                .forEach(allocationCounts::add);

        int currentMin = allocationBytes.keySet().stream().mapToInt(allocationCounts::count).min().getAsInt();
        int currentMax = allocationBytes.keySet().stream().mapToInt(allocationCounts::count).max().getAsInt();

        int numBuckets = distributionAssignments.size();
        int targetMin = (int) Math.floor((numBuckets * 1.0) / clusterState.getActiveNodes().size());
        int targetMax = (int) Math.ceil((numBuckets * 1.0) / clusterState.getActiveNodes().size());

        log.info("Distribution %s: Current bucket skew: min %s, max %s. Target bucket skew: min %s, max %s",
                distribution.getId(), currentMin, currentMax, targetMin, targetMax);

        for (String source : ImmutableSet.copyOf(allocationCounts)) {
            List<BucketAssignment> existingAssignments = distributionAssignments.stream()
                    .filter(assignment -> assignment.getNodeIdentifier().equals(source)).collect(toList());

            for (BucketAssignment existingAssignment : existingAssignments) {
                if (activeNodes.contains(source) && allocationCounts.count(source) <= targetMin) {
                    break;
                }

                // identify nodes with bucket counts lower than the computed target, and greedily select from this set based on projected disk utilization.
                // greediness means that this may produce decidedly non-optimal results if one looks at the global distribution of buckets->nodes.
                // also, this assumes that nodes in a cluster have identical storage capacity
                String target = activeNodes.stream()
                        .filter(candidate -> !candidate.equals(source)
                                && allocationCounts.count(candidate) < targetMax)
                        .sorted(comparingInt(allocationCounts::count))
                        .min(Comparator.comparingDouble(allocationBytes::get))
                        .orElseThrow(() -> new VerifyException("unable to find target for rebalancing"));

                long bucketSize = clusterState.getDistributionBucketSize().get(distribution);

                // only move bucket if it reduces imbalance
                if (activeNodes.contains(source) && (allocationCounts.count(source) == targetMax
                        && allocationCounts.count(target) == targetMin)) {
                    break;
                }

                allocationCounts.remove(source);
                allocationCounts.add(target);
                allocationBytes.compute(source, (k, v) -> v - bucketSize);
                allocationBytes.compute(target, (k, v) -> v + bucketSize);

                sourceToAllocationChanges.put(existingAssignment.getNodeIdentifier(), new BucketAssignment(
                        existingAssignment.getDistributionId(), existingAssignment.getBucketNumber(), target));
            }
        }
    }

    return sourceToAllocationChanges;
}

From source file:edu.uci.ics.sourcerer.tools.java.component.identifier.internal.ComponentRepositoryBuilder.java

private void computeVersionDependencies() {
    task.start("Computing library version to version dependencies");
    // Build map from FqnVersions to LibraryVersions
    Multimap<FqnVersion, LibraryVersion> fqnVersionToLibVersion = HashMultimap.create();
    for (Library library : repo.getLibraries()) {
        for (LibraryVersion version : library.getVersions()) {
            for (FqnVersion fqn : version.getFqnVersions()) {
                fqnVersionToLibVersion.put(fqn, version);
            }
        }
    }

    for (Library library : repo.getLibraries()) {
        for (LibraryVersion version : library.getVersions()) {
            // For each version of the library, look up all the libraries that contain that fqn
            Multiset<LibraryVersion> versionSet = HashMultiset.create();
            for (FqnVersion fqn : version.getFqnVersions()) {
                versionSet.addAll(fqnVersionToLibVersion.get(fqn));
            }

            // See if any other library contains a subset of the fqn versions for this library
            for (LibraryVersion libVersion : versionSet.elementSet()) {
                if (version != libVersion
                        && versionSet.count(libVersion) == libVersion.getFqnVersions().size()) {
                    version.addVersionDependency(libVersion);
                }
            }
        }
    }
    task.finish();
}

From source file:de.iteratec.iteraplan.businesslogic.service.DashboardServiceImpl.java

/** {@inheritDoc} */
public Map<String, Integer> getIsrSealStateMap(List<InformationSystemRelease> isrs) {
    Multiset<SealState> multiset = EnumMultiset.create(SealState.class);

    for (InformationSystemRelease isr : isrs) {
        multiset.add(isr.getSealState());
    }

    Map<String, Integer> statusMap = Maps.newLinkedHashMap();
    for (SealState sealState : SealState.values()) {
        statusMap.put(sealState.toString(), Integer.valueOf(multiset.count(sealState)));
    }

    return statusMap;
}

From source file:edu.stanford.nlp.util.JBLEU.java

public void stats(List<String> hyp, List<List<String>> refs, int[] result) {
    assert result.length == 9;
    assert refs.size() > 0;

    // 1) choose reference length
    int selectedRef = pickReference(hyp, refs, verbosity);
    int selectedRefLen = refs.get(selectedRef).size();

    // TODO: Integer-ify everything inside Ngram? Or is there too much
    // overhead there?

    // 2) determine the bag of n-grams we can score against
    // build a simple tries
    Multiset<Ngram> clippedRefNgrams = HashMultiset.create();
    for (List<String> ref : refs) {
        Multiset<Ngram> refNgrams = HashMultiset.create();
        for (int order = 1; order <= N; order++) {
            for (int i = 0; i <= ref.size() - order; i++) {
                List<String> toks = ref.subList(i, i + order);
                Ngram ngram = new Ngram(toks);
                refNgrams.add(ngram);
            }
        }
        // clip n-grams by taking the maximum number of counts for any given reference
        for (Ngram ngram : refNgrams) {
            int clippedCount = Math.max(refNgrams.count(ngram), clippedRefNgrams.count(ngram));
            clippedRefNgrams.setCount(ngram, clippedCount);
        }
    }

    // 3) now match n-grams
    int[] attempts = new int[N];
    int[] matches = new int[N];
    for (int order = 1; order <= N; order++) {
        for (int i = 0; i <= hyp.size() - order; i++) {
            List<String> toks = hyp.subList(i, i + order);
            Ngram ngram = new Ngram(toks);
            boolean found = clippedRefNgrams.remove(ngram);
            ++attempts[order - 1];
            if (found) {
                ++matches[order - 1];
            }
        }
    }

    // 4) assign sufficient stats
    System.arraycopy(attempts, 0, result, 0, N);
    System.arraycopy(matches, 0, result, N, N);
    result[N * 2] = selectedRefLen;
}

From source file:edu.uci.ics.sourcerer.tools.java.component.identifier.internal.ComponentRepositoryBuilder.java

private JarSet createSimpleLibraries() {
    JarSet assignedJars = JarSet.create();

    PriorityQueue<Cluster> sortedClusters = new PriorityQueue<>(clusters.size(),
            Cluster.DESCENDING_SIZE_COMPARATOR);
    Map<VersionedFqnNode, Cluster> fqnToCluster = new HashMap<>();
    for (Cluster cluster : clusters) {
        sortedClusters.add(cluster);
        for (VersionedFqnNode fqn : cluster.getCoreFqns()) {
            fqnToCluster.put(fqn, cluster);
        }
        for (VersionedFqnNode fqn : cluster.getVersionFqns()) {
            fqnToCluster.put(fqn, cluster);
        }
    }

    int phantomCount = 0;
    task.start("Creating simple libraries from clusters", "clusters examined", 10_000);
    while (!sortedClusters.isEmpty()) {
        // Perhaps this needs to be iterated, too?
        Cluster biggest = sortedClusters.poll();

        Library library = Library.create(biggest);
        repo.addLibrary(library);
        {
            Set<VersionedFqnNode> globalPotentials = new HashSet<>();
            Set<VersionedFqnNode> globalPartials = new HashSet<>();

            // For each version, find any fqns that always occur
            for (ClusterVersion version : biggest.getVersions()) {
                Multiset<FqnVersion> potentials = HashMultiset.create();
                for (Jar jar : version.getJars()) {
                    for (FqnVersion fqn : jar.getFqns()) {
                        potentials.add(fqn);
                    }
                }

                int max = version.getJars().size();
                for (FqnVersion fqn : potentials) {
                    if (potentials.count(fqn) == max) {
                        globalPotentials.add(fqn.getFqn());
                    } else {
                        globalPartials.add(fqn.getFqn());
                    }
                }
            }
            globalPotentials.removeAll(globalPartials);
            globalPotentials.removeAll(biggest.getCoreFqns());
            globalPotentials.removeAll(biggest.getVersionFqns());
            // Are there any clusters that we match?
            Set<Cluster> potentialClusters = new HashSet<>();
            for (VersionedFqnNode fqn : globalPotentials) {
                Cluster cluster = fqnToCluster.get(fqn);
                if (cluster == null) {
                    logger.severe("Missing cluster for FQN: " + fqn.getFqn());
                } else {
                    potentialClusters.add(cluster);
                }
            }
            for (Cluster cluster : potentialClusters) {
                if (globalPotentials.containsAll(cluster.getCoreFqns())
                        && CollectionUtils.containsNone(globalPartials, cluster.getVersionFqns())) {
                    library.addSecondaryCluster(cluster);
                }
            }
        }

        // Verify the cluster is novel
        Set<Cluster> packaging = new HashSet<>();
        packaging.add(library.getCoreCluster());
        packaging.addAll(library.getSecondaryClusters());

        // Add the jars to the library
        for (Jar jar : biggest.getJars()) {
            boolean addMe = true;
            for (Cluster cluster : jarsToClusters.get(jar)) {
                if (cluster != biggest && !library.getSecondaryClusters().contains(cluster)) {
                    addMe = false;
                    break;
                }
            }
            if (addMe && !assignedJars.contains(jar)) {
                library.addJar(jar);
                assignedJars = assignedJars.add(jar);
            }
        }

        if (library.getJars().isEmpty()) {
            phantomCount++;
        }
        // Split the jars into versions
        splitLibaryIntoVersions(library);
    }

    task.progress();

    task.finish();

    task.report(assignedJars.size() + " jars assigned to " + repo.getLibraries().size()
            + " libraries, of which " + phantomCount + " are phantom libraries");

    return assignedJars;
}

From source file:de.andreasschoknecht.LS3.DocumentCollection.java

/**
 * Insert a model to a model collection. This means that the underlying Term-Document Matrix has to be updated.
 *
 * @param modelPath the path to the model to be inserted.
 */
public void insertModel(String modelPath) {
    // Make sure file name is correct
    if (!modelPath.endsWith(".pnml"))
        modelPath = modelPath + ".pnml";

    // Create new LS3Document object and add it to the document collection list of documents
    System.out.println("------------------------");
    System.out.println("Model to insert:");
    System.out.println("------------------------");
    System.out.println(modelPath.substring(modelPath.lastIndexOf(File.separator) + 1));
    System.out.println("------------------------");
    System.out.println("Models in list:");
    System.out.println("------------------------");

    String[] updatedFileList = new String[fileList.length + 1];
    for (int i = 0; i <= fileList.length; i++) {
        if (i != fileList.length)
            updatedFileList[i] = fileList[i];
        else
            updatedFileList[i] = modelPath.substring(modelPath.lastIndexOf(File.separator) + 1);

        System.out.println(updatedFileList[i]);

    }

    documentNumber++;

    LS3Document newDocument = new LS3Document(modelPath);
    PNMLReader pnmlReader = new PNMLReader();
    try {
        pnmlReader.processDocument(newDocument);
    } catch (JDOMException | IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

    System.out.println("------------------------");
    System.out.println("New LS3Document data:");
    System.out.println("------------------------");
    System.out.println(newDocument.getPNMLPath());
    System.out.println("Amount of terms = " + newDocument.getAmountTerms());
    for (String term : newDocument.getTermCollection()) {
        System.out.println(term);
    }

    // Add new column to the Term-Document Matrix
    int t = tdMatrix.getRowNumber();
    double[] termFrequencies = new double[t];
    String[] termCollectionArray = new String[termCollection.size()];
    termCollection.toArray(termCollectionArray);

    Multiset<String> termsM = newDocument.getTermCollection();
    for (int i = 0; i < t; i++) {
        termFrequencies[i] = termsM.count(termCollectionArray[i]);
        termsM.remove(termCollectionArray[i]);
    }
    System.out.println("------------------------");
    System.out.println("Term frequencies:");
    System.out.println("------------------------");
    System.out.println(Arrays.toString(termFrequencies));

    System.out.println("------------------------");
    System.out.println("Old TD Matrix:");
    System.out.println("------------------------");
    for (int i = 0, k = tdMatrix.getRowNumber(); i < k; i++) {
        System.out.print(tdMatrix.getTermArray()[i] + " ");
        for (int j = 0, l = tdMatrix.getColumnNumber(); j < l; j++) {
            System.out.print(" " + tdMatrix.getMatrix()[i][j] + " ");
        }
        System.out.println("");
    }
    System.out.println("---------------------\r\n\r\n");

    tdMatrix.addColumn(termFrequencies);

    System.out.println("------------------------");
    System.out.println("New TD Matrix:");
    System.out.println("------------------------");
    for (int i = 0, k = tdMatrix.getRowNumber(); i < k; i++) {
        System.out.print(tdMatrix.getTermArray()[i] + " ");
        for (int j = 0, l = tdMatrix.getColumnNumber(); j < l; j++) {
            System.out.print(" " + tdMatrix.getMatrix()[i][j] + " ");
        }
        System.out.println("");
    }
    System.out.println("---------------------\r\n\r\n");

    // Add new terms of the new model to the term list of the document collection
    System.out.println("------------------------");
    System.out.println("Old term collection:");
    System.out.println("------------------------");
    for (String term : termCollection) {
        System.out.println(term);
    }

    System.out.println("------------------------");
    System.out.println("Terms remaining in insertion model:");
    System.out.println("------------------------");
    System.out.println(Arrays.toString(termsM.toArray(new String[termsM.size()])));

    Set<String> termSet = termsM.elementSet();
    String[] newTerms = termSet.toArray(new String[termSet.size()]);
    for (String term : newTerms) {
        termCollection.add(term);
    }

    System.out.println("------------------------");
    System.out.println("New term collection:");
    System.out.println("------------------------");
    for (String term : termCollection) {
        System.out.println(term);
    }

    System.out.println("------------------------");
    System.out.println("New term collection TD Matrix:");
    System.out.println("------------------------");
    for (String term : tdMatrix.getTermArray()) {
        System.out.println(term);
    }

    //  Add one row for each new term and add the corresponding Term-Document Matrix entries
    double[] newTermsFrequencies = new double[newTerms.length];
    for (int i = 0; i < newTerms.length; i++) {
        newTermsFrequencies[i] = termsM.count(newTerms[i]);
    }

    System.out.println("------------------------");
    System.out.println("New term frequencies:");
    System.out.println("------------------------");
    System.out.println(Arrays.toString(newTermsFrequencies));

    int n = tdMatrix.getColumnNumber();
    for (int i = 0; i < newTermsFrequencies.length; i++) {
        double[] newRow = new double[n];
        for (int j = 0; j < n - 2; j++)
            newRow[j] = 0;

        newRow[n - 1] = newTermsFrequencies[i];
        tdMatrix.addRow(newRow);
    }

    // Update term list of TDMatrix object
    tdMatrix.setTermArray(termCollection.toArray(new String[0]));

    System.out.println("------------------------");
    System.out.println("Final TD Matrix:");
    System.out.println("------------------------");
    for (int i = 0, k = tdMatrix.getRowNumber(); i < k; i++) {
        System.out.print(tdMatrix.getTermArray()[i] + " ");
        for (int j = 0, l = tdMatrix.getColumnNumber(); j < l; j++) {
            System.out.print(" " + tdMatrix.getMatrix()[i][j] + " ");
        }
        System.out.println("");
    }
    System.out.println("---------------------\r\n\r\n");

}

From source file:it.units.malelab.ege.ge.mapper.SGEMapper.java

public Node<T> map(SGEGenotype<T> genotype, Map<String, Object> report) throws MappingException {
    int[] usages = new int[genotype.size()];
    //map
    Multiset<Pair<T, Integer>> expandedSymbols = LinkedHashMultiset.create();
    Node<Pair<T, Integer>> tree = new Node<>(nonRecursiveGrammar.getStartingSymbol());
    while (true) {
        Node<Pair<T, Integer>> nodeToBeReplaced = null;
        for (Node<Pair<T, Integer>> node : tree.leafNodes()) {
            if (nonRecursiveGrammar.getRules().keySet().contains(node.getContent())) {
                nodeToBeReplaced = node;
                break;
            }
        }
        if (nodeToBeReplaced == null) {
            break;
        }
        //get codon
        List<Integer> values = genotype.getGenes().get(nodeToBeReplaced.getContent());
        int value = values.get(expandedSymbols.count(nodeToBeReplaced.getContent()));
        int usageIndex = geneFirstIndexes.get(nodeToBeReplaced.getContent())
                + expandedSymbols.count(nodeToBeReplaced.getContent());
        usages[usageIndex] = usages[usageIndex] + 1;
        List<List<Pair<T, Integer>>> options = nonRecursiveGrammar.getRules()
                .get(nodeToBeReplaced.getContent());
        int optionIndex = value;
        //add children
        for (Pair<T, Integer> symbol : options.get(optionIndex)) {
            Node<Pair<T, Integer>> newChild = new Node<>(symbol);
            nodeToBeReplaced.getChildren().add(newChild);
        }
        expandedSymbols.add(nodeToBeReplaced.getContent());
    }
    report.put(BIT_USAGES_INDEX_NAME, usages);
    return transform(tree);
}

From source file:tv.floe.metronome.io.records.RCV1RecordFactory.java

public static void ScanFile(String file, int debug_break_cnt) throws IOException {

    BufferedReader reader = null;
    int line_count = 0;

    Multiset<String> class_count = ConcurrentHashMultiset.create();
    Multiset<String> namespaces = ConcurrentHashMultiset.create();

    try {
        reader = new BufferedReader(new FileReader(file));

        String line = reader.readLine();

        while (line != null && line.length() > 0) {

            String[] parts = line.split(" ");

            class_count.add(parts[0]);
            namespaces.add(parts[1]);

            line = reader.readLine();
            line_count++;

            Vector v = new RandomAccessSparseVector(FEATURES);

            for (int x = 2; x < parts.length; x++) {
                String[] feature = parts[x].split(":");
                int index = Integer.parseInt(feature[0]) % FEATURES;
                double val = Double.parseDouble(feature[1]);

                System.out.println(feature[1] + " = " + val);

                if (index < FEATURES) {
                    v.set(index, val);
                } else {

                    System.out.println("Could Hash: " + index + " to " + (index % FEATURES));

                }

            }

            System.out.println("###");

            if (line_count > debug_break_cnt) {
                break;
            }

        }

        System.out.println("Total Rec Count: " + line_count);

        System.out.println("-------------------- ");

        System.out.println("Classes");
        for (String word : class_count.elementSet()) {
            System.out.println("Class " + word + ": " + class_count.count(word) + " ");
        }

        System.out.println("-------------------- ");

        System.out.println("NameSpaces:");
        for (String word : namespaces.elementSet()) {
            System.out.println("Namespace " + word + ": " + namespaces.count(word) + " ");
        }

    } finally {
        reader.close();
    }

}

From source file:com.clarkparsia.geneious.SequenceVerificationAnnotation.java

private void guessSOType() {
    SequenceCharSequence expectedSequence = alignmentDoc.getCharSequence();

    substitution = null;
    soType = null;

    if (interval.getLength() == 1 && variantType == SequenceVariantType.SUBSTITUTION) {
        int position = interval.getMinimumIndex() - 1;
        SequenceCharSequence e = expectedSequence.subSequence(position - 10, position + 10);
        char expected = expectedSequence.charAt(position);
        LOGGER.debug("          **********V*********");
        LOGGER.debug("Expected: " + e + " " + expected);
        char mostLikelySubstitution = '-';
        int maxOccurence = 0;
        Multiset<Character> possibleSubstitutions = HashMultiset.create();
        for (SequenceDocument actualSequenceDoc : alignmentDoc.getSequencingData()) {
            SequenceCharSequence actualSequence = actualSequenceDoc.getCharSequence();
            char substitution = actualSequence.charAt(position);
            LOGGER.debug("Actual  : " + actualSequence.subSequence(position - 10, position + 10) + " "
                    + substitution);
            if (substitution != '-' && substitution != expected) {
                possibleSubstitutions.add(substitution);
                int occurences = possibleSubstitutions.count(substitution);
                if (occurences > maxOccurence || (occurences == maxOccurence && isValidChar(substitution)
                        && !isValidChar(mostLikelySubstitution))) {
                    mostLikelySubstitution = substitution;
                    maxOccurence = occurences;
                }
            }
        }
        LOGGER.debug("          **********^*********");

        if (isValidChar(mostLikelySubstitution)) {
            substitution = new char[] { expected, mostLikelySubstitution };
            soType = SequenceOntologyUtil.getSOSubstitutionType(expected, mostLikelySubstitution);
        }
    }

    if (substitution == null) {
        soType = SequenceOntologyUtil.getSOType(variantType);
    }
}