Example usage for com.google.common.collect Multiset add

Introduction

On this page you can find example usages of com.google.common.collect Multiset add.

Prototype

@Override
boolean add(E element);

Document

Adds a single occurrence of the specified element to this multiset.
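
For orientation, here is a minimal, self-contained sketch of the behavior described above; the class name MultisetAddDemo is ours, chosen for illustration. Adding the same element a second time records a second occurrence, and each call to add returns true for an unrestricted multiset such as HashMultiset.

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class MultisetAddDemo {
    public static void main(String[] args) {
        // A multiset keeps a count per element rather than at most one occurrence.
        Multiset<String> tags = HashMultiset.create();
        tags.add("guava");    // first occurrence
        tags.add("guava");    // second occurrence of the same element
        tags.add("multiset");

        System.out.println(tags.count("guava")); // 2
        System.out.println(tags.size());         // 3: total occurrences, not distinct elements
    }
}

Unlike Set.add, a repeated add is never a no-op: each call increases the element's count by one.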

Usage

From source file:org.caleydo.view.domino.internal.NodeSelections.java

private static Set<Node> getNodes(Set<NodeGroup> selection, boolean checkFull) {
    if (selection.isEmpty())
        return Collections.emptySet();
    Multiset<Node> nodes = HashMultiset.create();
    for (NodeGroup group : selection) {
        Node n = group.getNode();
        nodes.add(n);
    }
    if (checkFull) {
        for (Iterator<Node> it = nodes.elementSet().iterator(); it.hasNext();) {
            Node node = it.next();
            final int expected = node.groupCount();
            if (expected != nodes.count(node)) {
                it.remove();// not all groups
            }
        }
    }
    return nodes.elementSet();
}

From source file:org.sonar.plsqlopen.checks.verifier.PlSqlCheckVerifier.java

private static void validateAnalyzerMessage(Map<IssueAttribute, String> attrs,
        AnalyzerMessage analyzerMessage) {
    AnalyzerMessage.TextSpan textSpan = analyzerMessage.getLocation();
    if (textSpan != null) {
        assertEquals(normalizeColumn(textSpan.startCharacter), attrs, IssueAttribute.START_COLUMN);
        assertEquals(Integer.toString(textSpan.endLine), attrs, IssueAttribute.END_LINE);
        assertEquals(normalizeColumn(textSpan.endCharacter), attrs, IssueAttribute.END_COLUMN);
    }
    if (attrs.containsKey(IssueAttribute.SECONDARY_LOCATIONS)) {
        List<AnalyzerMessage> secondaryLocations = analyzerMessage.getSecondaryLocations();
        Multiset<String> actualLines = HashMultiset.create();
        for (AnalyzerMessage secondaryLocation : secondaryLocations) {
            actualLines.add(Integer.toString(secondaryLocation.getLine()));
        }
        List<String> expected = Lists.newArrayList(Splitter.on(",").omitEmptyStrings().trimResults()
                .split(attrs.get(IssueAttribute.SECONDARY_LOCATIONS)));
        List<String> unexpected = new ArrayList<>();
        for (String actualLine : actualLines) {
            if (expected.contains(actualLine)) {
                expected.remove(actualLine);
            } else {
                unexpected.add(actualLine);
            }
        }
        if (!expected.isEmpty() || !unexpected.isEmpty()) {
            Fail.fail("Secondary locations: expected: " + expected + " unexpected:" + unexpected);
        }
    }
}

From source file:org.mule.module.extension.internal.util.MuleExtensionUtils.java

private static Set<String> collectRepeatedNames(Collection<? extends Described> describedCollection) {
    if (CollectionUtils.isEmpty(describedCollection)) {
        return ImmutableSet.of();
    }

    Multiset<String> names = LinkedHashMultiset.create();

    for (Described described : describedCollection) {
        if (described == null) {
            throw new IllegalArgumentException("A null described was provided");
        }
        names.add(described.getName());
    }

    names = Multisets.copyHighestCountFirst(names);
    Set<String> repeatedNames = new HashSet<>();
    for (String name : names) {
        if (names.count(name) == 1) {
            break;
        }

        repeatedNames.add(name);
    }

    return repeatedNames;
}

From source file:com.sonarsource.lits.Dump.java

static void load(File file, Map<String, Multiset<IssueKey>> result) {
    JSONObject json;
    try (FileInputStream fis = new FileInputStream(file);
            InputStreamReader in = new InputStreamReader(fis, StandardCharsets.UTF_8)) {
        json = (JSONObject) JSONValue.parse(in);
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }

    String ruleKey = ruleKeyFromFileName(file.getName());
    for (Map.Entry<String, Object> component : json.entrySet()) {
        String componentKey = component.getKey();

        Multiset<IssueKey> issues = result.get(componentKey);
        if (issues == null) {
            issues = HashMultiset.create();
            result.put(componentKey, issues);
        }

        JSONArray lines = (JSONArray) component.getValue();
        for (Object line : lines) {
            issues.add(new IssueKey(componentKey, ruleKey, (Integer) line));
        }
    }
}

From source file:edu.cmu.cs.lti.ark.fn.parsing.FeatureExtractor.java

/**
 * @param featureName feature to add
 * @param level indicates whether to conjoin with role name and/or frame name.
 */
protected static void conjoinAndAdd(String featureName, String frameAndRoleName, String roleName,
        ConjoinLevel level, Multiset<String> featureMap) {
    switch (level) {
    case FRAME_AND_ROLE_NAME:
        featureMap.add(UNDERSCORE.join(featureName, frameAndRoleName));
        //intentional fall through
    case ROLE_NAME:
        featureMap.add(UNDERSCORE.join(featureName, roleName));
        //intentional fall through
    case NO_CONJOIN:
        featureMap.add(featureName);
    default:
        break;
    }
}

From source file:org.sonar.java.checks.verifier.CheckVerifier.java

private static void validateAnalyzerMessage(Map<IssueAttribute, String> attrs,
        AnalyzerMessage analyzerMessage) {
    Double effortToFix = analyzerMessage.getCost();
    if (effortToFix != null) {
        assertEquals(Integer.toString(effortToFix.intValue()), attrs, IssueAttribute.EFFORT_TO_FIX);
    }
    AnalyzerMessage.TextSpan textSpan = analyzerMessage.primaryLocation();
    assertEquals(normalizeColumn(textSpan.startCharacter), attrs, IssueAttribute.START_COLUMN);
    assertEquals(Integer.toString(textSpan.endLine), attrs, IssueAttribute.END_LINE);
    assertEquals(normalizeColumn(textSpan.endCharacter), attrs, IssueAttribute.END_COLUMN);
    if (attrs.containsKey(IssueAttribute.SECONDARY_LOCATIONS)) {
        List<AnalyzerMessage> secondaryLocations = analyzerMessage.flows.stream().map(l -> l.get(0))
                .collect(Collectors.toList());
        Multiset<String> actualLines = HashMultiset.create();
        for (AnalyzerMessage secondaryLocation : secondaryLocations) {
            actualLines.add(Integer.toString(secondaryLocation.getLine()));
        }
        List<String> expected = Lists.newArrayList(Splitter.on(",").omitEmptyStrings().trimResults()
                .split(attrs.get(IssueAttribute.SECONDARY_LOCATIONS)));
        List<String> unexpected = new ArrayList<>();
        for (String actualLine : actualLines) {
            if (expected.contains(actualLine)) {
                expected.remove(actualLine);
            } else {
                unexpected.add(actualLine);
            }
        }
        if (!expected.isEmpty() || !unexpected.isEmpty()) {
            // Line is not covered by JaCoCo because of thrown exception but effectively covered in UT.
            Fail.fail(String.format("Secondary locations: expected: %s unexpected:%s. In %s:%d", expected,
                    unexpected, normalizedFilePath(analyzerMessage), analyzerMessage.getLine()));
        }
    }
}

From source file:edu.uci.ics.sourcerer.tools.java.component.identifier.internal.ClusterMerger.java

public static void mergeByVersions(ClusterCollection clusters) {
    TaskProgressLogger task = TaskProgressLogger.get();

    task.start("Merging " + clusters.size() + " clusters by matching versions");

    TreeSet<Cluster> sortedClusters = new TreeSet<>(Cluster.DESCENDING_SIZE_COMPARATOR);
    Map<VersionedFqnNode, Cluster> fqnToCluster = new HashMap<>();
    for (Cluster cluster : clusters) {
        sortedClusters.add(cluster);
        for (VersionedFqnNode fqn : cluster.getCoreFqns()) {
            fqnToCluster.put(fqn, cluster);
        }
    }

    Collection<Cluster> remainingClusters = new LinkedList<>();
    Set<VersionedFqnNode> usedFqns = new HashSet<>();
    task.start("Merging clusters", "clusters examined", 500);
    // Starting from the most important jar
    // For each cluster
    while (!sortedClusters.isEmpty()) {
        Cluster biggest = sortedClusters.pollFirst();
        remainingClusters.add(biggest);

        usedFqns.addAll(biggest.getCoreFqns());
        // Repeatedly add new fqns to the cluster, until no new ones can be added
        boolean addedSomething = true;
        while (addedSomething) {
            Set<VersionedFqnNode> globalPotentials = new HashSet<>();
            Set<VersionedFqnNode> globalPartials = new HashSet<>();

            // For each version, find any fqns that always occur
            for (ClusterVersion version : biggest.getVersions()) {
                Multiset<VersionedFqnNode> potentials = HashMultiset.create();
                for (Jar jar : version.getJars()) {
                    for (FqnVersion fqn : jar.getFqns()) {
                        if (!usedFqns.contains(fqn.getFqn())) {
                            potentials.add(fqn.getFqn());
                        }
                    }
                }

                int max = version.getJars().size();
                for (VersionedFqnNode fqn : potentials.elementSet()) {
                    if (potentials.count(fqn) > max) {
                        logger.severe("wtf! " + fqn.getFqn());
                        // Check the jars for duplicates
                        for (Jar jar : version.getJars()) {
                            for (FqnVersion node : jar.getFqns()) {
                                if (node.getFqn() == fqn) {
                                    logger.severe(jar.getJar().getProperties().HASH.getValue() + " "
                                            + node.getFingerprint().serialize());
                                }
                            }
                        }
                    }
                    if (potentials.count(fqn) == max && fqn.getJars().isSubset(biggest.getJars())) {
                        globalPotentials.add(fqn);
                    } else {
                        globalPartials.add(fqn);
                    }
                }
            }

            globalPotentials.removeAll(globalPartials);

            // Collect the clusters we plan on merging
            Set<Cluster> newClusters = new HashSet<>();
            for (VersionedFqnNode fqn : globalPotentials) {
                Cluster newCluster = fqnToCluster.get(fqn);
                if (newCluster == null) {
                    logger.log(Level.SEVERE, "Unable to find cluster for: " + fqn.getFqn());
                } else {
                    newClusters.add(newCluster);
                    usedFqns.add(fqn);
                    biggest.addVersionedCore(fqn);
                }
            }

            //        // Verify the clusters
            //        for (Cluster cluster : newClusters) {
            //          for (VersionedFqnNode fqn : cluster.getCoreFqns()) {
            //            if (!globalPotentials.contains(fqn)) {
            //              logger.severe("Cluster included without fqn: " + fqn.getFqn());
            //              // Every node should have the same JarSet
            //              for (VersionedFqnNode node : cluster.getCoreFqns()) {
            //                logger.severe(" " + node.getFqn() + " " + globalPotentials.contains(node) + " " + globalPartials.contains(node) + " " + usedFqns.contains(node) + " "+ node.getJars().hashCode());
            //              }
            //            }
            //          }
            //        }

            //        usedFqns.addAll(globalPotentials);

            // Remove the clusters from the queue
            sortedClusters.removeAll(newClusters);

            addedSomething = !globalPotentials.isEmpty();
        }

        task.progress();
    }

    task.finish();

    clusters.reset(remainingClusters);
    task.report(clusters.size() + " clusters remain");
    task.finish();
}

From source file:com.continuuity.loom.layout.ClusterLayout.java

/**
 * Derive a ClusterLayout from a set of {@link Node}s and some {@link Constraints}.
 *
 * @param clusterNodes Nodes to derive the layout from.
 * @param constraints Constraints for the cluster layout.
 * @return ClusterLayout derived from the nodes.
 */
public static ClusterLayout fromNodes(Set<Node> clusterNodes, Constraints constraints) {
    Multiset<NodeLayout> nodeLayoutCounts = HashMultiset.create();
    for (Node node : clusterNodes) {
        Set<String> nodeServices = Sets.newHashSet();
        for (Service service : node.getServices()) {
            nodeServices.add(service.getName());
        }
        String hardwareType = node.getProperties().getHardwaretype();
        String imageType = node.getProperties().getImagetype();
        nodeLayoutCounts.add(new NodeLayout(hardwareType, imageType, nodeServices));
    }
    return new ClusterLayout(constraints, nodeLayoutCounts);
}

From source file:com.github.rinde.rinsim.scenario.measure.Metrics.java

/**
 * Computes the number of occurrences of each event type in the specified
 * {@link Scenario}.
 * @param s The scenario to check.
 * @return An {@link ImmutableMultiset} of event types.
 */
public static ImmutableMultiset<Class<?>> getEventTypeCounts(Scenario s) {
    final Multiset<Class<?>> set = LinkedHashMultiset.create();
    for (final TimedEvent te : s.getEvents()) {
        set.add(te.getClass());
    }
    final List<Class<?>> toMove = new ArrayList<>();
    for (final Class<?> c : set.elementSet()) {
        if (!Modifier.isPublic(c.getModifiers()) && TimedEvent.class.isAssignableFrom(c.getSuperclass())
                && !set.contains(c.getSuperclass())) {
            toMove.add(c);
        }
    }
    for (final Class<?> c : toMove) {
        set.add(c.getSuperclass(), set.count(c));
        set.remove(c, set.count(c));
    }
    return ImmutableMultiset.copyOf(set);
}

From source file:tv.floe.metronome.io.records.RCV1RecordFactory.java

public static void ScanFile(String file, int debug_break_cnt) throws IOException {

    BufferedReader reader = null;
    int line_count = 0;

    Multiset<String> class_count = ConcurrentHashMultiset.create();
    Multiset<String> namespaces = ConcurrentHashMultiset.create();

    try {
        reader = new BufferedReader(new FileReader(file));

        String line = reader.readLine();

        while (line != null && line.length() > 0) {

            String[] parts = line.split(" ");

            class_count.add(parts[0]);
            namespaces.add(parts[1]);

            line = reader.readLine();
            line_count++;

            Vector v = new RandomAccessSparseVector(FEATURES);

            for (int x = 2; x < parts.length; x++) {
                String[] feature = parts[x].split(":");
                int index = Integer.parseInt(feature[0]) % FEATURES;
                double val = Double.parseDouble(feature[1]);

                System.out.println(feature[1] + " = " + val);

                if (index < FEATURES) {
                    v.set(index, val);
                } else {

                    System.out.println("Could Hash: " + index + " to " + (index % FEATURES));

                }

            }

            System.out.println("###");

            if (line_count > debug_break_cnt) {
                break;
            }

        }

        System.out.println("Total Rec Count: " + line_count);

        System.out.println("-------------------- ");

        System.out.println("Classes");
        for (String word : class_count.elementSet()) {
            System.out.println("Class " + word + ": " + class_count.count(word) + " ");
        }

        System.out.println("-------------------- ");

        System.out.println("NameSpaces:");
        for (String word : namespaces.elementSet()) {
            System.out.println("Namespace " + word + ": " + namespaces.count(word) + " ");
        }

    } finally {
        // Guard against NPE: reader is still null if the FileReader constructor threw.
        if (reader != null) {
            reader.close();
        }
    }

}