List of usage examples for com.google.common.collect Multiset count
int count(@Nullable Object element);
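A minimal sketch of the contract (assuming only Guava on the classpath; the fruit strings are made-up sample data): count returns how many occurrences of the element the multiset contains, and 0 for an element that is absent.

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class MultisetCountDemo {
    public static void main(String[] args) {
        Multiset<String> fruits = HashMultiset.create();
        fruits.add("apple");
        fruits.add("apple");
        fruits.add("banana");

        System.out.println(fruits.count("apple"));  // 2
        System.out.println(fruits.count("banana")); // 1
        System.out.println(fruits.count("cherry")); // 0: absent elements report zero rather than throwing
    }
}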
From source file:org.caleydo.view.domino.internal.NodeSelections.java
public static Set<Block> getFullBlocks(Set<NodeGroup> selection) {
    if (selection.isEmpty())
        return Collections.emptySet();
    Set<Node> nodes = getFullNodes(selection);
    if (nodes.isEmpty())
        return Collections.emptySet();
    Multiset<Block> blocks = HashMultiset.create();
    for (Node node : nodes) {
        Block n = node.getBlock();
        blocks.add(n);
    }
    for (Iterator<Block> it = blocks.elementSet().iterator(); it.hasNext();) {
        Block block = it.next();
        if (block.nodeCount() != blocks.count(block)) {
            it.remove(); // not all groups
        }
    }
    return blocks.elementSet();
}
From source file:org.caleydo.view.domino.internal.toolbar.NodeTools.java
/**
 * Returns the most frequent element of the given multiset, or null if it is empty.
 *
 * @param sets the multiset to inspect
 * @return the element with the highest count
 */
private static <T> T mostFrequent(Multiset<T> sets) {
    if (sets.isEmpty())
        return null;
    Set<T> elems = sets.elementSet();
    T maxV = elems.iterator().next();
    int max = sets.count(maxV);
    for (T elem : elems) {
        int c = sets.count(elem);
        if (c > max) {
            max = c;
            maxV = elem;
        }
    }
    return maxV;
}
From source file:com.google.idea.blaze.java.sync.source.SourceDirectoryCalculator.java
@Nullable
private static <T> T pickMostFrequentlyOccurring(Multiset<T> set, String prefer) {
    T best = null;
    int bestCount = 0;
    for (T candidate : set.elementSet()) {
        int candidateCount = set.count(candidate);
        if (candidateCount > bestCount || (candidateCount == bestCount && candidate.equals(prefer))) {
            best = candidate;
            bestCount = candidateCount;
        }
    }
    return best;
}
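Both examples above look up count() once per distinct element. A behavior-equivalent alternative (a sketch only; the helper name mostFrequentEntry is hypothetical) is to iterate entrySet(), where each Multiset.Entry already carries its element's count:

import java.util.Comparator;
import com.google.common.collect.Multiset;

// Hypothetical helper: returns the element with the highest count, or null for an empty multiset.
static <T> T mostFrequentEntry(Multiset<T> multiset) {
    return multiset.entrySet().stream()
            .max(Comparator.comparingInt(Multiset.Entry::getCount))
            .map(Multiset.Entry::getElement)
            .orElse(null);
}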
From source file:net.librec.util.StringUtil.java
/**
 * Converts a {@code Collection<T>} to a string. If the collection is a
 * {@link Multiset}, each distinct element is printed with its count.
 *
 * @param ts  the input data
 * @param <T> type parameter
 * @return string representation of the input data
 */
public static <T> String toString(Collection<T> ts) {
    if (ts instanceof Multiset<?>) {
        StringBuilder sb = new StringBuilder();
        Multiset<T> es = (Multiset<T>) ts;
        for (T e : es.elementSet()) {
            int count = es.count(e);
            sb.append(e + ", " + count + "\n");
        }
        return sb.toString();
    }
    return toString(ts, ",");
}
From source file:org.caleydo.view.domino.internal.NodeSelections.java
private static Set<Node> getNodes(Set<NodeGroup> selection, boolean checkFull) {
    if (selection.isEmpty())
        return Collections.emptySet();
    Multiset<Node> nodes = HashMultiset.create();
    for (NodeGroup group : selection) {
        Node n = group.getNode();
        nodes.add(n);
    }
    if (checkFull) {
        for (Iterator<Node> it = nodes.elementSet().iterator(); it.hasNext();) {
            Node node = it.next();
            final int expected = node.groupCount();
            if (expected != nodes.count(node)) {
                it.remove(); // not all groups
            }
        }
    }
    return nodes.elementSet();
}
From source file:org.mule.module.extension.internal.util.MuleExtensionUtils.java
private static Set<String> collectRepeatedNames(Collection<? extends Described> describedCollection) {
    if (CollectionUtils.isEmpty(describedCollection)) {
        return ImmutableSet.of();
    }
    Multiset<String> names = LinkedHashMultiset.create();
    for (Described described : describedCollection) {
        if (described == null) {
            throw new IllegalArgumentException("A null described was provided");
        }
        names.add(described.getName());
    }
    names = Multisets.copyHighestCountFirst(names);
    Set<String> repeatedNames = new HashSet<>();
    for (String name : names) {
        if (names.count(name) == 1) {
            break; // iteration is ordered by descending count, so all remaining names are unique
        }
        repeatedNames.add(name);
    }
    return repeatedNames;
}
From source file:it.units.malelab.ege.util.Utils.java
public static <T> double multisetDiversity(Multiset<T> m, Set<T> d) {
    double[] counts = new double[d.size()];
    int i = 0;
    for (T possibleValue : d) {
        counts[i] = m.count(possibleValue);
        i++;
    }
    return 1d - normalizedVariance(counts);
}
From source file:org.apache.ctakes.temporal.duration.Utils.java
/**
 * Convert duration distribution multiset to a format that's easy to parse automatically.
 * Format: <sign/symptom>, <time bin>:<count>, ...
 * Example: apnea, second:5, minute:1, hour:5, day:10, week:1, month:0, year:0
 */
public static String formatDistribution(String mentionText, Multiset<String> durationDistribution,
        String separator, boolean normalize) {
    List<String> distribution = new LinkedList<String>();
    distribution.add(mentionText);
    double total = 0;
    if (normalize) {
        for (String bin : bins) {
            total += durationDistribution.count(bin);
        }
    }
    for (String bin : bins) {
        if (normalize) {
            distribution.add(String.format("%s:%.3f", bin, durationDistribution.count(bin) / total));
        } else {
            distribution.add(String.format("%s:%d", bin, durationDistribution.count(bin)));
        }
    }
    Joiner joiner = Joiner.on(separator);
    return joiner.join(distribution);
}
From source file:org.apache.jackrabbit.oak.segment.SegmentGraph.java
/**
 * Write the gc generation graph of a file store to a stream.
 * <p>
 * The graph is written in
 * <a href="https://gephi.github.io/users/supported-graph-formats/gdf-format/">the Guess GDF format</a>,
 * which is easily imported into <a href="https://gephi.github.io/">Gephi</a>.
 *
 * @param fileStore file store to graph
 * @param out stream to write the graph to
 * @throws Exception
 */
public static void writeGCGraph(@Nonnull ReadOnlyStore fileStore, @Nonnull OutputStream out) throws Exception {
    PrintWriter writer = new PrintWriter(checkNotNull(out));
    try {
        Graph<String> gcGraph = parseGCGraph(checkNotNull(fileStore));
        writer.write("nodedef>name VARCHAR\n");
        for (String gen : gcGraph.vertices()) {
            writer.write(gen + "\n");
        }
        writer.write("edgedef>node1 VARCHAR, node2 VARCHAR, weight INT\n");
        for (Entry<String, Multiset<String>> edge : gcGraph.edges()) {
            String from = edge.getKey();
            Multiset<String> tos = edge.getValue();
            for (String to : tos.elementSet()) {
                if (!from.equals(to) && !to.isEmpty()) {
                    writer.write(from + "," + to + "," + tos.count(to) + "\n");
                }
            }
        }
    } finally {
        writer.close();
    }
}
From source file:edu.uci.ics.sourcerer.tools.java.component.identifier.internal.ClusterMerger.java
public static void mergeByVersions(ClusterCollection clusters) {
    TaskProgressLogger task = TaskProgressLogger.get();
    task.start("Merging " + clusters.size() + " clusters by matching versions");
    TreeSet<Cluster> sortedClusters = new TreeSet<>(Cluster.DESCENDING_SIZE_COMPARATOR);
    Map<VersionedFqnNode, Cluster> fqnToCluster = new HashMap<>();
    for (Cluster cluster : clusters) {
        sortedClusters.add(cluster);
        for (VersionedFqnNode fqn : cluster.getCoreFqns()) {
            fqnToCluster.put(fqn, cluster);
        }
    }
    Collection<Cluster> remainingClusters = new LinkedList<>();
    Set<VersionedFqnNode> usedFqns = new HashSet<>();
    task.start("Merging clusters", "clusters examined", 500);
    // Starting from the most important jar
    // For each cluster
    while (!sortedClusters.isEmpty()) {
        Cluster biggest = sortedClusters.pollFirst();
        remainingClusters.add(biggest);
        usedFqns.addAll(biggest.getCoreFqns());
        // Repeatedly add new fqns to the cluster, until no new ones can be added
        boolean addedSomething = true;
        while (addedSomething) {
            Set<VersionedFqnNode> globalPotentials = new HashSet<>();
            Set<VersionedFqnNode> globalPartials = new HashSet<>();
            // For each version, find any fqns that always occur
            for (ClusterVersion version : biggest.getVersions()) {
                Multiset<VersionedFqnNode> potentials = HashMultiset.create();
                for (Jar jar : version.getJars()) {
                    for (FqnVersion fqn : jar.getFqns()) {
                        if (!usedFqns.contains(fqn.getFqn())) {
                            potentials.add(fqn.getFqn());
                        }
                    }
                }
                int max = version.getJars().size();
                for (VersionedFqnNode fqn : potentials.elementSet()) {
                    if (potentials.count(fqn) > max) {
                        logger.severe("wtf! " + fqn.getFqn());
                        // Check the jars for duplicates
                        for (Jar jar : version.getJars()) {
                            for (FqnVersion node : jar.getFqns()) {
                                if (node.getFqn() == fqn) {
                                    logger.severe(jar.getJar().getProperties().HASH.getValue() + " "
                                            + node.getFingerprint().serialize());
                                }
                            }
                        }
                    }
                    if (potentials.count(fqn) == max && fqn.getJars().isSubset(biggest.getJars())) {
                        globalPotentials.add(fqn);
                    } else {
                        globalPartials.add(fqn);
                    }
                }
            }
            globalPotentials.removeAll(globalPartials);
            // Collect the clusters we plan on merging
            Set<Cluster> newClusters = new HashSet<>();
            for (VersionedFqnNode fqn : globalPotentials) {
                Cluster newCluster = fqnToCluster.get(fqn);
                if (newCluster == null) {
                    logger.log(Level.SEVERE, "Unable to find cluster for: " + fqn.getFqn());
                } else {
                    newClusters.add(newCluster);
                    usedFqns.add(fqn);
                    biggest.addVersionedCore(fqn);
                }
            }
            // // Verify the clusters
            // for (Cluster cluster : newClusters) {
            //     for (VersionedFqnNode fqn : cluster.getCoreFqns()) {
            //         if (!globalPotentials.contains(fqn)) {
            //             logger.severe("Cluster included without fqn: " + fqn.getFqn());
            //             // Every node should have the same JarSet
            //             for (VersionedFqnNode node : cluster.getCoreFqns()) {
            //                 logger.severe("  " + node.getFqn() + " " + globalPotentials.contains(node) + " "
            //                         + globalPartials.contains(node) + " " + usedFqns.contains(node) + " "
            //                         + node.getJars().hashCode());
            //             }
            //         }
            //     }
            // }
            // usedFqns.addAll(globalPotentials);
            // Remove the clusters from the queue
            sortedClusters.removeAll(newClusters);
            addedSomething = !globalPotentials.isEmpty();
        }
        task.progress();
    }
    task.finish();
    clusters.reset(remainingClusters);
    task.report(clusters.size() + " clusters remain");
    task.finish();
}