List of usage examples for com.google.common.collect Multiset add
int add(@Nullable E element, int occurrences); // adds the given number of occurrences of the element and returns the element's count before the operation
From source file:edu.berkeley.compbio.phyloutils.EnvironmentParser.java
public static Collection<RootedPhylogeny<String>> read(InputStream is, RootedPhylogeny<String> tree) throws IOException, TreeException, NoSuchNodeException { BufferedReader r = new BufferedReader(new InputStreamReader(is)); Map<String, Multiset<String>> environmentCounts = new HashMap<String, Multiset<String>>(); String line;// ww w.ja v a2s .c om while ((line = r.readLine()) != null) { String[] tokens = line.split(" "); Multiset<String> env = environmentCounts.get(tokens[1]); if (env == null) { env = HashMultiset.create(); environmentCounts.put(tokens[1], env); } env.add(tokens[0], Integer.parseInt(tokens[2])); } Set<RootedPhylogeny<String>> result = new HashSet<RootedPhylogeny<String>>(); for (Map.Entry<String, Multiset<String>> entry : environmentCounts.entrySet()) { String name = entry.getKey(); Multiset<String> ids = entry.getValue(); RootedPhylogeny<String> subtree = tree.extractTreeWithLeafIDs(ids.elementSet(), false, false, AbstractRootedPhylogeny.MutualExclusionResolutionMode.EXCEPTION); subtree.setPayload(name); subtree.setLeafWeights(ids); result.add(subtree); } return result; }
From source file:com.github.fhirschmann.clozegen.lib.multiset.ReadMultisets.java
/**
 * Parses frequencies from a URL. Each line holds a subject and its count,
 * separated by {@code \t}, with the count on the right-hand side.
 *
 * <p>For example, assuming your frequency file contains the following lines:
 * <pre>
 * one of the 200
 * because of the 100
 * members of the 50
 * </pre>
 * Then getting the count of "because of the" will yield 100.
 *
 * @param url the URL of the file to parse
 * @param charset the charset of the file
 * @return the parsed frequencies
 * @throws IOException on errors reading from the file
 */
public static Multiset<String> parseMultiset(final URL url, final Charset charset) throws IOException {
    final Multiset<String> frequencies = LinkedHashMultiset.create();
    // Read all lines up front, then record each subject with its tab-separated count.
    for (final String line : Resources.readLines(checkNotNull(url), charset)) {
        final String[] fields = line.split("\t");
        frequencies.add(fields[0], Integer.parseInt(fields[1]));
    }
    return frequencies;
}
From source file:com.cloudera.oryx.rdf.common.rule.NumericDecision.java
static List<Decision> numericDecisionsFromExamples(int featureNumber, Iterable<Example> examples, int suggestedMaxSplitCandidates) { Multiset<Float> sortedFeatureValueCounts = TreeMultiset.create(); StorelessUnivariateStatistic mean = new Mean(); int numExamples = 0; for (Example example : examples) { NumericFeature feature = (NumericFeature) example.getFeature(featureNumber); if (feature == null) { continue; }/*w w w. j ava 2 s . c om*/ numExamples++; float value = feature.getValue(); sortedFeatureValueCounts.add(value, 1); mean.increment(value); } // Make decisions from split points that divide up input into roughly equal amounts of examples List<Decision> decisions = Lists.newArrayListWithExpectedSize(suggestedMaxSplitCandidates); int approxExamplesPerSplit = FastMath.max(1, numExamples / suggestedMaxSplitCandidates); int examplesInSplit = 0; float lastValue = Float.NaN; // This will iterate in order of value by nature of TreeMap for (Multiset.Entry<Float> entry : sortedFeatureValueCounts.entrySet()) { float value = entry.getElement(); if (examplesInSplit >= approxExamplesPerSplit) { decisions.add( new NumericDecision(featureNumber, (value + lastValue) / 2.0f, (float) mean.getResult())); examplesInSplit = 0; } examplesInSplit += entry.getCount(); lastValue = value; } // The vital condition here is that if decision n decides an example is positive, then all subsequent // decisions in the list will also find it positive. So we need to order from highest threshold to lowest Collections.reverse(decisions); return decisions; }
From source file:com.google.javascript.jscomp.deps.SortedDependencies.java
private static <T> List<T> topologicalStableSort(List<T> items, Multimap<T, T> deps) { if (items.isEmpty()) { // Priority queue blows up if we give it a size of 0. Since we need // to special case this either way, just bail out. return new ArrayList<>(); }//from w ww . j a v a 2 s . c o m final Map<T, Integer> originalIndex = new HashMap<>(); for (int i = 0; i < items.size(); i++) { originalIndex.put(items.get(i), i); } PriorityQueue<T> inDegreeZero = new PriorityQueue<>(items.size(), new Comparator<T>() { @Override public int compare(T a, T b) { return originalIndex.get(a).intValue() - originalIndex.get(b).intValue(); } }); List<T> result = new ArrayList<>(); Multiset<T> inDegree = HashMultiset.create(); Multimap<T, T> reverseDeps = ArrayListMultimap.create(); Multimaps.invertFrom(deps, reverseDeps); // First, add all the inputs with in-degree 0. for (T item : items) { Collection<T> itemDeps = deps.get(item); inDegree.add(item, itemDeps.size()); if (itemDeps.isEmpty()) { inDegreeZero.add(item); } } // Then, iterate to a fixed point over the reverse dependency graph. while (!inDegreeZero.isEmpty()) { T item = inDegreeZero.remove(); result.add(item); for (T inWaiting : reverseDeps.get(item)) { inDegree.remove(inWaiting, 1); if (inDegree.count(inWaiting) == 0) { inDegreeZero.add(inWaiting); } } } return result; }
From source file:org.sonar.api.utils.KeyValueFormat.java
/** * @since 2.7//from w w w. ja va 2 s .com */ public static <K> Multiset<K> parseMultiset(String data, Converter<K> keyConverter) { Multiset<K> multiset = LinkedHashMultiset.create();// to keep the same order if (data != null) { String[] pairs = StringUtils.split(data, PAIR_SEPARATOR); for (String pair : pairs) { String[] keyValue = StringUtils.split(pair, FIELD_SEPARATOR); String key = keyValue[0]; String value = (keyValue.length == 2 ? keyValue[1] : "0"); multiset.add(keyConverter.parse(key), new IntegerConverter().parse(value)); } } return multiset; }
From source file:com.anhth12.eval.ConfusionMatrix.java
/** * Increments the entry specified by actual and predicted by count. */// w w w . j a v a 2 s . com public void add(T actual, T predicted, int count) { if (matrix.containsKey(actual)) { matrix.get(actual).add(predicted, count); } else { Multiset<T> counts = HashMultiset.create(); counts.add(predicted, count); matrix.put(actual, counts); } classes.add(actual); classes.add(predicted); }
From source file:com.continuuity.loom.layout.change.AddServiceChangeIterator.java
@Override public ClusterLayoutChange next() { if (hasNext()) { int[] nodeLayoutCounts = nodeLayoutCountIterator.next(); // create the change object from the integer array Multiset<NodeLayout> counts = HashMultiset.create(); for (int i = 0; i < nodeLayoutCounts.length; i++) { counts.add(expandableNodeLayouts.get(i), nodeLayoutCounts[i]); }/*from w w w . j a v a2s .co m*/ return new AddServicesChange(counts, service); } throw new NoSuchElementException(); }
From source file:org.deeplearning4j.eval.ConfusionMatrix.java
/**
 * Increments the entry specified by actual and predicted by count.
 */
public void add(T actual, T predicted, int count) {
    // Fetch-or-create the row for this actual class, then bump the predicted column.
    Multiset<T> row = matrix.get(actual);
    if (row == null) {
        row = HashMultiset.create();
        matrix.put(actual, row);
    }
    row.add(predicted, count);
}
From source file:org.joda.beans.ser.GuavaSerIteratorFactory.java
// Wraps a Guava Multiset as a SerIterable so the serializer can treat it as a
// COUNTED collection of (element, occurrence-count) pairs.
private static SerIterable multiset(final Class<?> valueType, final List<Class<?>> valueTypeTypes,
        final Multiset<Object> coll) {
    return new SerIterable() {
        @Override
        public SerIterator iterator() {
            // Delegates to the overload that iterates the multiset's entries.
            return multiset(coll, Object.class, valueType, valueTypeTypes);
        }

        @Override
        public void add(Object key, Object column, Object value, int count) {
            // Multisets are not keyed; a non-null key indicates malformed serialized input.
            if (key != null) {
                throw new IllegalArgumentException("Unexpected key");
            }
            coll.add(value, count);
        }

        @Override
        public Object build() {
            return coll;
        }

        @Override
        public SerCategory category() {
            // COUNTED tells the serializer each element carries an occurrence count.
            return SerCategory.COUNTED;
        }

        @Override
        public Class<?> valueType() {
            return valueType;
        }

        @Override
        public List<Class<?>> valueTypeTypes() {
            return valueTypeTypes;
        }
    };
}
From source file:com.github.fhirschmann.clozegen.lib.generators.FrequencyGapGenerator.java
@Override public Optional<Gap> generate(final int count) { if (model.getMultiset().contains(token)) { int tokenCount = model.getMultiset().count(token); Multiset<String> ms = HashMultiset.create(); // compute a multiset with counts(x) = |count(x) - count(token)| for (Entry<String> entry : model.getMultiset().entrySet()) { ms.add(entry.getElement(), Math.abs(entry.getCount() - tokenCount)); }//from w ww . ja va 2s .com if (ms.elementSet().size() < count - 1) { // not enough data to create as many answer options as requested return Optional.absent(); } else { return Optional.of( Gap.with(token, Lists.reverse(MultisetUtils.sortedElementList(ms)).subList(0, count - 1))); } } else { // we have no knowledge of the word in question return Optional.absent(); } }