List of usage examples for com.google.common.collect Multiset count
int count(@Nullable Object element);
From source file:net.shipilev.elections.cikrf.Parser.java
/**
 * Compares two summaries over the geographies they have in common and prints a
 * report of every metric whose aggregate counts disagree.
 *
 * @param pw    destination for the report; flushed before returning
 * @param summ1 first summary (left-hand side of the comparison)
 * @param summ2 second summary (right-hand side of the comparison)
 */
private void summaryCompare(PrintWriter pw, SummaryData summ1, SummaryData summ2) {
    // Only geographies present in both summaries are comparable.
    Set<List<String>> geos = new HashSet<List<String>>();
    geos.addAll(summ1.keys());
    geos.retainAll(summ2.keys());

    boolean foundAnomalies = false;
    for (List<String> geo : geos) {
        Multiset<Metric> val1 = summ1.get(geo);
        Multiset<Metric> val2 = summ2.get(geo);

        // Union of metrics seen on either side; TreeSet gives stable sorted output.
        Collection<Metric> metrics = new TreeSet<Metric>();
        metrics.addAll(val1.elementSet());
        metrics.addAll(val2.elementSet());

        if (!val1.equals(val2)) {
            foundAnomalies = true;
            pw.printf("Found mismatches in aggregates over %s:\n", geo);
            for (Metric key : metrics) {
                // Primitive ints: avoids Integer boxing and accidental identity compares.
                int v1 = val1.count(key);
                int v2 = val2.count(key);
                if (v1 != v2) {
                    if (v2 == 0) {
                        // Guard division by zero: the metric is absent on the right-hand
                        // side, so a relative percentage is undefined (was printed as
                        // "Infinity%" before).
                        pw.printf(" {%9d} vs {%9d} [  n/a ]: %s\n", v1, v2, key);
                    } else {
                        pw.printf(" {%9d} vs {%9d} [%4.1f%%]: %s\n", v1, v2, (v1 * 100.0 / v2 - 100), key);
                    }
                }
            }
            pw.println();
        }
    }
    if (!foundAnomalies) {
        pw.println("No anomalies in data.");
    }
    pw.flush();
}
From source file:cpw.mods.inventorysorter.SortingHandler.java
/**
 * Spreads the collected item counts evenly across the occupied slots of a
 * crafting grid, flushes any integer-division remainder into the first matching
 * stacks, and notifies every slot in the mapping that its contents changed.
 *
 * @param context    action context whose slot belongs to the crafting inventory
 * @param itemcounts total count per distinct item; drained as items are placed
 */
private void distributeInventory(Action.ActionContext context, Multiset<ItemStackHolder> itemcounts) {
    InventoryCrafting grid = (InventoryCrafting) context.slot.inventory;

    // Pass 1: count how many occupied slots hold each distinct item.
    Multiset<ItemStackHolder> slotsPerItem = TreeMultiset.create(new InventoryHandler.ItemStackComparator());
    for (int col = 0; col < grid.getWidth(); col++) {
        for (int row = 0; row < grid.getHeight(); row++) {
            ItemStack stack = grid.getStackInRowAndColumn(col, row);
            if (stack != null) {
                slotsPerItem.add(new ItemStackHolder(stack));
            }
        }
    }

    // Snapshot of the totals before itemcounts is drained below.
    final ImmutableMultiset<ItemStackHolder> totals = ImmutableMultiset.copyOf(itemcounts);

    // Pass 2: give every occupied slot an equal share (integer division;
    // slotsPerItem.count(...) is >= 1 for any non-null stack seen in pass 1).
    for (int col = 0; col < grid.getWidth(); col++) {
        for (int row = 0; row < grid.getHeight(); row++) {
            ItemStack stack = grid.getStackInRowAndColumn(col, row);
            if (stack == null) {
                continue;
            }
            ItemStackHolder holder = new ItemStackHolder(stack);
            final int share = totals.count(holder) / slotsPerItem.count(holder);
            itemcounts.remove(holder, share);
            stack.stackSize = share;
        }
    }

    // Pass 3: dump whatever remainder the division left into the first
    // matching stacks encountered, zeroing the multiset entry as we go.
    for (int col = 0; col < grid.getWidth(); col++) {
        for (int row = 0; row < grid.getHeight(); row++) {
            ItemStack stack = grid.getStackInRowAndColumn(col, row);
            if (stack == null) {
                continue;
            }
            ItemStackHolder holder = new ItemStackHolder(stack);
            if (itemcounts.count(holder) > 0) {
                stack.stackSize += itemcounts.setCount(holder, 0);
            }
        }
    }

    // Tell every affected slot its contents changed.
    for (int slot = context.slotMapping.begin; slot < context.slotMapping.end + 1; slot++) {
        context.player.openContainer.getSlot(slot).onSlotChanged();
    }
}
From source file:net.shipilev.concurrent.torture.Runner.java
/**
 * Serializes the observed outcomes of a concurrency test into JAXB objects,
 * attaches the current environment as key/value pairs, marshals everything to
 * an XML file under {@code destDir}, and returns the assembled result.
 *
 * @param test    the test whose outcomes are being reported
 * @param results multiset of observed result values with their frequencies
 * @return the populated JAXB {@code Result}, even if marshalling failed
 */
private Result dump(ConcurrencyTest test, Multiset<Long> results) {
    ObjectFactory factory = new ObjectFactory();
    Result result = factory.createResult();
    result.setName(test.getClass().getName());

    // One <state> entry per distinct outcome, truncated to resultSize() bytes.
    for (Long observed : results.elementSet()) {
        byte[] raw = longToByteArr(observed);
        byte[] truncated = new byte[test.resultSize()];
        System.arraycopy(raw, 0, truncated, 0, test.resultSize());

        State state = factory.createState();
        state.setId(Arrays.toString(truncated));
        state.setCount(results.count(observed));
        result.getState().add(state);
    }

    // Record the execution environment alongside the results.
    Env env = factory.createEnv();
    for (Map.Entry<String, String> entry : Environment.getEnvironment().entrySet()) {
        Kv kv = factory.createKv();
        kv.setKey(entry.getKey());
        kv.setValue(entry.getValue());
        env.getProperty().add(kv);
    }
    result.setEnv(env);

    // Best-effort marshalling: failing to write the XML report is logged to
    // stderr but does not abort the run.
    try {
        String packageName = Result.class.getPackage().getName();
        JAXBContext jc = JAXBContext.newInstance(packageName);
        Marshaller marshaller = jc.createMarshaller();
        marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
        marshaller.marshal(result, new File(destDir + "/" + test.getClass().getName() + ".xml"));
    } catch (Throwable e) {
        e.printStackTrace();
    }
    return result;
}
From source file:BibTex.IOmethods.java
public void writeNumberPapersPerYear(Set<BibTexRef> refs) throws IOException { BufferedWriter bw = new BufferedWriter(new FileWriter(folder + "papers per year.csv")); StringBuilder sb = new StringBuilder(); String sep = "|"; //creation of the data structures for I/O Multiset<String> years = TreeMultiset.create(); for (BibTexRef ref : refs) { String year = ref.getYear(); years.add(year);//from ww w .j av a 2s. c o m } for (String year : years.elementSet()) { sb.append(year); sb.append(sep); } sb.append("\n"); for (String year : years.elementSet()) { sb.append(years.count(year)); sb.append(sep); } sb.append("\n"); bw.write(sb.toString()); bw.close(); }
From source file:edu.uw.cs.lil.tiny.test.ccg.lambda.SingleSentencePartialCreditTestingStatistics.java
private PartialCreditTriplet partialCompare(LogicalExpression gold, LogicalExpression label) { final Multiset<Pair<? extends LogicalExpression, ? extends LogicalExpression>> goldPairs = GetPredConstPairs .of(gold);/* w ww .j a va 2 s . c o m*/ final Multiset<Pair<? extends LogicalExpression, ? extends LogicalExpression>> labelPairs; if (label == null) { labelPairs = HashMultiset.create(); } else { labelPairs = GetPredConstPairs.of(label); } // The "intersection" of the gold and label pair sets = the number of // matches final Multiset<Pair<? extends LogicalExpression, ? extends LogicalExpression>> intersection = HashMultiset .create(); for (final Entry<Pair<? extends LogicalExpression, ? extends LogicalExpression>> entry : goldPairs .entrySet()) { intersection.setCount(entry.getElement(), Math.min(entry.getCount(), labelPairs.count(entry.getElement()))); } return new PartialCreditTriplet(goldPairs.size(), labelPairs.size(), intersection.size()); }
From source file:com.continuuity.loom.layout.change.AddServiceChangeIterator.java
/**
 * Builds an iterator over the ways {@code service} can be added to the given
 * cluster layout, honoring layout and service constraints.
 *
 * @param clusterLayout current layout of the cluster
 * @param service       name of the service to add
 */
public AddServiceChangeIterator(ClusterLayout clusterLayout, String service) {
    this.service = service;

    // Constraints are pruned against the cluster's full service set, so collect
    // every service that would exist once the new one is added.
    Set<String> allServices = Sets.newHashSet(service);
    for (NodeLayout layout : clusterLayout.getLayout().elementSet()) {
        allServices.addAll(layout.getServiceNames());
    }

    // Determine which node layouts can legally take the new service.
    this.expandableNodeLayouts = Lists.newArrayListWithCapacity(clusterLayout.getLayout().elementSet().size());
    Multiset<NodeLayout> expandedCounts = HashMultiset.create();
    for (NodeLayout candidate : clusterLayout.getLayout().elementSet()) {
        NodeLayout expanded = NodeLayout.addServiceToNodeLayout(candidate, service);
        if (expanded.satisfiesConstraints(clusterLayout.getConstraints(), allServices)) {
            expandableNodeLayouts.add(candidate);
            expandedCounts.add(candidate, clusterLayout.getLayout().count(candidate));
        }
    }

    // Prefer "better" node layouts first.
    Collections.sort(this.expandableNodeLayouts, new NodeLayoutComparator(null, null));

    // Cap how many times the service may be added per node layout: never more
    // than the number of nodes that actually have that layout.
    this.nodeLayoutMaxCounts = new int[expandableNodeLayouts.size()];
    for (int idx = 0; idx < nodeLayoutMaxCounts.length; idx++) {
        nodeLayoutMaxCounts[idx] = expandedCounts.count(expandableNodeLayouts.get(idx));
    }

    // Upper bound: add the service to every eligible node.
    this.nodesToAddTo = expandedCounts.size();
    // Lower bound: the service must land on at least one node.
    this.minNodesToAddTo = 1;

    // Tighten both bounds using the service's min/max constraints, if present.
    ServiceConstraint serviceConstraint = clusterLayout.getConstraints().getServiceConstraints().get(service);
    if (serviceConstraint != null) {
        this.nodesToAddTo = Math.min(serviceConstraint.getMaxCount(), this.nodesToAddTo);
        this.minNodesToAddTo = Math.max(serviceConstraint.getMinCount(), this.minNodesToAddTo);
    }

    this.nodeLayoutCountIterator = (this.nodesToAddTo < 1) ? null
            : new SlottedCombinationIterator(expandableNodeLayouts.size(), nodesToAddTo, nodeLayoutMaxCounts);
}
From source file:org.summer.dsl.xbase.typesystem.references.LightweightTypeReference.java
/**
 * Returns the list of all super types, i.e. the super class and the implemented
 * interfaces, with type parameters resolved: the super types of
 * {@code ArrayList<String>} include {@code List<String>} and
 * {@code Collection<String>} rather than {@code Collection<E>}.
 *
 * @return the list of all super types, can be empty.
 */
public List<LightweightTypeReference> getAllSuperTypes() {
    final List<LightweightTypeReference> collected = Lists.newArrayList();
    // Per raw type: the recorded traversal distance (stored as distance + 1).
    final Multiset<JvmType> distances = HashMultiset.create(7);
    // Per raw type: the order in which it was visited; tie-breaker for sorting.
    final Multiset<JvmType> visitOrder = HashMultiset.create(7);
    collectSuperTypes(new SuperTypeAcceptor() {
        int visits = 0;

        public boolean accept(LightweightTypeReference superType, int distance) {
            JvmType type = superType.getType();
            visitOrder.add(type, visits++);
            if (distances.contains(type)) {
                // Already seen: record the larger distance, otherwise stop
                // descending into this branch (nothing new to learn).
                int known = distances.count(type);
                if (known < distance + 1) {
                    distances.setCount(type, distance + 1);
                } else {
                    return false;
                }
            } else {
                collected.add(superType);
                distances.add(type, distance + 1);
            }
            return true;
        }
    });
    // Order by distance first, then by first-visit order; references without a
    // resolvable raw type sort toward the end.
    Collections.sort(collected, new Comparator<LightweightTypeReference>() {
        public int compare(@Nullable LightweightTypeReference o1, @Nullable LightweightTypeReference o2) {
            if (o1 == null || o2 == null) {
                throw new IllegalArgumentException();
            }
            JvmType first = o1.getType();
            JvmType second = o2.getType();
            if (first == null) {
                return 1;
            }
            if (second == null) {
                return -1;
            }
            int byDistance = Ints.compare(distances.count(first), distances.count(second));
            if (byDistance != 0) {
                return byDistance;
            }
            return Ints.compare(visitOrder.count(first), visitOrder.count(second));
        }
    });
    return collected;
}
From source file:edu.cmu.lti.oaqa.baseqa.concept.rerank.scorers.LuceneConceptScorer.java
/**
 * Runs {@code queryString} against the index and records, per concept URI, the
 * best (lowest) rank and best (highest) score observed — both globally under
 * the key {@code conf + "/global"} and per source under {@code conf + source}.
 *
 * @param queryString raw query text; blank queries are silently ignored
 * @param conf        configuration key prefix for the rank/score tables
 * @throws RuntimeException wrapping any query-parse or index I/O failure
 */
private void search(String queryString, String conf) throws RuntimeException {
    if (queryString.trim().isEmpty())
        return;
    ScoreDoc[] results;
    try {
        Query query = parser.parse(queryString);
        results = searcher.search(query, hits).scoreDocs;
    } catch (ParseException | IOException e) {
        throw new RuntimeException(e);
    }
    // Tracks each hit's rank within its own source.
    Multiset<String> sourceCounts = HashMultiset.create();
    for (int i = 0; i < results.length; i++) {
        try {
            Document doc = reader.document(results[i].doc);
            String source = sourceFieldName == null ? null : doc.get(sourceFieldName);
            String uri = uriPrefix.get(source) + (idFieldName == null ? null : doc.get(idFieldName));
            String gconf = conf + "/global";
            // Keep the best (lowest) global rank for this URI.
            // FIX: use synchronizedPut consistently — the original mixed plain
            // put() for the global entries with synchronizedPut() for the
            // per-source entries on the same shared tables.
            if (!uri2conf2rank.contains(uri, gconf) || uri2conf2rank.get(uri, gconf) > i) {
                synchronizedPut(uri2conf2rank, uri, gconf, i);
            }
            double score = results[i].score;
            // Keep the best (highest) global score for this URI.
            if (!uri2conf2score.contains(uri, gconf) || uri2conf2score.get(uri, gconf) < score) {
                synchronizedPut(uri2conf2score, uri, gconf, score);
            }
            String sconf = conf + source;
            int sourceRank = sourceCounts.count(source);
            sourceCounts.add(source);
            // Keep the best per-source rank and score for this URI.
            // NOTE(review): the contains/get checks themselves are still
            // check-then-act; confirm whether callers invoke search()
            // concurrently and need the whole update to be atomic.
            if (!uri2conf2rank.contains(uri, sconf) || uri2conf2rank.get(uri, sconf) > sourceRank) {
                synchronizedPut(uri2conf2rank, uri, sconf, sourceRank);
            }
            if (!uri2conf2score.contains(uri, sconf) || uri2conf2score.get(uri, sconf) < score) {
                synchronizedPut(uri2conf2score, uri, sconf, score);
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
From source file:com.cloudera.knittingboar.records.RCV1RecordFactory.java
public static void ScanFile(String file, int debug_break_cnt) throws IOException { ConstantValueEncoder encoder_test = new ConstantValueEncoder("test"); BufferedReader reader = null; // Collection<String> words int line_count = 0; Multiset<String> class_count = ConcurrentHashMultiset.create(); Multiset<String> namespaces = ConcurrentHashMultiset.create(); try {/*w ww . j a v a2 s . c o m*/ // System.out.println( newsgroup ); reader = new BufferedReader(new FileReader(file)); String line = reader.readLine(); while (line != null && line.length() > 0) { // shard_writer.write(line + "\n"); // out += line; String[] parts = line.split(" "); // System.out.println( "Class: " + parts[0] ); class_count.add(parts[0]); namespaces.add(parts[1]); line = reader.readLine(); line_count++; Vector v = new RandomAccessSparseVector(FEATURES); for (int x = 2; x < parts.length; x++) { // encoder_test.addToVector(parts[x], v); // System.out.println( parts[x] ); String[] feature = parts[x].split(":"); int index = Integer.parseInt(feature[0]) % FEATURES; double val = Double.parseDouble(feature[1]); // System.out.println( feature[1] + " = " + val ); if (index < FEATURES) { v.set(index, val); } else { System.out.println("Could Hash: " + index + " to " + (index % FEATURES)); } } Utils.PrintVectorSectionNonZero(v, 10); System.out.println("###"); if (line_count > debug_break_cnt) { break; } } System.out.println("Total Rec Count: " + line_count); System.out.println("-------------------- "); System.out.println("Classes"); for (String word : class_count.elementSet()) { System.out.println("Class " + word + ": " + class_count.count(word) + " "); } System.out.println("-------------------- "); System.out.println("NameSpaces:"); for (String word : namespaces.elementSet()) { System.out.println("Namespace " + word + ": " + namespaces.count(word) + " "); } /* * TokenStream ts = analyzer.tokenStream("text", reader); * ts.addAttribute(CharTermAttribute.class); * * // for each word in the stream, minus 
non-word stuff, add word to * collection while (ts.incrementToken()) { String s = * ts.getAttribute(CharTermAttribute.class).toString(); * //System.out.print( " " + s ); //words.add(s); out += s + " "; } */ } finally { reader.close(); } // return out + "\n"; }
From source file:com.davidsoergel.trees.AbstractRootedPhylogeny.java
/** * {@inheritDoc}/*from ww w. j a va 2 s. c o m*/ */ public void setLeafWeights(Multiset<T> leafWeights) throws TreeException { for (PhylogenyNode<T> leaf : getLeaves()) { int value = leafWeights.count(leaf.getPayload()); leaf.setWeight(new Double(value)); } normalizeWeights(); }