Example usage for com.google.common.collect Multiset count

List of usage examples for com.google.common.collect Multiset count

Introduction

On this page you can find example usage of com.google.common.collect Multiset count.

Prototype

int count(@Nullable Object element);

Document

Returns the number of occurrences of an element in this multiset (the count of the element). If the element is not present in the multiset, the count is zero.
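
The minimal, self-contained sketch below (not taken from any of the projects listed under Usage) illustrates this behaviour: an element added twice reports a count of 2, an element never added reports 0, and removing one occurrence decrements the count.

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class MultisetCountDemo {
    public static void main(String[] args) {
        Multiset<String> words = HashMultiset.create();
        words.add("apple");
        words.add("apple");
        words.add("banana");

        System.out.println(words.count("apple"));  // 2
        System.out.println(words.count("banana")); // 1
        System.out.println(words.count("cherry")); // 0 -- never added

        words.remove("apple"); // removes a single occurrence
        System.out.println(words.count("apple"));  // 1
    }
}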

Usage

From source file:org.apache.niolex.common.guava.GuavaCollections.java

/**
 * @param args
 */
public static void main(String[] args) {
    Multiset<String> wordsMultiset = HashMultiset.create();
    wordsMultiset.add("abc");
    wordsMultiset.add("abc");
    wordsMultiset.add("abcd");
    System.out.println("count => " + wordsMultiset.count("abc"));
    System.out.println("count => " + wordsMultiset.count("abcd"));

    BiMap<String, String> biMap = HashBiMap.create();
    biMap.put("good", "morning");
    biMap.put("bad", "afternoon");
    System.out.println("good => " + biMap.get("good"));
    System.out.println("afternoon => " + biMap.inverse().get("afternoon"));

    RangeMap<Integer, String> rangeMap = TreeRangeMap.create();
    rangeMap.put(Range.closed(1, 11), "Nice");
    rangeMap.put(Range.openClosed(11, 15), "Girl");
    System.out.println("11 => " + rangeMap.get(11));
    System.out.println("12 => " + rangeMap.get(12));
    System.out.println("15 => " + rangeMap.get(15));
    System.out.println("16 => " + rangeMap.get(16));

    List<Integer> countUp = Ints.asList(1, 2, 3, 4, 5);
    List<Integer> countDown = Lists.reverse(countUp); // {5, 4, 3, 2, 1}
    System.out.println("countUp => " + countUp);
    System.out.println("countDown => " + countDown);
}

From source file:com.github.christofluyten.experiment.MeasureGendreau.java

/**
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
    final List<Gendreau06Scenario> scns = new ArrayList<>(
            Gendreau06Parser.parser().addDirectory("files/gendreau2006/requests").parse());

    Collections.sort(scns, new Comparator<Gendreau06Scenario>() {
        @Override
        public int compare(Gendreau06Scenario o1, Gendreau06Scenario o2) {
            final int compare = o1.getProblemClass().getId().compareTo(o2.getProblemClass().getId());
            if (compare == 0) {
                return o1.getProblemInstanceId().compareTo(o2.getProblemInstanceId());
            }
            return compare;
        }
    });

    final List<Map<Property, Object>> propsList = new ArrayList<>();
    for (final Gendreau06Scenario scen : scns) {
        final StatisticalSummary urgency = Metrics.measureUrgency(scen);
        final Multiset<Class<?>> counts = Metrics.getEventTypeCounts(scen);

        final long scenarioLength = scen.getProblemClass().duration * 60000;
        final double dyn = Metrics.measureDynamism(scen, scenarioLength);

        final ImmutableMap<Property, Object> prop = ImmutableMap.<Property, Object>builder()
                .put(Property.PROBLEM_CLASS, scen.getProblemClass().getId())
                .put(Property.INSTANCE_ID, scen.getProblemInstanceId()).put(Property.DYNAMISM, dyn)
                .put(Property.URGENCY_MEAN, urgency.getMean() / 60000d)
                .put(Property.URGENCY_SD, urgency.getStandardDeviation() / 60000d)
                .put(Property.NUM_ORDERS, counts.count(AddParcelEvent.class))
                .put(Property.NUM_VEHICLES, counts.count(AddVehicleEvent.class))
                .putAll(MAP.get(scen.getProblemInstanceId() + scen.getProblemClass().getId())).build();

        propsList.add(prop);
    }

    final File targetFile = new File(PROPS_FILE);
    write(propsList, targetFile, asList(Property.values()));
    System.out.println("Results written to " + targetFile.getAbsolutePath());
}

From source file:edu.byu.nlp.data.app.AnnotationStream2Csv.java

public static void main(String[] args) throws IOException {
    // parse CLI arguments
    new ArgumentParser(AnnotationStream2Csv.class).parseArgs(args);
    Preconditions.checkNotNull(jsonStream, "You must provide a valid --json-stream!");

    Dataset data = readData(jsonStream);

    // optionally aggregate by instance
    String header = "annotator,start,end,annotation,label,source,num_correct_annotations,num_annotations,cum_num_annotations,num_annotators,cum_num_annotators\n";

    // iterate over instances and (optionally) annotations
    final StringBuilder bld = new StringBuilder();

    switch (row) {
    case ANNOTATION:

        // sort all annotations by end time
        Map<FlatInstance<SparseFeatureVector, Integer>, DatasetInstance> ann2InstMap = Maps
                .newIdentityHashMap();
        List<FlatInstance<SparseFeatureVector, Integer>> annotationList = Lists.newArrayList();
        for (DatasetInstance inst : data) {
            for (FlatInstance<SparseFeatureVector, Integer> ann : inst.getAnnotations().getRawAnnotations()) {
                ann2InstMap.put(ann, inst); // record the instance for each annotation
                annotationList.add(ann);
            }
        }
        Collections.sort(annotationList, new Comparator<FlatInstance<SparseFeatureVector, Integer>>() {
            @Override
            public int compare(FlatInstance<SparseFeatureVector, Integer> o1,
                    FlatInstance<SparseFeatureVector, Integer> o2) {
                // no null checking since we want to fail if annotation time is not set. 
                return Long.compare(o1.getEndTimestamp(), o2.getEndTimestamp());
            }
        });

        Set<Integer> annotators = Sets.newHashSet();
        for (Enumeration<FlatInstance<SparseFeatureVector, Integer>> item : Iterables2
                .enumerate(annotationList)) {
            FlatInstance<SparseFeatureVector, Integer> ann = item.getElement();
            DatasetInstance inst = ann2InstMap.get(ann);
            annotators.add(ann.getAnnotator());

            bld.append(ann.getAnnotator() + ",");
            bld.append(ann.getStartTimestamp() + ",");
            bld.append(ann.getEndTimestamp() + ",");
            bld.append(ann.getAnnotation() + ",");
            bld.append(inst.getLabel() + ",");
            bld.append(
                    data.getInfo().getIndexers().getInstanceIdIndexer().get(inst.getInfo().getSource()) + ",");
            bld.append((!inst.hasLabel() ? "NA" : ann.getAnnotation() == inst.getLabel() ? 1 : 0) + ","); // num correct
            bld.append(1 + ","); // num annotations
            bld.append((item.getIndex() + 1) + ","); // cumulative num annotations
            bld.append(1 + ","); // num annotators
            bld.append(annotators.size() + ""); // cumulative num annotators
            bld.append("\n");
        }
        break;
    case INSTANCE:
        int cumNumAnnotations = 0;
        for (DatasetInstance inst : data) {
            cumNumAnnotations += inst.getInfo().getNumAnnotations();

            int numCorrectAnnotations = 0;
            // sum over all the annotators who put the correct answer (if available)
            if (inst.hasLabel()) {
                Integer correctLabel = inst.getLabel();
                for (int j = 0; j < data.getInfo().getNumAnnotators(); j++) {
                    numCorrectAnnotations += inst.getAnnotations().getLabelAnnotations()
                            .getRow(j)[correctLabel];
                }
            }

            bld.append("NA,");
            bld.append("NA,");
            bld.append("NA,");
            bld.append("NA,");
            bld.append(inst.getLabel() + ",");
            bld.append(inst.getInfo().getSource() + ",");
            bld.append(numCorrectAnnotations + ",");
            bld.append(inst.getInfo().getNumAnnotations() + ",");
            bld.append(cumNumAnnotations + ",");
            bld.append(inst.getInfo().getNumAnnotators() + ",");
            bld.append("NA"); // cumulative num annotators
            bld.append("\n");
        }
        break;

    case ANNOTATOR:
        Multiset<Integer> perAnnotatorAnnotationCounts = HashMultiset.create();
        Multiset<Integer> perAnnotatorCorrectAnnotationCounts = HashMultiset.create();
        for (DatasetInstance inst : data) {
            for (FlatInstance<SparseFeatureVector, Integer> ann : inst.getAnnotations().getRawAnnotations()) {
                int annotatorId = ann.getAnnotator();

                perAnnotatorAnnotationCounts.add(annotatorId);

                if (inst.getLabel() == ann.getAnnotation()) {
                    perAnnotatorCorrectAnnotationCounts.add(annotatorId);
                }

            }
        }

        for (String annotatorId : data.getInfo().getAnnotatorIdIndexer()) {

            bld.append(annotatorId + ",");
            bld.append("NA,");
            bld.append("NA,");
            bld.append("NA,");
            bld.append("NA,");
            bld.append("NA,");
            bld.append(perAnnotatorCorrectAnnotationCounts.count(annotatorId) + ",");
            bld.append(perAnnotatorAnnotationCounts.count(annotatorId) + ",");
            bld.append("NA,");
            bld.append("1,"); // num annotators
            bld.append("NA"); // cumulative num annotators
            bld.append("\n");
        }

        break;

    default:
        Preconditions.checkArgument(false, "unknown row type: " + row);
        break;
    }

    // output to console
    if (out == null) {
        System.out.println(header);
        System.out.println(bld.toString());
    } else {
        File outfile = new File(out);
        Files.write(header, outfile, Charsets.UTF_8);
        Files.append(bld, outfile, Charsets.UTF_8);
    }

}

From source file:org.apache.ctakes.relationextractor.eval.PrintRelationStatistics.java

public static void main(String[] args) throws Exception {
    Options options = new Options();
    CmdLineParser parser = new CmdLineParser(options);
    parser.parseArgument(args);

    CollectionReader reader = CollectionReaderFactory.createReader(XReader.class,
            FilesCollectionReader.PARAM_ROOT_FILE, options.trainDirectory.getPath());

    Multiset<Integer> mentionsBetweenCounts = HashMultiset.create();
    JCas jCas = JCasFactory.createJCasFromPath("../ctakes-type-system/desc/common_type_system.xml");
    while (reader.hasNext()) {
        reader.getNext(jCas.getCas());
        JCas goldView = jCas.getView(GOLD_VIEW_NAME);
        for (BinaryTextRelation relation : JCasUtil.select(goldView, BinaryTextRelation.class)) {
            Annotation arg1 = relation.getArg1().getArgument();
            Annotation arg2 = relation.getArg2().getArgument();
            int mentionsBetween;
            if (arg1.getBegin() < arg2.getBegin()) {
                mentionsBetween = JCasUtil
                        .selectCovered(goldView, EntityMention.class, arg1.getEnd(), arg2.getBegin()).size();
            } else {
                mentionsBetween = -JCasUtil
                        .selectCovered(goldView, EntityMention.class, arg2.getEnd(), arg1.getBegin()).size();
            }
            mentionsBetweenCounts.add(mentionsBetween);
        }
    }

    List<Integer> mentionsBetweenKeys = new ArrayList<Integer>(mentionsBetweenCounts.elementSet());
    Collections.sort(mentionsBetweenKeys);
    for (Integer mentionsBetween : mentionsBetweenKeys) {
        System.err.printf("%d x%d\n", mentionsBetween, mentionsBetweenCounts.count(mentionsBetween));
    }
}

From source file:org.apache.mahout.classifier.sgd.TrainNewsGroups.java

public static void main(String[] args) throws IOException {
    File base = new File(args[0]);

    Multiset<String> overallCounts = HashMultiset.create();

    int leakType = 0;
    if (args.length > 1) {
        leakType = Integer.parseInt(args[1]);
    }

    Dictionary newsGroups = new Dictionary();

    NewsgroupHelper helper = new NewsgroupHelper();
    helper.getEncoder().setProbes(2);
    AdaptiveLogisticRegression learningAlgorithm = new AdaptiveLogisticRegression(20, NewsgroupHelper.FEATURES,
            new L1());
    learningAlgorithm.setInterval(800);
    learningAlgorithm.setAveragingWindow(500);

    List<File> files = Lists.newArrayList();
    for (File newsgroup : base.listFiles()) {
        if (newsgroup.isDirectory()) {
            newsGroups.intern(newsgroup.getName());
            files.addAll(Arrays.asList(newsgroup.listFiles()));
        }
    }
    Collections.shuffle(files);
    System.out.println(files.size() + " training files");
    SGDInfo info = new SGDInfo();

    int k = 0;

    for (File file : files) {
        String ng = file.getParentFile().getName();
        int actual = newsGroups.intern(ng);

        Vector v = helper.encodeFeatureVector(file, actual, leakType, overallCounts);
        learningAlgorithm.train(actual, v);

        k++;
        State<AdaptiveLogisticRegression.Wrapper, CrossFoldLearner> best = learningAlgorithm.getBest();

        SGDHelper.analyzeState(info, leakType, k, best);
    }
    learningAlgorithm.close();
    SGDHelper.dissect(leakType, newsGroups, learningAlgorithm, files, overallCounts);
    System.out.println("exiting main");

    ModelSerializer.writeBinary("/tmp/news-group.model",
            learningAlgorithm.getBest().getPayload().getLearner().getModels().get(0));

    List<Integer> counts = Lists.newArrayList();
    System.out.println("Word counts");
    for (String count : overallCounts.elementSet()) {
        counts.add(overallCounts.count(count));
    }
    Collections.sort(counts, Ordering.natural().reverse());
    k = 0;
    for (Integer count : counts) {
        System.out.println(k + "\t" + count);
        k++;
        if (k > 1000) {
            break;
        }
    }
}

From source file:com.memonews.mahout.sentiment.SentimentModelTrainer.java

public static void main(final String[] args) throws IOException {
    final File base = new File(args[0]);
    final String modelPath = args.length > 1 ? args[1] : "target/model";

    final Multiset<String> overallCounts = HashMultiset.create();

    final Dictionary newsGroups = new Dictionary();

    final SentimentModelHelper helper = new SentimentModelHelper();
    helper.getEncoder().setProbes(2);
    final AdaptiveLogisticRegression learningAlgorithm = new AdaptiveLogisticRegression(2,
            SentimentModelHelper.FEATURES, new L1());
    learningAlgorithm.setInterval(800);
    learningAlgorithm.setAveragingWindow(500);

    final List<File> files = Lists.newArrayList();
    for (final File newsgroup : base.listFiles()) {
        if (newsgroup.isDirectory()) {
            newsGroups.intern(newsgroup.getName());
            files.addAll(Arrays.asList(newsgroup.listFiles()));
        }
    }
    Collections.shuffle(files);
    System.out.printf("%d training files\n", files.size());
    final SGDInfo info = new SGDInfo();

    int k = 0;

    for (final File file : files) {
        final String ng = file.getParentFile().getName();
        final int actual = newsGroups.intern(ng);

        final Vector v = helper.encodeFeatureVector(file, overallCounts);
        learningAlgorithm.train(actual, v);

        k++;
        final State<AdaptiveLogisticRegression.Wrapper, CrossFoldLearner> best = learningAlgorithm.getBest();

        SGDHelper.analyzeState(info, 0, k, best);
    }
    learningAlgorithm.close();
    SGDHelper.dissect(0, newsGroups, learningAlgorithm, files, overallCounts);
    System.out.println("exiting main");

    ModelSerializer.writeBinary(modelPath,
            learningAlgorithm.getBest().getPayload().getLearner().getModels().get(0));

    final List<Integer> counts = Lists.newArrayList();
    System.out.printf("Word counts\n");
    for (final String count : overallCounts.elementSet()) {
        counts.add(overallCounts.count(count));
    }
    Collections.sort(counts, Ordering.natural().reverse());
    k = 0;
    for (final Integer count : counts) {
        System.out.printf("%d\t%d\n", k, count);
        k++;
        if (k > 1000) {
            break;
        }
    }
}

From source file:com.music.tools.MidiAnalyzer.java

public static void main(String[] args) {
    Score score = new Score();
    Read.midi(score, "C:\\workspace\\music\\analysis\\midi\\jarre\\EQUINOX3.MID");
    for (Part part : score.getPartArray()) {
        System.out.println(part.getTitle() + " : " + part.getInstrument());
    }
    Part part = score.getPart(1);

    System.out.println(part.getInstrument());
    part.setTempo(160);
    int previousPitch = 0;
    int prePreviousPitch = 0;
    System.out.println(score.getTimeSignature());
    Multiset<Integer> uniqueIntervals = HashMultiset.create();
    int directionChanges = 0;
    int directionRetentions = 0;

    LinkedList<Double> noteLengths = new LinkedList<>();
    for (Note note : part.getPhrase(0).getNoteArray()) {
        System.out.println(note.getPitch());
        if (!note.isRest()) {
            if (prePreviousPitch != 0) {
                int previousDiff = previousPitch - prePreviousPitch;
                int diff = note.getPitch() - previousPitch;
                if (Math.signum(previousDiff) != Math.signum(diff) && diff != 0 && previousDiff != 0) {
                    directionChanges++;
                    System.out.println(prePreviousPitch + ":" + previousPitch + ":" + note.getPitch());
                } else if (diff != 0 && previousDiff != 0) {
                    directionRetentions++;
                }
            }
            if (note.getPitch() - previousPitch != 0) {
                prePreviousPitch = previousPitch;
            }

            uniqueIntervals.add(previousPitch - note.getPitch());
            previousPitch = note.getPitch();
        }
        noteLengths.add(note.getRhythmValue());
    }

    double normalizedBeatSize = 1d * score.getNumerator() * 4 / score.getDenominator();
    System.out.println("Beat size: " + normalizedBeatSize);
    double currentBeatSize = 0;
    int beats = 0;
    int beatsWithPerfectHalves = 0;
    // reverse, to avoid off-beats
    for (Iterator<Double> it = noteLengths.descendingIterator(); it.hasNext();) {
        currentBeatSize += it.next();
        if (currentBeatSize >= normalizedBeatSize) {
            currentBeatSize = 0;
            beats++;
        }
        if (currentBeatSize == normalizedBeatSize / 2) {
            beatsWithPerfectHalves++;
        }
    }

    System.out.println("Beats:beats with perfect halves -- " + beats + ":" + beatsWithPerfectHalves);

    Hashtable<String, Object> table = PhraseAnalysis.getAllStatistics(score.getPart(1).getPhrase(0), 1, 0,
            Scales.MAJOR_SCALE);
    for (Entry<String, Object> entry : table.entrySet()) {
        System.out.println(entry.getKey() + "=" + entry.getValue());
    }
    for (Integer interval : uniqueIntervals.elementSet()) {
        System.out.println(interval + " : " + uniqueIntervals.count(interval));
    }

    System.out.println("---");

    System.out.println(directionChanges + " : " + directionRetentions);
    Play.midi(part);
}

From source file:edu.mit.streamjit.test.StreamFuzzer.java

public static void main(String[] args) throws InterruptedException, IOException {
    StreamCompiler debugSC = new InterpreterStreamCompiler();
    StreamCompiler compilerSC = new Compiler2StreamCompiler();
    Set<FuzzElement> completedCases = new HashSet<>();
    int generated;
    int duplicatesSkipped = 0;
    Multiset<Class<?>> ignored = HashMultiset.create(ignoredExceptions.size());
    int failures = 0, successes = 0;
    next_case: for (generated = 0; true; ++generated) {
        FuzzElement fuzz = StreamFuzzer.generate();
        if (!completedCases.add(fuzz)) {
            ++duplicatesSkipped;
            continue;
        }

        try {
            fuzz.instantiate().visit(new CheckVisitor());
        } catch (IllegalStreamGraphException ex) {
            System.out.println("Fuzzer generated bad test case");
            ex.printStackTrace(System.out);
            fuzz.instantiate().visit(new PrintStreamVisitor(System.out));
        }

        List<Integer> debugOutput = run(fuzz, debugSC);
        List<Integer> compilerOutput = null;
        try {
            compilerOutput = run(fuzz, compilerSC);
        } catch (Throwable ex) {
            for (Throwable t : Throwables.getCausalChain(ex))
                if (ignoredExceptions.contains(t.getClass())) {
                    ignored.add(t.getClass());
                    continue next_case;
                }
            System.out.println("Compiler failed");
            ex.printStackTrace(System.out);
            //fall into the if below
        }
        if (!debugOutput.equals(compilerOutput)) {
            ++failures;
            fuzz.instantiate().visit(new PrintStreamVisitor(System.out));
            System.out.println(fuzz.toJava());
            //TODO: show only elements where they differ
            System.out.println("Debug output: " + debugOutput);
            System.out.println("Compiler output: " + compilerOutput);
            writeRegressionTest(fuzz);
            break;
        } else
            ++successes;
        System.out.println(fuzz.hashCode() + " matched");
    }

    System.out.format("Generated %d cases%n", generated);
    System.out.format("  skipped %d duplicates (%f%%)%n", duplicatesSkipped,
            ((double) duplicatesSkipped) * 100 / generated);
    for (Class<?> c : ignoredExceptions) {
        int count = ignored.count(c);
        if (count > 0)
            System.out.format("  ignored %d due to %s (%f%%)%n", count, c, ((double) count) * 100 / generated);
    }
    System.out.format("Ran %d cases (%f%% run rate)%n", successes + failures,
            ((double) successes + failures) * 100 / generated);
    System.out.format("  %d succeeded (%f%%)%n", successes,
            ((double) successes) * 100 / (successes + failures));
    System.out.format("  %d failed (%f%%)%n", failures, ((double) failures) * 100 / (successes + failures));
}

From source file:com.threerings.util.MultisetUtil.java

/**
 * Return a Function that will transform objects to their corresponding count in the
 * specified Multiset.
 */
public static Function<Object, Integer> countFunction(final Multiset<?> set) {
    return new Function<Object, Integer>() {
        public Integer apply(Object o) {
            return set.count(o);
        }
    };
}
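
As a hypothetical usage sketch (not part of the original source), the returned Function composes with other Guava utilities such as Lists.transform; the class name CountFunctionDemo and the sample data below are made up for illustration.

import java.util.List;

import com.google.common.collect.HashMultiset;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Multiset;
import com.threerings.util.MultisetUtil;

public class CountFunctionDemo {
    public static void main(String[] args) {
        Multiset<String> tags = HashMultiset.create(ImmutableList.of("a", "a", "b"));
        // Lazily map each key to its count in the multiset.
        List<Integer> counts = Lists.transform(
                ImmutableList.of("a", "b", "c"),
                MultisetUtil.countFunction(tags));
        System.out.println(counts); // [2, 1, 0]
    }
}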

From source file:org.apache.mahout.math.stats.LogLikelihood.java

private static <T> void compareAndAdd(Multiset<T> a, Multiset<T> b, int maxReturn, double threshold, int totalA,
        int totalB, Queue<ScoredItem<T>> best, T t) {
    int kA = a.count(t);
    int kB = b.count(t);
    double score = rootLogLikelihoodRatio(kA, totalA - kA, kB, totalB - kB);
    if (score >= threshold) {
        ScoredItem<T> x = new ScoredItem<T>(t, score);
        best.add(x);
        while (best.size() > maxReturn) {
            best.poll();
        }
    }
}