Example usage for com.google.common.collect Multiset add

List of usage examples for com.google.common.collect Multiset add

Introduction

On this page you can find example usages of the com.google.common.collect Multiset add method.

Prototype

@Override
boolean add(E element);

Source Link

Document

Adds a single occurrence of the specified element to this multiset.

Usage

From source file:com.facebook.buck.rules.SymlinkTree.java

/**
 * Because of cross-cell, multiple {@link SourcePath}s can resolve to the same relative path,
 * despite having distinct absolute paths. This presents a challenge for rules that require
 * gathering all of the inputs in one directory.
 *
 * @param sourcePaths set of SourcePaths to process
 * @param resolver resolver used to map each SourcePath to its relative path
 * @return a map that assigns a unique relative path to each of the SourcePaths.
 */
public static ImmutableBiMap<SourcePath, Path> resolveDuplicateRelativePaths(
        ImmutableSortedSet<SourcePath> sourcePaths, SourcePathResolver resolver) {
    // This serves a dual purpose - it keeps track of whether a particular relative path had been
    // assigned to a SourcePath and how many times a particular relative path had been seen.
    Multiset<Path> assignedPaths = HashMultiset.create();
    ImmutableBiMap.Builder<SourcePath, Path> builder = ImmutableBiMap.builder();
    List<SourcePath> conflicts = new ArrayList<>();

    // First pass: the first SourcePath to claim a relative path keeps it; later
    // claimants are deferred to the conflict-resolution pass below.
    for (SourcePath sourcePath : sourcePaths) {
        Path relativePath = resolver.getRelativePath(sourcePath);
        if (!assignedPaths.contains(relativePath)) {
            builder.put(sourcePath, relativePath);
            assignedPaths.add(relativePath);
        } else {
            conflicts.add(sourcePath);
        }
    }

    // Second pass: rename each conflicting path to "<name>-<N>[.<ext>]", where N is
    // the number of times the original relative path has been counted so far.
    for (SourcePath conflict : conflicts) {
        Path relativePath = resolver.getRelativePath(conflict);
        Path parent = MorePaths.getParentOrEmpty(relativePath);
        String extension = MorePaths.getFileExtension(relativePath);
        String name = MorePaths.getNameWithoutExtension(relativePath);

        while (true) {
            StringBuilder candidateName = new StringBuilder(name);
            candidateName.append('-');
            // The numeric suffix is the current occurrence count of the *original* path.
            int suffix = assignedPaths.count(relativePath);
            candidateName.append(suffix);
            if (!extension.isEmpty()) {
                candidateName.append('.');
                candidateName.append(extension);
            }
            Path candidate = parent.resolve(candidateName.toString());

            if (!assignedPaths.contains(candidate)) {
                assignedPaths.add(candidate);
                builder.put(conflict, candidate);
                break;
            } else {
                // Candidate is also taken: bump the original path's count so the next
                // loop iteration generates the next suffix.
                assignedPaths.add(relativePath);
            }
        }
    }

    return builder.build();
}

From source file:com.koloboke.compile.KolobokeMapBackedMultiset.java

/**
 * Creates a multiset sized for the given number of elements and adds each of them;
 * duplicate arguments increase the corresponding element's count.
 *
 * @param elements elements to add to the new multiset
 * @return a multiset containing every given element
 */
@SafeVarargs // elements are only read from the varargs array, never stored into it
public static <E> Multiset<E> withElements(E... elements) {
    Multiset<E> multiset = withExpectedDistinctElements(elements.length);
    for (E e : elements) {
        multiset.add(e);
    }
    return multiset;
}

From source file:com.intellij.find.impl.FindInProjectTask.java

/**
 * Logs how long the search over files of unknown type took, plus the most frequent
 * (up to 10) file extensions among them, as a hint that mapping those extensions to
 * an existing file type could speed up future searches.
 *
 * @param otherFiles files that were searched without an index
 * @param start      search start time, as returned by {@link System#currentTimeMillis()}
 */
private static void logStats(Collection<PsiFile> otherFiles, long start) {
    long time = System.currentTimeMillis() - start;

    final Multiset<String> stats = HashMultiset.create();
    for (PsiFile file : otherFiles) {
        //noinspection StringToUpperCaseOrToLowerCaseWithoutLocale
        stats.add(StringUtil.notNullize(file.getViewProvider().getVirtualFile().getExtension()).toLowerCase());
    }

    List<String> extensions = ContainerUtil.newArrayList(stats.elementSet());
    // Sort by descending frequency. Integer.compare avoids the subtraction idiom
    // (count(o2) - count(o1)), which can overflow in general.
    Collections.sort(extensions, new Comparator<String>() {
        @Override
        public int compare(String o1, String o2) {
            return Integer.compare(stats.count(o2), stats.count(o1));
        }
    });

    // Build the message with StringBuilder instead of += concatenation in a loop.
    StringBuilder message = new StringBuilder()
            .append("Search in ").append(otherFiles.size())
            .append(" files with unknown types took ").append(time).append("ms.\n")
            .append("Mapping their extensions to an existing file type (e.g. Plain Text) might speed up the search.\n")
            .append("Most frequent non-indexed file extensions: ");
    for (int i = 0; i < Math.min(10, extensions.size()); i++) {
        String extension = extensions.get(i);
        message.append(extension).append("(").append(stats.count(extension)).append(") ");
    }
    LOG.info(message.toString());
}

From source file:com.hortonworks.streamline.common.Schema.java

/**
 * Parses every member of the given array into a {@code Field} and collects the
 * results, preserving encounter order and duplicate counts.
 *
 * @param array raw array members to parse
 * @return a multiset of the parsed fields in encounter order
 * @throws ParserException if a member cannot be parsed into a field
 */
private static Multiset<Field> parseArray(List<Object> array) throws ParserException {
    final Multiset<Field> parsedFields = LinkedHashMultiset.create();
    for (Object rawMember : array) {
        Field field = parseField(null, rawMember);
        parsedFields.add(field);
    }
    return parsedFields;
}

From source file:com.cloudera.knittingboar.records.RCV1RecordFactory.java

/**
 * Scans a whitespace-delimited record file (class label, namespace, then
 * "index:value" features per line), printing a sparse feature-vector sample for
 * each line and, at the end, per-class and per-namespace occurrence counts.
 *
 * @param file            path of the file to scan
 * @param debug_break_cnt stop scanning after this many lines (debug limit)
 * @throws IOException if the file cannot be opened or read
 */
public static void ScanFile(String file, int debug_break_cnt) throws IOException {

    int line_count = 0;

    Multiset<String> class_count = ConcurrentHashMultiset.create();
    Multiset<String> namespaces = ConcurrentHashMultiset.create();

    // try-with-resources replaces the original finally { reader.close(); }, which
    // threw a NullPointerException whenever the FileReader constructor itself failed
    // (reader was still null). The unused ConstantValueEncoder local was removed.
    try (BufferedReader reader = new BufferedReader(new FileReader(file))) {

        String line = reader.readLine();

        while (line != null && line.length() > 0) {

            String[] parts = line.split(" ");

            // parts[0] is the class label, parts[1] the namespace token.
            class_count.add(parts[0]);
            namespaces.add(parts[1]);

            line = reader.readLine();
            line_count++;

            Vector v = new RandomAccessSparseVector(FEATURES);

            // Remaining tokens are "index:value" pairs, hashed into FEATURES buckets.
            for (int x = 2; x < parts.length; x++) {
                String[] feature = parts[x].split(":");
                int index = Integer.parseInt(feature[0]) % FEATURES;
                double val = Double.parseDouble(feature[1]);

                if (index < FEATURES) {
                    v.set(index, val);
                } else {
                    System.out.println("Could Hash: " + index + " to " + (index % FEATURES));
                }
            }

            Utils.PrintVectorSectionNonZero(v, 10);
            System.out.println("###");

            if (line_count > debug_break_cnt) {
                break;
            }
        }

        System.out.println("Total Rec Count: " + line_count);

        System.out.println("-------------------- ");

        System.out.println("Classes");
        for (String word : class_count.elementSet()) {
            System.out.println("Class " + word + ": " + class_count.count(word) + " ");
        }

        System.out.println("-------------------- ");

        System.out.println("NameSpaces:");
        for (String word : namespaces.elementSet()) {
            System.out.println("Namespace " + word + ": " + namespaces.count(word) + " ");
        }
    }
}

From source file:org.onebusaway.nyc.vehicle_tracking.impl.particlefilter.ParticleFilter.java

/**
 * Low variance sampler. Follows Thrun's example in Probabilistic Robotics:
 * draws M samples with a single random offset, stepping evenly along the
 * cumulative (log-domain) weight distribution of the particles.
 *
 * @param particles weighted particle population to resample from (must be non-empty)
 * @param M number of samples to draw (must be > 0)
 * @return a multiset of exactly M resampled particles
 * @throws BadProbabilityParticleFilterException if resampling yields fewer/more than M particles
 */
public static Multiset<Particle> lowVarianceSampler(Multiset<Particle> particles, double M)
        throws BadProbabilityParticleFilterException {
    Preconditions.checkArgument(particles.size() > 0);
    Preconditions.checkArgument(M > 0);

    final Multiset<Particle> resampled = HashMultiset.create((int) M);
    // Single random offset in [0, 1/M); all sample positions are derived from it.
    final double r = ParticleFactoryImpl.getLocalRng().nextDouble() / M;
    final Iterator<Particle> pIter = particles.iterator();
    Particle p = pIter.next();
    // c accumulates the log cumulative weight. Each occurrence's weight is the
    // particle's log-normed weight minus log(multiplicity) — presumably because the
    // multiset iterator visits each duplicate occurrence separately, so the weight is
    // split evenly among duplicates; confirm against Particle.getLogNormedWeight().
    double c = p.getLogNormedWeight() - FastMath.log(particles.count(p));
    for (int m = 0; m < M; ++m) {
        // Target position of the m-th sample on the log-CDF. Note m / M is
        // floating-point division since M is a double.
        final double U = FastMath.log(r + m / M);
        // Advance through particles until the cumulative weight passes the target.
        while (U > c && pIter.hasNext()) {
            p = pIter.next();
            c = LogMath.add(p.getLogNormedWeight() - FastMath.log(particles.count(p)), c);
        }
        resampled.add(p);
    }

    if (resampled.size() != M)
        throw new BadProbabilityParticleFilterException("low variance sampler did not return a valid sample");

    return resampled;
}

From source file:bio.gcat.operation.analysis.AminoAcids.java

/**
 * Counts the occurrences of each compound coded by the given tuples (stop tuples
 * count as {@code STOP}, tuples without a compound as {@code UNKNOWN}) and reports
 * them as delimiter-separated "count TIMES compound" entries.
 *
 * @param tuples tuples to analyse
 * @param values unused extra parameters
 * @return a result listing each compound with its occurrence count; empty for no tuples
 */
@Override
public Result analyse(Collection<Tuple> tuples, Object... values) {
    Multiset<Compound> compounds = EnumMultiset.create(Compound.class);
    for (Tuple tuple : tuples)
        compounds.add(Compound.isStop(tuple) ? Compound.STOP
                : Optional.ofNullable(tuple.getCompound()).orElse(Compound.UNKNOWN));

    StringBuilder builder = new StringBuilder();
    for (Entry<Compound> compound : compounds.entrySet())
        builder.append(DELIMITER).append(compound.getCount()).append(TIMES).append(compound.getElement());

    // Guard against empty input: substring(DELIMITER.length()) on an empty builder
    // threw StringIndexOutOfBoundsException. (substring already returns a String, so
    // the original's trailing .toString() was redundant and is dropped.)
    return new SimpleResult(this,
            builder.length() == 0 ? "" : builder.substring(DELIMITER.length()));
}

From source file:de.isabeldrostfromm.sof.Trainer.java

/**
 * Trains an online logistic regression model on every example from the provider.
 *
 * @param provider source of training examples
 * @return the trained regression model
 */
@Override
public OnlineLogisticRegression train(ExampleProvider provider) {
    OnlineLogisticRegression logReg = new OnlineLogisticRegression(ModelTargets.STATEVALUES.length,
            Vectoriser.getCardinality(), new L1());

    // NOTE(review): removed a HashMultiset<String> that counted instance states but
    // was never read anywhere in this method (dead code with no side effects).
    for (Example instance : provider) {
        logReg.train(ModelTargets.STATES.get(instance.getState()), instance.getVector());
    }

    return logReg;
}

From source file:org.lightjason.agentspeak.action.buildin.collection.list.CSymmetricDifference.java

/**
 * Computes the symmetric difference of the flattened argument collections: an
 * element appears in the result iff it occurs an odd number of times in total.
 *
 * @param p_context execution context
 * @param p_parallel whether the returned list should be synchronized
 * @param p_argument input terms (flattened before counting)
 * @param p_return output terms; receives the resulting list
 * @param p_annotation annotation terms (unused)
 * @return always a true fuzzy value
 */
@Override
public final IFuzzyValue<Boolean> execute(final IContext p_context, final boolean p_parallel,
        final List<ITerm> p_argument, final List<ITerm> p_return, final List<ITerm> p_annotation) {
    // tally every occurrence of every flattened element
    final Multiset<?> l_occurrences = ConcurrentHashMultiset.create();
    CCommon.flatcollection(p_argument).parallel().forEach(i -> l_occurrences.add(i.raw()));

    // keep exactly those elements seen an odd number of times
    final List<?> l_difference = l_occurrences.entrySet().parallelStream()
            .filter(i -> i.getCount() % 2 == 1)
            .collect(Collectors.toList());

    p_return.add(CRawTerm.from(p_parallel ? Collections.synchronizedList(l_difference) : l_difference));

    return CFuzzyValue.from(true);
}

From source file:it.units.malelab.ege.util.DUMapper.java

/**
 * Reads {@code generations} population dump files (one per generation, named via
 * {@code fileNamePattern}) and derives, for every node innovation number and every
 * generation, a usage score and a diversity score over the population.
 *
 * Each file's "[NEAT-POPULATION:SPECIES]" section is parsed line-by-line: "g" lines
 * start a new individual, "n" lines declare nodes (type in column 3, innovation
 * number in column 4), "l" lines declare links (from/to in columns 3 and 4).
 * Each individual is modeled as a map from node innovation number to a multimap
 * holding outgoing links under key 1 and incoming links under key -1.
 *
 * @param baseDir directory containing the dump files
 * @param fileNamePattern format pattern producing the per-generation file name
 * @param generations number of generation files to read
 * @return a two-element array { diversities, usages }, each indexed [generation][node]
 * @throws IOException if a dump file cannot be read
 */
private static double[][][] getNeatData3(String baseDir, String fileNamePattern, int generations)
        throws IOException {
    List<List<Map<Integer, Multimap<Integer, Integer>>>> data = new ArrayList<>();
    Map<Integer, String> nodeTypesMap = new HashMap<>();
    for (int g = 0; g < generations; g++) {
        List<Map<Integer, Multimap<Integer, Integer>>> currentPopulation = new ArrayList<>();
        BufferedReader reader = Files.newBufferedReader(
                FileSystems.getDefault().getPath(baseDir, String.format(fileNamePattern, g + 1)));
        String line;
        boolean isInPopulation = false;
        Map<Integer, Multimap<Integer, Integer>> currentIndividual = null;
        while ((line = reader.readLine()) != null) {
            // skip everything before the population section marker
            if (line.equals("[NEAT-POPULATION:SPECIES]")) {
                isInPopulation = true;
                continue;
            }
            if (!isInPopulation) {
                continue;
            }
            // "g" line: flush the previous individual (if any) and start a new one
            if (line.startsWith("\"g\"")) {
                if (currentIndividual != null) {
                    //save current individual
                    currentPopulation.add(currentIndividual);
                }
                currentIndividual = new HashMap<>();
            }
            if (line.startsWith("\"n\"")) {
                // node line: pieces[4] = innovation number, pieces[3] = node type
                String[] pieces = line.split(",");
                nodeTypesMap.put(Integer.parseInt(pieces[4]), pieces[3].replaceAll("\"", ""));
                currentIndividual.put(Integer.parseInt(pieces[4]), (Multimap) HashMultimap.create());
            } else if (line.startsWith("\"l\"")) {
                // link line: pieces[3] = source node, pieces[4] = destination node
                String[] pieces = line.split(",");
                int from = Integer.parseInt(pieces[3]);
                int to = Integer.parseInt(pieces[4]);
                if (currentIndividual.get(from) == null) {
                    currentIndividual.put(from, (Multimap) HashMultimap.create());
                }
                if (currentIndividual.get(to) == null) {
                    currentIndividual.put(to, (Multimap) HashMultimap.create());
                }
                // key 1 = outgoing links, key -1 = incoming links
                currentIndividual.get(from).put(1, to);
                currentIndividual.get(to).put(-1, from);
            }
        }
        // NOTE(review): the last individual of each file is never added to
        // currentPopulation (only a following "g" line flushes it) — confirm whether
        // the dump format guarantees a trailing "g" line, otherwise it is dropped.
        reader.close();
        data.add(currentPopulation);
    }
    //build node innovation numbers, grouped by type in fixed order:
    // inputs, biases, hidden, outputs; sorted within each group
    String[] nodeTypes = new String[] { "i", "b", "h", "o" };
    List<Integer> nodeINs = new ArrayList<>();
    for (String nodeType : nodeTypes) {
        List<Integer> typeNodeINs = new ArrayList<>();
        for (Integer in : nodeTypesMap.keySet()) {
            if (nodeTypesMap.get(in).equals(nodeType)) {
                typeNodeINs.add(in);
            }
        }
        Collections.sort(typeNodeINs);
        nodeINs.addAll(typeNodeINs);
    }
    //populate arrays
    double[][] usages = new double[generations][];
    double[][] diversities = new double[generations][];
    for (int g = 0; g < generations; g++) {
        usages[g] = new double[nodeINs.size()];
        diversities[g] = new double[nodeINs.size()];
        List<Map<Integer, Multimap<Integer, Integer>>> currentPopulation = data.get(g);
        //populate usages, diversities
        int i = 0;
        for (int nodeIN : nodeINs) {
            // localUsages[c]: whether (and how much) individual c uses this node;
            // froms/tos: multiset of incoming/outgoing neighbor sets across individuals
            double[] localUsages = new double[currentPopulation.size()];
            Multiset<Set<Integer>> froms = HashMultiset.create();
            Multiset<Set<Integer>> tos = HashMultiset.create();
            int c = 0;
            for (Map<Integer, Multimap<Integer, Integer>> currentIndividual : currentPopulation) {
                if (nodeTypesMap.get(nodeIN).equals("i") || nodeTypesMap.get(nodeIN).equals("b")) {
                    // input/bias nodes: usage = has at least one outgoing link
                    if (currentIndividual.containsKey(nodeIN)) {
                        localUsages[c] = currentIndividual.get(nodeIN).get(1).isEmpty() ? 0 : 1;
                        tos.add(new HashSet<>(currentIndividual.get(nodeIN).get(1)));
                    } else {
                        tos.add(Collections.EMPTY_SET);
                    }
                } else if (nodeTypesMap.get(nodeIN).equals("h")) {
                    // hidden nodes: half credit each for incoming and outgoing links
                    if (currentIndividual.containsKey(nodeIN)) {
                        localUsages[c] = (currentIndividual.get(nodeIN).get(-1).isEmpty() ? 0 : 0.5)
                                + (currentIndividual.get(nodeIN).get(1).isEmpty() ? 0 : 0.5);
                        tos.add(new HashSet<>(currentIndividual.get(nodeIN).get(1)));
                        froms.add(new HashSet<>(currentIndividual.get(nodeIN).get(-1)));
                    } else {
                        tos.add(Collections.EMPTY_SET);
                        froms.add(Collections.EMPTY_SET);
                    }
                } else if (nodeTypesMap.get(nodeIN).equals("o")) {
                    // output nodes: usage = has at least one incoming link
                    if (currentIndividual.containsKey(nodeIN)) {
                        localUsages[c] = currentIndividual.get(nodeIN).get(-1).isEmpty() ? 0 : 1;
                        froms.add(new HashSet<>(currentIndividual.get(nodeIN).get(-1)));
                    } else {
                        froms.add(Collections.EMPTY_SET);
                    }
                }
                c = c + 1;
            }
            usages[g][i] = StatUtils.mean(localUsages);
            if (nodeTypesMap.get(nodeIN).equals("i") || nodeTypesMap.get(nodeIN).equals("b")) {
                diversities[g][i] = Utils.multisetDiversity(tos, tos.elementSet());
            } else if (nodeTypesMap.get(nodeIN).equals("h")) {
                // NOTE(review): the second term passes froms with tos.elementSet() —
                // possibly intended to be froms.elementSet(); confirm with Utils.multisetDiversity.
                diversities[g][i] = Utils.multisetDiversity(tos, tos.elementSet()) / 2
                        + Utils.multisetDiversity(froms, tos.elementSet()) / 2;
            } else if (nodeTypesMap.get(nodeIN).equals("o")) {
                diversities[g][i] = Utils.multisetDiversity(froms, tos.elementSet());
            }
            i = i + 1;
        }
    }
    return new double[][][] { diversities, usages };
}