Example usage for com.google.common.collect Multiset elementSet

Introduction

On this page you can find example usages of com.google.common.collect.Multiset.elementSet().

Prototype

Set<E> elementSet();

Document

Returns the set of distinct elements contained in this multiset.
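A minimal sketch of the typical pattern: iterate over elementSet() to visit each distinct element once and use count() for its multiplicity. The class name and sample data below are illustrative, not taken from the sources that follow.

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class ElementSetSketch {
    public static void main(String[] args) {
        Multiset<String> words = HashMultiset.create();
        words.add("apple");
        words.add("apple");
        words.add("banana");

        // elementSet() returns each distinct element exactly once;
        // count() gives the number of occurrences of that element.
        for (String word : words.elementSet()) {
            System.out.println(word + " x " + words.count(word));
        }
    }
}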

Usage

From source file:org.apache.jackrabbit.oak.segment.SegmentGraph.java

/**
 * Write the gc generation graph of a file store to a stream.
 * <p>
 * The graph is written in
 * <a href="https://gephi.github.io/users/supported-graph-formats/gdf-format/">the Guess GDF format</a>,
 * which is easily imported into <a href="https://gephi.github.io/">Gephi</a>.
 *
 * @param fileStore     file store to graph
 * @param out           stream to write the graph to
 * @throws Exception
 */
public static void writeGCGraph(@Nonnull ReadOnlyStore fileStore, @Nonnull OutputStream out) throws Exception {
    PrintWriter writer = new PrintWriter(checkNotNull(out));
    try {
        Graph<String> gcGraph = parseGCGraph(checkNotNull(fileStore));

        writer.write("nodedef>name VARCHAR\n");
        for (String gen : gcGraph.vertices()) {
            writer.write(gen + "\n");
        }

        writer.write("edgedef>node1 VARCHAR, node2 VARCHAR, weight INT\n");
        for (Entry<String, Multiset<String>> edge : gcGraph.edges()) {
            String from = edge.getKey();
            Multiset<String> tos = edge.getValue();
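            // elementSet() yields each distinct target node once; count(to) supplies the edge weight.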
            for (String to : tos.elementSet()) {
                if (!from.equals(to) && !to.isEmpty()) {
                    writer.write(from + "," + to + "," + tos.count(to) + "\n");
                }
            }
        }
    } finally {
        writer.close();
    }
}

From source file:org.tensorics.core.tensor.Coordinates.java

public static Set<Class<?>> requireValidDimensions(Multiset<Class<?>> dimensions) {
    if (containsNonUniqueElements(dimensions)) {
        throw new IllegalArgumentException(
                "Only unique dimensions are allowed. The following dimensions are not unique: "
                        + nonUniqueElementsOf(dimensions));
    }
    return ImmutableSet.copyOf(dimensions.elementSet());
}

From source file:net.librec.util.StringUtil.java

/**
 * Convert a {@code Collection<T>} into a string.
 *
 * @param ts   the input data
 * @param <T>  type parameter
 * @return  the string representation of the input data
 */
public static <T> String toString(Collection<T> ts) {

    if (ts instanceof Multiset<?>) {

        StringBuilder sb = new StringBuilder();
        Multiset<T> es = (Multiset<T>) ts;

        for (T e : es.elementSet()) {
            int count = es.count(e);
            sb.append(e + ", " + count + "\n");
        }

        return sb.toString();
    }

    return toString(ts, ",");
}
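A short usage sketch for the multiset branch above (Guava's HashMultiset and java.util.Arrays are assumed to be imported; the sample data is illustrative):

Multiset<String> tags = HashMultiset.create(Arrays.asList("spark", "spark", "flink"));
// Each distinct tag appears once in the result, followed by its count, one entry per line.
String summary = StringUtil.toString(tags);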

From source file:com.github.rinde.rinsim.scenario.measure.Metrics.java

/**
 * Computes the number of occurrences of each event type in the specified
 * {@link Scenario}.
 * @param s The scenario to check.
 * @return An {@link ImmutableMultiset} of event types.
 */
public static ImmutableMultiset<Class<?>> getEventTypeCounts(Scenario s) {
    final Multiset<Class<?>> set = LinkedHashMultiset.create();
    for (final TimedEvent te : s.getEvents()) {
        set.add(te.getClass());
    }
    final List<Class<?>> toMove = new ArrayList<>();
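    // Fold non-public event classes into their TimedEvent superclass, preserving their counts.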
    for (final Class<?> c : set.elementSet()) {
        if (!Modifier.isPublic(c.getModifiers()) && TimedEvent.class.isAssignableFrom(c.getSuperclass())
                && !set.contains(c.getSuperclass())) {
            toMove.add(c);
        }
    }
    for (final Class<?> c : toMove) {
        set.add(c.getSuperclass(), set.count(c));
        set.remove(c, set.count(c));
    }
    return ImmutableMultiset.copyOf(set);
}

From source file:edu.uci.ics.sourcerer.tools.java.component.identifier.internal.ClusterMerger.java

public static void mergeByVersions(ClusterCollection clusters) {
    TaskProgressLogger task = TaskProgressLogger.get();

    task.start("Merging " + clusters.size() + " clusters by matching versions");

    TreeSet<Cluster> sortedClusters = new TreeSet<>(Cluster.DESCENDING_SIZE_COMPARATOR);
    Map<VersionedFqnNode, Cluster> fqnToCluster = new HashMap<>();
    for (Cluster cluster : clusters) {
        sortedClusters.add(cluster);
        for (VersionedFqnNode fqn : cluster.getCoreFqns()) {
            fqnToCluster.put(fqn, cluster);
        }
    }

    Collection<Cluster> remainingClusters = new LinkedList<>();
    Set<VersionedFqnNode> usedFqns = new HashSet<>();
    task.start("Merging clusters", "clusters examined", 500);
    // Starting from the most important jar
    // For each cluster
    while (!sortedClusters.isEmpty()) {
        Cluster biggest = sortedClusters.pollFirst();
        remainingClusters.add(biggest);

        usedFqns.addAll(biggest.getCoreFqns());
        // Repeatedly add new fqns to the cluster, until no new ones can be added
        boolean addedSomething = true;
        while (addedSomething) {
            Set<VersionedFqnNode> globalPotentials = new HashSet<>();
            Set<VersionedFqnNode> globalPartials = new HashSet<>();

            // For each version, find any fqns that always occur
            for (ClusterVersion version : biggest.getVersions()) {
                Multiset<VersionedFqnNode> potentials = HashMultiset.create();
                for (Jar jar : version.getJars()) {
                    for (FqnVersion fqn : jar.getFqns()) {
                        if (!usedFqns.contains(fqn.getFqn())) {
                            potentials.add(fqn.getFqn());
                        }
                    }
                }

                int max = version.getJars().size();
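                // An FQN present in every jar of this version (count == max) is a candidate for merging.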
                for (VersionedFqnNode fqn : potentials.elementSet()) {
                    if (potentials.count(fqn) > max) {
                        logger.severe("wtf! " + fqn.getFqn());
                        // Check the jars for duplicates
                        for (Jar jar : version.getJars()) {
                            for (FqnVersion node : jar.getFqns()) {
                                if (node.getFqn() == fqn) {
                                    logger.severe(jar.getJar().getProperties().HASH.getValue() + " "
                                            + node.getFingerprint().serialize());
                                }
                            }
                        }
                    }
                    if (potentials.count(fqn) == max && fqn.getJars().isSubset(biggest.getJars())) {
                        globalPotentials.add(fqn);
                    } else {
                        globalPartials.add(fqn);
                    }
                }
            }

            globalPotentials.removeAll(globalPartials);

            // Collect the clusters we plan on merging
            Set<Cluster> newClusters = new HashSet<>();
            for (VersionedFqnNode fqn : globalPotentials) {
                Cluster newCluster = fqnToCluster.get(fqn);
                if (newCluster == null) {
                    logger.log(Level.SEVERE, "Unable to find cluster for: " + fqn.getFqn());
                } else {
                    newClusters.add(newCluster);
                    usedFqns.add(fqn);
                    biggest.addVersionedCore(fqn);
                }
            }

            //        // Verify the clusters
            //        for (Cluster cluster : newClusters) {
            //          for (VersionedFqnNode fqn : cluster.getCoreFqns()) {
            //            if (!globalPotentials.contains(fqn)) {
            //              logger.severe("Cluster included without fqn: " + fqn.getFqn());
            //              // Every node should have the same JarSet
            //              for (VersionedFqnNode node : cluster.getCoreFqns()) {
            //                logger.severe(" " + node.getFqn() + " " + globalPotentials.contains(node) + " " + globalPartials.contains(node) + " " + usedFqns.contains(node) + " "+ node.getJars().hashCode());
            //              }
            //            }
            //          }
            //        }

            //        usedFqns.addAll(globalPotentials);

            // Remove the clusters from the queue
            sortedClusters.removeAll(newClusters);

            addedSomething = !globalPotentials.isEmpty();
        }

        task.progress();
    }

    task.finish();

    clusters.reset(remainingClusters);
    task.report(clusters.size() + " clusters remain");
    task.finish();
}

From source file:org.apache.mahout.math.stats.LogLikelihood.java

/**
 * Compares two sets of counts to see which items are interestingly over-represented in the first
 * set.
 * @param a  The first counts.
 * @param b  The reference counts.
 * @param maxReturn  The maximum number of items to return.  Use maxReturn >= a.elementSet.size() to return all
 * scores above the threshold.
 * @param threshold  The minimum score for items to be returned.  Use 0 to return all items more common
 * in a than b.  Use -Double.MAX_VALUE (not Double.MIN_VALUE !) to not use a threshold.
 * @return  A list of scored items with their scores.
 */
public static <T> List<ScoredItem<T>> compareFrequencies(Multiset<T> a, Multiset<T> b, int maxReturn,
        double threshold) {
    int totalA = a.size();
    int totalB = b.size();

    Ordering<ScoredItem<T>> byScoreAscending = new Ordering<ScoredItem<T>>() {
        @Override
        public int compare(ScoredItem<T> tScoredItem, ScoredItem<T> tScoredItem1) {
            return Double.compare(tScoredItem.score, tScoredItem1.score);
        }
    };
    Queue<ScoredItem<T>> best = new PriorityQueue<ScoredItem<T>>(maxReturn + 1, byScoreAscending);

    for (T t : a.elementSet()) {
        compareAndAdd(a, b, maxReturn, threshold, totalA, totalB, best, t);
    }

    // if threshold >= 0 we only iterate through a because anything not there can't be as or more common than in b.
    if (threshold < 0) {
        for (T t : b.elementSet()) {
            // only items missing from a need be scored
            if (a.count(t) == 0) {
                compareAndAdd(a, b, maxReturn, threshold, totalA, totalB, best, t);
            }
        }
    }

    List<ScoredItem<T>> r = Lists.newArrayList(best);
    Collections.sort(r, byScoreAscending.reverse());
    return r;
}
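A hedged usage sketch for compareFrequencies (sample data, maxReturn, and threshold are illustrative; ScoredItem is assumed to be the result type nested in LogLikelihood, with the public score field used by the comparator above):

Multiset<String> docA = HashMultiset.create(Arrays.asList("cat", "cat", "dog"));
Multiset<String> docB = HashMultiset.create(Arrays.asList("dog", "dog", "fish"));
// Items over-represented in docA relative to docB, best scores first.
List<LogLikelihood.ScoredItem<String>> top =
        LogLikelihood.compareFrequencies(docA, docB, 10, 0.0);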

From source file:io.ssc.trackthetrackers.analysis.stats.LogLikelihood.java

/**
 * Compares two sets of counts to see which items are interestingly over-represented in the first
 * set.
 * @param a  The first counts.
 * @param b  The reference counts.
 * @param maxReturn  The maximum number of items to return.  Use maxReturn >= a.elementSet.size() to return all
 * scores above the threshold.
 * @param threshold  The minimum score for items to be returned.  Use 0 to return all items more common
 * in a than b.  Use -Double.MAX_VALUE (not Double.MIN_VALUE !) to not use a threshold.
 * @return  A list of scored items with their scores.
 */
public static <T> List<ScoredItem<T>> compareFrequencies(Multiset<T> a, Multiset<T> b, int maxReturn,
        double threshold) {
    int totalA = a.size();
    int totalB = b.size();

    Ordering<ScoredItem<T>> byScoreAscending = new Ordering<ScoredItem<T>>() {
        @Override
        public int compare(ScoredItem<T> tScoredItem, ScoredItem<T> tScoredItem1) {
            return Double.compare(tScoredItem.score, tScoredItem1.score);
        }
    };
    Queue<ScoredItem<T>> best = new PriorityQueue<ScoredItem<T>>(maxReturn + 1, byScoreAscending);

    for (T t : a.elementSet()) {
        compareAndAdd(a, b, maxReturn, threshold, totalA, totalB, best, t);
    }

    // if threshold >= 0 we only iterate through a because anything not there can't be as or more common than in b.
    if (threshold < 0) {
        for (T t : b.elementSet()) {
            // only items missing from a need be scored
            if (a.count(t) == 0) {
                compareAndAdd(a, b, maxReturn, threshold, totalA, totalB, best, t);
            }
        }
    }

    List<ScoredItem<T>> r = new ArrayList<ScoredItem<T>>(best);
    Collections.sort(r, byScoreAscending.reverse());
    return r;
}

From source file:com.hortonworks.streamline.common.Schema.java

private static Field parseField(String fieldName, Object fieldValue) throws ParserException {
    Field field = null;
    Type fieldType = fromJavaType(fieldValue);
    if (fieldType == Type.NESTED) {
        field = new NestedField(fieldName, parseFields((Map<String, Object>) fieldValue));
    } else if (fieldType == Type.ARRAY) {
        Multiset<Field> members = parseArray((List<Object>) fieldValue);
        Set<Field> fieldTypes = members.elementSet();
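        // A single distinct member type indicates a homogeneous array; one representative element suffices.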
        if (fieldTypes.size() > 1) {
            field = new ArrayField(fieldName, new ArrayList<>(members));
        } else if (fieldTypes.size() == 1) {
            field = new ArrayField(fieldName, new ArrayList<>(members.elementSet()));
        } else {
            throw new IllegalArgumentException("Array should have at least one element");
        }
    } else {
        field = new Field(fieldName, fieldType);
    }
    return field;
}

From source file:com.github.rinde.rinsim.central.Solvers.java

static GlobalStateObject fixRoutes(GlobalStateObject state) {
    boolean firstVehicle = true;
    final ImmutableList.Builder<VehicleStateObject> vehicleList = ImmutableList.builder();
    for (int i = 0; i < state.getVehicles().size(); i++) {
        final VehicleStateObject vso = state.getVehicles().get(i);
        checkArgument(vso.getRoute().isPresent());

        final List<Parcel> route = new ArrayList<>(vso.getRoute().get());
        final Multiset<Parcel> routeContents = LinkedHashMultiset.create(route);
        for (final Parcel p : routeContents.elementSet()) {
            if (vso.getContents().contains(p)) {
                // should occur only once
                if (routeContents.count(p) > 1) {
                    // remove
                    route.remove(p);
                    checkArgument(routeContents.count(p) == 2);
                }
            } else {
                // should occur twice
                if (routeContents.count(p) < 2) {
                    route.add(p);
                } else {
                    checkArgument(routeContents.count(p) == 2);
                }
            }
        }

        if (firstVehicle) {
            final Set<Parcel> unassigned = GlobalStateObjects.unassignedParcels(state);
            route.addAll(unassigned);
            route.addAll(unassigned);
            firstVehicle = false;
        }

        vehicleList.add(VehicleStateObject.create(vso.getDto(), vso.getLocation(), vso.getContents(),
                vso.getRemainingServiceTime(), vso.getDestination().orNull(), ImmutableList.copyOf(route)));

    }
    return GlobalStateObject.create(state.getAvailableParcels(), vehicleList.build(), state.getTime(),
            state.getTimeUnit(), state.getSpeedUnit(), state.getDistUnit());
}

From source file:com.scaleunlimited.cascading.ml.LogLikelihood.java

/**
 * Compares two sets of counts to see which items are interestingly
 * over-represented in the first set.
 * 
 * @param a
 *            The first counts.
 * @param b
 *            The reference counts.
 * @param maxReturn
 *            The maximum number of items to return. Use maxReturn >=
 *            a.elementSet.size() to return all scores above the threshold.
 * @param threshold
 *            The minimum score for items to be returned. Use 0 to return
 *            all items more common in a than b. Use -Double.MAX_VALUE (not
 *            Double.MIN_VALUE !) to not use a threshold.
 * @return A list of scored items with their scores.
 */
public static <T> List<ScoredItem<T>> compareFrequencies(Multiset<T> a, Multiset<T> b, int maxReturn,
        double threshold) {
    int totalA = a.size();
    int totalB = b.size();

    Ordering<ScoredItem<T>> byScoreAscending = new Ordering<ScoredItem<T>>() {
        @Override
        public int compare(ScoredItem<T> tScoredItem, ScoredItem<T> tScoredItem1) {
            return Double.compare(tScoredItem.score, tScoredItem1.score);
        }
    };
    Queue<ScoredItem<T>> best = new PriorityQueue<ScoredItem<T>>(maxReturn + 1, byScoreAscending);

    for (T t : a.elementSet()) {
        compareAndAdd(a, b, maxReturn, threshold, totalA, totalB, best, t);
    }

    // if threshold >= 0 we only iterate through a because anything not
    // there can't be as or more common than in b.
    if (threshold < 0) {
        for (T t : b.elementSet()) {
            // only items missing from a need be scored
            if (a.count(t) == 0) {
                compareAndAdd(a, b, maxReturn, threshold, totalA, totalB, best, t);
            }
        }
    }

    List<ScoredItem<T>> r = Lists.newArrayList(best);
    Collections.sort(r, byScoreAscending.reverse());
    return r;
}