Example usage for com.google.common.collect HashMultiset create

List of usage examples for com.google.common.collect HashMultiset create

Introduction

This page collects usage examples for com.google.common.collect HashMultiset#create(), drawn from open-source projects.

Prototype

public static <E> HashMultiset<E> create() 

Document

Creates a new, empty HashMultiset using the default initial capacity.
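
Before the project snippets below, here is a minimal, self-contained sketch of the factory method on its own. It assumes only Guava on the classpath; the class name and the sample data are invented for illustration.

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class HashMultisetExample {
    public static void main(String[] args) {
        // create() returns a new, empty multiset; every element's count starts at zero.
        Multiset<String> words = HashMultiset.create();
        words.add("apple");
        words.add("apple");
        words.add("banana");

        System.out.println(words.count("apple"));   // 2
        System.out.println(words.count("cherry"));  // 0, no null check needed
        System.out.println(words.size());           // 3, total occurrences
        System.out.println(words.elementSet());     // distinct elements only
    }
}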

Usage

From source file: com.googlecode.blaisemath.graph.mod.metrics.BetweenCentrality.java

/**
 * Breadth-first search algorithm for an unweighted graph to generate
 * betweenness scores, with specified starting vertex. From <i>Brandes</i>,
 * "A Faster Algorithm for Betweenness Centrality"
 *
 * @param graph the graph
 * @param start the start vertex
 * @param between data structure storing existing betweenness centrality values
 * @param multiplier applied to all elements of resulting map
 * @return data structure encoding the result
 */
static <V> Map<V, Double> brandes(Graph<V> graph, V start, Map<V, Double> between, double multiplier) {
    Set<V> nodes = graph.nodes();
    if (!nodes.contains(start)) {
        return new HashMap<V, Double>();
    }

    // number of shortest paths to each vertex
    Multiset<V> numShortest = HashMultiset.create();
    // length of shortest paths to each vertex
    Map<V, Integer> lengths = new HashMap<V, Integer>();
    // tracks elements in non-increasing order for later use
    Deque<V> deque = Queues.newArrayDeque();
    // tracks vertex predecessors in resulting tree
    Multimap<V, V> pred = HashMultimap.create();

    GraphUtils.breadthFirstSearch(graph, start, numShortest, lengths, deque, pred);

    // compute betweenness
    Map<V, Double> dependencies = new HashMap<V, Double>();
    for (V v : nodes) {
        dependencies.put(v, 0.0);
    }
    while (!deque.isEmpty()) {
        V w = deque.pollLast();
        for (V v : pred.get(w)) {
            dependencies.put(v, dependencies.get(v)
                    + (double) numShortest.count(v) / numShortest.count(w) * (1 + dependencies.get(w)));
        }
        if (!w.equals(start)) { // value equality; != would compare references
            between.put(w, between.get(w) + multiplier * dependencies.get(w));
        }
    }

    return between;

}
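
In this snippet the multiset serves as a per-vertex counter: judging by the comment above its declaration, GraphUtils.breadthFirstSearch adds a vertex to numShortest once per shortest path found, so count(v) / count(w) later gives the fraction of w's shortest paths passing through v. A hedged sketch of that counting pattern in isolation (the vertex labels and counts are invented):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class PathCountSketch {
    public static void main(String[] args) {
        // A multiset replaces the usual Map<V, Integer> plus get/put bookkeeping.
        Multiset<String> numShortest = HashMultiset.create();
        numShortest.add("v");    // one shortest path found to v
        numShortest.add("w", 2); // add(element, occurrences): two paths to w
        System.out.println((double) numShortest.count("v") / numShortest.count("w")); // 0.5
    }
}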

From source file: sklearn.cluster.KMeans.java

@Override
public ClusteringModel encodeModel(Schema schema) {
    int[] shape = getClusterCentersShape();

    int numberOfClusters = shape[0];
    int numberOfFeatures = shape[1];

    List<? extends Number> clusterCenters = getClusterCenters();
    List<Integer> labels = getLabels();

    Multiset<Integer> labelCounts = HashMultiset.create();

    if (labels != null) {
        labelCounts.addAll(labels);
    }

    List<Cluster> clusters = new ArrayList<>();

    for (int i = 0; i < numberOfClusters; i++) {
        Array array = PMMLUtil
                .createRealArray(MatrixUtil.getRow(clusterCenters, numberOfClusters, numberOfFeatures, i));

        Cluster cluster = new Cluster().setId(String.valueOf(i))
                .setSize((labelCounts.size() > 0 ? labelCounts.count(i) : null)).setArray(array);

        clusters.add(cluster);
    }

    List<Feature> features = schema.getFeatures();

    List<ClusteringField> clusteringFields = ClusteringModelUtil.createClusteringFields(features);

    ComparisonMeasure comparisonMeasure = new ComparisonMeasure(ComparisonMeasure.Kind.DISTANCE)
            .setCompareFunction(CompareFunction.ABS_DIFF).setMeasure(new SquaredEuclidean());

    Output output = ClusteringModelUtil.createOutput(FieldName.create("Cluster"), clusters);

    ClusteringModel clusteringModel = new ClusteringModel(MiningFunction.CLUSTERING,
            ClusteringModel.ModelClass.CENTER_BASED, numberOfClusters, ModelUtil.createMiningSchema(schema),
            comparisonMeasure, clusteringFields, clusters).setOutput(output);

    return clusteringModel;
}
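
Here addAll collapses the per-sample label list into per-cluster sizes in a single call, and labelCounts.count(i) is then the number of samples assigned to cluster i. A minimal sketch of that idiom (the labels are invented):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;
import java.util.Arrays;
import java.util.List;

public class ClusterSizeSketch {
    public static void main(String[] args) {
        List<Integer> labels = Arrays.asList(0, 1, 1, 2, 1, 0); // one label per sample
        Multiset<Integer> labelCounts = HashMultiset.create();
        labelCounts.addAll(labels);
        System.out.println(labelCounts.count(1)); // 3 samples in cluster 1
        System.out.println(labelCounts.count(3)); // 0, no samples ever assigned
    }
}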

From source file: carskit.alg.cars.transformation.prefiltering.splitting.ItemSplitting.java

public Table<Integer, Integer, Integer> split(SparseMatrix sm, int min) {
    Table<Integer, Integer, Integer> datatable = HashBasedTable.create();

    for (Integer j : itemRatingList.keySet()) {
        Collection<Integer> uis = itemRatingList.get(j);
        double maxt = Double.NEGATIVE_INFINITY; // Double.MIN_VALUE is the smallest positive double, not the most negative
        int splitcond = -1;

        for (Integer cond : condContextsList.keySet()) {
            Collection<Integer> ctx = condContextsList.get(cond);
            // start to extract two rating list
            HashMultiset<Double> rate1 = HashMultiset.create();
            HashMultiset<Double> rate2 = HashMultiset.create();

            for (Integer ui : uis) {
                List<Integer> uctx = sm.getColumns(ui);
                for (Integer c : uctx) {
                    double rate = sm.get(ui, c);
                    if (ctx.contains(c))
                        rate1.add(rate);
                    else
                        rate2.add(rate);
                }
            }

            double[] drate1 = Doubles.toArray(rate1);
            double[] drate2 = Doubles.toArray(rate2);

            if (drate1.length >= min && drate2.length >= min) {
                TTest tt = new TTest();
                double p = tt.tTest(drate1, drate2);
                if (p < 0.05) {
                    double t = tt.t(drate1, drate2);
                    if (t > maxt) {
                        // update the split
                        splitcond = cond;
                        maxt = t;
                    }
                }
            }
        }
        if (splitcond != -1) {
            // put u, ctx, new uid into datatable
            int newid = startId++;
            Collection<Integer> ctx = condContextsList.get(splitcond);
            for (Integer c : ctx)
                datatable.put(j, c, newid);
        }

    }
    Logs.info(datatable.rowKeySet().size() + " items have been split.");
    return datatable;
}
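
The two multisets must keep duplicate ratings (two users may both rate 4.0), which is why the snippet collects samples in a HashMultiset rather than a Set before handing them to the t-test. A tiny sketch of that collection-to-array step (the rating values are invented):

import com.google.common.collect.HashMultiset;
import com.google.common.primitives.Doubles;
import java.util.Arrays;

public class RatingSampleSketch {
    public static void main(String[] args) {
        HashMultiset<Double> rate1 = HashMultiset.create();
        rate1.add(4.0);
        rate1.add(4.0); // a Set would silently drop this duplicate sample
        rate1.add(2.5);
        double[] drate1 = Doubles.toArray(rate1); // e.g. {4.0, 4.0, 2.5}, order unspecified
        System.out.println(Arrays.toString(drate1));
    }
}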

From source file: org.opennms.features.jmxconfiggenerator.webui.ui.validators.UniqueAttributeNameValidator.java

@Override
protected boolean isValidValue(String value) {
    if (value == null) {
        return false; // validation not possible
    }
    String alias = value; // value is already a String, no cast needed
    // count name occurrences
    Multiset<String> nameMultiSet = HashMultiset.create();
    for (Entry<Object, String> entry : provider.getNames().entrySet()) {
        Object itemId = entry.getKey();
        String name = entry.getValue();
        //use name from textFieldItemMap if an entry for itemId exists, otherwise use name from provider
        nameMultiSet.add(
                textFieldItemMap.get(itemId) == null ? name : (String) textFieldItemMap.get(itemId).getValue());
    }
    return nameMultiSet.count(alias) <= 1; // valid only if the name occurs at most once
}
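
The check works because a multiset keeps the duplicates a plain Set would swallow, so count(alias) <= 1 exposes a name collision. A standalone sketch of the same test (the attribute names are invented):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;
import java.util.Arrays;

public class UniqueNameSketch {
    public static void main(String[] args) {
        Multiset<String> names = HashMultiset.create();
        names.addAll(Arrays.asList("cpu", "mem", "cpu"));
        System.out.println(names.count("cpu") <= 1); // false, "cpu" appears twice
        System.out.println(names.count("mem") <= 1); // true
    }
}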

From source file: org.bridgedb.tools.qc.PatternChecker.java

public void run(File f) throws SQLException, IDMapperException {
    String database = "" + f;
    //TODO: we can use the new Iterator interface here...
    DBConnector con = new DataDerby();
    Connection sqlcon = con.createConnection(database, 0);

    Multimap<DataSource, String> missExamples = HashMultimap.create();
    Multiset<DataSource> misses = HashMultiset.create();
    Multiset<DataSource> totals = HashMultiset.create();
    Map<DataSource, Pattern> patterns = DataSourcePatterns.getPatterns();

    //      String url = "jdbc:derby:jar:(" + f + ")database";
    //      IDMapperRdb gdb = SimpleGdbFactory.createInstance("" + f, url);

    Statement st = sqlcon.createStatement();
    ResultSet rs = st.executeQuery("select id, code from datanode");

    while (rs.next()) {
        String id = rs.getString(1);
        String syscode = rs.getString(2);
        if (DataSource.systemCodeExists(syscode)) {
            DataSource ds = DataSource.getExistingBySystemCode(syscode);
            if (patterns.get(ds) == null)
                continue; // skip if there is no pattern defined.

            Set<DataSource> matches = DataSourcePatterns.getDataSourceMatches(id);
            if (!matches.contains(ds)) {
                if (missExamples.get(ds).size() < 10)
                    missExamples.put(ds, id);
                misses.add(ds);
            }
            totals.add(ds);
        }
    }

    //         String code = rs.getString (2);
    //System.out.println (id + "\t" + code);

    for (DataSource ds : totals.elementSet()) {
        int miss = misses.count(ds);
        int total = totals.count(ds);

        if (miss > 0) {
            String severity = miss < (total / 25) ? "WARNING" : "ERROR";
            System.out.println(severity + ": " + miss + "/" + total + " (" + miss * 100 / total
                    + "%) ids do not match expected pattern for " + ds);
            System.out.println(severity + ": expected pattern is '" + patterns.get(ds) + "'");
            boolean first = true;
            for (String id : missExamples.get(ds)) {
                System.out.print(first ? severity + ": aberrant ids are e.g. " : ", ");
                first = false;
                System.out.print("'" + id + "'");
            }
            System.out.println();
        }
    }

    allMisses.addAll(misses);
    allTotals.addAll(totals);
}
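
misses and totals act as parallel counters keyed by DataSource, and because count returns 0 for an element that was never added, the miss ratio needs no null handling. A short sketch of the paired-counter pattern (the data-source name and numbers are invented):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class MissRatioSketch {
    public static void main(String[] args) {
        Multiset<String> misses = HashMultiset.create();
        Multiset<String> totals = HashMultiset.create();
        totals.add("Ensembl", 200); // 200 ids checked
        misses.add("Ensembl", 3);   // 3 of them failed the pattern
        for (String ds : totals.elementSet()) {
            int miss = misses.count(ds); // 0 if ds never missed
            System.out.println(ds + ": " + (miss * 100 / totals.count(ds)) + "% misses");
        }
    }
}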

From source file: org.sonar.java.checks.FieldMatchMethodNameCheck.java

@Override
public void visitNode(Tree tree) {
    Symbol.TypeSymbol classSymbol = ((ClassTree) tree).symbol();
    if (classSymbol != null) {
        Map<String, Symbol> indexSymbol = Maps.newHashMap();
        Multiset<String> fields = HashMultiset.create();
        Map<String, String> fieldsOriginal = Maps.newHashMap();
        Set<String> methodNames = Sets.newHashSet();
        Collection<Symbol> symbols = classSymbol.memberSymbols();
        for (Symbol sym : symbols) {
            String symName = sym.name().toLowerCase();
            if (sym.isVariableSymbol()) {
                indexSymbol.put(symName, sym);
                fields.add(symName);
                fieldsOriginal.put(symName, sym.name());
            }
            if (sym.isMethodSymbol()) {
                methodNames.add(symName);
            }
        }
        fields.addAll(methodNames);
        for (Multiset.Entry<String> entry : fields.entrySet()) {
            if (entry.getCount() > 1) {
                Tree field = indexSymbol.get(entry.getElement()).declaration();
                if (field != null) {
                    addIssue(field, "Rename the \"" + fieldsOriginal.get(entry.getElement()) + "\" member.");
                }
            }
        }
    }
}
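
entrySet() is the key call here: it yields each distinct element together with its count, so a count above 1 means a lowercased field name also occurs as a method name. A minimal sketch of that duplicate scan (the identifiers are invented):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class DuplicateScanSketch {
    public static void main(String[] args) {
        Multiset<String> names = HashMultiset.create();
        names.add("name"); // from a field
        names.add("name"); // from a method with the same lowercased name
        names.add("id");
        for (Multiset.Entry<String> entry : names.entrySet()) {
            if (entry.getCount() > 1) {
                System.out.println("duplicate: " + entry.getElement()); // duplicate: name
            }
        }
    }
}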

From source file: com.recalot.model.rec.librec.DataDAO.java

/**
 * Constructor for a data DAO object.
 */
public DataDAO() {
    this.userIds = HashBiMap.create();

    this.itemIds = HashBiMap.create();
    scaleDist = HashMultiset.create();
}

From source file: carskit.alg.cars.transformation.prefiltering.splitting.UserSplitting.java

public Table<Integer, Integer, Integer> split(SparseMatrix sm, int min) {
    Logs.debug("UserSplitting: startId = " + startId);
    Table<Integer, Integer, Integer> datatable = HashBasedTable.create();

    for (Integer u : userRatingList.keySet()) {
        Collection<Integer> uis = userRatingList.get(u);
        double maxt = Double.NEGATIVE_INFINITY; // Double.MIN_VALUE is the smallest positive double, not the most negative
        int splitcond = -1;

        for (Integer cond : condContextsList.keySet()) {
            Collection<Integer> ctx = condContextsList.get(cond);
            // start to extract two rating list
            HashMultiset<Double> rate1 = HashMultiset.create();
            HashMultiset<Double> rate2 = HashMultiset.create();

            for (Integer ui : uis) {
                List<Integer> uctx = sm.getColumns(ui);
                for (Integer c : uctx) {
                    double rate = sm.get(ui, c);
                    if (ctx.contains(c))
                        rate1.add(rate);
                    else
                        rate2.add(rate);
                }
            }

            double[] drate1 = Doubles.toArray(rate1);
            double[] drate2 = Doubles.toArray(rate2);

            if (drate1.length >= min && drate2.length >= min) {
                TTest tt = new TTest();
                double p = tt.tTest(drate1, drate2);
                if (p < 0.05) {
                    double t = tt.t(drate1, drate2);
                    if (t > maxt) {
                        // update the split
                        splitcond = cond;
                        maxt = t;
                    }
                }
            }
        }

        if (splitcond != -1) {
            // put u, ctx, new uid into datatable
            int newid = startId++;
            Collection<Integer> ctx = condContextsList.get(splitcond);
            for (Integer c : ctx)
                datatable.put(u, c, newid);
        }

    }
    Logs.info(datatable.rowKeySet().size() + " users have been split.");
    return datatable;
}

From source file: bio.gcat.operation.analysis.TupleUsage.java

@Override
public Result analyse(Collection<Tuple> tuples, Object... values) {
    Logger logger = getLogger();

    if (values[0] == null) {
        logger.log("Choose an existing file to count tuple usage in.");
        return null;
    }

    Acid acid;
    if ((acid = Tuple.tuplesAcid(tuples)) == null) {
        logger.log("Tuples with variable acids, can't analyse tuple usage.");
        return null; //tuples not all in same acid
    }

    Multiset<Tuple> tupleCount = HashMultiset.create();
    try (BufferedReader reader = new BufferedReader(new InputStreamReader((InputStream) values[0]))) {
        String line;
        while ((line = reader.readLine()) != null)
            tupleCount.addAll(normalizeTuples(splitTuples(tupleString(line).trim()), acid));
    } catch (IOException e) {
        logger.log("Error while reading file.", e);
        return null;
    }

    StringBuilder builder = new StringBuilder();
    for (Tuple tuple : (!tuples.isEmpty() && !containsOnly(tuples, EMPTY_TUPLE) ? normalizeTuples(tuples, acid)
            : tupleCount.elementSet()))
        builder.append(DELIMITER).append(tupleCount.count(tuple)).append(TIMES).append(tuple);
    return new SimpleResult(this,
            builder.length() != 0 ? builder.substring(DELIMITER.length()).toString() : "no tuples");
}
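
The multiset accumulates tuple frequencies line by line across the whole input, and elementSet() stands in for the distinct tuples when no reference collection is given. A hedged sketch of the same stream-counting idiom, using plain strings instead of the project's Tuple type (the input text is invented):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringReader;

public class StreamCountSketch {
    public static void main(String[] args) throws IOException {
        Multiset<String> tupleCount = HashMultiset.create();
        try (BufferedReader reader = new BufferedReader(new StringReader("AUG GGC\nAUG UAA"))) {
            String line;
            while ((line = reader.readLine()) != null) {
                for (String token : line.split("\\s+")) {
                    tupleCount.add(token);
                }
            }
        }
        System.out.println(tupleCount.count("AUG")); // 2
        System.out.println(tupleCount.elementSet()); // all distinct tokens
    }
}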

From source file: com.continuuity.loom.layout.change.AddServiceChangeIterator.java

public AddServiceChangeIterator(ClusterLayout clusterLayout, String service) {
    this.service = service;
    // cluster services are needed in order to prune the constraints to only use ones that pertain to services
    // on the cluster
    Set<String> expandedClusterServices = Sets.newHashSet(service);
    for (NodeLayout nodeLayout : clusterLayout.getLayout().elementSet()) {
        expandedClusterServices.addAll(nodeLayout.getServiceNames());
    }//from  w  w  w .j  a va 2  s.  c  om
    // first figure out which node layouts can add this service
    this.expandableNodeLayouts = Lists.newArrayListWithCapacity(clusterLayout.getLayout().elementSet().size());
    Multiset<NodeLayout> expandedCounts = HashMultiset.create();
    for (NodeLayout originalNodeLayout : clusterLayout.getLayout().elementSet()) {
        NodeLayout expandedNodeLayout = NodeLayout.addServiceToNodeLayout(originalNodeLayout, service);
        if (expandedNodeLayout.satisfiesConstraints(clusterLayout.getConstraints(), expandedClusterServices)) {
            expandableNodeLayouts.add(originalNodeLayout);
            expandedCounts.add(originalNodeLayout, clusterLayout.getLayout().count(originalNodeLayout));
        }
    }
    // sort expandable node layouts by preference order
    Collections.sort(this.expandableNodeLayouts, new NodeLayoutComparator(null, null));
    // need to pass this to the slotted iterator so we don't try and add the service to a node layout more times
    // than there are nodes for the node layout.
    this.nodeLayoutMaxCounts = new int[expandableNodeLayouts.size()];
    for (int i = 0; i < nodeLayoutMaxCounts.length; i++) {
        nodeLayoutMaxCounts[i] = expandedCounts.count(expandableNodeLayouts.get(i));
    }
    // figure out the max number of nodes we can add the service to. Start off by saying we can add it to all nodes.
    this.nodesToAddTo = expandedCounts.size();
    // we always need to add the service to at least one node.
    this.minNodesToAddTo = 1;
    ServiceConstraint serviceConstraint = clusterLayout.getConstraints().getServiceConstraints().get(service);
    // if there is a max constraint on this service and it's less than the number of nodes in the cluster, start
    // there instead. Similarly, if there is a min constraint on this service higher than 1, use that instead.
    if (serviceConstraint != null) {
        this.nodesToAddTo = Math.min(serviceConstraint.getMaxCount(), this.nodesToAddTo);
        this.minNodesToAddTo = Math.max(serviceConstraint.getMinCount(), this.minNodesToAddTo);
    }
    this.nodeLayoutCountIterator = (this.nodesToAddTo < 1) ? null
            : new SlottedCombinationIterator(expandableNodeLayouts.size(), nodesToAddTo, nodeLayoutMaxCounts);
}