Example usage for com.google.common.collect Multiset elementSet

Introduction

On this page you can find example usages of com.google.common.collect.Multiset.elementSet().

Prototype

Set<E> elementSet();

Documentation

Returns the set of distinct elements contained in this multiset.
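
The element set is a view of the multiset's distinct elements; a recurring idiom in the examples below pairs elementSet() with count(elem) to read each element's multiplicity. A minimal sketch of that idiom (class name and sample values are illustrative, not taken from any example below):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class ElementSetDemo {
    public static void main(String[] args) {
        Multiset<String> words = HashMultiset.create();
        words.add("a");
        words.add("a");
        words.add("b");

        // elementSet() contains each distinct element exactly once, so
        // elementSet().size() is the number of distinct elements (2 here).
        for (String word : words.elementSet()) {
            System.out.println(word + " x " + words.count(word));
        }
    }
}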

Usage

From source file:edu.uci.ics.sourcerer.tools.java.component.identifier.internal.ComponentRepositoryBuilder.java

private void computeLibraryDependencies() {
    task.start("Computing library version to library dependencies");
    {
        // Build map from Clusters to Libraries
        Multimap<Cluster, Library> clustersToLibraries = HashMultimap.create();
        for (Library library : repo.getLibraries()) {
            if (library.getCoreCluster() != null) {
                clustersToLibraries.put(library.getCoreCluster(), library);
            }
            for (Cluster cluster : library.getSecondaryClusters()) {
                clustersToLibraries.put(cluster, library);
            }
        }

        for (Library library : repo.getLibraries()) {
            for (LibraryVersion version : library.getVersions()) {
                Multiset<Library> librarySet = HashMultiset.create();
                for (ClusterVersion clusterVersion : version.getClusters()) {
                    librarySet.addAll(clustersToLibraries.get(clusterVersion.getCluster()));
                }

                for (Library dep : librarySet.elementSet()) {
                    if (library != dep) {
                        if (dep.getCoreCluster() == null) {
                            // Must match every secondary cluster for package libraries
                            if (librarySet.count(dep) == dep.getSecondaryClusters().size()) {
                                version.addLibraryDependency(dep);
                            }
                        } else {
                            // See if there's a jar in this library that matches the right clusters
                            for (Jar jar : dep.getJars()) {
                                if (version.getClusters().containsAll(jarsToClusters.get(jar))) {
                                    version.addLibraryDependency(dep);
                                    break;
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    task.finish();
}
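
When both the element and its count are needed, as in the loop over librarySet above, Multiset also offers entrySet(), which delivers each distinct element together with its count in one pass instead of a separate count() lookup per element. A minimal sketch of the equivalent iteration (names and values are illustrative):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class EntrySetDemo {
    public static void main(String[] args) {
        Multiset<String> librarySet = HashMultiset.create();
        librarySet.add("guava", 3);
        librarySet.add("joda-time", 1);

        // Each Multiset.Entry pairs one distinct element with its count.
        for (Multiset.Entry<String> entry : librarySet.entrySet()) {
            System.out.println(entry.getElement() + " x " + entry.getCount());
        }
    }
}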

From source file:net.shipilev.concurrent.torture.Runner.java

private Result dump(ConcurrencyTest test, Multiset<Long> results) {
    ObjectFactory factory = new ObjectFactory();
    Result result = factory.createResult();

    result.setName(test.getClass().getName());

    for (Long e : results.elementSet()) {
        byte[] b = longToByteArr(e);
        byte[] temp = new byte[test.resultSize()];
        System.arraycopy(b, 0, temp, 0, test.resultSize());
        b = temp;

        State state = factory.createState();
        state.setId(Arrays.toString(b));
        state.setCount(results.count(e));
        result.getState().add(state);
    }

    Env env = factory.createEnv();
    for (Map.Entry<String, String> entry : Environment.getEnvironment().entrySet()) {
        Kv kv = factory.createKv();
        kv.setKey(entry.getKey());
        kv.setValue(entry.getValue());
        env.getProperty().add(kv);
    }
    result.setEnv(env);

    try {
        String packageName = Result.class.getPackage().getName();
        JAXBContext jc = JAXBContext.newInstance(packageName);
        Marshaller marshaller = jc.createMarshaller();
        marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
        marshaller.marshal(result, new File(destDir + "/" + test.getClass().getName() + ".xml"));
    } catch (Throwable e) {
        e.printStackTrace();
    }

    return result;
}

From source file:net.shipilev.elections.cikrf.Parser.java

private void summaryCompare(PrintWriter pw, SummaryData summ1, SummaryData summ2) {
    HashSet<List<String>> geos = new HashSet<List<String>>();
    geos.addAll(summ1.keys());
    geos.retainAll(summ2.keys());

    boolean foundAnomalies = false;
    for (List<String> geo : geos) {
        Multiset<Metric> val1 = summ1.get(geo);
        Multiset<Metric> val2 = summ2.get(geo);

        Collection<Metric> metrics = new TreeSet<Metric>();
        metrics.addAll(val1.elementSet());
        metrics.addAll(val2.elementSet());

        if (!val1.equals(val2)) {
            foundAnomalies = true;
            pw.printf("Found mismatches in aggregates over %s:\n", geo);
            for (Metric key : metrics) {
                Integer v1 = val1.count(key);
                Integer v2 = val2.count(key);

                if (!v1.equals(v2)) {
                    pw.printf(" {%9d} vs {%9d} [%4.1f%%]: %s\n", v1, v2, (v1 * 100.0 / v2 - 100), key);
                }
            }
            pw.println();
        }
    }

    if (!foundAnomalies) {
        pw.println("No anomalies in data.");
    }

    pw.flush();
}

From source file:org.onebusaway.nyc.vehicle_tracking.impl.simulator.SimulatorTask.java

public VehicleLocationDetails getParticleDetails(int particleId, int recordIndex) {
    final VehicleLocationDetails details = new VehicleLocationDetails();
    details.setId(_id);

    final Collection<Multiset.Entry<Particle>> particles;
    if (recordIndex < 0) {
        details.setLastObservation(
                RecordLibrary.getNycTestInferredLocationRecordAsNycRawLocationRecord(_mostRecentRecord));
        particles = _vehicleLocationInferenceService.getCurrentParticlesForVehicleId(_vehicleId).entrySet();
    } else {
        details.setLastObservation(getDetails(recordIndex).getLastObservation());
        particles = getDetails(recordIndex).getParticles();
    }

    if (particles != null) {
        for (final Multiset.Entry<Particle> pEntry : particles) {
            Particle p = pEntry.getElement();
            if (p.getIndex() == particleId) {
                final Multiset<Particle> history = TreeMultiset.create(Ordering.natural());
                while (p != null && history.elementSet().size() <= _particleParentSize) {
                    history.add(p, pEntry.getCount());
                    p = p.getParent();
                }
                details.setParticles(history);
                details.setHistory(true);
                break;
            }
        }
    }
    return details;
}

From source file:bio.gcat.operation.analysis.TupleUsage.java

@Override
public Result analyse(Collection<Tuple> tuples, Object... values) {
    Logger logger = getLogger();

    if (values[0] == null) {
        logger.log("Choose an existing file to count tuple usage in.");
        return null;
    }

    Acid acid;
    if ((acid = Tuple.tuplesAcid(tuples)) == null) {
        logger.log("Tuples with variable acids, can't analyse tuple usage.");
        return null; //tuples not all in same acid
    }

    Multiset<Tuple> tupleCount = HashMultiset.create();
    try (BufferedReader reader = new BufferedReader(new InputStreamReader((InputStream) values[0]))) {
        String line;
        while ((line = reader.readLine()) != null)
            tupleCount.addAll(normalizeTuples(splitTuples(tupleString(line).trim()), acid));
    } catch (IOException e) {
        logger.log("Error while reading file.", e);
        return null;
    }

    StringBuilder builder = new StringBuilder();
    for (Tuple tuple : (!tuples.isEmpty() && !containsOnly(tuples, EMPTY_TUPLE) ? normalizeTuples(tuples, acid)
            : tupleCount.elementSet()))
        builder.append(DELIMITER).append(tupleCount.count(tuple)).append(TIMES).append(tuple);
    return new SimpleResult(this,
            builder.length() != 0 ? builder.substring(DELIMITER.length()).toString() : "no tuples");
}

From source file:edu.uci.ics.sourcerer.tools.java.component.identifier.internal.ComponentRepositoryBuilder.java

private void computeVersionDependencies() {
    task.start("Computing library version to version dependencies");
    // Build map from FqnVersions to LibraryVersions
    Multimap<FqnVersion, LibraryVersion> fqnVersionToLibVersion = HashMultimap.create();
    for (Library library : repo.getLibraries()) {
        for (LibraryVersion version : library.getVersions()) {
            for (FqnVersion fqn : version.getFqnVersions()) {
                fqnVersionToLibVersion.put(fqn, version);
            }
        }
    }

    for (Library library : repo.getLibraries()) {
        for (LibraryVersion version : library.getVersions()) {
            // For each version of the library, look up all the libraries that contain that fqn
            Multiset<LibraryVersion> versionSet = HashMultiset.create();
            for (FqnVersion fqn : version.getFqnVersions()) {
                versionSet.addAll(fqnVersionToLibVersion.get(fqn));
            }

            // See if any other library contains a subset of the fqn versions for this library
            for (LibraryVersion libVersion : versionSet.elementSet()) {
                if (version != libVersion
                        && versionSet.count(libVersion) == libVersion.getFqnVersions().size()) {
                    version.addVersionDependency(libVersion);
                }
            }
        }
    }
    task.finish();
}

From source file:com.palantir.atlasdb.keyvalue.impl.SweepStatsKeyValueService.java

private void flushWrites(Multiset<String> writes, Set<String> clears) {
    if (writes.isEmpty() && clears.isEmpty()) {
        log.debug("No writes to flush");
        return;
    }

    log.debug("Flushing stats for {} writes and {} clears", writes.size(), clears.size());
    log.trace("Flushing writes: {}", writes);
    log.trace("Flushing clears: {}", clears);
    try {
        Set<String> tableNames = Sets.difference(writes.elementSet(), clears);
        Iterable<byte[]> rows = Collections2.transform(tableNames, Functions
                .compose(Persistables.persistToBytesFunction(), SweepPriorityRow.fromFullTableNameFun()));
        Map<Cell, Value> oldWriteCounts = delegate().getRows(SWEEP_PRIORITY_TABLE, rows,
                SweepPriorityTable.getColumnSelection(SweepPriorityNamedColumn.WRITE_COUNT), Long.MAX_VALUE);
        Map<Cell, byte[]> newWriteCounts = Maps.newHashMapWithExpectedSize(writes.elementSet().size());
        byte[] col = SweepPriorityNamedColumn.WRITE_COUNT.getShortName();
        for (String tableName : tableNames) {
            Preconditions.checkState(!tableName.startsWith(AtlasDbConstants.NAMESPACE_PREFIX),
                    "The sweep stats kvs should wrap the namespace mapping kvs, not the other way around.");
            byte[] row = SweepPriorityRow.of(tableName).persistToBytes();
            Cell cell = Cell.create(row, col);
            Value oldValue = oldWriteCounts.get(cell);
            long oldCount = oldValue == null || oldValue.getContents().length == 0 ? 0
                    : SweepPriorityTable.WriteCount.BYTES_HYDRATOR.hydrateFromBytes(oldValue.getContents())
                            .getValue();
            long newValue = clears.contains(tableName) ? writes.count(tableName)
                    : oldCount + writes.count(tableName);
            log.debug("Sweep priority for {} has {} writes (was {})", tableName, newValue, oldCount);
            newWriteCounts.put(cell, SweepPriorityTable.WriteCount.of(newValue).persistValue());
        }
        long timestamp = timestampService.getFreshTimestamp();

        // Committing before writing is intentional, we want the start timestamp to
        // show up in the transaction table before we do our writes.
        commit(timestamp);
        delegate().put(SWEEP_PRIORITY_TABLE, newWriteCounts, timestamp);
    } catch (RuntimeException e) {
        Set<String> allTableNames = delegate().getAllTableNames();
        if (!allTableNames.contains(SWEEP_PRIORITY_TABLE)
                || !allTableNames.contains(TransactionConstants.TRANSACTION_TABLE)) {
            // ignore problems when sweep or transaction tables don't exist
            log.warn("Ignoring failed sweep stats flush due to {}", e.getMessage(), e);
        }
        log.error("Unable to flush sweep stats for writes {} and clears {}: {}", writes, clears, e.getMessage(),
                e);
        throw e;
    }
}

From source file:edu.cmu.cs.lti.ark.fn.identification.training.AlphabetCreationThreaded.java

private Callable<Integer> newJob(final int threadId, final List<String> frameLineBatch,
        final List<String> parseLines, final Multiset<String> alphabet) {
    return new Callable<Integer>() {
        public Integer call() {
            logger.info("Thread " + threadId + " : start");
            for (int i = 0; i < frameLineBatch.size() && !Thread.currentThread().isInterrupted(); i++) {
                processLine(frameLineBatch.get(i), parseLines, alphabet);
                if (i % 50 == 0) {
                    logger.info("Thread " + i + "\n" + "Processed index:" + i + " of " + frameLineBatch.size()
                            + "\n" + "Alphabet size:" + alphabet.elementSet().size());
                }
            }
            logger.info("Thread " + threadId + " : end");
            return frameLineBatch.size();
        }
    };
}

From source file:tv.floe.metronome.io.records.RCV1RecordFactory.java

public static void ScanFile(String file, int debug_break_cnt) throws IOException {

    BufferedReader reader = null;
    int line_count = 0;

    Multiset<String> class_count = ConcurrentHashMultiset.create();
    Multiset<String> namespaces = ConcurrentHashMultiset.create();

    try {
        reader = new BufferedReader(new FileReader(file));

        String line = reader.readLine();

        while (line != null && line.length() > 0) {

            String[] parts = line.split(" ");

            class_count.add(parts[0]);
            namespaces.add(parts[1]);

            line = reader.readLine();
            line_count++;

            Vector v = new RandomAccessSparseVector(FEATURES);

            for (int x = 2; x < parts.length; x++) {
                String[] feature = parts[x].split(":");
                int index = Integer.parseInt(feature[0]) % FEATURES;
                double val = Double.parseDouble(feature[1]);

                System.out.println(feature[1] + " = " + val);

                if (index < FEATURES) {
                    v.set(index, val);
                } else {

                    System.out.println("Could Hash: " + index + " to " + (index % FEATURES));

                }

            }

            System.out.println("###");

            if (line_count > debug_break_cnt) {
                break;
            }

        }

        System.out.println("Total Rec Count: " + line_count);

        System.out.println("-------------------- ");

        System.out.println("Classes");
        for (String word : class_count.elementSet()) {
            System.out.println("Class " + word + ": " + class_count.count(word) + " ");
        }

        System.out.println("-------------------- ");

        System.out.println("NameSpaces:");
        for (String word : namespaces.elementSet()) {
            System.out.println("Namespace " + word + ": " + namespaces.count(word) + " ");
        }

    } finally {
        // Guard against an NPE when the FileReader constructor itself throws.
        if (reader != null) {
            reader.close();
        }
    }

}

From source file:com.seajas.search.codex.service.social.SocialProfileService.java

@Override
public TwitterProfileSummaryDto getTwitterProfileSummary(final long twitterProfileId) {
    List<Tweet> tweets = socialFacade.getUserTimeline(twitterProfileId);

    SocialProfileDto socialProfileDto = null;
    TwitterProfile twitterProfile = socialFacade.getTwitterProfile(twitterProfileId);
    if (twitterProfile != null) {
        socialProfileDto = SocialProfileDto.translate(twitterProfile);
        socialProfileDto
                .setProfileImageMediaUrl(this.storeImageOnMediaServer(twitterProfile.getProfileImageUrl()));
    }

    Multiset<Long> mentionedCounter = HashMultiset.create();
    Multiset<String> hashTagCounter = HashMultiset.create();

    this.countTwitterEntities(tweets, mentionedCounter, hashTagCounter);

    mentionedCounter = Multisets.copyHighestCountFirst(mentionedCounter);
    hashTagCounter = Multisets.copyHighestCountFirst(hashTagCounter);

    List<MentionedDto> mentions = this.buildTwitterMentionedList(mentionedCounter);

    List<HashTagDto> hashTagList = Lists.newArrayList();
    for (String hashTag : hashTagCounter.elementSet()) {
        hashTagList.add(new HashTagDto(hashTag, hashTagCounter.count(hashTag)));
    }

    return new TwitterProfileSummaryDto(socialProfileDto, hashTagList, mentions);
}
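
Note on the example above: Multisets.copyHighestCountFirst returns an immutable copy whose iteration order places the most frequent elements first, so the elementSet() loop emits hash tags in descending order of frequency; that is presumably why both counters are copied before the mention and hash-tag lists are built.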