Example usage for com.google.common.collect Sets newHashSet

Introduction

This page collects example usages of com.google.common.collect Sets newHashSet, drawn from real-world source files.

Prototype

public static <E> HashSet<E> newHashSet(Iterator<? extends E> elements) 

Document

Creates a mutable HashSet instance containing the given elements.
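
A minimal, self-contained sketch of this overload; the class and variable names are illustrative, not taken from the examples below:

import java.util.HashSet;
import java.util.Iterator;
import java.util.List;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Sets;

public class NewHashSetDemo {
    public static void main(String[] args) {
        List<String> source = ImmutableList.of("a", "b", "a");
        Iterator<String> elements = source.iterator();
        // Duplicates collapse; the returned set is mutable.
        HashSet<String> set = Sets.newHashSet(elements);
        set.add("c");
        System.out.println(set); // e.g. [a, c, b] -- iteration order is unspecified
    }
}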

Usage

From source file:com.griddynamics.jagger.JaggerLauncher.java

public static void main(String[] args) throws Exception {
    Thread memoryMonitorThread = new Thread("memory-monitor") {
        @Override
        public void run() {
            for (;;) {
                try {
                    log.info("Memory info: totalMemory={}, freeMemory={}", Runtime.getRuntime().totalMemory(),
                            Runtime.getRuntime().freeMemory());

                    Thread.sleep(60000);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
            }
        }
    };
    memoryMonitorThread.setDaemon(true);
    memoryMonitorThread.start();

    String pid = ManagementFactory.getRuntimeMXBean().getName();
    System.out.println(String.format("PID:%s", pid));

    Properties props = System.getProperties();
    for (Map.Entry<Object, Object> prop : props.entrySet()) {
        log.info("{}: '{}'", prop.getKey(), prop.getValue());
    }
    log.info("");

    URL directory = new URL("file:" + System.getProperty("user.dir") + "/");
    loadBootProperties(directory, args[0], environmentProperties);

    log.debug("Bootstrap properties:");
    for (String propName : environmentProperties.stringPropertyNames()) {
        log.debug("   {}={}", propName, environmentProperties.getProperty(propName));
    }

    String[] roles = environmentProperties.getProperty(ROLES).split(",");
    Set<String> rolesSet = Sets.newHashSet(roles);

    if (rolesSet.contains(Role.COORDINATION_SERVER.toString())) {
        launchCoordinationServer(directory);
    }
    if (rolesSet.contains(Role.HTTP_COORDINATION_SERVER.toString())) {
        launchCometdCoordinationServer(directory);
    }
    if (rolesSet.contains(Role.RDB_SERVER.toString())) {
        launchRdbServer(directory);
    }
    if (rolesSet.contains(Role.MASTER.toString())) {
        launchMaster(directory);
    }
    if (rolesSet.contains(Role.KERNEL.toString())) {
        launchKernel(directory);
    }

    if (rolesSet.contains(Role.REPORTER.toString())) {
        launchReporter(directory);
    }

    // 'builder' is a field configured elsewhere in JaggerLauncher; its setup is not shown in this excerpt.
    LaunchManager launchManager = builder.build();

    int result = launchManager.launch();

    System.exit(result);

}

From source file:cosmos.example.BuildingPermitsExample.java

public static void main(String[] args) throws Exception {
    BuildingPermitsExample example = new BuildingPermitsExample();
    new JCommander(example, args);

    File inputFile = new File(example.fileName);

    Preconditions.checkArgument(inputFile.exists() && inputFile.isFile() && inputFile.canRead(),
            "Expected " + example.fileName + " to be a readable file");

    String zookeepers;
    String instanceName;
    Connector connector;
    MiniAccumuloCluster mac = null;
    File macDir = null;

    // Use the MiniAccumuloCluster if requested
    if (example.useMiniAccumuloCluster) {
        macDir = Files.createTempDir();
        String password = "password";
        MiniAccumuloConfig config = new MiniAccumuloConfig(macDir, password);
        config.setNumTservers(1);

        mac = new MiniAccumuloCluster(config);
        mac.start();

        zookeepers = mac.getZooKeepers();
        instanceName = mac.getInstanceName();

        ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zookeepers);
        connector = instance.getConnector("root", new PasswordToken(password));
    } else {
        // Otherwise connect to a running instance
        zookeepers = example.zookeepers;
        instanceName = example.instanceName;

        ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zookeepers);
        connector = instance.getConnector(example.username, new PasswordToken(example.password));
    }

    // Instantiate an instance of Cosmos
    Cosmos cosmos = new CosmosImpl(zookeepers);

    // Create a definition for the data we want to load
    Store id = Store.create(connector, new Authorizations(), AscendingIndexIdentitySet.create());

    // Register the definition with Cosmos so it can track its progress.
    cosmos.register(id);

    // Load all of the data from our inputFile
    LoadBuildingPermits loader = new LoadBuildingPermits(cosmos, id, inputFile);
    loader.run();

    // Finalize the SortableResult which will prevent future writes to the data set
    cosmos.finalize(id);

    // Flush the ingest traces to the backend so we can see the results.
    id.sendTraces();

    // Get back the Set of Columns that we've ingested.
    Set<Column> schema = Sets.newHashSet(cosmos.columns(id));

    log.debug("\nColumns: " + schema);

    Iterator<Column> iter = schema.iterator();
    while (iter.hasNext()) {
        Column c = iter.next();
        // Remove the internal ID field and columns that begin with CONTRACTOR_
        if (c.equals(LoadBuildingPermits.ID) || c.name().startsWith("CONTRACTOR_")) {
            iter.remove();
        }
    }

    Iterable<Index> indices = Iterables.transform(schema, new Function<Column, Index>() {

        @Override
        public Index apply(Column col) {
            return Index.define(col);
        }

    });

    // Ensure that we have locality groups set as we expect
    log.info("Ensure locality groups are set");
    id.optimizeIndices(indices);

    // Compact down the data for this SortableResult    
    log.info("Issuing compaction for relevant data");
    id.consolidate();

    final int numTopValues = 10;

    // Walk through each column in the result set
    for (Column c : schema) {
        Stopwatch sw = new Stopwatch();
        sw.start();

        // Get the number of times we've seen each value in a given column
        CloseableIterable<Entry<RecordValue<?>, Long>> groupingsInColumn = cosmos.groupResults(id, c);

        log.info(c.name() + ":");

        // Iterate over the counts, collecting the top N values in each column
        TreeMap<Long, RecordValue<?>> topValues = Maps.newTreeMap();

        for (Entry<RecordValue<?>, Long> entry : groupingsInColumn) {
            if (topValues.size() == numTopValues) {
                Entry<Long, RecordValue<?>> least = topValues.pollFirstEntry();

                if (least.getKey() < entry.getValue()) {
                    topValues.put(entry.getValue(), entry.getKey());
                } else {
                    topValues.put(least.getKey(), least.getValue());
                }
            } else if (topValues.size() < numTopValues) {
                topValues.put(entry.getValue(), entry.getKey());
            }
        }

        for (Long key : topValues.descendingKeySet()) {
            log.info(topValues.get(key).value() + " occurred " + key + " times");
        }

        sw.stop();

        log.info("Took " + sw.toString() + " to run query.\n");
    }

    log.info("Deleting records");

    // Delete the records we've ingested
    if (!example.useMiniAccumuloCluster) {
        // Because I'm lazy and don't want to wait around to run the BatchDeleter when we're just going
        // to rm -rf the directory in a few secs.
        cosmos.delete(id);
    }

    // And shut down Cosmos
    cosmos.close();

    log.info("Cosmos stopped");

    // If we were using MAC, also stop that
    if (example.useMiniAccumuloCluster && null != mac) {
        mac.stop();
        if (null != macDir) {
            FileUtils.deleteDirectory(macDir);
        }
    }
}

From source file:com.mmounirou.spotirss.spotify.tracks.SpotifyHrefQuery.java

public static void main(String[] args) throws IOException, SpotifyException {
    //@formatter:off
    //Map<Track, String> trackHrefs = new SpotifyHrefQuery(null).getTrackHrefs(Sets.newHashSet(new Track(Sets.newHashSet("p!nk)"),"blow me")));
    //Map<Track, String> trackHrefs = new SpotifyHrefQuery(null).getTrackHrefs(Sets.newHashSet(new Track(Sets.newHashSet("sia","david guetta"),"titanium")));
    //Map<Track, String> trackHrefs = new SpotifyHrefQuery(null).getTrackHrefs(Sets.newHashSet(new Track(Sets.newHashSet("janelle mone","fun."),"we are young ")));
    //Map<Track, String> trackHrefs = new SpotifyHrefQuery(null).getTrackHrefs(Sets.newHashSet(new Track(Sets.newHashSet("kanye west","big sean", "pusha t", "2 chainz"),"mercy")));
    //Map<Track, String> trackHrefs = new SpotifyHrefQuery(null).getTrackHrefs(Sets.newHashSet(new Track(Sets.newHashSet("selena gomez","the scene"),"love you like a love song")));
    //Map<Track, String> trackHrefs = new SpotifyHrefQuery(null).getTrackHrefs(Sets.newHashSet(new Track(Sets.newHashSet("che'nelle"),"believe")));
    //Map<Track, String> trackHrefs = new SpotifyHrefQuery(null).getTrackHrefs(Sets.newHashSet(new Track(Sets.newHashSet("dj khaled"),"take it to the head")));
    //Map<Track, String> trackHrefs = new SpotifyHrefQuery(null).getTrackHrefs(Sets.newHashSet(new Track(Sets.newHashSet("jason mraz"),"i won't give up")));
    //Map<Track, String> trackHrefs = new SpotifyHrefQuery(null).getTrackHrefs(Sets.newHashSet(new Track(Sets.newHashSet("adele"),"set fire to the rain")));
    //Map<Track, String> trackHrefs = new SpotifyHrefQuery(null).getTrackHrefs(Sets.newHashSet(new Track(Sets.newHashSet("pitbull","shakira"),"get it started")));
    //Map<Track, String> trackHrefs = new SpotifyHrefQuery(null).getTrackHrefs(Sets.newHashSet(new Track(Sets.newHashSet("lil wayne","big sean"),"my homies still")));
    //Map<Track, String> trackHrefs = new SpotifyHrefQuery(null).getTrackHrefs(Sets.newHashSet(new Track(Sets.newHashSet("kenny chesney"),"feel like a rock star")));
    Map<Track, String> trackHrefs = new SpotifyHrefQuery(null)
            .getTrackHrefs(Sets.newHashSet(new Track(Sets.newHashSet("ne-yo", "calvin harris"), "let's go")));
    //@formatter:on
    System.out.println(trackHrefs);
}

From source file:org.apache.jackrabbit.oak.scalability.ScalabilityRunner.java

public static void main(String[] args) throws Exception {
    OptionParser parser = new OptionParser();
    OptionSpec<File> base = parser.accepts("base", "Base directory").withRequiredArg().ofType(File.class)
            .defaultsTo(new File("target"));
    OptionSpec<String> host = parser.accepts("host", "MongoDB host").withRequiredArg().defaultsTo("localhost");
    OptionSpec<Integer> port = parser.accepts("port", "MongoDB port").withRequiredArg().ofType(Integer.class)
            .defaultsTo(27017);
    OptionSpec<String> dbName = parser.accepts("db", "MongoDB database").withRequiredArg();
    OptionSpec<Boolean> dropDBAfterTest = parser
            .accepts("dropDBAfterTest", "Whether to drop the MongoDB database after the test").withOptionalArg()
            .ofType(Boolean.class).defaultsTo(true);
    OptionSpec<String> rdbjdbcuri = parser.accepts("rdbjdbcuri", "RDB JDBC URI").withOptionalArg()
            .defaultsTo("jdbc:h2:./target/benchmark");
    OptionSpec<String> rdbjdbcuser = parser.accepts("rdbjdbcuser", "RDB JDBC user").withOptionalArg()
            .defaultsTo("");
    OptionSpec<String> rdbjdbcpasswd = parser.accepts("rdbjdbcpasswd", "RDB JDBC password").withOptionalArg()
            .defaultsTo("");
    OptionSpec<String> rdbjdbctableprefix = parser.accepts("rdbjdbctableprefix", "RDB JDBC table prefix")
            .withOptionalArg().defaultsTo("");
    OptionSpec<Boolean> mmap = parser.accepts("mmap", "TarMK memory mapping").withOptionalArg()
            .ofType(Boolean.class).defaultsTo("64".equals(System.getProperty("sun.arch.data.model")));
    OptionSpec<Integer> cache = parser.accepts("cache", "cache size (MB)").withRequiredArg()
            .ofType(Integer.class).defaultsTo(100);
    OptionSpec<Integer> fdsCache = parser.accepts("blobCache", "cache size (MB)").withRequiredArg()
            .ofType(Integer.class).defaultsTo(32);
    OptionSpec<Boolean> withStorage = parser.accepts("storage", "Index storage enabled").withOptionalArg()
            .ofType(Boolean.class);
    OptionSpec<File> csvFile = parser.accepts("csvFile", "File to write a CSV version of the benchmark data.")
            .withOptionalArg().ofType(File.class);
    OptionSpec help = parser.acceptsAll(asList("h", "?", "help"), "show help").forHelp();
    OptionSpec<String> nonOption = parser.nonOptions();

    OptionSet options = parser.parse(args);

    if (options.has(help)) {
        parser.printHelpOn(System.out);
        System.exit(0);
    }

    int cacheSize = cache.value(options);
    RepositoryFixture[] allFixtures = new RepositoryFixture[] {
            new JackrabbitRepositoryFixture(base.value(options), cacheSize),
            OakRepositoryFixture.getMemoryNS(cacheSize * MB),
            OakRepositoryFixture.getMongo(host.value(options), port.value(options), dbName.value(options),
                    dropDBAfterTest.value(options), cacheSize * MB),
            OakRepositoryFixture.getMongoWithFDS(host.value(options), port.value(options),
                    dbName.value(options), dropDBAfterTest.value(options), cacheSize * MB, base.value(options),
                    fdsCache.value(options)),
            OakRepositoryFixture.getMongoNS(host.value(options), port.value(options), dbName.value(options),
                    dropDBAfterTest.value(options), cacheSize * MB),
            OakRepositoryFixture.getTar(base.value(options), 256, cacheSize, mmap.value(options)),
            OakRepositoryFixture.getTarWithBlobStore(base.value(options), 256, cacheSize, mmap.value(options)),
            OakRepositoryFixture.getSegmentTar(base.value(options), 256, cacheSize, mmap.value(options)),
            OakRepositoryFixture.getSegmentTarWithBlobStore(base.value(options), 256, cacheSize,
                    mmap.value(options)),
            OakRepositoryFixture.getRDB(rdbjdbcuri.value(options), rdbjdbcuser.value(options),
                    rdbjdbcpasswd.value(options), rdbjdbctableprefix.value(options),
                    dropDBAfterTest.value(options), cacheSize * MB),
            OakRepositoryFixture.getRDBWithFDS(rdbjdbcuri.value(options), rdbjdbcuser.value(options),
                    rdbjdbcpasswd.value(options), rdbjdbctableprefix.value(options),
                    dropDBAfterTest.value(options), cacheSize * MB, base.value(options),
                    fdsCache.value(options)) };
    ScalabilitySuite[] allSuites = new ScalabilitySuite[] {
            new ScalabilityBlobSearchSuite(withStorage.value(options)).addBenchmarks(new FullTextSearcher(),
                    new NodeTypeSearcher(), new FormatSearcher(), new FacetSearcher(),
                    new LastModifiedSearcher(Date.LAST_2_HRS), new LastModifiedSearcher(Date.LAST_24_HRS),
                    new LastModifiedSearcher(Date.LAST_7_DAYS), new LastModifiedSearcher(Date.LAST_MONTH),
                    new LastModifiedSearcher(Date.LAST_YEAR), new OrderByDate()),
            new ScalabilityNodeSuite(withStorage.value(options)).addBenchmarks(new OrderBySearcher(),
                    new SplitOrderBySearcher(), new OrderByOffsetPageSearcher(),
                    new SplitOrderByOffsetPageSearcher(), new OrderByKeysetPageSearcher(),
                    new SplitOrderByKeysetPageSearcher(), new MultiFilterOrderBySearcher(),
                    new MultiFilterSplitOrderBySearcher(), new MultiFilterOrderByOffsetPageSearcher(),
                    new MultiFilterSplitOrderByOffsetPageSearcher(), new MultiFilterOrderByKeysetPageSearcher(),
                    new MultiFilterSplitOrderByKeysetPageSearcher(), new ConcurrentReader(),
                    new ConcurrentWriter()),
            new ScalabilityNodeRelationshipSuite(withStorage.value(options))
                    .addBenchmarks(new AggregateNodeSearcher()) };

    Set<String> argset = Sets.newHashSet(nonOption.values(options));
    List<RepositoryFixture> fixtures = Lists.newArrayList();
    for (RepositoryFixture fixture : allFixtures) {
        if (argset.remove(fixture.toString())) {
            fixtures.add(fixture);
        }
    }

    Map<String, List<String>> argmap = Maps.newHashMap();
    // Split the args to get suites and benchmarks (i.e. suite:benchmark1,benchmark2)
    for (String arg : argset) {
        List<String> tokens = Splitter.on(":").limit(2).splitToList(arg);
        if (tokens.size() > 1) {
            argmap.put(tokens.get(0), Splitter.on(",").trimResults().splitToList(tokens.get(1)));
        } else {
            argmap.put(tokens.get(0), null);
        }
    }

    if (argmap.isEmpty()) {
        System.err.println(
                "Warning: no scalability suites specified, " + "supported  are: " + Arrays.asList(allSuites));
    }

    List<ScalabilitySuite> suites = Lists.newArrayList();
    for (ScalabilitySuite suite : allSuites) {
        if (argmap.containsKey(suite.toString())) {
            List<String> benchmarks = argmap.get(suite.toString());
            // Only keep requested benchmarks
            if (benchmarks != null) {
                Iterator<String> iter = suite.getBenchmarks().keySet().iterator();
                for (; iter.hasNext();) {
                    String availBenchmark = iter.next();
                    if (!benchmarks.contains(availBenchmark)) {
                        iter.remove();
                    }
                }
            }
            suites.add(suite);
            argmap.remove(suite.toString());
        }
    }

    if (argmap.isEmpty()) {
        PrintStream out = null;
        if (options.has(csvFile)) {
            out = new PrintStream(FileUtils.openOutputStream(csvFile.value(options), true), false,
                    Charsets.UTF_8.name());
        }
        for (ScalabilitySuite suite : suites) {
            if (suite instanceof CSVResultGenerator) {
                ((CSVResultGenerator) suite).setPrintStream(out);
            }
            suite.run(fixtures);
        }
        if (out != null) {
            out.close();
        }
    } else {
        System.err.println("Unknown arguments: " + argset);
    }
}

From source file:Main.VectorIR.java

static double cosine_similarity(Map<String, Double> v1, Map<String, Double> v2) {
    // Only keys present in both vectors contribute to the dot product.
    Set<String> both = Sets.newHashSet(v1.keySet());
    both.retainAll(v2.keySet());

    double dot = 0, norm1 = 0, norm2 = 0;
    for (String k : both)
        dot += v1.get(k) * v2.get(k);
    for (String k : v1.keySet())
        norm1 += v1.get(k) * v1.get(k);
    for (String k : v2.keySet())
        norm2 += v2.get(k) * v2.get(k);
    double score = dot / Math.sqrt(norm1 * norm2);

    // Guard against 0/0 when either vector is empty or all-zero.
    return Double.isNaN(score) ? 0.0 : score;
}
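
A hedged usage sketch for the method above, with invented term weights:

Map<String, Double> doc1 = Maps.newHashMap();
doc1.put("guava", 2.0);
doc1.put("sets", 1.0);
Map<String, Double> doc2 = Maps.newHashMap();
doc2.put("guava", 1.0);
doc2.put("maps", 3.0);
// Only "guava" overlaps: 2*1 / sqrt((4+1)*(1+9)) is roughly 0.283
double score = cosine_similarity(doc1, doc2);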

From source file:runtime.intrinsic._unique.java

public static ListValue invoke(final ListValue list) {
    final Set<?> set = Sets.newHashSet(list.iterator());
    return PersistentList.init(set.iterator(), set.size());
}
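
Note that newHashSet gives no ordering guarantee, so the deduplicated list comes back in unspecified order; a LinkedHashSet would preserve first-occurrence order if that matters.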

From source file:edu.cmu.lti.oaqa.framework.eval.retrieval.EvaluationHelper.java

public static Set<String> intersect(Set<String> docs, Set<String> gs) {
    Set<String> relevant = Sets.newHashSet(gs);
    relevant.retainAll(docs);
    return relevant;
}
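
A hypothetical call, with invented document ids:

Set<String> retrieved = Sets.newHashSet("d1", "d2", "d3");
Set<String> goldStandard = Sets.newHashSet("d2", "d3", "d4");
// Copying gs inside intersect leaves the caller's gold standard unmodified.
Set<String> relevant = EvaluationHelper.intersect(retrieved, goldStandard); // [d2, d3]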

From source file:com.palantir.util.AssertUtils.java

public static <T> boolean assertListElementsUnique(List<T> l) {
    Set<T> set = Sets.newHashSet(l);
    assert set.size() == l.size();

    return true;
}
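
Because the size comparison sits behind an assert, the check only runs when assertions are enabled; a hedged sketch of the intended call pattern:

List<String> ids = Lists.newArrayList("a", "b", "b");
// With -ea this throws AssertionError (the set collapses to two elements);
// without -ea the call-site assert is skipped entirely, so the check costs nothing.
assert AssertUtils.assertListElementsUnique(ids);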

From source file:com.textocat.textokit.morph.commons.SintagrusUtils.java

public static Set<String> mapToOpenCorpora(Set<String> srcSet) {
    // Defensive copy: the submappers below consume entries from srcSet as they map them.
    srcSet = Sets.newHashSet(srcSet);
    Set<String> result = Sets.newHashSet();
    for (Submapper sm : submappers) {
        sm.apply(srcSet, result);
    }
    if (!srcSet.isEmpty()) {
        throw new IllegalArgumentException(String.format("Unknown grammemes: %s", srcSet));
    }
    return result;
}

From source file:ivorius.ivtoolkit.tools.Pairs.java

public static <T> Iterable<T> both(Iterable<? extends Pair<T, T>> set) {
    return Sets.newHashSet(Iterables.concat(Iterables.transform(set, Pairs.<T>leftFunction()),
            Iterables.transform(set, Pairs.<T>rightFunction())));
}