Example usage for java.util.stream StreamSupport stream

List of usage examples for java.util.stream StreamSupport stream

Introduction

In this page you can find the example usage for java.util.stream StreamSupport stream.

Prototype

public static <T> Stream<T> stream(Spliterator<T> spliterator, boolean parallel) 

Source Link

Document

Creates a new sequential or parallel `Stream` from a `Spliterator`.

Usage

From source file:org.dllearner.algorithms.qtl.experiments.PRConvergenceExperiment.java

/**
 * Entry point for the PR-convergence QTL experiment.
 * <p>
 * Parses the command line (dataset selection, endpoint, evaluation parameters),
 * builds the chosen {@link EvaluationDataset}, configures a
 * {@link PRConvergenceExperiment} and runs it.
 *
 * @param args command-line arguments; see the option definitions below
 * @throws Exception on logging setup, option-parsing I/O, or experiment failure
 */
public static void main(String[] args) throws Exception {
    StringRenderer.setRenderer(Rendering.DL_SYNTAX);
    Logger.getLogger(PRConvergenceExperiment.class)
            .addAppender(new FileAppender(new SimpleLayout(), "log/qtl-qald.log", false));
    Logger.getRootLogger().setLevel(Level.INFO);
    Logger.getLogger(QTL2Disjunctive.class).setLevel(Level.INFO);
    Logger.getLogger(PRConvergenceExperiment.class).setLevel(Level.INFO);
    Logger.getLogger(QueryExecutionFactoryCacheEx.class).setLevel(Level.INFO);

    // ---- command-line interface definition ----
    OptionParser parser = new OptionParser();
    OptionSpec<String> datasetSpec = parser.accepts("dataset", "possible datasets: QALD4-Bio or QALD6-DBpedia")
            .withRequiredArg().ofType(String.class).required();
    OptionSpec<File> benchmarkDirectorySpec = parser.accepts("d", "base directory").withRequiredArg()
            .ofType(File.class).required();
    OptionSpec<File> queriesFileSpec = parser.accepts("q", "processed queries file").withRequiredArg()
            .ofType(File.class);
    OptionSpec<URL> endpointURLSpec = parser.accepts("e", "endpoint URL").withRequiredArg().ofType(URL.class)
            .required();
    OptionSpec<String> defaultGraphSpec = parser.accepts("g", "default graph").withRequiredArg()
            .ofType(String.class);
    OptionSpec<Boolean> overrideSpec = parser.accepts("o", "override previous results").withOptionalArg()
            .ofType(Boolean.class).defaultsTo(Boolean.FALSE);
    OptionSpec<Boolean> write2DBSpec = parser.accepts("db", "write to database").withOptionalArg()
            .ofType(Boolean.class).defaultsTo(Boolean.FALSE);
    OptionSpec<Boolean> emailNotificationSpec = parser.accepts("mail", "enable email notification")
            .withOptionalArg().ofType(Boolean.class).defaultsTo(Boolean.FALSE);
    OptionSpec<Integer> maxNrOfQueriesSpec = parser.accepts("max-queries", "max. nr. of processed queries")
            .withRequiredArg().ofType(Integer.class).defaultsTo(-1);
    OptionSpec<Integer> maxTreeDepthSpec = parser
            .accepts("max-tree-depth", "max. depth of processed queries and generated trees").withRequiredArg()
            .ofType(Integer.class).defaultsTo(2);
    OptionSpec<Integer> maxQTLRuntimeSpec = parser.accepts("max-qtl-runtime", "max. runtime of each QTL run")
            .withRequiredArg().ofType(Integer.class).defaultsTo(60);
    OptionSpec<Integer> nrOfThreadsSpec = parser
            .accepts("thread-count", "number of threads used for parallel evaluation").withRequiredArg()
            .ofType(Integer.class).defaultsTo(1);

    OptionSpec<String> exampleIntervalsSpec = parser
            .accepts("examples", "comma-separated list of number of examples used in evaluation")
            .withRequiredArg().ofType(String.class).defaultsTo("");
    OptionSpec<String> noiseIntervalsSpec = parser
            .accepts("noise", "comma-separated list of noise values used in evaluation").withRequiredArg()
            .ofType(String.class).defaultsTo("");
    OptionSpec<String> measuresSpec = parser
            .accepts("measures", "comma-separated list of measures used in evaluation").withRequiredArg()
            .ofType(String.class);

    OptionSpec<String> queriesToOmitTokensSpec = parser
            .accepts("omitTokens",
                    "comma-separated list of tokens such that queries containing any of them will be omitted")
            .withRequiredArg().ofType(String.class).defaultsTo("");
    // FIX: description previously said "omitted" (copy-paste from omitTokens);
    // this option selects the queries to *process*.
    OptionSpec<String> queriesToProcessTokensSpec = parser
            .accepts("processTokens",
                    "comma-separated list of tokens such that queries containing any of them will be processed")
            .withRequiredArg().ofType(String.class).defaultsTo("");

    OptionSpec<String> databaseNameSpec = parser.accepts("dbName", "database name").withRequiredArg()
            .ofType(String.class);

    OptionSpec<String> cbdSpec = parser.accepts("cbd", "CBD structure tree string").withRequiredArg()
            .ofType(String.class);
    OptionSpec<Boolean> workaroundSpec = parser.accepts("workaround", "Virtuoso parse error workaround enabled")
            .withRequiredArg().ofType(Boolean.class).defaultsTo(Boolean.FALSE);

    // ---- parse arguments ----
    OptionSet options = null;
    try {
        options = parser.parse(args);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        parser.printHelpOn(System.out);
        // FIX: exit with a non-zero status on a parse error (was 0).
        System.exit(1);
    }

    File benchmarkDirectory = options.valueOf(benchmarkDirectorySpec);
    boolean write2DB = options.valueOf(write2DBSpec);
    boolean override = options.valueOf(overrideSpec);
    boolean useEmailNotification = options.valueOf(emailNotificationSpec);
    URL endpointURL = options.valueOf(endpointURLSpec);
    String defaultGraph = options.has(defaultGraphSpec) ? options.valueOf(defaultGraphSpec) : null;
    SparqlEndpoint endpoint = SparqlEndpoint.create(endpointURL.toString(), defaultGraph);
    int maxNrOfQueries = options.valueOf(maxNrOfQueriesSpec);
    int maxTreeDepth = options.valueOf(maxTreeDepthSpec);
    int maxQTLRuntime = options.valueOf(maxQTLRuntimeSpec);
    int nrOfThreads = options.valueOf(nrOfThreadsSpec);

    File queriesFile = null;
    if (options.has(queriesFileSpec)) {
        queriesFile = options.valueOf(queriesFileSpec);
    }

    // "examples" / "noise" are comma-separated lists; empty entries and
    // surrounding whitespace are ignored.
    int[] exampleInterval = null;
    if (options.has(exampleIntervalsSpec)) {
        exampleInterval = StreamSupport
                .stream(Splitter.on(',').omitEmptyStrings().trimResults()
                        .split(options.valueOf(exampleIntervalsSpec)).spliterator(), false)
                .map(Integer::valueOf).mapToInt(Integer::intValue).toArray();
    }

    double[] noiseInterval = null;
    if (options.has(noiseIntervalsSpec)) {
        noiseInterval = StreamSupport
                .stream(Splitter.on(',').omitEmptyStrings().trimResults()
                        .split(options.valueOf(noiseIntervalsSpec)).spliterator(), false)
                .map(Double::valueOf).mapToDouble(Double::doubleValue).toArray();
    }

    // Map measure names to HeuristicType; "mcc" is a special alias.
    HeuristicType[] measures = null;
    if (options.has(measuresSpec)) {
        String s = options.valueOf(measuresSpec);
        String[] split = s.split(",");
        measures = new HeuristicType[split.length];
        for (int i = 0; i < split.length; i++) {
            if (split[i].equalsIgnoreCase("mcc")) {
                measures[i] = HeuristicType.MATTHEWS_CORRELATION;
            } else {
                measures[i] = HeuristicType.valueOf(split[i].toUpperCase());
            }
        }
    }

    List<String> omitTokens = Splitter.on(",").omitEmptyStrings().trimResults()
            .splitToList(options.valueOf(queriesToOmitTokensSpec));
    List<String> processTokens = Splitter.on(",").omitEmptyStrings().trimResults()
            .splitToList(options.valueOf(queriesToProcessTokensSpec));

    // ---- dataset selection ----
    String datasetName = options.valueOf(datasetSpec);
    EvaluationDataset dataset;
    switch (datasetName) {
    case "QALD4-Bio":
        dataset = new QALD4BiomedicalChallengeEvaluationDataset(benchmarkDirectory);
        break;
    case "QALD6-DBpedia":
        dataset = new QALD6DBpediaEvaluationDataset(benchmarkDirectory);
        break;
    default:
        throw new RuntimeException("Unsupported dataset:" + datasetName);
    }

    String databaseName = options.valueOf(databaseNameSpec);

    // FIX: was options.has(options.valueOf(cbdSpec)) — that passes the option's
    // *value* (or null when absent) to has(...), so the check never worked.
    CBDStructureTree cbdStructureTree = options.has(cbdSpec)
            ? CBDStructureTree.fromTreeString(options.valueOf(cbdSpec).trim())
            : null;

    // ---- configure and run the experiment ----
    PRConvergenceExperiment eval = new PRConvergenceExperiment(dataset, benchmarkDirectory, write2DB,
            databaseName, override, maxQTLRuntime, useEmailNotification, nrOfThreads);
    eval.setQueriesToOmitTokens(omitTokens);
    eval.setQueriesToProcessTokens(processTokens);
    eval.setDatabaseName(databaseName);
    eval.setDefaultCbdStructure(cbdStructureTree);
    eval.setWorkaroundEnabled(options.valueOf(workaroundSpec), endpoint);
    eval.run(maxNrOfQueries, maxTreeDepth, exampleInterval, noiseInterval, measures);
}

From source file:com.ikanow.aleph2.shared.crud.elasticsearch.services.TestElasticsearchCrudService.java

public void test_JsonRepositoryCalls_common(final ICrudService<JsonNode> service,
        ICrudService<JsonNode> original) throws InterruptedException, ExecutionException {

    // --- Single-object retrieval by id ---

    final Future<Optional<JsonNode>> singleGet = service.getObjectById("id1");

    assertEquals(
            "{\"_index\":\"testjsonrepositorycalls\",\"_type\":\"test\",\"test_string\":\"test_string1\",\"_id\":\"id1\",\"test_long\":1}",
            singleGet.get().get().toString());

    // --- Multi-object retrieval: range query with projection, ordering and a limit of 4 ---

    final QueryComponent<JsonNode> rangeQuery = CrudUtils.allOf().rangeAbove("test_string", "test_string4", false)
            .withPresent("test_long").orderBy(Tuples._2T("test_long", 1)).limit(4);

    try (Cursor<JsonNode> results = service.getObjectsBySpec(rangeQuery, Arrays.asList("test_string"), false)
            .get()) {

        // count() reports the total number of matches, ignoring the limit
        assertEquals(6, results.count());

        final List<JsonNode> returned = StreamSupport
                .stream(Optionals.ofNullable(results).spliterator(), false).collect(Collectors.toList());

        // ...but iteration respects the limit
        assertEquals(4, returned.size());

        assertEquals(
                "{\"_index\":\"testjsonrepositorycalls\",\"_type\":\"test\",\"_id\":\"id4\",\"test_long\":4}",
                returned.get(0).toString());
    } catch (Exception e) {
        // A failure on close would normally be carried on from; here it fails the test.
        fail("getObjectsBySpec errored on close: " + ErrorUtils.getLongForm("{0}", e));
    }

    // --- Bulk delete by spec ---

    assertEquals(10L, (long) service.countObjects().get());

    final QueryComponent<JsonNode> deleteQuery = CrudUtils.allOf().rangeAbove("test_string", "test_string4", false)
            .withPresent("test_long").orderBy(Tuples._2T("test_long", 1)).limit(4);

    assertEquals(4L, (long) service.deleteObjectsBySpec(deleteQuery).get());

    // Poll every 250ms (up to 5s) for the deletes to become visible
    for (int waitedMs = 0; waitedMs < 5000L; waitedMs += 250) {
        if (0L == service.countObjects().join().longValue()) {
            System.out.println("(objects deleted after " + waitedMs + " ms)");
            break;
        }
        Thread.sleep(250L);
    }
    assertEquals(6L, service.countObjects().join().longValue());

    //TODO: also need to do an update and a findAndModify
}

From source file:com.ikanow.aleph2.shared.crud.elasticsearch.services.TestElasticsearchCrudService.java

// Exercises size-based index rollover with aliasing: a max index size of 0 forces
// a new index to be created once the (periodic) size check runs, and the test
// verifies that the "r__<base>" read alias tracks the indexes created for the
// base name. Timing note: the Thread.sleep() calls below wait for what is
// presumably a background size-check/alias-update task — TODO confirm intervals.
@Test
public void test_checkMaxIndexSize_createAliases() throws InterruptedException, ExecutionException {
    // Max size 0L => every size check triggers a rollover; last arg true => create aliases
    final ElasticsearchCrudService<TestBean> service = getTestService("test_checkMaxIndexSize", TestBean.class,
            false, true, Optional.empty(), Optional.of(0L), true);

    // 1) Write a doc and check that it is written to the base index (delete first to check that case)

    assertEquals(0, service.countObjects().get().intValue());

    // 1) Add a new object to an empty DB
    { // first insert goes to the bare base index
        final TestBean test = new TestBean();
        test._id = "_id_1";
        test.test_string = "test_string_1";

        final Future<Supplier<Object>> result = service.storeObject(test);
        result.get();

        // Should have been added to the base index

        IndicesStatsResponse stats = service._state.client.admin().indices()
                .prepareStats("test_checkmaxindexsize*").setStore(true).setDocs(true).execute().actionGet();

        assertEquals(1, stats.getIndices().size());
        assertTrue("Base index: " + stats.getIndices().keySet(),
                null != stats.getIndex("test_checkmaxindexsize"));
        assertEquals(1L, stats.getIndex("test_checkmaxindexsize").getTotal().getDocs().getCount());
    }

    // 2) Add another object, check that it adds it to the same index
    {
        final TestBean test = new TestBean();
        test._id = "_id_2";
        test.test_string = "test_string_2";

        final Future<Supplier<Object>> result = service.storeObject(test);
        result.get();

        // Should have been added to the base index

        IndicesStatsResponse stats = service._state.client.admin().indices()
                .prepareStats("test_checkmaxindexsize*").setStore(true).setDocs(true).execute().actionGet();

        assertEquals(1, stats.getIndices().size());
        assertTrue("Base index: " + stats.getIndices().keySet(),
                null != stats.getIndex("test_checkmaxindexsize"));
        assertEquals(2L, stats.getIndex("test_checkmaxindexsize").getTotal().getDocs().getCount());
    }

    // 3+4) Now wait 10s to be up, add more objects, check that they gets added to another index
    // (add 2, the first one should go on the old one, the second after a wait should go on the new one)

    Thread.sleep(11000L);
    {
        //(First off, back from 2) .. check that we now have an alias for test_checkmaxindexsize)
        //(for some reason prepareGetAliases didn't work, but this does)
        ClusterStateResponse csr = service._state.client.admin().cluster().prepareState()
                .setIndices("test_checkmaxindexsize*").setRoutingTable(false).setNodes(false)
                .setListenerThreaded(false).get();
        // exactly one alias, "r__test_checkmaxindexsize", pointing at the base index only
        assertEquals(1, csr.getState().getMetaData().aliases().size());
        assertTrue("Found an alias for test_checkmaxindexsize",
                null != csr.getState().getMetaData().getAliases().get("r__test_checkmaxindexsize"));
        assertEquals(1, csr.getState().getMetaData().getAliases().get("r__test_checkmaxindexsize").size());
        assertEquals("test_checkmaxindexsize",
                csr.getState().getMetaData().getAliases().get("r__test_checkmaxindexsize").keysIt().next());

        final TestBean test = new TestBean();
        test._id = "_id_3";
        test.test_string = "test_string_3";
        final TestBean test2 = new TestBean();
        test2._id = "_id_4";
        test2.test_string = "test_string_4";

        // first store lands on the old index; after a 1s pause the second lands on the rolled-over one
        final Future<Supplier<Object>> result = service.storeObject(test);
        result.get();
        Thread.sleep(1000L);
        final Future<Supplier<Object>> result2 = service.storeObject(test2);
        result2.get();

        // Should have been added to the base index

        IndicesStatsResponse stats = service._state.client.admin().indices()
                .prepareStats("test_checkmaxindexsize*").setStore(true).setDocs(true).execute().actionGet();

        // now two indexes: base holds 3 docs, "_1" holds the fourth
        assertEquals(2, stats.getIndices().size());
        assertTrue("Base index: " + stats.getIndices().keySet(),
                null != stats.getIndex("test_checkmaxindexsize"));
        assertEquals(3L, stats.getIndex("test_checkmaxindexsize").getTotal().getDocs().getCount());
        assertTrue("Second index: " + stats.getIndices().keySet(),
                null != stats.getIndex("test_checkmaxindexsize_1"));
        assertEquals(1L, stats.getIndex("test_checkmaxindexsize_1").getTotal().getDocs().getCount());
    }

    // 5) Get a new context for the same service with a larger size, check that it writes to an existing one
    {
        // max size 100L: no rollover needed, so the write reuses the base index
        final ElasticsearchCrudService<TestBean> service2 = getTestService("test_checkMaxIndexSize",
                TestBean.class, false, false, Optional.empty(), Optional.of(100L), true);
        final TestBean test = new TestBean();
        test._id = "_id_5";
        test.test_string = "test_string_5";

        final Future<Supplier<Object>> result = service2.storeObject(test);
        result.get();
        Thread.sleep(1000L);

        IndicesStatsResponse stats = service._state.client.admin().indices()
                .prepareStats("test_checkmaxindexsize*").setStore(true).setDocs(true).execute().actionGet();

        // still two indexes; the new doc went to the base index (4 docs now)
        assertEquals(2, stats.getIndices().size());
        assertTrue("Base index: " + stats.getIndices().keySet(),
                null != stats.getIndex("test_checkmaxindexsize"));
        assertEquals(4L, stats.getIndex("test_checkmaxindexsize").getTotal().getDocs().getCount());
        assertTrue("Second index: " + stats.getIndices().keySet(),
                null != stats.getIndex("test_checkmaxindexsize_1"));
        assertEquals(1L, stats.getIndex("test_checkmaxindexsize_1").getTotal().getDocs().getCount());
    }

    // 6) Get a new context for the same service with the same zero size, check that it writes to a new one (immediately)
    // (also don't create alias for this one)
    {
        // max size 0L again, aliasing disabled: a third index "_2" is created immediately
        final ElasticsearchCrudService<TestBean> service2 = getTestService("test_checkMaxIndexSize",
                TestBean.class, false, false, Optional.empty(), Optional.of(0L), false);
        final TestBean test = new TestBean();
        test._id = "_id_6";
        test.test_string = "test_string_6";

        final Future<Supplier<Object>> result = service2.storeObject(test);
        result.get();
        Thread.sleep(1000L);

        IndicesStatsResponse stats = service._state.client.admin().indices()
                .prepareStats("test_checkmaxindexsize*").setStore(true).setDocs(true).execute().actionGet();

        assertEquals(3, stats.getIndices().size());
        assertTrue("Base index: " + stats.getIndices().keySet(),
                null != stats.getIndex("test_checkmaxindexsize"));
        assertEquals(4L, stats.getIndex("test_checkmaxindexsize").getTotal().getDocs().getCount());
        assertTrue("Second index: " + stats.getIndices().keySet(),
                null != stats.getIndex("test_checkmaxindexsize_1"));
        assertEquals(1L, stats.getIndex("test_checkmaxindexsize_1").getTotal().getDocs().getCount());
        assertTrue("Third index: " + stats.getIndices().keySet(),
                null != stats.getIndex("test_checkmaxindexsize_2"));
        assertEquals(1L, stats.getIndex("test_checkmaxindexsize_2").getTotal().getDocs().getCount());
    }

    // (4/5/6) - Check aliases:
    // the read alias should now cover the base index and "_1" (but not "_2", created with aliasing off)
    Thread.sleep(3000L);
    ClusterStateResponse csr = service._state.client.admin().cluster().prepareState()
            .setIndices("test_checkmaxindexsize*").setRoutingTable(false).setNodes(false)
            .setListenerThreaded(false).get();
    assertEquals(1, csr.getState().getMetaData().aliases().size());
    assertTrue("Found an alias for test_checkmaxindexsize",
            null != csr.getState().getMetaData().getAliases().get("r__test_checkmaxindexsize"));
    assertEquals(2, csr.getState().getMetaData().getAliases().get("r__test_checkmaxindexsize").size());
    assertEquals("test_checkmaxindexsize:test_checkmaxindexsize_1", StreamSupport.stream(
            csr.getState().getMetaData().getAliases().get("r__test_checkmaxindexsize").keys().spliterator(),
            false).map(x -> x.value).sorted().collect(Collectors.joining(":")));

    // 7) Check that delete datastore removes all the indexes in the context

    service.deleteDatastore().get();

    IndicesStatsResponse stats = service._state.client.admin().indices().prepareStats("test_checkmaxindexsize*")
            .setStore(true).setDocs(true).execute().actionGet();

    assertEquals(0, stats.getIndices().size());
}

From source file:com.joyent.manta.client.MantaClient.java

/**
 * Gets all of the Manta jobs' IDs as a real-time {@link Stream} from
 * the Manta API. <strong>Make sure to close this stream when you are done with
 * it, otherwise the HTTP socket will remain open.</strong>
 *
 * @return a stream with all of the job IDs (actually all that Manta will give us)
 */
public Stream<UUID> getAllJobIds() {
    final String jobsPath = String.format("%s/jobs", config.getMantaHomeDirectory());

    final MantaDirectoryListingIterator listing = new MantaDirectoryListingIterator(jobsPath, httpHelper,
            MAX_RESULTS);

    // Track the iterator so its underlying HTTP resources can be cleaned up later
    danglingStreams.add(listing);

    final Spliterator<Map<String, Object>> entries = Spliterators.spliteratorUnknownSize(listing,
            Spliterator.ORDERED | Spliterator.NONNULL);

    // Each directory entry's "name" field holds the job's UUID
    return StreamSupport.stream(entries, false)
            .map(entry -> UUID.fromString(Objects.toString(entry.get("name"))));
}

From source file:com.evolveum.midpoint.gui.api.util.WebComponentUtil.java

/**
 * Returns {@code true} iff every element of the given iterable is {@code null}
 * (vacuously true for an empty iterable).
 *
 * @param array the elements to check; must not itself be null
 * @return true when no non-null element exists
 */
public static boolean isAllNulls(Iterable<?> array) {
    // FIX: was a parallel stream — pure overhead for a short-circuiting
    // null scan; sequential gives the identical result.
    return StreamSupport.stream(array.spliterator(), false).allMatch(o -> o == null);
}

From source file:org.kie.workbench.common.dmn.backend.DMNMarshallerTest.java

/**
 * Returns the first node (in iteration order) whose content definition is an
 * instance of the given class.
 *
 * @param nodesIterator iterator over the graph nodes to search
 * @param aClass        the definition type to look for
 * @throws java.util.NoSuchElementException if no matching node exists
 */
private static Node<View, ?> nodeOfDefinition(final Iterator<Node<View, ?>> nodesIterator, final Class aClass) {
    return StreamSupport.stream(Spliterators.spliteratorUnknownSize(nodesIterator, Spliterator.NONNULL), false)
            .filter(node -> aClass.isInstance(node.getContent().getDefinition()))
            // FIX: unchecked Optional.get() replaced with orElseThrow carrying a
            // diagnostic message (same NoSuchElementException type as before).
            .findFirst()
            .orElseThrow(() -> new java.util.NoSuchElementException(
                    "No node found with definition of type " + aClass.getName()));
}