Example usage for java.util.stream StreamSupport stream

Introduction

This page collects usage examples for java.util.stream.StreamSupport.stream.

Prototype

public static <T> Stream<T> stream(Spliterator<T> spliterator, boolean parallel) 

Document

Creates a new sequential or parallel Stream from a Spliterator.
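
Before the project examples below, here is a minimal, self-contained sketch of the core idiom (our own illustration, not taken from any of the sources on this page; the class name is invented). Any Iterable exposes a spliterator, and StreamSupport.stream wraps it in a Stream; the boolean argument selects sequential (false) or parallel (true) execution.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

public class StreamSupportExample {
    public static void main(String[] args) {
        // Iterable has no stream() method of its own, so go via its spliterator
        Iterable<String> iterable = Arrays.asList("alpha", "beta", "gamma");

        // false => sequential stream; pass true to request a parallel stream instead
        Stream<String> stream = StreamSupport.stream(iterable.spliterator(), false);

        List<String> upper = stream.map(String::toUpperCase).collect(Collectors.toList());
        System.out.println(upper); // prints [ALPHA, BETA, GAMMA]
    }
}

Most of the examples below follow exactly this pattern: obtain a spliterator from some Iterable-like result (a database cursor, a bulk response, a graph query), wrap it with StreamSupport.stream(..., false), and collect.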

Usage

From source file:com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_TestBuckets.java

/**
 * Returns all test sources that aren't marked as completed or errored.
 * @param source_test_db the CRUD service holding the test queue
 * @return a future completing with the list of matching test queue entries
 */
protected CompletableFuture<List<TestQueueBean>> getAllTestSources(
        final ICrudService<TestQueueBean> source_test_db) {
    final QueryComponent<TestQueueBean> get_query = CrudUtils.allOf(TestQueueBean.class)
            .whenNot(TestQueueBean::status, TestStatus.completed)
            .whenNot(TestQueueBean::status, TestStatus.error); // status can be completed | error | in_progress | submitted | {unset/anything else}

    final QueryComponent<TestQueueBean> update_query = CrudUtils.allOf(TestQueueBean.class)
            .whenNot(TestQueueBean::status, TestStatus.in_progress)
            .whenNot(TestQueueBean::status, TestStatus.completed)
            .whenNot(TestQueueBean::status, TestStatus.error); // status can be completed | error | in_progress | submitted | {unset/anything else}

    final UpdateComponent<TestQueueBean> update_command = CrudUtils.update(TestQueueBean.class)
            .set(TestQueueBean::status, TestStatus.in_progress);
    // (don't set started_processing_on - only set that once the job has been launched)

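    // Read all matching test sources, materializing the cursor into a List via a sequential stream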
    final CompletableFuture<List<TestQueueBean>> get_command = source_test_db.getObjectsBySpec(get_query)
            .thenApply(c -> StreamSupport.stream(c.spliterator(), false).collect(Collectors.toList()));

    return get_command.thenCompose(__ -> {
        return source_test_db.updateObjectsBySpec(update_query, Optional.of(false), update_command);
    }).thenApply(__ -> get_command.join()); // (ie return the original command but only once the update has completed)
}

From source file:com.ikanow.aleph2.shared.crud.elasticsearch.services.ElasticsearchCrudService.java

@Override
public CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>> storeObjects(final List<O> new_objects,
        final boolean replace_if_present) {
    try {
        final ReadWriteContext rw_context = getRwContextOrThrow(_state.es_context, "storeObjects");

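        // Fold every object into a single bulk request; the combiner just throws because a
        // sequential stream never invokes it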
        final BulkRequestBuilder brb = new_objects.stream()
                .reduce(_state.client.prepareBulk().setConsistencyLevel(WriteConsistencyLevel.ONE)
                        .setRefresh(CreationPolicy.AVAILABLE_IMMEDIATELY == _state.creation_policy),
                        (acc, val) -> acc.add(singleObjectIndexRequest(Either.left(rw_context),
                                Either.left(val), replace_if_present, true)),
                        (acc1, acc2) -> {
                            throw new RuntimeException("Internal logic error - Parallel not supported");
                        });

        final BiConsumer<BulkResponse, CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>>> action_handler = new BiConsumer<BulkResponse, CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>>>() {
            // WARNING: mutable/imperative code ahead...
            long _curr_written = 0;
            List<Object> _id_list = null;
            HashMap<String, String> _mapping_failures = null;

            @Override
            public void accept(final BulkResponse result,
                    final CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>> future) {

                if (result.hasFailures() && (rw_context
                        .typeContext() instanceof ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext)) {
                    final ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext auto_context = (ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext) rw_context
                            .typeContext();
                    // Recursive builder in case I need to build a second batch of docs                        
                    BulkRequestBuilder brb2 = null;

                    if (null == _id_list) {
                        _id_list = new LinkedList<Object>();
                    }
                    HashMap<String, String> temp_mapping_failures = null;
                    final Iterator<BulkItemResponse> it = result.iterator();
                    while (it.hasNext()) {
                        final BulkItemResponse bir = it.next();
                        if (bir.isFailed()) {
                            if (bir.getFailure().getMessage().startsWith("MapperParsingException")) {
                                final Set<String> fixed_type_fields = rw_context.typeContext()
                                        .fixed_type_fields();
                                if (!fixed_type_fields.isEmpty()) {
                                    // Obtain the field name from the exception (if we fail then drop the record) 
                                    final String field = getFieldFromParsingException(
                                            bir.getFailure().getMessage());
                                    if ((null == field) || fixed_type_fields.contains(field)) {
                                        continue;
                                    }
                                } //(else roll on to...)                                                

                                // OK this is the case where I might be able to apply auto types:
                                if (null == brb2) {
                                    brb2 = _state.client.prepareBulk()
                                            .setConsistencyLevel(WriteConsistencyLevel.ONE).setRefresh(
                                                    CreationPolicy.AVAILABLE_IMMEDIATELY == _state.creation_policy);
                                }
                                String failed_json = null;
                                if (null == _mapping_failures) { // first time through, use item id to grab the objects from the original request
                                    if (null == temp_mapping_failures) {
                                        temp_mapping_failures = new HashMap<String, String>();
                                    }
                                    final ActionRequest<?> ar = brb.request().requests().get(bir.getItemId());
                                    if (ar instanceof IndexRequest) {
                                        IndexRequest ir = (IndexRequest) ar;
                                        failed_json = ir.source().toUtf8();
                                        temp_mapping_failures.put(bir.getId(), failed_json);
                                    }
                                } else { // have already grabbed all the failure _ids and stuck in a map
                                    failed_json = _mapping_failures.get(bir.getId());
                                }
                                if (null != failed_json) {
                                    brb2.add(singleObjectIndexRequest(
                                            Either.right(Tuples._2T(bir.getIndex(),
                                                    ElasticsearchContextUtils.getNextAutoType(
                                                            auto_context.getPrefix(), bir.getType()))),
                                            Either.right(Tuples._2T(bir.getId(), failed_json)), false, true));
                                }
                            }
                            // Ugh otherwise just silently fail I guess? 
                            //(should I also look for transient errors and resubmit them after a pause?!)
                        } else { // (this item worked)
                            _id_list.add(bir.getId());
                            _curr_written++;
                        }
                    }
                    if (null != brb2) { // found mapping errors to retry with
                        if (null == _mapping_failures) // (first level of recursion)
                            _mapping_failures = temp_mapping_failures;

                        // (note that if brb2.request().requests().isEmpty() this is an internal logic error, so it's OK to throw)
                        ElasticsearchFutureUtils.wrap(brb2.execute(), future, this, (error, future2) -> {
                            future2.completeExceptionally(error);
                        });
                    } else { // relative success, plus we've built the list anyway
                        future.complete(Tuples._2T(() -> _id_list, () -> (Long) _curr_written));
                    }
                } else { // No errors with this iteration of the bulk request         
                    _curr_written += result.getItems().length;

                    if (null == _id_list) { // This is the first bulk request, no recursion on failures, so can lazily create the list in case it isn't needed
                        final Supplier<List<Object>> get_objects = () -> {
                            return StreamSupport.stream(result.spliterator(), false)
                                    .filter(bir -> !bir.isFailed()).map(bir -> bir.getId())
                                    .collect(Collectors.toList());
                        };
                        final Supplier<Long> get_count_workaround = () -> {
                            return StreamSupport.stream(result.spliterator(), false)
                                    .filter(bir -> !bir.isFailed()).collect(Collectors.counting());
                        };
                        get_count_workaround.get();
                        future.complete(Tuples._2T(get_objects, get_count_workaround));
                    } else { // have already calculated everything so just return it                     
                        future.complete(Tuples._2T(() -> _id_list, () -> (Long) _curr_written));
                    }
                }
            }
        };

        return ElasticsearchFutureUtils.wrap(brb.execute(),
                new CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>>(), action_handler,
                (error, future) -> {
                    future.completeExceptionally(error);
                });
    } catch (Exception e) {
        return FutureUtils.returnError(e);
    }
}

From source file:com.ikanow.aleph2.data_import.services.HarvestContext.java

@Override
public CompletableFuture<Map<String, String>> getHarvestLibraries(final Optional<DataBucketBean> bucket) {
    if (_state_name == State.IN_TECHNOLOGY) {

        final DataBucketBean my_bucket = bucket.orElseGet(() -> _mutable_state.bucket.get());

        final SingleQueryComponent<SharedLibraryBean> tech_query = CrudUtils.anyOf(SharedLibraryBean.class)
                .when(SharedLibraryBean::_id, my_bucket.harvest_technology_name_or_id())
                .when(SharedLibraryBean::path_name, my_bucket.harvest_technology_name_or_id());

        final List<SingleQueryComponent<SharedLibraryBean>> other_libs = Optionals
                .ofNullable(my_bucket.harvest_configs()).stream()
                .flatMap(hcfg -> Optionals.ofNullable(hcfg.library_names_or_ids()).stream()).map(name -> {
                    return CrudUtils.anyOf(SharedLibraryBean.class).when(SharedLibraryBean::_id, name)
                            .when(SharedLibraryBean::path_name, name);
                }).collect(Collector.of(LinkedList::new, LinkedList::add, (left, right) -> {
                    left.addAll(right);
                    return left;
                }));

        @SuppressWarnings("unchecked")
        final MultiQueryComponent<SharedLibraryBean> spec = CrudUtils.<SharedLibraryBean>anyOf(tech_query,
                other_libs.toArray(new SingleQueryComponent[other_libs.size()]));

        // Get the names or ids, get the shared libraries, get the cached ids (must be present)

        return this._core_management_db.readOnlyVersion().getSharedLibraryStore()
                .getObjectsBySpec(spec, Arrays.asList(JsonUtils._ID, "path_name"), true).thenApply(cursor -> {
                    return StreamSupport.stream(cursor.spliterator(), false)
                            .collect(Collectors.<SharedLibraryBean, String, String>toMap(lib -> lib.path_name(),
                                    lib -> _globals.local_cached_jar_dir() + "/"
                                            + JarCacheUtils.buildCachedJarName(lib)));
                });
    } else {
        throw new RuntimeException(ErrorUtils.TECHNOLOGY_NOT_MODULE);
    }
}

From source file:org.onosproject.p4runtime.ctl.P4RuntimeClientImpl.java

private Collection<PiMeterCellConfig> doReadMeterEntities(Collection<Entity> entitiesToRead,
        PiPipeconf pipeconf) {

    if (entitiesToRead.isEmpty()) {
        return Collections.emptyList();
    }

    final ReadRequest request = ReadRequest.newBuilder().setDeviceId(p4DeviceId).addAllEntities(entitiesToRead)
            .build();

    final Iterable<ReadResponse> responses = () -> blockingStub.read(request);

    final List<Entity> responseEntities;
    try {
        // blockingStub.read(...) only executes when the iterable is consumed, so any
        // StatusRuntimeException surfaces here, while the responses are being collected
        responseEntities = StreamSupport.stream(responses.spliterator(), false)
                .map(ReadResponse::getEntitiesList).flatMap(List::stream).collect(Collectors.toList());
    } catch (StatusRuntimeException e) {
        log.warn("Unable to read meter cells: {}", e.getMessage());
        log.debug("exception", e);
        return Collections.emptyList();
    }

    return MeterEntryCodec.decodeMeterEntities(responseEntities, pipeconf);
}

From source file:org.hawkular.inventory.impl.tinkerpop.test.BasicTest.java

@Test
public void testEnvironments() throws Exception {
    BiFunction<String, String, Void> test = (tenantId, id) -> {
        GremlinPipeline<Graph, Vertex> q = new GremlinPipeline<Graph, Vertex>(graph).V().has("__type", "tenant")
                .has("__eid", tenantId).out("contains").has("__type", "environment").has("__eid", id)
                .cast(Vertex.class);

        Iterator<Vertex> envs = q.iterator();
        assert envs.hasNext();
        envs.next();
        assert !envs.hasNext();

        // querying via the inventory API should yield the same result
        Environment env = inventory.tenants().get(tenantId).environments().get(id).entity();
        assert env.getId().equals(id);

        return null;
    };

    test.apply("com.acme.tenant", "production");
    test.apply("com.example.tenant", "test");

    GraphQuery query = graph.query().has("__type", "environment");
    assert StreamSupport.stream(query.vertices().spliterator(), false).count() == 2;
}

From source file:org.hawkular.inventory.impl.tinkerpop.test.BasicTest.java

@Test
public void testResourceTypes() throws Exception {
    BiFunction<String, String, Void> test = (tenantId, id) -> {
        GremlinPipeline<Graph, Vertex> q = new GremlinPipeline<Graph, Vertex>(graph).V().has("__type", "tenant")
                .has("__eid", tenantId).out("contains").has("__type", "resourceType").has("__eid", id)
                .has("__version", "1.0").cast(Vertex.class);

        assert q.hasNext();

        ResourceType rt = inventory.tenants().get(tenantId).resourceTypes().get(id).entity();
        assert rt.getId().equals(id);

        return null;
    };

    test.apply("com.acme.tenant", "URL");
    test.apply("com.example.tenant", "Kachna");
    test.apply("com.example.tenant", "Playroom");

    GraphQuery query = graph.query().has("__type", "resourceType");
    assert StreamSupport.stream(query.vertices().spliterator(), false).count() == 3;
}

From source file:org.hawkular.inventory.impl.tinkerpop.test.BasicTest.java

@Test
public void testMetricDefinitions() throws Exception {
    BiFunction<String, String, Void> test = (tenantId, id) -> {

        GremlinPipeline<Graph, Vertex> q = new GremlinPipeline<Graph, Vertex>(graph).V().has("__type", "tenant")
                .has("__eid", tenantId).out("contains").has("__type", "metricType").has("__eid", id)
                .cast(Vertex.class);

        assert q.hasNext();

        MetricType md = inventory.tenants().get(tenantId).metricTypes().get(id).entity();
        assert md.getId().equals(id);

        return null;
    };

    test.apply("com.acme.tenant", "ResponseTime");
    test.apply("com.example.tenant", "Size");

    GraphQuery query = graph.query().has("__type", "metricType");
    assert StreamSupport.stream(query.vertices().spliterator(), false).count() == 2;
}

From source file:com.complexible.pinto.RDFMapper.java

/**
 * Get or generate an rdf:ID for the given object.
 * @param theT the object
 * @return the rdf:ID
 */
private <T> Resource id(final T theT) {
    if (theT instanceof Identifiable) {
        Identifiable aIdentifiable = (Identifiable) theT;

        if (aIdentifiable.id() != null) {
            return aIdentifiable.id();
        }
    }

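    // Lazily stream the declared methods and keep only the properties annotated with @RdfId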
    final Iterable<String> aProps = () -> StreamSupport
            .stream(Beans.getDeclaredMethods(theT.getClass()).spliterator(), false)
            .filter(Methods.annotated(RdfId.class)).map(Methods.property()).iterator();

    // Sort the properties so they're always iterated over in the same order. Since the hash is sensitive
    // to iteration order, the same inputs in a different order would yield a different hash value, and
    // thus a different ID, even though it's the *same* resource.
    final List<String> aSorted = Ordering.natural().sortedCopy(aProps);

    Resource aId = null;
    if (!Iterables.isEmpty(aSorted)) {
        Hasher aFunc = Hashing.md5().newHasher();
        for (String aProp : aSorted) {
            try {
                final Object aValue = PropertyUtils.getProperty(theT, aProp);

                if (aValue == null) {
                    continue;
                }

                aFunc.putString(aValue.toString(), Charsets.UTF_8);
            } catch (Exception e) {
                Throwables.propagateIfInstanceOf(e, RDFMappingException.class);
                throw new RDFMappingException(e);
            }
        }

        aId = mValueFactory.createIRI(mDefaultNamespace + aFunc.hash().toString());
    }

    for (Map.Entry<Class<?>, Function<Object, Resource>> aEntry : mIdFunctions.entrySet()) {
        if (aEntry.getKey().isAssignableFrom(theT.getClass())) {
            aId = aEntry.getValue().apply(theT);
            break;
        }
    }

    if (aId == null && mMappingOptions.is(MappingOptions.REQUIRE_IDS)) {
        throw new UnidentifiableObjectException(
                String.format(
                        "No identifier was found for %s!  The instance should "
                                + "implement Identifiable, have one or more properties "
                                + "annotated with @RdfId, or have an id function provided " + "to the mapper.",
                        theT));
    } else {
        if (aId == null) {
            aId = mValueFactory.createIRI(mDefaultNamespace
                    + Hashing.md5().newHasher().putString(theT.toString(), Charsets.UTF_8).hash().toString());
        }

        if (theT instanceof Identifiable) {
            ((Identifiable) theT).id(aId);
        }

        return aId;
    }
}

From source file:com.hotelbeds.distribution.hotel_api_sdk.HotelApiClient.java

private <T> List<T> getAllElements(final String language, final boolean useSecondaryLanguage, ContentType type)
        throws HotelApiSDKException {
    try {
        return StreamSupport
                .stream(new ContentElementSpliterator<T>(this, type,
                        generateDefaultFullRequest(language, useSecondaryLanguage, type)), false)
                .collect(Collectors.toList());
    } catch (InstantiationException | IllegalAccessException e) {
        throw new HotelApiSDKException(
                new HotelbedsError("SDK Configuration error", e.getCause().getMessage()));
    }
}

From source file:org.hawkular.inventory.impl.tinkerpop.test.BasicTest.java

@Test
public void testMetrics() throws Exception {
    TetraFunction<String, String, String, String, Void> test = (tenantId, environmentId, metricDefId, id) -> {
        GremlinPipeline<Graph, Vertex> q = new GremlinPipeline<Graph, Vertex>(graph).V().has("__type", "tenant")
                .has("__eid", tenantId).out("contains").has("__type", "environment").has("__eid", environmentId)
                .out("contains").has("__type", "metric").has("__eid", id).as("metric").in("defines")
                .has("__type", "metricType").has("__eid", metricDefId).back("metric").cast(Vertex.class);

        assert q.hasNext();

        Metric m = inventory.tenants().get(tenantId).environments().get(environmentId).feedlessMetrics()
                .getAll(Defined.by(new MetricType(tenantId, metricDefId)), With.id(id)).entities().iterator()
                .next();
        assert m.getId().equals(id);

        return null;
    };

    test.apply("com.acme.tenant", "production", "ResponseTime", "host1_ping_response");
    test.apply("com.example.tenant", "test", "Size", "playroom1_size");
    test.apply("com.example.tenant", "test", "Size", "playroom2_size");

    GraphQuery query = graph.query().has("__type", "metric");
    Assert.assertEquals(4, StreamSupport.stream(query.vertices().spliterator(), false).count());
}