Example usage for java.util.stream.Stream.empty()

List of usage examples for java.util.stream.Stream.empty()

Introduction

On this page you can find example usage for java.util.stream.Stream.empty().

Prototype

public static <T> Stream<T> empty() 

Document

Returns an empty sequential Stream.
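
As a quick orientation before the examples below, here is a minimal, self-contained sketch of the most common use: returning Stream.empty() instead of null so callers can keep chaining stream operations. All names in the sketch are illustrative.

import java.util.stream.Stream;

public class StreamEmptyExample {
    // Return the words of a line, or an empty stream for null/blank input,
    // so callers never need a null check before chaining further operations.
    static Stream<String> words(String line) {
        if (line == null || line.trim().isEmpty()) {
            return Stream.empty();
        }
        return Stream.of(line.trim().split("\\s+"));
    }

    public static void main(String[] args) {
        System.out.println(words("an empty sequential stream").count()); // 4
        System.out.println(words(null).count());                         // 0
    }
}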

Usage

From source file:com.ikanow.aleph2.graph.titan.utils.TitanGraphBuildingUtils.java

/** Calls user merge on the various possibly duplicate elements, and sorts out user responses
 * @param tx
 * @param config
 * @param security_service
 * @param logger
 * @param maybe_merger
 * @param titan_mapper
 * @param element_type
 * @param key
 * @param new_elements
 * @param existing_elements
 * @return
 */
protected static <O extends Element> List<O> invokeUserMergeCode(final TitanTransaction tx,
        final GraphSchemaBean config, final Tuple2<String, ISecurityService> security_service,
        final Optional<IBucketLogger> logger,
        final Optional<Tuple2<IEnrichmentBatchModule, GraphMergeEnrichmentContext>> maybe_merger,
        final org.apache.tinkerpop.shaded.jackson.databind.ObjectMapper titan_mapper,
        final MutableStatsBean mutable_stats, final Class<O> element_type, final String bucket_path,
        final ObjectNode key, final Collection<ObjectNode> new_elements, final Collection<O> existing_elements,
        final Map<ObjectNode, Vertex> mutable_existing_vertex_store) {
    if (existing_elements.isEmpty() && (1 == new_elements.size()) && !config.custom_finalize_all_objects()) {

        return validateUserElement(new_elements.stream().findFirst().get(), config)
                .bind(el -> addGraphSON2Graph(bucket_path, key, el, mutable_existing_vertex_store,
                        Collections.emptyMap(), tx, element_type, mutable_stats))
                .<List<O>>validation(fail -> {
                    if (Vertex.class.isAssignableFrom(element_type))
                        mutable_stats.vertex_errors++;
                    else if (Edge.class.isAssignableFrom(element_type))
                        mutable_stats.edge_errors++;

                    logger.ifPresent(l -> l.inefficientLog(Level.DEBUG,
                            BeanTemplateUtils.clone(fail)
                                    .with(BasicMessageBean::source, "GraphBuilderEnrichmentService")
                                    .with(BasicMessageBean::command, "system.onObjectBatch").done()));
                    //(keep this here for c/p purposes .. if want to attach an expensive "details" object then could do that by copying the fields across one by one)                        
                    //                        logger.ifPresent(l -> l.log(Level.DEBUG,                        
                    //                              ErrorUtils.lazyBuildMessage(true, () -> "GraphBuilderEnrichmentService", 
                    //                                    () -> "system.onObjectBatch", 
                    //                                    () -> null, 
                    //                                    () -> ErrorUtils.get("MESSAGE", params),
                    //                                    () -> null)
                    //                                    ));                  

                    return Collections.emptyList();
                }, success -> Arrays.<O>asList(success));
    } else {
        // (just gives me the elements indexed by their ids so we can get them back again later)
        // (we'll convert to string as a slightly inefficient way of ensuring the same code can handle both edge and vertex cases)
        final Map<String, Optional<O>> mutable_existing_element_vs_id_store = existing_elements.stream()
                .collect(Collectors.toMap(e -> e.id().toString(), e -> Optional.of(e)));

        return maybe_merger.<List<O>>map(merger -> {

            final Stream<Tuple2<Long, IBatchRecord>> in_stream = Stream.concat(
                    new_elements.stream().map(j -> Tuples._2T(0L, new BatchRecordUtils.JsonBatchRecord(j))),
                    existing_elements.stream().sorted((a, b) -> ((Long) a.id()).compareTo((Long) b.id())) // (ensure first found element has the lowest id)
                            .map(v -> _mapper.convertValue(titan_mapper.convertValue(v, Map.class),
                                    JsonNode.class))
                            .map(j -> Tuples._2T(0L, new BatchRecordUtils.InjectedJsonBatchRecord(j))));

            merger._2().initializeMerge(element_type);

            merger._1().onObjectBatch(in_stream, Optional.of(new_elements.size()), Optional.of(key));

            return merger._2().getAndResetElementList().stream()
                    .map(o -> addGraphSON2Graph(bucket_path, key, o, mutable_existing_vertex_store,
                            mutable_existing_element_vs_id_store, tx, element_type, mutable_stats))
                    .<O>flatMap(v -> v.validation(fail -> {
                        if (Vertex.class.isAssignableFrom(element_type))
                            mutable_stats.vertex_errors++;
                        else if (Edge.class.isAssignableFrom(element_type))
                            mutable_stats.edge_errors++;

                        logger.ifPresent(l -> l.inefficientLog(Level.DEBUG,
                                BeanTemplateUtils.clone(fail)
                                        .with(BasicMessageBean::source, "GraphBuilderEnrichmentService")
                                        .with(BasicMessageBean::command, "system.onObjectBatch").done()));

                        return Stream.empty();
                    }, success -> Stream.of(success))).collect(Collectors.toList());
        }).orElse(Collections.emptyList());
    }
}
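
The idiom to note above is the final validation: failures are flatMapped to Stream.empty() so they simply vanish from the collected list, while successes survive as single-element streams. A minimal sketch of the same shape, using Optional in place of the Validation type (all names are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class DropFailuresExample {
    // Failed parses become Stream.empty() and drop out of the result.
    static List<Integer> parseAll(List<String> tokens) {
        return tokens.stream()
                .map(DropFailuresExample::tryParse)
                .flatMap(opt -> opt.map(Stream::of).orElse(Stream.empty()))
                .collect(Collectors.toList());
    }

    static Optional<Integer> tryParse(String s) {
        try {
            return Optional.of(Integer.parseInt(s));
        } catch (NumberFormatException e) {
            return Optional.empty();
        }
    }

    public static void main(String[] args) {
        System.out.println(parseAll(Arrays.asList("1", "x", "3"))); // [1, 3]
    }
}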

From source file:org.alfresco.permissions.dao.titan.TitanPermissionsDAO.java

private Optional<Properties> getNodePropertiesImpl(Vertex nodeVertex) {
    Optional<Properties> properties = graph.traversal().V(nodeVertex).out("model").map(mv -> {
        Model model = new Model(mv.get().properties());
        List<Property> propsList = model.getProperties().entrySet().stream().flatMap(p -> {
            String propertyName = p.getKey();
            VertexProperty<Object> vp = nodeVertex.property(propertyName);
            return vp.isPresent() ? Stream.of(new Property(propertyName, vp.value())) : Stream.empty();
        }).collect(Collectors.toList());
        Properties props = new Properties(propsList);
        return props;
    }).tryNext();
    return properties;
}
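
The ternary vp.isPresent() ? Stream.of(...) : Stream.empty() inside flatMap acts as a combined filter-and-map: properties that are absent contribute nothing to the collected list. A standalone sketch of the same shape over a plain Map (illustrative names only):

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class PresentValuesExample {
    // Keep only the keys that actually have a value, rendering each as "key=value".
    static List<String> presentEntries(List<String> keys, Map<String, String> values) {
        return keys.stream()
                .flatMap(k -> values.containsKey(k)
                        ? Stream.of(k + "=" + values.get(k))
                        : Stream.empty())
                .collect(Collectors.toList());
    }
}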

From source file:com.ggvaidya.scinames.dataset.BinomialChangesSceneController.java

private void calculateAllBinomialChanges() {
    potentialChanges.clear();
    changesByPotentialChange.clear();

    Dataset prevDataset = null;
    for (Dataset ds : project.getDatasets()) {
        if (prevDataset == null) {
            prevDataset = ds;
            continue;
        }

        // Step 1. Figure out which binomial names were added and removed.
        Set<Name> binomialNamesInPrev = prevDataset.getRecognizedNames(project).flatMap(n -> n.asBinomial())
                .collect(Collectors.toSet());
        Set<Name> binomialNamesInCurrent = ds.getRecognizedNames(project).flatMap(n -> n.asBinomial())
                .collect(Collectors.toSet());

        Set<Name> namesAdded = new HashSet<>(binomialNamesInCurrent);
        namesAdded.removeAll(binomialNamesInPrev);

        Set<Name> namesDeleted = new HashSet<>(binomialNamesInPrev);
        namesDeleted.removeAll(binomialNamesInCurrent);

        // Step 2. Map all changes involving binomial name changes to the
        // binomial names they involve.
        // 
        // Note that this means deliberately skipping changes that *don't* affect
        // binomial composition, such as if a form or variety is deleted but that
        // doesn't result in the binomial name changing.
        List<Change> datasetChanges = ds.getChanges(project).collect(Collectors.toList());
        Map<Name, Set<Change>> changesByBinomialName = new HashMap<>();

        for (Change ch : datasetChanges) {
            Set<Name> changeNames = ch.getAllNames();
            Set<Name> changeBinomialNames = changeNames.stream().flatMap(n -> n.asBinomial())
                    .collect(Collectors.toSet());

            boolean involvesAddedNames = changeBinomialNames.stream().anyMatch(n -> namesAdded.contains(n));
            boolean involvesDeletedNames = changeBinomialNames.stream().anyMatch(n -> namesDeleted.contains(n));

            if (involvesAddedNames || involvesDeletedNames) {
                // Oh goody, involves one of our binomial names.
                //
                // Record all the changes by binomial name
                for (Name binomialName : changeBinomialNames) {
                    if (!changesByBinomialName.containsKey(binomialName))
                        changesByBinomialName.put(binomialName, new HashSet<>());

                    changesByBinomialName.get(binomialName).add(ch);
                }

            } else {
                // This change is an error or involves non-binomial names only.
                // Ignore!
            }
        }

        // Step 3. Convert the additions and deletions into potential changes,
        // based on the changes they include.
        Set<Name> namesChanged = new HashSet<>(namesAdded);
        namesChanged.addAll(namesDeleted);

        Set<Change> changesSummarized = new HashSet<>();

        for (Name n : namesChanged) {
            Set<Change> allChangesAssociatedWithName = changesByBinomialName.get(n);

            // TODO: am I sure this is being handled correctly?
            if (allChangesAssociatedWithName == null)
                continue;

            Set<Change> changes = allChangesAssociatedWithName.stream()
                    // Don't summarize the same change into multiple changes
                    // (e.g. if A + B -> C, we don't want this to turn up three times,
                    //  under 'A', 'B' and 'C')
                    .filter(ch -> !changesSummarized.contains(ch)).collect(Collectors.toSet());

            // No changes left? Skip this name!
            if (changes.isEmpty())
                continue;

            changesSummarized.addAll(changes);

            PotentialChange potentialChange = new PotentialChange(ds,
                    (namesAdded.contains(n) ? ChangeType.ADDITION : ChangeType.DELETION),
                    (namesAdded.contains(n) ? Stream.empty() : Stream.of(n)),
                    (namesAdded.contains(n) ? Stream.of(n) : Stream.empty()),
                    BinomialChangesSceneController.class, "Created from " + changes.size() + " changes: "
                            + changes.stream().map(ch -> ch.toString()).collect(Collectors.joining(";")));

            // Now, by default, the potential change writes in a long creation note, but
            // we don't want that, do we?
            potentialChange.getProperties().put("created", potentialChange.getNote().orElse(""));
            potentialChange.getProperties().remove("note");

            Set<ChangeType> changeTypes = new HashSet<>();

            for (Change ch : changes) {
                changeTypes.add(ch.getType());

                potentialChange.fromProperty().addAll(ch.getFrom());
                potentialChange.toProperty().addAll(ch.getTo());

                Optional<String> currentNote = potentialChange.getNote();
                Optional<String> changeNote = ch.getNote();

                if (currentNote.isPresent() && changeNote.isPresent()) {
                    potentialChange.noteProperty().set(currentNote.get() + "; " + changeNote.get());

                } else if (!currentNote.isPresent() && changeNote.isPresent()) {
                    potentialChange.noteProperty().set(changeNote.get());

                } else {
                    // Nothing to get hung about.
                }
            }

            // Finally, figure out this potential change's type.
            if (changeTypes.size() == 1)
                potentialChange.typeProperty().set(changeTypes.iterator().next());
            else {
                potentialChange.typeProperty().set(ChangeType.COMPLEX);
            }

            // All done!
            potentialChanges.add(potentialChange);
            changesByPotentialChange.put(potentialChange, changes);
        }

        // Ready for next!
        prevDataset = ds;
    }
}
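
The PotentialChange constructor above is handed either Stream.empty() or Stream.of(n) for its "from" and "to" names, depending on whether the name was added or deleted. A tiny sketch of that shape with a hypothetical stand-in class (not the project's own type):

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class AdditionOrDeletionExample {
    final List<String> from;
    final List<String> to;

    // An addition has no "from" name; a deletion has no "to" name.
    AdditionOrDeletionExample(String name, boolean isAddition) {
        this.from = (isAddition ? Stream.<String>empty() : Stream.of(name)).collect(Collectors.toList());
        this.to = (isAddition ? Stream.of(name) : Stream.<String>empty()).collect(Collectors.toList());
    }
}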

From source file:org.talend.dataprep.transformation.service.TransformationService.java

/**
 * Suggest what {@link ActionDefinition actions} can be applied to <code>column</code>.
 *
 * @param column A {@link ColumnMetadata column} definition.
 * @param limit An optional limit parameter to return the first <code>limit</code> suggestions.
 * @return A list of {@link ActionDefinition} that can be applied to this column.
 * @see #suggest(DataSet)
 */
@RequestMapping(value = "/suggest/column", method = POST, consumes = APPLICATION_JSON_VALUE, produces = APPLICATION_JSON_VALUE)
@ApiOperation(value = "Suggest actions for a given column metadata", notes = "This operation returns an array of suggested actions in decreasing order of importance.")
@ResponseBody
public Stream<ActionDefinition> suggest(@RequestBody(required = false) ColumnMetadata column, //
        @ApiParam(value = "How many actions should be suggested at most", defaultValue = "5") @RequestParam(value = "limit", defaultValue = "5", required = false) int limit) {
    if (column == null) {
        return Stream.empty();
    }

    // look for all actions applicable to the column type
    final Stream<Suggestion> suggestions = suggestionEngine
            .score(actionRegistry.findAll().parallel().filter(am -> am.acceptField(column)), column);
    return suggestions //
            .filter(s -> s.getScore() > 0) // Keep only strictly positive score (negative and 0 indicates not applicable)
            .limit(limit) //
            .map(Suggestion::getAction) // Get the action for positive suggestions
            .map(am -> am.adapt(column)); // Adapt default values (e.g. column name)
}
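
The early return Stream.empty() is what lets the endpoint keep its Stream return type for the "no column supplied" case instead of returning null. A minimal, framework-free sketch of the same guard (illustrative names):

import java.util.Locale;
import java.util.stream.Stream;

class SuggestGuardExample {
    // With no input there is nothing to suggest; an empty stream keeps callers' pipelines intact.
    static Stream<String> suggest(String prefix, Stream<String> candidates, long limit) {
        if (prefix == null) {
            return Stream.empty();
        }
        String lower = prefix.toLowerCase(Locale.ROOT);
        return candidates
                .filter(c -> c.toLowerCase(Locale.ROOT).startsWith(lower))
                .limit(limit);
    }
}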

From source file:com.ikanow.aleph2.analytics.services.DeduplicationService.java

/** Logic to perform the custom deduplication with the current and new versions
 * @param maybe_custom_handler
 * @param new_record
 * @param old_record
 * @return list of JSON objects to delete
 */
protected static Stream<JsonNode> handleCustomDeduplication(
        Optional<Tuple2<IEnrichmentBatchModule, DeduplicationEnrichmentContext>> maybe_custom_handler,
        final List<Tuple3<Long, IBatchRecord, ObjectNode>> new_records, final Collection<JsonNode> old_records,
        final JsonNode key) {
    return maybe_custom_handler.map(handler_context -> {
        handler_context._2().resetMutableState(old_records, key);

        final Consumer<IEnrichmentBatchModule> handler = new_module -> {
            final Stream<Tuple2<Long, IBatchRecord>> dedup_stream = Stream.concat(
                    new_records.stream().map(t3 -> Tuples._2T(t3._1(), t3._2())),
                    old_records.stream().map(old_record -> Tuples._2T(-1L,
                            (IBatchRecord) (new BatchRecordUtils.InjectedJsonBatchRecord(old_record)))));

            final int batch_size = new_records.size();

            new_module.onObjectBatch(dedup_stream, Optional.of(batch_size).filter(__ -> !old_records.isEmpty()), // (ie leave batch size blank if there's no dedup) 
                    Optional.of(key));

            new_module.onStageComplete(false);
        };

        handler.accept(handler_context._1());

        return handler_context._2().getObjectIdsToDelete();
    }).orElse(Stream.empty());
}
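
The closing .orElse(Stream.empty()) is the key move: when the optional handler is absent, the method still honours its Stream<JsonNode> contract rather than returning null. A compact sketch of the same Optional-to-Stream fallback (illustrative names):

import java.util.Optional;
import java.util.function.Function;
import java.util.stream.Stream;

class OptionalHandlerExample {
    // If a handler is configured, ask it for results; otherwise produce nothing.
    static Stream<String> results(Optional<Function<String, Stream<String>>> maybeHandler, String input) {
        return maybeHandler
                .map(handler -> handler.apply(input))
                .orElse(Stream.empty());
    }
}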

From source file:com.ikanow.aleph2.data_import_manager.analytics.actors.DataBucketAnalyticsChangeActor.java

/** Fills in the jobs' entry points in the streaming enrichment case
 * @param technology
 * @param bucket
 * @return
 */
protected static final DataBucketBean finalBucketConversion(final String technology,
        final DataBucketBean bucket,
        final Validation<BasicMessageBean, Map<String, Tuple2<SharedLibraryBean, String>>> err_or_libs) {
    //TODO (ALEPH-12 also handle the system classpath case, using some lookup engine)
    return err_or_libs.validation(fail -> bucket, libs -> {
        if (STREAMING_ENRICHMENT_TECH_NAME.equals(technology) // enrichment is specified
                && (null != bucket.streaming_enrichment_topology()) // there is a streaming topology specified
                && Optional.ofNullable(bucket.streaming_enrichment_topology().enabled()).orElse(true) // it's enabled (otherwise entry_point==null)
                && isStreamingEnrichmentType(bucket) // it is a streaming enrichment bucket
        ) {
            // Check all modules and libs...
            return Stream
                    .concat(Optional.ofNullable(bucket.streaming_enrichment_topology().module_name_or_id())
                            .map(Stream::of).orElse(Stream.empty()),
                            Optional.ofNullable(bucket.streaming_enrichment_topology().library_names_or_ids())
                                    .map(List::stream).orElse(Stream.empty()))
                    .map(name -> libs.get(name)) //...to see if we can find the corresponding shared library...
                    .filter(t2 -> t2 != null).map(t2 -> t2._1())
                    .map(lib -> Optional.ofNullable(bucket.streaming_enrichment_topology().entry_point())
                            .map(Optional::of)
                            .orElse(Optional.ofNullable(lib.streaming_enrichment_entry_point()))
                            .orElse(lib.misc_entry_point()))
                    .filter(entry_point -> entry_point != null) //...that has a valid entry point...
                    .findFirst().map(entry_point -> { // ... grab the first and ...
                        return BeanTemplateUtils.clone(bucket).with(DataBucketBean::analytic_thread,
                                BeanTemplateUtils.clone(bucket.analytic_thread())
                                        .with(AnalyticThreadBean::jobs, bucket.analytic_thread().jobs().stream()
                                                .map(job -> BeanTemplateUtils.clone(job)
                                                        .with(AnalyticThreadJobBean::entry_point, entry_point) //...set that entry point in all the jobs...
                                                        .done())
                                                .collect(Collectors.toList()))
                                        .done())
                                .done();
                    }).orElse(bucket); // (if anything fails just return the bucket)               
        } else if (BATCH_ENRICHMENT_TECH_NAME.equals(technology) // enrichment is specified
                && (null != bucket.batch_enrichment_configs()) // there is a batch topology specified
                && bucket.batch_enrichment_configs().stream()
                        .filter(cfg -> Optional.ofNullable(cfg.enabled()).orElse(true)).findAny().isPresent()
                && isBatchEnrichmentType(bucket) // it is a batch enrichment bucket
        ) {
            return bucket; // nothing to do here; the entry points are inferred from the configurations
        } else
            return bucket;
    });
}
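
The Stream.concat(...) at the top of the happy path turns an optional single module name and an optional list of library names into one uniform stream, with absent sources contributing Stream.empty(). A standalone sketch of that shape (illustrative names):

import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class ConcatOptionalSourcesExample {
    // Merge an optional single name and an optional list of names into one stream;
    // absent sources become Stream.empty() and so add nothing.
    static List<String> allNames(Optional<String> maybeModule, Optional<List<String>> maybeLibraries) {
        return Stream.concat(
                        maybeModule.map(Stream::of).orElse(Stream.empty()),
                        maybeLibraries.map(List::stream).orElse(Stream.empty()))
                .collect(Collectors.toList());
    }
}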

From source file:com.joyent.manta.client.MantaClient.java

/**
 * Return a stream of the contents of a directory in Manta.
 *
 * @param path The fully qualified path of the directory.
 * @return A {@link Stream} of {@link MantaObjectResponse} listing the contents of the directory.
 * @throws IOException thrown when there is a problem getting the listing over the network
 */
public Stream<MantaObject> listObjects(final String path) throws IOException {
    final MantaDirectoryListingIterator itr = streamingIterator(path);

    /* We preemptively check the iterator for a next value because that will
     * trigger an error if the path doesn't exist or is otherwise inaccessible.
     * This error typically takes the form of an UncheckedIOException, so we
     * unwind that exception if the cause is a MantaClientHttpResponseException
     * and rethrow another MantaClientHttpResponseException, so that the
     * stacktrace will point to this running method.
     */
    try {
        if (!itr.hasNext()) {
            itr.close();
            return Stream.empty();
        }
    } catch (UncheckedIOException e) {
        if (e.getCause() instanceof MantaClientHttpResponseException) {
            throw e.getCause();
        } else {
            throw e;
        }
    }

    final int additionalCharacteristics = Spliterator.CONCURRENT | Spliterator.ORDERED | Spliterator.NONNULL
            | Spliterator.DISTINCT;

    Stream<Map<String, Object>> backingStream = StreamSupport
            .stream(Spliterators.spliteratorUnknownSize(itr, additionalCharacteristics), false);

    Stream<MantaObject> stream = backingStream.map(MantaObjectConversionFunction.INSTANCE).onClose(itr::close);

    danglingStreams.add(stream);

    return stream;
}
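
Because the returned stream is backed by a paging iterator (note the onClose(itr::close) hook), callers should close it when finished; try-with-resources handles both the populated case and the Stream.empty() case uniformly. A hedged usage sketch, assuming an already-configured MantaClient and that MantaObject lives alongside it in com.joyent.manta.client:

import java.io.IOException;
import java.util.stream.Stream;

import com.joyent.manta.client.MantaClient;
import com.joyent.manta.client.MantaObject;

class ListObjectsUsageExample {
    // Count the non-directory entries under a path; works the same whether the
    // directory is empty (Stream.empty()) or spans many pages of results.
    static long countFiles(MantaClient client, String dirPath) throws IOException {
        try (Stream<MantaObject> objects = client.listObjects(dirPath)) {
            return objects.filter(obj -> !obj.isDirectory()).count();
        }
    }
}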

From source file:com.joyent.manta.client.MantaClient.java

/**
 * <p>Finds all directories and files recursively under a given path. Since
 * this method returns a {@link Stream}, consumers can add their own
 * additional filtering based on path, object type or other criteria.</p>
 *
 * <p>This method will make each request to each subdirectory in parallel.
 * Parallelism settings are set by JDK system property:
 * <code>java.util.concurrent.ForkJoinPool.common.parallelism</code></p>
 *
 * <p>When using a filter with this method, if the filter matches a directory,
 * then all subdirectory results for that directory will be excluded. If you
 * want to perform a match against all results, then use {@link #find(String)}
 * and then filter on the stream returned.</p>
 *
 * <p><strong>WARNING:</strong> this method is not atomic and thereby not
 * safe if other operations are performed on the directory structure while
 * it is running.</p>
 *
 * @param path directory path
 * @param filter predicate class used to filter all results returned
 * @return A recursive unsorted {@link Stream} of {@link MantaObject}
 *         instances representing the contents of all subdirectories.
 */
public Stream<MantaObject> find(final String path, final Predicate<? super MantaObject> filter) {
    /* We read directly from the iterator here to reduce the total stack
     * frames and to reduce the amount of abstraction to a minimum.
     *
     * Within this loop, we store all of the objects found in memory so
     * that we can later query find() methods for the directory objects
     * in parallel. */
    final Stream.Builder<MantaObject> objectBuilder = Stream.builder();
    final Stream.Builder<MantaObject> dirBuilder = Stream.builder();

    try (MantaDirectoryListingIterator itr = streamingIterator(path)) {
        while (itr.hasNext()) {
            final Map<String, Object> item = itr.next();
            final MantaObject obj = MantaObjectConversionFunction.INSTANCE.apply(item);

            /* We take a predicate as a method parameter because it allows
             * us to filter at the highest level within this iterator. If
             * we just passed the stream as is back to the user, then
             * they would have to filter the results *after* all of the
             * HTTP requests were made. This way the filter can help limit
             * the total number of HTTP requests made to Manta. */
            if (filter == null || filter.test(obj)) {
                objectBuilder.accept(obj);

                if (obj.isDirectory()) {
                    dirBuilder.accept(obj);
                }
            }
        }
    }

    /* All objects within this directory should be included in the results,
     * so we have a stream stored here that will later be concatenated. */
    final Stream<MantaObject> objectStream = objectBuilder.build();

    /* Directories are processed in parallel because it is the only unit
     * within our abstractions that can be properly done in parallel.
     * MantaDirectoryListingIterator forces all paging of directory
     * listings to be sequential requests. However, it works fine to
     * run multiple MantaDirectoryListingIterator instances per request.
     * That is exactly what we are doing here using streams which is
     * allowing us to do the recursive calls in a lazy fashion.
     *
     * From an HTTP request perspective, this means that only the listing for
     * the current directory is performed and no other listing
     * will be performed until the stream is read.
     */
    try {
        final Stream<MantaObject> dirStream = findForkJoinPool
                .submit(() -> dirBuilder.build().parallel().flatMap(obj -> find(obj.getPath(), filter))).get();

        /* Due to the way we concatenate, the results will be quite out of order;
         * if a consumer needs sorted results, that is their responsibility. */
        final Stream<MantaObject> stream = Stream.concat(objectStream, dirStream);

        danglingStreams.add(stream);

        return stream;
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return Stream.empty();
    } catch (ExecutionException e) {
        throw new MantaException(e.getCause());
    }
}
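
Worth noting at the very end: on InterruptedException the method restores the interrupt flag and falls back to Stream.empty() rather than propagating the exception or returning null. The same defensive shape in isolation (illustrative names):

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.stream.Stream;

class InterruptedFallbackExample {
    // If interrupted while waiting, restore the flag and yield no results;
    // the caller's stream pipeline keeps working, just with empty input.
    static Stream<String> awaitResults(Future<Stream<String>> pending) {
        try {
            return pending.get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return Stream.empty();
        } catch (ExecutionException e) {
            throw new RuntimeException(e.getCause());
        }
    }
}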

From source file:com.ikanow.aleph2.analytics.spark.services.EnrichmentPipelineService.java

/** Handy utility to create a stream from an iterator, tagging each element with whether it is the final ("last") one
 * @param it
 * @return
 */
private <T> Stream<Tuple2<T, Boolean>> createStream(final Iterator<T> it) {
    return !it.hasNext() ? Stream.empty() : StreamUtils.unfold(it, itit -> {
        return itit.hasNext() ? Optional.of(itit) : Optional.empty();
    }).map(itit -> Tuples._2T(itit.next(), !itit.hasNext()));
}