Example usage for java.util.stream Stream.concat

List of usage examples for java.util.stream Stream.concat

Introduction

On this page you can find example usages of java.util.stream Stream.concat.

Prototype

public static <T> Stream<T> concat(Stream<? extends T> a, Stream<? extends T> b) 

Document

Creates a lazily concatenated stream whose elements are all the elements of the first stream followed by all the elements of the second stream.
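
A minimal, self-contained sketch of the call itself (the list contents are made up for illustration): Stream.concat appends the second stream's elements after the first stream's, lazily and in encounter order.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class StreamConcatExample {
    public static void main(String[] args) {
        List<String> first = Arrays.asList("a", "b");
        List<String> second = Arrays.asList("c", "d");

        // Lazily concatenate both lists' streams and collect the result.
        List<String> combined = Stream.concat(first.stream(), second.stream())
                .collect(Collectors.toList());

        System.out.println(combined); // [a, b, c, d]
    }
}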

Usage

From source file:com.ikanow.aleph2.analytics.services.DeduplicationService.java

/** Returns the minimal set of includes to return from the dedup query
 * @param policy
 * @param dedup_fields
 * @param timestamp_field
 * @return the fields to include, plus an associated flag
 */
protected static Tuple2<List<String>, Boolean> getIncludeFields(final DeduplicationPolicy policy,
        final List<String> dedup_fields, String timestamp_field) {
    final Tuple2<List<String>, Boolean> fields_include = Optional
            .of(Patterns.match(policy).<Tuple2<List<String>, Boolean>>andReturn()
                    .when(p -> p == DeduplicationPolicy.leave,
                            __ -> Tuples._2T(Arrays.asList(AnnotationBean._ID), true))
                    .when(p -> p == DeduplicationPolicy.update,
                            __ -> Tuples._2T(Arrays.asList(AnnotationBean._ID, timestamp_field), true))
                    .when(p -> p == DeduplicationPolicy.overwrite,
                            __ -> Tuples._2T(Arrays.asList(AnnotationBean._ID), true))
                    .otherwise(__ -> Tuples._2T(Arrays.asList(), false)))
            .map(t2 -> t2._2() ? Tuples._2T(
                    Stream.concat(t2._1().stream(), dedup_fields.stream()).collect(Collectors.toList()),
                    t2._2()) : t2)
            .get();

    return fields_include;
}

From source file:org.apache.james.mailbox.model.MailboxACL.java

/**
 * Performs the set-theoretic union of this {@link MailboxACL}
 * and the other {@link MailboxACL}.
 *
 * A schematic example:
 * "user1:lr;user2:lrwt".union("user1:at;-$group1:lrwt") returns
 * "user1:alrt;user2:lrwt;-$group1:lrwt".
 *
 * Implementations must return a new unmodifiable instance of
 * {@link MailboxACL}. However, an implementation may return this or the
 * other parameter value if the result would be equal to it.
 */
public MailboxACL union(MailboxACL other) throws UnsupportedRightException {
    return new MailboxACL(Stream
            .concat(this.entries.entrySet().stream(), other.getEntries().entrySet().stream())
            .collect(Guavate.toImmutableListMultimap(Map.Entry::getKey, Map.Entry::getValue)).asMap().entrySet()
            .stream().collect(Guavate.toImmutableMap(Map.Entry::getKey, e -> union(e.getValue()))));
}
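
A JDK-only sketch of the same merge-by-key idea, without the Guavate collectors. The maps, keys, and the naive string-concatenation merge are assumptions for illustration; the real code above unions ACL rights rather than concatenating strings.

import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class UnionByKeyExample {
    public static void main(String[] args) {
        Map<String, String> left = new TreeMap<>();
        left.put("user1", "lr");
        Map<String, String> right = new TreeMap<>();
        right.put("user1", "at");
        right.put("user2", "lrwt");

        // Concatenate both entry streams and merge values that share a key.
        Map<String, String> union = Stream
                .concat(left.entrySet().stream(), right.entrySet().stream())
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
                        (a, b) -> a + b, // placeholder merge; the real code unions rights
                        TreeMap::new));

        System.out.println(union); // {user1=lrat, user2=lrwt}
    }
}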

From source file:no.asgari.civilization.server.action.GameAction.java

private List<Item> getAllRevealedItems(PBF pbf) {
    // Had to supply the comparator inside sorted(), otherwise a weird exception was thrown
    Stream<Item> discardedStream = pbf.getDiscardedItems().stream()
            .sorted((o1, o2) -> o1.getSheetName().compareTo(o2.getSheetName()));

    Stream<Item> playerStream = pbf.getPlayers().stream().flatMap(p -> p.getItems().stream())
            .filter(it -> !it.isHidden()).sorted((o1, o2) -> o1.getSheetName().compareTo(o2.getSheetName()));

    Stream<Item> concatedStream = Stream.concat(discardedStream, playerStream);
    return concatedStream.collect(toList());
}
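
A general JDK note, independent of the PBF types above (the item names here are made up): Stream.concat appends the second stream after the first; it does not merge-sort two already-sorted inputs.

import java.util.stream.Collectors;
import java.util.stream.Stream;

public class ConcatOrderingExample {
    public static void main(String[] args) {
        Stream<String> discarded = Stream.of("Coal", "Axe").sorted();    // Axe, Coal
        Stream<String> revealed = Stream.of("Wheat", "Bronze").sorted(); // Bronze, Wheat

        // Each input keeps its own order, but the combined result is not globally sorted.
        System.out.println(Stream.concat(discarded, revealed)
                .collect(Collectors.toList())); // [Axe, Coal, Bronze, Wheat]
    }
}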

From source file:com.ikanow.aleph2.analytics.services.DeduplicationService.java

/** Logic to perform the custom deduplication with the current and new versions
 * @param maybe_custom_handler
 * @param new_records
 * @param old_records
 * @param key
 * @return a stream of JSON objects to delete
 */
protected static Stream<JsonNode> handleCustomDeduplication(
        Optional<Tuple2<IEnrichmentBatchModule, DeduplicationEnrichmentContext>> maybe_custom_handler,
        final List<Tuple3<Long, IBatchRecord, ObjectNode>> new_records, final Collection<JsonNode> old_records,
        final JsonNode key) {
    return maybe_custom_handler.map(handler_context -> {
        handler_context._2().resetMutableState(old_records, key);

        final Consumer<IEnrichmentBatchModule> handler = new_module -> {
            final Stream<Tuple2<Long, IBatchRecord>> dedup_stream = Stream.concat(
                    new_records.stream().map(t3 -> Tuples._2T(t3._1(), t3._2())),
                    old_records.stream().map(old_record -> Tuples._2T(-1L,
                            (IBatchRecord) (new BatchRecordUtils.InjectedJsonBatchRecord(old_record)))));

            final int batch_size = new_records.size();

            new_module.onObjectBatch(dedup_stream, Optional.of(batch_size).filter(__ -> !old_records.isEmpty()), // (ie leave batch size blank if there's no dedup) 
                    Optional.of(key));

            new_module.onStageComplete(false);
        };

        handler.accept(handler_context._1());

        return handler_context._2().getObjectIdsToDelete();
    }).orElse(Stream.empty());
}

From source file:com.thinkbiganalytics.feedmgr.rest.controller.FeedRestController.java

@GET
@Path("{feedId}/actions/allowed")
@Produces(MediaType.APPLICATION_JSON)
@ApiOperation("Gets the list of actions permitted for the given username and/or groups.")
@ApiResponses({ @ApiResponse(code = 200, message = "Returns the actions.", response = ActionGroup.class),
        @ApiResponse(code = 404, message = "A feed with the given ID does not exist.", response = RestResponseStatus.class) })
public Response getAllowedActions(@PathParam("feedId") String feedIdStr,
        @QueryParam("user") Set<String> userNames, @QueryParam("group") Set<String> groupNames) {
    log.debug("Get allowed actions for feed: {}", feedIdStr);

    Set<? extends Principal> users = Arrays.stream(this.securityTransform.asUserPrincipals(userNames))
            .collect(Collectors.toSet());
    Set<? extends Principal> groups = Arrays.stream(this.securityTransform.asGroupPrincipals(groupNames))
            .collect(Collectors.toSet());

    return this.securityService
            .getAllowedFeedActions(feedIdStr,
                    Stream.concat(users.stream(), groups.stream()).collect(Collectors.toSet()))
            .map(g -> Response.ok(g).build()).orElseThrow(() -> new WebApplicationException(
                    "A feed with the given ID does not exist: " + feedIdStr, Status.NOT_FOUND));
}
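
A simplified, JDK-only sketch of the pattern above: two independently built sets are combined into a single set for one call. The strings stand in for the real Principal objects.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class CombinePrincipalsExample {
    public static void main(String[] args) {
        Set<String> users = new HashSet<>(Arrays.asList("alice", "bob"));
        Set<String> groups = new HashSet<>(Arrays.asList("admins"));

        // Concatenate both sets' streams and collect into one combined set.
        Set<String> allPrincipals = Stream.concat(users.stream(), groups.stream())
                .collect(Collectors.toSet());

        System.out.println(allPrincipals); // e.g. [bob, alice, admins] (unordered)
    }
}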

From source file:com.thinkbiganalytics.feedmgr.rest.controller.FeedRestController.java

@GET
@Path("{feedId}/actions/change")
@Produces(MediaType.APPLICATION_JSON)
@ApiOperation("Constructs and returns a permission change request for a set of users/groups containing the actions that the requester may permit or revoke.")
@ApiResponses({
        @ApiResponse(code = 200, message = "Returns the change request that may be modified by the client and re-posted.", response = PermissionsChange.class),
        @ApiResponse(code = 400, message = "The type is not valid.", response = RestResponseStatus.class),
        @ApiResponse(code = 404, message = "No feed exists with the specified ID.", response = RestResponseStatus.class) })
public PermissionsChange getAllowedPermissionsChange(@PathParam("feedId") String feedIdStr,
        @QueryParam("type") String changeType, @QueryParam("user") Set<String> userNames,
        @QueryParam("group") Set<String> groupNames) {
    if (StringUtils.isBlank(changeType)) {
        throw new WebApplicationException("The query parameter \"type\" is required", Status.BAD_REQUEST);
    }

    Set<? extends Principal> users = Arrays.stream(this.securityTransform.asUserPrincipals(userNames))
            .collect(Collectors.toSet());
    Set<? extends Principal> groups = Arrays.stream(this.securityTransform.asGroupPrincipals(groupNames))
            .collect(Collectors.toSet());

    return this.securityService
            .createFeedPermissionChange(feedIdStr, ChangeType.valueOf(changeType.toUpperCase()),
                    Stream.concat(users.stream(), groups.stream()).collect(Collectors.toSet()))
            .orElseThrow(() -> new WebApplicationException(
                    "A feed with the given ID does not exist: " + feedIdStr, Status.NOT_FOUND));
}

From source file:com.ikanow.aleph2.data_import_manager.analytics.actors.DataBucketAnalyticsChangeActor.java

/** Converts a bucket with only streaming or batch enrichment settings into one that has an analytic thread derived from them
 * @param bucket
 * @return the converted bucket, or the original bucket if no conversion applies
 */
protected static DataBucketBean convertEnrichmentToAnalyticBucket(final DataBucketBean bucket) {

    if ((null != bucket.streaming_enrichment_topology()) && isStreamingEnrichmentType(bucket)) {
        final EnrichmentControlMetadataBean enrichment = Optional
                .ofNullable(bucket.streaming_enrichment_topology().enabled()).orElse(false)
                        ? bucket.streaming_enrichment_topology()
                        : BeanTemplateUtils.build(EnrichmentControlMetadataBean.class).done().get();

        final AnalyticThreadJobBean.AnalyticThreadJobInputBean input = new AnalyticThreadJobBean.AnalyticThreadJobInputBean(
                true, //(enabled)
                null, //(name)
                "", // (myself) 
                "stream", null, // (no filter)
                null // (no extra config)
        );

        final AnalyticThreadJobBean.AnalyticThreadJobOutputBean output = new AnalyticThreadJobBean.AnalyticThreadJobOutputBean(
                true, // (not used for streaming) 
                false, // (not transient, ie final output) 
                null, // (no sub-bucket path)
                DataBucketBean.MasterEnrichmentType.streaming // (not used for non-transient)
        );

        final AnalyticThreadJobBean job = new AnalyticThreadJobBean(
                Optional.ofNullable(enrichment.name()).orElse("streaming_enrichment"), //(name) 
                true, // (enabled)
                STREAMING_ENRICHMENT_TECH_NAME, //(technology name or id)
                enrichment.module_name_or_id(), enrichment.library_names_or_ids(), //(additional modules)
                enrichment.entry_point(), // if the user specifies an override
                Maps.newLinkedHashMap(Optional.ofNullable(enrichment.config()).orElse(Collections.emptyMap())), //(config)
                DataBucketBean.MasterEnrichmentType.streaming, // (type) 
                Collections.emptyList(), //(node rules)
                false, //(multi node enabled)
                false, //(lock to nodes)
                Collections.emptyList(), // (dependencies) 
                Arrays.asList(input), null, //(global input config)
                output);

        return BeanTemplateUtils.clone(bucket)
                .with(DataBucketBean::analytic_thread, BeanTemplateUtils.build(AnalyticThreadBean.class)
                        .with(AnalyticThreadBean::jobs, Arrays.asList(job)).done().get())
                .done();
    } else if ((null != bucket.batch_enrichment_configs()) && isBatchEnrichmentType(bucket)) {
        final AnalyticThreadJobBean.AnalyticThreadJobInputBean input = new AnalyticThreadJobBean.AnalyticThreadJobInputBean(
                true, //(enabled) 
                null, //(name)
                "", // (myself) 
                "batch", null, // (no filter)
                null // (no extra config)
        );

        final AnalyticThreadJobBean.AnalyticThreadJobOutputBean output = new AnalyticThreadJobBean.AnalyticThreadJobOutputBean(
                true, // (preserve existing data by default) 
                false, // (not transient, ie final output) 
                null, // (no sub-bucket path)
                null // (not used for non-transient)
        );

        //(needed below)
        final ObjectMapper object_mapper = BeanTemplateUtils.configureMapper(Optional.empty());

        final AnalyticThreadJobBean job = new AnalyticThreadJobBean("batch_enrichment", //(name) 
                true, // (enabled)
                BATCH_ENRICHMENT_TECH_NAME, //(technology name or id)
                null, // no concept of a single module for batch enrichment
                bucket // collect _all_ the libraries and modules into the classpath; the BE logic will have to sort them out later
                        .batch_enrichment_configs().stream()
                        .flatMap(cfg -> Stream.concat(
                                Optional.ofNullable(cfg.module_name_or_id()).map(Stream::of)
                                        .orElseGet(Stream::empty),
                                Optional.ofNullable(cfg.library_names_or_ids()).orElse(Collections.emptyList())
                                        .stream()))
                        .collect(Collectors.toList()),
                null, // no concept of a single entry point for batch enrichment 
                Maps.<String, Object>newLinkedHashMap(ImmutableMap.<String, Object>builder()
                        .put(EnrichmentControlMetadataBean.ENRICHMENT_PIPELINE,
                                bucket.batch_enrichment_configs().stream()
                                        .map(cfg -> object_mapper.convertValue(cfg, LinkedHashMap.class))
                                        .collect(Collectors.toList()))
                        .build())
                //(config)
                , DataBucketBean.MasterEnrichmentType.batch, // (type) 
                Collections.emptyList(), //(node rules)
                false, //(multi node enabled)
                false, // (lock to nodes)
                Collections.emptyList(), // (dependencies) 
                Arrays.asList(input), null, //(global input config)
                output);

        return BeanTemplateUtils.clone(bucket)
                .with(DataBucketBean::analytic_thread, BeanTemplateUtils.build(AnalyticThreadBean.class)
                        .with(AnalyticThreadBean::jobs, Arrays.asList(job))
                        .with(AnalyticThreadBean::trigger_config,
                                BeanTemplateUtils.build(AnalyticThreadTriggerBean.class)
                                        .with(AnalyticThreadTriggerBean::auto_calculate, true).done().get())
                        .done().get())
                .done();
    } else {
        return bucket;
    }

}
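
A JDK-only sketch of the null-safe concat idiom used above: a possibly-null single module name is combined with a possibly-null list of library names into one list. The names are hypothetical.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class NullSafeConcatExample {
    // Combine an optional single value and an optional list, tolerating nulls for both.
    static List<String> combine(String moduleOrNull, List<String> librariesOrNull) {
        return Stream.concat(
                Optional.ofNullable(moduleOrNull).map(Stream::of).orElseGet(Stream::empty),
                Optional.ofNullable(librariesOrNull).orElse(Collections.emptyList()).stream())
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        System.out.println(combine("module-a", Arrays.asList("lib-1", "lib-2"))); // [module-a, lib-1, lib-2]
        System.out.println(combine(null, null));                                  // []
    }
}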

From source file:com.ikanow.aleph2.data_import.services.HarvestContext.java

@Override
public CompletableFuture<?> flushBatchOutput(Optional<DataBucketBean> bucket) {
    // Flush data and logger

    final Stream<CompletableFuture<?>> flush_writer = Stream.of(_multi_writer.get().flushBatchOutput());
    final Stream<CompletableFuture<?>> flush_logger = _mutable_state.bucket_loggers.values().stream()
            .map(l -> l.flush());

    return CompletableFuture.allOf(Stream.concat(flush_writer, flush_logger).toArray(CompletableFuture[]::new));
}
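
A JDK-only sketch of the same pattern, with placeholder futures standing in for the writer and logger flushes: concatenate two streams of futures and wait on all of them with CompletableFuture.allOf.

import java.util.concurrent.CompletableFuture;
import java.util.stream.Stream;

public class FlushAllExample {
    public static void main(String[] args) {
        Stream<CompletableFuture<?>> flushWriter = Stream.of(CompletableFuture.completedFuture("writer flushed"));
        Stream<CompletableFuture<?>> flushLoggers = Stream.of(CompletableFuture.completedFuture("logger flushed"));

        // allOf takes an array, so the concatenated stream is turned into CompletableFuture[].
        CompletableFuture<Void> all = CompletableFuture
                .allOf(Stream.concat(flushWriter, flushLoggers).toArray(CompletableFuture[]::new));

        all.join(); // completes once every concatenated future has completed
        System.out.println("all outputs flushed");
    }
}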

From source file:com.ikanow.aleph2.data_import_manager.analytics.actors.DataBucketAnalyticsChangeActor.java

/** Fills in the jobs' entry points in the streaming enrichment case
 * @param technology
 * @param bucket
 * @param err_or_libs
 * @return the bucket with the jobs' entry points filled in where applicable
 */
protected static final DataBucketBean finalBucketConversion(final String technology,
        final DataBucketBean bucket,
        final Validation<BasicMessageBean, Map<String, Tuple2<SharedLibraryBean, String>>> err_or_libs) {
    //TODO (ALEPH-12 also handle the system classpath case, using some lookup engine)
    return err_or_libs.validation(fail -> bucket, libs -> {
        if (STREAMING_ENRICHMENT_TECH_NAME.equals(technology) // enrichment is specified
                && (null != bucket.streaming_enrichment_topology()) // there is a streaming topology specified
                && Optional.ofNullable(bucket.streaming_enrichment_topology().enabled()).orElse(true) // it's enabled (otherwise entry_point==null)
                && isStreamingEnrichmentType(bucket) // it is a streaming enrichment bucket
        ) {
            // Check all modules and libs...
            return Stream
                    .concat(Optional.ofNullable(bucket.streaming_enrichment_topology().module_name_or_id())
                            .map(Stream::of).orElse(Stream.empty()),
                            Optional.ofNullable(bucket.streaming_enrichment_topology().library_names_or_ids())
                                    .map(List::stream).orElse(Stream.empty()))
                    .map(name -> libs.get(name)) //...to see if we can find the corresponding shared library...
                    .filter(t2 -> t2 != null).map(t2 -> t2._1())
                    .map(lib -> Optional.ofNullable(bucket.streaming_enrichment_topology().entry_point())
                            .map(Optional::of)
                            .orElse(Optional.ofNullable(lib.streaming_enrichment_entry_point()))
                            .orElse(lib.misc_entry_point()))
                    .filter(entry_point -> entry_point != null) //...that has a valid entry point...
                    .findFirst().map(entry_point -> { // ... grab the first and ...
                        return BeanTemplateUtils.clone(bucket).with(DataBucketBean::analytic_thread,
                                BeanTemplateUtils.clone(bucket.analytic_thread())
                                        .with(AnalyticThreadBean::jobs, bucket.analytic_thread().jobs().stream()
                                                .map(job -> BeanTemplateUtils.clone(job)
                                                        .with(AnalyticThreadJobBean::entry_point, entry_point) //...set that entry point in all the jobs...
                                                        .done())
                                                .collect(Collectors.toList()))
                                        .done())
                                .done();
                    }).orElse(bucket); // (if anything fails just return the bucket)               
        } else if (BATCH_ENRICHMENT_TECH_NAME.equals(technology) // enrichment is specified
                && (null != bucket.batch_enrichment_configs()) // there is a batch topology specified
                && bucket.batch_enrichment_configs().stream()
                        .filter(cfg -> Optional.ofNullable(cfg.enabled()).orElse(true)).findAny().isPresent()
                && isBatchEnrichmentType(bucket) // it is a batch enrichment bucket
        ) {
            return bucket; // nothing to do here the entry points are inferred from the configurations
        } else
            return bucket;
    });
}

From source file:com.joyent.manta.client.MantaClient.java

/**
 * <p>Finds all directories and files recursively under a given path. Since
 * this method returns a {@link Stream}, consumers can add their own
 * additional filtering based on path, object type or other criteria.</p>
 *
 * <p>This method will make each request to each subdirectory in parallel.
 * Parallelism settings are set by JDK system property:
 * <code>java.util.concurrent.ForkJoinPool.common.parallelism</code></p>
 *
 * <p>When using a filter with this method, if the filter matches a directory,
 * then all subdirectory results for that directory will be excluded. If you
 * want to perform a match against all results, then use {@link #find(String)}
 * and then filter on the stream returned.</p>
 *
 * <p><strong>WARNING:</strong> this method is not atomic and therefore not
 * safe if other operations are performed on the directory structure while
 * it is running.</p>
 *
 * @param path directory path
 * @param filter predicate class used to filter all results returned
 * @return A recursive unsorted {@link Stream} of {@link MantaObject}
 *         instances representing the contents of all subdirectories.
 */
public Stream<MantaObject> find(final String path, final Predicate<? super MantaObject> filter) {
    /* We read directly from the iterator here to reduce the total stack
     * frames and to reduce the amount of abstraction to a minimum.
     *
     * Within this loop, we store all of the objects found in memory so
     * that we can later query find() methods for the directory objects
     * in parallel. */
    final Stream.Builder<MantaObject> objectBuilder = Stream.builder();
    final Stream.Builder<MantaObject> dirBuilder = Stream.builder();

    try (MantaDirectoryListingIterator itr = streamingIterator(path)) {
        while (itr.hasNext()) {
            final Map<String, Object> item = itr.next();
            final MantaObject obj = MantaObjectConversionFunction.INSTANCE.apply(item);

            /* We take a predicate as a method parameter because it allows
             * us to filter at the highest level within this iterator. If
             * we just passed the stream as is back to the user, then
             * they would have to filter the results *after* all of the
             * HTTP requests were made. This way the filter can help limit
             * the total number of HTTP requests made to Manta. */
            if (filter == null || filter.test(obj)) {
                objectBuilder.accept(obj);

                if (obj.isDirectory()) {
                    dirBuilder.accept(obj);
                }
            }
        }
    }

    /* All objects within this directory should be included in the results,
     * so we have a stream stored here that will later be concatenated. */
    final Stream<MantaObject> objectStream = objectBuilder.build();

    /* Directories are processed in parallel because it is the only unit
     * within our abstractions that can be properly done in parallel.
     * MantaDirectoryListingIterator forces all paging of directory
     * listings to be sequential requests. However, it works fine to
     * run multiple MantaDirectoryListingIterator instances per request.
     * That is exactly what we are doing here using streams which is
     * allowing us to do the recursive calls in a lazy fashion.
     *
     * From an HTTP request perspective, this means that only the listing for
     * the current directory is performed, and no other listing
     * will be performed until the stream is read.
     */
    try {
        final Stream<MantaObject> dirStream = findForkJoinPool
                .submit(() -> dirBuilder.build().parallel().flatMap(obj -> find(obj.getPath(), filter))).get();

        /* Due to the way we concatenate, the results will be quite out of order;
         * if a consumer needs sorted results, that is their responsibility. */
        final Stream<MantaObject> stream = Stream.concat(objectStream, dirStream);

        danglingStreams.add(stream);

        return stream;
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return Stream.empty();
    } catch (ExecutionException e) {
        throw new MantaException(e.getCause());
    }
}
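
A hypothetical usage sketch, assuming an already-configured MantaClient instance named client and a made-up path: keep only the directories found recursively and print their paths.

// Per the implementation above, a directory that passes the filter is also recursed into.
try (Stream<MantaObject> dirs = client.find("/user/stor/reports", MantaObject::isDirectory)) {
    dirs.map(MantaObject::getPath).forEach(System.out::println);
}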