Example usage for java.util.stream Stream forEach

Introduction

This page collects usage examples for java.util.stream Stream.forEach.

Prototype

void forEach(Consumer<? super T> action);

Document

Performs an action for each element of this stream.
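
For a quick, self-contained sketch before the real-world examples (class and variable names here are illustrative): forEach is a terminal operation, so it consumes the stream and nothing can be chained after it.

import java.util.stream.Stream;

public class ForEachDemo {
    public static void main(String[] args) {
        // forEach applies the given Consumer to every element of the stream.
        Stream.of("alpha", "beta", "gamma")
                .forEach(s -> System.out.println(s.toUpperCase()));
        // Prints ALPHA, BETA, GAMMA; sequential streams preserve encounter
        // order, while parallel streams make no such guarantee.
    }
}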

Usage

From source file:com.joyent.manta.client.MantaClientIT.java

@Test
public final void testList() throws IOException {
    final String pathPrefix = String.format("%s/%s", testPathPrefix, UUID.randomUUID());
    mantaClient.putDirectory(pathPrefix, null);

    mantaClient.put(String.format("%s/%s", pathPrefix, UUID.randomUUID()), "");
    mantaClient.put(String.format("%s/%s", pathPrefix, UUID.randomUUID()), "");
    final String subDir = pathPrefix + "/" + UUID.randomUUID().toString();
    mantaClient.putDirectory(subDir, null);
    mantaClient.put(String.format("%s/%s", subDir, UUID.randomUUID()), "");
    final Stream<MantaObject> objs = mantaClient.listObjects(pathPrefix);

    final AtomicInteger count = new AtomicInteger(0);
    objs.forEach(obj -> {
        count.incrementAndGet();
        Assert.assertTrue(obj.getPath().startsWith(testPathPrefix));
    });

    Assert.assertEquals(3, count.get());
}

From source file:com.ikanow.aleph2.graph.titan.services.TitanGraphService.java

/** Deletes a bucket
 * @param bucket
 * @param secondary_buffer
 * @param bucket_or_buffer_getting_deleted
 * @return
 */
private CompletableFuture<BasicMessageBean> handleBucketDeletionRequest_internal(DataBucketBean bucket,
        Optional<String> secondary_buffer, boolean bucket_or_buffer_getting_deleted) {

    //TODO (ALEPH-15): check if the indexes exist - just return if so

    if (secondary_buffer.isPresent()) {
        return CompletableFuture.completedFuture(ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(),
                "handleBucketDeletionRequest", ErrorUtils.BUFFERS_NOT_SUPPORTED, bucket.full_name()));
    }

    //TODO (ALEPH-15): At some point need to be able for services to (optionally) request batch enrichment jobs - eg would be much nicer to fire this off as a distributed job

    return CompletableFuture.runAsync(() -> {

        try {
            Thread.sleep(1000L); // just give the indexes a moment to refresh...
        } catch (Exception e) {
            // best-effort pause, so interruption is safe to ignore
        }

        final TitanTransaction tx = _titan.buildTransaction().start();

        //DEBUG
        //final com.fasterxml.jackson.databind.ObjectMapper titan_mapper = _titan.io(org.apache.tinkerpop.gremlin.structure.io.IoCore.graphson()).mapper().create().createMapper();

        @SuppressWarnings("unchecked")
        final Stream<TitanVertex> vertices_to_check = Optionals.<TitanVertex>streamOf(
                tx.query().has(GraphAnnotationBean.a2_p, bucket.full_name()).vertices(), false);
        vertices_to_check.forEach(v -> {
            {
                final Iterator<VertexProperty<String>> props = v.<String>properties(GraphAnnotationBean.a2_p);
                while (props.hasNext()) {
                    final VertexProperty<String> prop = props.next();
                    if (bucket.full_name().equals(prop.value())) {
                        prop.remove();
                    }
                }
            }
            {
                final Iterator<VertexProperty<String>> props = v.<String>properties(GraphAnnotationBean.a2_p);
                if (!props.hasNext()) { // can delete this bucket
                    v.remove();
                }
            }
        });
        @SuppressWarnings("unchecked")
        final Stream<TitanEdge> edges_to_check = Optionals.<TitanEdge>streamOf(
                tx.query().has(GraphAnnotationBean.a2_p, bucket.full_name()).edges(), false);
        edges_to_check.forEach(e -> {
            e.remove(); // (can only have one edge so delete it)
        });

        tx.commit();
    }).thenApply(__ -> ErrorUtils.buildSuccessMessage(this.getClass().getSimpleName(),
            "handleBucketDeletionRequest", "Completed", "handleBucketDeletionRequest"))
            .exceptionally(t -> ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(),
                    "handleBucketDeletionRequest", ErrorUtils.getLongForm("{0}", t),
                    "handleBucketDeletionRequest"));

}

From source file:com.github.jsonj.JsonArray.java

public JsonArray(Stream<Object> s) {
    super();
    s.forEach(o -> this.addObject(o));
}

From source file:com.ikanow.aleph2.analytics.services.GraphBuilderEnrichmentService.java

@Override
public void onObjectBatch(Stream<Tuple2<Long, IBatchRecord>> batch, Optional<Integer> batch_size,
        Optional<JsonNode> grouping_key) {
    if (_enabled.get()) { // Also process; an annoying hack to ensure the stream is still emitted normally

        _delegate.optional()
                .ifPresent(
                        delegate -> delegate.onObjectBatch(
                                batch.peek(t2 -> _context.get().emitImmutableObject(t2._1(), t2._2().getJson(),
                                        Optional.empty(), Optional.empty(), grouping_key)),
                                batch_size, grouping_key));
    }
    try { // Passthrough if the stream hasn't been processed (ie not enabled), else harmless error
        batch.forEach(t2 -> _context.get().emitImmutableObject(t2._1(), t2._2().getJson(), Optional.empty(),
                Optional.empty(), grouping_key));
    } catch (IllegalStateException e) {
        // just means the stream was already processed, ie the above peek worked, ie we're good
    }
}
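
The try/catch above works because a JDK stream can be traversed only once: if the delegate consumed the peeked stream, the second forEach throws IllegalStateException, which the code treats as confirmation that the peek-based emit already ran. A minimal standalone sketch of that single-use behavior (illustrative names, not part of the Aleph2 source):

import java.util.stream.Stream;

public class SingleUseStreamDemo {
    public static void main(String[] args) {
        Stream<String> batch = Stream.of("a", "b");

        // First terminal operation consumes the stream; peek runs per element.
        batch.peek(x -> System.out.println("emit " + x)).forEach(x -> {});

        try {
            batch.forEach(System.out::println); // second traversal is illegal
        } catch (IllegalStateException e) {
            // "stream has already been operated upon or closed"
            System.out.println("already consumed: " + e.getMessage());
        }
    }
}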

From source file:com.intuit.wasabi.repository.cassandra.impl.CassandraAssignmentsRepository.java

/**
 * Populate existing user assignments for the given user, application, and context.
 * This method makes use of the provided experimentMap to avoid a database call to fetch the experiment object.
 *
 * @param userID        User ID
 * @param appLabel      Application Label
 * @param context       Environment context
 * @param experimentMap experiment map to fetch experiment label
 * @return List of assignments as pairs of Experiment and bucket label.
 */
@Override
@Timed
public List<Pair<Experiment, String>> getAssignments(User.ID userID, Application.Name appLabel, Context context,
        Map<Experiment.ID, Experiment> experimentMap) {
    final Stream<ExperimentUserByUserIdContextAppNameExperimentId> experimentUserStream = getUserIndexStream(
            userID.toString(), appLabel.toString(), context.getContext());
    List<Pair<Experiment, String>> result = new ArrayList<>();
    experimentUserStream.forEach((ExperimentUserByUserIdContextAppNameExperimentId t) -> {
        Experiment exp = experimentMap.get(Experiment.ID.valueOf(t.getExperimentId()));
        if (nonNull(exp)) {
            result.add(new ImmutablePair<>(exp, Optional.ofNullable(t.getBucket()).orElseGet(() -> "null")));
        } else {
            LOGGER.debug("{} experiment id is not present in the experimentMap...", t.getExperimentId());
        }
    });
    return result;
}

From source file:com.nextdoor.bender.handler.BaseHandler.java

/**
 * Method called by Handler implementations to process records.
 *
 * @param context Lambda invocation context.
 * @throws HandlerException
 */
private void processInternal(Context context) throws HandlerException {
    Stat runtime = new Stat("runtime.ns");
    runtime.start();

    Source source = this.getSource();
    DeserializerProcessor deser = source.getDeserProcessor();
    List<OperationProcessor> operations = source.getOperationProcessors();
    List<String> containsStrings = source.getContainsStrings();
    List<Pattern> regexPatterns = source.getRegexPatterns();

    this.getIpcService().setContext(context);

    Iterator<InternalEvent> events = this.getInternalEventIterator();

    /*
     * For logging purposes log when the function started running
     */
    this.monitor.invokeTimeNow();

    AtomicLong eventCount = new AtomicLong(0);
    AtomicLong oldestArrivalTime = new AtomicLong(System.currentTimeMillis());
    AtomicLong oldestOccurrenceTime = new AtomicLong(System.currentTimeMillis());

    /*
     * eventQueue allows for InternalEvents to be pulled from the Iterator and published to a
     * stream. A Thread is created that loops through events in the iterator and offers them to the
     * queue. Note that offering will be blocked if the queue is full (back pressure being applied).
     * When the iterator reaches the end (hasNext = false) the queue is closed.
     */
    this.eventQueue = new Queue<InternalEvent>(new LinkedBlockingQueue<InternalEvent>(this.queueSize));

    /*
     * Thread will live for duration of invocation and supply Stream with events.
     */
    new Thread(new Runnable() {
        @Override
        public void run() {
            while (events.hasNext()) {
                try {
                    eventQueue.offer(events.next());
                } catch (Queue.ClosedQueueException e) {
                    break;
                }
            }
            try {
                eventQueue.close();
            } catch (Queue.ClosedQueueException e) {
            }
        }
    }).start();

    Stream<InternalEvent> input = this.eventQueue.jdkStream();

    /*
     * Filter out raw events
     */
    Stream<InternalEvent> filtered = input.filter(
            /*
             * Perform regex filter
             */
            ievent -> {
                eventCount.incrementAndGet();
                String eventStr = ievent.getEventString();

                /*
                 * Apply String contains filters before deserialization
                 */
                for (String containsString : containsStrings) {
                    if (eventStr.contains(containsString)) {
                        return false;
                    }
                }

                /*
                 * Apply regex patterns before deserialization
                 */
                for (Pattern regexPattern : regexPatterns) {
                    Matcher m = regexPattern.matcher(eventStr);

                    if (m.find()) {
                        return false;
                    }
                }

                return true;
            });

    /*
     * Deserialize
     */
    Stream<InternalEvent> deserialized = filtered.map(ievent -> {
        DeserializedEvent data = deser.deserialize(ievent.getEventString());

        if (data == null || data.getPayload() == null) {
            logger.warn("Failed to deserialize: " + ievent.getEventString());
            return null;
        }

        ievent.setEventObj(data);
        return ievent;
    }).filter(Objects::nonNull);

    /*
     * Perform Operations
     */
    Stream<InternalEvent> operated = deserialized;
    for (OperationProcessor operation : operations) {
        operated = operation.perform(operated);
    }

    /*
     * Serialize
     */
    Stream<InternalEvent> serialized = operated.map(ievent -> {
        try {
            String raw = this.ser.serialize(this.wrapper.getWrapped(ievent));
            ievent.setSerialized(raw);
            return ievent;
        } catch (SerializationException e) {
            return null;
        }
    }).filter(Objects::nonNull);

    /*
     * Transport
     */
    serialized.forEach(ievent -> {
        /*
         * Update times
         */
        updateOldest(oldestArrivalTime, ievent.getArrivalTime());
        updateOldest(oldestOccurrenceTime, ievent.getEventTime());

        try {
            this.getIpcService().add(ievent);
        } catch (TransportException e) {
            logger.warn("error adding event", e);
        }
    });

    /*
     * Wait for transporters to finish
     */
    try {
        this.getIpcService().flush();
    } catch (TransportException e) {
        throw new HandlerException("encounted TransportException while shutting down ipcService", e);
    } catch (InterruptedException e) {
        throw new HandlerException("thread was interruptedwhile shutting down ipcService", e);
    } finally {
        String evtSource = this.getSourceName();

        runtime.stop();

        if (!this.skipWriteStats) {
            writeStats(eventCount.get(), oldestArrivalTime.get(), oldestOccurrenceTime.get(), evtSource,
                    runtime);
        }

        if (logger.isTraceEnabled()) {
            getGCStats();
        }
    }
}
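
The Queue used above is Bender's own class, but the underlying pattern, bridging a pull-based Iterator to a push-based Stream through a bounded blocking queue so that a full queue blocks the producer thread (back pressure), can be sketched with plain JDK types. The POISON sentinel and the Java 9+ takeWhile below are choices of this sketch, not Bender's API:

import java.util.Iterator;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.stream.Stream;

public class IteratorToStreamDemo {
    private static final String POISON = "__END__"; // sentinel marking end of input

    public static void main(String[] args) throws InterruptedException {
        Iterator<String> events = List.of("a", "b", "c").iterator();
        BlockingQueue<String> queue = new LinkedBlockingQueue<>(2); // bounded: applies back pressure

        // Producer thread: pulls from the iterator and blocks when the queue is full.
        Thread producer = new Thread(() -> {
            try {
                while (events.hasNext()) {
                    queue.put(events.next());
                }
                queue.put(POISON); // "close" the queue
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        producer.start();

        // Consumer side: expose the queue as a Stream until the sentinel arrives.
        Stream<String> stream = Stream.generate(() -> {
            try {
                return queue.take();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return POISON;
            }
        }).takeWhile(s -> !POISON.equals(s));

        stream.forEach(System.out::println); // prints a, b, c
        producer.join();
    }
}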

From source file:com.ikanow.aleph2.analytics.services.DeduplicationService.java

@Override
public void onObjectBatch(final Stream<Tuple2<Long, IBatchRecord>> batch, final Optional<Integer> batch_size,
        final Optional<JsonNode> grouping_key) {
    if (_deduplication_is_disabled.get()) {
        // no deduplication, generally shouldn't be here...
        //.. but if we are, do the best we can
        batch.forEach(t2 -> _context.get().emitImmutableObject(t2._1(), t2._2().getJson(), Optional.empty(),
                Optional.empty(), Optional.empty()));
        return;
    }

    // Create big query

    final Tuple3<QueryComponent<JsonNode>, List<Tuple2<JsonNode, Tuple2<Long, IBatchRecord>>>, Either<String, List<String>>> fieldinfo_dedupquery_keyfields = getDedupQuery(
            batch, _dedup_fields.get(), _db_mapper.get());

    // Get duplicate results

    final Tuple2<List<String>, Boolean> fields_include = getIncludeFields(_policy.get(), _dedup_fields.get(),
            _timestamp_field.get());

    final CompletableFuture<Iterator<JsonNode>> dedup_res = fieldinfo_dedupquery_keyfields._2().isEmpty()
            ? CompletableFuture.completedFuture(Collections.<JsonNode>emptyList().iterator())
            : _dedup_context.get().getObjectsBySpec(fieldinfo_dedupquery_keyfields._1(), fields_include._1(),
                    fields_include._2()).thenApply(cursor -> cursor.iterator());

    // Wait for it to finish

    //(create handy results structure if so)
    final LinkedHashMap<JsonNode, LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> mutable_obj_map = fieldinfo_dedupquery_keyfields
            ._2().stream()
            .collect(Collector.of(
                    () -> new LinkedHashMap<JsonNode, LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>>(),
                    (acc, t2) -> {
                        // (ie only the first element is added, duplicate elements are removed)
                        final Tuple3<Long, IBatchRecord, ObjectNode> t3 = Tuples._3T(t2._2()._1(), t2._2()._2(),
                                _mapper.createObjectNode());
                        acc.compute(t2._1(), (k, v) -> {
                            final LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>> new_list = (null == v)
                                    ? new LinkedList<>()
                                    : v;
                            new_list.add(t3);
                            return new_list;
                        });
                    }, (map1, map2) -> {
                        map1.putAll(map2);
                        return map1;
                    }));

    //TODO (ALEPH-20): add timestamps to annotation
    //TODO (ALEPH-20): support different timestamp fields for the different buckets
    //TODO (ALEPH-20): really need to support >1 current enrichment job 
    //                 ^^(Really really longer term you should be able to decide what objects you want and what you don't  <- NOTE: don't remember what i meant here)

    final Iterator<JsonNode> cursor = dedup_res.join();

    // Handle the results

    final Stream<JsonNode> records_to_delete = Lambdas.get(() -> {
        if (isCustom(_doc_schema.get().deduplication_policy())
                || _doc_schema.get().delete_unhandled_duplicates()) {
            return Optionals.streamOf(cursor, true)
                    .collect(Collectors.groupingBy(
                            ret_obj -> getKeyFieldsAgain(ret_obj, fieldinfo_dedupquery_keyfields._3())))
                    .entrySet().stream().<JsonNode>flatMap(kv -> {

                        final Optional<JsonNode> maybe_key = kv.getKey();
                        final Optional<LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> matching_records = maybe_key
                                .map(key -> mutable_obj_map.get(key));

                        // Stats:
                        _mutable_stats.duplicate_keys++;
                        _mutable_stats.duplicates_existing += kv.getValue().size();
                        _mutable_stats.duplicates_incoming += matching_records.map(l -> l.size()).orElse(0);

                        //DEBUG
                        //System.out.println("?? " + kv.getValue().size() + " vs " + maybe_key + " vs " + matching_records.map(x -> Integer.toString(x.size())).orElse("(no match)"));

                        return matching_records
                                .<Stream<JsonNode>>map(records -> handleDuplicateRecord(_doc_schema.get(),
                                        _custom_handler.optional().map(
                                                handler -> Tuples._2T(handler, this._custom_context.get())),
                                        _timestamp_field.get(), records, kv.getValue(), maybe_key.get(),
                                        mutable_obj_map))
                                .orElse(Stream.empty());
                    });
        } else {
            Optionals.streamOf(cursor, true).forEach(ret_obj -> {
                final Optional<JsonNode> maybe_key = getKeyFieldsAgain(ret_obj,
                        fieldinfo_dedupquery_keyfields._3());
                final Optional<LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> matching_records = maybe_key
                        .map(key -> mutable_obj_map.get(key));

                //DEBUG
                //System.out.println("?? " + ret_obj + " vs " + maybe_key + " vs " + matching_record.map(x -> x._2().getJson().toString()).orElse("(no match)"));

                // Stats:
                _mutable_stats.duplicate_keys++;
                _mutable_stats.duplicates_existing++;
                _mutable_stats.duplicates_incoming += matching_records.map(l -> l.size()).orElse(0);

                matching_records.ifPresent(records -> handleDuplicateRecord(_doc_schema.get(),
                        _custom_handler.optional()
                                .map(handler -> Tuples._2T(handler, this._custom_context.get())),
                        _timestamp_field.get(), records, Arrays.asList(ret_obj), maybe_key.get(),
                        mutable_obj_map));
            });
            return Stream.<JsonNode>empty();
        }
    });

    final List<Object> ids = records_to_delete.map(j -> jsonToObject(j)).filter(j -> null != j)
            .collect(Collectors.toList());

    if (!ids.isEmpty()) { // fire a bulk deletion request
        mutable_uncompleted_deletes.add(
                _dedup_context.get().deleteObjectsBySpec(CrudUtils.allOf().withAny(AnnotationBean._ID, ids)));

        _mutable_stats.deleted += ids.size();

        //(quickly see if we can reduce the number of outstanding requests)
        final Iterator<CompletableFuture<Long>> it = mutable_uncompleted_deletes.iterator();
        while (it.hasNext()) {
            final CompletableFuture<Long> cf = it.next();
            if (cf.isDone()) {
                it.remove();
            } else {
                break; // ie stop as soon as we hit one that isn't complete
            }
        }
    }

    _mutable_stats.nonduplicate_keys += mutable_obj_map.size();

    if (Optional.ofNullable(_doc_schema.get().custom_finalize_all_objects()).orElse(false)) {
        mutable_obj_map.entrySet().stream()
                .forEach(kv -> handleCustomDeduplication(
                        _custom_handler.optional()
                                .map(handler -> Tuples._2T(handler, this._custom_context.get())),
                        kv.getValue(), Collections.emptyList(), kv.getKey()));
    } else { // Just emit the last element of each grouped object set
        mutable_obj_map.values().stream().map(t -> t.peekLast())
                .forEach(t -> _context.get().emitImmutableObject(t._1(), t._2().getJson(), Optional.of(t._3()),
                        Optional.empty(), Optional.empty()));
    }
}

From source file:de.tudarmstadt.lt.seg.app.Segmenter.java

private void run_parallel() throws Exception {

    InputStream in = System.in;
    if (!"-".equals(_filename_in))
        in = new FileInputStream(_filename_in);
    Stream<String> liter = new BufferedReader(new InputStreamReader(in, Charset.defaultCharset())).lines();

    ThreadLocal<ISentenceSplitter> sentenceSplitter = ThreadLocal.withInitial(() -> {
        try {
            return newSentenceSplitter();
        } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    });
    ThreadLocal<ITokenizer> tokenizer = ThreadLocal.withInitial(() -> {
        try {
            return newTokenizer();
        } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    });

    final PrintWriter[] w = new PrintWriter[_parallelism];
    // init writers
    for (int i = 0; i < _parallelism; i++) {
        OutputStream out = System.out;
        if (!"-".equals(_filename_out)) {
            out = new FileOutputStream(String.format("%s_%d", _filename_out, i));
        }
        w[i] = new PrintWriter(new OutputStreamWriter(out, Charset.defaultCharset()));
    }

    BlockingQueue<Runnable> queue = new ArrayBlockingQueue<Runnable>(_parallelism * 2, true);
    ExecutorService es = new ThreadPoolExecutor(_parallelism, _parallelism, 0L, TimeUnit.MILLISECONDS, queue);

    AtomicLong lc = new AtomicLong(0);
    liter.forEach((line) -> {
        // don't submit new tasks; wait until the task queue has some capacity again
        while (queue.remainingCapacity() == 0) {
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
                /* ignore and re-check */
            }
        }
        es.submit(() -> {
            final long docid = lc.incrementAndGet();
            if (docid % 1000 == 0)
                System.err.format("Processing line %d ('%s')%n", docid, _filename_in);
            final int w_i = (int) (docid % _parallelism);
            split_and_tokenize(new StringReader(line.trim()), String.format("%s:%d", _filename_in, docid),
                    sentenceSplitter.get(), tokenizer.get(), _level_filter, _level_normalize, _merge_types,
                    _merge_tokens, _separator_sentence, _separator_token, _separator_desc, w[w_i]);

        });
    });
    es.shutdown();
    es.awaitTermination(Integer.MAX_VALUE, TimeUnit.DAYS);

    // TODO: the stream parallelism version does not work because it submits too many threads at once
    //      AtomicLong lc = new AtomicLong(0);
    //      ForkJoinPool forkJoinPool = new ForkJoinPool(_parallelism);
    //      forkJoinPool.submit(() -> 
    //         liter.parallel().forEach((line) -> {
    //            final long docid = lc.incrementAndGet();
    //            if(docid % 1000 == 0)
    //               System.err.format("Processing line %d ('%s')%n", docid, _filename_in);
    //   
    //            String l = line.replace("\\t", "\t").replace("\\n", "\n");
    //            split_and_tokenize(
    //                  new StringReader(l),
    //                  String.format("%s:%d", _filename_in, docid),
    //                  sentenceSplitter.get(), 
    //                  tokenizer.get(), 
    //                  _level_filter,
    //                  _level_normalize,
    //                  _merge_types,
    //                  _merge_tokens,
    //                  _separator_sentence,
    //                  _separator_token,
    //                  _separator_desc,
    //                  w);
    //      })).get();

}
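
The busy-wait on queue.remainingCapacity() above works, but a ThreadPoolExecutor can apply the same back pressure without polling: with CallerRunsPolicy, submitting against a full queue makes the calling thread execute the task itself, which naturally throttles liter.forEach(...). A small sketch of that alternative (an assumption of this note, not what the original source does):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.stream.IntStream;

public class BackPressureDemo {
    public static void main(String[] args) throws InterruptedException {
        int parallelism = 4; // illustrative value

        // When the bounded queue is full, CallerRunsPolicy makes the submitter
        // run the task itself instead of rejecting it, so submission slows
        // down to match processing speed.
        ExecutorService es = new ThreadPoolExecutor(parallelism, parallelism, 0L,
                TimeUnit.MILLISECONDS, new ArrayBlockingQueue<>(parallelism * 2),
                new ThreadPoolExecutor.CallerRunsPolicy());

        IntStream.range(0, 100).forEach(i -> es.submit(() -> process(i)));

        es.shutdown();
        es.awaitTermination(1, TimeUnit.MINUTES);
    }

    private static void process(int i) {
        System.out.println("processed " + i);
    }
}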

From source file:com.ejie.uda.jsonI18nEditor.Editor.java

public void importResources(Path dir) {

    Stream<Path> filter;

    try {
        if (!closeCurrentSession()) {
            return;
        }
        if (Files.isDirectory(dir, LinkOption.NOFOLLOW_LINKS)) {
            reset();
            resourcesDir = dir;
            filter = Files.walk(resourcesDir, 1).filter(path -> Resources.isResource(path));

        } else {
            reset();
            // An individual i18n file was dragged in; we need to obtain the resources related to the bundle it belongs to.
            Pattern regex = Pattern.compile(BUNDLE_REGEX);
            resourcesDir = dir.getParent();
            inputFile = dir;
            Matcher regexMatcher = regex.matcher(dir.getFileName().toString());
            if (regexMatcher.find()) {
                this.bundle = regexMatcher.group(1);
                filter = Files.walk(resourcesDir, 1).filter(path -> Resources.isResource(path, this.bundle));
            } else {
                showError(MessageBundle.get("resources.open.error.multiple"));
                return;
            }
        }

        filter.forEach(path -> {
            try {
                Resource resource = Resources.read(path);
                setupResource(resource);
            } catch (Exception e) {
                e.printStackTrace();
                showError(MessageBundle.get("resources.open.error.single", path.toString()));
            }
        });

        List<String> recentDirs = settings.getListProperty("history");
        recentDirs.remove(dir.toString()); // the list holds Strings, so remove the String form of the path
        recentDirs.add(dir.toString());
        if (recentDirs.size() > 5) {
            recentDirs.remove(0);
        }
        settings.setProperty("history", recentDirs);
        editorMenu.setRecentItems(Lists.reverse(recentDirs));

        Map<String, String> keys = Maps.newTreeMap();
        resources.forEach(resource -> keys.putAll(resource.getTranslations()));
        List<String> keyList = Lists.newArrayList(keys.keySet());
        translationTree.setModel(new TranslationTreeModel(keyList));

        updateUI();
    } catch (IOException e) {
        e.printStackTrace();
        showError(MessageBundle.get("resources.open.error.multiple"));
    }
}

From source file:com.nextdoor.bender.operation.conditional.ConditionalOperation.java

public Stream<InternalEvent> getOutputStream(Stream<InternalEvent> input) {
    /*
     * outputStreams keeps track of the output Stream of each Condition.
     */
    List<Stream<InternalEvent>> outputStreams = new ArrayList<Stream<InternalEvent>>(
            this.conditionsAndProcs.size());

    /*
     * From a list of operation configurations in each condition construct queues and streams.
     */
    this.filtersAndQueues = new ArrayList<Pair<FilterOperation, Queue<InternalEvent>>>(
            this.conditionsAndProcs.size());
    for (Pair<FilterOperation, List<OperationProcessor>> filterAndProcs : this.conditionsAndProcs) {

        FilterOperation filter = filterAndProcs.getLeft();
        List<OperationProcessor> procs = filterAndProcs.getRight();

        /*
         * Construct a Queue for each conditional. This is the input to each Condition.
         */
        Queue<InternalEvent> queue = new Queue<InternalEvent>(
                new LinkedBlockingQueue<InternalEvent>(procs.size()));

        this.filtersAndQueues.add(new ImmutablePair<FilterOperation, Queue<InternalEvent>>(filter, queue));

        /*
         * Connect the condition's input Queue with operations. Each operation returns a stream with its
         * operation concatenated on.
         */
        Stream<InternalEvent> conditionInput = queue.jdkStream();
        for (OperationProcessor proc : procs) {
            conditionInput = proc.perform(conditionInput);
        }

        /*
         * Last input is the output.
         */
        outputStreams.add(conditionInput);
    }

    /*
     * Condition Consumer Threads
     * 
     * Combine each condition's output stream and write to the output Queue. When all data is consumed
     * the last condition closes the output Queue.
     */
    Queue<InternalEvent> outputQueue = new Queue<InternalEvent>(
            new LinkedBlockingQueue<InternalEvent>(this.conditionsAndProcs.size()));
    AtomicInteger lock = new AtomicInteger(outputStreams.size());

    outputStreams.forEach(stream -> {
        this.es.execute(new StreamToQueue(stream, outputQueue, lock));
    });

    /*
     * Consume input Stream in a thread and publish to each condition's Queue.
     */
    new Thread(new Runnable() {
        @Override
        public void run() {
            input.forEach(ievent -> {
                boolean matches = false;

                for (Pair<FilterOperation, Queue<InternalEvent>> filterAndQueue : filtersAndQueues) {
                    FilterOperation filter = filterAndQueue.getLeft();

                    /*
                     * If event passes the filter offer event to queue.
                     */
                    if (filter.test(ievent)) {
                        filterAndQueue.getRight().offer(ievent);
                        matches = true;
                        break;
                    }
                }

                /*
                 * Send to output queue if no case matches
                 */
                if (!matches && !filterNonMatch) {
                    outputQueue.offer(ievent);
                }
            });

            /*
             * Close queues when source queue is consumed.
             */
            for (Pair<FilterOperation, Queue<InternalEvent>> filterAndQueue : filtersAndQueues) {
                filterAndQueue.getRight().close();
            }
        }
    }).start();

    return outputQueue.jdkStream();
}