List of usage examples for java.util.stream.Stream#peek(Consumer)
Stream<T> peek(Consumer<? super T> action);
From source file: com.github.aptd.simulation.elements.common.CMath.java
/**
 * Attaches a side-effect to a stream of matrices: each matrix that flows
 * through the returned stream is formatted as a string (with a trailing
 * space) and handed to the given consumer. The stream itself is returned
 * unchanged, so callers can continue composing on it.
 *
 * @param p_stream stream of matrices
 * @param p_consumer consumer receiving the formatted string of each matrix
 * @return the input stream with the consumer attached as a peek action
 */
public static Stream<DoubleMatrix1D> matrixconsumer(final Stream<DoubleMatrix1D> p_stream, final Consumer<String> p_consumer) {
    return p_stream.peek(l_matrix -> p_consumer.accept(MATRIXFORMAT.toString(l_matrix).concat(" ")));
}
From source file: com.ikanow.aleph2.analytics.services.GraphBuilderEnrichmentService.java
// NOTE(review): this method deliberately exploits the single-use nature of Java
// streams. If the service is enabled AND a delegate is present, the delegate
// consumes `batch` (with an emitting peek attached); the later batch.forEach(...)
// then throws IllegalStateException, which is intentionally swallowed. If the
// delegate never ran, the forEach performs the emission instead. Reformatted
// with comments only; code tokens unchanged.
@Override
public void onObjectBatch(Stream<Tuple2<Long, IBatchRecord>> batch, Optional<Integer> batch_size,
        Optional<JsonNode> grouping_key) {
    if (_enabled.get()) {
        // Also process +annoying hack to ensure the stream is also emitted normally
        _delegate.optional()
                .ifPresent(delegate -> delegate.onObjectBatch(
                        // peek emits each record as the delegate pulls it through the pipeline
                        batch.peek(t2 -> _context.get().emitImmutableObject(t2._1(), t2._2().getJson(),
                                Optional.empty(), Optional.empty(), grouping_key)),
                        batch_size, grouping_key));
    }
    try {
        // Passthrough if the stream hasn't been processed (ie not enabled), else harmless error
        batch.forEach(t2 -> _context.get().emitImmutableObject(t2._1(), t2._2().getJson(), Optional.empty(),
                Optional.empty(), grouping_key));
    } catch (IllegalStateException e) {
        // just means the stream was already processed, ie the above peek worked, ie we're good
    }
}
From source file: org.codice.ddf.admin.application.service.migratable.TaskList.java
/**
 * Executes all tasks defined in the first available operation/group of tasks.
 *
 * <p><i>Note:</i> The task list will be cleared unless we have exceeded the maximum number of
 * attempts for the first available operation.
 *
 * @return <code>true</code> if all tasks in the first available operation group were successful;
 *     <code>false</code> otherwise or if we have exceeded the maximum number of attempts for the
 *     first available operation
 */
public boolean execute() {
    LOGGER.debug("Executing {}s import", type);
    final Operation op = getOperation().orElse(null);
    if (op == null) { // if no groups have tasks
        LOGGER.debug("No {} tasks recorded", type);
        return true;
    }
    if (LOGGER.isDebugEnabled()) {
        groups.entrySet().forEach(
                e -> LOGGER.debug("{} tasks recorded for {}s: {}", e.getKey(), type, e.getValue().keySet()));
        compoundGroups.entrySet().forEach(
                e -> LOGGER.debug("{} compound tasks recorded for {}s: {}", e.getKey(), type, e.getValue()));
    }
    // NOTE(review): lower-cased with the default locale; presumably fine since operation
    // names appear only in log/error text — confirm no locale-sensitive comparison relies on it
    final String opName = op.name().toLowerCase(Locale.getDefault());
    // consume one attempt for this operation (counter is decremented even on failure)
    final int n = attemptsLeft.get(op).getAndDecrement();
    if (n <= 0) { // too many attempts for this operation already, fail!
        LOGGER.debug("No more {} tasks attempts left for {}s", opName, type);
        report.recordOnFinalAttempt(
                new MigrationException("Import error: too many %ss %s attempts", type, opName));
        return false;
    }
    LOGGER.debug("{} tasks attempts left for {}s: {}", opName, type, n);
    final Map<String, Predicate<ProfileMigrationReport>> tasks = groups.get(op);
    try {
        boolean result = true; // until proven otherwise
        if (tasks != null) {
            Stream<Map.Entry<String, Predicate<ProfileMigrationReport>>> s = tasks.entrySet().stream();
            if (LOGGER.isDebugEnabled()) {
                // peek used purely for trace logging; attached only when debug is on
                s = s.peek(e -> LOGGER.debug("Executing {} task for {} '{}'", opName, type, e.getKey()));
            }
            // reduce (not allMatch) is used deliberately: every task runs even after one fails,
            // then all results are 'and'-ed together
            result &= s.map(Map.Entry::getValue).map(t -> t.test(report)) // execute each tasks in the first group found
                    .reduce(true, (a, b) -> a && b); // 'and' all tasks' results
        }
        final CompoundTask<?> compoundTask = compoundGroups.get(op);
        if (compoundTask != null) {
            LOGGER.debug("Executing {} compound task for {}s", opName, type);
            result &= compoundTask.test(report);
        }
        return result;
    } finally {
        // clear all other tasks since we only want to execute the first group each time we fill the
        // list to ensure we re-compute based on whatever would have changed as a result of executing
        // the tasks for a group
        groups.clear();
        compoundGroups.clear();
    }
}