Example usage for java.util.stream StreamSupport stream

Introduction

On this page you can find example usage of java.util.stream.StreamSupport.stream.

Prototype

public static <T> Stream<T> stream(Spliterator<T> spliterator, boolean parallel) 

Document

Creates a new sequential or parallel Stream from a Spliterator.
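
For orientation, here is a minimal, self-contained sketch of the prototype above. The class name and list contents are placeholders, not taken from any of the projects below; the second argument chooses between a sequential and a parallel stream.

import java.util.Arrays;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

public class StreamSupportStreamExample {
    public static void main(String[] args) {
        // Any Iterable can supply a Spliterator; a plain List stands in here.
        Iterable<String> iterable = Arrays.asList("a", "b", "c");

        // false requests a sequential stream; pass true for a parallel one.
        Stream<String> stream = StreamSupport.stream(iterable.spliterator(), false);

        stream.map(String::toUpperCase).forEach(System.out::println); // prints A, B, C
    }
}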

Usage

From source file:org.hawkular.metrics.dropwizard.HawkularReporterTest.java

@Test
public void shouldReportIntegerGauges() {
    HawkularReporter reporter = HawkularReporter.builder(registry, "unit-test").useHttpClient(uri -> client)
            .build();

    final Gauge<Integer> gauge = () -> 1;
    registry.register("gauge.integer", gauge);
    reporter.report();

    assertThat(client.getMetricsRestCalls()).hasSize(1);
    JSONObject metrics = new JSONObject(client.getMetricsRestCalls().get(0));
    assertThat(metrics.keySet()).containsOnly("gauges");

    JSONArray gaugesJson = metrics.getJSONArray("gauges");
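    // Stream the gauges JSON array sequentially and index each entry's value by its metric id.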
    Map<String, Double> values = StreamSupport.stream(gaugesJson.spliterator(), false)
            .collect(toMap(idFromRoot::extract, dValueFromRoot::extract));
    assertThat(values).containsOnly(entry("gauge.integer", 1d));
}

From source file:net.staticsnow.nexus.repository.apt.internal.hosted.AptHostedFacet.java

private List<Asset> selectOldPackagesToRemove(String packageName, String arch)
        throws IOException, PGPException {
    if (config.assetHistoryLimit == null) {
        return Collections.emptyList();
    }
    int count = config.assetHistoryLimit;
    StorageTx tx = UnitOfWork.currentTx();
    Map<String, Object> sqlParams = new HashMap<>();
    sqlParams.put(P_PACKAGE_NAME, packageName);
    sqlParams.put(P_ARCHITECTURE, arch);
    sqlParams.put(P_ASSET_KIND, "DEB");
    Iterable<Asset> assets = tx.findAssets(ASSETS_BY_PACKAGE_AND_ARCH, sqlParams,
            Collections.singleton(getRepository()), "");
    List<Asset> removals = new ArrayList<>();
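    // Stream the matching assets and group them by their architecture format attribute.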
    Map<String, List<Asset>> assetsByArch = StreamSupport.stream(assets.spliterator(), false)
            .collect(Collectors.groupingBy(a -> a.formatAttributes().get(P_ARCHITECTURE, String.class)));
    for (Map.Entry<String, List<Asset>> entry : assetsByArch.entrySet()) {
        if (entry.getValue().size() <= count) {
            continue;
        }

        int trimCount = entry.getValue().size() - count;
        Set<String> keepVersions = entry.getValue().stream()
                .map(a -> new Version(a.formatAttributes().get(P_PACKAGE_VERSION, String.class))).sorted()
                .skip(trimCount).map(v -> v.toString()).collect(Collectors.toSet());

        entry.getValue().stream()
                .filter(a -> !keepVersions.contains(a.formatAttributes().get(P_PACKAGE_VERSION, String.class)))
                .forEach((item) -> removals.add(item));
    }

    return removals;
}

From source file:fr.landel.utils.assertor.utils.AssertorIterable.java

private static <I extends Iterable<T>, T> boolean has(final I iterable, final Predicate<T> predicate,
        final boolean all, final EnumAnalysisMode analysisMode) {
    if (EnumAnalysisMode.STANDARD.equals(analysisMode)) {
        if (all) {
            for (final T object : iterable) {
                if (!predicate.test(object)) {
                    return false;
                }
            }
            return true;
        } else {
            for (final T object : iterable) {
                if (predicate.test(object)) {
                    return true;
                }
            }
            return false;
        }
    } else {
        final Stream<T> stream = StreamSupport.stream(iterable.spliterator(),
                EnumAnalysisMode.PARALLEL.equals(analysisMode));
        if (all) {
            return stream.allMatch(predicate);
        } else {
            return stream.anyMatch(predicate);
        }
    }
}

From source file:com.ikanow.aleph2.search_service.elasticsearch.utils.ElasticsearchIndexUtils.java

/** Get a set of field mappings from the "properties" section of a mapping
 * @param index
 * @return
 */
protected static LinkedHashMap<Either<String, Tuple2<String, String>>, JsonNode> getProperties(
        final JsonNode index) {
    return Optional.ofNullable(index.get("properties")).filter(p -> !p.isNull()).map(p -> {
        if (!p.isObject())
            throw new RuntimeException("properties must be object");
        return p;
    }).map(p -> {
        return StreamSupport.stream(Spliterators.spliteratorUnknownSize(p.fields(), Spliterator.ORDERED), false)
                .map(kv -> {
                    if (!kv.getValue().has("type") && !kv.getValue().has("properties"))
                        throw new RuntimeException(SearchIndexErrorUtils
                                .get("field {0} must have a 'type' or 'properties' sub-field", kv.getKey()));
                    return kv;
                }).collect(
                        Collectors.<Map.Entry<String, JsonNode>, Either<String, Tuple2<String, String>>, JsonNode, LinkedHashMap<Either<String, Tuple2<String, String>>, JsonNode>>toMap(
                                kv -> Either.<String, Tuple2<String, String>>left(kv.getKey()),
                                kv -> kv.getValue(), (v1, v2) -> v1, // (should never happen)
                                () -> new LinkedHashMap<Either<String, Tuple2<String, String>>, JsonNode>()));
    }).orElse(new LinkedHashMap<Either<String, Tuple2<String, String>>, JsonNode>());
}

From source file:com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_Buckets.java

/** Gets a list of key/modified pairs from v1 and a list of matching key/modified pairs from V2 (ie _id minus ';')
 * @param bucket_mgmt
 * @param source_db
 * @return tuple of id-vs-(date-or-null-if-not-approved) for v1, id-vs-date for v2
 */
protected static CompletableFuture<Tuple2<Map<String, String>, Map<String, Date>>> compareSourcesToBuckets_get(
        final IManagementCrudService<DataBucketBean> bucket_mgmt, final ICrudService<JsonNode> source_db) {
    // (could make this more efficient by having a regular "did something happen" query with a slower "get everything and resync")
    // (don't forget to add "modified" to the compound index though)
    CompletableFuture<Cursor<JsonNode>> f_v1_sources = source_db.getObjectsBySpec(
            CrudUtils.allOf().when("extractType", "V2DataBucket"),
            Arrays.asList("key", "modified", "isApproved"), true);

    return f_v1_sources.<Map<String, String>>thenApply(v1_sources -> {
        return StreamSupport.stream(v1_sources.spliterator(), false).collect(Collectors.toMap(
                j -> safeJsonGet("key", j).asText(),
                j -> safeJsonGet("isApproved", j).asBoolean() ? safeJsonGet("modified", j).asText() : ""));
    }).<Tuple2<Map<String, String>, Map<String, Date>>>thenCompose(v1_key_datestr_map -> {
        final SingleQueryComponent<DataBucketBean> bucket_query = CrudUtils.allOf(DataBucketBean.class)
                .rangeIn(DataBucketBean::_id, "aleph...bucket.", true, "aleph...bucket/", true);

        return bucket_mgmt.getObjectsBySpec(bucket_query, Arrays.asList(JsonUtils._ID, "modified"), true)
                .<Tuple2<Map<String, String>, Map<String, Date>>>thenApply(c -> {
                    final Map<String, Date> v2_key_date_map = StreamSupport.stream(c.spliterator(), false)
                            .collect(Collectors.toMap(b -> getV1SourceKeyFromBucketId(b._id()), // (convert to v1 source key format)
                                    b -> b.modified()));

                    return Tuples._2T(v1_key_datestr_map, v2_key_date_map);
                });
    });
}

From source file:org.hawkular.metrics.dropwizard.HawkularReporterTest.java

@Test
public void shouldReportLongGauges() {
    HawkularReporter reporter = HawkularReporter.builder(registry, "unit-test").useHttpClient(uri -> client)
            .build();

    final Gauge<Long> gauge = () -> 101L;
    registry.register("gauge.long", gauge);
    reporter.report();

    assertThat(client.getMetricsRestCalls()).hasSize(1);
    JSONObject metrics = new JSONObject(client.getMetricsRestCalls().get(0));
    assertThat(metrics.keySet()).containsOnly("gauges");

    JSONArray gaugesJson = metrics.getJSONArray("gauges");
    Map<String, Double> values = StreamSupport.stream(gaugesJson.spliterator(), false)
            .collect(toMap(idFromRoot::extract, dValueFromRoot::extract));
    assertThat(values).containsOnly(entry("gauge.long", 101d));
}

From source file:ubicrypt.core.Utils.java

public static <T> Stream<T> toStream(final Iterator<T> iterator) {
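    // Wrap the iterator in an ordered Spliterator of unknown size and expose it as a sequential Stream.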
    return StreamSupport.stream(Spliterators.spliteratorUnknownSize(iterator, Spliterator.ORDERED), false);
}

From source file:de.tudarmstadt.lt.seg.app.Segmenter.java

public static void split_and_tokenize(Reader reader, String docid, ISentenceSplitter sentenceSplitter,
        ITokenizer tokenizer, int level_filter, int level_normalize, boolean merge_types, boolean merge_tokens,
        String separator_sentence, String separator_token, String separator_desc, PrintWriter writer) {
    try {
        final StringBuffer buf = new StringBuffer(); // used for checking if the stream is empty; take care when not running sequentially but in parallel!
        sentenceSplitter.init(reader).stream().sequential().forEach(sentence_segment -> {
            if (DEBUG) {
                writer.format("%s%s", docid, separator_desc);
                writer.println(sentence_segment.toString());
                writer.print(separator_sentence);
            }
            if (sentence_segment.type != SegmentType.SENTENCE)
                return;
            tokenizer.init(sentence_segment.asString());
            Stream<String> tokens = null;
            if (DEBUG)
                tokens = tokenizer.stream().map(x -> x.toString() + separator_token);
            else
                tokens = StreamSupport.stream(tokenizer
                        .filteredAndNormalizedTokens(level_filter, level_normalize, merge_types, merge_tokens)
                        .spliterator(), false).map(x -> x + separator_token);
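            // Peek at the first token through the spliterator to skip empty sentences; the remaining tokens are streamed afterwards.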
            Spliterator<String> spliterator = tokens.spliterator();
            tokens = StreamSupport.stream(spliterator, false);
            buf.setLength(0);
            boolean empty = !spliterator.tryAdvance(x -> {
                buf.append(x);
            });
            if (empty)
                return;
            synchronized (writer) {
                // writer.write(Thread.currentThread().getId() + "\t");
                writer.format("%s%s", docid, separator_desc);
                writer.print(buf);
                tokens.forEach(writer::print);
                writer.print(separator_sentence);
                writer.flush();
            }
        });
    } catch (Exception e) {
        Throwable t = e;
        while (t != null) {
            System.err.format("%s: %s%n", t.getClass(), t.getMessage());
            t = t.getCause();
        }
    }
}

From source file:com.simiacryptus.util.Util.java

/**
 * Converts an iterator to a stream.
 *
 * @param <T>      the type parameter
 * @param iterator the iterator
 * @return the stream
 */
public static <T> Stream<T> toIterator(@javax.annotation.Nonnull final Iterator<T> iterator) {
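    // Wrap the iterator in a sized, ordered Spliterator (reported size 1) and return a sequential Stream.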
    return StreamSupport.stream(Spliterators.spliterator(iterator, 1, Spliterator.ORDERED), false);
}

From source file:org.onosproject.p4runtime.ctl.P4RuntimeClientImpl.java

private Collection<PiTableEntry> doDumpTable(PiTableId piTableId, PiPipeconf pipeconf) {

    log.debug("Dumping table {} from {} (pipeconf {})...", piTableId, deviceId, pipeconf.id());

    P4InfoBrowser browser = PipeconfHelper.getP4InfoBrowser(pipeconf);
    int tableId;
    try {
        tableId = browser.tables().getByName(piTableId.id()).getPreamble().getId();
    } catch (P4InfoBrowser.NotFoundException e) {
        log.warn("Unable to dump table: {}", e.getMessage());
        return Collections.emptyList();
    }

    ReadRequest requestMsg = ReadRequest.newBuilder().setDeviceId(p4DeviceId).addEntities(
            Entity.newBuilder().setTableEntry(TableEntry.newBuilder().setTableId(tableId).build()).build())
            .build();

    Iterator<ReadResponse> responses;
    try {
        responses = blockingStub.read(requestMsg);
    } catch (StatusRuntimeException e) {
        log.warn("Unable to dump table {} from {}: {}", piTableId, deviceId, e.getMessage());
        return Collections.emptyList();
    }

    Iterable<ReadResponse> responseIterable = () -> responses;
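    // Stream the read responses, flatten their entity lists, and keep only the table entries.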
    List<TableEntry> tableEntryMsgs = StreamSupport.stream(responseIterable.spliterator(), false)
            .map(ReadResponse::getEntitiesList).flatMap(List::stream)
            .filter(entity -> entity.getEntityCase() == TABLE_ENTRY).map(Entity::getTableEntry)
            .collect(Collectors.toList());

    log.debug("Retrieved {} entries from table {} on {}...", tableEntryMsgs.size(), piTableId, deviceId);

    return TableEntryEncoder.decode(tableEntryMsgs, pipeconf);
}