Example usage for java.util.stream.StreamSupport.stream

List of usage examples for java.util.stream.StreamSupport.stream

Introduction

This page lists example usages of java.util.stream.StreamSupport.stream.

Prototype

public static <T> Stream<T> stream(Spliterator<T> spliterator, boolean parallel) 

Document

Creates a new sequential or parallel Stream from a Spliterator.
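
For orientation, here is a minimal, self-contained sketch of the two call patterns that recur in the examples below (class and variable names are illustrative only, not taken from any of the source files): streaming an Iterable through its spliterator, and streaming a plain Iterator by wrapping it with Spliterators.spliteratorUnknownSize.

import java.util.Iterator;
import java.util.List;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

public class StreamSupportSketch {
    public static void main(String[] args) {
        // Stream an Iterable via its spliterator; 'false' requests a sequential stream.
        Iterable<String> iterable = List.of("a", "b", "c");
        Stream<String> fromIterable = StreamSupport.stream(iterable.spliterator(), false);
        System.out.println(fromIterable.count()); // prints 3

        // Stream a plain Iterator by first wrapping it in an unknown-size spliterator.
        Iterator<String> iterator = List.of("x", "y").iterator();
        Stream<String> fromIterator = StreamSupport.stream(
                Spliterators.spliteratorUnknownSize(iterator, Spliterator.ORDERED), false);
        System.out.println(fromIterator.count()); // prints 2
    }
}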

Usage

From source file:org.graylog.plugins.nats.output.AbstractGelfNatsOutput.java

private static String[] splitByNewline(String text) {
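    // Split the text on newlines, trim each element, drop empties, and collect back into a String[].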
    return StreamSupport.stream(Arrays.spliterator(text.split("\n")), false).map(String::trim)
            .filter(s -> !s.isEmpty()).toArray(String[]::new);
}

From source file:org.xsystem.sql2.dml.DmlCommand.java

public Stream<Map<String, Object>> stream(Connection con, String stmt, List<DmlParams> paramsSpec, Long skip,
        Integer total, Map<String, Object> value) {
    ResultSetIterator rsItr = new ResultSetIterator(con, stmt, paramsSpec, skip, total, value);
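    // Wrap the JDBC ResultSetIterator in an unknown-size spliterator and expose it as a sequential Stream.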
    return StreamSupport.stream(Spliterators.spliteratorUnknownSize(rsItr, 0), false);
}

From source file:TypeServiceTest.java

@Test
public void testCreateBackup() {
    typeService.save("type-test.xml", storedType());
    simulateWait();
    typeService.save("type-test.xml", storedType());

    try (DirectoryStream<Path> s = Files.newDirectoryStream(fileSystemProvider.getFileSystem().getPath("/"))) {
        Iterable<Path> iterable = () -> s.iterator();

        Assert.assertEquals(2L, StreamSupport.stream(iterable.spliterator(), false)
                .filter(p -> p.getFileName().toString().startsWith("type-test")).count());
    } catch (IOException ex) {
        log.fatal(ex);
    }
}

From source file:org.apache.zeppelin.interpreter.launcher.SparkInterpreterLauncher.java

@Override
protected Map<String, String> buildEnvFromProperties(InterpreterLaunchContext context) {
    Map<String, String> env = super.buildEnvFromProperties(context);
    Properties sparkProperties = new Properties();
    String sparkMaster = getSparkMaster(properties);
    for (String key : properties.stringPropertyNames()) {
        if (RemoteInterpreterUtils.isEnvString(key)) {
            env.put(key, properties.getProperty(key));
        }
        if (isSparkConf(key, properties.getProperty(key))) {
            sparkProperties.setProperty(key, toShellFormat(properties.getProperty(key)));
        }
    }

    setupPropertiesForPySpark(sparkProperties);
    setupPropertiesForSparkR(sparkProperties);
    if (isYarnMode() && getDeployMode().equals("cluster")) {
        env.put("ZEPPELIN_SPARK_YARN_CLUSTER", "true");
        sparkProperties.setProperty("spark.yarn.submit.waitAppCompletion", "false");
    }

    StringBuilder sparkConfBuilder = new StringBuilder();
    if (sparkMaster != null) {
        sparkConfBuilder.append(" --master " + sparkMaster);
    }
    if (isYarnMode() && getDeployMode().equals("cluster")) {
        if (sparkProperties.containsKey("spark.files")) {
            sparkProperties.put("spark.files", sparkProperties.getProperty("spark.files") + ","
                    + zConf.getConfDir() + "/log4j_yarn_cluster.properties");
        } else {
            sparkProperties.put("spark.files", zConf.getConfDir() + "/log4j_yarn_cluster.properties");
        }
    }
    for (String name : sparkProperties.stringPropertyNames()) {
        sparkConfBuilder.append(" --conf " + name + "=" + sparkProperties.getProperty(name));
    }
    String useProxyUserEnv = System.getenv("ZEPPELIN_IMPERSONATE_SPARK_PROXY_USER");
    if (context.getOption().isUserImpersonate()
            && (StringUtils.isBlank(useProxyUserEnv) || !useProxyUserEnv.equals("false"))) {
        sparkConfBuilder.append(" --proxy-user " + context.getUserName());
    }
    Path localRepoPath = Paths.get(zConf.getInterpreterLocalRepoPath(), context.getInterpreterSettingId());
    if (isYarnMode() && getDeployMode().equals("cluster") && Files.exists(localRepoPath)
            && Files.isDirectory(localRepoPath)) {
        try {
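            // Join the absolute paths of every regular file in the local repo into a comma-separated --jars list.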
            StreamSupport
                    .stream(Files.newDirectoryStream(localRepoPath, entry -> Files.isRegularFile(entry))
                            .spliterator(), false)
                    .map(jar -> jar.toAbsolutePath().toString()).reduce((x, y) -> x.concat(",").concat(y))
                    .ifPresent(extraJars -> sparkConfBuilder.append(" --jars ").append(extraJars));
        } catch (IOException e) {
            LOGGER.error("Cannot make a list of additional jars from localRepo: {}", localRepoPath, e);
        }

    }

    env.put("ZEPPELIN_SPARK_CONF", sparkConfBuilder.toString());

    // set these env in the order of
    // 1. interpreter-setting
    // 2. zeppelin-env.sh
    // It is encouraged to set env in interpreter setting, but just for backward compatibility,
    // we also fall back to zeppelin-env.sh if it is not specified in interpreter setting.
    for (String envName : new String[] { "SPARK_HOME", "SPARK_CONF_DIR", "HADOOP_CONF_DIR" }) {
        String envValue = getEnv(envName);
        if (envValue != null) {
            env.put(envName, envValue);
        }
    }

    String keytab = zConf.getString(ZeppelinConfiguration.ConfVars.ZEPPELIN_SERVER_KERBEROS_KEYTAB);
    String principal = zConf.getString(ZeppelinConfiguration.ConfVars.ZEPPELIN_SERVER_KERBEROS_PRINCIPAL);

    if (!StringUtils.isBlank(keytab) && !StringUtils.isBlank(principal)) {
        env.put("ZEPPELIN_SERVER_KERBEROS_KEYTAB", keytab);
        env.put("ZEPPELIN_SERVER_KERBEROS_PRINCIPAL", principal);
        LOGGER.info("Run Spark under secure mode with keytab: " + keytab + ", principal: " + principal);
    } else {
        LOGGER.info("Run Spark under non-secure mode as no keytab and principal is specified");
    }
    LOGGER.debug("buildEnvFromProperties: " + env);
    return env;

}

From source file:io.mashin.rich.spark.TestJavaRichRDD.java

@Test
public void testHttpRDD() {
    String serverIP = HttpMockConfig.serverIP();
    int serverPort = HttpMockConfig.serverPort();

    JavaSparkContext sc = sc("testHttpRDD");
    HttpMock mock = new HttpMock();
    mock.start();

    int numPages = 4;

    JavaRDD<String> rdd = JavaRichRDD
            .httpRDD(sc, i -> new HttpGet("http://" + serverIP + ":" + serverPort + "/rdd?page=" + (i + 1)),
                    (i, httpResponse) -> {
                        BufferedReader is = new BufferedReader(
                                new InputStreamReader(httpResponse.getEntity().getContent()));
                        String s = is.readLine();
                        is.close();
                        return Arrays.asList(s.split(",")).iterator();
                    }, numPages)
            .cache();

    assertEquals(numPages, rdd.getNumPartitions());
    assertEquals(numPages * HttpMockConfig.perPage(), rdd.count());

    boolean isValid = rdd.mapPartitionsWithIndex((i, iter) -> {
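        // Drain the partition iterator into a List so each element can be validated by its index.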
        List<String> list = StreamSupport
                .stream(Spliterators.spliteratorUnknownSize(iter, Spliterator.ORDERED), false)
                .collect(Collectors.toList());
        return IntStream.range(0, list.size()).mapToObj(j -> HttpMockConfig.isValidElement(list.get(j), i, j))
                .iterator();
    }, true).reduce(Boolean::logicalAnd);

    assertTrue(isValid);

    sc.stop();
    mock.stop();
}

From source file:org.mskcc.shenkers.data.interval.GIntervalTree.java

public Stream<IntervalFeature> stream() {
    boolean parallel = false;
    int characteristics = 0;
    return intervals.entrySet().stream().flatMap(e -> {
        String chr = e.getKey().chr;
        Strand strand = e.getKey().strand;
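        // Stream the interval tree nodes for this (chromosome, strand) entry and map each to an IntervalFeatureImpl.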
        return StreamSupport
                .stream(Spliterators.spliteratorUnknownSize((Iterator<Node>) e.getValue().iterator(),
                        characteristics), parallel)
                .map(n -> new IntervalFeatureImpl(chr, n.getStart(), n.getEnd(), strand, null));
    });
}

From source file:com.yahoo.elide.graphql.GraphQLEndpoint.java

/**
 * Create handler.
 *
 * @param securityContext security context
 * @param graphQLDocument post data as jsonapi document
 * @return response
 */
@POST
@Consumes(MediaType.APPLICATION_JSON)
public Response post(@Context SecurityContext securityContext, String graphQLDocument) {
    ObjectMapper mapper = elide.getMapper().getObjectMapper();

    JsonNode topLevel;

    try {
        topLevel = mapper.readTree(graphQLDocument);
    } catch (IOException e) {
        log.debug("Invalid json body provided to GraphQL", e);
        // NOTE: Can't get at isVerbose setting here for hardcoding to false. If necessary, we can refactor
        // so this can be set appropriately.
        return buildErrorResponse(new InvalidEntityBodyException(graphQLDocument), false);
    }

    Function<JsonNode, Response> executeRequest = (node) -> executeGraphQLRequest(mapper, securityContext,
            graphQLDocument, node);

    if (topLevel.isArray()) {
        Iterator<JsonNode> nodeIterator = topLevel.iterator();
        Iterable<JsonNode> nodeIterable = () -> nodeIterator;
        // NOTE: Create a non-parallel stream
        // It's unclear whether or not the expectations of the caller would be that requests are intended
        // to run serially even outside of a single transaction. We should revisit this.
        Stream<JsonNode> nodeStream = StreamSupport.stream(nodeIterable.spliterator(), false);
        ArrayNode result = nodeStream.map(executeRequest).map(response -> {
            try {
                return mapper.readTree((String) response.getEntity());
            } catch (IOException e) {
                log.debug("Caught an IO exception while trying to read response body");
                return JsonNodeFactory.instance.objectNode();
            }
        }).reduce(JsonNodeFactory.instance.arrayNode(), (arrayNode, node) -> arrayNode.add(node),
                (left, right) -> left.addAll(right));
        try {
            return Response.ok(mapper.writeValueAsString(result)).build();
        } catch (IOException e) {
            log.error("An unexpected error occurred trying to serialize array response.", e);
            return Response.serverError().build();
        }
    }

    return executeRequest.apply(topLevel);
}

From source file:com.crossover.trial.weather.endpoint.RestWeatherCollectorEndpoint.java

@Transactional(readOnly = true)
@RequestMapping(path = "/collect/airports", method = GET, produces = "application/json")
@ApiResponses(@ApiResponse(code = 200, message = "OK"))
public List<Airport> listAirportIataCodes() {
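    // Materialize the repository's Iterable<Airport> into a List.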
    return StreamSupport.stream(airportRepository.findAll().spliterator(), false).collect(Collectors.toList());
}

From source file:org.opensingular.lib.wicket.util.lambda.ILambdasMixin.java

default <T> IFunction<T, Stream<T>> recursiveIterable(IFunction<T, Iterable<T>> childrenFunction) {
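    // Convert an Iterable to a Stream: null -> empty, Collection -> its own stream, anything else -> a parallel stream over its spliterator.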
    IFunction<Iterable<T>, Stream<T>> toStream = c -> (c == null) ? Stream.empty()
            : (c instanceof Collection) ? ((Collection<T>) c).stream()
                    : StreamSupport.stream(c.spliterator(), true);
    return t -> Stream.concat(Stream.of(t), (t == null) ? Stream.empty()
            : toStream.apply(childrenFunction.apply(t)).flatMap(recursiveIterable(childrenFunction)));
}

From source file:org.apache.james.mailbox.elasticsearch.search.ElasticSearchSearcher.java

private Stream<Pair<MailboxId, Long>> transformResponseToUidStream(SearchResponse searchResponse) {
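    // Stream the search hits and keep only those that can be turned into a (MailboxId, UID) pair.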
    return StreamSupport.stream(searchResponse.getHits().spliterator(), false).map(this::extractContentFromHit)
            .filter(Optional::isPresent).map(Optional::get);
}