Example usage for java.util.stream Stream.map

List of usage examples for java.util.stream Stream.map

Introduction

On this page you can find example usages of java.util.stream Stream.map.

Prototype

<R> Stream<R> map(Function<? super T, ? extends R> mapper);

Document

Returns a stream consisting of the results of applying the given function to the elements of this stream.
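
Before the real-world usages below, here is a minimal, self-contained sketch of the method (the class name StreamMapExample is illustrative, not from any of the projects that follow):

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class StreamMapExample {
    public static void main(String[] args) {
        // map applies String::length to each element,
        // turning a Stream<String> into a Stream<Integer>
        List<Integer> lengths = Stream.of("a", "bb", "ccc")
                .map(String::length)
                .collect(Collectors.toList());
        System.out.println(lengths); // prints [1, 2, 3]
    }
}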

Usage

From source file:org.springframework.cloud.gateway.route.builder.GatewayFilterSpec.java

public List<GatewayFilter> transformToOrderedFilters(Stream<GatewayFilter> stream) {
    return stream.map(filter -> {
        if (filter instanceof Ordered) {
            return filter;
        } else {
            return new OrderedGatewayFilter(filter, 0);
        }
    }).collect(Collectors.toList());
}
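
Here map leaves filters that already implement Ordered untouched and wraps all others in an OrderedGatewayFilter with order 0, so every element of the resulting list carries an explicit order.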

From source file:org.talend.dataprep.api.dataset.row.DataSetRow.java

/**
 * Returns the current row as an array of Strings.
 *
 * @param filters An optional set of {@link Predicate filters} to be used to filter values. See {@link #SKIP_TDP_ID}
 * for example.
 * @return The current row as an array of Strings, possibly with columns filtered out depending on the filters.
 */
@SafeVarargs
public final String[] toArray(Predicate<Map.Entry<String, String>>... filters) {
    Stream<Map.Entry<String, String>> stream = stream(values.entrySet().spliterator(), false);
    // Apply filters
    for (Predicate<Map.Entry<String, String>> filter : filters) {
        stream = stream.filter(filter);
    }
    // Get as string array the selected columns
    final List<String> strings = stream.map(Map.Entry::getValue) //
            .map(String::valueOf) //
            .collect(Collectors.toList());
    return strings.toArray(new String[strings.size()]);
}
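
The two chained map calls first extract each entry's value and then pass it through String::valueOf, which converts any null value to the string "null" before the results are collected into the array.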

From source file:org.talend.dataprep.dataset.service.analysis.asynchronous.BackgroundAnalysis.java

/**
 * Compute the statistics for the given dataset metadata and content.
 *
 * @param analyzer the analyzer to use.
 * @param columns the columns metadata.
 * @param stream the content to compute the statistics from.
 */
private void computeStatistics(final Analyzer<Analyzers.Result> analyzer, final List<ColumnMetadata> columns,
        final Stream<DataSetRow> stream) {
    // Create a content with the expected format for the StatisticsClientJson class
    stream.map(row -> row.toArray(DataSetRow.SKIP_TDP_ID)).forEach(analyzer::analyze);
    analyzer.end();

    // Store results back in data set
    adapter.adapt(columns, analyzer.getResult());
}
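
map converts each DataSetRow into a String[] (skipping the TDP id) so the analyzer can consume it; the terminal forEach drives the stream, then analyzer.end() marks the end of the input before the results are adapted back onto the columns.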

From source file:org.talend.dataprep.dataset.service.analysis.synchronous.QualityAnalysis.java

/**
 * Compute the quality (count, valid, invalid and empty) of the given dataset.
 *
 * @param dataset the dataset metadata.
 * @param records the dataset records.
 * @param limit indicates how many records will be read from the stream. Use a number < 0 to perform a full scan of the stream.
 */
public void computeQuality(DataSetMetadata dataset, Stream<DataSetRow> records, long limit) {
    // Compute valid / invalid / empty count, need data types for analyzer first
    final List<ColumnMetadata> columns = dataset.getRowMetadata().getColumns();
    if (columns.isEmpty()) {
        LOGGER.debug("Skip analysis of {} (no column information).", dataset.getId());
        return;
    }
    try (Analyzer<Analyzers.Result> analyzer = analyzerService.qualityAnalysis(columns)) {
        if (limit > 0) { // Only limit the number of rows if limit > 0 (use limit to speed up sync analysis).
            LOGGER.debug("Limit analysis to the first {}.", limit);
            records = records.limit(limit);
        } else {
            LOGGER.debug("Performing full analysis.");
        }
        records.map(row -> row.toArray(DataSetRow.SKIP_TDP_ID)).forEach(analyzer::analyze);
        // Determine content size
        final List<Analyzers.Result> result = analyzer.getResult();
        adapter.adapt(columns, result);
        // Remember the number of records
        if (!result.isEmpty()) {
            final long recordCount = result.get(0).get(ValueQualityStatistics.class).getCount();
            dataset.getContent().setNbRecords((int) recordCount);
        }
    } catch (Exception e) {
        throw new TDPException(CommonErrorCodes.UNEXPECTED_EXCEPTION, e);
    }
}
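
The same map-then-analyze pattern appears here, with an optional limit(limit) applied first so that synchronous analysis can stop after the first records instead of scanning the whole stream.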

From source file:org.talend.dataprep.dataset.service.DataSetService.java

/**
 * Returns the <b>full</b> data set content for the given id.
 *
 * @param metadata If <code>true</code>, includes data set metadata information.
 * @param dataSetId A data set id.
 * @return The full data set.
 */
@RequestMapping(value = "/datasets/{id}/content", method = RequestMethod.GET, produces = APPLICATION_JSON_VALUE)
@ApiOperation(value = "Get a data set by id", notes = "Get a data set content based on provided id. Id should be a UUID returned by the list operation. Not valid or non existing data set id returns empty content.")
@Timed
@ResponseBody
public Callable<DataSet> get(
        @RequestParam(defaultValue = "true") @ApiParam(name = "metadata", value = "Include metadata information in the response") boolean metadata, //
        @RequestParam(defaultValue = "false") @ApiParam(name = "includeInternalContent", value = "Include internal content in the response") boolean includeInternalContent, //
        @PathVariable(value = "id") @ApiParam(name = "id", value = "Id of the requested data set") String dataSetId) {
    return () -> {
        final Marker marker = Markers.dataset(dataSetId);
        LOG.debug(marker, "Get data set #{}", dataSetId);
        try {
            DataSetMetadata dataSetMetadata = dataSetMetadataRepository.get(dataSetId);
            assertDataSetMetadata(dataSetMetadata, dataSetId);
            // Build the result
            DataSet dataSet = new DataSet();
            if (metadata) {
                dataSet.setMetadata(conversionService.convert(dataSetMetadata, UserDataSetMetadata.class));
            }
            Stream<DataSetRow> stream = contentStore.stream(dataSetMetadata, -1); // Disable line limit
            if (!includeInternalContent) {
                LOG.debug("Skip internal content when serving data set #{} content.", dataSetId);
                stream = stream.map(r -> {
                    final Map<String, Object> values = r.values();
                    final Map<String, Object> filteredValues = new HashMap<>(values);
                    values.forEach((k, v) -> {
                        // Removes technical properties from returned values.
                        if (k != null && k.startsWith(FlagNames.INTERNAL_PROPERTY_PREFIX)) {
                            filteredValues.remove(k);
                        }
                    });
                    filteredValues.put(FlagNames.TDP_ID, r.getTdpId()); // Include TDP_ID anyway
                    return new DataSetRow(r.getRowMetadata(), filteredValues);
                });
            }
            dataSet.setRecords(stream);
            return dataSet;
        } finally {
            LOG.debug(marker, "Get done.");
        }
    };
}
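
In this example map rewrites each row: it copies the row's values, removes keys starting with the internal property prefix, re-adds the TDP id, and rebuilds the row. Because streams are lazy, this filtering only runs when the returned DataSet's records are actually consumed.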

From source file:org.trellisldp.io.JenaIOService.java

@Override
public void write(final Stream<Triple> triples, final OutputStream output, final RDFSyntax syntax,
        final IRI... profiles) {
    requireNonNull(triples, "The triples stream may not be null!");
    requireNonNull(output, "The output stream may not be null!");
    requireNonNull(syntax, "The RDF syntax value may not be null!");

    try {
        if (RDFA.equals(syntax)) {
            writeHTML(triples, output, profiles.length > 0 ? profiles[0].getIRIString() : null);
        } else {
            final Lang lang = rdf.asJenaLang(syntax).orElseThrow(
                    () -> new RuntimeTrellisException("Invalid content type: " + syntax.mediaType()));

            final RDFFormat format = defaultSerialization(lang);

            if (nonNull(format)) {
                LOGGER.debug("Writing stream-based RDF: {}", format);
                final StreamRDF stream = getWriterStream(output, format);
                stream.start();
                ofNullable(nsService).ifPresent(svc -> svc.getNamespaces().forEach(stream::prefix));
                triples.map(rdf::asJenaTriple).forEachOrdered(stream::triple);
                stream.finish();
            } else {
                LOGGER.debug("Writing buffered RDF: {}", lang);
                final org.apache.jena.graph.Graph graph = createDefaultGraph();
                ofNullable(nsService).map(NamespaceService::getNamespaces)
                        .ifPresent(graph.getPrefixMapping()::setNsPrefixes);
                triples.map(rdf::asJenaTriple).forEachOrdered(graph::add);
                if (JSONLD.equals(lang)) {
                    writeJsonLd(output, DatasetGraphFactory.create(graph), profiles);
                } else {
                    RDFDataMgr.write(output, graph, lang);
                }
            }
        }
    } catch (final AtlasException ex) {
        throw new RuntimeTrellisException(ex);
    }
}
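
Both branches use triples.map(rdf::asJenaTriple) to translate each triple into Jena's representation; forEachOrdered preserves the encounter order of the stream while feeding either the StreamRDF writer or the buffered graph.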

From source file:org.trellisldp.rosid.file.RDFPatch.java

/**
 * Write RDF Patch statements to the specified file
 * @param file the file
 * @param delete the quads to delete
 * @param add the quads to add
 * @param time the time
 * @return true if the write succeeds; false otherwise
 */
public static Boolean write(final File file, final Stream<? extends Quad> delete,
        final Stream<? extends Quad> add, final Instant time) {
    try (final BufferedWriter writer = newBufferedWriter(file.toPath(), UTF_8, CREATE, APPEND)) {
        writer.write(BEGIN + time.truncatedTo(MILLIS) + lineSeparator());
        final Iterator<String> delIter = delete.map(quadToString).iterator();
        while (delIter.hasNext()) {
            writer.write("D " + delIter.next() + lineSeparator());
        }
        final Iterator<String> addIter = add.map(quadToString).iterator();
        while (addIter.hasNext()) {
            writer.write("A " + addIter.next() + lineSeparator());
        }
        writer.write(END + time.truncatedTo(MILLIS) + lineSeparator());
    } catch (final IOException ex) {
        LOGGER.error("Error writing data to resource {}: {}", file, ex.getMessage());
        return false;
    }
    return true;
}
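
Here map applies quadToString to each quad, and iterator() bridges the resulting streams into plain while loops so the deletions and additions can be written line by line between the BEGIN and END markers.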

From source file:org.zanata.sync.jobs.plugin.zanata.util.PushPullOptionsUtil.java

/**
 * @param repoBase
 *         base path of a source repo.
 * @return absolute paths for all the project configs found under repoBase
 */
public static Set<File> findProjectConfigs(File repoBase) {
    try {
        Stream<Path> pathStream = Files.find(repoBase.toPath(), MAX_DEPTH,
                (path, basicFileAttributes) -> basicFileAttributes.isRegularFile()
                        && path.toFile().getName().equals("zanata.xml"));
        return pathStream.map(Path::toFile).collect(Collectors.toSet());
    } catch (IOException e) {
        throw new ZanataSyncException("Failed finding project config", e);
    }
}
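
map(Path::toFile) converts the matched paths before collecting them into a set. Note that the stream returned by Files.find holds an open directory resource; closing it (for instance with try-with-resources) would be the more defensive pattern, although here it is only consumed.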

From source file:processing.app.debug.Compiler.java

private void copyAdditionalFilesToBuildFolderSavingOriginalFolderStructure(SketchData sketch, String buildPath)
        throws RunnerException {
    Path sketchPath = Paths.get(sketch.getFolder().getAbsolutePath());
    Stream<Path> otherFilesStream;
    try {
        otherFilesStream = Files.find(sketchPath, ADDITIONAL_FILES_COPY_MAX_DEPTH,
                (path, attribs) -> !attribs.isDirectory() && isPathInASubfolder(sketchPath, path)
                        && FileUtils.hasExtension(path.toFile(), SketchData.OTHER_ALLOWED_EXTENSIONS));
    } catch (IOException e) {
        throw new RunnerException(e);
    }
    otherFilesStream
            .map((path) -> new Pair<>(path, Paths.get(buildPath, sketchPath.relativize(path).toString())))
            .forEach((pair) -> {
                try {
                    Files.createDirectories(pair.value.getParent());
                    Files.copy(pair.key, pair.value, StandardCopyOption.REPLACE_EXISTING);
                } catch (IOException e) {
                    e.printStackTrace();
                    throw new RuntimeException(I18n.format(_("Problem moving {0} to the build folder"),
                            sketchPath.relativize(pair.key).toString()));
                }
            });
}
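
map pairs every additional file with its destination under the build folder (preserving the relative directory structure), and the subsequent forEach creates the target directories and copies each file.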

From source file:processing.app.debug.OldCompiler.java

private void copyAdditionalFilesToBuildFolderSavingOriginalFolderStructure(SketchData sketch, String buildPath)
        throws RunnerException {
    Path sketchPath = Paths.get(sketch.getFolder().getAbsolutePath());
    Stream<Path> otherFilesStream;
    try {
        otherFilesStream = Files.find(sketchPath, ADDITIONAL_FILES_COPY_MAX_DEPTH,
                (path, attribs) -> !attribs.isDirectory() && isPathInASubfolder(sketchPath, path)
                        && FileUtils.hasExtension(path.toFile(), SketchData.OTHER_ALLOWED_EXTENSIONS));
    } catch (IOException e) {
        throw new RunnerException(e);
    }
    otherFilesStream
            .map((path) -> new Pair<>(path, Paths.get(buildPath, sketchPath.relativize(path).toString())))
            .forEach((pair) -> {
                try {
                    Files.createDirectories(pair.value.getParent());
                    Files.copy(pair.key, pair.value, StandardCopyOption.REPLACE_EXISTING);
                } catch (IOException e) {
                    e.printStackTrace();
                    throw new RuntimeException(I18n.format(tr("Problem moving {0} to the build folder"),
                            sketchPath.relativize(pair.key).toString()));
                }
            });
}
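
This OldCompiler variant follows the same pair-and-copy pattern as the Compiler example above; the two differ only in the i18n helper used to format the error message (tr versus _).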