Example usage for java.util.stream StreamSupport stream

Introduction

On this page you can find usage examples for java.util.stream.StreamSupport.stream, collected from open-source projects.

Prototype

public static <T> Stream<T> stream(Spliterator<T> spliterator, boolean parallel) 

Document

Creates a new sequential or parallel Stream from a Spliterator.
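
A minimal, self-contained sketch of the call (the class and variable names are illustrative, not taken from the projects below): the Spliterator supplies the elements, and the boolean selects sequential or parallel execution.

import java.util.List;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

public class StreamSupportSketch {

    public static void main(String[] args) {
        // Any Iterable can hand out a Spliterator; a List is the simplest source.
        Iterable<String> source = List.of("a", "b", "c");

        // parallel = false: a plain sequential stream.
        Stream<String> sequential = StreamSupport.stream(source.spliterator(), false);
        sequential.map(String::toUpperCase).forEach(System.out::println);

        // parallel = true: the same kind of pipeline, executed as a parallel stream.
        // A Spliterator is single-use, so each stream needs a fresh one.
        long count = StreamSupport.stream(source.spliterator(), true).count();
        System.out.println(count); // 3
    }
}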

Usage

From source file:org.fcrepo.apix.jena.Util.java

/**
 * Perform a SPARQL query against a model.
 *
 * @param sparql a SPARQL CONSTRUCT query
 * @param model the model to query
 * @return a stream of matching triples
 */
public static Stream<Triple> query(final String sparql, final Model model) {
    final Iterable<Triple> i = () -> QueryExecutionFactory.create(QueryFactory.create(sparql), model)
            .execConstructTriples();
    return StreamSupport.stream(i.spliterator(), false);
}
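
Note the lambda assigned to Iterable<Triple>: Jena's execConstructTriples() returns an Iterator, and a throwaway Iterable is simply a compact way to obtain a Spliterator from it through the default Iterable.spliterator() method.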

From source file:ch.heigvd.gamification.api.BadgesEndpoint.java

@Override
@RequestMapping(method = RequestMethod.GET)
public ResponseEntity<List<BadgeDTO>> badgesGet(
        @ApiParam(value = "token that identifies the app sending the request", required = true) @RequestHeader(value = "X-Gamification-Token", required = true) String xGamificationToken) {

    AuthenKey apiKey = authenKeyRepository.findByAppKey(xGamificationToken);

    //Application app = apprepository.findByAppKey(apiKey);
    if (apiKey == null) {
        return new ResponseEntity(HttpStatus.UNAUTHORIZED);
    }

    Application app = apiKey.getApp();

    if (app != null) {
        return new ResponseEntity<>(StreamSupport.stream(badgeRepository.findAllByApp(app).spliterator(), true)
                .map(p -> toDTO(p)).collect(toList()), HttpStatus.OK);
    }
    return new ResponseEntity("no content available", HttpStatus.BAD_REQUEST);

}
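
This is the only example on this page that passes true for the parallel flag, so the badges are mapped to DTOs in parallel. A parallel stream only pays off when the element count or per-element work is large; with false the result would be the same, computed sequentially.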

From source file:com.thinkbiganalytics.metadata.modeshape.user.JcrUserGroup.java

@Override
public GroupPrincipal getPrincial() {
    Set<Principal> members = StreamSupport.stream(getGroups().spliterator(), false).map(g -> g.getPrincial())
            .collect(Collectors.toSet());

    return new GroupPrincipal(getSystemName(), members);
}

From source file:com.epam.reportportal.extension.bugtracking.jira.JiraStrategy.java

@Override
public Ticket submitTicket(final PostTicketRQ ticketRQ, ExternalSystem details) {
    expect(ticketRQ.getFields(), not(isNull())).verify(UNABLE_INTERACT_WITH_EXTRERNAL_SYSTEM,
            "External System fields set is empty!");
    List<PostFormField> fields = ticketRQ.getFields();

    // TODO add validation of any field with allowedValues() array
    // Additional validation required for unsupported
    // ticket type and/or components in JIRA.
    PostFormField issuetype = new PostFormField();
    PostFormField components = new PostFormField();
    for (PostFormField object : fields) {
        if ("issuetype".equalsIgnoreCase(object.getId()))
            issuetype = object;
        if ("components".equalsIgnoreCase(object.getId()))
            components = object;
    }

    expect(issuetype.getValue().size(), equalTo(1)).verify(UNABLE_INTERACT_WITH_EXTRERNAL_SYSTEM,
            formattedSupplier("[IssueType] field has multiple values '{}' but should be only one",
                    issuetype.getValue()));
    final String issueTypeStr = issuetype.getValue().get(0);
    expect(JIRATicketType.findByName(issueTypeStr), not(isNull())).verify(UNABLE_INTERACT_WITH_EXTRERNAL_SYSTEM,
            formattedSupplier("Ticket with [IssueType] '{}' cannot be sent to the external system", issueTypeStr));

    try (JiraRestClient client = getClient(details.getUrl(), ticketRQ.getUsername(), ticketRQ.getPassword())) {
        Project jiraProject = getProject(client, details);

        if (null != components.getValue()) {
            Set<String> validComponents = StreamSupport.stream(jiraProject.getComponents().spliterator(), false)
                    .map(JiraPredicates.COMPONENT_NAMES).collect(toSet());
            components.getValue().forEach(component -> expect(component, in(validComponents)).verify(
                    UNABLE_INTERACT_WITH_EXTRERNAL_SYSTEM,
                    formattedSupplier("Component '{}' does not exist in the external system", component)));
        }

        // TODO consider to modify code below - project cached
        Optional<IssueType> issueType = StreamSupport.stream(jiraProject.getIssueTypes().spliterator(), false)
                .filter(input -> issueTypeStr.equalsIgnoreCase(input.getName())).findFirst();

        expect(issueType, Preconditions.IS_PRESENT).verify(UNABLE_INTERACT_WITH_EXTRERNAL_SYSTEM,
                formattedSupplier("Unable to post issue with type '{}' for project '{}'.",
                        issuetype.getValue().get(0), details.getProject()));
        IssueInput issueInput = JIRATicketUtils.toIssueInput(client, jiraProject, issueType, ticketRQ,
                ticketRQ.getBackLinks().keySet(), descriptionService);

        Map<String, String> binaryData = findBinaryData(issueInput);

        /*
         * Claim (block on) the promise so we know the issue was actually created.
         */
        BasicIssue createdIssue = client.getIssueClient().createIssue(issueInput).claim();

        // post binary data
        Issue issue = client.getIssueClient().getIssue(createdIssue.getKey()).claim();

        AttachmentInput[] attachmentInputs = new AttachmentInput[binaryData.size()];
        int counter = 0;
        for (Map.Entry<String, String> binaryDataEntry : binaryData.entrySet()) {
            BinaryData data = dataStorage.fetchData(binaryDataEntry.getKey());
            if (null != data) {
                attachmentInputs[counter] = new AttachmentInput(binaryDataEntry.getValue(),
                        data.getInputStream());
                counter++;
            }
        }
        if (counter != 0)
            client.getIssueClient().addAttachments(issue.getAttachmentsUri(),
                    Arrays.copyOf(attachmentInputs, counter));
        return getTicket(createdIssue.getKey(), details, client).orElse(null);

    } catch (Exception e) {
        LOGGER.error(e.getMessage(), e);
        throw new ReportPortalException(e.getMessage(), e);
    }
}

From source file:org.nuxeo.ecm.automation.DetachTemplateFromAllDocuments.java

protected List<String> getDocumentsWithTemplate(CoreSession session, String query) {
    List<String> ids = new ArrayList<>();
    IterableQueryResult it = null;
    try {
        it = session.queryAndFetch(query, NXQL);
        Spliterator<Map<String, Serializable>> spliterator = Spliterators.spliteratorUnknownSize(it.iterator(),
                Spliterator.NONNULL);
        ids = StreamSupport.stream(spliterator, false).map(map -> (String) map.get(ECM_UUID))
                .collect(Collectors.toList());
    } finally {
        if (it != null) {
            it.close();
        }
    }
    return ids;
}
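
Note the bridge used here: session.queryAndFetch exposes only an Iterator, so Spliterators.spliteratorUnknownSize adapts it for StreamSupport.stream (the same idiom appears in the com.yevster.spdxtra.Read example below). A minimal standalone sketch of that pattern, with illustrative names:

import java.util.Iterator;
import java.util.List;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

public class IteratorBridgeSketch {

    public static void main(String[] args) {
        // An iterator-only source whose size is unknown up front.
        Iterator<String> it = List.of("x", "y", "z").iterator();

        // Declare only the characteristics the source actually guarantees.
        Spliterator<String> split =
                Spliterators.spliteratorUnknownSize(it, Spliterator.ORDERED | Spliterator.NONNULL);

        List<String> upper = StreamSupport.stream(split, false) // sequential
                .map(String::toUpperCase)
                .collect(Collectors.toList());
        System.out.println(upper); // [X, Y, Z]
    }
}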

From source file:alluxio.server.ft.MultiWorkerIntegrationTest.java

private void createFileOnWorker(int total, AlluxioURI filePath, WorkerNetAddress address) throws IOException {
    FileSystemTestUtils.createByteFile(mResource.get().getClient(), filePath,
            CreateFileOptions.defaults().setWriteType(WriteType.MUST_CACHE)
                    .setLocationPolicy((workerInfoList, blockSizeBytes) -> StreamSupport
                            .stream(workerInfoList.spliterator(), false)
                            .filter(x -> x.getNetAddress().equals(address)).findFirst().get().getNetAddress()),
            total);
}

From source file:jp.classmethod.aws.brian.BrianClient.java

@Override
public List<String> listTriggerGroups() throws BrianClientException, BrianServerException {
    logger.debug("list trigger groups: {}");
    HttpResponse httpResponse = null;//from w w  w. j  av  a 2 s . co  m
    try {
        URI uri = new URI(scheme, null, hostname, port, "/triggers", null, null);
        HttpUriRequest httpRequest = RequestBuilder.get().setUri(uri).build();
        httpResponse = httpClientExecute(httpRequest);
        int statusCode = httpResponse.getStatusLine().getStatusCode();
        logger.debug("statusCode: {}", statusCode);
        if (statusCode == HttpStatus.SC_OK) {
            JsonNode tree = mapper.readTree(httpResponse.getEntity().getContent());
            return StreamSupport.stream(tree.spliterator(), false).map(item -> item.textValue())
                    .collect(Collectors.toList());
        } else if (statusCode >= 500) {
            throw new BrianServerException("status = " + statusCode);
        } else if (statusCode >= 400) {
            throw new BrianClientException("status = " + statusCode);
        } else {
            throw new Error("status = " + statusCode);
        }
    } catch (URISyntaxException e) {
        throw new IllegalStateException(e);
    } catch (IOException e) {
        throw new BrianServerException(e);
    } catch (IllegalStateException e) {
        throw new Error(e);
    } finally {
        if (httpResponse != null) {
            EntityUtils.consumeQuietly(httpResponse.getEntity());
        }
    }
}
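
The tree.spliterator() call works because Jackson's JsonNode implements Iterable<JsonNode>, iterating over the node's children; for a JSON array node that yields one element per entry.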

From source file:com.epam.ta.reportportal.database.dao.TestItemRepositoryCustomImpl.java

private Collection<String> obtainIds(Iterable<Launch> launches) {
    return StreamSupport.stream(launches.spliterator(), false).map(Launch::getId).collect(toList());
}

From source file:com.yevster.spdxtra.Read.java

private static Stream<Relationship> getRelationshipsWithSparql(Dataset dataset, String sparql) {
    try (DatasetAutoAbortTransaction transaction = DatasetAutoAbortTransaction.begin(dataset, ReadWrite.READ)) {
        QueryExecution qe = QueryExecutionFactory.create(sparql, dataset);
        ResultSet results = qe.execSelect();
        Stream<QuerySolution> solutionStream = StreamSupport.stream(
                Spliterators.spliteratorUnknownSize(results, Spliterator.ORDERED | Spliterator.NONNULL), false);

        return solutionStream.map((QuerySolution qs) -> {
            RDFNode relationshipNode = qs.get("o");
            assert (relationshipNode.isResource());
            return new Relationship(relationshipNode.asResource());
        });

    }
}

From source file:com.uber.hoodie.io.compact.HoodieRealtimeTableCompactor.java

private List<WriteStatus> compact(HoodieCopyOnWriteTable hoodieCopyOnWriteTable,
        HoodieTableMetaClient metaClient, HoodieWriteConfig config, CompactionOperation operation,
        String commitTime) throws IOException {
    FileSystem fs = metaClient.getFs();
    Schema readerSchema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(config.getSchema()));

    log.info("Compacting base " + operation.getDataFilePath() + " with delta files "
            + operation.getDeltaFilePaths() + " for commit " + commitTime);
    // TODO - FIX THIS
    // Reads the entire avro file. Only the specific blocks required should be read from the avro
    // file (failure recovery).
    // Load all the delta commits since the last compaction commit and get all the blocks to be
    // loaded, then load them using CompositeAvroLogReader.
    // Since a DeltaCommit is not defined yet, read all the records. Revisit this soon.
    String maxInstantTime = metaClient.getActiveTimeline()
            .getTimelineOfActions(Sets.newHashSet(HoodieTimeline.COMMIT_ACTION, HoodieTimeline.ROLLBACK_ACTION,
                    HoodieTimeline.DELTA_COMMIT_ACTION))
            .filterCompletedInstants().lastInstant().get().getTimestamp();
    log.info("MaxMemoryPerCompaction => " + config.getMaxMemoryPerCompaction());
    HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, metaClient.getBasePath(),
            operation.getDeltaFilePaths(), readerSchema, maxInstantTime, config.getMaxMemoryPerCompaction(),
            config.getCompactionLazyBlockReadEnabled(), config.getCompactionReverseLogReadEnabled(),
            config.getMaxDFSStreamBufferSize(), config.getSpillableMapBasePath());
    if (!scanner.iterator().hasNext()) {
        return Lists.<WriteStatus>newArrayList();
    }

    Optional<HoodieDataFile> oldDataFileOpt = hoodieCopyOnWriteTable.getROFileSystemView()
            .getLatestDataFilesOn(operation.getPartitionPath(), operation.getBaseInstantTime())
            .filter(df -> df.getFileId().equals(operation.getFileId())).findFirst();

    // Compacting is very similar to applying updates to existing file
    Iterator<List<WriteStatus>> result;
    // If the dataFile is present, there is a base parquet file present, perform updates else perform inserts into a
    // new base parquet file.
    if (operation.getDataFilePath().isPresent()) {
        result = hoodieCopyOnWriteTable.handleUpdate(commitTime, operation.getFileId(), scanner.getRecords(),
                oldDataFileOpt);
    } else {
        result = hoodieCopyOnWriteTable.handleInsert(commitTime, operation.getPartitionPath(),
                operation.getFileId(), scanner.iterator());
    }
    Iterable<List<WriteStatus>> resultIterable = () -> result;
    return StreamSupport.stream(resultIterable.spliterator(), false).flatMap(Collection::stream).map(s -> {
        s.getStat().setTotalUpdatedRecordsCompacted(scanner.getNumMergedRecordsInLog());
        s.getStat().setTotalLogFilesCompacted(scanner.getTotalLogFiles());
        s.getStat().setTotalLogRecords(scanner.getTotalLogRecords());
        s.getStat().setPartitionPath(operation.getPartitionPath());
        s.getStat().setTotalLogSizeCompacted(
                operation.getMetrics().get(CompactionStrategy.TOTAL_LOG_FILE_SIZE).longValue());
        s.getStat().setTotalLogBlocks(scanner.getTotalLogBlocks());
        s.getStat().setTotalCorruptLogBlock(scanner.getTotalCorruptBlocks());
        s.getStat().setTotalRollbackBlocks(scanner.getTotalRollbacks());
        RuntimeStats runtimeStats = new RuntimeStats();
        runtimeStats.setTotalScanTime(scanner.getTotalTimeTakenToReadAndMergeBlocks());
        s.getStat().setRuntimeStats(runtimeStats);
        return s;
    }).collect(toList());
}