Example usage for java.util.stream Collectors toCollection

List of usage examples for java.util.stream Collectors toCollection

Introduction

On this page you can find an example usage of java.util.stream Collectors toCollection.

Prototype

public static <T, C extends Collection<T>> Collector<T, ?, C> toCollection(Supplier<C> collectionFactory) 

Source Link

Document

Returns a Collector that accumulates the input elements into a new Collection, in encounter order.

Usage

From source file:org.sonar.java.bytecode.cfg.BytecodeCFGBuilderTest.java

/**
 * Collects the opcode mnemonics of every instruction in the given CFG,
 * including each block's terminator, into a multiset.
 *
 * @param bytecodeCFG the control-flow graph to inspect
 * @return a multiset with one entry per (non-null) instruction's mnemonic
 */
private Multiset<String> cfgOpcodes(BytecodeCFG bytecodeCFG) {
    Stream<Instruction> allInstructions = bytecodeCFG.blocks.stream()
            .flatMap(b -> Stream.concat(b.instructions.stream(), Stream.of(b.terminator)));
    return allInstructions
            .filter(Objects::nonNull)
            .map(instruction -> Printer.OPCODES[instruction.opcode()])
            .collect(Collectors.toCollection(HashMultiset::create));
}

From source file:org.ethereum.net.server.ChannelManagerImpl.java

/**
 * broadcastBlock Propagates a block message across active peers with exclusion of
 * the peers with an id belonging to the skip set.
 *
 * @param block new Block to be sent/*  ww  w  .  jav  a  2  s .c  om*/
 * @param skip  the set of peers to avoid sending the message.
 * @return a set containing the ids of the peers that received the block.
 */
@Nonnull
public Set<NodeID> broadcastBlock(@Nonnull final Block block, @Nullable final Set<NodeID> skip) {
    Metrics.broadcastBlock(block);

    final Set<NodeID> res = new HashSet<>();
    final BlockIdentifier bi = new BlockIdentifier(block.getHash(), block.getNumber());
    final EthMessage newBlock = new RskMessage(new BlockMessage(block));
    final EthMessage newBlockHashes = new RskMessage(new NewBlockHashesMessage(Arrays.asList(bi)));
    synchronized (activePeers) {
        // Get a randomized list with all the peers that don't have the block yet.
        activePeers.values().forEach(c -> logger.trace("RSK activePeers: {}", c));
        final Vector<Channel> peers = activePeers.values().stream()
                .filter(p -> skip == null || !skip.contains(new NodeID(p.getNodeId())))
                .collect(Collectors.toCollection(() -> new Vector<>()));
        Collections.shuffle(peers);

        int sqrt = (int) Math.floor(Math.sqrt(peers.size()));
        for (int i = 0; i < sqrt; i++) {
            Channel peer = peers.get(i);
            res.add(new NodeID(peer.getNodeId()));
            logger.trace("RSK propagate: {}", peer);
            peer.sendMessage(newBlock);
        }
        for (int i = sqrt; i < peers.size(); i++) {
            Channel peer = peers.get(i);
            logger.trace("RSK announce: {}", peer);
            peer.sendMessage(newBlockHashes);
        }
    }

    return res;
}

From source file:ru.anr.base.BaseParent.java

/**
 * Filters the given collection according to the specified predicate, which
 * can be a lambda expression.
 *
 * @param coll      an original collection
 * @param predicate a predicate (can be a lambda expression)
 * @param <S>       the type of the collection's items
 * @return the filtered items as a new list, in encounter order
 */
public static <S> List<S> filter(Collection<S> coll, Predicate<S> predicate) {
    List<S> result = new ArrayList<>();
    for (S item : coll) {
        if (predicate.test(item)) {
            result.add(item);
        }
    }
    return result;
}

From source file:org.ow2.proactive.connector.iaas.cloud.provider.vmware.VMWareProviderVirtualMachineUtil.java

/**
 * Picks a resource pool at random among all pools reachable from the given
 * root folder.
 *
 * @param rootFolder the folder to search managed entities from
 * @return a randomly chosen resource pool, or {@link Optional#empty()} when
 *         no pool exists
 * @throws RuntimeException wrapping the {@link RemoteException} if the
 *         inventory lookup fails
 */
public Optional<ResourcePool> getRandomResourcePool(Folder rootFolder) {
    List<ResourcePool> resourcePools;
    try {
        resourcePools = Lists
                .newArrayList(
                        new InventoryNavigator(rootFolder).searchManagedEntities(EntityType.POOL.getValue()))
                .stream().map(resourcePool -> (ResourcePool) resourcePool)
                .collect(Collectors.toCollection(ArrayList::new));
    } catch (RemoteException e) {
        throw new RuntimeException("ERROR when retrieving VMWare resource pool", e);
    }
    // Guard against an empty inventory: Random.nextInt(0) throws
    // IllegalArgumentException, defeating the Optional return contract.
    if (resourcePools.isEmpty()) {
        return Optional.empty();
    }
    return Optional.of(resourcePools.get(new Random().nextInt(resourcePools.size())));
}

From source file:org.wso2.carbon.uuf.internal.core.create.AppCreator.java

/**
 * Assembles a {@code Component} from its on-disk reference: registers its
 * layouts and fragments into the lookup, loads optional bindings and
 * configuration from YAML, and collects its pages.
 *
 * @param componentName      fully-qualified component name
 * @param componentVersion   component version string
 * @param componentContext   context path of the component
 * @param componentReference source of the component's layouts, fragments,
 *                           pages, and YAML files
 * @param classLoader        class loader used when creating fragments and pages
 * @param lookup             registry mutated as a side effect (layouts,
 *                           fragments, bindings, merged configuration)
 * @return the created component with its pages
 * @throws MalformedConfigurationException if a bindings or configuration
 *         YAML file cannot be parsed
 */
private Component createComponent(String componentName, String componentVersion, String componentContext,
        ComponentReference componentReference, ClassLoader classLoader, Lookup lookup) {
    // Register layouts and fragments (parallel streams; NOTE(review): this
    // assumes lookup.add is safe under concurrent calls -- confirm).
    componentReference.getLayouts(supportedExtensions).parallel()
            .map(layoutReference -> createLayout(layoutReference, componentName)).forEach(lookup::add);
    componentReference.getFragments(supportedExtensions).parallel()
            .map((fragmentReference) -> createFragment(fragmentReference, componentName, classLoader))
            .forEach(lookup::add);

    Yaml yaml = new Yaml();
    try {
        // Bindings file is optional; fall back to an empty map when absent.
        Map<?, ?> bindingsConfig = componentReference.getBindingsConfig()
                .map(fileReference -> yaml.loadAs(fileReference.getContent(), Map.class))
                .orElse(Collections.emptyMap());
        addBindings(bindingsConfig, lookup, componentName);
    } catch (Exception e) {
        // Yaml.loadAs() throws an Exception
        throw new MalformedConfigurationException(
                "Bindings configuration '" + componentReference.getBindingsConfig().get().getRelativePath()
                        + "' of component '" + getSimpleName(componentName) + "' is malformed.",
                e);
    }

    try {
        // Component configuration is also optional YAML; merge it into the
        // lookup's aggregate configuration.
        Map<?, ?> rawConfigurations = componentReference.getConfigurations()
                .map(fileReference -> yaml.loadAs(fileReference.getContent(), Map.class))
                .orElse(new HashMap<>(0));
        lookup.getConfiguration().merge(rawConfigurations);
    } catch (Exception e) {
        // Yaml.loadAs() throws an Exception
        throw new MalformedConfigurationException(
                "Configuration '" + componentReference.getConfigurations().get().getRelativePath()
                        + "' of component '" + getSimpleName(componentName) + "' is malformed.",
                e);
    }

    // Pages are collected into a TreeSet, implying Page is Comparable and
    // pages end up sorted per its natural order.
    SortedSet<Page> pages = componentReference.getPages(supportedExtensions).parallel()
            .map(pageReference -> createPage(pageReference, componentName, lookup, classLoader))
            .collect(Collectors.toCollection(TreeSet::new));
    return new Component(componentName, componentVersion, componentContext, pages);
}

From source file:org.nuxeo.ecm.core.storage.marklogic.MarkLogicQueryBuilder.java

/**
 * Builds the final MarkLogic query from the stored expression. When
 * principals are configured, an ACL restriction (read ACL IN principals)
 * is AND-ed onto the expression before walking it.
 *
 * @return the built query
 */
public MarkLogicQuery buildQuery() {
    Expression finalExpression = this.expression;
    if (principals != null) {
        // Restrict the query to documents readable by one of the principals.
        LiteralList principalLiterals = new LiteralList();
        principals.stream().map(StringLiteral::new).forEach(principalLiterals::add);
        Reference readAclReference = new Reference(ExpressionEvaluator.NXQL_ECM_READ_ACL);
        Expression aclExpression = new Expression(readAclReference, Operator.IN, principalLiterals);
        // Combine with the original expression under AND.
        finalExpression = new Expression(finalExpression, Operator.AND, aclExpression);
    }
    return new MarkLogicQuery(walkExpression(finalExpression).build());
}

From source file:edu.zipcloud.cloudstreetmarket.core.services.CommunityServiceImpl.java

/**
 * Runs a specification-based user search and maps every matching user to a
 * DTO, hiding sensitive fields where necessary.
 *
 * @param spec     the search criteria
 * @param pageable the paging information
 * @return the matching users as a page of DTOs
 */
@Override
public Page<UserDTO> search(Specification<User> spec, Pageable pageable) {
    Page<User> page = userRepository.findAll(spec, pageable);
    List<UserDTO> dtos = new LinkedList<>();
    for (User user : page.getContent()) {
        dtos.add(hideSensitiveFieldsIfNecessary(new UserDTO(user)));
    }
    return new PageImpl<>(dtos, pageable, page.getTotalElements());
}

From source file:uk.trainwatch.nrod.timetable.tools.TimeTableChecker.java

/**
 * Scans a CIF timetable file for operating-characteristic codes that are
 * not recognised, printing each distinct unknown code and logging a count.
 *
 * @param cifFile path of the CIF file to scan
 */
private void findUnsupportedOperatingCharacteristics(final Path cifFile) {
    // Strict mode so we fail on an invalid record type
    final CIFParser parser = new CIFParser(true);

    CounterConsumer<String> found = new CounterConsumer<>();

    LOG.log(Level.INFO, "Scanning for unsupported OperatingCharacteristics's...");

    lines(cifFile).map(parser::parse)
            .map(Functions.castTo(BasicSchedule.class))
            .filter(Objects::nonNull)
            // Filter for schedules with an unexpected characteristics count
            .filter(s -> s.getOperatingCharacteristics().length != 6)
            // Swap back the original line from the file
            .map(parser::currentLine)
            // Manually extract the operating-characteristics field
            .map(l -> l.substring(60, 66))
            // Split into individual characters. BUG FIX: the previous
            // split(".") treated "." as a regex matching every character,
            // producing an empty array so no codes were ever inspected.
            .flatMap(l -> Stream.of(l.split("")))
            // Filter out padding spaces
            .filter(c -> !c.equals(" "))
            // Keep only codes that map to unknown
            .filter(c -> OperatingCharacteristics.lookup(c) == OperatingCharacteristics.UNKNOWN)
            // Count each hit for the summary log line
            .peek(found)
            // Collect into a sorted set (raw TreeSet replaced with generics)
            .collect(Collectors.toCollection(TreeSet::new))
            // Write distinct unknown codes to stdout
            .forEach(System.out::println);

    LOG.log(Level.INFO, () -> "Found " + found.get() + " unknown entries");
}

From source file:com.ge.predix.acs.service.policy.evaluation.PolicyEvaluationServiceImpl.java

/**
 * Filters and orders the available policy sets according to the requested
 * evaluation order.
 *
 * <p>When the evaluation order is empty, exactly one policy set must exist
 * and it is returned as-is; more than one is rejected. Otherwise every name
 * in the order must resolve to an existing policy set.
 *
 * @param subjectIdentifier subject of the evaluation (diagnostics only)
 * @param uri resource URI of the evaluation (diagnostics only)
 * @param allPolicySets all policy sets available for the zone
 * @param policySetsEvaluationOrder requested order; may be empty
 * @return the policy sets to evaluate, in evaluation order
 * @throws IllegalArgumentException if the order is empty while several
 *         policy sets exist, or if a name in the order matches no policy set
 */
LinkedHashSet<PolicySet> filterPolicySetsByPriority(final String subjectIdentifier, final String uri,
        final List<PolicySet> allPolicySets, final LinkedHashSet<String> policySetsEvaluationOrder)
        throws IllegalArgumentException {

    if (policySetsEvaluationOrder.isEmpty()) {
        if (allPolicySets.size() > 1) {
            LOGGER.error(String.format(
                    "Found more than one policy set during policy evaluation and "
                            + "no evaluation order is provided. subjectIdentifier='%s', resourceURI='%s'",
                    subjectIdentifier, uri));
            throw new IllegalArgumentException("More than one policy set exists for this zone. "
                    + "Please provide an ordered list of policy set names to consider for this evaluation and "
                    + "resubmit the request.");
        } else {
            return allPolicySets.stream().collect(Collectors.toCollection(LinkedHashSet::new));
        }
    }

    Map<String, PolicySet> allPolicySetsMap = allPolicySets.stream()
            .collect(Collectors.toMap(PolicySet::getName, Function.identity()));
    LinkedHashSet<PolicySet> filteredPolicySets = new LinkedHashSet<>();
    for (String policySetId : policySetsEvaluationOrder) {
        PolicySet policySet = allPolicySetsMap.get(policySetId);
        if (policySet == null) {
            LOGGER.error("No existing policy set matches policy set in the evaluation order of the request. "
                    + "Subject: " + subjectIdentifier + ", Resource: " + uri);
            // Typos fixed in the user-facing message:
            // "evaluaion" -> "evaluation", "evauation" -> "evaluation".
            throw new IllegalArgumentException(
                    "No existing policy set matches policy set in the evaluation order of the request. "
                            + "Please review the policy evaluation order and resubmit the request.");
        } else {
            filteredPolicySets.add(policySet);
        }
    }
    return filteredPolicySets;
}

From source file:com.github.horrorho.liquiddonkey.cloud.Looter.java

/**
 * Retrieves each snapshot of the given backup that matches the configured
 * snapshot selection, delegating the actual download to {@code snapshot()}.
 *
 * @param client HTTP client used for the transfer
 * @param core   core transfer state
 * @param agent  HTTP agent
 * @param backup backup whose snapshots are to be fetched
 * @throws BadDataException     on malformed backup data
 * @throws IOException          on I/O failure
 * @throws InterruptedException if the transfer is interrupted
 */
void backup(HttpClient client, Core core, HttpAgent agent, Backup backup)
        throws BadDataException, IOException, InterruptedException {

    logger.info("-- backup() > udid: {}", backup.backupUDID());
    std.println("Retrieving backup: " + backup.backupUDID());

    // Available snapshots
    SnapshotIdReferences references = SnapshotIdReferences.from(backup.mbsBackup());
    logger.debug("-- backup() > requested ids: {}", config.selection().snapshots());

    // Resolve the configured selection to concrete snapshot ids; -1 marks an
    // unresolvable request and is filtered out. LinkedHashSet keeps the
    // request order while de-duplicating.
    Set<Integer> resolved = config.selection().snapshots().stream().map(references::applyAsInt)
            .filter(id -> id != -1).collect(Collectors.toCollection(LinkedHashSet::new));
    logger.debug("-- backup() > resolved ids: {}", resolved);

    // Fetch each resolved snapshot in turn.
    for (int id : resolved) {
        logger.info("-- backup() > snapshot: {}", id);
        snapshot(client, core, agent, backup, id);

    }
}