Example usage for java.util.stream Collectors partitioningBy

List of usage examples for java.util.stream Collectors partitioningBy

Introduction

In this page you can find the example usage for java.util.stream Collectors partitioningBy.

Prototype

public static <T> Collector<T, ?, Map<Boolean, List<T>>> partitioningBy(Predicate<? super T> predicate) 

Source Link

Document

Returns a Collector which partitions the input elements according to a Predicate, and organizes them into a Map<Boolean, List<T>>.

Usage

From source file:Main.java

public static void main(String... args) {
    // Partition the menu: key true -> vegetarian dishes, key false -> the rest.
    Map<Boolean, List<Food>> partitioned = Food.menu.stream()
            .collect(Collectors.partitioningBy(Food::isVegetarian));

    System.out.println(partitioned);
}

From source file:Main.java

public static void main(String[] args) {
    // Split all persons into two buckets: true -> male, false -> everyone else.
    Map<Boolean, List<Employee>> byGender = Employee.persons()
            .stream()
            .collect(Collectors.partitioningBy(Employee::isMale));

    System.out.println(byGender);
}

From source file:com.dclab.preparation.ReadTest.java

public void dig(File folder) {
    Logger.getAnonymousLogger().info("OPENING folder " + folder.getName());
    File[] entries = folder.listFiles();

    // First split: zip archives vs. everything else.
    Map<Boolean, List<File>> byZip = Arrays.stream(entries)
            .collect(Collectors.partitioningBy((f -> f.getName().endsWith("zip"))));

    // Second split (non-zips only): directories vs. plain files.
    Map<Boolean, List<File>> byDirectory = byZip.get(false).stream()
            .collect(Collectors.partitioningBy(File::isDirectory));

    // Plain files ending in "xml" are scanned; directories are recursed into.
    byDirectory.get(false).stream().filter(f -> f.getName().endsWith("xml")).forEach(this::scanFile);
    byDirectory.get(true).stream().forEach(this::dig);

    // Zip archives: print each entry that handleZip yields a line for.
    // I/O failures are logged and swallowed so one bad archive/entry does not
    // abort the traversal.
    byZip.get(true).stream().forEach(archive -> {
        try {
            ZipFile zipFile = new ZipFile(archive);
            zipFile.stream().forEach(entry -> {
                try {
                    String line = handleZip(zipFile, entry);
                    if (line != null) {
                        printLine(line);
                    }
                } catch (IOException ex) {
                    Logger.getLogger(ReadTest.class.getName()).log(Level.SEVERE, null, ex);
                }
            });
        } catch (IOException ex) {
            Logger.getLogger(ReadTest.class.getName()).log(Level.SEVERE, null, ex);
        }
    });

}

From source file:alfio.manager.AdminReservationRequestManager.java

/**
 * Processes up to 1000 pending admin reservation requests, persists each
 * request's resulting status, and reports the outcome.
 *
 * @return a pair of (number of successful requests, number of failed requests)
 */
Pair<Integer, Integer> processPendingReservations() {
    // Partition the per-request update parameters by whether the reservation
    // succeeded: key true -> SUCCESS rows, key false -> failures.
    Map<Boolean, List<MapSqlParameterSource>> result = adminReservationRequestRepository
            .findPendingForUpdate(1000).stream().map(id -> {
                AdminReservationRequest request = adminReservationRequestRepository.fetchCompleteById(id);

                // Resolve the event, then the user, then attempt the reservation.
                // A missing event short-circuits to NOT_FOUND; a missing user to
                // ACCESS_DENIED; otherwise the outcome is whatever
                // processReservation returns.
                Result<Triple<TicketReservation, List<Ticket>, Event>> reservationResult = Result
                        .fromNullable(optionally(() -> eventRepository.findById((int) request.getEventId()))
                                .orElse(null), ErrorCode.EventError.NOT_FOUND)
                        .flatMap(e -> Result.fromNullable(
                                optionally(() -> userRepository.findById((int) request.getUserId()))
                                        .map(u -> Pair.of(e, u)).orElse(null),
                                ErrorCode.EventError.ACCESS_DENIED))
                        .flatMap(p -> processReservation(request, p));
                return buildParameterSource(id, reservationResult);
            }).collect(Collectors.partitioningBy(
                    ps -> AdminReservationRequest.Status.SUCCESS.name().equals(ps.getValue("status"))));

    // Batch-persist the new status for each partition independently; a failure
    // is logged but does not prevent the other partition from being updated.
    result.values().forEach(list -> {
        try {
            jdbc.batchUpdate(adminReservationRequestRepository.updateStatus(),
                    list.toArray(new MapSqlParameterSource[list.size()]));
        } catch (Exception e) {
            log.fatal("cannot update the status of " + list.size() + " reservations", e);
        }
    });

    // true partition = successes, false partition = failures.
    return Pair.of(CollectionUtils.size(result.get(true)), CollectionUtils.size(result.get(false)));

}

From source file:inflor.core.plots.FCSChartPanel.java

public void setSelectAnnotations(Point2D p) {
    selectedAnnotations.clear();

    // Partition every gate annotation by whether it contains the clicked point.
    Map<Boolean, List<XYGateAnnotation>> hits = gateAnnotations.keySet().stream()
            .collect(Collectors.partitioningBy(a -> a.containsPoint(p)));

    // Annotations under the point become the new selection (flagged selected)...
    selectedAnnotations = hits.get(true).stream()
            .map(a -> updateSelectionStatus(a, true))
            .collect(Collectors.toList());

    // ...while all remaining annotations get their selection flag cleared.
    hits.get(false).forEach(a -> updateSelectionStatus(a, false));
}

From source file:com.act.biointerpretation.cofactorremoval.CofactorRemover.java

/**
 * This function is the meat of the cofactor removal process.  It extracts all cofactors based on their ids and
 * places them in the appropriate collection within the reaction.  Note that because this is executed by
 * BiointerpretationProcessor's `runSpecializedReactionProcessing` hook, the chemical ids have already been updated
 * to reference the chemical entries in the WriteDB.
 * @param reaction The reaction to update.
 * @param component Update substrates or products.
 */
private void updateReactionProductOrSubstrate(Reaction reaction, ReactionComponent component) {
    Long[] chemIds, originalCofactorIds;
    if (component == SUBSTRATE) {
        chemIds = reaction.getSubstrates();
        originalCofactorIds = reaction.getSubstrateCofactors();
    } else {
        chemIds = reaction.getProducts();
        originalCofactorIds = reaction.getProductCofactors();
    }

    // partitioningBy guarantees that both TRUE and FALSE map to (possibly empty)
    // lists, so no containsKey checks or raw Collections.EMPTY_LIST fallbacks are
    // needed, and Arrays.stream avoids the asList().stream() indirection.
    Map<Boolean, List<Long>> partitionedIds = Arrays.stream(chemIds)
            .collect(Collectors.partitioningBy(knownCofactorWriteDBIds::contains));

    List<Long> cofactorIds = partitionedIds.get(true);
    List<Long> nonCofactorIds = partitionedIds.get(false);

    // Retain previously partitioned cofactors if any exist.
    if (originalCofactorIds != null && originalCofactorIds.length > 0) {
        // Use an ordered set to unique the partitioned and previously specified cofactors.  Original cofactors go first.
        LinkedHashSet<Long> uniqueCofactorIds = new LinkedHashSet<>(Arrays.asList(originalCofactorIds));
        uniqueCofactorIds.addAll(cofactorIds);
        /* We do this potentially expensive de-duplication step only in the presumably rare case that we find a reaction
         * that already has cofactors set.  A reaction that has not already undergone cofactor removal is very unlikely to
         * have cofactors partitioned from substrates/products. */
        cofactorIds = new ArrayList<>(uniqueCofactorIds);
    }

    // Coefficients for cofactors should automatically fall out when we update the substrate/product list.
    if (component == SUBSTRATE) {
        reaction.setSubstrateCofactors(cofactorIds.toArray(new Long[cofactorIds.size()]));
        reaction.setSubstrates(nonCofactorIds.toArray(new Long[nonCofactorIds.size()]));
        /* Coefficients should already have been set when the reaction was migrated to the new DB, so no need to update.
         * Note that this assumption depends strongly on the current coefficient implementation in the Reaction model. */
    } else {
        reaction.setProductCofactors(cofactorIds.toArray(new Long[cofactorIds.size()]));
        reaction.setProducts(nonCofactorIds.toArray(new Long[nonCofactorIds.size()]));
    }
}

From source file:org.dllearner.algorithms.qtl.operations.lgg.LGGGeneratorRDFS.java

@Override
protected Set<Triple<Node, Node, Node>> getRelatedEdges(RDFResourceTree tree1, RDFResourceTree tree2) {
    Set<Triple<Node, Node, Node>> result = new HashSet<>();

    Predicate<Node> isBuiltIn = n -> isBuiltInEntity(n);

    // split by built-in and non-built-in predicates: key true -> built-in,
    // key false -> user-defined
    Map<Boolean, List<Node>> split1 = tree1.getEdges().stream().collect(Collectors.partitioningBy(isBuiltIn));
    Map<Boolean, List<Node>> split2 = tree2.getEdges().stream().collect(Collectors.partitioningBy(isBuiltIn));

    //      SortedSet<Node> edges1 = tree1.getEdges().stream().filter(e -> !isBuiltInEntity(e))
    //            .collect(Collectors.toCollection(() -> new TreeSet<>(new NodeComparatorInv())));
    //      SortedSet<Node> edges2 = tree2.getEdges().stream().filter(e -> !isBuiltInEntity(e))
    //            .collect(Collectors.toCollection(() -> new TreeSet<>(new NodeComparatorInv())));

    // For every pair of non-built-in edges of matching kind (data vs. object
    // property), record the least common subsumer as the related edge.
    for (Node e1 : split1.get(false)) {
        // classify e1 by inspecting its first child: a literal child means
        // e1 is a data property, otherwise an object property
        boolean dataproperty = tree1.getChildren(e1).iterator().next().isLiteralNode();
        EntityType entityType = dataproperty ? EntityType.DATA_PROPERTY : EntityType.OBJECT_PROPERTY;

        // only pair e1 with edges of tree2 that have the same property kind
        split2.get(false).stream().filter(e2 -> {
            RDFResourceTree child = tree2.getChildren(e2).iterator().next();
            return dataproperty && child.isLiteralNode() || !dataproperty && !child.isLiteralNode();
        }).forEach(e2 -> {
            Node lcs = NonStandardReasoningServices.getLeastCommonSubsumer(reasoner, e1, e2, entityType);

            // no common subsumer -> the pair is not related; skip it
            if (lcs != null) {
                result.add(Triple.of(e1, e2, lcs));
            }
        });
    }

    List<Node> builtInEntities1 = split1.get(true);
    List<Node> builtInEntities2 = split2.get(true);

    // Built-in predicates only relate to themselves: keep those occurring in
    // both trees as (e, e, e) triples.
    Set<Triple<Node, Node, Node>> builtInEntitiesCommon = builtInEntities1.stream()
            .filter(e -> builtInEntities2.contains(e)).map(e -> Triple.of(e, e, e)).collect(Collectors.toSet());

    result.addAll(builtInEntitiesCommon);

    return result;
}

From source file:org.jasig.portlet.announcements.mvc.portlet.display.AnnouncementsViewController.java

private List[] getLists(PortletRequest request) throws PortletException {
    final String userId = userIdService.getUserId(request);

    // fetch the user's topic subscription from the database
    List<TopicSubscription> myTopics = tss.getTopicSubscription(request);

    // Gather every published announcement from subscribed topics, then split
    // them by whether the owning topic is an emergency channel.
    Map<Boolean, List<Announcement>> byEmergency = myTopics.stream()
            .filter(TopicSubscription::getSubscribed)
            .map(sub -> sub.getTopic().getPublishedAnnouncements())
            .flatMap(announcements -> announcements.stream())
            .collect(Collectors.partitioningBy(
                    (Announcement a) -> a.getParent().getSubscriptionMethod() == Topic.EMERGENCY));

    // index 0: regular announcements, index 1: emergency announcements
    return new List[] { byEmergency.get(false), byEmergency.get(true) };
}