Example usage for java.util.stream Collectors groupingBy

List of usage examples for java.util.stream Collectors groupingBy

Introduction

On this page you can find usage examples for java.util.stream Collectors.groupingBy.

Prototype

public static <T, K> Collector<T, ?, Map<K, List<T>>> groupingBy(Function<? super T, ? extends K> classifier) 

Document

Returns a Collector implementing a "group by" operation on input elements of type T, grouping elements according to a classification function, and returning the results in a Map.
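
As a quick illustration of the signature above (a minimal, self-contained sketch, not taken from any of the projects below), grouping a list of words by their length yields a Map<Integer, List<String>>:

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class GroupingByExample {
    public static void main(String[] args) {
        List<String> words = Arrays.asList("ant", "bee", "cat", "lion", "tiger");
        // the classifier (String::length) supplies the map key; each value list keeps encounter order
        Map<Integer, List<String>> byLength = words.stream()
                .collect(Collectors.groupingBy(String::length));
        System.out.println(byLength); // typically {3=[ant, bee, cat], 4=[lion], 5=[tiger]}; key iteration order is not guaranteed
    }
}

Overloads of groupingBy also accept a downstream Collector and a map factory when more control over the resulting Map is needed.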

Usage

From source file:com.ikanow.aleph2.data_import_manager.analytics.actors.DataBucketAnalyticsChangeActor.java

/** Inefficient but safe utility for sending update events to the trigger sibling
 * @param job_results
 * @param bucket
 * @param grouping_lambda - returns the job type based on the job and return value
 * @param me_sibling
 */
protected static <T> void sendOnTriggerEventMessages_phase2(
        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<T>>> job_results,
        final DataBucketBean bucket,
        final Function<Tuple2<AnalyticThreadJobBean, T>, Optional<JobMessageType>> grouping_lambda,
        final Tuple2<ActorRef, ActorSelection> me_sibling, final ILoggingService _logging_service) {
    final Map<Optional<JobMessageType>, List<Tuple2<AnalyticThreadJobBean, T>>> completed_jobs = job_results
            .stream().filter(j_f -> _batch_types.contains(j_f._1().analytic_type())) // (never allow streaming types to go to the triggers)
            .flatMap(Lambdas.flatWrap_i(j_f -> Tuples._2T(j_f._1(), j_f._2().get()))).collect(Collectors
                    .groupingBy((Tuple2<AnalyticThreadJobBean, T> j_f) -> grouping_lambda.apply(j_f)));

    completed_jobs.entrySet().stream().filter(kv -> kv.getKey().isPresent()).forEach(kv -> {
        if (!kv.getValue().isEmpty()) {
            _logging_service.getSystemLogger(bucket).log(Level.INFO,
                    ErrorUtils.lazyBuildMessage(false,
                            () -> DataBucketAnalyticsChangeActor.class.getSimpleName(),
                            () -> "sendOnTriggerEventMessages_phase2", () -> null,
                            () -> ErrorUtils.get("Forwarding bucket information to {0}: bucket {1} event {2}",
                                    me_sibling._2(), bucket.full_name(), kv.getKey().get()),
                            () -> Collections.emptyMap()));

            final BucketActionMessage.BucketActionAnalyticJobMessage fwd_msg = new BucketActionMessage.BucketActionAnalyticJobMessage(
                    bucket, kv.getValue().stream().map(j_f -> j_f._1()).collect(Collectors.toList()),
                    kv.getKey().get());
            me_sibling._2().tell(new AnalyticTriggerMessage(fwd_msg), me_sibling._1());
        }
    });
}
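
The example above groups job results by an Optional key and then processes only the groups whose key is present. The same pattern, stripped down to plain JDK types (the event strings below are hypothetical and unrelated to the Aleph2 code base), might look like this:

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;

public class OptionalKeyGrouping {
    public static void main(String[] args) {
        List<String> events = Arrays.asList("start:a", "stop:b", "noise", "start:c");
        // the classifier returns Optional.empty() for elements that cannot be categorized
        Map<Optional<String>, List<String>> byType = events.stream()
                .collect(Collectors.groupingBy(
                        e -> e.contains(":") ? Optional.of(e.substring(0, e.indexOf(':')))
                                             : Optional.<String>empty()));
        // act only on the groups whose key is present, mirroring the filter in the example above
        byType.entrySet().stream()
                .filter(kv -> kv.getKey().isPresent())
                .forEach(kv -> System.out.println(kv.getKey().get() + " -> " + kv.getValue()));
    }
}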

From source file:oct.analysis.application.comp.EZWorker.java

@Override
protected EZEdgeCoord doInBackground() throws Exception {
    int foveaCenterXPosition = analysisManager.getFoveaCenterXPosition();
    /*
     first get a sharpened version of the OCT and use that to obtain the segmentation
     of the Bruch's membrane. Use a Loess interpolation algorithm to smooth 
     out imperfections in the segmentation line.
     */
    UnivariateInterpolator interpolator = new LoessInterpolator(0.1, 0);
    ArrayList<Point> rawBrmPoints = new ArrayList<>(analysisManager
            .getSegmentation(new SharpenOperation(15, 0.5F)).getSegment(Segmentation.BrM_SEGMENT));
    double[][] brmSeg = Util.getXYArraysFromPoints(rawBrmPoints);
    UnivariateFunction brmInterp = interpolator.interpolate(brmSeg[0], brmSeg[1]);
    BufferedImage sharpOCT = analysisManager.getSharpenedOctImage(8.5D, 1.0F);
    setProgress(10);
    /*
     Starting from the identified location of the fovea, search northward in
     the image until the most northern pixels (in a 3x3 matrix of
     pixels around the search point (X,Y)) are black (i.e. the search
     matrix has found that the search point isn't totally surrounded by
     white pixels). Then a recursive search algorithm determines if the
     black area signifies the separation between bands or simply represents
     a closed black band (a black blob entirely surrounded by white pixels).
     It will continue searching northward in the image until it can find an
     open region of all black pixels. Once this is found it will find the contour
     of the edge between the black and white pixels along the width of the image.
     */
    int searchY = (int) Math.round(brmInterp.value(foveaCenterXPosition)) + 1;
    do {
        searchY--;
    } while (Util.calculateGrayScaleValue(sharpOCT.getRGB(foveaCenterXPosition, searchY)) > 0
            || !isContrastPoint(foveaCenterXPosition, searchY, sharpOCT));
    LinkedList<Point> contour = new LinkedList<>();
    Point startPoint = new Point(foveaCenterXPosition, searchY);
    //find contour by searching for the white pixel boundary to the right of the fovea
    contour.add(findContourRight(startPoint, Cardinality.SOUTH, startPoint, Cardinality.SOUTH, contour,
            sharpOCT, 0));
    //search until an open black area is found (i.e. if the search algorithm arrives back at
    //the starting pixel keep moving north to the next black area to search)
    while (contour.get(0).equals(startPoint)) {
        contour = new LinkedList<>();
        do {
            searchY--;
        } while (Util.calculateGrayScaleValue(sharpOCT.getRGB(foveaCenterXPosition, searchY)) == 0);
        do {
            searchY--;
        } while (Util.calculateGrayScaleValue(sharpOCT.getRGB(foveaCenterXPosition, searchY)) > 0
                || isSurroundedByWhite(foveaCenterXPosition, searchY, sharpOCT));
        startPoint = new Point(foveaCenterXPosition, searchY);
        contour.add(findContourRight(startPoint, Cardinality.SOUTH, startPoint, Cardinality.SOUTH, contour,
                sharpOCT, 0));
    }
    setProgress(20);
    //open black space found, complete the contour to the left of the fovea
    contour.add(
            findContourLeft(startPoint, Cardinality.SOUTH, startPoint, Cardinality.SOUTH, contour, sharpOCT));
    analysisManager.getImgPanel().setDrawPoint(new Point(foveaCenterXPosition, searchY));
    setProgress(30);
    /*
     since the contour can snake around due to aberrations and low image density
     we need to create a single line (represented by points) from left to right
     to represent the contour. This is easily done by building a line of
     points consisting of the point with the largest Y value (furthest from
     the top of the image) at each X value. This eliminates overhangs from the
     contour line.
     */
    Map<Double, List<Point>> grouped = contour.stream().collect(Collectors.groupingBy(Point::getX));
    List<Point> refinedEZContour = grouped.values().stream().map((List<Point> points) -> {
        int maxY = points.stream().mapToInt((Point p) -> p.y).max().getAsInt();
        return new Point(points.get(0).x, maxY);
    }).sorted((Point p1, Point p2) -> Integer.compare(p1.x, p2.x)).collect(Collectors.toList());
    setProgress(35);
    /*
     Starting from the identified location of the fovea, search southward in
     the image until the most southern pixels (in a 3x3 matrix of
     pixels around the search point (X,Y)) are black (i.e. the search
     matrix has found that the search point isn't totally surrounded by
     white pixels). Then a recursive search algorithm determines if the
     black area signifies the bottom of the Bruch's membrane or simply represents
     a closed black band (a black blob entirely surrounded by white pixels).
     It will continue searching southward in the image until it can find an 
     open region of all black pixels. Once this is found it will find the contour
     of the edge between the black and white pixels, along the width of the image,
     of the bottom of the Bruch's membrane.
     */
    //        sharpOCT = getSharpenedOctImage(5D, 1.0F);
    searchY = (int) Math.round(brmInterp.value(foveaCenterXPosition));
    do {
        searchY++;
    } while (Util.calculateGrayScaleValue(sharpOCT.getRGB(foveaCenterXPosition, searchY)) > 0
            || isSurroundedByWhite(foveaCenterXPosition, searchY, sharpOCT));
    contour = new LinkedList<>();
    startPoint = new Point(foveaCenterXPosition, searchY);
    /*
     Find the contour by searching for the white pixel boundary to the right of the fovea.
     Sometimes the noise below the Bruch's membrane causes too much interference for the
     algorithm to work properly, so we must tweak some of the parameters of the
     sharpening performed on the image until the algorithm succeeds or we can no longer
     tweak the parameters. In the latter case we can use the raw segmented
     Bruch's membrane as a substitute to keep the method from failing.
     */
    contour.add(findContourRight(startPoint, Cardinality.NORTH, startPoint, Cardinality.NORTH, contour,
            sharpOCT, 0));
    double filtValue = 8.5D;
    boolean tweakFailed = false;
    while (contour.contains(null)) {
        contour = new LinkedList<>();
        filtValue -= 0.5D;
        System.out.println("Reducing sigma to " + filtValue);
        if (filtValue <= 0D) {
            tweakFailed = true;
            break;
        }
        sharpOCT = analysisManager.getSharpenedOctImage(filtValue, 1.0F); //re-sharpen with the reduced sigma
        contour.add(findContourRight(startPoint, Cardinality.NORTH, startPoint, Cardinality.NORTH, contour,
                sharpOCT, 0));
    }

    if (tweakFailed) {
        contour = new LinkedList<>(rawBrmPoints);
    } else {
        //search until an open black area is found (i.e. if the search algorithm arrives back at
        //the starting pixel keep moving south to the next black area to search)
        while (contour.get(0).equals(startPoint)) {
            contour = new LinkedList<>();
            do {
                searchY++;
            } while (Util.calculateGrayScaleValue(sharpOCT.getRGB(foveaCenterXPosition, searchY)) == 0);
            do {
                searchY++;
            } while (Util.calculateGrayScaleValue(sharpOCT.getRGB(foveaCenterXPosition, searchY)) > 0
                    || isSurroundedByWhite(foveaCenterXPosition, searchY, sharpOCT));
            startPoint = new Point(foveaCenterXPosition, searchY);
            contour.add(findContourRight(startPoint, Cardinality.NORTH, startPoint, Cardinality.NORTH, contour,
                    sharpOCT, 0));
        }
        setProgress(45);
        //open black space found, complete the contour to the left of the fovea
        contour.add(findContourLeft(startPoint, Cardinality.NORTH, startPoint, Cardinality.NORTH, contour,
                sharpOCT));
    }
    setProgress(55);
    /*
     since the contour can snake around due to aberrations and low image density
     we need to create a single line (represented by points) from left to right
     to represent the contour. This is easily done by building a line of
     points consisting of the point with the smallest Y value (closest to 
     the top of the image) at each X value. This eliminates overhangs from the 
     contour line.
     */
    grouped = contour.stream().collect(Collectors.groupingBy(Point::getX));
    List<Point> refinedBruchsMembraneContour = grouped.values().stream().map((List<Point> points) -> {
        int minY = points.stream().mapToInt((Point p) -> p.y).min().getAsInt();
        return new Point(points.get(0).x, minY);
    }).sorted((Point p1, Point p2) -> Integer.compare(p1.x, p2.x)).collect(Collectors.toList());
    setProgress(70);

    /*
     use a Loess interpolator again to smooth the new contours of the EZ and Bruch's Membrane
     */
    double[][] refinedContourPoints = Util.getXYArraysFromPoints(refinedEZContour);
    UnivariateFunction interpEZContour = interpolator.interpolate(refinedContourPoints[0],
            refinedContourPoints[1]);
    refinedContourPoints = Util.getXYArraysFromPoints(refinedBruchsMembraneContour);
    UnivariateFunction interpBruchsContour = interpolator.interpolate(refinedContourPoints[0],
            refinedContourPoints[1]);

    /*
     find the average difference in the distance in the Y between the 10 pixels
     at each end of the Bruch's Membrane contour and the contour created
     along the top of the EZ.
     */
    //since the lines are sorted on X position it is easy to align the lines
    //based on the tails of each line
    int minX = refinedEZContour.get(0).x;
    int maxX;
    //the interpolator can shorten the range of the X values from the original supplied
    //so we need to test where the end of the range occurs since it isn't directly accessible
    for (maxX = refinedEZContour.get(refinedEZContour.size() - 1).x; maxX > minX; maxX--) {
        try {
            double tmp = interpEZContour.value(maxX) - interpBruchsContour.value(maxX);
            //if this break is reached we have found the max value the interpolators will allow
            break;
        } catch (OutOfRangeException oe) {
            //do nothing but let loop continue
        }
    }
    double avgDif = Stream
            .concat(IntStream.range(minX + 30, minX + 50).boxed(),
                    IntStream.range(maxX - 49, maxX - 28).boxed())
            .mapToDouble(x -> interpBruchsContour.value(x) - interpEZContour.value(x)).average().getAsDouble();

    int height = sharpOCT.getHeight(); //effectively final local copy for use in the lambda expressions below
    List<LinePoint> ezLine = IntStream.rangeClosed(minX, maxX)
            .mapToObj(x -> new LinePoint(x, height - interpEZContour.value(x) - avgDif))
            .collect(Collectors.toList());
    List<LinePoint> bmLine = IntStream.rangeClosed(minX, maxX)
            .mapToObj(x -> new LinePoint(x, height - interpBruchsContour.value(x)))
            .collect(Collectors.toList());
    List<LinePoint> bmUnfiltLine = refinedBruchsMembraneContour.stream()
            .map((Point p) -> new LinePoint(p.x, height - p.getY())).collect(Collectors.toList());
    Util.graphPoints(ezLine, bmLine, bmUnfiltLine);
    analysisManager.getImgPanel().setDrawnLines(
            IntStream.rangeClosed(minX, maxX).mapToObj(x -> new LinePoint(x, interpEZContour.value(x)))
                    .collect(Collectors.toList()),
            IntStream.rangeClosed(minX, maxX).mapToObj(x -> new LinePoint(x, interpBruchsContour.value(x)))
                    .collect(Collectors.toList()));
    /*
     Find the difference between the two contours (Bruch's membrane and the
     EZ + Bruch's membrane) and use this to determine where the edge of the
     EZ is
     */
    List<LinePoint> diffLine = findDiffWithAdjustment(interpBruchsContour, 0D, interpEZContour, avgDif, minX,
            maxX);
    setProgress(90);
    //        List<LinePoint> peaks = Util.findPeaksAndVallies(diffLine);
    //        Util.graphPoints(diffLine, peaks);

    /*
     Find the first zero crossings of the difference line on both sides of the fovea.
     If a zero crossing can't be found then search for the first crossing of a
     value of 1, then 2, then 3, etc. until an X coordinate of a crossing is
     found on each side of the fovea.
     */
    OptionalInt ezLeftEdge;
    double crossingThreshold = 0.25D;
    do {
        double filtThresh = crossingThreshold;
        System.out.println("Crossing threshold = " + crossingThreshold);
        ezLeftEdge = diffLine.stream().filter(lp -> lp.getY() <= filtThresh && lp.getX() < foveaCenterXPosition)
                .mapToInt(LinePoint::getX).max();
        crossingThreshold += 0.25D;
    } while (!ezLeftEdge.isPresent());
    OptionalInt ezRightEdge;
    crossingThreshold = 0.25D;
    do {
        double filtThresh = crossingThreshold;
        System.out.println("Crossing threshold = " + crossingThreshold);
        ezRightEdge = diffLine.stream()
                .filter(lp -> lp.getY() <= filtThresh && lp.getX() > foveaCenterXPosition)
                .mapToInt(LinePoint::getX).min();
        crossingThreshold += 0.25D;
    } while (!ezRightEdge.isPresent());
    //return findings
    return new EZEdgeCoord(ezLeftEdge.getAsInt(), ezRightEdge.getAsInt());
}

From source file:org.ajoberstar.reckon.core.git.GitInventorySupplier.java

private TaggedVersion findBase(RevWalk walk, RevCommit head, Stream<TaggedVersion> versions)
        throws IOException {
    walk.reset();
    walk.setRevFilter(RevFilter.ALL);
    walk.markStart(head);

    Map<RevCommit, List<TaggedVersion>> versionsByCommit = versions
            .collect(Collectors.groupingBy(TaggedVersion::getCommit));

    Stream.Builder<List<TaggedVersion>> builder = Stream.builder();

    for (RevCommit commit : walk) {
        List<TaggedVersion> matches = versionsByCommit.get(commit);
        if (matches != null) {
            // Parents can't be "nearer". Exclude them to avoid extra walking.
            for (RevCommit parent : commit.getParents()) {
                walk.markUninteresting(parent);
            }
            builder.accept(matches);
        }
    }

    return builder.build().flatMap(List::stream).max(Comparator.comparing(TaggedVersion::getVersion))
            .orElse(new TaggedVersion(Versions.VERSION_0, null));
}

From source file:org.apache.carbondata.presto.impl.CarbonTableReader.java

public List<CarbonLocalMultiBlockSplit> getInputSplits2(CarbonTableCacheModel tableCacheModel,
        Expression filters, TupleDomain<HiveColumnHandle> constraints, Configuration config)
        throws IOException {
    List<CarbonLocalInputSplit> result = new ArrayList<>();
    List<CarbonLocalMultiBlockSplit> multiBlockSplitList = new ArrayList<>();
    CarbonTable carbonTable = tableCacheModel.getCarbonTable();
    TableInfo tableInfo = tableCacheModel.getCarbonTable().getTableInfo();
    config.set(CarbonTableInputFormat.INPUT_SEGMENT_NUMBERS, "");
    String carbonTablePath = carbonTable.getAbsoluteTableIdentifier().getTablePath();
    config.set(CarbonTableInputFormat.INPUT_DIR, carbonTablePath);
    config.set(CarbonTableInputFormat.DATABASE_NAME, carbonTable.getDatabaseName());
    config.set(CarbonTableInputFormat.TABLE_NAME, carbonTable.getTableName());
    config.set("query.id", queryId);
    CarbonInputFormat.setTransactionalTable(config, carbonTable.isTransactionalTable());
    CarbonInputFormat.setTableInfo(config, carbonTable.getTableInfo());

    JobConf jobConf = new JobConf(config);
    List<PartitionSpec> filteredPartitions = new ArrayList<>();

    PartitionInfo partitionInfo = carbonTable.getPartitionInfo(carbonTable.getTableName());
    LoadMetadataDetails[] loadMetadataDetails = null;
    if (partitionInfo != null && partitionInfo.getPartitionType() == PartitionType.NATIVE_HIVE) {
        try {
            loadMetadataDetails = SegmentStatusManager
                    .readTableStatusFile(CarbonTablePath.getTableStatusFilePath(carbonTable.getTablePath()));
        } catch (IOException e) {
            LOGGER.error(e.getMessage(), e);
            throw e;
        }
        filteredPartitions = findRequiredPartitions(constraints, carbonTable, loadMetadataDetails);
    }
    try {
        CarbonTableInputFormat.setTableInfo(config, tableInfo);
        CarbonTableInputFormat carbonTableInputFormat = createInputFormat(jobConf,
                carbonTable.getAbsoluteTableIdentifier(), filters, filteredPartitions);
        Job job = Job.getInstance(jobConf);
        List<InputSplit> splits = carbonTableInputFormat.getSplits(job);
        Gson gson = new Gson();
        if (splits != null && splits.size() > 0) {
            for (InputSplit inputSplit : splits) {
                CarbonInputSplit carbonInputSplit = (CarbonInputSplit) inputSplit;
                result.add(new CarbonLocalInputSplit(carbonInputSplit.getSegmentId(),
                        carbonInputSplit.getPath().toString(), carbonInputSplit.getStart(),
                        carbonInputSplit.getLength(), Arrays.asList(carbonInputSplit.getLocations()),
                        carbonInputSplit.getNumberOfBlocklets(), carbonInputSplit.getVersion().number(),
                        carbonInputSplit.getDeleteDeltaFiles(), carbonInputSplit.getBlockletId(),
                        gson.toJson(carbonInputSplit.getDetailInfo()),
                        carbonInputSplit.getFileFormat().ordinal()));
            }

            // Use block distribution
            List<List<CarbonLocalInputSplit>> inputSplits = new ArrayList<>(result.stream()
                    .map(x -> (CarbonLocalInputSplit) x).collect(Collectors.groupingBy(carbonInput -> {
                        if (FileFormat.ROW_V1.equals(carbonInput.getFileFormat())) {
                            return carbonInput.getSegmentId().concat(carbonInput.getPath())
                                    .concat(carbonInput.getStart() + "");
                        }
                        return carbonInput.getSegmentId().concat(carbonInput.getPath());
                    })).values());
            if (inputSplits != null) {
                for (int j = 0; j < inputSplits.size(); j++) {
                    multiBlockSplitList.add(new CarbonLocalMultiBlockSplit(inputSplits.get(j),
                            inputSplits.get(j).stream().flatMap(f -> Arrays.stream(getLocations(f))).distinct()
                                    .toArray(String[]::new)));
                }
            }
            LOGGER.error("Size fo MultiblockList   " + multiBlockSplitList.size());

        }

    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    return multiBlockSplitList;
}

From source file:org.apache.flink.client.cli.CliFrontend.java

private static void printJobStatusMessages(List<JobStatusMessage> jobs) {
    SimpleDateFormat dateFormat = new SimpleDateFormat("dd.MM.yyyy HH:mm:ss");
    Comparator<JobStatusMessage> startTimeComparator = (o1,
            o2) -> (int) (o1.getStartTime() - o2.getStartTime());
    Comparator<Map.Entry<JobStatus, List<JobStatusMessage>>> statusComparator = (o1,
            o2) -> String.CASE_INSENSITIVE_ORDER.compare(o1.getKey().toString(), o2.getKey().toString());

    Map<JobStatus, List<JobStatusMessage>> jobsByState = jobs.stream()
            .collect(Collectors.groupingBy(JobStatusMessage::getJobState));
    jobsByState.entrySet().stream().sorted(statusComparator).map(Map.Entry::getValue).flatMap(List::stream)
            .sorted(startTimeComparator)
            .forEachOrdered(job -> System.out.println(dateFormat.format(new Date(job.getStartTime())) + " : "
                    + job.getJobId() + " : " + job.getJobName() + " (" + job.getJobState() + ")"));
}

From source file:org.apache.samza.system.kafka.KafkaSystemAdmin.java

/**
 * A helper method that takes oldest, newest, and upcoming offsets for each
 * system stream partition, and creates a single map from stream name to
 * SystemStreamMetadata.
 *
 * @param newestOffsets map of SSP to newest offset
 * @param oldestOffsets map of SSP to oldest offset
 * @param upcomingOffsets map of SSP to upcoming offset
 * @return a {@link Map} from {@code system} to {@link SystemStreamMetadata}
 */
@VisibleForTesting
static Map<String, SystemStreamMetadata> assembleMetadata(Map<SystemStreamPartition, String> oldestOffsets,
        Map<SystemStreamPartition, String> newestOffsets, Map<SystemStreamPartition, String> upcomingOffsets) {
    HashSet<SystemStreamPartition> allSSPs = new HashSet<>();
    allSSPs.addAll(oldestOffsets.keySet());
    allSSPs.addAll(newestOffsets.keySet());
    allSSPs.addAll(upcomingOffsets.keySet());

    Map<String, SystemStreamMetadata> assembledMetadata = allSSPs.stream()
            .collect(Collectors.groupingBy(SystemStreamPartition::getStream)).entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, entry -> {
                Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitionMetadata = entry
                        .getValue().stream()
                        .collect(Collectors.toMap(SystemStreamPartition::getPartition,
                                ssp -> new SystemStreamMetadata.SystemStreamPartitionMetadata(
                                        oldestOffsets.getOrDefault(ssp, null),
                                        newestOffsets.getOrDefault(ssp, null), upcomingOffsets.get(ssp))));
                return new SystemStreamMetadata(entry.getKey(), partitionMetadata);
            }));

    return assembledMetadata;
}
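
The method above groups partitions by stream name and then reshapes each group in a second pass with Collectors.toMap. The two-argument groupingBy(classifier, downstream) overload can fold such reshaping into the grouping step itself; here is a generic sketch with hypothetical path strings (unrelated to Samza):

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class GroupingByDownstream {
    public static void main(String[] args) {
        List<String> paths = Arrays.asList("logs/a.txt", "logs/b.txt", "data/c.csv");
        // group by the directory prefix, but keep only the file name in each group
        Map<String, List<String>> filesByDir = paths.stream()
                .collect(Collectors.groupingBy(
                        p -> p.substring(0, p.indexOf('/')),
                        Collectors.mapping(p -> p.substring(p.indexOf('/') + 1),
                                Collectors.toList())));
        System.out.println(filesByDir); // e.g. {data=[c.csv], logs=[a.txt, b.txt]}
    }
}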

From source file:org.cbioportal.service.impl.GenesetHierarchyServiceImpl.java

/**
 * Return the hierarchy nodes and leafs (gene sets) based on data found in genesetScores.
 *
 * @param genesetScores: gene set score data found for the given geneticProfileId and sampleIds
 * @param percentile: percentile to use for the representative score calculation
 * @param scoreThreshold: filter criterion
 * @param pvalueThreshold: filter criterion
 * 
 * @return
 * @throws MolecularProfileNotFoundException
 */
private List<GenesetHierarchyInfo> getGenesetHierarchyItems(List<GenesetMolecularData> genesetScores,
        List<GenesetMolecularData> genesetPvalues, Integer percentile, Double scoreThreshold,
        Double pvalueThreshold) throws MolecularProfileNotFoundException {

    List<String> genesetIds = new ArrayList<String>(
            genesetScores.stream().map(o -> o.getGenesetId()).collect(Collectors.toSet()));

    //add all hierarchy nodes that have no leafs, but are intermediate/super nodes:
    List<GenesetHierarchyInfo> hierarchySuperNodes = genesetHierarchyRepository
            .getGenesetHierarchySuperNodes(genesetIds);

    //index genesetData : 
    Map<String, List<GenesetMolecularData>> genesetScoresMap = genesetScores.stream()
            .collect(Collectors.groupingBy(GenesetMolecularData::getGenesetId));
    Map<String, List<GenesetMolecularData>> genesetPvaluesMap = genesetPvalues.stream()
            .collect(Collectors.groupingBy(GenesetMolecularData::getGenesetId));

    //get the nodes that have gene sets as child/leafs:
    List<GenesetHierarchyInfo> hierarchyGenesetParents = genesetHierarchyRepository
            .getGenesetHierarchyParents(genesetIds);//maybe rename to pre-leafItems?
    if (genesetIds != null) {
        //complement the result with the gene sets info:
        for (GenesetHierarchyInfo hierarchyItem : hierarchyGenesetParents) {
            List<Geneset> genesets = genesetHierarchyRepository
                    .getGenesetHierarchyGenesets(hierarchyItem.getNodeId());
            //get only  the ones that have data, filtering out the other ones (probably not needed, but just to be sure):
            genesets = getFilteredGenesets(genesets, genesetScoresMap);
            //for each gene set, calculate representative score:
            fillRepresentativeScoresAndPvalues(genesets, genesetScoresMap, genesetPvaluesMap, percentile);
            //filter out the ones that don't satisfy thresholds:
            genesets = getFilteredGenesets(genesets, scoreThreshold, pvalueThreshold);
            hierarchyItem.setGenesets(genesets);
            //if genesets turns out to be empty, still consider it as a possible super node (will be filtered below if it is not):
            if (genesets.size() == 0) {
                hierarchySuperNodes.add(hierarchyItem);
            }
        }
    }
    //remove the hierarchyGenesetParents that have no gene set leaf nodes:
    hierarchyGenesetParents = getFilteredHierarchyGenesetParents(hierarchyGenesetParents);
    //remove nodes from hierarchySuperNodes that do not lead to a leaf node:
    hierarchySuperNodes = getFilteredHierarchySuperNodes(hierarchySuperNodes, hierarchyGenesetParents);
    //TODO could probably simplify some of this code above by merging hierarchySuperNodes and hierarchyGenesetParents at the start
    //and filtering the tree in one recursive function... 

    //join both lists:
    hierarchySuperNodes.addAll(hierarchyGenesetParents);

    return hierarchySuperNodes;
}

From source file:org.codice.ddf.commands.catalog.ExportCommand.java

private void auditRecords(List<ExportItem> exportedItems) {
    AtomicInteger counter = new AtomicInteger();
    exportedItems.stream().map(ExportItem::getId).distinct()
            .collect(Collectors.groupingBy(e -> logPartition(e, counter))).values()
            .forEach(this::writePartitionToLog);
}
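
The logPartition helper is not shown here, but the underlying trick (a groupingBy classifier backed by a shared counter that splits a stream into fixed-size batches) can be sketched on its own; the batch size of 3 below is an arbitrary assumption:

import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class BatchingWithGroupingBy {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger();
        int batchSize = 3; // arbitrary batch size for this sketch
        // each element bumps the counter, so integer division yields its batch index;
        // note this relies on a sequential stream - a stateful classifier is not parallel-safe
        Map<Integer, List<Integer>> batches = IntStream.rangeClosed(1, 10).boxed()
                .collect(Collectors.groupingBy(i -> counter.getAndIncrement() / batchSize));
        System.out.println(batches); // {0=[1, 2, 3], 1=[4, 5, 6], 2=[7, 8, 9], 3=[10]}
    }
}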

From source file:org.codice.ddf.configuration.admin.ImportMigrationConfigurationAdminContext.java

public ImportMigrationConfigurationAdminContext(ImportMigrationContext context,
        ConfigurationAdminMigratable admin, ConfigurationAdmin configurationAdmin,
        Configuration[] memoryConfigs) {
    Validate.notNull(context, "invalid null context");
    Validate.notNull(admin, "invalid null configuration admin migratable");
    Validate.notNull(configurationAdmin, "invalid null configuration admin");
    Validate.notNull(memoryConfigs, "invalid null configurations");
    this.context = context;
    this.admin = admin;
    this.configurationAdmin = configurationAdmin;
    // categorize memory configurations
    this.managedServicesToDelete = Stream.of(memoryConfigs)
            .filter(ConfigurationAdminMigratable::isManagedService)
            .collect(Collectors.toMap(Configuration::getPid, Function.identity()));
    this.managedServiceFactoriesToDelete = Stream.of(memoryConfigs)
            .filter(ConfigurationAdminMigratable::isManagedServiceFactory)
            .collect(Collectors.groupingBy(Configuration::getFactoryPid));
    // categorize exported admin configurations
    final ImportMigrationConfigurationAdminEntry[] entries = context
            .entries(ImportMigrationConfigurationAdminContext.ADMIN_DIR).map(this::proxy)
            .filter(Objects::nonNull).toArray(ImportMigrationConfigurationAdminEntry[]::new);

    this.exportedServices = Stream.of(entries).filter(ImportMigrationConfigurationAdminEntry::isManagedService)
            .collect(Collectors.toMap(ImportMigrationConfigurationAdminEntry::getPid, Function.identity()));
    this.exportedFactoryServices = Stream.of(entries)
            .filter(ImportMigrationConfigurationAdminEntry::isManagedServiceFactory)
            .collect(Collectors.groupingBy(ImportMigrationConfigurationAdminEntry::getFactoryPid));
    context.getReport().doAfterCompletion(this::deleteUnexportedConfigurationsAfterCompletion);
}

From source file:org.codice.ddf.spatial.ogc.wps.process.endpoint.Validator.java

/**
 * @param inputDatas
 * @param inputDescriptions
 * @throws WpsException
 */
public static void validateProcessInputs(List<Data> inputDatas, List<DataDescription> inputDescriptions) {
    // bin the inputs by id
    Map<String, List<Data>> inputs = inputDatas.stream().collect(Collectors.groupingBy(Data::getId));

    // verify no unexpected inputs are being passed
    Map<String, DataDescription> inputDesc = inputDescriptions.stream()
            .collect(Collectors.toMap(DataDescription::getId, Function.identity()));
    inputs.keySet().forEach(key -> {
        if (!inputDesc.containsKey(key)) {
            throw new WpsException(
                    "One or more of the input identifiers passed does not match with any of the input identifiers of this process.",
                    "NoSuchInput", key);
        }
    });

    inputDesc.forEach((key, value) -> validateProcessInputsMinMaxOccurs(inputs.get(key), value));
}