Example usage for java.util.stream Collectors groupingBy

List of usage examples for java.util.stream Collectors groupingBy

Introduction

In this page you can find the example usage for java.util.stream Collectors groupingBy.

Prototype

public static <T, K> Collector<T, ?, Map<K, List<T>>> groupingBy(Function<? super T, ? extends K> classifier) 

Source Link

Document

Returns a Collector implementing a "group by" operation on input elements of type T, grouping elements according to a classification function, and returning the results in a Map.

Usage

From source file:org.obiba.mica.dataset.rest.entity.StudyEntitiesCountService.java

/**
 * Parses the RQL query and translates each node into an Opal query wrapped
 * in a {@link StudyEntitiesCountQuery}.
 *
 * @param query      the RQL query string to parse
 * @param entityType the type of entity being counted
 * @return one count query per study referenced by the parsed criteria
 */
public List<StudyEntitiesCountQuery> newQueries(String query, String entityType) {
    RQLCriteriaOpalConverter converter = applicationContext.getBean(RQLCriteriaOpalConverter.class);
    converter.parse(query);
    // Group criterion converters by study; criteria spanning multiple study
    // tables are skipped for now. TODO include Dataschema variables
    Map<BaseStudy, List<RQLCriterionOpalConverter>> studyConverters = converter.getCriterionConverters()
            .stream().filter(c -> !c.hasMultipleStudyTables())
            .collect(Collectors.groupingBy(c -> c.getVariableReferences().getStudy()));

    // Iterate entries directly instead of keySet() + get() to avoid a second map lookup per study.
    return studyConverters.entrySet().stream()
            .map(entry -> newQuery(entityType, entry.getKey(), entry.getValue()))
            .collect(Collectors.toList());
}

From source file:de.wpsverlinden.dupfind.DupeFinder.java

/**
 * Groups the indexed files by their "size-hash" signature and returns the
 * groups that actually contain duplicates.
 *
 * @return lists of files sharing both size and hash, each with at least
 *         two entries
 */
public Collection<List<FileEntry>> getDupeEntries() {
    Map<String, List<FileEntry>> dupeMap = fileIndex.values().parallelStream()
            .collect(Collectors.groupingBy((e) -> e.getSize() + "-" + e.getHash()));
    // A group with a single file is unique, not a duplicate; previously such
    // singleton groups were returned as well, contradicting the method's name.
    return dupeMap.values().stream()
            .filter(group -> group.size() > 1)
            .collect(Collectors.toList());
}

From source file:edu.washington.gs.skyline.model.quantification.GroupComparisonDataSet.java

/**
 * Computes the fold change for the given isotope label by fitting a linear
 * model over replicate abundances.
 *
 * <p>Replicates missing data for the label are removed first. When any
 * replicate carries a bio-replicate annotation, replicates are collapsed to
 * one summarized row per (isControl, bioReplicate) pair, using the mean
 * log2 abundance; otherwise the replicates are used as-is.
 *
 * @param label the isotope label to compute the fold change for
 * @return the first linear fit result, or {@code null} when no complete
 *         replicates remain
 */
public LinearFitResult calculateFoldChange(String label) {
    List<Replicate> replicates = removeIncompleteReplicates(label, this.replicates);
    if (replicates.size() == 0) {
        return null;
    }
    List<Replicate> summarizedRows;
    if (replicates.stream().anyMatch(row -> null != row.getBioReplicate())) {
        // Collapse to one row per (isControl, bioReplicate) group.
        Map<Pair<Boolean, Object>, List<Replicate>> groupedByBioReplicate = replicates.stream().collect(
                Collectors.groupingBy(replicate -> Pair.of(replicate.isControl(), replicate.bioReplicate)));
        summarizedRows = new ArrayList<>();
        for (Map.Entry<Pair<Boolean, Object>, List<Replicate>> entry : groupedByBioReplicate.entrySet()) {
            Double log2Abundance = calculateMean(entry.getValue().stream()
                    .map(replicateData -> replicateData.getLog2Abundance(label)).collect(Collectors.toList()));
            if (log2Abundance == null) {
                // Skip groups with no usable abundance data.
                continue;
            }
            Replicate combinedReplicate = new Replicate(entry.getKey().getLeft(), entry.getKey().getValue());
            ResultFileData resultFileData = combinedReplicate.ensureResultFileData();
            // Store the mean abundance back in linear space (2^log2).
            resultFileData.setTransitionAreas(label,
                    TransitionAreas.fromMap(Collections.singletonMap("", Math.pow(2.0, log2Abundance))));
            if (getNormalizationMethod() instanceof NormalizationMethod.RatioToLabel) {
                // For ratio-to-label normalization, give the denominator label a
                // unit area so downstream ratios equal the stored numerator.
                TransitionAreas denominator = TransitionAreas.fromMap(Collections.singletonMap("", 1.0));
                resultFileData.setTransitionAreas(
                        ((NormalizationMethod.RatioToLabel) getNormalizationMethod()).getIsotopeLabelTypeName(),
                        denominator);
            }
            summarizedRows.add(combinedReplicate);
        }
    } else {
        summarizedRows = replicates;
    }

    // Build the design inputs: one feature (all zeros), one run/subject index
    // per row, and a control flag per row.
    List<Double> abundances = summarizedRows.stream()
            .map(replicateData -> replicateData.getLog2Abundance(label)).collect(Collectors.toList());
    List<Integer> features = Collections.nCopies(summarizedRows.size(), 0);
    List<Integer> runs = IntStream.range(0, summarizedRows.size()).boxed().collect(Collectors.toList());
    List<Integer> subjects = IntStream.range(0, summarizedRows.size()).boxed().collect(Collectors.toList());
    List<Boolean> subjectControls = summarizedRows.stream().map(Replicate::isControl)
            .collect(Collectors.toList());
    FoldChangeDataSet foldChangeDataSet = new FoldChangeDataSet(abundances, features, runs, subjects,
            subjectControls);
    DesignMatrix designMatrix = DesignMatrix.getDesignMatrix(foldChangeDataSet, false);
    LinearFitResult linearFitResult = designMatrix.performLinearFit().get(0);
    return linearFitResult;
}

From source file:ch.wisv.areafiftylan.seats.service.SeatServiceImpl.java

/**
 * Fetches every seat and groups them by their seat group name.
 *
 * @return a seat map response keyed by seat group
 */
@Override
public SeatmapResponse getAllSeats() {
    Map<String, List<Seat>> groupedSeats = seatRepository.findAll()
            .stream()
            .collect(Collectors.groupingBy(Seat::getSeatGroup));
    return new SeatmapResponse(groupedSeats);
}

From source file:tds.assessment.services.impl.AssessmentWindowServiceImpl.java

/**
 * Looks up the assessment windows for the given assessments and groups them
 * by assessment key. Results are cached long-term.
 *
 * @param clientName    the client owning the assessments
 * @param assessmentIds the assessment identifiers to resolve
 * @return assessment windows keyed by assessment key
 */
@Override
@Cacheable(CacheType.LONG_TERM)
public Map<String, List<AssessmentWindow>> findAssessmentWindowsForAssessmentIds(final String clientName,
        final String... assessmentIds) {
    final List<AssessmentWindow> windows =
            assessmentWindowQueryRepository.findAssessmentWindowsForAssessmentIds(clientName, assessmentIds);
    return windows.stream()
            .collect(Collectors.groupingBy(AssessmentWindow::getAssessmentKey));
}

From source file:com.pscnlab.train.services.impls.TrainPeopleServiceImpl.java

/**
 * Loads the attendees for the given training ids and groups them by their
 * training id.
 *
 * @param es the training ids to look up
 * @return attendees keyed by training id; an empty map when none are found
 */
@Override
public Map<Integer, List<TrainPeople>> findMapByTrainIds(List<Integer> es) {
    List<TrainPeople> trainPeoples = this.findListByTrainIds(es);
    if (CollectionUtils.isEmpty(trainPeoples)) {
        // Collections.emptyMap() is the type-safe replacement for the raw EMPTY_MAP constant.
        return Collections.emptyMap();
    }
    return trainPeoples.stream().collect(Collectors.groupingBy(TrainPeople::getUuidTrain));
}

From source file:br.com.Summoner.core.base.cartas.monk.MonkCard.java

/**
 * Appends per-combo statistics to the given output buffer: for each
 * creature combo that was performed, the combo string and the number of
 * times it occurred.
 *
 * <p>Side effect: reassigns the static {@code CombosRealizados} list to the
 * filtered (non-null, length &gt; 1) and sorted view of itself.
 *
 * @param strbOut the buffer the statistics report is appended to
 */
public static void GeraEstatisca(StringBuilder strbOut) {

    strbOut.append("\r\n");
    strbOut.append("\r\n");
    strbOut.append(" Estatsticas de Combo Humano ");
    strbOut.append("\r\n");
    strbOut.append("\r\n");

    // Keep only meaningful combos (non-null, longer than one character),
    // sorted in descending combo order; persist the cleaned list.
    Comparator<MonkComboResult> byCombo = (e1, e2) -> e2.ComboCriatura.compareTo(e1.ComboCriatura);
    CombosRealizados = CombosRealizados.stream()
            .filter(c -> c.ComboCriatura != null && c.ComboCriatura.length() > 1).sorted(byCombo)
            .collect(Collectors.toList());

    // Count occurrences per combo directly with a counting() downstream
    // collector instead of materializing full per-combo lists just to read size().
    Map<String, Long> contagemPorCombo = CombosRealizados.stream()
            .collect(Collectors.groupingBy(c -> c.ComboCriatura, Collectors.counting()));

    for (Map.Entry<String, Long> entry : contagemPorCombo.entrySet()) {
        strbOut.append(" Combo Realizado:\t").append(entry.getKey()).append("\tQuantidade:\t")
                .append(entry.getValue()).append("\r\n");
    }

    strbOut.append("\r\n");
    strbOut.append("\r\n");

}

From source file:edu.brandeis.wisedb.scheduler.experiments.SkewDistributionExperiment.java

/**
 * Measures how workload skew relates to scheduling cost: for each random
 * sample of 20 queries, prints the chi-squared skew measure against a
 * uniform query-type distribution alongside the decision-tree and optimal
 * (A*) scheduling costs, as tab-separated lines on stdout.
 *
 * @param samples the number of random workloads to evaluate
 * @throws Exception if training or searching fails
 */
public static void calculateBurn(int samples) throws Exception {
    TightenableSLA sla = PercentSLA.nintyTenSLA();
    // Alternative SLAs kept for experimentation:
    //TightenableSLA sla = new SimpleLatencyModelSLA(9 * 60 * 1000, 1);
    //TightenableSLA sla = PerQuerySLA.getLatencyTimesN(2.0);
    //TightenableSLA sla = new AverageLatencyModelSLA(7 * 60 * 1000, 1);
    QueryTimePredictor qtp = new QueryTimePredictor();

    // Start from a clean training file.
    File f = new File("distSkew.csv");
    if (f.exists())
        f.delete();

    try (Trainer t = new Trainer("distSkew.csv", sla)) {
        t.train(2000, 12);
    }

    DTSearcher dt = new DTSearcher("distSkew.csv", qtp, sla);
    AStarGraphSearch astar = new AStarGraphSearch(new UnassignedQueryTimeHeuristic(qtp), sla, qtp);
    //FirstFitDecreasingGraphSearch astar = new FirstFitDecreasingGraphSearch(sla, qtp);

    // Expected counts under a uniform distribution of 20 queries over all types.
    ChiSquareTest cst = new ChiSquareTest();
    ChiSquaredDistribution cqd = new ChiSquaredDistribution(qtp.QUERY_TYPES.length - 1);
    double[] expected = Arrays.stream(qtp.QUERY_TYPES).mapToDouble(i -> 20.0 / (qtp.QUERY_TYPES.length))
            .toArray();

    System.out.println("Chi\tDT\tOpt");

    for (int i = 0; i < samples; i++) {
        Set<ModelQuery> smp = ModelWorkloadGenerator.randomQueries(20);

        // Reject samples lacking at least one query of each type; retry the
        // iteration without consuming it.
        long repr = smp.stream().mapToInt(q -> q.getType()).distinct().count();
        if (repr != qtp.QUERY_TYPES.length) {
            i--;
            continue;
        }

        // groups.get(v) is non-null here: the rejection above guarantees every type occurs.
        Map<Integer, List<ModelQuery>> groups = smp.stream().collect(Collectors.groupingBy(q -> q.getType()));

        long[] obs = Arrays.stream(qtp.QUERY_TYPES).mapToLong(v -> groups.get(v).size()).toArray();

        // Convert the chi-squared statistic into a cumulative probability (skew score).
        double chi = cst.chiSquare(expected, obs);
        chi = cqd.cumulativeProbability(chi);

        Cost dtCost = dt.getCostForQueries(smp, sla);
        Cost optCost = astar.getCostForQueries(smp, sla);

        System.out.println(chi + "\t" + dtCost.getTotalCost() + "\t" + optCost.getTotalCost());
    }

}

From source file:org.wallerlab.yoink.density.service.density.DensityVectorBasedCalculator.java

/**
 * calculate the density of a point from molecules
 * /*  ww w  . ja  va 2s.c  om*/
 * @param currentCoord
 *            -{@link org.wallerlab.yoink.api.model.molecular.Coord}
 * @param molecules
 *            -a Set of molecules
 *            {@link org.wallerlab.yoink.api.model.molecular.Molecule}
 * @return the density of a point from molecules
 */
public Double calculate(Coord currentCoord, Set<Molecule> molecules) {
    // long starting = System.currentTimeMillis();
    // get all atoms
    List<Atom> atoms = new ArrayList<Atom>();
    for (Molecule molecule : molecules) {
        atoms.addAll(molecule.getAtoms());
    }
    // group atoms by element type
    Map<Element, List<Atom>> atomMap = atoms.stream()
            .collect(Collectors.groupingBy(atom -> atom.getElementType()));
    double density = loopOverEveryElement(currentCoord, atomMap);
    // if the density is too small, zero or close to zero, take the default
    // density value.
    density = Math.max(density, Constants.DENSITY_DEFAULT);
    // System.out.println("starting to ending (millseconds): "
    // + (System.currentTimeMillis() - starting));
    return density;
}

From source file:org.ng200.openolympus.services.ContestService.java

/**
 * Computes the effective end instant of a contest: its scheduled end
 * (start time plus duration) pushed back by the largest summed time
 * extension among the grouped extensions, or by zero when none exist.
 *
 * <p>NOTE(review): the groupingBy classifier is the extension's contest,
 * but {@code findByContest(contest)} presumably already restricts results
 * to this contest, which would make the grouping a single bucket (i.e. the
 * max of one total). Confirm whether the intent was to group per user
 * instead.
 *
 * @param contest the contest whose effective end is computed
 * @return the contest end instant including granted time extensions
 */
public Instant getContestEndIncludingAllTimeExtensions(final Contest contest) {
    return contest.getStartTime().toInstant().plusMillis(contest.getDuration())
            .plusMillis(this.contestTimeExtensionRepository.findByContest(contest).stream()
                    .collect(Collectors.groupingBy(timeExtension -> timeExtension.getContest())).values()
                    .stream()
                    // Sum each group's durations, take the largest total (0 when empty).
                    .map(group -> group.stream().map(timeExtension -> timeExtension.getDuration())
                            .reduce((l, r) -> l + r))
                    .max((l, r) -> l.orElse(0l).compareTo(r.orElse(0l))).orElse(Optional.of(0l)).orElse(0l));
}