Example usage for java.util.stream Collectors counting

List of usage examples for java.util.stream Collectors counting

Introduction

On this page you can find example usage for java.util.stream Collectors.counting().

Prototype

public static <T> Collector<T, ?, Long> counting() 

Source Link

Document

Returns a Collector accepting elements of type T that counts the number of input elements.

Usage

From source file:eu.cloudwave.wp5.feedbackhandler.aggregations.strategies.SimpleRequestAggregationStrategyImpl.java

/**
 * {@inheritDoc}/*  w  w w  .  ja va2s  .  c  o  m*/
 */
@Override
public RequestAggregationValues aggregate(RequestCollector requests) {

    IntSummaryStatistics stats = requests.getReqTimestamps().stream()
            .collect(Collectors.groupingBy(
                    timestamp -> DateUtils.round(new Date(timestamp), timestampAggregation),
                    Collectors.counting()))
            .values().stream().mapToInt(p -> toInt(p)).summaryStatistics();

    return new RequestAggregationValuesImpl(stats.getMin(), stats.getMax(), stats.getAverage(), stats.getSum(),
            stats.getCount());
}

From source file:eu.cloudwave.wp5.feedbackhandler.repositories.aggregations.AggregatedMicroserviceClientRequest.java

/**
 * Groups the client request timestamps by the configured aggregation interval and
 * returns summary statistics (min/max/avg/sum/count) over the per-interval request
 * counts.
 *
 * @return summary statistics of the number of requests per aggregation interval
 */
public IntSummaryStatistics getStatistics() {
    // Fix: removed the commented-out duplicate of this pipeline that was left behind.
    return microserviceClientRequest.getReqTimestamps().stream()
            .collect(Collectors.groupingBy(
                    timestamp -> DateUtils.round(new Date(timestamp), timestampAggregation),
                    Collectors.counting()))
            .values().stream().mapToInt(p -> toInt(p)).summaryStatistics();
}

From source file:eu.cloudwave.wp5.feedbackhandler.aggregations.strategies.RequestAggregationStrategyImpl.java

/**
 * {@inheritDoc}
 *
 * <p>Groups the collected request timestamps by the configured aggregation interval,
 * counts the requests per interval and summarizes those counts. When a time range was
 * selected (expected interval count &gt; 0) and the number of intervals that actually
 * contain data differs from it, the minimum is reported as 0 and the average and count
 * are recomputed over the expected interval count, so empty intervals are accounted for.
 */
@Override
public RequestAggregationValues aggregate(RequestCollector requests) {
    double expectedCount = getExpectedNumberOfMeasurementValueGroups();

    /*
     * Group by aggregation interval and create summary statistics with min, avg, max and count
     */
    Collection<Long> groupedByAggregationInterval = requests.getReqTimestamps().stream()
            .collect(Collectors.groupingBy(
                    timestamp -> DateUtils.round(new Date(timestamp), timestampAggregation),
                    Collectors.counting()))
            .values();
    int calculatedCount = groupedByAggregationInterval.size();

    try {
        if (calculatedCount != 0) {
            // use integer summaryStatistics to get min, avg, max
            IntSummaryStatistics stats = groupedByAggregationInterval.stream().mapToInt(p -> toInt(p))
                    .summaryStatistics();

            // no time range selected, just return int summary
            if (expectedCount == 0) {
                return new RequestAggregationValuesImpl(stats.getMin(), stats.getMax(), stats.getAverage(),
                        stats.getSum(), stats.getCount());
            } else if (Double.compare(calculatedCount, expectedCount) != 0) {
                // calculated count != expected count --> empty intervals exist, so the
                // minimum is 0 and the average is spread over the expected interval count.
                // (long / double is already a double; the former explicit cast was redundant)
                double newAverage = stats.getSum() / expectedCount;
                return new RequestAggregationValuesImpl(0, stats.getMax(), newAverage, stats.getSum(),
                        (int) expectedCount);
            } else {
                return new RequestAggregationValuesImpl(stats.getMin(), stats.getMax(), stats.getAverage(),
                        stats.getSum(), (int) expectedCount);
            }
        } else {
            // No requests at all in the collector.
            return new RequestAggregationValuesImpl(0, 0, 0, 0, 0);
        }
    } catch (ArithmeticException e) {
        // Defensive fallback to an all-zero aggregate.
        // NOTE(review): prefer a logger over System.out, and verify this catch is still
        // reachable — no integer division remains in this method.
        System.out.println(e.getMessage());
        return new RequestAggregationValuesImpl(0, 0, 0, 0, 0);
    }
}

From source file:org.trustedanalytics.datasetpublisher.boundary.MetadataMapper.java

/**
 * Throws an {@link IllegalStateException} when {@code strings} contains duplicate
 * entries; the exception message lists the duplicated values, sorted and
 * comma-separated, after the given prefix. Does nothing when all entries are unique.
 */
private void checkDuplicates(List<String> strings, String exceptionMessagePrefix) {
    Map<String, Long> occurrences = strings.stream()
            .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));
    occurrences.entrySet().stream()
            .filter(entry -> entry.getValue() > 1)
            .map(Map.Entry::getKey)
            .sorted()
            .reduce((left, right) -> left + ", " + right)
            .ifPresent(duplicates -> {
                throw new IllegalStateException(exceptionMessagePrefix + ": " + duplicates);
            });
}

From source file:org.owasp.webgoat.session.UserTracker.java

/**
 * Counts how many assignments the user has solved across all tracked lessons.
 *
 * <p>Bug fix: the previous implementation overwrote the running total on every loop
 * iteration ({@code =} instead of {@code +=}), so only the last lesson's solved
 * assignments were returned instead of the total over all lessons.
 *
 * @return total number of solved assignments over all lessons
 */
public int numberOfAssignmentsSolved() {
    int numberOfAssignmentsSolved = 0;
    for (LessonTracker lessonTracker : storage.values()) {
        Map<Assignment, Boolean> lessonOverview = lessonTracker.getLessonOverview();
        // Add the assignments marked as solved (value == true) in this lesson.
        numberOfAssignmentsSolved += (int) lessonOverview.values().stream().filter(b -> b).count();
    }
    return numberOfAssignmentsSolved;
}

From source file:hr.diskobolos.service.impl.EvaluationAnswerServiceImpl.java

/**
 * Assembles a {@link TermsOfCompetitionDto} for the given member register: basic member
 * data, the answered-questions percentage and the overall terms-of-condition status
 * (VALID when every question is answered with "yes", INVALID when all questions are
 * answered but at least one answer is not "yes", NONE while the questionnaire is
 * incomplete).
 *
 * @param memberRegister    member whose questionnaire answers are evaluated
 * @param questionnaireType questionnaire whose answers are fetched from persistence
 * @return the populated DTO
 */
@Override
public TermsOfCompetitionDto fetchTermsOfCompetitionByMemberRegisterAndQuestionnaireType(
        MemberRegister memberRegister, QuestionnaireType questionnaireType) {
    TermsOfCompetitionDto termsOfCompetitionDto = new TermsOfCompetitionDto();
    termsOfCompetitionDto.setId(memberRegister.getId());
    termsOfCompetitionDto.setName(memberRegister.getName());
    termsOfCompetitionDto.setAddress(memberRegister.getLocation().getAddress());
    termsOfCompetitionDto.setRegisterNumber(memberRegister.getRegisterNumber());
    termsOfCompetitionDto.setRegistrationDate(memberRegister.getRegistrationDate());

    List<EvaluationAnswer> evaluationAnswers = evaluationAnswerPersistence
            .findAllByMemberRegisterAndQuestionnaireType(memberRegister, questionnaireType);

    List<EvaluationQuestionnaireDefEnum> questionnaireDef = Arrays
            .asList(EvaluationQuestionnaireDefEnum.values());
    // Stream.count() replaces collect(Collectors.counting()): same value, no collector
    // boxing detour.
    // NOTE(review): this filters on the hard-coded TERMS_OF_CONDITION type rather than
    // the questionnaireType parameter — confirm that is intended.
    Long numberOfQuestion = questionnaireDef.stream()
            .filter(q -> q.getQuestionnaireType().equals(QuestionnaireType.TERMS_OF_CONDITION))
            .count();
    termsOfCompetitionDto.setQuestionnairePercentage(
            getQuestionnairePercentage(evaluationAnswers, numberOfQuestion, questionnaireDef));

    TermsOfConditionStatus termsOfConditionStatus = TermsOfConditionStatus.NONE;
    // The status is only decided once every question of the questionnaire has an answer.
    if (!evaluationAnswers.isEmpty() && evaluationAnswers.size() == numberOfQuestion) {
        boolean isValid = evaluationAnswers.stream().allMatch(e -> e.getAnswer().getLabel()
                .equals(messageSource.getMessage("QuestionChoicesDef.yes", null, Locale.ENGLISH)));
        termsOfConditionStatus = isValid ? TermsOfConditionStatus.VALID : TermsOfConditionStatus.INVALID;
    }
    termsOfCompetitionDto.setTermsOfConditionStatus(termsOfConditionStatus.name());
    return termsOfCompetitionDto;
}

From source file:io.syndesis.dao.DeploymentDescriptorTest.java

/**
 * Verifies that no two connector actions in the deployment descriptor declare the same
 * camel connector GAV coordinates.
 */
@Test
public void thereShouldBeNoDuplicateMavenCoordinates() {
    // Tally how often every GAV string occurs across all connector actions.
    final Map<String, Long> occurrencesByGav = StreamSupport.stream(deployment.spliterator(), true)
            .filter(entry -> "connector".equals(entry.get("kind").asText()))
            .flatMap(connector -> StreamSupport.stream(connector.get("data").get("actions").spliterator(), true))
            .map(action -> action.get("camelConnectorGAV").asText())
            .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));

    // Anything seen more than once is a duplicate.
    final Map<String, Long> duplicateGavs = occurrencesByGav.entrySet().stream()
            .filter(occurrence -> occurrence.getValue() > 1)
            .collect(Collectors.toMap(Entry::getKey, Entry::getValue));

    assertThat(duplicateGavs).as("Expected connector GAV coordinates to be unique").isEmpty();
}

From source file:io.syndesis.dao.DeploymentDescriptorTest.java

/**
 * Verifies that no two connector actions in the deployment descriptor share the same
 * action name.
 */
@Test
public void thereShouldBeNoDuplicateNames() {
    // Tally how often every action name occurs across all connector actions.
    final Map<String, Long> occurrencesByName = StreamSupport.stream(deployment.spliterator(), true)
            .filter(entry -> "connector".equals(entry.get("kind").asText()))
            .flatMap(connector -> StreamSupport.stream(connector.get("data").get("actions").spliterator(), true))
            .map(action -> action.get("name").asText())
            .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));

    // Anything seen more than once is a duplicate.
    final Map<String, Long> duplicateNames = occurrencesByName.entrySet().stream()
            .filter(occurrence -> occurrence.getValue() > 1)
            .collect(Collectors.toMap(Entry::getKey, Entry::getValue));

    assertThat(duplicateNames).as("Expected unique action names").isEmpty();
}

From source file:hr.diskobolos.persistence.impl.EvaluationAnswerPersistenceImpl.java

/**
 * Computes how the members that answered the terms-of-condition questionnaire are
 * distributed over the {@link TermsOfConditionStatus} values: VALID when a member
 * answered every question with "yes", INVALID when all questions are answered but not
 * all with "yes", NONE when the questionnaire is incomplete.
 *
 * @return concurrent map from status to the number of members with that status
 */
@Override
public ConcurrentMap<TermsOfConditionStatus, AtomicLong> fetchTermsOfCompetitionStatistic() {
    // Criteria query: all evaluation answers whose question belongs to the
    // TERMS_OF_CONDITION questionnaire.
    CriteriaBuilder cb = entityManager.getCriteriaBuilder();
    CriteriaQuery<EvaluationAnswer> cq = cb.createQuery(EvaluationAnswer.class);
    Root<EvaluationAnswer> evaluationAnswer = cq.from(EvaluationAnswer.class);
    Join<EvaluationAnswer, QuestionChoicesDef> choiceDef = evaluationAnswer.join(EvaluationAnswer_.answer);
    Join<QuestionChoicesDef, EvaluationQuestionDef> questionDef = choiceDef
            .join(QuestionChoicesDef_.evaluationQuestionDef);
    ParameterExpression<QuestionnaireType> questionnaireType = cb.parameter(QuestionnaireType.class,
            "questionnaireType");
    cq.select(evaluationAnswer);
    cq.where(cb.equal(questionDef.get(EvaluationQuestionDef_.questionnaireType), questionnaireType));
    TypedQuery<EvaluationAnswer> query = entityManager.createQuery(cq);
    query.setParameter("questionnaireType", QuestionnaireType.TERMS_OF_CONDITION);
    List<EvaluationAnswer> evaluationAnswers = query.getResultList();

    ConcurrentMap<TermsOfConditionStatus, AtomicLong> distributionByTermsOfCompetitionStatus = new ConcurrentHashMap<>();

    // Total number of questions in the terms-of-condition questionnaire.
    // Stream.count() replaces collect(Collectors.counting()): yields a primitive long
    // directly, no boxing.
    List<EvaluationQuestionnaireDefEnum> questionnaireDef = Arrays
            .asList(EvaluationQuestionnaireDefEnum.values());
    long numberOfQuestion = questionnaireDef.stream()
            .filter(q -> q.getQuestionnaireType().equals(QuestionnaireType.TERMS_OF_CONDITION))
            .count();

    // One entry per member that answered at least one question (distinct by member id).
    List<MemberRegister> memberRegisters = evaluationAnswers.stream()
            .filter(StreamUtil.distinctByKey((EvaluationAnswer e) -> e.getMemberRegister().getId()))
            .map(EvaluationAnswer::getMemberRegister).collect(Collectors.toList());
    memberRegisters.forEach((memberRegister) -> {
        TermsOfConditionStatus termsOfConditionStatus = TermsOfConditionStatus.NONE;
        // The status is only decided for members that answered every question.
        if (evaluationAnswers.stream().filter(m -> m.getMemberRegister().equals(memberRegister))
                .count() == numberOfQuestion) {
            boolean isValid = evaluationAnswers.stream()
                    .filter(m -> m.getMemberRegister().equals(memberRegister))
                    .allMatch(e -> e.getAnswer().getLabel()
                            .equals(messageSource.getMessage("QuestionChoicesDef.yes", null, Locale.ENGLISH)));
            termsOfConditionStatus = isValid ? TermsOfConditionStatus.VALID : TermsOfConditionStatus.INVALID;
        }
        // computeIfAbsent(...).incrementAndGet() replaces the putIfAbsent/get pair with a
        // single atomic map access.
        distributionByTermsOfCompetitionStatus.computeIfAbsent(termsOfConditionStatus, s -> new AtomicLong())
                .incrementAndGet();
    });

    return distributionByTermsOfCompetitionStatus;
}

From source file:com.ggvaidya.scinames.ui.DatasetDiffController.java

/**
 * Builds a table of comparison statistics for the given datasets, keyed by statistic
 * name (row) and dataset (column), with preformatted display strings as values.
 *
 * <p>The first dataset acts as the baseline: its cells hold absolute counts. Every
 * subsequent dataset's cells additionally show the delta against the baseline, a set
 * comparison and a percentage change. The "Number of changes" rows for subsequent
 * datasets count changes accumulated SINCE the baseline dataset, not absolute changes.
 *
 * @param datasets datasets to compare; the first one is the baseline
 * @return table of display strings; empty when no dataset is given
 */
private Table<String, Dataset, String> getComparisonStats(Dataset... datasets) {
    Table<String, Dataset, String> precalc = HashBasedTable.create();

    // No datasets? Give up now.
    if (datasets.length == 0)
        return precalc;

    // For each row, we start with the actual stats for the first dataset and 
    // then provide diffs to subsequent datasets.
    Dataset ds1 = datasets[0];
    Project project = datasetDiffView.getProjectView().getProject();

    precalc.put("Number of rows", ds1, String.valueOf(ds1.getRowCount()));

    Set<Name> namesInRows1 = ds1.getNamesInAllRows();
    precalc.put("Number of names in rows", ds1, String.valueOf(namesInRows1.size()));

    Set<Name> recognizedNames1 = project.getRecognizedNames(ds1);
    precalc.put("Number of names recognized", ds1, String.valueOf(recognizedNames1.size()));

    // "Binomial" names via Name.asBinomial() — presumably names reduced to
    // genus + species form; confirm against the Name class.
    Set<Name> binomialNamesInRows1 = ds1.getNamesInAllRows().stream().flatMap(n -> n.asBinomial())
            .collect(Collectors.toSet());
    precalc.put("Number of binomial names in rows", ds1, String.valueOf(binomialNamesInRows1.size()));

    Set<Name> binomialRecognizedNames1 = project.getRecognizedNames(ds1).stream().flatMap(n -> n.asBinomial())
            .collect(Collectors.toSet());
    precalc.put("Number of binomial names recognized", ds1, String.valueOf(binomialRecognizedNames1.size()));

    Set<DatasetColumn> ds1_cols = new HashSet<>(ds1.getColumns());
    precalc.put("Number of columns", ds1, String.valueOf(ds1_cols.size()));

    List<Change> ds1_changes = ds1.getChanges(project).collect(Collectors.toList());
    precalc.put("Number of changes", ds1, String.valueOf(ds1_changes.size()));

    // One row per change type present in the baseline dataset.
    Map<ChangeType, Long> ds1_changes_by_type = ds1_changes.stream()
            .collect(Collectors.groupingBy(ch -> ch.getType(), Collectors.counting()));
    for (ChangeType ct : ds1_changes_by_type.keySet()) {
        precalc.put("Number of changes of type '" + ct.getType() + "'", ds1,
                String.valueOf(ds1_changes_by_type.get(ct)));
    }

    // Now do comparison stats for each subsequent dataset.
    for (Dataset ds : datasets) {
        if (ds == ds1)
            continue;

        // Each cell: "<absolute>: <delta> (<details>, <percentage>)".
        precalc.put("Number of rows", ds, ds.getRowCount() + ": " + (ds.getRowCount() - ds1.getRowCount())
                + " (" + percentage(ds.getRowCount() - ds1.getRowCount(), ds1.getRowCount()) + ")");

        Set<Name> recognizedNames = project.getRecognizedNames(ds);

        precalc.put("Number of names recognized", ds,
                recognizedNames.size() + ": " + (recognizedNames.size() - recognizedNames1.size()) + " ("
                        + compareSets(recognizedNames1, recognizedNames) + ", "
                        + percentage(recognizedNames.size() - recognizedNames1.size(), recognizedNames1.size())
                        + ")");

        Set<Name> namesInRows = ds.getNamesInAllRows();
        precalc.put("Number of names in rows", ds,
                namesInRows.size() + ": " + (namesInRows.size() - namesInRows1.size()) + " ("
                        + compareSets(namesInRows1, namesInRows) + ", "
                        + percentage(namesInRows.size() - namesInRows1.size(), namesInRows1.size()) + ")");

        Set<Name> binomialRecognizedNames = project.getRecognizedNames(ds).stream().flatMap(n -> n.asBinomial())
                .collect(Collectors.toSet());

        precalc.put("Number of binomial names recognized", ds,
                binomialRecognizedNames.size() + ": "
                        + (binomialRecognizedNames.size() - binomialRecognizedNames1.size()) + " ("
                        + compareSets(binomialRecognizedNames1, binomialRecognizedNames) + ", "
                        + percentage(binomialRecognizedNames.size() - binomialRecognizedNames1.size(),
                                binomialRecognizedNames1.size())
                        + ")");

        Set<Name> binomialNamesInRows = ds.getNamesInAllRows().stream().flatMap(n -> n.asBinomial())
                .collect(Collectors.toSet());
        precalc.put("Number of binomial names in rows", ds,
                binomialNamesInRows.size() + ": " + (binomialNamesInRows.size() - binomialNamesInRows1.size())
                        + " (" + compareSets(binomialNamesInRows1, binomialNamesInRows) + ", "
                        + percentage(binomialNamesInRows.size() - binomialNamesInRows1.size(),
                                binomialNamesInRows1.size())
                        + ")");

        Set<DatasetColumn> ds_cols = new HashSet<>(ds.getColumns());
        precalc.put("Number of columns", ds, ds_cols.size() + ": " + (ds_cols.size() - ds1_cols.size()) + " ("
                + compareSets(ds1.getColumns(), ds.getColumns()) + ", "
                + percentage(ds.getColumns().size() - ds1.getColumns().size(), ds1.getColumns().size()) + ")");

        // What we want here is actually the number of changes SINCE ds1
        // So:
        // Collect the datasets strictly after ds1 up to and including ds, relying on
        // project.getDatasets() returning them in order (identity comparison, ==).
        List<Dataset> datasetsBetween1AndDs = new LinkedList<>();
        boolean ds1_found = false;
        for (Dataset dt : project.getDatasets()) {
            // Don't start until we see the first dataset.
            if (dt == ds1) {
                ds1_found = true;
                continue;
            }

            // Add every subsequent dataset
            if (ds1_found)
                datasetsBetween1AndDs.add(dt);

            // Until we find the current dataset.
            if (dt == ds)
                break;
        }

        // Changes accumulated over all datasets between the baseline and ds.
        List<Change> ds_changes = datasetsBetween1AndDs.stream().flatMap(dt -> dt.getChanges(project))
                .collect(Collectors.toList());
        precalc.put("Number of changes", ds, String.valueOf(ds_changes.size()));

        Map<ChangeType, Long> ds_changes_by_type = ds_changes.stream()
                .collect(Collectors.groupingBy(ch -> ch.getType(), Collectors.counting()));
        for (ChangeType ct : ds_changes_by_type.keySet()) {
            precalc.put("Number of changes of type '" + ct.getType() + "'", ds,
                    String.valueOf(ds_changes_by_type.get(ct)));
        }
    }

    return precalc;
}