Example usage for org.apache.commons.math.stat.descriptive DescriptiveStatistics getMin

Introduction

This page collects example usages of org.apache.commons.math.stat.descriptive.DescriptiveStatistics.getMin() from open-source projects.

Prototype

public double getMin() 

Document

Returns the minimum of the available values, or Double.NaN if no values have been added.
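
A minimal sketch of the call in isolation (the sample values here are illustrative):

import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;

DescriptiveStatistics stats = new DescriptiveStatistics();
for (double v : new double[] { 4.0, 1.5, 7.2 }) {
    stats.addValue(v); // accumulate sample values
}
double min = stats.getMin(); // 1.5; Double.NaN when no values have been added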

Usage

From source file: datafu.hourglass.jobs.StagedOutputJob.java
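
This example collects per-task durations of a completed Hadoop job into DescriptiveStatistics instances and writes summary counters to the file system; getMin() supplies the MAP_MIN_TIME_MS and REDUCE_MIN_TIME_MS values.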

/**
 * Writes Hadoop counters and other task statistics to a file in the file system.
 *
 * @param fs the file system to write to
 * @throws IOException if the counters file cannot be written
 */
private void writeCounters(final FileSystem fs) throws IOException {
    final Path actualOutputPath = FileOutputFormat.getOutputPath(this);

    SimpleDateFormat timestampFormat = new SimpleDateFormat("yyyyMMddHHmmss");

    String suffix = timestampFormat.format(new Date());

    if (_countersParentPath != null) {
        if (!fs.exists(_countersParentPath)) {
            _log.info("Creating counter parent path " + _countersParentPath);
            fs.mkdirs(_countersParentPath, FsPermission.valueOf("-rwxrwxr-x"));
        }
        // make the name as unique as possible in this case because this may be a directory
        // where other counter files will be dropped
        _countersPath = new Path(_countersParentPath, ".counters." + suffix);
    } else {
        _countersPath = new Path(actualOutputPath, ".counters." + suffix);
    }

    _log.info(String.format("Writing counters to %s", _countersPath));
    FSDataOutputStream counterStream = fs.create(_countersPath);
    BufferedOutputStream buffer = new BufferedOutputStream(counterStream, 256 * 1024);
    OutputStreamWriter writer = new OutputStreamWriter(buffer);
    for (String groupName : getCounters().getGroupNames()) {
        for (Counter counter : getCounters().getGroup(groupName)) {
            writeAndLog(writer, String.format("%s=%d", counter.getName(), counter.getValue()));
        }
    }

    JobID jobID = this.getJobID();

    org.apache.hadoop.mapred.JobID oldJobId = new org.apache.hadoop.mapred.JobID(jobID.getJtIdentifier(),
            jobID.getId());

    long minStart = Long.MAX_VALUE;
    long maxFinish = 0;
    long setupStart = Long.MAX_VALUE;
    long cleanupFinish = 0;
    DescriptiveStatistics mapStats = new DescriptiveStatistics();
    DescriptiveStatistics reduceStats = new DescriptiveStatistics();
    boolean success = true;

    JobClient jobClient = new JobClient(this.conf);

    Map<String, String> taskIdToType = new HashMap<String, String>();

    TaskReport[] setupReports = jobClient.getSetupTaskReports(oldJobId);
    if (setupReports.length > 0) {
        _log.info("Processing setup reports");
        for (TaskReport report : setupReports) {
            taskIdToType.put(report.getTaskID().toString(), "SETUP");
            if (report.getStartTime() == 0) {
                _log.warn("Skipping report with zero start time");
                continue;
            }
            setupStart = Math.min(setupStart, report.getStartTime());
        }
    } else {
        _log.error("No setup reports");
    }

    TaskReport[] mapReports = jobClient.getMapTaskReports(oldJobId);
    if (mapReports.length > 0) {
        _log.info("Processing map reports");
        for (TaskReport report : mapReports) {
            taskIdToType.put(report.getTaskID().toString(), "MAP");
            if (report.getFinishTime() == 0 || report.getStartTime() == 0) {
                _log.warn("Skipping report with zero start or finish time");
                continue;
            }
            minStart = Math.min(minStart, report.getStartTime());
            mapStats.addValue(report.getFinishTime() - report.getStartTime());
        }
    } else {
        _log.error("No map reports");
    }

    TaskReport[] reduceReports = jobClient.getReduceTaskReports(oldJobId);
    if (reduceReports.length > 0) {
        _log.info("Processing reduce reports");
        for (TaskReport report : reduceReports) {
            taskIdToType.put(report.getTaskID().toString(), "REDUCE");
            if (report.getFinishTime() == 0 || report.getStartTime() == 0) {
                _log.warn("Skipping report with zero start or finish time");
                continue;
            }
            maxFinish = Math.max(maxFinish, report.getFinishTime());
            reduceStats.addValue(report.getFinishTime() - report.getStartTime());
        }
    } else {
        _log.error("No reduce reports");
    }

    TaskReport[] cleanupReports = jobClient.getCleanupTaskReports(oldJobId);
    if (cleanupReports.length > 0) {
        _log.info("Processing cleanup reports");
        for (TaskReport report : cleanupReports) {
            taskIdToType.put(report.getTaskID().toString(), "CLEANUP");
            if (report.getFinishTime() == 0) {
                _log.warn("Skipping report with finish time of zero");
                continue;
            }
            cleanupFinish = Math.max(cleanupFinish, report.getFinishTime());
        }
    } else {
        _log.error("No cleanup reports");
    }

    if (minStart == Long.MAX_VALUE) {
        _log.error("Could not determine map-reduce start time");
        success = false;
    }
    if (maxFinish == 0) {
        _log.error("Could not determine map-reduce finish time");
        success = false;
    }

    if (setupStart == Long.MAX_VALUE) {
        _log.error("Could not determine setup start time");
        success = false;
    }
    if (cleanupFinish == 0) {
        _log.error("Could not determine cleanup finish time");
        success = false;
    }

    // Collect statistics on successful/failed/killed task attempts, categorized by setup/map/reduce/cleanup.
    // Unfortunately the job client doesn't have an easier way to get these statistics.
    Map<String, Integer> attemptStats = new HashMap<String, Integer>();
    _log.info("Processing task attempts");
    for (TaskCompletionEvent event : getTaskCompletionEvents(jobClient, oldJobId)) {
        String type = taskIdToType.get(event.getTaskAttemptId().getTaskID().toString());
        String status = event.getTaskStatus().toString();

        String key = String.format("%s_%s_ATTEMPTS", status, type);
        if (!attemptStats.containsKey(key)) {
            attemptStats.put(key, 0);
        }
        attemptStats.put(key, attemptStats.get(key) + 1);
    }

    if (success) {
        writeAndLog(writer, String.format("SETUP_START_TIME_MS=%d", setupStart));
        writeAndLog(writer, String.format("CLEANUP_FINISH_TIME_MS=%d", cleanupFinish));
        writeAndLog(writer, String.format("COMPLETE_WALL_CLOCK_TIME_MS=%d", cleanupFinish - setupStart));

        writeAndLog(writer, String.format("MAP_REDUCE_START_TIME_MS=%d", minStart));
        writeAndLog(writer, String.format("MAP_REDUCE_FINISH_TIME_MS=%d", maxFinish));
        writeAndLog(writer, String.format("MAP_REDUCE_WALL_CLOCK_TIME_MS=%d", maxFinish - minStart));

        writeAndLog(writer, String.format("MAP_TOTAL_TASKS=%d", (long) mapStats.getN()));
        writeAndLog(writer, String.format("MAP_MAX_TIME_MS=%d", (long) mapStats.getMax()));
        writeAndLog(writer, String.format("MAP_MIN_TIME_MS=%d", (long) mapStats.getMin()));
        writeAndLog(writer, String.format("MAP_AVG_TIME_MS=%d", (long) mapStats.getMean()));
        writeAndLog(writer, String.format("MAP_STD_TIME_MS=%d", (long) mapStats.getStandardDeviation()));
        writeAndLog(writer, String.format("MAP_SUM_TIME_MS=%d", (long) mapStats.getSum()));

        writeAndLog(writer, String.format("REDUCE_TOTAL_TASKS=%d", (long) reduceStats.getN()));
        writeAndLog(writer, String.format("REDUCE_MAX_TIME_MS=%d", (long) reduceStats.getMax()));
        writeAndLog(writer, String.format("REDUCE_MIN_TIME_MS=%d", (long) reduceStats.getMin()));
        writeAndLog(writer, String.format("REDUCE_AVG_TIME_MS=%d", (long) reduceStats.getMean()));
        writeAndLog(writer, String.format("REDUCE_STD_TIME_MS=%d", (long) reduceStats.getStandardDeviation()));
        writeAndLog(writer, String.format("REDUCE_SUM_TIME_MS=%d", (long) reduceStats.getSum()));

        writeAndLog(writer, String.format("MAP_REDUCE_SUM_TIME_MS=%d",
                (long) mapStats.getSum() + (long) reduceStats.getSum()));

        for (Map.Entry<String, Integer> attemptStat : attemptStats.entrySet()) {
            writeAndLog(writer, String.format("%s=%d", attemptStat.getKey(), attemptStat.getValue()));
        }
    }

    writer.close();
    buffer.close();
    counterStream.close();
}

From source file: de.tudarmstadt.ukp.experiments.argumentation.sequence.feature.coreference.CoreferenceFeatures.java
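
This feature extractor derives coreference features per sentence; getMin() yields the minimum sentence distance to the previous and next coreferent sentence and the minimum number of coreference links per chain.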

@Override
protected List<Feature> extract(JCas jCas, Sentence sentence, String sentencePrefix)
        throws TextClassificationException {
    List<List<CoreferenceLink>> coreferenceChains = extractCoreferenceChains(jCas);

    FrequencyDistribution<String> featuresAcrossAllChains = new FrequencyDistribution<>();
    DescriptiveStatistics chainLength = new DescriptiveStatistics();
    DescriptiveStatistics distanceToPreviousSentence = new DescriptiveStatistics();
    DescriptiveStatistics distanceToNextSentence = new DescriptiveStatistics();
    DescriptiveStatistics interSentencesCorLinks = new DescriptiveStatistics();

    for (List<CoreferenceLink> chain : coreferenceChains) {

        SortedMap<Integer, List<CoreferenceLink>> sentencesAndLinks = extractSentencesAndLinksFromChain(chain,
                jCas);

        int currentSentencePos = getCurrentSentencePos(jCas, sentence);

        log.debug(sentencesAndLinks.keySet() + ", current " + currentSentencePos);

        // is the current sentence part of a chain that spans multiple sentences?
        boolean partOfChain = sentencesAndLinks.containsKey(currentSentencePos) && sentencesAndLinks.size() > 1;

        if (partOfChain) {
            log.debug(chainToString(chain));
            featuresAcrossAllChains.inc(FN_PART_OF_CHAIN);

            // starts the chain?
            if (sentencesAndLinks.firstKey().equals(currentSentencePos)) {
                featuresAcrossAllChains.inc(FN_STARTS_THE_CHAIN);
            } else if (sentencesAndLinks.lastKey().equals(currentSentencePos)) {
                // ends the chain?
                featuresAcrossAllChains.inc(FN_ENDS_THE_CHAIN);
            } else {
                // in the middle of chain?
                featuresAcrossAllChains.inc(FN_IN_THE_MIDDLE_OF_CHAIN);
            }

            // length of the chain
            chainLength.addValue(sentencesAndLinks.size());

            List<CoreferenceLink> currentSentenceLinks = sentencesAndLinks.get(currentSentencePos);
            CoreferenceLink currentSentenceFirstLink = currentSentenceLinks.get(0);
            CoreferenceLink currentSentenceLastLink = currentSentenceLinks.get(currentSentenceLinks.size() - 1);

            // transition to the previous link, i.e. NOMINAL -> PRONOMINAL
            if (!sentencesAndLinks.firstKey().equals(currentSentencePos)) {
                // find the previous sentence
                List<CoreferenceLink> previousSentenceLinks = null;
                int prevSentNo = currentSentencePos;
                while (previousSentenceLinks == null && prevSentNo >= 0) {
                    prevSentNo--;

                    if (sentencesAndLinks.containsKey(prevSentNo)) {
                        previousSentenceLinks = sentencesAndLinks.get(prevSentNo);
                    }
                }

                if (previousSentenceLinks == null) {
                    throw new IllegalStateException("No previous sentence with coreference links found in chain");
                }

                // distance to previous sentence
                distanceToPreviousSentence.addValue(currentSentencePos - prevSentNo);

                // get the last link from the previous sentence
                CoreferenceLink prevSentenceLastLink = previousSentenceLinks
                        .get(previousSentenceLinks.size() - 1);

                // add type type transition
                String prevSentenceLastLinkReferenceType = prevSentenceLastLink.getReferenceType();
                String currentSentenceFirstLinkReferenceType = currentSentenceFirstLink.getReferenceType();
                String transitionType = prevSentenceLastLinkReferenceType + GLUE
                        + currentSentenceFirstLinkReferenceType;
                featuresAcrossAllChains.addSample(FN_TRANSITION_IN_TYPE_TYPE + transitionType, 1);

                // add token - type transition
                String glueCoreferenceCurrentSentence = glueCoreferenceLinkTokens(currentSentenceFirstLink);
                String typeToken = prevSentenceLastLinkReferenceType + GLUE + glueCoreferenceCurrentSentence;
                featuresAcrossAllChains.addSample(FN_TRANSITION_IN_TYPE_TOKEN + typeToken, 1);

                // add type - token transition
                String glueCoreferencePrevSentence = glueCoreferenceLinkTokens(prevSentenceLastLink);
                String tokenType = glueCoreferencePrevSentence + GLUE + currentSentenceFirstLinkReferenceType;
                featuresAcrossAllChains.addSample(FN_TRANSITION_IN_TOKEN_TYPE + tokenType, 1);

                // add token token transition
                String tokenToken = glueCoreferencePrevSentence + GLUE + glueCoreferenceCurrentSentence;
                featuresAcrossAllChains.addSample(FN_TRANSITION_IN_TOKEN_TOKEN + tokenToken, 1);

                // exact matching token-token reference?
                if (glueCoreferencePrevSentence.equals(glueCoreferenceCurrentSentence)) {
                    featuresAcrossAllChains.addSample(FN_TRANSITION_IN_TOKEN_TOKEN_MATCH, 1);
                }
            }

            // transition to the next link, i.e. NOMINAL -> PRONOMINAL
            if (!sentencesAndLinks.lastKey().equals(currentSentencePos)) {
                // find the next sentence
                List<CoreferenceLink> nextSentenceLinks = null;
                int nextSentNo = currentSentencePos;
                while (nextSentenceLinks == null && nextSentNo <= sentencesAndLinks.lastKey()) {
                    nextSentNo++;

                    if (sentencesAndLinks.containsKey(nextSentNo)) {
                        nextSentenceLinks = sentencesAndLinks.get(nextSentNo);
                    }
                }

                if (nextSentenceLinks == null) {
                    throw new IllegalStateException("No next sentence with coreference links found in chain");
                }

                // distance to next sentence
                distanceToNextSentence.addValue(nextSentNo - currentSentencePos);

                // get the first link from the next sentence
                CoreferenceLink nextSentenceFirstLink = nextSentenceLinks.get(0);

                // add type type transition
                String currentSentenceLastLinkReferenceType = currentSentenceLastLink.getReferenceType();
                String nextSentenceFirstLinkReferenceType = nextSentenceFirstLink.getReferenceType();
                String transitionType = currentSentenceLastLinkReferenceType + GLUE
                        + nextSentenceFirstLinkReferenceType;
                featuresAcrossAllChains.addSample(FN_TRANSITION_OUT_TYPE_TYPE + transitionType, 1);

                // add token - type transition
                String glueCoreferenceCurrentSent = glueCoreferenceLinkTokens(currentSentenceLastLink);
                String typeToken = glueCoreferenceCurrentSent + GLUE + nextSentenceFirstLinkReferenceType;
                featuresAcrossAllChains.addSample(FN_TRANSITION_OUT_TOKEN_TYPE + typeToken, 1);

                // add type - token transition
                String glueCoreferenceNextSent = glueCoreferenceLinkTokens(nextSentenceFirstLink);
                String tokenType = currentSentenceLastLinkReferenceType + GLUE + glueCoreferenceNextSent;
                featuresAcrossAllChains.addSample(FN_TRANSITION_OUT_TYPE_TOKEN + tokenType, 1);

                // add token token transition
                String tokenToken = glueCoreferenceCurrentSent + GLUE + glueCoreferenceNextSent;
                featuresAcrossAllChains.addSample(FN_TRANSITION_OUT_TOKEN_TOKEN + tokenToken, 1);

                // exact matching token-token reference?
                if (glueCoreferenceNextSent.equals(glueCoreferenceCurrentSent)) {
                    featuresAcrossAllChains.addSample(FN_TRANSITION_OUT_TOKEN_TOKEN_MATCH, 1);
                }
            }
        }

        // number of coreference links of this chain in the current sentence
        if (sentencesAndLinks.containsKey(currentSentencePos)) {
            int coreferenceLinks = sentencesAndLinks.get(currentSentencePos).size();
            interSentencesCorLinks.addValue(coreferenceLinks);
        }

    }

    List<Feature> result = new ArrayList<>();

    log.debug(featuresAcrossAllChains);
    if (distanceToNextSentence.getN() > 0) {
        log.debug("Next:" + distanceToNextSentence);

        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_DIST_TO_NEXT_MIN,
                distanceToNextSentence.getMin()));
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_DIST_TO_NEXT_MAX,
                distanceToNextSentence.getMax()));
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_DIST_TO_NEXT_AVG,
                distanceToNextSentence.getMean()));
    }
    if (distanceToPreviousSentence.getN() > 0) {

        log.debug("Prev: " + distanceToPreviousSentence);

        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_DIST_TO_PREV_MIN,
                distanceToPreviousSentence.getMin()));
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_DIST_TO_PREV_MAX,
                distanceToPreviousSentence.getMax()));
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_DIST_TO_PREV_AVG,
                distanceToPreviousSentence.getMean()));
    }

    if (interSentencesCorLinks.getN() > 0) {
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_INTER_SENT_COR_MIN,
                interSentencesCorLinks.getMin()));
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_INTER_SENT_COR_MAX,
                interSentencesCorLinks.getMax()));
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_INTER_SENT_COR_AVG,
                interSentencesCorLinks.getMean()));
    }

    log.debug("----");

    for (String feat : featuresAcrossAllChains.getKeys()) {
        // binary
        result.add(new Feature(sentencePrefix + FEATURE_NAME + feat, 1));
    }

    return result;
}

From source file: org.a3badran.platform.logging.writer.MetricsWriter.java
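
Here each metrics snapshot is taken from a copy of the DescriptiveStatistics instance to reduce locking, and getMin() populates the ".min" entry alongside max, average, and percentiles.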

private Map<String, Long> getAllMetrics() {
    Map<String, Long> metrics = new HashMap<String, Long>();
    for (Entry<String, DescriptiveStatistics> entry : sampleMetrics.entrySet()) {
        // create a copy to reduce locking
        String name = entry.getKey();
        DescriptiveStatistics stats = entry.getValue().copy();
        metrics.put(name + ".sampleCount", (long) stats.getN());
        metrics.put(name + ".max", (long) stats.getMax());
        metrics.put(name + ".min", (long) stats.getMin());
        metrics.put(name + ".avg", (long) stats.getMean());
        metrics.put(name + ".50p", (long) stats.getPercentile(50));
        metrics.put(name + ".90p", (long) stats.getPercentile(90));
        metrics.put(name + ".99p", (long) stats.getPercentile(99));
    }

    for (Entry<String, DescriptiveStatistics> cEntry : sampleCounterMetrics.entrySet()) {
        // create a copy to reduce locking
        String cName = cEntry.getKey();
        DescriptiveStatistics cStats = cEntry.getValue().copy();
        metrics.put(cName + ".max", (long) cStats.getMax());
        metrics.put(cName + ".min", (long) cStats.getMin());
        metrics.put(cName + ".avg", (long) cStats.getMean());
        metrics.put(cName + ".50p", (long) cStats.getPercentile(50));
        metrics.put(cName + ".90p", (long) cStats.getPercentile(90));
        metrics.put(cName + ".99p", (long) cStats.getPercentile(99));
    }

    for (Entry<String, AtomicLong> entry : scopeTotalMetrics.entrySet()) {
        metrics.put(entry.getKey(), entry.getValue().longValue());
    }

    for (Entry<String, AtomicLong> entry : appTotalMetrics.entrySet()) {
        metrics.put(entry.getKey(), entry.getValue().longValue());
    }

    return metrics;
}

From source file: org.apache.jackrabbit.oak.benchmark.AbstractTest.java
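
This benchmark harness reports getMin() as the minimum latency in its per-concurrency summary line (min, 10%, 50%, 90%, max, N).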

private void runTest(RepositoryFixture fixture, Repository repository, List<Integer> concurrencyLevels)
        throws Exception {

    setUp(repository, CREDENTIALS);
    try {

        // Run a few iterations to warm up the system
        long warmupEnd = System.currentTimeMillis() + WARMUP;
        boolean stop = false;
        while (System.currentTimeMillis() < warmupEnd && !stop) {
            // execute at least once; after the first pass, honor the `haltRequested` flag
            stop = haltRequested;
            execute();
        }

        if (concurrencyLevels == null || concurrencyLevels.isEmpty()) {
            concurrencyLevels = Arrays.asList(1);
        }

        for (Integer concurrency : concurrencyLevels) {
            // Run the test
            DescriptiveStatistics statistics = runTest(concurrency);
            if (statistics.getN() > 0) {
                System.out.format("%-28.28s  %6d  %6.0f  %6.0f  %6.0f  %6.0f  %6.0f  %6d%n", fixture.toString(),
                        concurrency, statistics.getMin(), statistics.getPercentile(10.0),
                        statistics.getPercentile(50.0), statistics.getPercentile(90.0), statistics.getMax(),
                        statistics.getN());
                if (out != null) {
                    out.format("%-28.28s, %6d, %6.0f, %6.0f, %6.0f, %6.0f, %6.0f, %6d%n", fixture.toString(),
                            concurrency, statistics.getMin(), statistics.getPercentile(10.0),
                            statistics.getPercentile(50.0), statistics.getPercentile(90.0), statistics.getMax(),
                            statistics.getN());
                }
            }

        }
    } finally {
        tearDown();
    }
}

From source file: org.apache.jackrabbit.performance.AbstractPerformanceTest.java
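
A report writer that appends one line per test run, with getMin() in the min column followed by the 10th, 50th, and 90th percentiles and the maximum.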

private void writeReport(String test, String name, DescriptiveStatistics statistics) throws IOException {
    File report = new File("target", test + ".txt");

    boolean needsPrefix = !report.exists();
    PrintWriter writer = new PrintWriter(new FileWriterWithEncoding(report, "UTF-8", true));
    try {
        if (needsPrefix) {
            writer.format("# %-34.34s     min     10%%     50%%     90%%     max%n", test);
        }

        writer.format("%-36.36s  %6.0f  %6.0f  %6.0f  %6.0f  %6.0f%n", name, statistics.getMin(),
                statistics.getPercentile(10.0), statistics.getPercentile(50.0), statistics.getPercentile(90.0),
                statistics.getMax());
    } finally {
        writer.close();
    }
}

From source file: org.fusesource.eca.processor.StatisticsCalculator.java
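
A statistics processor over a sliding window of events; for StatisticsType.MIN it writes getMin() to the output node, and the default branch includes the minimum in a full summary.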

protected void process(StatisticsType type, Number value, ObjectNode statsNode) throws Exception {
    EventCache<Number> cache = this.eventCache;
    if (value != null && cache != null) {
        cache.add(value);
        if (type.equals(StatisticsType.RATE)) {
            calculateRate(statsNode);
        } else {
            List<Number> list = cache.getWindow();
            DescriptiveStatistics descriptiveStatistics = new DescriptiveStatistics();
            if (list != null && !list.isEmpty()) {
                for (Number number : list) {
                    descriptiveStatistics.addValue(number.doubleValue());
                }
                switch (type) {
                case MEAN:
                    statsNode.put("mean", descriptiveStatistics.getMean());
                    break;
                case GEOMETRIC_MEAN:
                    statsNode.put("gemetric mean", descriptiveStatistics.getGeometricMean());
                    break;
                case STDDEV:
                    statsNode.put("std-dev", descriptiveStatistics.getStandardDeviation());
                    break;
                case MIN:
                    statsNode.put("minimum", descriptiveStatistics.getMin());
                    break;
                case MAX:
                    statsNode.put("maximum", descriptiveStatistics.getMax());
                    break;
                case SKEWNESS:
                    statsNode.put("skewness", descriptiveStatistics.getSkewness());
                    break;
                case KUTOSIS:
                    statsNode.put("kurtosis", descriptiveStatistics.getKurtosis());
                    break;
                case VARIANCE:
                    statsNode.put("variance", descriptiveStatistics.getVariance());
                    break;
                case COUNT:
                    statsNode.put("count", list.size());
                    break;
                default:
                    statsNode.put("number", descriptiveStatistics.getN());
                    statsNode.put("mean", descriptiveStatistics.getMean());
                    statsNode.put("gemetric mean", descriptiveStatistics.getGeometricMean());
                    statsNode.put("minimum", descriptiveStatistics.getMin());
                    statsNode.put("maximum", descriptiveStatistics.getMax());
                    statsNode.put("std-dev", descriptiveStatistics.getStandardDeviation());
                    statsNode.put("median", descriptiveStatistics.getPercentile(50));
                    statsNode.put("skewness", descriptiveStatistics.getSkewness());
                    statsNode.put("kurtosis", descriptiveStatistics.getKurtosis());
                    statsNode.put("variance", descriptiveStatistics.getVariance());
                    calculateRate(statsNode);
                    statsNode.put("count", list.size());
                }
            }
        }

    }
}

From source file: org.matsim.contrib.common.stats.StatsWriter.java
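
A utility that writes one tab-separated row per map key containing mean, median, getMin(), getMax(), and the sample count.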

/**
 * Writes a table with columns map-key and statistical indicators mean, median, min, max and number of samples. Rows
 * are sorted according to the natural order of the map keys.
 *
 * @param statsMap a map with {@code DescriptiveStatistics} objects
 * @param keyLabel the header for the first column (containing the map keys)
 * @param file     the filename
 * @throws IOException if writing the file fails
 */
public static void writeStatistics(TDoubleObjectHashMap<DescriptiveStatistics> statsMap, String keyLabel,
        String file) throws IOException {
    double[] keys = statsMap.keys();
    Arrays.sort(keys);

    BufferedWriter writer = new BufferedWriter(new FileWriter(file));

    writer.write(keyLabel);
    writer.write(TAB);
    writer.write("mean");
    writer.write(TAB);
    writer.write("median");
    writer.write(TAB);
    writer.write("min");
    writer.write(TAB);
    writer.write("max");
    writer.write(TAB);
    writer.write("n");
    writer.newLine();

    for (double key : keys) {
        DescriptiveStatistics stats = statsMap.get(key);

        writer.write(String.valueOf(key));
        writer.write(TAB);
        writer.write(String.valueOf(stats.getMean()));
        writer.write(TAB);
        writer.write(String.valueOf(stats.getPercentile(50)));
        writer.write(TAB);
        writer.write(String.valueOf(stats.getMin()));
        writer.write(TAB);
        writer.write(String.valueOf(stats.getMax()));
        writer.write(TAB);
        writer.write(String.valueOf(stats.getN()));
        writer.newLine();
    }

    writer.close();
}

From source file: org.matsim.contrib.socnetgen.sna.graph.analysis.AnalyzerTask.java
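
A logging helper that folds getMin() into a one-line summary (mean, min, max, N, variance) for a named property.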

protected void printStats(DescriptiveStatistics stats, String key) {
    logger.info(String.format(
            "Statistics for property %1$s:\n\tmean = %2$.4f, min = %3$.4f, max = %4$.4f, N = %5$s, Var = %6$.4f",
            key, stats.getMean(), stats.getMin(), stats.getMax(), stats.getN(), stats.getVariance()));
}

From source file: org.matsim.contrib.socnetgen.sna.graph.analysis.DegreeTest.java
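
A unit test asserting that getMin() over the vertex-degree distribution of a small graph equals 2.0.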

public void test() {
    SparseGraphBuilder builder = new SparseGraphBuilder();
    SparseGraph graph = builder.createGraph();

    SparseVertex v1 = builder.addVertex(graph);
    SparseVertex v2 = builder.addVertex(graph);
    SparseVertex v3 = builder.addVertex(graph);
    SparseVertex v4 = builder.addVertex(graph);
    SparseVertex v5 = builder.addVertex(graph);
    SparseVertex v6 = builder.addVertex(graph);

    builder.addEdge(graph, v1, v2);
    builder.addEdge(graph, v2, v3);
    builder.addEdge(graph, v3, v4);
    builder.addEdge(graph, v4, v5);
    builder.addEdge(graph, v5, v6);
    builder.addEdge(graph, v6, v1);
    builder.addEdge(graph, v2, v5);

    Degree degree = Degree.getInstance();

    DescriptiveStatistics stats = degree.statistics(graph.getVertices());
    assertEquals(2.33, stats.getMean(), 0.01);
    assertEquals(2.0, stats.getMin());
    assertEquals(3.0, stats.getMax());

    TObjectDoubleHashMap<? extends Vertex> values = degree.values(graph.getVertices());
    TObjectDoubleIterator<? extends Vertex> it = values.iterator();

    int count2 = 0;
    int count3 = 0;
    for (int i = 0; i < values.size(); i++) {
        it.advance();
        if (it.value() == 2)
            count2++;
        else if (it.value() == 3)
            count3++;
    }

    assertEquals(4, count2);
    assertEquals(2, count3);

    assertEquals(-0.166, degree.assortativity(graph), 0.001);
}

From source file: org.matsim.contrib.socnetgen.sna.graph.analysis.TransitivityTest.java
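
A unit test asserting that getMin() over the local clustering coefficient distribution equals 0.0.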

@SuppressWarnings("unchecked")
public void test() {
    SparseGraphBuilder builder = new SparseGraphBuilder();
    SparseGraph graph = builder.createGraph();

    SparseVertex v1 = builder.addVertex(graph);
    SparseVertex v2 = builder.addVertex(graph);
    SparseVertex v3 = builder.addVertex(graph);
    SparseVertex v4 = builder.addVertex(graph);

    builder.addEdge(graph, v1, v2);
    builder.addEdge(graph, v1, v4);
    builder.addEdge(graph, v1, v3);
    builder.addEdge(graph, v2, v4);

    Transitivity transitivity = Transitivity.getInstance();

    DescriptiveStatistics distr = transitivity.localClusteringDistribution(graph.getVertices());
    assertEquals(7 / 12.0, distr.getMean(), 0.01);
    assertEquals(0.0, distr.getMin());
    assertEquals(1.0, distr.getMax());

    TObjectDoubleHashMap<Vertex> values = (TObjectDoubleHashMap<Vertex>) transitivity
            .localClusteringCoefficients((Set<? extends Vertex>) graph.getVertices());

    assertEquals(1 / 3.0, values.get(v1));
    assertEquals(1.0, values.get(v2));
    assertEquals(1.0, values.get(v4));
    assertEquals(0.0, values.get(v3));

    assertEquals(3 / 5.0, transitivity.globalClusteringCoefficient(graph));
}