Example usage for org.apache.commons.math3.distribution NormalDistribution NormalDistribution

Introduction

This page collects example usages of the NormalDistribution(double mean, double sd) constructor from org.apache.commons.math3.distribution.

Prototype

public NormalDistribution(double mean, double sd) throws NotStrictlyPositiveException 

Document

Create a normal distribution using the given mean and standard deviation.
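
A minimal, self-contained sketch of the constructor in use (the class name NormalDistributionDemo is chosen for illustration and is not part of the library):

import org.apache.commons.math3.distribution.NormalDistribution;

public class NormalDistributionDemo {
    public static void main(String[] args) {
        // Standard normal: mean 0, standard deviation 1.
        // The constructor throws NotStrictlyPositiveException if sd <= 0.
        NormalDistribution nd = new NormalDistribution(0.0, 1.0);

        System.out.println(nd.cumulativeProbability(1.96));         // ~0.975
        System.out.println(nd.density(0.0));                        // ~0.3989
        System.out.println(nd.inverseCumulativeProbability(0.975)); // ~1.96
        System.out.println(nd.sample());                            // one random draw
    }
}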

Usage

From source file:uk.ac.babraham.SeqMonk.Filters.IntensityDifferenceFilter.java

protected void generateProbeList() {

    applyMultipleTestingCorrection = optionsPanel.multipleTestingBox.isSelected();

    Probe[] probes = startingList.getAllProbes();

    // We'll pull the number of probes to sample from the preferences if they've changed it

    Integer updatedProbesPerSet = optionsPanel.probesPerSet();
    if (updatedProbesPerSet != null)
        probesPerSet = updatedProbesPerSet;

    ProbeList newList = new ProbeList(startingList, "Filtered Probes", "", "Diff p-value");

    // We'll build up a set of p-values as we go along
    float[] lowestPValues = new float[probes.length];
    for (int p = 0; p < lowestPValues.length; p++) {
        lowestPValues[p] = 1;
    }

    // This is going to be the temporary array we populate with the set of
    // differences we are going to analyse.
    double[] currentDiffSet = new double[probesPerSet];

    // First work out the set of comparisons we're going to make

    Vector<SingleComparison> comparisons = new Vector<IntensityDifferenceFilter.SingleComparison>();
    for (int fromIndex = 0; fromIndex < fromStores.length; fromIndex++) {
        for (int toIndex = 0; toIndex < toStores.length; toIndex++) {
            if (fromStores[fromIndex] == toStores[toIndex])
                continue;

            // If we can find the fromStore in the toStores we've already done and the
            // toStore anywhere in the fromStores then we can skip this.
            boolean canSkip = false;

            for (int i = 0; i < fromIndex; i++) {
                if (fromStores[i] == toStores[toIndex]) {
                    for (int j = 0; j < toStores.length; j++) {
                        if (toStores[j] == fromStores[fromIndex]) {
                            canSkip = true;
                            break;
                        }
                    }
                    break;
                }
            }

            if (canSkip)
                continue;

            comparisons.add(new SingleComparison(fromIndex, toIndex));

        }
    }

    // Put something in the progress whilst we're ordering the probe values to make
    // the comparison.
    progressUpdated("Generating background model", 0, 1);

    for (int comparisonIndex = 0; comparisonIndex < comparisons.size(); comparisonIndex++) {

        int fromIndex = comparisons.elementAt(comparisonIndex).fromIndex;
        int toIndex = comparisons.elementAt(comparisonIndex).toIndex;

        // We need to generate a set of probe indices ordered by their average intensity
        Integer[] indices = new Integer[probes.length];
        for (int i = 0; i < probes.length; i++) {
            indices[i] = i;
        }

        Comparator<Integer> comp = new AverageIntensityComparator(fromStores[fromIndex], toStores[toIndex],
                probes);

        Arrays.sort(indices, comp);

        progressUpdated("Made " + comparisonIndex + " out of " + comparisons.size() + " comparisons",
                comparisonIndex, comparisons.size());

        IndexTTestValue[] currentPValues = new IndexTTestValue[indices.length];

        for (int i = 0; i < indices.length; i++) {

            if (cancel) {
                cancel = false;
                progressCancelled();
                return;
            }

            if (i % 1000 == 0) {

                int progress = (i * 100) / indices.length;

                progress += 100 * comparisonIndex;

                progressUpdated("Made " + comparisonIndex + " out of " + comparisons.size() + " comparisons",
                        progress, comparisons.size() * 100);
            }

            // We need to make up the set of differences to represent this probe
            int startingIndex = i - (probesPerSet / 2);
            if (startingIndex < 0)
                startingIndex = 0;
            if (startingIndex + (probesPerSet + 1) >= probes.length)
                startingIndex = probes.length - (probesPerSet + 1);

            try {
                for (int j = startingIndex; j < startingIndex + (probesPerSet + 1); j++) {
                    if (j == i)
                        continue; // Don't include the point being tested in the background model
                    else if (j < i) {
                        currentDiffSet[j - startingIndex] = fromStores[fromIndex].getValueForProbe(
                                probes[indices[j]]) - toStores[toIndex].getValueForProbe(probes[indices[j]]);
                    } else {
                        currentDiffSet[(j - startingIndex) - 1] = fromStores[fromIndex].getValueForProbe(
                                probes[indices[j]]) - toStores[toIndex].getValueForProbe(probes[indices[j]]);
                    }
                }

                // Should we fix the mean at 0?
                double mean = 0;
                //               double mean = SimpleStats.mean(currentDiffSet);
                double stdev = SimpleStats.stdev(currentDiffSet, mean);

                if (stdev == 0) {
                    currentPValues[indices[i]] = new IndexTTestValue(indices[i], 1);
                    continue;
                }

                // Get the difference for this point
                double diff = fromStores[fromIndex].getValueForProbe(probes[indices[i]])
                        - toStores[toIndex].getValueForProbe(probes[indices[i]]);

                NormalDistribution nd = new NormalDistribution(mean, stdev);

                double significance;

                if (diff < mean) {
                    significance = nd.cumulativeProbability(diff);
                } else {
                    significance = 1 - nd.cumulativeProbability(diff);
                }

                currentPValues[indices[i]] = new IndexTTestValue(indices[i], significance);

            } catch (SeqMonkException sme) {
                progressExceptionReceived(sme);
                return;
            }

        }

        // We now need to correct the set of pValues
        if (applyMultipleTestingCorrection) {
            BenjHochFDR.calculateQValues(currentPValues);
        }

        // Finally we compare these pValues to the lowest ones we have from
        // the combined set
        if (applyMultipleTestingCorrection) {
            for (int i = 0; i < currentPValues.length; i++) {
                if (currentPValues[i].q < lowestPValues[currentPValues[i].index]) {
                    lowestPValues[currentPValues[i].index] = (float) currentPValues[i].q;
                }
            }
        } else {
            for (int i = 0; i < currentPValues.length; i++) {
                if (currentPValues[i].p < lowestPValues[currentPValues[i].index]) {
                    lowestPValues[currentPValues[i].index] = (float) currentPValues[i].p;
                }
            }
        }
    }

    // Now we can go through the lowest P-value set and see if any of them
    // pass the filter.
    for (int i = 0; i < lowestPValues.length; i++) {
        if (lowestPValues[i] < pValueLimit) {
            newList.addProbe(probes[i], lowestPValues[i]);
        }
    }

    filterFinished(newList);
}
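
The core step in the example above is independent of SeqMonk: take the differences for a window of probes with similar intensity, model them as normal with the mean fixed at 0 and their standard deviation, and report the nearer tail of the observed difference as its p-value. A standalone sketch of that step, assuming only commons-math3 on the classpath (the class and method names are illustrative, not part of SeqMonk):

import org.apache.commons.math3.distribution.NormalDistribution;

public class LocalDifferenceTest {

    // One-tailed p-value for an observed difference against a background
    // window of neighbouring differences, with the mean fixed at 0 as above.
    static double localDifferencePValue(double observedDiff, double[] backgroundDiffs) {
        double mean = 0;

        // Sample standard deviation of the background about the fixed mean.
        double sumSq = 0;
        for (double d : backgroundDiffs) {
            sumSq += (d - mean) * (d - mean);
        }
        double stdev = Math.sqrt(sumSq / (backgroundDiffs.length - 1));
        if (stdev == 0) {
            return 1; // no spread in the background, so nothing is called significant
        }

        NormalDistribution nd = new NormalDistribution(mean, stdev);
        return observedDiff < mean ? nd.cumulativeProbability(observedDiff)
                : 1 - nd.cumulativeProbability(observedDiff);
    }
}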

From source file:uk.ac.diamond.scisoft.ncd.core.data.stats.AndersonDarlingNormalityTest.java

public boolean acceptNullHypothesis(Dataset data) {
    Dataset sortedData = data.clone().sort(null);
    double mean = (Double) data.mean(true);
    int size = data.getSize();
    double std = (Double) data.stdDeviation();
    //double std = Math.min((Double) errors.mean(true), (Double) data.stdDeviation());
    //double std = (Double) errors.mean(true);

    double thres = criticalValue / (1.0 + 4.0 / size - 25.0 / size / size);
    if (std > 0) {
        NormalDistribution norm = new NormalDistribution(mean, std);
        double sum = 0.0;
        for (int i = 0; i < size; i++) {
            double val1 = sortedData.getDouble(i);
            double val2 = sortedData.getDouble(size - 1 - i);
            double cdf1 = norm.cumulativeProbability(val1);
            double cdf2 = norm.cumulativeProbability(val2);
            sum += (2 * i + 1) * (Math.log(cdf1) + Math.log(1.0 - cdf2));
        }
        double A2 = -size - sum / size;
        return (A2 < 0 ? true : (Math.sqrt(A2) < thres));
    }
    return true;
}
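
The loop above accumulates the Anderson-Darling statistic. For a sorted sample x_(1) <= ... <= x_(n) and the fitted normal CDF F, the statistic is

    A^2 = -n - \frac{1}{n} \sum_{i=1}^{n} (2i - 1) \left[ \ln F\bigl(x_{(i)}\bigr) + \ln\bigl(1 - F\bigl(x_{(n+1-i)}\bigr)\bigr) \right]

which matches the 0-based loop, where 2i + 1 plays the role of 2(i + 1) - 1. The factor 1 + 4/n - 25/n^2 is the usual small-sample correction for a normal with estimated mean and standard deviation; the example folds it into the critical value (thres) rather than scaling A^2 directly.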

From source file:util.Statistics.java

public Statistics(List<Integer> list) {
    scores = intsToDoubles(list);
    DescriptiveStatistics dStats = new DescriptiveStatistics(scores);

    summaryStatistics.put("min", dStats.getMin()); // Minimum
    summaryStatistics.put("q1", dStats.getPercentile(25)); // Lower Quartile (Q1)
    summaryStatistics.put("q2", dStats.getPercentile(50)); // Middle Quartile (Median - Q2)
    summaryStatistics.put("q3", dStats.getPercentile(75)); // High Quartile (Q3)
    summaryStatistics.put("max", dStats.getMax()); // Maxiumum

    summaryStatistics.put("mean", dStats.getMean()); // Mean
    summaryStatistics.put("sd", dStats.getStandardDeviation()); // Standard Deviation

    EmpiricalDistribution distribution = new EmpiricalDistribution(NUM_BINS);
    distribution.load(scores);
    List<SummaryStatistics> binStats = distribution.getBinStats();
    double[] upperBounds = distribution.getUpperBounds();

    Double lastUpperBound = upperBounds[0];
    bins.add(new Pair<Pair<Double, Double>, Long>(
            new Pair<Double, Double>(summaryStatistics.get("min"), lastUpperBound), binStats.get(0).getN()));
    for (int i = 1; i < binStats.size(); i++) {
        bins.add(new Pair<Pair<Double, Double>, Long>(new Pair<Double, Double>(lastUpperBound, upperBounds[i]),
                binStats.get(i).getN()));
        lastUpperBound = upperBounds[i];
    }

    if (list.size() > 5 && dStats.getStandardDeviation() > 0) // Only remove outliers if relatively normal
    {
        double mean = dStats.getMean();
        double stDev = dStats.getStandardDeviation();
        NormalDistribution normalDistribution = new NormalDistribution(mean, stDev);

        Iterator<Integer> listIterator = list.iterator();
        double significanceLevel = .50 / list.size(); // Chauvenet's Criterion for Outliers
        while (listIterator.hasNext()) {
            int num = listIterator.next();
            double pValue = normalDistribution.cumulativeProbability(num);
            if (pValue < significanceLevel) {
                outliers.add(num);
                listIterator.remove();
            }
        }

        if (list.size() != dStats.getN()) // If and only if outliers have been removed
        {
            double[] significantData = intsToDoubles(list);
            dStats = new DescriptiveStatistics(significantData);

            summaryStatistics.put("min", dStats.getMin());
            summaryStatistics.put("max", dStats.getMax());
            summaryStatistics.put("mean", dStats.getMean());
            summaryStatistics.put("sd", dStats.getStandardDeviation());
        }
    }
}
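
The outlier pass above is one formulation of Chauvenet's criterion: with n observations and a normal fitted to their mean and standard deviation, a value is discarded when its tail probability falls below 0.5 / n. A standalone, two-sided sketch of the same idea, assuming only commons-math3 on the classpath (the class and method names are illustrative; note that the example above tests only the lower tail):

import java.util.ArrayList;
import java.util.List;

import org.apache.commons.math3.distribution.NormalDistribution;
import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;

public class ChauvenetSketch {

    // Returns the values flagged as outliers by a two-sided Chauvenet test.
    static List<Double> chauvenetOutliers(double[] data) {
        DescriptiveStatistics stats = new DescriptiveStatistics(data);
        double sd = stats.getStandardDeviation();
        List<Double> outliers = new ArrayList<Double>();
        if (data.length < 2 || sd == 0) {
            return outliers; // degenerate sample, nothing to reject
        }

        NormalDistribution nd = new NormalDistribution(stats.getMean(), sd);
        double threshold = 0.5 / data.length; // Chauvenet's criterion

        for (double x : data) {
            double lower = nd.cumulativeProbability(x);
            double tail = Math.min(lower, 1.0 - lower); // probability in the nearer tail
            if (tail < threshold) {
                outliers.add(x);
            }
        }
        return outliers;
    }
}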