Example usage for org.apache.commons.math3.distribution NormalDistribution cumulativeProbability

List of usage examples for org.apache.commons.math3.distribution NormalDistribution cumulativeProbability

Introduction

On this page you can find example usage for org.apache.commons.math3.distribution NormalDistribution cumulativeProbability.

Prototype

public double cumulativeProbability(double x) 

Source Link

Document

If x is more than 40 standard deviations from the mean, 0 or 1 is returned, as in these cases the actual value is within Double.MIN_VALUE of 0 or 1.
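
Below is a minimal, self-contained sketch of calling cumulativeProbability on a standard normal distribution; the class name and the printed values are illustrative only and are not taken from the examples further down.

import org.apache.commons.math3.distribution.NormalDistribution;

public class CumulativeProbabilityDemo {
    public static void main(String[] args) {
        // Standard normal: mean 0, standard deviation 1
        NormalDistribution stdNormal = new NormalDistribution(0, 1);

        // P(X <= 0) is exactly 0.5 for the standard normal
        System.out.println(stdNormal.cumulativeProbability(0.0));   // 0.5

        // P(X <= 1.96) is approximately 0.975
        System.out.println(stdNormal.cumulativeProbability(1.96));  // ~0.975

        // More than 40 standard deviations from the mean: the method
        // returns exactly 0 or 1, as described in the documentation above
        System.out.println(stdNormal.cumulativeProbability(-41.0)); // 0.0
        System.out.println(stdNormal.cumulativeProbability(41.0));  // 1.0
    }
}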

Usage

From source file:org.apache.spark.ml.stat.JavaKolmogorovSmirnovTestSuite.java

@Test
public void testKSTestCDF() {
    // Create theoretical distributions
    NormalDistribution stdNormalDist = new NormalDistribution(0, 1);

    // set seeds
    Long seed = 10L;
    stdNormalDist.reseedRandomGenerator(seed);
    Function<Double, Double> stdNormalCDF = (x) -> stdNormalDist.cumulativeProbability(x);

    double pThreshold = 0.05;

    // Comparing a standard normal sample to a standard normal distribution
    Row results = KolmogorovSmirnovTest.test(dataset, "sample", stdNormalCDF).head();
    double pValue1 = results.getDouble(0);
    // Cannot reject null hypothesis
    assert (pValue1 > pThreshold);
}

From source file:org.gitools.analysis.stats.test.MannWhitneyWilcoxonTest.java

private double calculateOneTailPValue(final double Umin, final int n1, final int n2)
        throws ConvergenceException, MaxCountExceededException {

    /* long multiplication to avoid overflow (double not used due to efficiency
     * and to avoid precision loss)
     */
    final long n1n2prod = (long) n1 * n2;

    // http://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U#Normal_approximation
    final double EU = n1n2prod / 2.0;
    final double VarU = n1n2prod * (n1 + n2 + 1) / 12.0;

    final double z = (Umin - EU) / FastMath.sqrt(VarU);

    // No try-catch or advertised exception because args are valid
    final NormalDistribution standardNormal = new NormalDistribution(0, 1);

    return standardNormal.cumulativeProbability(z);
}
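
The example above returns a one-tailed p-value directly from cumulativeProbability(z). As a short sketch (the class name and the z value are illustrative assumptions), the same call also gives the two-tailed pattern 2 * (1 - P(Z <= |z|)) used in the spatial autocorrelation example further down:

import org.apache.commons.math3.distribution.NormalDistribution;

public class PValueFromZ {
    public static void main(String[] args) {
        NormalDistribution standardNormal = new NormalDistribution(0, 1);

        double z = -2.1; // illustrative z statistic, not taken from the examples

        // One-tailed p-value, as returned by calculateOneTailPValue above: P(Z <= z)
        double oneTailed = standardNormal.cumulativeProbability(z);

        // Two-tailed p-value, as used in the Moran's I example below: 2 * (1 - P(Z <= |z|))
        double twoTailed = 2.0 * (1.0 - standardNormal.cumulativeProbability(Math.abs(z)));

        System.out.println(oneTailed + " " + twoTailed);
    }
}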

From source file:plugins.ImageAutocorrelation.java

/**
 * Used to execute this plugin tool.
 */
@Override
public void run() {
    amIActive = true;

    WhiteboxRaster image;
    int col, row, numImages, x, y;
    int cols, rows;
    int a = 0;
    double noData;
    double z, zn;
    int progress = 0;
    String progressMessage = "";
    String inputFilesString = null;
    String[] imageFiles;
    long[] n;
    double[] mean;
    String[] shortNames;
    String[] units;
    double[] I;
    double[] stdDev;
    double totalDeviation;
    int[] dX;
    int[] dY;
    double numerator, W;
    //double recipRoot2 = 1 / Math.sqrt(2);
    //double[] wNeighbour = {recipRoot2, 1, recipRoot2, 1, recipRoot2, 1, recipRoot2, 1};
    //double[] wNeighbour = {1, 1, 1, 1};

    if (args.length <= 0) {
        showFeedback("Plugin parameters have not been set.");
        return;
    }

    inputFilesString = args[0];
    imageFiles = inputFilesString.split(";");
    numImages = imageFiles.length;
    if (args[1].toLowerCase().contains("bishop")) {
        dX = new int[] { 1, 1, -1, -1 };
        dY = new int[] { -1, 1, 1, -1 };
    } else if (args[1].toLowerCase().contains("queen") || args[1].toLowerCase().contains("king")) {
        dX = new int[] { 1, 1, 1, 0, -1, -1, -1, 0 };
        dY = new int[] { -1, 0, 1, 1, 1, 0, -1, -1 };
    } else {
        // go with the rook default
        dX = new int[] { 1, 0, -1, 0 };
        dY = new int[] { 0, 1, 0, -1 };
    }

    try {

        //initialize the image data arrays
        double sigmaZ;
        n = new long[numImages];
        mean = new double[numImages];
        I = new double[numImages];
        shortNames = new String[numImages];
        units = new String[numImages];
        stdDev = new double[numImages];
        double[] E_I = new double[numImages];
        double[] varNormality = new double[numImages];
        double[] varRandomization = new double[numImages];
        double[] zN = new double[numImages];
        double[] zR = new double[numImages];
        double[] pValueN = new double[numImages];
        double[] pValueR = new double[numImages];
        double[] data;
        NormalDistribution distribution = new NormalDistribution(0, 1);

        for (a = 0; a < numImages; a++) {
            progressMessage = "Image " + (a + 1) + " of " + numImages;
            image = new WhiteboxRaster(imageFiles[a], "r");
            noData = image.getNoDataValue();
            rows = image.getNumberRows();
            cols = image.getNumberColumns();
            shortNames[a] = image.getShortHeaderFile();
            if (!image.getZUnits().toLowerCase().equals("not specified")) {
                units[a] = image.getZUnits();
            } else {
                units[a] = "";
            }

            sigmaZ = 0;
            for (row = 0; row < rows; row++) {
                data = image.getRowValues(row);
                for (col = 0; col < cols; col++) {
                    if (data[col] != noData) {
                        sigmaZ += data[col];
                        n[a]++;
                    }
                }
                if (cancelOp) {
                    cancelOperation();
                    return;
                }
                progress = (int) (row * 100.0 / rows);
                updateProgress(progressMessage, progress);
            }

            mean[a] = sigmaZ / n[a];
            E_I[a] = -1.0 / (n[a] - 1);
            totalDeviation = 0;
            W = 0;
            numerator = 0;
            double S2 = 0;
            double wij;
            int numNeighbours = dX.length;
            double k = 0;
            for (row = 0; row < rows; row++) {
                for (col = 0; col < cols; col++) {
                    z = image.getValue(row, col);
                    if (z != noData) {
                        totalDeviation += (z - mean[a]) * (z - mean[a]);
                        k += (z - mean[a]) * (z - mean[a]) * (z - mean[a]) * (z - mean[a]);
                        wij = 0;
                        for (int i = 0; i < numNeighbours; i++) {
                            x = col + dX[i];
                            y = row + dY[i];
                            zn = image.getValue(y, x);
                            if (zn != noData) { // two valid neighbour pairs
                                W += 1.0;
                                numerator += (z - mean[a]) * (zn - mean[a]); //* weight of 1.0
                                wij += 1;
                            }
                        }
                        S2 += wij * wij;
                    }
                }
                if (cancelOp) {
                    cancelOperation();
                    return;
                }
                progress = (int) (row * 100.0 / rows);
                updateProgress(progressMessage, progress);
            }

            double S1 = 4 * W;
            S2 = S2 * 4;

            stdDev[a] = Math.sqrt(totalDeviation / (n[a] - 1));

            I[a] = n[a] * numerator / (totalDeviation * W);

            varNormality[a] = (n[a] * n[a] * S1 - n[a] * S2 + 3 * W * W) / ((W * W) * (n[a] * n[a] - 1));

            zN[a] = (I[a] - E_I[a]) / (Math.sqrt(varNormality[a]));
            pValueN[a] = 2d * (1.0 - distribution.cumulativeProbability(Math.abs(zN[a])));

            k = k / (n[a] * stdDev[a] * stdDev[a] * stdDev[a] * stdDev[a]);

            varRandomization[a] = (n[a] * ((n[a] * n[a] - 3 * n[a] + 3) * S1 - n[a] * S2 + 3 * W * W)
                    - k * (n[a] * n[a] - n[a]) * S1 - 2 * n[a] * S1 + 6 * W * W)
                    / ((n[a] - 1) * (n[a] - 2) * (n[a] - 3) * W * W);

            zR[a] = (I[a] - E_I[a]) / (Math.sqrt(varRandomization[a]));
            pValueR[a] = 2d * (1.0 - distribution.cumulativeProbability(Math.abs(zR[a])));

            image.close();

            progress = (int) (100f * (a + 1) / numImages);
            updateProgress(progressMessage, progress);

        }

        StringBuilder retstr = new StringBuilder();
        DecimalFormat df1 = new DecimalFormat("###,###,###,###");
        DecimalFormat df2 = new DecimalFormat("0.0000");
        retstr.append("SPATIAL AUTOCORRELATION\n");

        for (a = 0; a < numImages; a++) {
            retstr.append("\n");
            retstr.append("Input image:\t\t\t").append(shortNames[a]).append("\n");
            retstr.append("Number of cells included:\t\t").append(df1.format(n[a])).append("\n");
            if (units[a].equals("")) {
                retstr.append("Mean of cells included:\t\t").append(df2.format(mean[a])).append("\n");
            } else {
                retstr.append("Mean of cells included:\t\t").append(df2.format(mean[a])).append(" ")
                        .append(units[a]).append("\n");
            }
            retstr.append("Spatial autocorrelation (Moran's I):\t").append(df2.format(I[a])).append("\n");
            retstr.append("Expected value:\t\t").append(df2.format(E_I[a])).append("\n");
            retstr.append("Variance of I (normality assumption):\t").append(df2.format(varNormality[a]))
                    .append("\n");
            retstr.append("z test stat (normality assumption):\t").append(df2.format(zN[a])).append("\n");
            retstr.append("p-value (normality assumption):\t").append(df2.format(pValueN[a])).append("\n");
            retstr.append("Variance of I (randomization assumption):\t").append(df2.format(varRandomization[a]))
                    .append("\n");
            retstr.append("z test stat (randomization assumption):\t").append(df2.format(zR[a])).append("\n");
            retstr.append("p-value (randomization assumption):\t").append(df2.format(pValueR[a])).append("\n");

        }

        //            System.out.println(retstr.toString());

        returnData(retstr.toString());

    } catch (OutOfMemoryError oe) {
        myHost.showFeedback("An out-of-memory error has occurred during operation.");
    } catch (Exception e) {
        myHost.showFeedback("An error has occurred during operation. See log file for details.");
        myHost.logException("Error in " + getDescriptiveName(), e);
    } finally {
        updateProgress("Progress: ", 0);
        // tells the main application that this process is completed.
        amIActive = false;
        myHost.pluginComplete();
    }
}

From source file:pyromaniac.Algorithm.BalzerOUCallFrequencyTable.java

/**
 * _calculate probability.
 *
 * @param mode the mode
 * @param segmentNumber the segment number
 * @return the BigDecimal [] of probabilities, all with scale of 10.
 */
private BigDecimal[] _calculateProbabilitiesHelper(int segmentNumber, int mode) {
    BigDecimal sd, mean, modeBD;

    //this multiplicative factor was taken from elsewhere...
    BigDecimal flowEffect = new BigDecimal("0.003").multiply(new BigDecimal(segmentNumber)).setScale(SCALE,
            BigDecimal.ROUND_HALF_UP);

    modeBD = new BigDecimal(mode);

    if (mode >= 6) {
        mean = new BigDecimal(mode).setScale(SCALE, BigDecimal.ROUND_HALF_UP);
        //standard deviation is 0.03 + effect of RefLen + effect of flow position 
        sd = new BigDecimal("0.03494").add(mean.multiply(new BigDecimal("0.06856"))).add(flowEffect);
    } else {
        mean = new BigDecimal(this.normalDistParams.get(mode).getFirst()).setScale(SCALE,
                BigDecimal.ROUND_HALF_UP);
        sd = new BigDecimal(this.normalDistParams.get(mode).getSecond()).add(flowEffect).setScale(SCALE,
                BigDecimal.ROUND_HALF_UP);
    }

    NormalDistribution norm = new NormalDistribution(mean.doubleValue(), sd.doubleValue());

    try {
        //due to rounding...
        //cumulative probability [X <= x]
        //so prob under is [X <= MODE - 0.51], and prob over is 1 - prob [X <= MODE + 0.49] (i.e. prob X > MODE + 0.49)
        BigDecimal lowerBound = modeBD.subtract(new BigDecimal(SUBTRACT_FOR_LB)).setScale(SCALE,
                BigDecimal.ROUND_HALF_UP);
        BigDecimal upperBound = modeBD.add(new BigDecimal(ADD_FOR_UB)).setScale(SCALE,
                BigDecimal.ROUND_HALF_UP);

        BigDecimal probLessThan = new BigDecimal(norm.cumulativeProbability(lowerBound.doubleValue()))
                .setScale(SCALE, BigDecimal.ROUND_HALF_UP);
        BigDecimal probMoreThan = new BigDecimal("1")
                .subtract(new BigDecimal(norm.cumulativeProbability(upperBound.doubleValue())).setScale(SCALE,
                        BigDecimal.ROUND_HALF_UP));
        BigDecimal probEqualTo = new BigDecimal("1").subtract(probLessThan).subtract(probMoreThan)
                .setScale(SCALE, BigDecimal.ROUND_HALF_UP);

        BigDecimal summed = probLessThan.add(probEqualTo).add(probMoreThan).setScale(SCALE,
                BigDecimal.ROUND_HALF_UP);
        if (!summed.equals(new BigDecimal("1").setScale(SCALE, BigDecimal.ROUND_HALF_UP))) {
            probLessThan = probLessThan.divide(summed, SCALE, BigDecimal.ROUND_HALF_UP);
            probMoreThan = probMoreThan.divide(summed, SCALE, BigDecimal.ROUND_HALF_UP);
            probEqualTo = probEqualTo.divide(summed, SCALE, BigDecimal.ROUND_HALF_UP);
        }

        BigDecimal[] probs = { probLessThan, probEqualTo, probMoreThan };

        return probs;
    } catch (MathIllegalStateException me) {
        me.getStackTrace();
    }
    return null;
}

From source file:pyromaniac.Algorithm.MultinomialOneSidedTest.java

public void runTest() throws Exception {
    //not significant if no observations below and no observations above
    if (observationsBelowMode == 0 && observationsAboveMode == 0) {
        this.p = 1;
    } //significant if the number of observations above mode is equal to that at the mode. 
    /*         else if(N <= 5 && observationsAtMode != N)
    {
       this.significant = true;
       this.p = 0;
    }*/
    else if (alpha > 0
            && (observationsAboveMode == observationsAtMode || observationsBelowMode == observationsAtMode)) {
        this.significantAbove = true;
        this.significantBelow = true;
        //this.significantCombined = true;
        this.p = 0;
    } else {

        int indexBelowMode = 1;
        int indexAboveMode = 2;

        double[] X = new double[] { this.observationsAtMode, this.observationsBelowMode,
                this.observationsAboveMode };
        double[] xDivN = new double[] { (double) this.observationsAtMode / (double) this.N,
                (double) this.observationsBelowMode / (double) this.N,
                (double) this.observationsAboveMode / (double) this.N };
        HashSet<Integer> gamma = new HashSet<Integer>();

        if (verbose) {
            for (int i = 0; i < X.length; i++) {
                logger.writeLog("X[" + i + "] = " + X[i], AcaciaLogger.LOG_DEBUG);
                logger.writeLog("XDivN[" + i + "] = " + xDivN[i], AcaciaLogger.LOG_DEBUG);
                logger.writeLog("P[" + i + "] = " + P[i], AcaciaLogger.LOG_DEBUG);
            }
        }

        for (int i = 1; i < xDivN.length; i++) {
            if (xDivN[i] >= P[i]) {
                gamma.add(i);
            }
        }

        double sumX = 0;
        double sumP = 0;

        for (int index : gamma) {
            sumX += X[index];
            sumP += P[index];
        }

        while (gamma.size() < 2) {
            boolean added = false;

            for (int i = 1; i < xDivN.length; i++) {
                double adjP = X[i] * (1 - sumP) / (this.N - sumX);

                if (adjP > P[i] && !gamma.contains(i)) {
                    gamma.add(i);
                    added = true;
                }
            }

            if (!added) {
                break;
            }

            sumP = 0;
            sumX = 0;

            for (int index : gamma) {
                sumX += X[index];
                sumP += P[index];
            }
        }

        NormalDistribution norm = new NormalDistribution();

        double w2Num = Math.pow(((N - sumX) - N * (1 - sumP)), 2);
        double w2Denom = (N * (1 - sumP));
        double resSum = 0;
        for (int index : gamma) {
            resSum += Math.pow(X[index] - N * P[index], 2) / (N * P[index]);
        }

        double w2 = (w2Num / w2Denom) + resSum;
        double m = Math
                .sqrt(P[indexBelowMode] * P[indexAboveMode] / (1 - P[indexBelowMode] - P[indexAboveMode]));
        double calcP = (0.25 + (Math.atan(m) / (2 * Math.PI))) * Math.exp(-w2 / 2)
                + (1 - norm.cumulativeProbability(Math.sqrt(w2)));

        this.p = calcP;

        if (this.alpha > 0) {
            boolean sig = (this.p <= this.alpha);
            this.significantAbove = sig;
            this.significantBelow = sig;
            //   this.significantCombined = sig;
        } else //alpha == zero
        {
        }
    }
}

From source file:pyromaniac.Algorithm.QuinceOUFrequencyTable.java

private BigDecimal[] _calculateProbabilitiesHelper(int mode) {
    BigDecimal sd = new BigDecimal("0.04").add(new BigDecimal(mode).multiply(new BigDecimal("0.03")));
    BigDecimal modeBD = new BigDecimal(mode);

    BigDecimal lowerBound = modeBD.subtract(new BigDecimal(SUBTRACT_FOR_LB)).setScale(SCALE,
            BigDecimal.ROUND_HALF_UP);
    BigDecimal upperBound = modeBD.add(new BigDecimal(ADD_FOR_UB)).setScale(SCALE, BigDecimal.ROUND_HALF_UP);

    NormalDistribution norm = new NormalDistribution(mode, sd.doubleValue());

    try {
        BigDecimal probLessThan = new BigDecimal(norm.cumulativeProbability(lowerBound.doubleValue()))
                .setScale(SCALE, BigDecimal.ROUND_HALF_UP);
        BigDecimal probMoreThan = new BigDecimal("1")
                .subtract(new BigDecimal(norm.cumulativeProbability(upperBound.doubleValue())))
                .setScale(SCALE, BigDecimal.ROUND_HALF_UP);
        BigDecimal probEqualTo = new BigDecimal("1").subtract(probLessThan).subtract(probMoreThan)
                .setScale(SCALE, BigDecimal.ROUND_HALF_UP);

        BigDecimal totalProb = probLessThan.add(probEqualTo).add(probMoreThan).setScale(SCALE,
                BigDecimal.ROUND_HALF_UP);

        if (!totalProb.equals(new BigDecimal("1").setScale(SCALE, BigDecimal.ROUND_HALF_UP))) {
            probLessThan = probLessThan.divide(totalProb, SCALE, BigDecimal.ROUND_HALF_UP);
            probMoreThan = probMoreThan.divide(totalProb, SCALE, BigDecimal.ROUND_HALF_UP);
            probEqualTo = probEqualTo.divide(totalProb, SCALE, BigDecimal.ROUND_HALF_UP);
        }

        BigDecimal[] probs = { probLessThan, probEqualTo, probMoreThan };
        return probs;
    } catch (MathIllegalStateException me) {
        me.getStackTrace();
    }
    return null;
}

From source file:roemetz.core.CalcGenRoeMetz.java

/**
 * Numerically integrates a one dimensional gaussian pdf times two normal
 * cdfs
 * 
 * @param u Contains experiment means
 * @param scale Contains one 1-D gaussian pdf and two 2-D normal cdfs
 * @param numSamples Number of samples for numerical integration
 * @return Integrated product moment
 */
public static double prodMoment1(double[] u, double[] scale, int numSamples) {
    NormalDistribution gauss = new NormalDistribution();

    double scale1 = scale[0];
    double scale20 = scale[1];
    double scale21 = scale[2];

    double lx = 10 * Math.sqrt(scale1);
    double dx = lx / (double) numSamples;
    double[] x = new double[numSamples];
    for (int i = 0; i < numSamples; i++) {
        x[i] = ((double) i * dx) - (0.5 * lx);
    }

    double f[] = new double[numSamples];
    for (int i = 0; i < numSamples; i++) {
        f[i] = Math.exp((-(x[i] * x[i])) / 2.0 / scale1);
    }

    for (int i = 0; i < numSamples; i++) {
        f[i] = f[i] / Math.sqrt(Math.PI * 2.0 * scale1);
    }

    double[] phi = new double[numSamples];
    for (int i = 0; i < numSamples; i++) {
        phi[i] = gauss.cumulativeProbability((u[0] + x[i]) / Math.sqrt(scale20))
                * gauss.cumulativeProbability((u[1] + x[i]) / Math.sqrt(scale21));
    }

    double[] toTotal = new double[numSamples];
    for (int i = 0; i < numSamples; i++) {
        toTotal[i] = dx * f[i] * phi[i];
    }
    return Matrix.total(toTotal);
}

From source file:roemetz.core.CalcGenRoeMetz.java

/**
 * Numerically integrates a two dimensional gaussian pdf times a gaussian
 * cdf
 * 
 * @param u Contains experiment means.
 * @param scale Contains 2-D gaussian pdf and cdf
 * @param numSamples Number of samples for numerical integration
 * @return Integrated product moment
 */
public static double prodMoment(double[] u, double[] scale, int numSamples) {
    NormalDistribution gauss = new NormalDistribution();
    double scaleFixed = scale[0];
    double scaleIndependentA = scale[1] + scale[3];
    double scaleIndependentB = scale[2] + scale[4];

    double lx = 10.0;
    double dx = lx / (double) numSamples;
    double[] x = new double[numSamples];
    double Integral = 0.0;

    for (int i = 0; i < numSamples; i++) {
        x[i] = ((double) i * dx) - (0.5 * lx);
    }

    double[] phi_x = new double[numSamples];
    double[] cdf_A = new double[numSamples];
    double[] cdf_B = new double[numSamples];

    for (int i = 0; i < numSamples; i++) {
        phi_x[i] = Math.exp(-(x[i] * x[i]) / 2.0) / Math.sqrt(Math.PI * 2.0);
        cdf_A[i] = gauss
                .cumulativeProbability((u[0] + x[i] * Math.sqrt(scaleFixed)) / Math.sqrt(scaleIndependentA));
        cdf_B[i] = gauss
                .cumulativeProbability((u[1] + x[i] * Math.sqrt(scaleFixed)) / Math.sqrt(scaleIndependentB));
        Integral = Integral + dx * phi_x[i] * cdf_A[i] * cdf_B[i];
    }

    return Integral;
}

From source file:roemetz.core.CalcGenRoeMetz.java

/**
 * Calculates AUC components of variance for given experiment parameters via
 * numerical integration
 * 
 * @param u Contains experiment means. Has 2 elements.
 * @param var_t Contains variance components. Has 18 elements.
 * @param Nreader Number of readers
 * @param Nnormal Number of normal cases
 * @param Ndisease Number of diseased cases
 */
public static void genRoeMetz(double[] u, double[] var_t, int Nreader, int Nnormal, int Ndisease) {
    NormalDistribution gauss = new NormalDistribution();

    // number of samples for numerical integration, can change
    final int numSamples = 256;

    double v_AR0 = var_t[0];
    double v_AC0 = var_t[1];
    double v_ARC0 = var_t[2];
    double v_AR1 = var_t[3];
    double v_AC1 = var_t[4];
    double v_ARC1 = var_t[5];
    double v_BR0 = var_t[6];
    double v_BC0 = var_t[7];
    double v_BRC0 = var_t[8];
    double v_BR1 = var_t[9];
    double v_BC1 = var_t[10];
    double v_BRC1 = var_t[11];
    double v_R0 = var_t[12];
    double v_C0 = var_t[13];
    double v_RC0 = var_t[14];
    double v_R1 = var_t[15];
    double v_C1 = var_t[16];
    double v_RC1 = var_t[17];

    m = new double[2][2][9];

    // AUC
    double scale1 = v_R0 + v_C0 + v_RC0 + v_R1 + v_C1 + v_RC1;
    double scale20 = v_AR0 + v_AC0 + v_ARC0 + v_AR1 + v_AC1 + v_ARC1;
    double scale21 = v_BR0 + v_BC0 + v_BRC0 + v_BR1 + v_BC1 + v_BRC1;
    m[0][0][0] = gauss.cumulativeProbability(u[0] / Math.sqrt(scale1 + scale20));
    m[1][1][0] = gauss.cumulativeProbability(u[1] / Math.sqrt(scale1 + scale21));
    m[1][0][0] = m[0][0][0] - m[1][1][0];
    m[0][1][0] = -m[1][0][0];

    // M1
    double[] scaleM1 = { scale1, scale20, scale21 };
    m[0][0][1] = m[0][0][0];
    m[1][1][1] = m[1][1][0];
    m[1][0][1] = prodMoment1(u, scaleM1, numSamples);
    m[0][1][1] = m[1][0][1];

    // M2
    double scale30 = v_C0 + v_RC0 + v_AC0 + v_ARC0;
    double scale31 = v_C0 + v_RC0 + v_BC0 + v_BRC0;
    scale20 = v_AR1 + v_AC1 + v_ARC1 + v_AR0;
    scale21 = v_BR1 + v_BC1 + v_BRC1 + v_BR0;
    scale1 = v_R1 + v_C1 + v_RC1 + v_R0;

    scaleM1[0] = scale1 + scale20;
    scaleM1[1] = scale30;
    scaleM1[2] = scale30;
    m[0][0][2] = prodMoment1(new double[] { u[0], u[0] }, scaleM1, numSamples);

    scaleM1[0] = scale1 + scale21;
    scaleM1[1] = scale31;
    scaleM1[2] = scale31;
    m[1][1][2] = prodMoment1(new double[] { u[1], u[1] }, scaleM1, numSamples);

    double[] scaleM = { scale1, scale20, scale21, scale30, scale31 };
    m[1][0][2] = prodMoment(new double[] { u[0], u[1] }, scaleM, numSamples);
    m[0][1][2] = m[1][0][2];

    // M3
    scale30 = v_C1 + v_RC1 + v_AC1 + v_ARC1;
    scale31 = v_C1 + v_RC1 + v_BC1 + v_BRC1;
    scale20 = v_AR1 + v_AR0 + v_AC0 + v_ARC0;
    scale21 = v_BR1 + v_BR0 + v_BC0 + v_BRC0;
    scale1 = v_R1 + v_R0 + v_C0 + v_RC0;

    scaleM1[0] = scale1 + scale20;
    scaleM1[1] = scale30;
    scaleM1[2] = scale30;
    m[0][0][3] = prodMoment1(new double[] { u[0], u[0] }, scaleM1, numSamples);

    scaleM1[0] = scale1 + scale21;
    scaleM1[1] = scale31;
    scaleM1[2] = scale31;
    m[1][1][3] = prodMoment1(new double[] { u[1], u[1] }, scaleM1, numSamples);

    scaleM[0] = scale1;
    scaleM[1] = scale20;
    scaleM[2] = scale21;
    scaleM[3] = scale30;
    scaleM[4] = scale31;
    m[1][0][3] = prodMoment(new double[] { u[0], u[1] }, scaleM, numSamples);
    m[0][1][3] = m[1][0][3];

    // M4
    scale30 = v_C1 + v_RC1 + v_AC1 + v_ARC1 + v_C0 + v_RC0 + v_AC0 + v_ARC0;
    scale31 = v_C1 + v_RC1 + v_BC1 + v_BRC1 + v_C0 + v_RC0 + v_BC0 + v_BRC0;
    scale20 = v_AR1 + v_AR0;
    scale21 = v_BR1 + v_BR0;
    scale1 = v_R1 + v_R0;

    scaleM1[0] = scale1 + scale20;
    scaleM1[1] = scale30;
    scaleM1[2] = scale30;
    m[0][0][4] = prodMoment1(new double[] { u[0], u[0] }, scaleM1, numSamples);

    scaleM1[0] = scale1 + scale21;
    scaleM1[1] = scale31;
    scaleM1[2] = scale31;
    m[1][1][4] = prodMoment1(new double[] { u[1], u[1] }, scaleM1, numSamples);

    scaleM[0] = scale1;
    scaleM[1] = scale20;
    scaleM[2] = scale21;
    scaleM[3] = scale30;
    scaleM[4] = scale31;
    m[1][0][4] = prodMoment(new double[] { u[0], u[1] }, scaleM, numSamples);
    m[0][1][4] = m[1][0][4];

    // M5
    scale30 = v_R0 + v_R1 + v_RC0 + v_RC1 + v_AR0 + v_AR1 + v_ARC0 + v_ARC1;
    scale31 = v_R0 + v_R1 + v_RC0 + v_RC1 + v_BR0 + v_BR1 + v_BRC0 + v_BRC1;
    scale20 = v_AC0 + v_AC1;
    scale21 = v_BC0 + v_BC1;
    scale1 = v_C1 + v_C0;

    scaleM1[0] = scale1 + scale20;
    scaleM1[1] = scale30;
    scaleM1[2] = scale30;
    m[0][0][5] = prodMoment1(new double[] { u[0], u[0] }, scaleM1, numSamples);

    scaleM1[0] = scale1 + scale21;
    scaleM1[1] = scale31;
    scaleM1[2] = scale31;
    m[1][1][5] = prodMoment1(new double[] { u[1], u[1] }, scaleM1, numSamples);

    scaleM[0] = scale1;
    scaleM[1] = scale20;
    scaleM[2] = scale21;
    scaleM[3] = scale30;
    scaleM[4] = scale31;
    m[1][0][5] = prodMoment(new double[] { u[0], u[1] }, scaleM, numSamples);
    m[0][1][5] = m[1][0][5];

    // M6
    scale30 = v_R0 + v_R1 + v_C0 + v_RC0 + v_RC1 + v_AR0 + v_AR1 + v_AC0 + v_ARC0 + v_ARC1;
    scale31 = v_R0 + v_R1 + v_C0 + v_RC0 + v_RC1 + v_BR0 + v_BR1 + v_BC0 + v_BRC0 + v_BRC1;
    scale20 = v_AC1;
    scale21 = v_BC1;
    scale1 = v_C1;

    scaleM1[0] = scale1 + scale20;
    scaleM1[1] = scale30;
    scaleM1[2] = scale30;
    m[0][0][6] = prodMoment1(new double[] { u[0], u[0] }, scaleM1, numSamples);

    scaleM1[0] = scale1 + scale21;
    scaleM1[1] = scale31;
    scaleM1[2] = scale31;
    m[1][1][6] = prodMoment1(new double[] { u[1], u[1] }, scaleM1, numSamples);

    scaleM[0] = scale1;
    scaleM[1] = scale20;
    scaleM[2] = scale21;
    scaleM[3] = scale30;
    scaleM[4] = scale31;
    m[1][0][6] = prodMoment(new double[] { u[0], u[1] }, scaleM, numSamples);
    m[0][1][6] = m[1][0][6];

    // M7
    scale30 = v_R0 + v_R1 + v_C1 + v_RC0 + v_RC1 + v_AR0 + v_AR1 + v_AC1 + v_ARC0 + v_ARC1;
    scale31 = v_R0 + v_R1 + v_C1 + v_RC0 + v_RC1 + v_BR0 + v_BR1 + v_BC1 + v_BRC0 + v_BRC1;
    scale20 = v_AC0;
    scale21 = v_BC0;
    scale1 = v_C0;

    scaleM1[0] = scale1 + scale20;
    scaleM1[1] = scale30;
    scaleM1[2] = scale30;
    m[0][0][7] = prodMoment1(new double[] { u[0], u[0] }, scaleM1, numSamples);

    scaleM1[0] = scale1 + scale21;
    scaleM1[1] = scale31;
    scaleM1[2] = scale31;
    m[1][1][7] = prodMoment1(new double[] { u[1], u[1] }, scaleM1, numSamples);

    scaleM[0] = scale1;
    scaleM[1] = scale20;
    scaleM[2] = scale21;
    scaleM[3] = scale30;
    scaleM[4] = scale31;
    m[1][0][7] = prodMoment(new double[] { u[0], u[1] }, scaleM, numSamples);
    m[0][1][7] = m[1][0][7];

    //

    m[0][0][8] = m[0][0][0] * m[0][0][0];
    m[1][1][8] = m[1][1][0] * m[1][1][0];
    m[1][0][8] = m[0][0][0] * m[1][1][0];
    m[0][1][8] = m[1][0][8];

    calcAUCsAndDecomps(Nnormal, Ndisease, Nreader);

}

From source file:uk.ac.babraham.SeqMonk.Filters.IntensityDifferenceFilter.java

protected void generateProbeList() {

    applyMultipleTestingCorrection = optionsPanel.multipleTestingBox.isSelected();

    Probe[] probes = startingList.getAllProbes();

    // We'll pull the number of probes to sample from the preferences if they've changed it

    Integer updatedProbesPerSet = optionsPanel.probesPerSet();
    if (updatedProbesPerSet != null)
        probesPerSet = updatedProbesPerSet;

    ProbeList newList = new ProbeList(startingList, "Filtered Probes", "", "Diff p-value");

    // We'll build up a set of p-values as we go along
    float[] lowestPValues = new float[probes.length];
    for (int p = 0; p < lowestPValues.length; p++) {
        lowestPValues[p] = 1;
    }

    // This is going to be the temporary array we populate with the set of
    // differences we are going to analyse.
    double[] currentDiffSet = new double[probesPerSet];

    // First work out the set of comparisons we're going to make

    Vector<SingleComparison> comparisons = new Vector<IntensityDifferenceFilter.SingleComparison>();
    for (int fromIndex = 0; fromIndex < fromStores.length; fromIndex++) {
        for (int toIndex = 0; toIndex < toStores.length; toIndex++) {
            if (fromStores[fromIndex] == toStores[toIndex])
                continue;

            // If we can find the fromStore in the toStores we've already done and the
            // toStore anywhere in the fromStores then we can skip this.
            boolean canSkip = false;

            for (int i = 0; i < fromIndex; i++) {
                if (fromStores[i] == toStores[toIndex]) {
                    for (int j = 0; j < toStores.length; j++) {
                        if (toStores[j] == fromStores[fromIndex]) {
                            canSkip = true;
                            break;
                        }
                    }
                    break;
                }
            }

            if (canSkip)
                continue;

            comparisons.add(new SingleComparison(fromIndex, toIndex));

        }
    }

    // Put something in the progress whilst we're ordering the probe values to make
    // the comparison.
    progressUpdated("Generating background model", 0, 1);

    for (int comparisonIndex = 0; comparisonIndex < comparisons.size(); comparisonIndex++) {

        int fromIndex = comparisons.elementAt(comparisonIndex).fromIndex;
        int toIndex = comparisons.elementAt(comparisonIndex).toIndex;

        // We need to generate a set of probe indices ordered by their average intensity
        Integer[] indices = new Integer[probes.length];
        for (int i = 0; i < probes.length; i++) {
            indices[i] = i;
        }

        Comparator<Integer> comp = new AverageIntensityComparator(fromStores[fromIndex], toStores[toIndex],
                probes);

        Arrays.sort(indices, comp);

        progressUpdated("Made " + comparisonIndex + " out of " + comparisons.size() + " comparisons",
                comparisonIndex, comparisons.size());

        IndexTTestValue[] currentPValues = new IndexTTestValue[indices.length];

        for (int i = 0; i < indices.length; i++) {

            if (cancel) {
                cancel = false;
                progressCancelled();
                return;
            }

            if (i % 1000 == 0) {

                int progress = (i * 100) / indices.length;

                progress += 100 * comparisonIndex;

                progressUpdated("Made " + comparisonIndex + " out of " + comparisons.size() + " comparisons",
                        progress, comparisons.size() * 100);
            }

            // We need to make up the set of differences to represent this probe
            int startingIndex = i - (probesPerSet / 2);
            if (startingIndex < 0)
                startingIndex = 0;
            if (startingIndex + (probesPerSet + 1) >= probes.length)
                startingIndex = probes.length - (probesPerSet + 1);

            try {
                for (int j = startingIndex; j < startingIndex + (probesPerSet + 1); j++) {
                    if (j == i)
                        continue; // Don't include the point being tested in the background model
                    else if (j < i) {
                        currentDiffSet[j - startingIndex] = fromStores[fromIndex].getValueForProbe(
                                probes[indices[j]]) - toStores[toIndex].getValueForProbe(probes[indices[j]]);
                    } else {
                        currentDiffSet[(j - startingIndex) - 1] = fromStores[fromIndex].getValueForProbe(
                                probes[indices[j]]) - toStores[toIndex].getValueForProbe(probes[indices[j]]);
                    }
                }

                // Should we fix the mean at 0?
                double mean = 0;
                //               double mean = SimpleStats.mean(currentDiffSet);
                double stdev = SimpleStats.stdev(currentDiffSet, mean);

                if (stdev == 0) {
                    currentPValues[indices[i]] = new IndexTTestValue(indices[i], 1);
                    continue;
                }

                // Get the difference for this point
                double diff = fromStores[fromIndex].getValueForProbe(probes[indices[i]])
                        - toStores[toIndex].getValueForProbe(probes[indices[i]]);

                NormalDistribution nd = new NormalDistribution(mean, stdev);

                double significance;

                if (diff < mean) {
                    significance = nd.cumulativeProbability(diff);
                } else {
                    significance = 1 - nd.cumulativeProbability(diff);
                }

                currentPValues[indices[i]] = new IndexTTestValue(indices[i], significance);

            } catch (SeqMonkException sme) {
                progressExceptionReceived(sme);
                return;
            }

        }

        // We now need to correct the set of pValues
        if (applyMultipleTestingCorrection) {
            BenjHochFDR.calculateQValues(currentPValues);
        }

        // Finally we compare these pValues to the lowest ones we have from
        // the combined set
        if (applyMultipleTestingCorrection) {
            for (int i = 0; i < currentPValues.length; i++) {
                if (currentPValues[i].q < lowestPValues[currentPValues[i].index]) {
                    lowestPValues[currentPValues[i].index] = (float) currentPValues[i].q;
                }
            }
        } else {
            for (int i = 0; i < currentPValues.length; i++) {
                if (currentPValues[i].p < lowestPValues[currentPValues[i].index]) {
                    lowestPValues[currentPValues[i].index] = (float) currentPValues[i].p;
                }
            }
        }
    }

    // Now we can go through the lowest P-value set and see if any of them
    // pass the filter.
    for (int i = 0; i < lowestPValues.length; i++) {
        if (lowestPValues[i] < pValueLimit) {
            newList.addProbe(probes[i], lowestPValues[i]);
        }
    }

    filterFinished(newList);
}