Example usage for org.apache.commons.math3.distribution NormalDistribution inverseCumulativeProbability

Introduction

On this page you can find example usages of org.apache.commons.math3.distribution.NormalDistribution#inverseCumulativeProbability.

Prototype

public double inverseCumulativeProbability(final double p) throws OutOfRangeException 
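The method maps a probability p in [0, 1] to the corresponding quantile of the distribution; a p outside that range raises OutOfRangeException. Below is a minimal sketch (not taken from the usage sources further down; the class name InverseCdfDemo is illustrative) of calling it on a standard normal distribution:

import org.apache.commons.math3.distribution.NormalDistribution;
import org.apache.commons.math3.exception.OutOfRangeException;

public class InverseCdfDemo {
    public static void main(String[] args) {
        // Standard normal distribution: mean 0, standard deviation 1
        NormalDistribution nd = new NormalDistribution();

        // 97.5% quantile, roughly 1.96 (the usual two-sided 5% critical value)
        double z = nd.inverseCumulativeProbability(0.975);
        System.out.println("z(0.975) = " + z);

        // Probabilities outside [0, 1] are rejected
        try {
            nd.inverseCumulativeProbability(1.5);
        } catch (OutOfRangeException e) {
            System.out.println("p must lie in [0, 1]: " + e.getMessage());
        }
    }
}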

Document

The default implementation returns:
  • #getSupportLowerBound() for p = 0,
  • #getSupportUpperBound() for p = 1.
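For NormalDistribution the support is unbounded, so those two bounds are negative and positive infinity. A small sketch illustrating this boundary behaviour (the class name BoundaryDemo is illustrative):

import org.apache.commons.math3.distribution.NormalDistribution;

public class BoundaryDemo {
    public static void main(String[] args) {
        NormalDistribution nd = new NormalDistribution(); // mean 0, sd 1

        System.out.println(nd.inverseCumulativeProbability(0.0)); // -Infinity, the support lower bound
        System.out.println(nd.inverseCumulativeProbability(1.0)); // Infinity, the support upper bound
        System.out.println(nd.inverseCumulativeProbability(0.5)); // 0.0, the median (= mean)
    }
}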

Usage

From source file:net.openhft.chronicle.timeseries.Columns.java

private static void generateBrownian(DoubleColumn col, long first, double start, double end,
        NormalDistribution nd, RandomGenerator rand) {
    double x = start;
    int chunkSize = (int) Math.min(col.length() - first, CHUNK_SIZE);
    for (int i = 0; i < chunkSize; i++) {
        col.set(first + i, x);
        // shift p into (0, 1) so the inverse CDF stays finite
        double p = rand.nextFloat() + 0.5 / (1 << 24);
        double v = nd.inverseCumulativeProbability(p);
        x += v;
        assert !Double.isInfinite(x);
    }
    double diff = end - x;
    double gradient = diff / chunkSize;
    for (int i = 0; i < chunkSize; i++) {
        col.add(first + i, i * gradient);
    }
}

From source file:bide.simulation.Simulation.java

public static void diffLim(double gap, int noOfData, String simdata) {

    double limDet = -8.67;
    double meanDiff = Precision.round(globalSd * gap, 3);
    double halfMeanDiff = Precision.round(meanDiff / 2, 3);
    double smallMean = EMPIRICAL_MEAN - halfMeanDiff;
    System.out.println(meanDiff + "\t" + smallMean + "\t");

    for (int i = 1; i < 12; i++) {

        StringBuilder sb = new StringBuilder(System.getProperty("user.dir")).append(FILE_SEP).append(simdata)
                .append(FILE_SEP).append("DiffLim_").append(i).append(".csv");

        String simFile = sb.toString();
        GelSetting g = new GelSetting(noSpotPerGroup, smallMean, meanDiff, 50, 0, globalSd);
        g.gelSetP(1, 1);
        NormalDistribution nd = new NormalDistribution(smallMean, globalSd);

        System.out.println(sb.toString());

        try {

            limDet = nd.inverseCumulativeProbability(0.05 * (i - 1));
            System.out.println(simFile + "\t" + limDet);
            g.setLimDet(limDet);
            generateSimDataFile(simFile, g, totalSpot);
        }

        catch (Exception e) {
            e.printStackTrace();
        }
    }

}

From source file:net.openhft.chronicle.timeseries.Columns.java

public static void generateBrownian(DoubleColumn col, double start, double end, double sd) {
    long length = col.length();
    double sd2 = sd / Math.sqrt(length);
    NormalDistribution nd = new NormalDistribution(0, sd2 * CHUNK_SIZE);
    int trendLength = Math.toIntExact((length - 1) / CHUNK_SIZE + 2);
    BytesStore trend = NativeBytesStore.lazyNativeBytesStoreWithFixedCapacity(trendLength * 8L);
    double x = start;
    RandomGenerator rand = new MersenneTwister();
    for (int i = 0; i < trendLength - 1; i++) {
        float f = rand.nextFloat();
        trend.writeDouble((long) i << 3, x);
        x += nd.inverseCumulativeProbability(f);
    }
    trend.writeDouble((long) (trendLength - 1) << 3, x);
    double diff = end - x;
    double gradient = diff / (trendLength - 1);
    for (int i = 0; i < trendLength; i++) {
        trend.addAndGetDoubleNotAtomic((long) i << 3, i * gradient);
    }
    int procs = Runtime.getRuntime().availableProcessors();
    int chunksPerTask = (trendLength - 1) / procs + 1;
    ForkJoinPool fjp = ForkJoinPool.commonPool();
    List<ForkJoinTask<?>> tasks = new ArrayList<>(procs);
    for (int i = 0; i < procs; i++) {
        int si = i * chunksPerTask;
        int ei = Math.min(trendLength, si + chunksPerTask);
        tasks.add(fjp.submit(() -> {
            NormalDistribution nd2 = new NormalDistribution(0, sd2);
            RandomGenerator rand2 = new MersenneTwister();
            for (int j = si; j < ei; j++) {
                generateBrownian(col, (long) j * CHUNK_SIZE, trend.readDouble((long) j << 3),
                        trend.readDouble((long) (j + 1) << 3), nd2, rand2);
            }
        }));
    }
    for (ForkJoinTask<?> task : tasks) {
        task.join();
    }
    trend.release();
}

From source file:de.huberlin.wbi.hiway.scheduler.WienerProcessModel.java

public double getEstimate(double timestamp, double alpha) {
    if (alpha == 0.5 && measurements.size() > 0) {
        return logarithmize ? Math.pow(Math.E, measurements.getLast().runtime)
                : Math.max(measurements.getLast().runtime, Double.MIN_NORMAL);
    }

    if (differences.size() < 2) {
        return 0d;
    }

    Runtime lastMeasurement = measurements.getLast();

    double variance = 0d;
    double avgDifference = sumOfDifferences / differences.size();
    for (double difference : differences) {
        variance += Math.pow(difference - avgDifference, 2d);
    }
    variance /= differences.size() - 1;

    variance *= timestamp - lastMeasurement.timestamp;

    double estimate = lastMeasurement.runtime;
    if (variance > 0d) {
        NormalDistribution nd = new NormalDistribution(lastMeasurement.runtime, Math.sqrt(variance));
        estimate = nd.inverseCumulativeProbability(alpha);
    }

    estimate = logarithmize ? Math.pow(Math.E, estimate) : Math.max(estimate, 0d);

    return estimate;
}

From source file:io.warp10.script.functions.NBOUNDS.java

@Override
public Object apply(WarpScriptStack stack) throws WarpScriptException {

    Object top = stack.pop();

    if (!(top instanceof Long)) {
        throw new WarpScriptException(getName() + " expects a number of intervals on top of the stack.");
    }

    int n = ((Number) top).intValue();

    if (n < 1) {
        throw new WarpScriptException(getName() + " cannot generate bounds for less than 2 intervals.");
    }

    if (n > 65536) {
        throw new WarpScriptException(getName() + " cannot generate bounds for more than 65536 intervals.");
    }

    top = stack.pop();

    if (!(top instanceof Number)) {
        throw new WarpScriptException(
                getName() + " expects a standard deviation below the number of intervals.");
    }

    double sigma = ((Number) top).doubleValue();

    if (sigma <= 0.0D) {
        throw new WarpScriptException(getName() + " expects a standard deviation strictly positive.");
    }
    top = stack.pop();

    if (!(top instanceof Number)) {
        throw new WarpScriptException(getName() + " expects a mean below the standard deviation.");
    }

    double mu = ((Number) top).doubleValue();

    NormalDistribution nd = new org.apache.commons.math3.distribution.NormalDistribution(mu, sigma);

    List<Object> bounds = new ArrayList<Object>(n - 1);

    double area = 1.0D / n;

    for (int i = 1; i <= n - 1; i++) {
        bounds.add(nd.inverseCumulativeProbability(i * area));
    }

    stack.push(bounds);

    return stack;
}

From source file:com.itemanalysis.psychometrics.scaling.NormalizedScore.java

/**
 * For the r score levels between min and max (as defined in PercentileRank),
 * inclusive, this method returns an r x 2 array with integer-based scores in the
 * first column and normalized scores in the second column. This method is useful
 * when only the raw scores and corresponding normalized scores are needed.
 *
 * @return two-way array of raw scores and normalized scores.
 */
public double[][] evaluate(PercentileRank prank, DefaultLinearTransformation linear) {
    this.prank = prank;
    NormalDistribution normal = new NormalDistribution();
    double[][] pr = prank.evaluate();
    double p = 0.0;
    double q = 0.0;
    for (int i = 0; i < pr.length; i++) {
        p = pr[i][1] / 100;
        q = normal.inverseCumulativeProbability(p);
        pr[i][1] = linear.transform(q);
    }
    return pr;
}

From source file:com.itemanalysis.psychometrics.scaling.NormalizedScore.java

/**
 * Creates a TreeMap<Integer, Double> lookup table of normalized scores.
 * The key is a raw score level and the value is a normalized score. This
 * method is useful for finding the normalized score that corresponds to an
 * examinee's raw score. After calling this method, individual elements in the
 * TreeMap can be accessed with getNormalizedScoreAt(int) or
 * valueIterator().
 *
 */
public void createLookupTable(PercentileRank prank, LinearTransformation linear) {
    this.prank = prank;
    NormalDistribution normal = new NormalDistribution();
    normScoreTable = new TreeMap<Integer, Double>();
    prank.createLookupTable();
    Iterator<Integer> iter = prank.valueIterator();
    double p = 0.0;
    double q = 0.0;
    Integer i = null;
    while (iter.hasNext()) {
        i = iter.next();
        p = prank.getPercentileRankAt(i) / 100.0;
        q = normal.inverseCumulativeProbability(p);
        normScoreTable.put(i, linear.transform(q));
    }
}

From source file:iDynoOptimizer.MOEAFramework26.src.org.moeaframework.util.statistics.MannWhitneyUTest.java

/**
 * {@inheritDoc}
 * <p>
 * When the samples from both populations are less than 20, only alpha
 * values of 0.05 and 0.01 are valid. This is because a table is used to
 * accurately determine the critical values. When more than 20 samples are
 * available, the normal approximation is used allowing any value for alpha.
 * 
 * @throws IllegalArgumentException if an insufficient sampling size is
 *         provided, or if an invalid alpha value is provided
 */
@Override
public boolean test(double alpha) {
    double[] R = new double[2];
    int[] n = new int[2];

    update();

    for (RankedObservation observation : data) {
        n[observation.getGroup()]++;
        R[observation.getGroup()] += observation.getRank();
    }

    double U1 = n[0] * n[1] + n[0] * (n[0] + 1) / 2.0 - R[0];
    double U2 = n[0] * n[1] + n[1] * (n[1] + 1) / 2.0 - R[1];
    double U = Math.min(U1, U2);

    // expose U for testing
    lastU = U;

    if ((n[0] <= 20) && (n[1] <= 20)) {
        return U <= getCriticalUValueFromTable(n[0], n[1], alpha);
    } else {
        double z = 0.0;
        NormalDistribution dist = new NormalDistribution();

        if (Settings.isContinuityCorrection()) {
            z = (Math.abs(U - n[0] * n[1] / 2.0) - 0.5) / Math.sqrt(n[0] * n[1] * (n[0] + n[1] + 1.0) / 12.0);
        } else {
            z = (U - n[0] * n[1] / 2.0) / Math.sqrt(n[0] * n[1] * (n[0] + n[1] + 1.0) / 12.0);
        }

        return Math.abs(z) >= Math.abs(dist.inverseCumulativeProbability(alpha));
    }
}

From source file:iDynoOptimizer.MOEAFramework26.src.org.moeaframework.util.statistics.WilcoxonSignedRanksTest.java

/**
 * {@inheritDoc}
 * <p>
 * When the samples from both populations are less than 20, only alpha
 * values of 0.05 and 0.01 are valid. This is because a table is used to
 * accurately determine the critical values. When more than 20 samples are
 * available, the normal approximation is used allowing any value for alpha.
 * 
 * @throws IllegalArgumentException if an insufficient sampling size is
 *         provided, or if an invalid alpha value is provided
 */
@Override
public boolean test(double alpha) {
    double Rpos = 0.0;
    double Rneg = 0.0;

    update();

    for (RankedObservation observation : data) {
        if (observation.getValue() < 0.0) {
            Rneg += observation.getRank();
        } else {
            Rpos += observation.getRank();
        }
    }

    int n = data.size();
    double T = Math.min(Rpos, Rneg);

    // expose T for testing
    lastT = T;

    if (n <= 50) {
        return T <= getCriticalTValueFromTable(n, alpha);
    } else {
        double z = 0.0;
        NormalDistribution dist = new NormalDistribution();

        if (Settings.isContinuityCorrection()) {
            z = (Math.abs(T - n * (n + 1) / 4.0) - 0.5) / Math.sqrt(n * (n + 1) * (n + n + 1) / 24.0);
        } else {
            z = (T - n * (n + 1) / 4.0) / Math.sqrt(n * (n + 1) * (n + n + 1) / 24.0);
        }

        return Math.abs(z) >= Math.abs(dist.inverseCumulativeProbability(alpha));
    }
}

From source file:eu.betaas.taas.securitymanager.taastrustmanager.taastrustcalculator.StatisticsCalculator.java

public boolean isSimilarProportion(double[] valuesA, double[] valuesB) {
    double alpha = 0.05;

    // Change data a bit for avoiding issues with booleans 0/1
    /*for (int i=0; i<valuesA.length; i++)
    {
       valuesA[i] = valuesA[i] + 1.0;
    }
    for (int i=0; i<valuesB.length; i++)
    {
       valuesB[i] = valuesB[i] + 1.0;
    }*/

    // Calculate region of acceptance
    NormalDistribution myNormal = new NormalDistribution(0, 1);
    double myZLeft = -1 * Math.abs(myNormal.inverseCumulativeProbability(alpha / 2));
    double myZRight = Math.abs(myNormal.inverseCumulativeProbability(alpha / 2));

    logger.debug("Boundaries: " + myZLeft + " to " + myZRight);

    // Calculate proportion for valuesA dataset
    int nA = valuesA.length;
    double successA = 0;
    for (int i = 0; i < nA; i++) {
        successA = successA + valuesA[i];
    }

    logger.debug("Success number for dataset A: " + successA);
    logger.debug("Number of records for A: " + nA);

    double pA = successA / nA;

    // Calculate proportion for valuesB dataset
    int nB = valuesB.length;
    double successB = 0;
    for (int i = 0; i < nB; i++) {
        successB = successB + valuesB[i];
    }

    logger.debug("Success number for dataset B: " + successB);
    logger.debug("Number of records for B: " + nB);

    double pB = successB / nB;

    // Calculate proportion similarity
    double pPool = (nA * pA + nB * pB) / (nA + nB);
    double zComp = (pA - pB) / Math.sqrt(pPool * (1.0 - pPool) * (1.0 / nA + 1.0 / nB));

    logger.debug("pPooled = " + pPool);
    logger.debug("Z value = " + zComp);
    logger.debug("p-value = " + (1.0 - myNormal.cumulativeProbability(zComp)) * 2);

    // Determine if z score is in the region of acceptance
    if ((myZLeft <= zComp) && (zComp <= myZRight)) {
        return true;
    }

    return false;
}