Example usage for org.apache.commons.math3.distribution NormalDistribution NormalDistribution

Introduction

On this page you can find example usage for the org.apache.commons.math3.distribution.NormalDistribution no-argument constructor, NormalDistribution().

Prototype

public NormalDistribution() 

Document

Create a normal distribution with mean equal to zero and standard deviation equal to one.
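
A minimal, self-contained sketch of this constructor in use (class and variable names here are illustrative only): the no-argument constructor yields the standard normal distribution, so its methods operate directly on z-scores.

import org.apache.commons.math3.distribution.NormalDistribution;

public class StandardNormalDemo {
    public static void main(String[] args) {
        // Mean 0, standard deviation 1
        NormalDistribution standardNormal = new NormalDistribution();

        System.out.println(standardNormal.getMean());                           // 0.0
        System.out.println(standardNormal.getStandardDeviation());              // 1.0
        System.out.println(standardNormal.cumulativeProbability(0.0));          // 0.5
        System.out.println(standardNormal.inverseCumulativeProbability(0.975)); // ~1.96
        System.out.println(standardNormal.density(0.0));                        // ~0.3989
    }
}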

Usage

From source file:com.qaant.threadModels.TWhaley.java

private void wWhaley() {

    tipoEjercicio = AMERICAN;

    //     q = (tipoContrato == STOCK) ? dividendRate : rate;
    // q: for a stock, q is the dividend rate; for a future, q takes the rate used to discount the future value to present value.
    // This substitution lets the same formula be used for both STOCK and FUTURES.

    double xx;

    switch (tipoContrato) {
    case STOCK:
        q = dividendRate;
        b = rate;
        break;

    case FUTURES:
        q = rate;
        b = 0;
        break;
    }
    double vlt2 = volatModel * volatModel;
    double VltSqrDayYear = volatModel * sqrDayYear;
    double h = 1 - z; // discount to present value

    double alfa = 2 * rate / vlt2;
    double beta = 2 * (b - q) / vlt2;

    double lambda = (-(beta - 1) + cpFlag * Math.sqrt((beta - 1) * (beta - 1) + 4 * alfa / h)) / 2;

    double eex = Math.exp(-q * dayYear); // dividend discount factor

    double s1 = strike;
    double zz = 1 / Math.sqrt(2 * Math.PI);
    double zerror = 1;
    do {
        double d1 = (Math.log(s1 / strike) + ((rate - q) + vlt2 / 2) * dayYear) / VltSqrDayYear;
        xx = (1 - eex * new NormalDistribution().cumulativeProbability(cpFlag * d1));

        double corr = s1 / lambda * xx;

        QBlackScholes option = new QBlackScholes(tipoContrato, s1, volatModel, dividendRate, callPut, strike,
                daysToExpiration, rate, 0);

        double mBlackScholes = option.getPrima();
        double rhs = mBlackScholes + cpFlag * corr;

        double lhs = cpFlag * (s1 - strike);
        zerror = lhs - rhs;
        double nd1 = zz * Math.exp(-0.5 * d1 * d1); // standard normal density at d1
        double slope = cpFlag * (1 - 1 / lambda) * xx + 1 / lambda * (eex * nd1) * 1 / VltSqrDayYear;
        s1 = s1 - zerror / slope;

    } while (Math.abs(zerror) > 0.000001);

    double a = cpFlag * s1 / lambda * xx;

    switch (callPut) {
    case CALL: //Call
        if (underlyingValue >= s1) {
            prima = underlyingValue - strike;
        } else {
            prima += a * Math.pow((underlyingValue / s1), lambda);
        }
        break;

    case PUT: //Put
        if (underlyingValue <= s1) {
            prima = strike - underlyingValue;
        } else {
            prima += a * Math.pow((underlyingValue / s1), lambda);
        }
        break;
    }
}

From source file:eu.crisis_economics.abm.ratings.NaiveMertonDistanceToDefaultAlgorithm.java

/**
  * Compute the (Naive) Merton Distance-to-Default measure for an agent
  * in a specified forecast timeframe T.
  *
  * @param debtFaceValue (F)
  *        The face value of the agent debt.
  * @param equityVolatility
  *        The volatility of agent equity.
  * @param equity (E)
  *        The current agent equity.
  * @param expectedAssetReturn
  *        The asset return of the agent during the last forecast window.
  * @param forecastHorizon (T)
  *        The period over which to forecast agent default.
  * @return
  *        A Pair<Double, Double> in the format:
  *        Pair<Naive Merton Distance-to-Default, Naive Merton
  *        Probability-of-Default>, in the period of the forecast timeframe.
  * 
  * It is not permissible for both the debt face value (F) and equity (E)
  * arguments to be simultaneously zero, for the debt face value (F) to be
  * negative, or for the forecast horizon (T) to be zero or negative. If the
  * debt face value is zero and equity is nonzero, then the distance to
  * default is taken to be +Infinity.
  */
static Pair<Double, Double> compute(final double debtFaceValue, final double equityVolatility,
        final double equity, final double expectedAssetReturn, final double forecastHorizon) {
    Preconditions.checkArgument(equity != 0. || debtFaceValue > 0.);
    Preconditions.checkArgument(forecastHorizon > 0.);
    Preconditions.checkArgument(debtFaceValue >= 0.);
    final double debtVolatility = .05 + .25 * equityVolatility,
            overallValueVolatility = equityVolatility * equity / (equity + debtFaceValue)
                    + debtVolatility * debtFaceValue / (equity + debtFaceValue);
    double distanceToDefault = Math.log((equity + debtFaceValue) / debtFaceValue)
            + (expectedAssetReturn - .5 * overallValueVolatility * overallValueVolatility) * forecastHorizon;
    distanceToDefault /= Math.sqrt(forecastHorizon) * overallValueVolatility;
    NormalDistribution normalDist = new NormalDistribution();
    final double defaultProbability = normalDist.cumulativeProbability(-distanceToDefault);
    return Pair.create(distanceToDefault, defaultProbability);
}
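
A hypothetical call to this method, with all inputs invented for illustration (compute is package-private, so such a call would live in the same package; Pair is assumed to be org.apache.commons.math3.util.Pair, consistent with Pair.create above):

Pair<Double, Double> result = NaiveMertonDistanceToDefaultAlgorithm.compute(
        1.0e6, // debt face value (F)
        0.30,  // equity volatility
        2.0e6, // equity (E)
        0.05,  // expected asset return over the last forecast window
        1.0    // forecast horizon (T)
        );
final double distanceToDefault = result.getFirst();
final double defaultProbability = result.getSecond();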

From source file:com.mapr.synth.TermGeneratorTest.java

@Test
public void distinctVocabularies() {
    TermGenerator x1 = new TermGenerator(WORDS, 1, 0.8);
    final Multiset<String> k1 = HashMultiset.create();
    for (int i = 0; i < 50000; i++) {
        k1.add(x1.sample());
    }

    TermGenerator x2 = new TermGenerator(WORDS, 1, 0.8);
    final Multiset<String> k2 = HashMultiset.create();
    for (int i = 0; i < 50000; i++) {
        k2.add(x2.sample());
    }

    final NormalDistribution normal = new NormalDistribution();
    List<Double> scores = Ordering.natural()
            .sortedCopy(Iterables.transform(k1.elementSet(), new Function<String, Double>() {
                public Double apply(String s) {
                    return normal.cumulativeProbability(LogLikelihood.rootLogLikelihoodRatio(k1.count(s),
                            50000 - k1.count(s), k2.count(s), 50000 - k2.count(s)));
                }
            }));
    int n = scores.size();
    //        System.out.printf("%.5f, %.5f, %.5f, %.5f, %.5f, %.5f, %.5f", scores.get(0), scores.get((int) (0.05*n)), scores.get(n / 4), scores.get(n / 2), scores.get(3 * n / 4), scores.get((int) (0.95 * n)), scores.get(n - 1));
    int i = 0;
    for (Double score : scores) {
        if (i % 10 == 0) {
            System.out.printf("%.6f\t%.6f\n", (double) i / n, score);
        }

        i++;
    }
}

From source file:iDynoOptimizer.MOEAFramework26.src.org.moeaframework.util.statistics.MannWhitneyUTest.java

/**
 * {@inheritDoc}
 * <p>
 * When the samples from both populations are less than 20, only alpha
 * values of 0.05 and 0.01 are valid. This is because a table is used to
 * accurately determine the critical values. When more than 20 samples are
 * available, the normal approximation is used allowing any value for alpha.
 * 
 * @throws IllegalArgumentException if an insufficient sampling size is
 *         provided, or if an invalid alpha value is provided
 */
@Override
public boolean test(double alpha) {
    double[] R = new double[2];
    int[] n = new int[2];

    update();

    for (RankedObservation observation : data) {
        n[observation.getGroup()]++;
        R[observation.getGroup()] += observation.getRank();
    }

    double U1 = n[0] * n[1] + n[0] * (n[0] + 1) / 2.0 - R[0];
    double U2 = n[0] * n[1] + n[1] * (n[1] + 1) / 2.0 - R[1];
    double U = Math.min(U1, U2);

    // expose U for testing
    lastU = U;

    if ((n[0] <= 20) && (n[1] <= 20)) {
        return U <= getCriticalUValueFromTable(n[0], n[1], alpha);
    } else {
        double z = 0.0;
        NormalDistribution dist = new NormalDistribution();

        if (Settings.isContinuityCorrection()) {
            z = (Math.abs(U - n[0] * n[1] / 2.0) - 0.5) / Math.sqrt(n[0] * n[1] * (n[0] + n[1] + 1.0) / 12.0);
        } else {
            z = (U - n[0] * n[1] / 2.0) / Math.sqrt(n[0] * n[1] * (n[0] + n[1] + 1.0) / 12.0);
        }

        return Math.abs(z) >= Math.abs(dist.inverseCumulativeProbability(alpha));
    }
}
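
A minimal standalone sketch of the large-sample branch above, with the U statistic and sample sizes invented for illustration; it mirrors the method's comparison of |z| against the normal quantile at alpha:

import org.apache.commons.math3.distribution.NormalDistribution;

public class MannWhitneyNormalApproxDemo {
    public static void main(String[] args) {
        double U = 250.0;            // invented U statistic
        double n0 = 25.0, n1 = 25.0; // invented sample sizes (> 20, so the approximation applies)
        double alpha = 0.05;

        // z without the continuity correction, as in the else-branch above
        double z = (U - n0 * n1 / 2.0) / Math.sqrt(n0 * n1 * (n0 + n1 + 1.0) / 12.0);

        NormalDistribution dist = new NormalDistribution();
        boolean significant = Math.abs(z) >= Math.abs(dist.inverseCumulativeProbability(alpha));
        System.out.println(z + " -> " + significant); // z is about -1.21: not significant here
    }
}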

From source file:gamlss.distributions.ST4.java

/** This is the Skew t type4 distribution with supplied 
 * link function for each of the distribution parameters.
 * @param muLink - link function for mu distribution parameter
 * @param sigmaLink - link function for sigma distribution parameter
 * @param nuLink - link function for nu distribution parameter
 * @param tauLink - link function for tau distribution parameter
 */
public ST4(final int muLink, final int sigmaLink, final int nuLink, final int tauLink) {

    distributionParameterLink.put(DistributionSettings.MU,
            MakeLinkFunction.checkLink(DistributionSettings.ST4, muLink));
    distributionParameterLink.put(DistributionSettings.SIGMA,
            MakeLinkFunction.checkLink(DistributionSettings.ST4, sigmaLink));
    distributionParameterLink.put(DistributionSettings.NU,
            MakeLinkFunction.checkLink(DistributionSettings.ST4, nuLink));
    distributionParameterLink.put(DistributionSettings.TAU,
            MakeLinkFunction.checkLink(DistributionSettings.ST4, tauLink));

    noDist = new NormalDistribution();
    tdDist = new TDistr();
}

From source file:iDynoOptimizer.MOEAFramework26.src.org.moeaframework.util.statistics.WilcoxonSignedRanksTest.java

/**
 * {@inheritDoc}
 * <p>
 * When the samples from both populations are less than 20, only alpha
 * values of 0.05 and 0.01 are valid. This is because a table is used to
 * accurately determine the critical values. When more than 20 samples are
 * available, the normal approximation is used allowing any value for alpha.
 * 
 * @throws IllegalArgumentException if an insufficient sampling size is
 *         provided, or if an invalid alpha value is provided
 */
@Override
public boolean test(double alpha) {
    double Rpos = 0.0;
    double Rneg = 0.0;

    update();

    for (RankedObservation observation : data) {
        if (observation.getValue() < 0.0) {
            Rneg += observation.getRank();
        } else {
            Rpos += observation.getRank();
        }
    }

    int n = data.size();
    double T = Math.min(Rpos, Rneg);

    // expose T for testing
    lastT = T;

    if (n <= 50) {
        return T <= getCriticalTValueFromTable(n, alpha);
    } else {
        double z = 0.0;
        NormalDistribution dist = new NormalDistribution();

        if (Settings.isContinuityCorrection()) {
            z = (Math.abs(T - n * (n + 1) / 4.0) - 0.5) / Math.sqrt(n * (n + 1) * (n + n + 1) / 24.0);
        } else {
            z = (T - n * (n + 1) / 4.0) / Math.sqrt(n * (n + 1) * (n + n + 1) / 24.0);
        }

        return Math.abs(z) >= Math.abs(dist.inverseCumulativeProbability(alpha));
    }
}

From source file:gamlss.distributions.ST3.java

/**
 * This is the Skew t type3 distribution with supplied link 
 * function for each of the distribution parameters.
 * @param muLink - link function for mu distribution parameter
 * @param sigmaLink - link function for sigma distribution parameter
 * @param nuLink - link function for nu distribution parameter
 * @param tauLink - link function for tau distribution parameter
 */
public ST3(final int muLink, final int sigmaLink, final int nuLink, final int tauLink) {

    distributionParameterLink.put(DistributionSettings.MU,
            MakeLinkFunction.checkLink(DistributionSettings.ST3, muLink));
    distributionParameterLink.put(DistributionSettings.SIGMA,
            MakeLinkFunction.checkLink(DistributionSettings.ST3, sigmaLink));
    distributionParameterLink.put(DistributionSettings.NU,
            MakeLinkFunction.checkLink(DistributionSettings.ST3, nuLink));
    distributionParameterLink.put(DistributionSettings.TAU,
            MakeLinkFunction.checkLink(DistributionSettings.ST3, tauLink));

    noDist = new NormalDistribution();
    tdDist = new TDistr();
}

From source file:com.ibm.iot.iotspark.IoTZScore.java

public double zScoreToPercentile(double zScore) {
    double percentile = 0;

    NormalDistribution dist = new NormalDistribution();
    percentile = dist.cumulativeProbability(zScore) * 100;
    return percentile;
}
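
A quick sanity check of this helper, assuming IoTZScore has an accessible no-argument constructor (an assumption; the expected values follow from the standard normal CDF):

IoTZScore scorer = new IoTZScore();
scorer.zScoreToPercentile(0.0);   // 50.0
scorer.zScoreToPercentile(1.96);  // ~97.5
scorer.zScoreToPercentile(-1.0);  // ~15.87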

From source file:gamlss.distributions.ST1.java

/** This is the Skew t (Azzalini type 1) distribution with
 *  supplied link function for each of the distribution parameters.
 * @param muLink - link function for mu distribution parameter
 * @param sigmaLink - link function for sigma distribution parameter
 * @param nuLink - link function for nu distribution parameter
 * @param tauLink - link function for tau distribution parameter
 */
public ST1(final int muLink, final int sigmaLink, final int nuLink, final int tauLink) {

    distributionParameterLink.put(DistributionSettings.MU,
            MakeLinkFunction.checkLink(DistributionSettings.ST1, muLink));
    distributionParameterLink.put(DistributionSettings.SIGMA,
            MakeLinkFunction.checkLink(DistributionSettings.ST1, sigmaLink));
    distributionParameterLink.put(DistributionSettings.NU,
            MakeLinkFunction.checkLink(DistributionSettings.ST1, nuLink));
    distributionParameterLink.put(DistributionSettings.TAU,
            MakeLinkFunction.checkLink(DistributionSettings.ST1, tauLink));

    tdDist = new TDistr();
    noDist = new NormalDistribution();
    integrator = new LegendreGaussIntegrator(2, LegendreGaussIntegrator.DEFAULT_RELATIVE_ACCURACY,
            LegendreGaussIntegrator.DEFAULT_ABSOLUTE_ACCURACY);
    function = new IntegratingFunction();
    interval = new double[2];
    uniRootSolver = new BrentSolver(1.0e-12, 1.0e-8);
    uniRootObj = new UniRootObjFunction();
}

From source file:jasima.core.experiment.OCBAExperiment.java

protected double[] calcPCSPriosPerConfiguration() {
    final SummaryStat best = stats[currBest];
    final double bestMean = best.mean();

    double bestNormVariance = best.variance() / best.numObs();

    double[] prodTerms = new double[stats.length];
    for (int i = 0; i < stats.length; i++) {
        if (i == currBest)
            continue;

        SummaryStat vs = stats[i];
        prodTerms[i] = (bestMean - vs.mean()) / Math.sqrt(bestNormVariance + vs.variance() / vs.numObs());
    }

    NormalDistribution normalDist = new NormalDistribution();

    for (int i = 0; i < stats.length; i++) {
        if (i == currBest)
            continue;

        prodTerms[i] = normalDist.cumulativeProbability(prodTerms[i]);
        if (getProblemType() == ProblemType.MINIMIZE)
            prodTerms[i] = 1.0 - prodTerms[i];
    }

    return prodTerms;
}