Example usage for org.apache.commons.math3.special Gamma digamma

List of usage examples for org.apache.commons.math3.special Gamma digamma

Introduction

On this page you can find an example usage for org.apache.commons.math3.special Gamma digamma.

Prototype

public static double digamma(double x) 

Source Link

Document

Computes the digamma function of x.

This is an independently written implementation of the algorithm described in Jose Bernardo, Algorithm AS 103: Psi (Digamma) Function, Applied Statistics, 1976.

Some of the constants have been changed to increase accuracy at the moderate expense of run-time.

Usage

From source file:gedi.util.math.stat.distributions.LfcDistribution.java

/**
 * Expected log2 fold change: (psi(a) - psi(b)) / ln 2, where psi is the
 * digamma function.
 */
public static double mean(double a, double b) {
    double digammaGap = Gamma.digamma(a) - Gamma.digamma(b);
    return digammaGap / Math.log(2);
}

From source file:eu.amidst.core.exponentialfamily.EF_SparseDirichletTest.java

public static void test() {
    // Build a sparse Dirichlet parameter distribution over a 10-state variable "A".
    ParameterVariables paramVars = new ParameterVariables(0);
    EF_SparseDirichlet dirichlet = new EF_SparseDirichlet(paramVars.newSparseDirichletParameter("A", 10));

    if (Main.VERBOSE) {
        System.out.println(dirichlet.getNaturalParameters().output());
    }
    if (Main.VERBOSE) {
        System.out.println(dirichlet.getMomentParameters().output());
    }

    if (Main.VERBOSE) {
        System.out.println();
    }

    // Shift every natural parameter up by 1.0, then inspect the result.
    dirichlet.getNaturalParameters().sumConstant(1.0);
    if (Main.VERBOSE) {
        System.out.println(dirichlet.getNaturalParameters().output());
    }

    if (Main.VERBOSE) {
        System.out.println();
    }

    // Re-derive the moment parameters from the shifted natural parameters.
    dirichlet.updateMomentFromNaturalParameters();
    if (Main.VERBOSE) {
        System.out.println(dirichlet.getMomentParameters().output());
    }

    // First moment of a Dirichlet component: psi(alpha_k) - psi(sum of alphas).
    assertEquals(Gamma.digamma(2.0) - Gamma.digamma(10 + 10), dirichlet.getMomentParameters().get(0));

}

From source file:gedi.lfc.foldchange.PosteriorMeanLog2FoldChange.java

/**
 * Posterior-mean log2 fold change of {@code a} vs. {@code b} with the
 * configured pseudo-counts {@code priorA}/{@code priorB} added in.
 * Propagates NaN when either input is undefined.
 */
@Override
public double computeFoldChange(double a, double b) {
    if (Double.isNaN(a) || Double.isNaN(b)) {
        return Double.NaN;
    }
    double digammaGap = Gamma.digamma(a + priorA) - Gamma.digamma(b + priorB);
    return digammaGap / Math.log(2);
}

From source file:edu.byu.nlp.stats.SymmetricDirichletMLEOptimizableTest.java

/**
 * Test method for {@link edu.byu.nlp.stats.SymmetricDirichletMLENROptimizable#computeNext(java.lang.Double)}.
 */
@Test
public void testComputeNext() {
    final double alpha = 2.13;

    final double[][] data = DirichletTestUtils.sampleDataset();
    SymmetricDirichletMLENROptimizable o = SymmetricDirichletMLENROptimizable.newOptimizable(data);

    ValueAndObject<Double> vao = o.computeNext(alpha);

    // Re-derive the Newton-Raphson step the optimizable should have taken.
    final int numRows = data.length;
    final int numCols = data[0].length;
    double logThetaSum = DoubleArrays.sum(Matrices.sumOverFirst(data));
    double expectedValue = numRows * Gamma.logGamma(numCols * alpha)
            - numRows * numCols * Gamma.logGamma(alpha) + (alpha - 1) * logThetaSum;
    double gradient = numRows * Gamma.digamma(numCols * alpha)
            - numRows * numCols * Gamma.digamma(alpha) + logThetaSum;
    double curvature = numRows * Gamma.trigamma(numCols * alpha)
            - numRows * numCols * Gamma.trigamma(alpha);
    double expectedNextAlpha = alpha - gradient / curvature;

    // Ensure that the update was computed correctly
    assertThat(vao.getValue()).isEqualTo(expectedValue, delta);
    assertThat(vao.getObject().doubleValue()).isEqualTo(expectedNextAlpha, delta);
}

From source file:edu.byu.nlp.stats.SymmetricDirichletMLEFPOptimizable.java

/** {@inheritDoc} */
@Override
public ValueAndObject<Double> computeNext(Double alphaD) {
    final double currentAlpha = alphaD.doubleValue();
    // Work in terms of the precision s = K * alpha.
    final double currentS = K * currentAlpha;
    double denominator = (K - 1) / currentS - Gamma.digamma(currentS) + Gamma.digamma(currentS / K);
    double updatedS = (K - 1) / denominator;
    double updatedAlpha = updatedS / K;
    if (updatedAlpha <= 0.0) {
        throw new IllegalStateException("Fixed-point update failed; alpha = " + updatedAlpha);
    }

    // Objective value at the updated symmetric alpha.
    double objective = N * (Gamma.logGamma(updatedS)
            + updatedAlpha * DoubleArrays.sum(meanLogTheta)
            - K * Gamma.logGamma(updatedAlpha));
    return new ValueAndObject<Double>(objective, updatedAlpha);
}

From source file:edu.byu.nlp.stats.DirichletMultinomialMLEOptimizable.java

/**
 * Sums {@code digamma(data[i][k] + alpha[k])} over the first {@code N} rows,
 * minus {@code N * digamma(alpha[k])} (a constant, kept out of the loop).
 */
private static double computeNumerator(double[][] data, double[] alpha, int k, int N) {
    double acc = 0;
    for (int row = 0; row < N; row++) {
        acc += Gamma.digamma(data[row][k] + alpha[k]);
    }
    // Constant term factored out of the loop for efficiency.
    return acc - Gamma.digamma(alpha[k]) * N;
}

From source file:edu.byu.nlp.stats.SymmetricDirichletOptimizationHelper.java

/**
 * First derivative at {@code alpha}:
 * {@code N * K * (psi(K * alpha) - psi(alpha)) + sumOfLogThetas}.
 *
 * @throws IllegalArgumentException if {@code alpha} is not strictly positive
 */
public double firstDerivativeAt(double alpha) {
    Preconditions.checkArgument(alpha > 0.0, "alpha must be strictly greater than zero; was %s", alpha);
    double digammaGap = Gamma.digamma(K * alpha) - Gamma.digamma(alpha);
    double derivative = N * K * digammaGap + sumOfLogThetas;
    logger.debug(String.format("derivativeAt(%f) = %f", alpha, derivative));
    return derivative;
}

From source file:edu.byu.nlp.stats.DirichletMLEOptimizable.java

/**
 * Gradient vector: {@code g[k] = N * (psi(alphaSum) - psi(alpha[k])) + sumTheta[k]}.
 */
private double[] computeG(double[] alpha, double alphaSum) {
    // psi(alphaSum) is shared by every component; evaluate it once.
    final double psiOfSum = Gamma.digamma(alphaSum);
    final double[] gradient = new double[alpha.length];
    for (int k = 0; k < gradient.length; k++) {
        gradient[k] = N * (psiOfSum - Gamma.digamma(alpha[k])) + sumTheta[k];
    }
    return gradient;
}

From source file:edu.byu.nlp.stats.DirichletMultinomialMLEOptimizable.java

/**
 * Sums {@code digamma(perIDataSums[i] + alphaSum)} over all rows, minus
 * {@code length * digamma(alphaSum)} (a constant, kept out of the loop).
 */
private static double computeDenominator(double[] perIDataSums, double alphaSum) {
    double acc = 0;
    for (double rowSum : perIDataSums) {
        acc += Gamma.digamma(rowSum + alphaSum);
    }
    // Constant term factored out of the loop.
    return acc - Gamma.digamma(alphaSum) * perIDataSums.length;
}

From source file:edu.byu.nlp.stats.DirichletMLEOptimizableTest.java

/**
 * Computes a Newton-Raphson update in-place to alpha.
 *///from  w  ww.  j a  v a  2s . c  o  m
/**
 * Computes a Newton-Raphson update in-place to alpha.
 *
 * <p>Builds the Hessian and gradient of the Dirichlet log-likelihood at
 * {@code alpha} explicitly, inverts the Hessian via LU decomposition, and
 * returns {@code alpha - H^-1 * g}. Improvement over the original: the
 * loop-invariant quantities {@code sum(alpha)}, {@code trigamma(sum(alpha))}
 * and {@code digamma(sum(alpha))} are computed once instead of inside the
 * O(K^2) / O(K) loops; results are numerically identical.
 *
 * @param data  matrix of observations (rows = samples, columns = components)
 * @param alpha current Dirichlet parameter vector (not modified)
 * @return the updated parameter vector
 */
private static RealVector newtonRaphsonUpdate(final double[][] data, double[] alpha) {
    final int numSamples = data.length;
    final double alphaSum = DoubleArrays.sum(alpha);
    // Hoisted: these depend only on alphaSum, not on the loop indices.
    final double trigammaOfSum = Gamma.trigamma(alphaSum);
    final double digammaOfSum = Gamma.digamma(alphaSum);

    // We'll compute the gold-standard value the "long" way (taking the inverse of the Hessian)
    RealMatrix hessian = new Array2DRowRealMatrix(alpha.length, alpha.length);
    for (int r = 0; r < hessian.getRowDimension(); r++) {
        for (int c = 0; c < hessian.getColumnDimension(); c++) {
            hessian.addToEntry(r, c, numSamples * trigammaOfSum);
            if (r == c) {
                // Diagonal correction term.
                hessian.addToEntry(r, c, -numSamples * Gamma.trigamma(alpha[r]));
            }
        }
    }
    RealVector derivative = new ArrayRealVector(alpha.length);
    for (int k = 0; k < alpha.length; k++) {
        derivative.setEntry(k, numSamples * (digammaOfSum - Gamma.digamma(alpha[k])));
        for (double[] theta : data) {
            derivative.addToEntry(k, theta[k]);
        }
    }

    // expected = alpha - H^-1 * g
    RealMatrix hessianInverse = new LUDecomposition(hessian).getSolver().getInverse();
    RealVector negDiff = hessianInverse.preMultiply(derivative);
    negDiff.mapMultiplyToSelf(-1.0);

    RealVector expected = new ArrayRealVector(alpha, true);
    return expected.add(negDiff);
}