Example usage for org.apache.commons.math3.linear RealMatrix subtract

Introduction

On this page you can find example usage for org.apache.commons.math3.linear RealMatrix subtract.

Prototype

RealMatrix subtract(RealMatrix m) throws MatrixDimensionMismatchException;

Document

Returns this minus m.
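
Before the project examples below, here is a minimal self-contained sketch of the call itself; the class and variable names are illustrative and not taken from any of the projects listed.

import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;

public class SubtractDemo {
    public static void main(String[] args) {
        RealMatrix a = MatrixUtils.createRealMatrix(new double[][] { { 5, 6 }, { 7, 8 } });
        RealMatrix b = MatrixUtils.createRealMatrix(new double[][] { { 1, 2 }, { 3, 4 } });

        // subtract returns a new matrix; neither operand is modified.
        RealMatrix diff = a.subtract(b); // { {4, 4}, {4, 4} }
        System.out.println(diff);

        // A MatrixDimensionMismatchException is thrown if the shapes differ.
    }
}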

Usage

From source file: edu.cudenver.bios.power.glmm.GLMMTest.java

/**
 * Create a test to perform data analysis.
 * @param fMethod the F approximation method
 * @param X the design matrix
 * @param XtXInverse the inverse of X'X (may be null, in which case it is computed from X)
 * @param rank the rank of the design matrix
 * @param Y the response matrix
 * @param C the between-participant contrast matrix
 * @param U the within-participant contrast matrix
 * @param thetaNull the null hypothesis matrix
 */
public GLMMTest(FApproximation fMethod, RealMatrix X, RealMatrix XtXInverse, int rank, RealMatrix Y,
        RealMatrix C, RealMatrix U, RealMatrix thetaNull) {
    this.fMethod = fMethod;
    this.Xessence = null;
    this.XtXInverse = XtXInverse;
    if (this.XtXInverse == null) {
        this.XtXInverse = new LUDecomposition(X.transpose().multiply(X)).getSolver().getInverse();
    }
    this.totalN = X.getRowDimension();
    this.rank = rank;
    this.C = C;
    this.U = U;
    this.thetaNull = thetaNull;
    this.beta = this.XtXInverse.multiply(X.transpose()).multiply(Y);
    RealMatrix YHat = X.multiply(this.beta);
    RealMatrix Ydiff = Y.subtract(YHat);
    this.sigmaError = (Ydiff.transpose().multiply(Ydiff))
            .scalarMultiply(((double) 1 / (double) (totalN - rank)));

    // check if uni/multivariate design
    multivariate = (Y.getColumnDimension() > 1);

    // cache the value of M
    RealMatrix cxxcEssence = C.multiply((this.XtXInverse).multiply(C.transpose()));
    M = new LUDecomposition(cxxcEssence).getSolver().getInverse();
}
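
The constructor above estimates the error covariance from the residual matrix Ydiff = Y - YHat. Below is a reduced, standalone sketch of just that step; the class name ResidualSketch and the toy X and Y values are made up for illustration.

import org.apache.commons.math3.linear.LUDecomposition;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;

public class ResidualSketch {
    public static void main(String[] args) {
        // Illustrative design matrix X (4 observations, 2 columns) and response Y.
        RealMatrix X = MatrixUtils.createRealMatrix(new double[][] {
                { 1, 0 }, { 1, 0 }, { 1, 1 }, { 1, 2 } });
        RealMatrix Y = MatrixUtils.createRealMatrix(new double[][] {
                { 1.0 }, { 1.2 }, { 2.1 }, { 2.9 } });

        // Ordinary least squares: beta = (X'X)^-1 X'Y
        RealMatrix XtXInverse =
                new LUDecomposition(X.transpose().multiply(X)).getSolver().getInverse();
        RealMatrix beta = XtXInverse.multiply(X.transpose()).multiply(Y);

        // Residuals via subtract: Ydiff = Y - X*beta
        RealMatrix Ydiff = Y.subtract(X.multiply(beta));

        // Error covariance estimate Ydiff'Ydiff / (N - rank), as in the constructor above.
        int totalN = X.getRowDimension();
        int rank = 2; // this toy design has full column rank
        RealMatrix sigmaError = Ydiff.transpose().multiply(Ydiff)
                .scalarMultiply(1.0 / (totalN - rank));
        System.out.println(sigmaError);
    }
}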

From source file: com.github.thorbenlindhauer.factor.CanonicalGaussianFactor.java

@Override
public GaussianFactor marginal(Scope marginalizationScope) {
    // the following assumes that the precision matrix (and mean vector) can be restructured as follows:
    // ( SUBMATRIX_XX SUBMATRIX_XY )
    // ( SUBMATRIX_YX SUBMATRIX_YY )
    // where X indicates the entries for the variables that are kept (i.e. in the scope argument) and
    // Y the variables that are marginalized out

    if (marginalizationScope.contains(scope)) {
        return this;
    }

    Scope newScope = scope.intersect(marginalizationScope);
    Scope scopeToMarginalize = scope.reduceBy(newScope);

    int[] xMapping = newScope.createContinuousVariableMapping(scope);
    RealMatrix xxMatrix = precisionMatrix.getSubMatrix(xMapping, xMapping);

    int[] yMapping = scopeToMarginalize.createContinuousVariableMapping(scope);
    RealMatrix yyMatrix = precisionMatrix.getSubMatrix(yMapping, yMapping);

    RealMatrix xyMatrix = precisionMatrix.getSubMatrix(xMapping, yMapping);
    RealMatrix yxMatrix = precisionMatrix.getSubMatrix(yMapping, xMapping);

    MathUtil yyUtil = new MathUtil(yyMatrix);
    RealMatrix yyInverse = yyUtil.invert();
    RealMatrix newPrecisionMatrix = xxMatrix.subtract(xyMatrix.multiply(yyInverse.multiply(yxMatrix)));

    RealVector xVector = getSubVector(scaledMeanVector, xMapping);
    RealVector yVector = getSubVector(scaledMeanVector, yMapping);

    RealVector newScaledMeanVector = xVector.subtract(xyMatrix.operate(yyInverse.operate(yVector)));

    MathUtil inverseUtil = new MathUtil(yyInverse.scalarMultiply(2.0d * Math.PI));
    double newNormalizationConstant = normalizationConstant
            + 0.5d * (Math.log(inverseUtil.determinant()) + yVector.dotProduct(yyInverse.operate(yVector)));

    return new CanonicalGaussianFactor(newScope, newPrecisionMatrix, newScaledMeanVector,
            newNormalizationConstant);
}
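
The xxMatrix.subtract(...) call above is a Schur complement: the marginal precision over the kept variables is K_XX - K_XY * K_YY^-1 * K_YX. Here is a standalone sketch of that computation on a toy 3x3 precision matrix; the values and index sets are chosen purely for illustration.

import org.apache.commons.math3.linear.LUDecomposition;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;

public class SchurComplementSketch {
    public static void main(String[] args) {
        // Toy 3x3 precision matrix; keep variables {0, 1} (X) and marginalize out {2} (Y).
        RealMatrix K = MatrixUtils.createRealMatrix(new double[][] {
                { 4.0, 1.0, 0.5 },
                { 1.0, 3.0, 0.2 },
                { 0.5, 0.2, 2.0 } });

        int[] x = { 0, 1 };
        int[] y = { 2 };
        RealMatrix Kxx = K.getSubMatrix(x, x);
        RealMatrix Kxy = K.getSubMatrix(x, y);
        RealMatrix Kyx = K.getSubMatrix(y, x);
        RealMatrix Kyy = K.getSubMatrix(y, y);

        // Schur complement: Kxx - Kxy * Kyy^-1 * Kyx is the marginal precision over X.
        RealMatrix KyyInverse = new LUDecomposition(Kyy).getSolver().getInverse();
        RealMatrix marginalPrecision = Kxx.subtract(Kxy.multiply(KyyInverse).multiply(Kyx));
        System.out.println(marginalPrecision);
    }
}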

From source file: com.joptimizer.algebra.CholeskyComparisonTest.java

/**
 * Test with a diagonal matrix.
 */
public void testCompareFactorizations2() throws Exception {
    log.debug("testCompareFactorizations2");

    int iterations = 4;
    int m = 1000;
    int dim = m * m;

    DoubleMatrix2D QMatrix = DoubleFactory2D.sparse.diagonal(DoubleFactory1D.sparse.make(m, 1.));
    double[][] QMatrixData = QMatrix.toArray();
    //log.debug("QMatrix: " + Utils.toString(QMatrix.toArray()));
    RealMatrix A = MatrixUtils.createRealMatrix(QMatrix.toArray());
    log.debug("cardinality: " + QMatrix.cardinality());
    int nz = dim - QMatrix.cardinality();
    log.debug("sparsity index: " + 100 * new Double(nz) / dim + " %");

    //try Cholesky 1
    long t1F = System.currentTimeMillis();
    CholeskyFactorization myc1 = null;
    for (int i = 0; i < iterations; i++) {
        //factorization
        myc1 = new CholeskyFactorization(QMatrix);
        myc1.factorize();
    }
    log.debug("Cholesky standard factorization time: " + (System.currentTimeMillis() - t1F));
    RealMatrix L1 = MatrixUtils.createRealMatrix(myc1.getL().toArray());
    double norm1 = A.subtract(L1.multiply(L1.transpose())).getNorm();
    log.debug("norm1                               : " + norm1);
    assertEquals(0., norm1, 1.E-12 * Math.sqrt(dim));
    long t1I = System.currentTimeMillis();
    for (int i = 0; i < iterations; i++) {
        //inversion
        myc1.getInverse();
    }
    log.debug("Cholesky standard inversion time    : " + (System.currentTimeMillis() - t1I));
    RealMatrix AInv1 = MatrixUtils.createRealMatrix(myc1.getInverse().toArray());

    //try Cholesky 5
    long t5 = System.currentTimeMillis();
    CholeskySparseFactorization myc5 = null;
    for (int i = 0; i < iterations; i++) {
        myc5 = new CholeskySparseFactorization(new SparseDoubleMatrix2D(QMatrix.toArray()));
        myc5.factorize();
    }
    log.debug("Cholesky sparse factorization time  : " + (System.currentTimeMillis() - t5));
    RealMatrix L5 = MatrixUtils.createRealMatrix(myc5.getL().toArray());
    double norm5 = A.subtract(L5.multiply(L5.transpose())).getNorm();
    log.debug("norm5                               : " + norm5);
    assertEquals(0., norm5, 1.E-12 * Math.sqrt(dim));
    assertEquals(0., L1.subtract(L5).getNorm(), 1.E-10 * Math.sqrt(dim));
}
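
The assertions above judge each factorization by the norm of A - L*L'. The same reconstruction check can be written with classes from Commons Math alone, as in this reduced sketch; the matrix values are made up and the JOptimizer factorization classes are not needed for this part.

import org.apache.commons.math3.linear.CholeskyDecomposition;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;

public class CholeskyResidualSketch {
    public static void main(String[] args) {
        // Small symmetric positive-definite test matrix.
        RealMatrix A = MatrixUtils.createRealMatrix(new double[][] {
                { 4.0, 2.0, 0.0 },
                { 2.0, 5.0, 1.0 },
                { 0.0, 1.0, 3.0 } });

        RealMatrix L = new CholeskyDecomposition(A).getL();

        // Reconstruction error ||A - L*L'||; should be at machine precision.
        double norm = A.subtract(L.multiply(L.transpose())).getNorm();
        System.out.println("reconstruction error: " + norm);
    }
}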

From source file: com.itemanalysis.psychometrics.factoranalysis.GPArotation.java

private RotationResults GPFoblq(RealMatrix A, boolean normalize, int maxIter, double eps)
        throws ConvergenceException {
    int ncol = A.getColumnDimension();

    RealMatrix Tinner = null;
    RealMatrix TinnerInv = null;
    RealMatrix Tmat = new IdentityMatrix(ncol);

    if (normalize) {
        //elementwise division by normalizing weights
        final RealMatrix W = getNormalizingWeights(A, true);
        A.walkInRowOrder(new DefaultRealMatrixChangingVisitor() {
            @Override
            public double visit(int row, int column, double value) {
                return value / W.getEntry(row, column);
            }
        });
    }

    RealMatrix TmatInv = new LUDecomposition(Tmat).getSolver().getInverse();
    RealMatrix L = A.multiply(TmatInv.transpose());

    //compute gradientAt and function value
    gpFunction.computeValues(L);
    RealMatrix VgQ = gpFunction.getGradient();
    RealMatrix VgQt = VgQ;
    double f = gpFunction.getValue();
    double ft = f;
    RealMatrix G = ((L.transpose().multiply(VgQ).multiply(TmatInv)).transpose()).scalarMultiply(-1.0);

    int iter = 0;
    double alpha = 1.0;
    double s = eps + 0.5;
    double s2 = Math.pow(s, 2);
    int innerMaxIter = 10;
    int innerCount = 0;

    IdentityMatrix I = new IdentityMatrix(G.getRowDimension());
    RealMatrix V1 = MatrixUtils.getVector(ncol, 1.0);

    while (iter < maxIter) {
        RealMatrix M = MatrixUtils.multiplyElements(Tmat, G);
        RealMatrix diagP = new DiagonalMatrix(V1.multiply(M).getRow(0));
        RealMatrix Gp = G.subtract(Tmat.multiply(diagP));
        s = Math.sqrt(Gp.transpose().multiply(Gp).getTrace());
        s2 = Math.pow(s, 2);

        if (s < eps) {
            break;
        }
        alpha = 2.0 * alpha;

        innerCount = 0;
        for (int i = 0; i < innerMaxIter; i++) {
            RealMatrix X = Tmat.subtract(Gp.scalarMultiply(alpha));
            RealMatrix X2 = MatrixUtils.multiplyElements(X, X);
            RealMatrix V = V1.multiply(X2);
            V.walkInRowOrder(new DefaultRealMatrixChangingVisitor() {
                @Override
                public double visit(int row, int column, double value) {
                    return 1.0 / Math.sqrt(value);
                }
            });

            //compute new value of T, its inverse, and the rotated loadings
            RealMatrix diagV = new DiagonalMatrix(V.getRow(0));
            Tinner = X.multiply(diagV);
            TinnerInv = new LUDecomposition(Tinner).getSolver().getInverse();
            L = A.multiply(TinnerInv.transpose());

            //compute new values of the gradientAt and the rotation criteria
            gpFunction.computeValues(L);
            VgQt = gpFunction.getGradient();
            ft = gpFunction.getValue();

            innerCount++;
            if (ft < f - 0.5 * s2 * alpha) {
                break;
            }
            alpha = alpha / 2.0;
        }

        //            System.out.println(iter + "  " + f + "  " + s + "  " + Math.log10(s) + "  " + alpha + "  " + innerCount);

        Tmat = Tinner;
        f = ft;
        G = (L.transpose().multiply(VgQt).multiply(TinnerInv)).transpose().scalarMultiply(-1.0);
        iter++;
    }

    boolean convergence = s < eps;
    if (!convergence) {
        throw new ConvergenceException();
    }

    if (normalize) {
        //elementwise multiplication by normalizing weights
        final RealMatrix W = getNormalizingWeights(A, true);
        A.walkInRowOrder(new DefaultRealMatrixChangingVisitor() {
            @Override
            public double visit(int row, int column, double value) {
                return value * W.getEntry(row, column);
            }
        });
    }

    RealMatrix Phi = Tmat.transpose().multiply(Tmat);
    RotationResults result = new RotationResults(gpFunction.getValue(), L, Phi, Tmat, rotationMethod);
    return result;

}
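
Inside the inner loop, subtract drives both the projection Gp = G - Tmat*diag(...) and the trial step X = Tmat - alpha*Gp. The sketch below shows only the general shape of such a matrix gradient step on a deliberately simple objective, f(X) = ||X - B||_F^2, not the rotation criterion above; all names and the step size are illustrative.

import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;

public class GradientStepSketch {
    public static void main(String[] args) {
        // Target B and starting point X; minimize f(X) = ||X - B||_F^2.
        RealMatrix B = MatrixUtils.createRealMatrix(new double[][] { { 1, 2 }, { 3, 4 } });
        RealMatrix X = MatrixUtils.createRealMatrix(new double[][] { { 0, 0 }, { 0, 0 } });

        double alpha = 0.25; // fixed step size
        for (int iter = 0; iter < 50; iter++) {
            // The gradient of f at X is 2*(X - B); both the gradient and the
            // update X <- X - alpha*grad rely on RealMatrix.subtract.
            RealMatrix grad = X.subtract(B).scalarMultiply(2.0);
            X = X.subtract(grad.scalarMultiply(alpha));
        }
        System.out.println(X); // converges to B
    }
}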

From source file: com.joptimizer.algebra.CholeskyComparisonTest.java

/**
 * Test with a generic sparse matrix.
 */
public void testCompareFactorizations1() throws Exception {
    log.debug("testCompareFactorizations1");

    int iterations = 4;
    int m = 1000;
    int dim = m * m;

    DoubleMatrix2D sMatrix = Utils.randomValuesSparseMatrix(m, m, -10, 10, 0.97, 12345L);
    //log.debug("sMatrix: " + Utils.toString(sMatrix.toArray()));
    DoubleMatrix2D QMatrix = Algebra.DEFAULT.mult(sMatrix, Algebra.DEFAULT.transpose(sMatrix));//positive and symmetric
    double[][] QMatrixData = QMatrix.toArray();
    //log.debug("QMatrix: " + Utils.toString(QMatrix.toArray()));
    RealMatrix A = MatrixUtils.createRealMatrix(QMatrix.toArray());
    log.debug("cardinality: " + QMatrix.cardinality());
    int nz = dim - QMatrix.cardinality();
    log.debug("sparsity index: " + 100 * new Double(nz) / dim + " %");

    //try Cholesky 1
    long t1F = System.currentTimeMillis();
    CholeskyFactorization myc1 = null;
    for (int i = 0; i < iterations; i++) {
        //factorization
        myc1 = new CholeskyFactorization(QMatrix);
        myc1.factorize();
    }
    log.debug("Cholesky standard factorization time: " + (System.currentTimeMillis() - t1F));
    RealMatrix L1 = MatrixUtils.createRealMatrix(myc1.getL().toArray());
    double norm1 = A.subtract(L1.multiply(L1.transpose())).getNorm();
    log.debug("norm1                               : " + norm1);
    assertEquals(0., norm1, 1.E-12 * Math.sqrt(dim));
    long t1I = System.currentTimeMillis();
    for (int i = 0; i < iterations; i++) {
        //inversion
        myc1.getInverse();
    }
    log.debug("Cholesky standard inversion time    : " + (System.currentTimeMillis() - t1I));
    RealMatrix AInv1 = MatrixUtils.createRealMatrix(myc1.getInverse().toArray());

    //try Cholesky 2
    long t2F = System.currentTimeMillis();
    CholeskyRCTFactorization myc2 = null;
    for (int i = 0; i < iterations; i++) {
        //factorization
        myc2 = new CholeskyRCTFactorization(QMatrix);
        myc2.factorize();
    }
    log.debug("Cholesky RCT factorization time     : " + (System.currentTimeMillis() - t2F));
    RealMatrix L2 = MatrixUtils.createRealMatrix(myc2.getL().toArray());
    double norm2 = A.subtract(L2.multiply(L2.transpose())).getNorm();
    log.debug("norm2                               : " + norm2);
    assertEquals(0., norm2, 1.E-12 * Math.sqrt(dim));
    assertEquals(0., L1.subtract(L2).getNorm(), 1.E-10 * Math.sqrt(dim));
    long t2I = System.currentTimeMillis();
    for (int i = 0; i < iterations; i++) {
        //inversion
        myc2.getInverse();
    }
    log.debug("Cholesky RCT inversion time         : " + (System.currentTimeMillis() - t2I));
    RealMatrix AInv2 = MatrixUtils.createRealMatrix(myc2.getInverse().toArray());
    assertEquals(0., AInv1.subtract(AInv2).getNorm(), 1.E-10 * Math.sqrt(dim));

    //try Cholesky 3
    long t3F = System.currentTimeMillis();
    CholeskyRCFactorization myc3 = null;
    for (int i = 0; i < iterations; i++) {
        //factorization
        myc3 = new CholeskyRCFactorization(QMatrix);
        myc3.factorize();
    }
    log.debug("Cholesky RC factorization time      : " + (System.currentTimeMillis() - t3F));
    RealMatrix L3 = MatrixUtils.createRealMatrix(myc3.getL().toArray());
    double norm3 = A.subtract(L3.multiply(L3.transpose())).getNorm();
    log.debug("norm3                               : " + norm3);
    assertEquals(0., norm3, 1.E-12 * Math.sqrt(dim));
    assertEquals(0., L1.subtract(L3).getNorm(), 1.E-10 * Math.sqrt(dim));
    long t3I = System.currentTimeMillis();
    for (int i = 0; i < iterations; i++) {
        //inversion
        myc3.getInverse();
    }
    log.debug("Cholesky RC inversion time          : " + (System.currentTimeMillis() - t3I));
    RealMatrix AInv3 = MatrixUtils.createRealMatrix(myc3.getInverse().toArray());
    assertEquals(0., AInv1.subtract(AInv3).getNorm(), 1.E-10 * Math.sqrt(dim));

    //try Cholesky 4
    long t4 = System.currentTimeMillis();
    LDLTFactorization myc4 = null;
    for (int i = 0; i < iterations; i++) {
        myc4 = new LDLTFactorization(new DenseDoubleMatrix2D(QMatrixData));
        myc4.factorize();
    }
    log.debug("Cholesky LDLT factorization time    : " + (System.currentTimeMillis() - t4));
    RealMatrix L4 = MatrixUtils.createRealMatrix(myc4.getL().toArray());
    RealMatrix D4 = MatrixUtils.createRealMatrix(myc4.getD().toArray());
    double norm4 = A.subtract(L4.multiply(D4.multiply(L4.transpose()))).getNorm();
    log.debug("norm4                               : " + norm4);
    assertEquals(0., norm4, 1.E-12 * Math.sqrt(dim));

    //try Cholesky 5
    long t5 = System.currentTimeMillis();
    CholeskySparseFactorization myc5 = null;
    for (int i = 0; i < iterations; i++) {
        myc5 = new CholeskySparseFactorization(new SparseDoubleMatrix2D(QMatrix.toArray()));
        myc5.factorize();
    }
    log.debug("Cholesky sparse factorization time  : " + (System.currentTimeMillis() - t5));
    RealMatrix L5 = MatrixUtils.createRealMatrix(myc5.getL().toArray());
    double norm5 = A.subtract(L5.multiply(L5.transpose())).getNorm();
    log.debug("norm5                               : " + norm5);
    assertEquals(0., norm5, 1.E-12 * Math.sqrt(dim));
    assertEquals(0., L1.subtract(L5).getNorm(), 1.E-10 * Math.sqrt(dim));
}

From source file: com.itemanalysis.psychometrics.factoranalysis.FactorAnalysisTest.java

@Test
public void eigenValues() {
    RealMatrix R = new Array2DRowRealMatrix(readHarman74Data());

    int nFactors = 2;
    int nVariables = 24;
    double[] x = new double[nVariables];
    for (int i = 0; i < nVariables; i++) {
        x[i] = .5;
    }

    for (int i = 0; i < nVariables; i++) {
        R.setEntry(i, i, 1.0 - x[i]);
    }

    EigenDecomposition eigen = new EigenDecomposition(R);
    RealMatrix eigenVectors = eigen.getV().getSubMatrix(0, nVariables - 1, 0, nFactors - 1);

    double[] ev = new double[nFactors];
    for (int i = 0; i < nFactors; i++) {
        ev[i] = Math.sqrt(eigen.getRealEigenvalue(i));
    }
    // use the Apache version of DiagonalMatrix when upgrading to version 3.2
    DiagonalMatrix evMatrix = new DiagonalMatrix(ev);
    RealMatrix LAMBDA = eigenVectors.multiply(evMatrix);
    RealMatrix SIGMA = (LAMBDA.multiply(LAMBDA.transpose()));
    RealMatrix RESID = R.subtract(SIGMA);

    double sum = 0.0;
    double squared = 0.0;
    for (int i = 0; i < RESID.getRowDimension(); i++) {
        for (int j = 0; j < RESID.getColumnDimension(); j++) {
            if (i != j) {
                sum += Math.pow(RESID.getEntry(i, j), 2);
            }
        }
    }

    System.out.println(sum);

    // RealMatrix SIGMA = (LAMBDA.multiply(LAMBDA.transpose()));
    //
    // System.out.println("SIGMA");
    // for(int i=0;i<SIGMA.getRowDimension();i++){
    // for(int j=0;j<SIGMA.getColumnDimension();j++){
    // System.out.print(SIGMA.getEntry(i, j) + " ");
    // }
    // System.out.println();
    // }
}
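
The test above measures fit as the sum of squared off-diagonal entries of RESID = R - SIGMA. Here is a reduced sketch of just that residual computation, with a made-up 3x3 correlation matrix and a single hypothetical loading column.

import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;

public class FactorResidualSketch {
    public static void main(String[] args) {
        // Illustrative correlation matrix R and a one-factor loading column LAMBDA.
        RealMatrix R = MatrixUtils.createRealMatrix(new double[][] {
                { 1.0, 0.6, 0.5 },
                { 0.6, 1.0, 0.4 },
                { 0.5, 0.4, 1.0 } });
        RealMatrix LAMBDA = MatrixUtils.createRealMatrix(new double[][] { { 0.8 }, { 0.7 }, { 0.6 } });

        // Model-implied matrix and residuals: RESID = R - LAMBDA*LAMBDA'
        RealMatrix SIGMA = LAMBDA.multiply(LAMBDA.transpose());
        RealMatrix RESID = R.subtract(SIGMA);

        // Sum of squared off-diagonal residuals, the fit measure used in the test above.
        double sum = 0.0;
        for (int i = 0; i < RESID.getRowDimension(); i++) {
            for (int j = 0; j < RESID.getColumnDimension(); j++) {
                if (i != j) {
                    sum += RESID.getEntry(i, j) * RESID.getEntry(i, j);
                }
            }
        }
        System.out.println(sum);
    }
}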

From source file: edu.ucdenver.bios.powersvc.resource.PowerMatrixHTMLServerResource.java

/**
 * Get matrices used in the power calculation for a "guided" study design
 * as an HTML formatted string.  This method uses the notation of
 * Muller & Stewart 2007.
 * @param studyDesign the StudyDesign object
 * @return html string with representation of matrices
 */
@Post("json:html")
public String getMatricesAsHTML(StudyDesign studyDesign) {
    StringBuffer buffer = new StringBuffer();

    buffer.append("<html><head><script type=\"text/javascript\" "
            + "src=\"http://cdn.mathjax.org/mathjax/latest/MathJax.js?"
            + "config=TeX-AMS-MML_HTMLorMML\"></script></head><body>"
            + "<script>MathJax.Hub.Queue([\"Typeset\",MathJax.Hub]);</script>");
    if (studyDesign != null) {
        // calculate cluster size
        List<ClusterNode> clusterNodeList = studyDesign.getClusteringTree();
        int clusterSize = 1;
        if (clusterNodeList != null && clusterNodeList.size() > 0) {
            for (ClusterNode node : clusterNodeList) {
                clusterSize *= node.getGroupSize();
            }
        }

        /*
         * We are clearing and resetting the clustering information
         * here for the sake of reusing the functions in PowerResourceHelper.
         * We need to get the beta, C, U, and theta null matrices WITHOUT
         * the clustering adjustment for the purposes of display.
         */
        studyDesign.setClusteringTree(null);
        FixedRandomMatrix B = PowerResourceHelper.betaMatrixFromStudyDesign(studyDesign);
        FixedRandomMatrix C = PowerResourceHelper.betweenParticipantContrastFromStudyDesign(studyDesign);
        RealMatrix U = PowerResourceHelper.withinParticipantContrastFromStudyDesign(studyDesign);
        RealMatrix thetaNull = PowerResourceHelper.thetaNullMatrixFromStudyDesign(studyDesign, C, U);
        RealMatrix thetaObserved = C.getCombinedMatrix().multiply(B.getCombinedMatrix().multiply(U));

        // add MathJax code for the matrices
        // design matrix
        buffer.append(getBeginEquation());
        buffer.append(realMatrixToTex(DISPLAY_MATRIX_DESIGN,
                PowerResourceHelper.designMatrixFromStudyDesign(studyDesign), false));
        buffer.append(getEndEquation());

        // beta matrix
        buffer.append(getBeginEquation());
        if (clusterSize > 1) {
            buffer.append(getColumnOfOnesTex(clusterSize, true));
            buffer.append(KRONECKER_PRODUCT);
        }
        buffer.append(realMatrixToTex(DISPLAY_MATRIX_BETA, B.getCombinedMatrix(), false));
        buffer.append(getEndEquation());

        // between participant contrast
        buffer.append(getBeginEquation());
        buffer.append(realMatrixToTex(DISPLAY_MATRIX_BETWEEN_CONTRAST, C.getCombinedMatrix(), false));
        buffer.append(getEndEquation());

        // within participant contrast
        buffer.append(getBeginEquation());
        if (clusterSize > 1) {
            buffer.append(getColumnOfOnesTex(clusterSize, false));
            buffer.append(KRONECKER_PRODUCT);
        }
        buffer.append(realMatrixToTex(DISPLAY_MATRIX_WITHIN_CONTRAST, U, false));
        buffer.append(getEndEquation());

        // observed theta
        buffer.append(getBeginEquation());
        buffer.append(realMatrixToTex(DISPLAY_MATRIX_THETA_OBSERVED, thetaObserved, false));
        buffer.append(getEndEquation());

        // theta null matrix
        buffer.append(getBeginEquation());
        buffer.append(realMatrixToTex(DISPLAY_MATRIX_THETA_NULL, thetaNull, false));
        buffer.append(getEndEquation());

        // add matrices for either GLMM(F) or GLMM(F,g) designs
        if (studyDesign.isGaussianCovariate()) {
            RealMatrix sigmaY = PowerResourceHelper.sigmaOutcomesMatrixFromStudyDesign(studyDesign);
            RealMatrix sigmaG = PowerResourceHelper.sigmaCovariateMatrixFromStudyDesign(studyDesign);
            RealMatrix sigmaYG = PowerResourceHelper.sigmaOutcomesCovariateMatrixFromStudyDesign(studyDesign,
                    sigmaG, sigmaY);

            // set the sigma error matrix to [sigmaY - sigmaYG * sigmaG-1 * sigmaGY] 
            RealMatrix sigmaGY = sigmaYG.transpose();
            RealMatrix sigmaGInverse = new LUDecomposition(sigmaG).getSolver().getInverse();
            RealMatrix sigmaE = sigmaY.subtract(sigmaYG.multiply(sigmaGInverse.multiply(sigmaGY)));

            // sigma for Gaussian covariate
            buffer.append(getBeginEquation());
            buffer.append(realMatrixToTex(DISPLAY_MATRIX_SIGMA_GAUSSIAN, sigmaG, false));
            buffer.append(getEndEquation());

            // sigma for Gaussian covariate and outcomes
            buffer.append(getBeginEquation());
            if (clusterSize > 1) {
                buffer.append(getColumnOfOnesTex(clusterSize, false));
                buffer.append(KRONECKER_PRODUCT);
            }
            buffer.append(realMatrixToTex(DISPLAY_MATRIX_SIGMA_OUTCOME_GAUSSIAN, sigmaYG, false));
            buffer.append(getEndEquation());

            // sigma outcomes matrix
            studyDesign.setClusteringTree(clusterNodeList);
            buffer.append(getBeginEquation());
            buffer.append(getSigmaOutcomeMatrixTex(studyDesign));
            buffer.append(getEndEquation());

        } else {
            // sigma error
            studyDesign.setClusteringTree(clusterNodeList);
            buffer.append(getBeginEquation());
            buffer.append(getSigmaErrorMatrixTex(studyDesign));
            buffer.append(getEndEquation());

        }

        buffer.append("<p/>For notation details, please see<p/>");
        buffer.append(createCitations());
        buffer.append(createBrowserNotes());
    } else {
        buffer.append("No study design specified");
    }

    buffer.append("</body></html>");
    return buffer.toString();
}

From source file: citation_prediction.CitationCore.java

/**
 * This function implements the algorithm designed by Josiah Neuberger and William Etcho to solve for a WSB solution.
 * The general math for the Newton-Raphson method was provided by Dr. Allen Parks and can be found in the function 'getPartialsData'.
 * <br><br>
 * This algorithm was created under the direction of supervising University of Mary Washington faculty member Dr. Melody Denhere, along with
 * consultation from Dr. Jeff Solka and computer scientist Kristin Ash with the Dahlgren Naval Surface Warfare Center.
 * <br><br>
 * If you're interested in more information about the research behind this algorithm, please visit:<br>
 * http://josiahneuberger.github.io/citation_prediction/
 * <br><br>
 *
 * @param data The citation data in days.
 * @param mu The initial mu guess to use in the Newton-Raphson method.
 * @param sigma The initial sigma guess to use in the Newton-Raphson method.
 * @param m The constant value, which is determined by the average number of references in each new paper for a journal.
 * @param t The last time value in the paper's citation history.
 * @param n The total number of citations for this paper.
 * @param l A list structure to store values for each iteration (should be null to start).
 * @param iteration The iteration (should be zero to start).
 * @param max_iteration The maximum number of iterations to try before stopping.
 * @param tolerance The tolerance level, which determines that a solution has been converged on.
 * @return A list containing the WSB solution of (lambda, mu, sigma, iterations).
 */
private LinkedHashMap<String, Double> newtonRaphson(double[][] data, double mu, double sigma, double m,
        double t, double n, LinkedHashMap<String, Double> l, int iteration, int max_iteration,
        double tolerance) {

    double lambda;
    LinkedHashMap<String, Double> r = new LinkedHashMap<String, Double>();

    if (iteration > max_iteration) {
        System.out.println("Does not converge.");

        r.put("lambda", null);
        r.put("mu", null);
        r.put("sigma", null);
        r.put("iterations", null);

        return r;
    } else if (tolerance < 0.00000001) {
        System.out.println("Stopped due to tolerance.");

        r.put("lambda", getLambda(data, mu, sigma, m, t, n));
        r.put("mu", mu);
        r.put("sigma", sigma);
        r.put("iterations", (double) iteration);

        return r;
    } else {

        l = getPartialsData(getIterationData(data, mu, sigma, m));

        double[] array_xn = { mu, sigma };
        double[] array_yn = { l.get("fn"), l.get("gn") };

        RealMatrix xn = MatrixUtils.createColumnRealMatrix(array_xn);
        RealMatrix yn = MatrixUtils.createColumnRealMatrix(array_yn);

        //http://commons.apache.org/proper/commons-math/userguide/linear.html

        double[][] array_jacobian = { { l.get("df_dmu"), l.get("df_dsigma") },
                { l.get("dg_dmu"), l.get("dg_dsigma") } };
        RealMatrix jacobian = MatrixUtils.createRealMatrix(array_jacobian);
        LUDecomposition lud = new LUDecomposition(jacobian);
        DecompositionSolver decS = lud.getSolver();

        l.put("iteration", (double) (iteration + 1));
        //DEBUG: printList(l);

        if (!decS.isNonSingular()) {
            l.put("iteration", (double) (max_iteration + 1));
            System.err.println("ERROR: Jacobian matrix was singular.");
        } else {

            RealMatrix solution = xn.subtract(decS.getInverse().multiply(yn));

            RealMatrix xndiff = solution.subtract(xn);
            tolerance = Math.sqrt(Math.pow(xndiff.getEntry(0, 0), 2) + Math.pow(xndiff.getEntry(1, 0), 2));

            //update values
            l.put("mu", solution.getEntry(0, 0));
            l.put("sigma", solution.getEntry(1, 0));

            //DEBUG: System.out.printf("\"%-20s=%25f\"\n", "NEW MU", l.get("mu"));
            //DEBUG: System.out.printf("\"%-20s=%25f\"\n",  "NEW SIGMA", l.get("sigma"));
        }
        //DEBUG: System.out.println("****************************************");

        return newtonRaphson(data, l.get("mu"), l.get("sigma"), m, t, n, l, l.get("iteration").intValue(),
                max_iteration, tolerance);
    }
}
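
The matrix core of the update above is x_{n+1} = x_n - J^-1 * f(x_n). The sketch below applies the same step to a made-up two-variable system (not the WSB likelihood equations), solving J*step = f(x_n) with the LU solver instead of forming the inverse explicitly.

import org.apache.commons.math3.linear.LUDecomposition;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;

public class NewtonRaphsonSketch {
    public static void main(String[] args) {
        // Toy system: f1 = x^2 + y^2 - 4, f2 = x - y  (root at x = y = sqrt(2)).
        RealMatrix xn = MatrixUtils.createColumnRealMatrix(new double[] { 2.0, 1.0 });

        for (int i = 0; i < 20; i++) {
            double x = xn.getEntry(0, 0);
            double y = xn.getEntry(1, 0);
            RealMatrix fn = MatrixUtils.createColumnRealMatrix(
                    new double[] { x * x + y * y - 4.0, x - y });
            RealMatrix jacobian = MatrixUtils.createRealMatrix(
                    new double[][] { { 2.0 * x, 2.0 * y }, { 1.0, -1.0 } });

            // Newton step: x_{n+1} = x_n - J^-1 * f(x_n), computed by solving J*step = f(x_n).
            RealMatrix step = new LUDecomposition(jacobian).getSolver().solve(fn);
            RealMatrix next = xn.subtract(step);

            boolean converged = next.subtract(xn).getNorm() < 1e-12;
            xn = next;
            if (converged) {
                break;
            }
        }
        System.out.println(xn); // approximately (1.41421..., 1.41421...)
    }
}

Solving the linear system directly is usually cheaper and better conditioned than decS.getInverse().multiply(yn) as in the snippet above, although for a 2x2 Jacobian the difference is negligible.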

From source file: edu.cudenver.bios.power.GLMMPowerCalculator.java

/**
 * Perform any preliminary calculations / updates on the input matrices
 * @param params input parameters
 */
private void initialize(GLMMPowerParameters params) {
    debug("entering initialize");

    // TODO: why isn't this done in PowerResourceHelper.studyDesignToPowerParameters?
    // if no power methods are specified, add conditional as a default
    if (params.getPowerMethodList().size() <= 0)
        params.addPowerMethod(PowerMethod.CONDITIONAL_POWER);

    // update sigma error and beta if we have a baseline covariate
    RealMatrix sigmaG = params.getSigmaGaussianRandom();
    debug("sigmaG", sigmaG);

    int numRandom = sigmaG != null ? sigmaG.getRowDimension() : 0;
    if (numRandom == 1) {
        // set the sigma error matrix to [sigmaY - sigmaYG * sigmaG-1 * sigmaGY]
        RealMatrix sigmaY = params.getSigmaOutcome();
        debug("sigmaY", sigmaY);

        RealMatrix sigmaYG = params.getSigmaOutcomeGaussianRandom();
        debug("sigmaYG", sigmaYG);

        RealMatrix sigmaGY = sigmaYG.transpose();
        debug("sigmaYG transpose", sigmaGY);

        RealMatrix sigmaGInverse = new LUDecomposition(sigmaG).getSolver().getInverse();
        debug("sigmaG inverse", sigmaGInverse);

        RealMatrix sigmaError = forceSymmetric(
                sigmaY.subtract(sigmaYG.multiply(sigmaGInverse.multiply(sigmaGY))));
        debug("sigmaError = sigmaY - sigmaYG * sigmaG inverse * sigmaYG transpose", sigmaError);

        if (!MatrixUtils.isPositiveSemidefinite(sigmaError)) {
            throw new IllegalArgumentException(SIGMA_ERROR_NOT_POSITIVE_SEMIDEFINITE_MESSAGE);
        }
        params.setSigmaError(sigmaError);

        // calculate the betaG matrix and fill in the placeholder row for the random predictor
        FixedRandomMatrix beta = params.getBeta();
        beta.updateRandomMatrix(sigmaGInverse.multiply(sigmaGY));
        debug("beta with random part updated to sigmaG inverse * sigmaYG transpose", beta.getCombinedMatrix());
    }

    debug("exiting initialize");
}

From source file: org.knime.al.util.noveltydetection.knfst.KNFST.java

public static RealMatrix projection(final RealMatrix kernelMatrix, final String[] labels)
        throws KNFSTException {

    final ClassWrapper[] classes = ClassWrapper.classes(labels);

    // check labels
    if (classes.length == 1) {
        throw new IllegalArgumentException(
                "not able to calculate a nullspace from data of a single class using KNFST (input variable \"labels\" only contains a single value)");
    }

    // check kernel matrix
    if (!kernelMatrix.isSquare()) {
        throw new IllegalArgumentException("The KernelMatrix must be quadratic!");
    }

    // calculate weights of orthonormal basis in kernel space
    final RealMatrix centeredK = centerKernelMatrix(kernelMatrix);

    final EigenDecomposition eig = new EigenDecomposition(centeredK);
    final double[] eigVals = eig.getRealEigenvalues();
    final ArrayList<Integer> nonZeroEigValIndices = new ArrayList<Integer>();
    for (int i = 0; i < eigVals.length; i++) {
        if (eigVals[i] > 1e-12) {
            nonZeroEigValIndices.add(i);
        }
    }

    int eigIterator = 0;
    final RealMatrix eigVecs = eig.getV();
    RealMatrix basisvecs = null;
    try {
        basisvecs = MatrixUtils.createRealMatrix(eigVecs.getRowDimension(), nonZeroEigValIndices.size());
    } catch (final Exception e) {
        throw new KNFSTException("Something went wrong. Try different parameters or a different kernel.");
    }

    for (final Integer index : nonZeroEigValIndices) {
        final double normalizer = 1 / Math.sqrt(eigVals[index]);
        final RealVector basisVec = eigVecs.getColumnVector(eigIterator).mapMultiply(normalizer);
        basisvecs.setColumnVector(eigIterator++, basisVec);
    }

    // calculate transformation T of within class scatter Sw:
    // T= B'*K*(I-L) and L a block matrix
    final RealMatrix L = kernelMatrix.createMatrix(kernelMatrix.getRowDimension(),
            kernelMatrix.getColumnDimension());
    int start = 0;
    for (final ClassWrapper cl : classes) {
        final int count = cl.getCount();
        L.setSubMatrix(MatrixFunctions.ones(count, count).scalarMultiply(1.0 / count).getData(), start, start);
        start += count;
    }

    // need a matrix M with all entries 1/m to modify basisvecs, which allows
    // usage of uncentered kernel values: (eye(size(M)) - M) * basisvecs
    final RealMatrix M = MatrixFunctions
            .ones(kernelMatrix.getColumnDimension(), kernelMatrix.getColumnDimension())
            .scalarMultiply(1.0 / kernelMatrix.getColumnDimension());
    final RealMatrix I = MatrixUtils.createRealIdentityMatrix(M.getColumnDimension());

    // compute helper matrix H
    final RealMatrix H = ((I.subtract(M)).multiply(basisvecs)).transpose().multiply(kernelMatrix)
            .multiply(I.subtract(L));

    // T = H*H' = B'*Sw*B with B=basisvecs
    final RealMatrix T = H.multiply(H.transpose());

    // calculate weights for null space
    RealMatrix eigenvecs = MatrixFunctions.nullspace(T);

    if (eigenvecs == null) {
        final EigenDecomposition eigenComp = new EigenDecomposition(T);
        final double[] eigenvals = eigenComp.getRealEigenvalues();
        eigenvecs = eigenComp.getV();
        final int minId = MatrixFunctions.argmin(MatrixFunctions.abs(eigenvals));
        final double[] eigenvecsData = eigenvecs.getColumn(minId);
        eigenvecs = MatrixUtils.createColumnRealMatrix(eigenvecsData);
    }

    // System.out.println("eigenvecs:");
    // test.printMatrix(eigenvecs);

    // calculate null space projection
    final RealMatrix proj = ((I.subtract(M)).multiply(basisvecs)).multiply(eigenvecs);

    return proj;
}
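
The helper centerKernelMatrix is not shown in this snippet. A common formulation, and only an assumption about what that helper does here, is Kc = (I - M) * K * (I - M) with M holding the constant 1/n; it uses subtract in the same way as the projection step above.

import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;

public class KernelCenteringSketch {

    // Center a kernel matrix: Kc = (I - M) * K * (I - M), where M has all entries 1/n.
    static RealMatrix centerKernelMatrix(RealMatrix kernelMatrix) {
        int n = kernelMatrix.getColumnDimension();
        RealMatrix I = MatrixUtils.createRealIdentityMatrix(n);
        double[][] ones = new double[n][n];
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < n; j++) {
                ones[i][j] = 1.0 / n;
            }
        }
        RealMatrix M = MatrixUtils.createRealMatrix(ones);
        RealMatrix centering = I.subtract(M);
        return centering.multiply(kernelMatrix).multiply(centering);
    }

    public static void main(String[] args) {
        RealMatrix K = MatrixUtils.createRealMatrix(new double[][] {
                { 1.0, 0.5, 0.2 },
                { 0.5, 1.0, 0.3 },
                { 0.2, 0.3, 1.0 } });
        System.out.println(centerKernelMatrix(K)); // rows and columns now sum to roughly zero
    }
}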