Example usage for org.apache.commons.math3.linear RealMatrix getColumnDimension

Introduction

This page collects examples of how org.apache.commons.math3.linear RealMatrix getColumnDimension is used in real projects.

Prototype

int getColumnDimension();

Document

Returns the number of columns in the matrix.
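
Before the project examples below, here is a minimal, self-contained sketch of the method on a small matrix (the class and variable names are illustrative only; Array2DRowRealMatrix is the standard dense implementation from commons-math3):

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.RealMatrix;

public class ColumnDimensionExample {
    public static void main(String[] args) {
        // A 2 x 3 matrix backed by a double[][] array
        RealMatrix matrix = new Array2DRowRealMatrix(new double[][] {
                { 1.0, 2.0, 3.0 },
                { 4.0, 5.0, 6.0 }
        });

        // getColumnDimension() reports the number of columns, getRowDimension() the number of rows
        System.out.println(matrix.getColumnDimension()); // 3
        System.out.println(matrix.getRowDimension());    // 2
    }
}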

Usage

From source file:com.clust4j.algo.AbstractClusterer.java

final private Array2DRowRealMatrix initData(final RealMatrix data) {
    final int m = data.getRowDimension(), n = data.getColumnDimension();
    final double[][] ref = new double[m][n];
    final HashSet<Double> unique = new HashSet<>();

    // Used to compute variance on the fly for summaries later...
    double[] sum = new double[n];
    double[] sumSq = new double[n];
    double[] maxes = VecUtils.rep(Double.NEGATIVE_INFINITY, n);
    double[] mins = VecUtils.rep(Double.POSITIVE_INFINITY, n);

    // This will store summaries for each column + a header
    ModelSummary summaries = new ModelSummary(
            new Object[] { "Feature #", "Variance", "Std. Dev", "Mean", "Max", "Min" });

    /*
     * Internally performs the copy
     */
    double entry;
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++) {
            entry = data.getEntry(i, j);

            if (Double.isNaN(entry)) {
                error(new NaNException("NaN in input data. " + "Select a matrix imputation method for "
                        + "incomplete records"));
            } else {
                // copy the entry
                ref[i][j] = entry;
                unique.add(entry);

                // capture stats...
                sumSq[j] += entry * entry;
                sum[j] += entry;
                maxes[j] = FastMath.max(entry, maxes[j]);
                mins[j] = FastMath.min(entry, mins[j]);

                // if it's the last row, we can compute these:
                if (i == m - 1) {
                    double var = (sumSq[j] - (sum[j] * sum[j]) / (double) m) / ((double) m - 1.0);
                    if (var == 0) {
                        warn("zero variance in feature " + j);
                    }

                    summaries.add(new Object[] { j, // feature num
                            var, // var
                            m < 2 ? Double.NaN : FastMath.sqrt(var), // std dev
                            sum[j] / (double) m, // mean
                            maxes[j], // max
                            mins[j] // min
                    });
                }
            }
        }
    }

    // Log the summaries
    summaryLogger(formatter.format(summaries));

    if (unique.size() == 1)
        this.singular_value = true;

    /*
     * Don't need to copy again, because already internally copied...
     */
    return new Array2DRowRealMatrix(ref, false);
}

From source file:com.github.thorbenlindhauer.factor.GaussianFactorTest.java

@Test
public void testFactorMarginalCase1() {
    Scope variables = newScope(new ContinuousVariable("A"), new ContinuousVariable("C"));

    GaussianFactor acMarginal = abcFactor.marginal(variables);

    // then
    Collection<Variable> newVariables = acMarginal.getVariables().getVariables();
    assertThat(newVariables).hasSize(2);
    assertThat(newVariables).containsAll(variables.getVariables());

    // precision matrix: K_xx - K_xy * K_yy^(-1) * K_yx
    RealMatrix precisionMatrix = acMarginal.getPrecisionMatrix();
    assertThat(precisionMatrix.isSquare()).isTrue();
    assertThat(precisionMatrix.getColumnDimension()).isEqualTo(2);

    double[] row = precisionMatrix.getRowVector(0).toArray();
    assertThat(row[0]).isEqualTo(1, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[1]).isEqualTo(4.0d / 3.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    row = precisionMatrix.getRowVector(1).toArray();
    assertThat(row[0]).isEqualTo(8.5d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[1]).isEqualTo(2.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    // scaled mean vector: h_x - K_xy * K_yy^(-1) * h_y
    RealVector scaledMeanVector = acMarginal.getScaledMeanVector();
    assertThat(scaledMeanVector.getDimension()).isEqualTo(2);

    double[] meanValues = scaledMeanVector.toArray();
    assertThat(meanValues[0]).isEqualTo(5.0d / 3.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(meanValues[1]).isEqualTo(0.5d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    // normalization constant: g + 0.5 * (log( det( 2 * PI * K_yy^(-1))) + h_y * K_yy^(-1) * h_y)
    assertThat(acMarginal.getNormalizationConstant()).isEqualTo(8.856392131,
            TestConstants.DOUBLE_VALUE_TOLERANCE);
}
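
The marginal above follows the canonical-form formulas quoted in the test comments: K_xx - K_xy * K_yy^(-1) * K_yx for the precision block and h_x - K_xy * K_yy^(-1) * h_y for the scaled mean. The following sketch spells out that arithmetic with plain commons-math operations; the block values and names are illustrative assumptions, not taken from GaussianFactor's implementation:

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.LUDecomposition;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.RealVector;

public class MarginalSketch {
    public static void main(String[] args) {
        // Illustrative blocks of a joint precision matrix over X (2 variables) and Y (1 variable)
        RealMatrix Kxx = new Array2DRowRealMatrix(new double[][] { { 2.0, 0.5 }, { 0.5, 1.5 } });
        RealMatrix Kxy = new Array2DRowRealMatrix(new double[][] { { 0.3 }, { 0.1 } });
        RealMatrix Kyy = new Array2DRowRealMatrix(new double[][] { { 4.0 } });
        RealVector hx = new ArrayRealVector(new double[] { 1.0, 2.0 });
        RealVector hy = new ArrayRealVector(new double[] { 0.5 });

        // K_yy^(-1), inverted the same way other examples on this page do
        RealMatrix KyyInv = new LUDecomposition(Kyy).getSolver().getInverse();

        // precision matrix of the marginal: K_xx - K_xy * K_yy^(-1) * K_yx
        RealMatrix marginalPrecision = Kxx.subtract(Kxy.multiply(KyyInv).multiply(Kxy.transpose()));

        // scaled mean vector of the marginal: h_x - K_xy * K_yy^(-1) * h_y
        RealVector marginalScaledMean = hx.subtract(Kxy.multiply(KyyInv).operate(hy));

        // the marginal keeps the dimension of X, which is what the assertions above check
        System.out.println(marginalPrecision.getColumnDimension()); // 2
        System.out.println(marginalScaledMean.getDimension());      // 2
    }
}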

From source file:com.github.thorbenlindhauer.factor.GaussianFactorTest.java

@Test
public void testFactorDivision() {
    GaussianFactor quotient = abcFactor.division(abFactor);

    // then
    Collection<Variable> newVariables = quotient.getVariables().getVariables();
    assertThat(newVariables).hasSize(3);
    assertThat(newVariables).containsAll(abcFactor.getVariables().getVariables());
    assertThat(newVariables).containsAll(abFactor.getVariables().getVariables());

    // precision matrix
    RealMatrix precisionMatrix = quotient.getPrecisionMatrix();
    assertThat(precisionMatrix.isSquare()).isTrue();
    assertThat(precisionMatrix.getColumnDimension()).isEqualTo(3);

    double[] row = precisionMatrix.getRowVector(0).toArray();
    assertThat(row[0]).isEqualTo(-2.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[1]).isEqualTo(3.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[2]).isEqualTo(6.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    row = precisionMatrix.getRowVector(1).toArray();
    assertThat(row[0]).isEqualTo(2.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[1]).isEqualTo(4.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[2]).isEqualTo(7.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    row = precisionMatrix.getRowVector(2).toArray();
    assertThat(row[0]).isEqualTo(10.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[1]).isEqualTo(3.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[2]).isEqualTo(5.5d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    // scaled mean vector
    RealVector scaledMeanVector = quotient.getScaledMeanVector();
    assertThat(scaledMeanVector.getDimension()).isEqualTo(3);

    double[] meanVectorValues = scaledMeanVector.toArray();
    assertThat(meanVectorValues[0]).isEqualTo(0.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(meanVectorValues[1]).isEqualTo(0.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(meanVectorValues[2]).isEqualTo(1.5d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    assertThat(quotient.getNormalizationConstant()).isEqualTo(6.3d, TestConstants.DOUBLE_VALUE_TOLERANCE);
}

From source file:edu.cudenver.bios.power.glmm.GLMMTestUnivariateRepeatedMeasures.java

/**
 * Compute a Univariate Approach to Repeated Measures statistic
 *
 * @param H hypothesis sum of squares matrix
 * @param E error sum of squares matrix
 * @return F statistic
 */
protected double getUnirep(RealMatrix H, RealMatrix E) {
    if (!H.isSquare() || !E.isSquare() || H.getColumnDimension() != E.getRowDimension())
        throw new IllegalArgumentException(
                "Failed to compute Unirep statistic: hypothesis and error matrices must be square and same dimensions");

    return H.getTrace() / E.getTrace();
}

From source file:com.github.thorbenlindhauer.factor.GaussianFactorTest.java

@Test
public void testFactorProduct() {
    // when
    GaussianFactor product = acFactor.product(abFactor);

    // then
    Collection<Variable> newVariables = product.getVariables().getVariables();
    assertThat(newVariables).hasSize(3);
    assertThat(newVariables).containsAll(acFactor.getVariables().getVariables());
    assertThat(newVariables).containsAll(abFactor.getVariables().getVariables());

    // precision matrix
    RealMatrix precisionMatrix = product.getPrecisionMatrix();
    assertThat(precisionMatrix.isSquare()).isTrue();
    assertThat(precisionMatrix.getColumnDimension()).isEqualTo(3);

    double[] row = precisionMatrix.getRowVector(0).toArray();
    assertThat(row[0]).isEqualTo(6.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[1]).isEqualTo(1.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[2]).isEqualTo(2.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    row = precisionMatrix.getRowVector(1).toArray();
    assertThat(row[0]).isEqualTo(1.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[1]).isEqualTo(2.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[2]).isEqualTo(0.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    row = precisionMatrix.getRowVector(2).toArray();
    assertThat(row[0]).isEqualTo(3.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[1]).isEqualTo(0.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[2]).isEqualTo(1.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    // scaled mean vector
    RealVector scaledMeanVector = product.getScaledMeanVector();
    assertThat(scaledMeanVector.getDimension()).isEqualTo(3);

    double[] meanVectorValues = scaledMeanVector.toArray();
    assertThat(meanVectorValues[0]).isEqualTo(8.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(meanVectorValues[1]).isEqualTo(2.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(meanVectorValues[2]).isEqualTo(6.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    assertThat(product.getNormalizationConstant()).isEqualTo(7.7d, TestConstants.DOUBLE_VALUE_TOLERANCE);

}

From source file:com.itemanalysis.psychometrics.factoranalysis.GPArotation.java

private RotationResults GPFoblq(RealMatrix A, boolean normalize, int maxIter, double eps)
        throws ConvergenceException {
    int ncol = A.getColumnDimension();

    RealMatrix Tinner = null;
    RealMatrix TinnerInv = null;
    RealMatrix Tmat = new IdentityMatrix(ncol);

    if (normalize) {
        //elementwise division by normalizing weights
        final RealMatrix W = getNormalizingWeights(A, true);
        A.walkInRowOrder(new DefaultRealMatrixChangingVisitor() {
            @Override
            public double visit(int row, int column, double value) {
                return value / W.getEntry(row, column);
            }
        });
    }

    RealMatrix TmatInv = new LUDecomposition(Tmat).getSolver().getInverse();
    RealMatrix L = A.multiply(TmatInv.transpose());

    //compute gradientAt and function value
    gpFunction.computeValues(L);
    RealMatrix VgQ = gpFunction.getGradient();
    RealMatrix VgQt = VgQ;
    double f = gpFunction.getValue();
    double ft = f;
    RealMatrix G = ((L.transpose().multiply(VgQ).multiply(TmatInv)).transpose()).scalarMultiply(-1.0);

    int iter = 0;
    double alpha = 1.0;
    double s = eps + 0.5;
    double s2 = Math.pow(s, 2);
    int innerMaxIter = 10;
    int innerCount = 0;

    IdentityMatrix I = new IdentityMatrix(G.getRowDimension());
    RealMatrix V1 = MatrixUtils.getVector(ncol, 1.0);

    while (iter < maxIter) {
        RealMatrix M = MatrixUtils.multiplyElements(Tmat, G);
        RealMatrix diagP = new DiagonalMatrix(V1.multiply(M).getRow(0));
        RealMatrix Gp = G.subtract(Tmat.multiply(diagP));
        s = Math.sqrt(Gp.transpose().multiply(Gp).getTrace());
        s2 = Math.pow(s, 2);

        if (s < eps) {
            break;
        }
        alpha = 2.0 * alpha;

        innerCount = 0;
        for (int i = 0; i < innerMaxIter; i++) {
            RealMatrix X = Tmat.subtract(Gp.scalarMultiply(alpha));
            RealMatrix X2 = MatrixUtils.multiplyElements(X, X);
            RealMatrix V = V1.multiply(X2);
            V.walkInRowOrder(new DefaultRealMatrixChangingVisitor() {
                @Override
                public double visit(int row, int column, double value) {
                    return 1.0 / Math.sqrt(value);
                }
            });

            //compute new value of T, its inverse, and the rotated loadings
            RealMatrix diagV = new DiagonalMatrix(V.getRow(0));
            Tinner = X.multiply(diagV);
            TinnerInv = new LUDecomposition(Tinner).getSolver().getInverse();
            L = A.multiply(TinnerInv.transpose());

            //compute new values of the gradientAt and the rotation criteria
            gpFunction.computeValues(L);
            VgQt = gpFunction.getGradient();
            ft = gpFunction.getValue();

            innerCount++;
            if (ft < f - 0.5 * s2 * alpha) {
                break;
            }
            alpha = alpha / 2.0;
        }

        //            System.out.println(iter + "  " + f + "  " + s + "  " + Math.log10(s) + "  " + alpha + "  " + innerCount);

        Tmat = Tinner;
        f = ft;
        G = (L.transpose().multiply(VgQt).multiply(TinnerInv)).transpose().scalarMultiply(-1.0);
        iter++;
    }

    boolean convergence = s < eps;
    if (!convergence) {
        throw new ConvergenceException();
    }

    if (normalize) {
        //elementwise multiplication by normalizing weights
        final RealMatrix W = getNormalizingWeights(A, true);
        A.walkInRowOrder(new DefaultRealMatrixChangingVisitor() {
            @Override
            public double visit(int row, int column, double value) {
                return value * W.getEntry(row, column);
            }
        });
    }

    RealMatrix Phi = Tmat.transpose().multiply(Tmat);
    RotationResults result = new RotationResults(gpFunction.getValue(), L, Phi, Tmat, rotationMethod);
    return result;

}

From source file:com.github.thorbenlindhauer.factor.GaussianFactorTest.java

@Test
public void testValueObservation() {
    GaussianFactor reducedVector = abcFactor.observation(newScope(new ContinuousVariable("A")),
            new double[] { 2.5d });

    // then
    Collection<Variable> newVariables = reducedVector.getVariables().getVariables();
    assertThat(newVariables).hasSize(2);
    assertThat(newVariables).contains(new ContinuousVariable("B"), new ContinuousVariable("C"));

    //  B     C    A
    //6.0d, 7.0d, 3.0d
    //3.0d, 5.5d, 10.0d
    //4.0d, 6.0d, 3.0d
    //
    // X = {B, C}, Y = {A}

    // precision matrix: K_xx
    RealMatrix precisionMatrix = reducedVector.getPrecisionMatrix();
    assertThat(precisionMatrix.isSquare()).isTrue();
    assertThat(precisionMatrix.getColumnDimension()).isEqualTo(2);

    double precision = precisionMatrix.getEntry(0, 0);
    assertThat(precision).isEqualTo(6.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    precision = precisionMatrix.getEntry(0, 1);
    assertThat(precision).isEqualTo(7.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    precision = precisionMatrix.getEntry(1, 0);
    assertThat(precision).isEqualTo(3.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    precision = precisionMatrix.getEntry(1, 1);
    assertThat(precision).isEqualTo(5.5d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    // scaled mean vector: h_x - K_xy * y
    RealVector scaledMeanVector = reducedVector.getScaledMeanVector();
    assertThat(scaledMeanVector.getDimension()).isEqualTo(2);

    double meanValue = scaledMeanVector.getEntry(0);
    assertThat(meanValue).isEqualTo(-5.5d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    meanValue = scaledMeanVector.getEntry(1);
    assertThat(meanValue).isEqualTo(-23.5d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    // normalization constant: g + h_y * y - 0.5 * (y * K_yy * y)
    //                         8.5 + 7.5 - 9.375
    assertThat(reducedVector.getNormalizationConstant()).isEqualTo(6.625d,
            TestConstants.DOUBLE_VALUE_TOLERANCE);
}
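
The reduction in this test follows the conditioning formulas quoted in the comments: the precision matrix shrinks to K_xx, the scaled mean becomes h_x - K_xy * y, and the normalization constant becomes g + h_y * y - 0.5 * (y * K_yy * y), i.e. 8.5 + 7.5 - 9.375 = 6.625 here. A small sketch of that last calculation; the scalar values are inferred from the comments (g = 8.5, observed y = 2.5) or assumed for illustration (h_y = 3.0, K_yy = 3.0), not read from the factor itself:

public class ObservationConstantSketch {
    public static void main(String[] args) {
        double g = 8.5;   // original normalization constant (from the comment above)
        double y = 2.5;   // observed value of A
        double hY = 3.0;  // assumed scaled mean entry for A
        double kYY = 3.0; // assumed precision entry K_AA

        // normalization constant after observing A = y: g + h_y * y - 0.5 * (y * K_yy * y)
        double constant = g + hY * y - 0.5 * (y * kYY * y);
        System.out.println(constant); // 6.625
    }
}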

From source file:com.itemanalysis.psychometrics.factoranalysis.GeominCriteria.java

public void computeValues(RealMatrix L) {
    //        vgQ.geomin <- function(L, delta=.01){
    //            k <- ncol(L)
    //            p <- nrow(L)
    //            L2 <- L^2 + delta
    //            pro <- exp(rowSums(log(L2))/k)
    //            list(Gq=(2/k)*(L/L2)*matrix(rep(pro,k),p),
    //                    f= sum(pro),
    //                    Method="Geomin")
    //        }

    int p = L.getRowDimension();
    int k = L.getColumnDimension();
    final RealMatrix L2 = MatrixUtils.multiplyElements(L, L).scalarAdd(delta);
    final double[] rowSums = new double[p];

    L2.walkInRowOrder(new DefaultRealMatrixPreservingVisitor() {
        @Override
        public void visit(int row, int column, double value) {
            rowSums[row] += Math.log(value);
        }
    });

    double sum = 0.0;
    for (int i = 0; i < p; i++) {
        rowSums[i] = Math.exp(rowSums[i] / (double) k);
        sum += rowSums[i];
    }
    functionValue = sum;

    final RealMatrix M = new Array2DRowRealMatrix(p, k);
    M.walkInRowOrder(new DefaultRealMatrixChangingVisitor() {
        @Override
        public double visit(int row, int column, double value) {
            return rowSums[row];
        }
    });

    final double c = (2.0 / (double) k);
    gradient = L.copy();
    gradient.walkInRowOrder(new DefaultRealMatrixChangingVisitor() {
        @Override
        public double visit(int row, int column, double value) {
            return c * (value / L2.getEntry(row, column)) * M.getEntry(row, column);
        }
    });

}

From source file:eagle.security.userprofile.model.UserProfileEigenModelerTest.java

@org.junit.Test
public void testBuild() throws Exception {

    UserProfileEigenModeler modeler = new UserProfileEigenModeler();
    String user = "user1";
    final RealMatrix mockMatrix = new Array2DRowRealMatrix(buildMockData());
    List<UserProfileEigenModel> model = modeler.build("default", user, mockMatrix);
    Assert.assertEquals(model.length(), 1);

    UserProfileEigenModel eigenModel = model.head();
    Assert.assertNotNull(eigenModel.statistics());
    Assert.assertNotNull(eigenModel.principalComponents());
    Assert.assertNotNull(eigenModel.maxVector());
    Assert.assertNotNull(eigenModel.minVector());
    Assert.assertEquals(eigenModel.statistics().length, mockMatrix.getColumnDimension());
    Assert.assertTrue(eigenModel.principalComponents().length <= mockMatrix.getColumnDimension());
    Assert.assertTrue(eigenModel.maxVector().getDimension() <= mockMatrix.getColumnDimension());
    Assert.assertTrue(eigenModel.minVector().getDimension() <= mockMatrix.getColumnDimension());
    Assert.assertEquals(true, eigenModel.statistics()[3].isLowVariant());
}

From source file:com.cloudera.oryx.common.math.QRDecomposition.java

/**
 * Calculates the QR-decomposition of the given matrix.
 *
 * @param matrix The matrix to decompose.
 * @param threshold Singularity threshold.
 */
public QRDecomposition(RealMatrix matrix, double threshold) {
    this.threshold = threshold;

    final int m = matrix.getRowDimension();
    final int n = matrix.getColumnDimension();
    qrt = matrix.transpose().getData();
    rDiag = new double[Math.min(m, n)];
    cachedQ = null;
    cachedQT = null;
    cachedR = null;
    cachedH = null;

    decompose(qrt);

}