Example usage for org.apache.commons.math3.linear RealMatrix getColumnVector

List of usage examples for org.apache.commons.math3.linear RealMatrix getColumnVector

Introduction

On this page you can find example usage of org.apache.commons.math3.linear RealMatrix getColumnVector.

Prototype

RealVector getColumnVector(int column) throws OutOfRangeException;

Document

Get the entries at the given column index as a vector.
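Before the project examples below, here is a minimal self-contained sketch (not taken from any of the listed projects; the class name and matrix values are arbitrary illustrations) showing how a column is read as a RealVector and used:

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.RealVector;

public class GetColumnVectorDemo {
    public static void main(String[] args) {
        // 3x2 matrix filled with arbitrary example values
        RealMatrix matrix = new Array2DRowRealMatrix(new double[][] {
                { 1.0, 4.0 },
                { 2.0, 5.0 },
                { 3.0, 6.0 }
        });

        // extract the second column (index 1) as a RealVector
        RealVector column = matrix.getColumnVector(1);

        System.out.println("column = " + column);             // {4; 5; 6}
        System.out.println("L2 norm = " + column.getNorm());  // sqrt(16 + 25 + 36)

        // an index outside [0, columnDimension - 1] throws OutOfRangeException
        // matrix.getColumnVector(5);
    }
}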

Usage

From source file:edu.washington.gs.skyline.model.quantification.LinearModel.java

private static RealMatrix computeMatrixCrossproduct(RealMatrix matrix, int[] columnIndexes) {
    RealMatrix result = MatrixUtils.createRealMatrix(columnIndexes.length, columnIndexes.length);
    for (int iRow = 0; iRow < columnIndexes.length; iRow++) {
        for (int iCol = 0; iCol < columnIndexes.length; iCol++) {
            // dot product of the two selected design-matrix columns (selected via columnIndexes)
            double dotProduct = matrix.getColumnVector(columnIndexes[iRow])
                    .dotProduct(matrix.getColumnVector(columnIndexes[iCol]));
            result.setEntry(iRow, iCol, dotProduct);
        }
    }
    return result;
}

From source file:com.cloudera.oryx.kmeans.common.ClusterValidityStatistics.java

/**
 * Calculates the normalized van Dongen criterion for the contingency matrix.
 *
 * @return the normalized van Dongen criterion for the contingency matrix
 */
private static double normVanDongen(RealMatrix contingencyMatrix, double[] rowSums, double[] colSums,
        double n) {
    double rs = 0.0;
    double cs = 0.0;
    double rmax = 0.0;
    double cmax = 0.0;
    for (int i = 0; i < rowSums.length; i++) {
        rs += contingencyMatrix.getRowVector(i).getLInfNorm();
        cs += contingencyMatrix.getColumnVector(i).getLInfNorm();
        rmax = Math.max(rmax, rowSums[i]);
        cmax = Math.max(cmax, colSums[i]);
    }
    double den = 2 * n - rmax - cmax;
    if (den == 0.0) {
        return Double.NaN;
    }
    return (2 * n - rs - cs) / den;
}

From source file:com.github.thorbenlindhauer.factor.CanonicalGaussianFactor.java

/**
 * Transforms a conditional linear Gaussian (i.e. a Gaussian of the form N(x; a + B^T * Y, C)) into canonical form.
 *
 * @param meanVector a
 * @param weightMatrix B
 * @param covarianceMatrix C
 */
public static CanonicalGaussianFactor fromConditionalForm(Scope scope, Scope conditioningScope,
        RealVector meanVector, RealMatrix covarianceMatrix, RealMatrix weightMatrix) {

    // TODO: perform cardinality checks etc.

    // the following assumes that the resulting precision matrix (and mean vector) can be restructured as follows:
    // ( SUBMATRIX_XX SUBMATRIX_XY )
    // ( SUBMATRIX_YX SUBMATRIX_YY )
    // where X indicates variables that are part of the prediction scope and Y are variables being part of the conditioning scope

    // assuming
    //   meanVector: a; |x| vector
    //   covarianceMatrix: C; |x| cross |x| matrix
    //   weightMatrix: B;  |x| cross |y| matrix

    // XX = C^-1
    // XY = -C^-1 * B
    // YX = -B^T * C^-1
    // YY = B^T * C^-1 * B

    MathUtil mathUtil = new MathUtil(covarianceMatrix);

    // C^(-1)
    RealMatrix xxMatrix = mathUtil.invert();

    //    if (!mathUtil.isZeroMatrix()) {
    //      xxMatrix = mathUtil.invert();
    //    } else {
    //
    //      // this is a special case for convolution in which the "summing" variable has no variance itself
    //      // although a 0 variance is not valid in general
    //      xxMatrix = covarianceMatrix;
    //    }

    // B^T * C^(-1)
    RealMatrix weightedXXMatrix = weightMatrix.transpose().multiply(xxMatrix);

    // -B^T * C^(-1)
    RealMatrix yxMatrix = weightedXXMatrix.scalarMultiply(-1.0d);

    // -C^(-1)^T * B
    RealMatrix xyMatrix = xxMatrix.transpose().multiply(weightMatrix).scalarMultiply(-1.0d);

    // B^T * C^(-1) * B
    RealMatrix yyMatrix = weightedXXMatrix.multiply(weightMatrix);

    // K
    RealMatrix conditionedPrecisionMatrix = new Array2DRowRealMatrix(scope.size(), scope.size());

    // Matrix to generate h
    RealMatrix conditionedMeanTransformationMatrix = new Array2DRowRealMatrix(scope.size(), scope.size());

    Scope predictionScope = scope.reduceBy(conditioningScope);
    int[] predictionMapping = scope.createContinuousVariableMapping(predictionScope);
    int[] conditioningMapping = scope.createContinuousVariableMapping(conditioningScope);

    for (int i = 0; i < scope.size(); i++) {
        RealVector precisionColumn = conditionedPrecisionMatrix.getColumnVector(i);

        if (predictionMapping[i] >= 0) {
            precisionColumn = precisionColumn.add(
                    padVector(xxMatrix.getColumnVector(predictionMapping[i]), scope.size(), predictionMapping));

            conditionedMeanTransformationMatrix.setColumnVector(i, precisionColumn);

            precisionColumn = precisionColumn.add(padVector(yxMatrix.getColumnVector(predictionMapping[i]),
                    scope.size(), conditioningMapping));

            conditionedPrecisionMatrix.setColumnVector(i, precisionColumn);
        }

        if (conditioningMapping[i] >= 0) {
            precisionColumn = precisionColumn.add(padVector(xyMatrix.getColumnVector(conditioningMapping[i]),
                    scope.size(), predictionMapping));

            conditionedMeanTransformationMatrix.setColumnVector(i, precisionColumn);

            precisionColumn = precisionColumn.add(padVector(yyMatrix.getColumnVector(conditioningMapping[i]),
                    scope.size(), conditioningMapping));

            conditionedPrecisionMatrix.setColumnVector(i, precisionColumn);
        }
    }

    // h = (a, 0) * (XX, XY; 0, 0)
    RealMatrix scaledMeanMatrix = new Array2DRowRealMatrix(1, scope.size());
    scaledMeanMatrix.setRowVector(0, padVector(meanVector, scope.size(), predictionMapping));

    scaledMeanMatrix = scaledMeanMatrix.multiply(conditionedMeanTransformationMatrix);
    RealVector scaledMeanVector = scaledMeanMatrix.getRowVector(0);

    // g = -0.5 * a^T * C^-1 * a - log((2 * PI)^(m/2) * det(C)^0.5) where m is the size of the prediction scope
    RealMatrix meanMatrix = new Array2DRowRealMatrix(predictionScope.size(), 1);
    meanMatrix.setColumnVector(0, meanVector);
    double normalizationConstant = -0.5d * meanVector.dotProduct(xxMatrix.operate(meanVector)) - Math.log(
            Math.pow(2 * Math.PI, (double) predictionScope.size() / 2.0d) * Math.sqrt(mathUtil.determinant()));

    return new CanonicalGaussianFactor(scope, conditionedPrecisionMatrix, scaledMeanVector,
            normalizationConstant);

}

From source file:com.caseystella.analytics.outlier.batch.rpca.AugmentedDickeyFuller.java

private void computeADFStatistics() {
    double[] y = diff(ts);
    RealMatrix designMatrix = null;
    int k = lag + 1;
    int n = ts.length - 1;

    RealMatrix z = MatrixUtils.createRealMatrix(laggedMatrix(y, k)); //has rows length(ts) - 1 - k + 1
    RealVector zcol1 = z.getColumnVector(0); //has length length(ts) - 1 - k + 1
    double[] xt1 = subsetArray(ts, k - 1, n - 1); //ts[k:(length(ts) - 1)], has length length(ts) - 1 - k + 1
    double[] trend = sequence(k, n); //trend k:n, has length length(ts) - 1 - k + 1
    if (k > 1) {
        RealMatrix yt1 = z.getSubMatrix(0, ts.length - 1 - k, 1, k - 1); //same as z but skips first column
        //build design matrix as cbind(xt1, 1, trend, yt1)
        designMatrix = MatrixUtils.createRealMatrix(ts.length - 1 - k + 1, 3 + k - 1);
        designMatrix.setColumn(0, xt1);
        designMatrix.setColumn(1, ones(ts.length - 1 - k + 1));
        designMatrix.setColumn(2, trend);
        designMatrix.setSubMatrix(yt1.getData(), 0, 3);

    } else {
        //build design matrix as cbind(xt1, 1, tt)
        designMatrix = MatrixUtils.createRealMatrix(ts.length - 1 - k + 1, 3);
        designMatrix.setColumn(0, xt1);
        designMatrix.setColumn(1, ones(ts.length - 1 - k + 1));
        designMatrix.setColumn(2, trend);
    }
    /*OLSMultipleLinearRegression regression = new OLSMultipleLinearRegression();
    regression.setNoIntercept(true);
    regression.newSampleData(zcol1.toArray(), designMatrix.getData());
    double[] beta = regression.estimateRegressionParameters();
    double[] sd = regression.estimateRegressionParametersStandardErrors();
    */
    RidgeRegression regression = new RidgeRegression(designMatrix.getData(), zcol1.toArray());
    regression.updateCoefficients(.0001);
    double[] beta = regression.getCoefficients();
    double[] sd = regression.getStandarderrors();

    double t = beta[0] / sd[0];
    this.needsDiff = (t <= PVALUE_THRESHOLD);
}

From source file:com.github.thorbenlindhauer.math.MathUtil.java

public RealMatrix invert() {
    if (!matrix.isSquare()) {
        throw new FactorOperationException("Cannot invert non-square matrix");
    }
    ensureLUDecompositionInitialized();

    int matrixDimension = matrix.getRowDimension();

    RealMatrix inverseMatrix = new Array2DRowRealMatrix(matrixDimension, matrixDimension);
    RealMatrix identityMatrix = MatrixUtils.createRealIdentityMatrix(matrixDimension);

    DecompositionSolver solver = luDecomposition.getSolver();

    for (int i = 0; i < matrixDimension; i++) {
        RealVector identityColumn = identityMatrix.getColumnVector(i);
        RealVector inverseColumn = solver.solve(identityColumn);

        inverseMatrix.setColumnVector(i, inverseColumn);
    }

    return inverseMatrix;

}

From source file:io.warp10.script.functions.TOVEC.java

@Override
public Object apply(WarpScriptStack stack) throws WarpScriptException {

    Object o = stack.pop();

    if (o instanceof RealMatrix) {
        RealMatrix matrix = (RealMatrix) o;

        if (1 != matrix.getColumnDimension()) {
            throw new WarpScriptException(
                    getName() + " expects a matrix with a single column on top of the stack.");
        }

        RealVector vector = matrix.getColumnVector(0);
        stack.push(vector);
        return stack;
    }

    if (!(o instanceof List)) {
        throw new WarpScriptException(getName() + " expects a list onto the stack.");
    }

    double[] data = new double[((List) o).size()];

    for (int i = 0; i < data.length; i++) {
        Object oo = ((List) o).get(i);
        if (!(oo instanceof Number)) {
            throw new WarpScriptException(getName() + " expects a list of numbers onto the stack.");
        }
        data[i] = ((Number) oo).doubleValue();
    }

    stack.push(MatrixUtils.createRealVector(data));

    return stack;
}

From source file:cooccurrence.Omer_Levy.java

/**
 * Extracts the top D singular values from the covariance matrix.
 * It then identifies the corresponding columns of U and V and adds them to the new matrices.
 * @param U
 * @param V
 * @param coVariance
 * @param matrixUd
 * @param matrixVd
 * @param coVarD
 * @param indicesD
 */
private static void getTopD(RealMatrix U, RealMatrix V, RealMatrix coVariance, RealMatrix matrixUd,
        RealMatrix matrixVd, RealMatrix coVarD, ArrayList<Integer> indicesD) {
    TreeMap<Double, Set<Integer>> tmap = new TreeMap<>();
    for (int i = 0; i < coVariance.getRowDimension(); i++) {
        double val = coVariance.getEntry(i, i);
        if (tmap.containsKey(val)) {
            Set<Integer> temp = tmap.get(val);
            temp.add(i);
        } else {
            Set<Integer> temp = new HashSet<>();
            temp.add(i);
            tmap.put(val, temp);
        }
    }
    // iterate over the distinct singular values and copy the corresponding columns of U and V
    Iterator<Double> iter = tmap.keySet().iterator();
    while (iter.hasNext()) {
        Double val = iter.next();
        Set<Integer> indices = tmap.get(val);
        for (Integer index : indices) {
            indicesD.add(index);
            coVarD.addToEntry(index, index, val);
            matrixUd.setColumnVector(index, U.getColumnVector(index));
            matrixVd.setColumnVector(index, V.getColumnVector(index));
        }
    }

}

From source file:com.joptimizer.util.ColtUtils.java

/**
 * Returns a lower and an upper bound for the condition number
 * <br>kp(A) = Norm[A, p] * Norm[A^-1, p]
 * <br>where
 * <br>      Norm[A, p] = sup ( Norm[A.x, p]/Norm[x, p] , x != 0 )
 * <br>for a matrix and
 * <br>      Norm[x, 1]  := Sum[Math.abs(x[i]), i]
 * <br>      Norm[x, 2]  := Math.sqrt(Sum[Math.pow(x[i], 2), i])
 * <br>      Norm[x, oo] := Max[Math.abs(x[i]), i]
 * <br>for a vector.
 *
 * @param A matrix you want the condition number of
 * @param p norm order (2 or Integer.MAX_VALUE)
 * @return an array with the two bounds (lower and upper bound)
 *
 * @see Ravindra S. Gajulapalli, Leon S. Lasdon, "Scaling Sparse Matrices for Optimization Algorithms"
 */
public static double[] getConditionNumberRange(RealMatrix A, int p) {
    double infLimit = Double.NEGATIVE_INFINITY;
    double supLimit = Double.POSITIVE_INFINITY;
    List<Double> columnNormsList = new ArrayList<Double>();
    switch (p) {
    case 2:
        for (int j = 0; j < A.getColumnDimension(); j++) {
            columnNormsList.add(A.getColumnVector(j).getL1Norm());
        }
        Collections.sort(columnNormsList);
        //kp >= Norm[Ai, p]/Norm[Aj, p], for each i, j = 0,1,...,n, Ak columns of A
        infLimit = columnNormsList.get(columnNormsList.size() - 1) / columnNormsList.get(0);
        break;

    case Integer.MAX_VALUE:
        double normAInf = A.getNorm();
        for (int j = 0; j < A.getColumnDimension(); j++) {
            columnNormsList.add(A.getColumnVector(j).getLInfNorm());
        }
        Collections.sort(columnNormsList);
        //k1 >= Norm[A, +oo]/min{ Norm[Aj, +oo], for each j = 0,1,...,n }, Ak columns of A
        infLimit = normAInf / columnNormsList.get(0);
        break;

    default:
        throw new IllegalArgumentException("p must be 2 or Integer.MAX_VALUE");
    }
    return new double[] { infLimit, supLimit };
}

From source file:de.andreasschoknecht.LS3.LSSMCalculator.java

/**
 * Calculates the LSSM matrix containing the final similarity values between the documents, i.e., the models.
 * To calculate the similarity values, the Vtk matrix is scaled by the singular value matrix. Afterwards the cosine similarity,
 * transformed onto the interval [0,1], is calculated; it represents the similarity value of two documents.
 */
void calculateLSSMMatrix() {
    // scale Vtk with singular value matrix Sk
    RealMatrix scaledVtk = Sk.multiply(Vtk);

    int docsNumber = scaledVtk.getColumnDimension();
    double[][] tmpArray = new double[docsNumber][docsNumber];

    for (int i = 0; i < docsNumber; i++) {
        RealVector documentVector1 = scaledVtk.getColumnVector(i);
        for (int j = i; j < docsNumber; j++) {
            double lssmValue = (documentVector1.cosine(scaledVtk.getColumnVector(j)) + 1) / 2;
            tmpArray[i][j] = round(lssmValue, 2);
            tmpArray[j][i] = round(lssmValue, 2);
        }
    }

    lssmMatrix = new Array2DRowRealMatrix(tmpArray);
}

From source file:de.andreasschoknecht.LS3.Query.java

/**
 * Calculate the LSSM values with respect to a document collection.
 *
 * @param Sk The matrix Sk of singular values
 * @param Vtk The matrix Vtk of the singular value decomposition
 */
void calculateLSSMValues(RealMatrix Sk, RealMatrix Vtk) {
    // scale Vtk with singular value matrix Sk
    RealMatrix scaledVtk = Sk.multiply(Vtk);

    // the query model as vector
    ArrayRealVector queryVector = new ArrayRealVector(pseudoDocument);

    int docsNumber = scaledVtk.getColumnDimension();
    lssmValues = new double[docsNumber];

    for (int i = 0; i < docsNumber; i++) {
        RealVector columnVector = scaledVtk.getColumnVector(i);
        lssmValues[i] = (queryVector.cosine(columnVector) + 1) / 2;
    }
}