Example usage for org.apache.commons.math3.linear RealMatrix multiply

Introduction

On this page you can find example usage for org.apache.commons.math3.linear RealMatrix multiply.

Prototype

RealMatrix multiply(RealMatrix m) throws DimensionMismatchException;

Document

Returns the result of postmultiplying this by m.
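
As a quick, self-contained sketch (values invented): multiply postmultiplies the receiver, so a.multiply(b) computes the product A*B, and a DimensionMismatchException is thrown when the column count of this does not match the row count of m.

RealMatrix a = MatrixUtils.createRealMatrix(new double[][] { { 1, 2 }, { 3, 4 } });
RealMatrix b = MatrixUtils.createRealMatrix(new double[][] { { 0, 1 }, { 1, 0 } });
RealMatrix ab = a.multiply(b); // { { 2, 1 }, { 4, 3 } }
RealMatrix ba = b.multiply(a); // { { 3, 4 }, { 1, 2 } } -- order matters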

Usage

From source file:cooccurrence.Omer_Levy.java

public static void main(String args[]) {
    String path = "";
    String writePath = "";
    BufferedReader br = null;
    ArrayList<String> files = new ArrayList<>();
    //reading all the files in the directory
    //each file is PPMI matrix for an year
    listFilesForFolder(new File(path), files);
    for (String filePath : files) {
        System.out.println(filePath);
        String fileName = new File(filePath).getName();

        //data structure to store the PPMI matrix in the file
        HashMap<String, HashMap<String, Double>> cooccur = new HashMap<>();
        readFileContents(filePath, cooccur); //reading the file and storing the content in the hashmap
        //Because matrices are identified by row and column ids, the following
        //lists map each id to the corresponding string. Note that the matrix is symmetric.
        ArrayList<String> rowStrings = new ArrayList<>(cooccur.keySet());
        ArrayList<String> colStrings = new ArrayList<>(cooccur.keySet());

        //creating matrix with given dimensions and initializing it to 0
        RealMatrix matrixR = MatrixUtils.createRealMatrix(rowStrings.size(), colStrings.size());

        //creating the matrices for storing top rank-d matrices of SVD 
        RealMatrix matrixUd = MatrixUtils.createRealMatrix(D, D);
        RealMatrix matrixVd = MatrixUtils.createRealMatrix(D, D);
        RealMatrix coVarD = MatrixUtils.createRealMatrix(D, D);

        //populating the matrices based on the co-occur hashmap
        populateMatrixR(matrixR, cooccur, rowStrings, colStrings);

        //computing the svd
        SingularValueDecomposition svd = new SingularValueDecomposition(matrixR);

        //extracting the components of SVD factorization
        RealMatrix U = svd.getU();
        RealMatrix V = svd.getV();
        RealMatrix coVariance = svd.getCovariance(-1);

        //list to store indices of top-D singular values of coVar. 
        //Use this with rowStrings (colStrings) to get the corresponding word and context
        ArrayList<Integer> indicesD = new ArrayList<>();
        //Extract topD singular value from covariance to store in coVarD and
        //extract corresponding columns from U and V to store in Ud and Vd
        getTopD(U, V, coVariance, matrixUd, matrixVd, coVarD, indicesD);
        //calculate the square root of coVarD
        RealMatrix squareRootCoVarD = squareRoot(coVarD);
        RealMatrix W_svd = matrixUd.multiply(squareRootCoVarD);
        RealMatrix C_svd = matrixVd.multiply(squareRootCoVarD);
    }
}
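
The last two lines above build the word and context matrices as Ud * sqrt(Sigma_d) and Vd * sqrt(Sigma_d). A self-contained sketch of that step (the PPMI values and the dimension d are invented; the helper methods above are not reproduced):

RealMatrix ppmi = MatrixUtils.createRealMatrix(new double[][] {
        { 0.0, 1.2, 0.3 },
        { 1.2, 0.0, 0.7 },
        { 0.3, 0.7, 0.0 } });
SingularValueDecomposition svd = new SingularValueDecomposition(ppmi);
int d = 2; // keep the top-2 singular values
RealMatrix Ud = svd.getU().getSubMatrix(0, 2, 0, d - 1);
double[] sqrtSigma = new double[d];
for (int i = 0; i < d; i++) {
    sqrtSigma[i] = Math.sqrt(svd.getSingularValues()[i]);
}
RealMatrix sqrtSigmaD = MatrixUtils.createRealDiagonalMatrix(sqrtSigma);
RealMatrix W = Ud.multiply(sqrtSigmaD); // 3 x d word-vector matrix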

From source file:edu.washington.gs.skyline.model.quantification.WeightedRegression.java

public static double[] weighted(double[][] x, double[] y, double[] weights, boolean intercept) {
    RealMatrix predictor;
    if (intercept) {
        int nRows = x.length;
        int nCols = x[0].length + 1;
        predictor = MatrixUtils.createRealMatrix(nRows, nCols);
        for (int iRow = 0; iRow < nRows; iRow++) {
            predictor.setEntry(iRow, 0, 1.0);
            for (int iCol = 1; iCol < nCols; iCol++) {
                predictor.setEntry(iRow, iCol, x[iRow][iCol - 1]);
            }
        }
    } else {
        predictor = MatrixUtils.createRealMatrix(x);
    }
    RealVector responseVector = MatrixUtils.createRealVector(y);
    RealMatrix weightsMatrix = MatrixUtils.createRealDiagonalMatrix(weights);
    RealMatrix predictorTransposed = predictor.transpose();
    RealMatrix predictorTransposedTimesWeights = predictorTransposed
            .multiply(weightsMatrix.multiply(predictor));
    CholeskyDecomposition choleskyDecomposition = new CholeskyDecomposition(predictorTransposedTimesWeights);
    RealVector vectorToSolve = predictorTransposed.operate(weightsMatrix.operate(responseVector));
    RealVector result = choleskyDecomposition.getSolver().solve(vectorToSolve);
    return result.toArray();
}
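
A hedged usage sketch, assuming the call is made from inside the same class (data values invented); the method solves the weighted normal equations X^T W X b = X^T W y via Cholesky:

double[][] x = { { 1.0 }, { 2.0 }, { 3.0 }, { 4.0 } };
double[] y = { 2.1, 3.9, 6.2, 7.8 };
double[] weights = { 1.0, 1.0, 2.0, 2.0 };
double[] coefficients = weighted(x, y, weights, true); // [intercept, slope]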

From source file:com.rapidminer.tools.math.LinearRegression.java

/** Calculates the coefficients of linear ridge regression. */
public static double[] performRegression(Matrix a, Matrix b, double ridge) {
    RealMatrix x = MatrixUtils.createRealMatrix(a.getArray());
    RealMatrix y = MatrixUtils.createRealMatrix(b.getArray());
    int numberOfColumns = x.getColumnDimension();
    double[] coefficients = new double[numberOfColumns];
    RealMatrix xTransposed = x.transpose();
    Matrix result;
    boolean finished = false;
    while (!finished) {
        RealMatrix xTx = xTransposed.multiply(x);

        for (int i = 0; i < numberOfColumns; i++) {
            xTx.addToEntry(i, i, ridge);
        }

        RealMatrix xTy = xTransposed.multiply(y);
        coefficients = xTy.getColumn(0);

        try {
            // do not use Apache LUDecomposition to solve here because it produces
            // different results
            result = new Matrix(xTx.getData()).solve(new Matrix(coefficients, coefficients.length));
            for (int i = 0; i < numberOfColumns; i++) {
                coefficients[i] = result.get(i, 0);
            }
            finished = true;
        } catch (Exception ex) {
            double ridgeOld = ridge;
            if (ridge > 0) {
                ridge *= 10;
            } else {
                ridge = 0.0000001;
            }
            finished = false;
            logger.warning("Error during calculation: " + ex.getMessage() + ": Increasing ridge factor from "
                    + ridgeOld + " to " + ridge);
        }
    }
    return coefficients;
}
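
A hedged usage sketch (values invented); the Matrix arguments are assumed to be the same Jama-style Matrix class the method already constructs internally, with a as the design matrix and b as a single-column response:

Matrix designMatrix = new Matrix(new double[][] { { 1.0, 1.0 }, { 1.0, 2.0 }, { 1.0, 3.0 } });
Matrix response = new Matrix(new double[][] { { 2.0 }, { 2.9 }, { 4.1 } });
double[] coefficients = performRegression(designMatrix, response, 1e-8); // [intercept, slope]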

From source file:com.google.location.lbs.gnss.gps.pseudorange.Ecef2EnuConverter.java

/**
 * Converts a vector represented by coordinates ecefX, ecefY, ecefZ in an
 * Earth-Centered Earth-Fixed (ECEF) Cartesian system into a vector in a
 * local east-north-up (ENU) Cartesian system.
 *
 * <p> For example it can be used to rotate a speed vector or position offset vector to ENU.
 *
 * @param ecefX X coordinate in ECEF
 * @param ecefY Y coordinate in ECEF
 * @param ecefZ Z coordinate in ECEF
 * @param refLat Latitude in Radians of the Reference Position
 * @param refLng Longitude in Radians of the Reference Position
 * @return the converted values in {@code EnuValues}
 */
public static EnuValues convertEcefToEnu(double ecefX, double ecefY, double ecefZ, double refLat,
        double refLng) {

    RealMatrix rotationMatrix = getRotationMatrix(refLat, refLng);
    RealMatrix ecefCoordinates = new Array2DRowRealMatrix(new double[] { ecefX, ecefY, ecefZ });

    RealMatrix enuResult = rotationMatrix.multiply(ecefCoordinates);
    return new EnuValues(enuResult.getEntry(0, 0), enuResult.getEntry(1, 0), enuResult.getEntry(2, 0));
}
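
A hedged usage sketch (coordinates invented): rotate an ECEF offset vector into ENU at a reference position, remembering that refLat and refLng must be in radians:

double refLat = Math.toRadians(37.4220);
double refLng = Math.toRadians(-122.0841);
EnuValues enu = convertEcefToEnu(10.0, -5.0, 3.0, refLat, refLng);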

From source file:com.github.dougkelly88.FLIMPlateReaderGUI.InstrumentInterfaceClasses.XYZMotionInterface.java

public static AffineTransform deriveAffineTransform(Point2D.Double[] xplt, Point2D.Double[] stage) {
    // GENERAL CASE:    
    // if S is stage space and P is XPLT plate space, 
    // and T is the matrix to transform between the two:
    // S = TP
    // SP^-1 = TPP^-1
    // T = SP^-1;
    // where S is a 2x3 matrix with 3 points, T is a 2x3 matrix and P is a 
    // 3x3 matrix with 3 points and the bottom row occupied by ones...

    double[][] Parr = { { xplt[0].getX(), xplt[1].getX(), xplt[2].getX() },
            { xplt[0].getY(), xplt[1].getY(), xplt[2].getY() }, { 1, 1, 1 } };
    RealMatrix P = MatrixUtils.createRealMatrix(Parr);

    double[][] Sarr = { { stage[0].getX(), stage[1].getX(), stage[2].getX() },
            { stage[0].getY(), stage[1].getY(), stage[2].getY() } };
    RealMatrix S = MatrixUtils.createRealMatrix(Sarr);

    RealMatrix Pinv = (new LUDecomposition(P)).getSolver().getInverse();
    RealMatrix transformationMatrix = S.multiply(Pinv);

    double m00 = transformationMatrix.getEntry(0, 0);
    double m01 = transformationMatrix.getEntry(0, 1);
    double m02 = transformationMatrix.getEntry(0, 2);
    double m10 = transformationMatrix.getEntry(1, 0);
    double m11 = transformationMatrix.getEntry(1, 1);
    double m12 = transformationMatrix.getEntry(1, 2);

    return new AffineTransform(m00, m10, m01, m11, m02, m12);
}
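
A hedged usage sketch (calibration points invented): the three plate points must be non-collinear so that P is invertible, and the resulting transform maps plate coordinates to stage coordinates:

Point2D.Double[] platePoints = { new Point2D.Double(0, 0), new Point2D.Double(1, 0),
        new Point2D.Double(0, 1) };
Point2D.Double[] stagePoints = { new Point2D.Double(10, 20), new Point2D.Double(12, 20),
        new Point2D.Double(10, 23) };
AffineTransform plateToStage = deriveAffineTransform(platePoints, stagePoints);
Point2D stageTarget = plateToStage.transform(new Point2D.Double(0.5, 0.5), null);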

From source file:hulo.localization.utils.ArrayUtils.java

public static double[][] multiply(double[][] X1, double[][] X2) {
    RealMatrix M1 = MatrixUtils.createRealMatrix(X1);
    RealMatrix M2 = MatrixUtils.createRealMatrix(X2);
    RealMatrix M3 = M1.multiply(M2);
    return M3.getData();
}
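
A minimal usage sketch (values invented), assuming a call from within the same class: a 2x3 times 3x2 product returned as a plain double[][]:

double[][] a = { { 1, 2, 3 }, { 4, 5, 6 } };
double[][] b = { { 7, 8 }, { 9, 10 }, { 11, 12 } };
double[][] product = multiply(a, b); // { { 58, 64 }, { 139, 154 } }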

From source file:com.github.thorbenlindhauer.factor.CanonicalGaussianFactor.java

/**
 * Transforms a conditional linear Gaussian (i.e. a Gaussian of the form N(x; a + B^T * Y, C)) into canonical form.
 *
 * @param meanVector a
 * @param weightMatrix B
 * @param covarianceMatrix C
 */
public static CanonicalGaussianFactor fromConditionalForm(Scope scope, Scope conditioningScope,
        RealVector meanVector, RealMatrix covarianceMatrix, RealMatrix weightMatrix) {

    // TODO: perform cardinality checks etc.

    // the following assumes that the resulting precision matrix (and mean vector) can be restructured as follows:
    // ( SUBMATRIX_XX SUBMATRIX_XY )
    // ( SUBMATRIX_YX SUBMATRIX_YY )
    // where X indicates variables that are part of the prediction scope and Y are variables being part of the conditioning scope

    // assuming
    //   meanVector: a; |x| vector
    //   covarianceMatrix: C; |x| cross |x| matrix
    //   weightMatrix: B;  |x| cross |y| matrix

    // XX = C^-1
    // XY = -C^-1 * B
    // YX = -B^T * C^-1
    // YY = B^T * C^-1 * B

    MathUtil mathUtil = new MathUtil(covarianceMatrix);

    // C^(-1)
    RealMatrix xxMatrix = null;

    xxMatrix = mathUtil.invert();

    //    if (!mathUtil.isZeroMatrix()) {
    //      xxMatrix = mathUtil.invert();
    //    } else {
    //
    //      // this is a special case for convolution in which the "summing" variable has no variance itself
    //      // although a 0 variance is not valid in general
    //      xxMatrix = covarianceMatrix;
    //    }

    // B^T * C^(-1)
    RealMatrix weightedXXMatrix = weightMatrix.transpose().multiply(xxMatrix);

    // -B^T * C^(-1)
    RealMatrix yxMatrix = weightedXXMatrix.scalarMultiply(-1.0d);

    // -C^(-1)^T * B
    RealMatrix xyMatrix = xxMatrix.transpose().multiply(weightMatrix).scalarMultiply(-1.0d);

    // B^T * C^(-1) * B
    RealMatrix yyMatrix = weightedXXMatrix.multiply(weightMatrix);

    // K
    RealMatrix conditionedPrecisionMatrix = new Array2DRowRealMatrix(scope.size(), scope.size());

    // Matrix to generate h
    RealMatrix conditionedMeanTransformationMatrix = new Array2DRowRealMatrix(scope.size(), scope.size());

    Scope predictionScope = scope.reduceBy(conditioningScope);
    int[] predictionMapping = scope.createContinuousVariableMapping(predictionScope);
    int[] conditioningMapping = scope.createContinuousVariableMapping(conditioningScope);

    for (int i = 0; i < scope.size(); i++) {
        RealVector precisionColumn = conditionedPrecisionMatrix.getColumnVector(i);

        if (predictionMapping[i] >= 0) {
            precisionColumn = precisionColumn.add(
                    padVector(xxMatrix.getColumnVector(predictionMapping[i]), scope.size(), predictionMapping));

            conditionedMeanTransformationMatrix.setColumnVector(i, precisionColumn);

            precisionColumn = precisionColumn.add(padVector(yxMatrix.getColumnVector(predictionMapping[i]),
                    scope.size(), conditioningMapping));

            conditionedPrecisionMatrix.setColumnVector(i, precisionColumn);
        }

        if (conditioningMapping[i] >= 0) {
            precisionColumn = precisionColumn.add(padVector(xyMatrix.getColumnVector(conditioningMapping[i]),
                    scope.size(), predictionMapping));

            conditionedMeanTransformationMatrix.setColumnVector(i, precisionColumn);

            precisionColumn = precisionColumn.add(padVector(yyMatrix.getColumnVector(conditioningMapping[i]),
                    scope.size(), conditioningMapping));

            conditionedPrecisionMatrix.setColumnVector(i, precisionColumn);
        }
    }

    // h = (a, 0)^T * (XX, XY; 0, 0)
    RealMatrix scaledMeanMatrix = new Array2DRowRealMatrix(1, scope.size());
    scaledMeanMatrix.setRowVector(0, padVector(meanVector, scope.size(), predictionMapping));

    scaledMeanMatrix = scaledMeanMatrix.multiply(conditionedMeanTransformationMatrix);
    RealVector scaledMeanVector = scaledMeanMatrix.getRowVector(0);

    // g = -0.5 * a^T * C^-1 * a - log((2 * PI)^(m/2) * det(C)^0.5) where m is the size of the prediction scope
    RealMatrix meanMatrix = new Array2DRowRealMatrix(predictionScope.size(), 1);
    meanMatrix.setColumnVector(0, meanVector);
    double normalizationConstant = -0.5d * meanVector.dotProduct(xxMatrix.operate(meanVector)) - Math.log(
            Math.pow(2 * Math.PI, (double) predictionScope.size() / 2.0d) * Math.sqrt(mathUtil.determinant()));

    return new CanonicalGaussianFactor(scope, conditionedPrecisionMatrix, scaledMeanVector,
            normalizationConstant);

}
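
The four precision blocks built above can be checked in isolation with plain Commons Math; a minimal sketch with invented matrices C (2x2 covariance) and B (2x1 weight matrix):

// XX = C^-1, XY = -C^-1 * B, YX = -B^T * C^-1, YY = B^T * C^-1 * B
RealMatrix c = MatrixUtils.createRealMatrix(new double[][] { { 2.0, 0.5 }, { 0.5, 1.0 } });
RealMatrix bMat = MatrixUtils.createRealMatrix(new double[][] { { 1.0 }, { 2.0 } });
RealMatrix xx = new LUDecomposition(c).getSolver().getInverse();
RealMatrix yx = bMat.transpose().multiply(xx).scalarMultiply(-1.0);
RealMatrix xy = xx.transpose().multiply(bMat).scalarMultiply(-1.0);
RealMatrix yy = bMat.transpose().multiply(xx).multiply(bMat);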

From source file:hivemall.utils.math.MatrixUtils.java

/**
 * Find eigenvalues and eigenvectors of given tridiagonal matrix T.
 *
 * @see http://web.csulb.edu/~tgao/math423/s94.pdf
 * @see http://stats.stackexchange.com/questions/20643/finding-matrix-eigenvectors-using-qr-decomposition
 * @param T target tridiagonal matrix
 * @param nIter number of iterations for the QR method
 * @param eigvals eigenvalues are stored here
 * @param eigvecs eigenvectors are stored here
 */
public static void tridiagonalEigen(@Nonnull final RealMatrix T, @Nonnull final int nIter,
        @Nonnull final double[] eigvals, @Nonnull final RealMatrix eigvecs) {
    Preconditions.checkArgument(Arrays.deepEquals(T.getData(), T.transpose().getData()),
            "Target matrix T must be a symmetric (tridiagonal) matrix");
    Preconditions.checkArgument(eigvecs.getRowDimension() == eigvecs.getColumnDimension(),
            "eigvecs must be a square matrix");
    Preconditions.checkArgument(T.getRowDimension() == eigvecs.getRowDimension(),
            "T and eigvecs must be the same shape");
    Preconditions.checkArgument(eigvals.length == eigvecs.getRowDimension(),
            "Number of eigenvalues and eigenvectors must be same");

    int nEig = eigvals.length;

    // initialize eigvecs as an identity matrix
    eigvecs.setSubMatrix(eye(nEig), 0, 0);

    RealMatrix T_ = T.copy();

    for (int i = 0; i < nIter; i++) {
        // QR decomposition for the tridiagonal matrix T
        RealMatrix R = new Array2DRowRealMatrix(nEig, nEig);
        RealMatrix Qt = new Array2DRowRealMatrix(nEig, nEig);
        tridiagonalQR(T_, R, Qt);

        RealMatrix Q = Qt.transpose();
        T_ = R.multiply(Q);
        eigvecs.setSubMatrix(eigvecs.multiply(Q).getData(), 0, 0);
    }

    // diagonal elements correspond to the eigenvalues
    for (int i = 0; i < nEig; i++) {
        eigvals[i] = T_.getEntry(i, i);
    }
}
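
A hedged usage sketch (matrix values invented), assuming the call is made from within the same utility class: a 3x3 symmetric tridiagonal matrix and 100 QR iterations:

RealMatrix T = new Array2DRowRealMatrix(new double[][] {
        { 2.0, 1.0, 0.0 },
        { 1.0, 2.0, 1.0 },
        { 0.0, 1.0, 2.0 } });
double[] eigvals = new double[3];
RealMatrix eigvecs = new Array2DRowRealMatrix(3, 3);
tridiagonalEigen(T, 100, eigvals, eigvecs);
// eigvals now holds the approximated eigenvalues; the columns of eigvecs the eigenvectors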

From source file:gamlss.utilities.MatrixFunctions.java

/**
* <p>Compute the "hat" matrix.
* </p>
* <p>The hat matrix is defined in terms of the design matrix X
*  by X(X<sup>T</sup>X)<sup>-1</sup>X<sup>T</sup>
* </p>
* <p>The implementation here uses the QR decomposition to compute the
* hat matrix as Q I<sub>p</sub>Q<sup>T</sup> where I<sub>p</sub> is the
* p-dimensional identity matrix augmented by 0's.  This computational
* formula is from "The Hat Matrix in Regression and ANOVA",
* David C. Hoaglin and Roy E. Welsch,
* <i>The American Statistician</i>, Vol. 32, No. 1 (Feb., 1978), pp. 17-22.
* @param m the design matrix
* @return the hat matrix
*/
public static RealMatrix calculateHat(final BlockRealMatrix m) {
    QRDecomposition qr = new QRDecomposition(m);
    // Create augmented identity matrix
    RealMatrix qM = qr.getQ();
    final int p = qr.getR().getColumnDimension();
    final int n = qM.getColumnDimension();
    Array2DRowRealMatrix augI = new Array2DRowRealMatrix(n, n);
    double[][] augIData = augI.getDataRef();
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            if (i == j && i < p) {
                augIData[i][j] = 1d;
            } else {
                augIData[i][j] = 0d;
            }
        }
    }
    // Compute and return Hat matrix
    return qM.multiply(augI).multiply(qM.transpose());
}
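
A minimal usage sketch (design matrix invented): for a full-rank 4x2 design with an intercept column the hat matrix is 4x4, symmetric, idempotent, and its trace equals p = 2:

BlockRealMatrix design = new BlockRealMatrix(new double[][] {
        { 1.0, 0.0 }, { 1.0, 1.0 }, { 1.0, 2.0 }, { 1.0, 3.0 } });
RealMatrix hat = calculateHat(design);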

From source file:com.itemanalysis.psychometrics.cfa.UnweightedLeastSquares.java

/**
* Computes the unweighted least squares fit function
* F = 0.5 * trace((S - SIGMA)^2) for the given parameter values.
*
* @param argument model parameter values
* @return the unweighted least squares fit value F
*/
public double value(double[] argument) {
    SIGMA = model.getImpliedCovariance(argument);
    RealMatrix D = varcov.subtract(SIGMA);
    RealMatrix D2 = D.multiply(D);
    F = 0.5 * D2.getTrace();
    return F;
}
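
The value computed here is the unweighted least squares discrepancy F = 0.5 * trace((S - SIGMA)^2); a standalone sketch of that trace with invented 2x2 matrices:

RealMatrix s = MatrixUtils.createRealMatrix(new double[][] { { 1.0, 0.4 }, { 0.4, 1.0 } });
RealMatrix sigma = MatrixUtils.createRealMatrix(new double[][] { { 1.0, 0.3 }, { 0.3, 1.0 } });
RealMatrix d = s.subtract(sigma);
double f = 0.5 * d.multiply(d).getTrace(); // 0.5 * tr((S - SIGMA)^2) = 0.01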