Example usage for org.apache.commons.math3.linear RealMatrix setRowVector

Introduction

On this page you can find example usages of org.apache.commons.math3.linear.RealMatrix.setRowVector.

Prototype

void setRowVector(int row, RealVector vector) throws OutOfRangeException, MatrixDimensionMismatchException;

Document

Sets the specified row of this matrix to the entries of the specified vector.
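
Before the collected examples below, a minimal, self-contained sketch of calling setRowVector (the 3x3 size, the values, and the class name SetRowVectorDemo are illustrative assumptions, not taken from the examples):

import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.RealVector;

public class SetRowVectorDemo {
    public static void main(String[] args) {
        // A 3x3 matrix of zeros.
        RealMatrix m = MatrixUtils.createRealMatrix(3, 3);

        // Replace row 1 with the entries of a vector of matching dimension.
        RealVector row = new ArrayRealVector(new double[] { 1.0, 2.0, 3.0 });
        m.setRowVector(1, row);

        System.out.println(m.getRowVector(1)); // {1; 2; 3}

        // A row index outside [0, rowDimension - 1] throws OutOfRangeException;
        // a vector whose dimension differs from the column dimension throws
        // MatrixDimensionMismatchException.
    }
}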

Usage

From source file:com.vsthost.rnd.commons.math.ext.linear.EMatrixUtils.java

/**
 * Shuffles rows of a matrix using the provided random number generator.
 *
 * @param matrix The matrix of which the rows will be shuffled.
 * @param randomGenerator The random number generator to be used.
 * @return The new shuffled matrix.
 */
public static RealMatrix shuffleRows(RealMatrix matrix, RandomGenerator randomGenerator) {
    // Create an index vector to be shuffled:
    int[] index = MathArrays.sequence(matrix.getRowDimension(), 0, 1);
    MathArrays.shuffle(index, randomGenerator);

    // Create a new matrix:
    RealMatrix retval = MatrixUtils.createRealMatrix(matrix.getRowDimension(), matrix.getColumnDimension());

    // Populate:
    for (int row = 0; row < index.length; row++) {
        retval.setRowVector(row, matrix.getRowVector(index[row]));
    }

    // Done, return:
    return retval;
}
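
As a quick call-site sketch for the helper above (the seed, the 4x2 matrix, and the class name ShuffleRowsDemo are assumptions for illustration):

import com.vsthost.rnd.commons.math.ext.linear.EMatrixUtils;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.random.Well19937c;

public class ShuffleRowsDemo {
    public static void main(String[] args) {
        RealMatrix m = MatrixUtils.createRealMatrix(
                new double[][] { { 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 } });
        // Every row of m appears exactly once in the result, in permuted order.
        RealMatrix shuffled = EMatrixUtils.shuffleRows(m, new Well19937c(42L));
        System.out.println(shuffled);
    }
}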

From source file:com.vsthost.rnd.commons.math.ext.linear.EMatrixUtils.java

/**
 * Multiplies the matrix's rows by the given vector element-by-element.
 *
 * @param matrix The input matrix.
 * @param vector The vector used to multiply each row of the matrix element-by-element.
 * @return The new matrix whose rows are the element-by-element products of the input rows and the vector.
 */
public static RealMatrix rbrMultiply(RealMatrix matrix, RealVector vector) {
    // Define the return value:
    RealMatrix retval = MatrixUtils.createRealMatrix(matrix.getRowDimension(), matrix.getColumnDimension());

    // Iterate over rows:
    for (int i = 0; i < retval.getRowDimension(); i++) {
        retval.setRowVector(i, matrix.getRowVector(i).ebeMultiply(vector));
    }

    // Done, return:
    return retval;
}

From source file:com.github.thorbenlindhauer.factor.CanonicalGaussianFactor.java

/**
 * Transforms a conditional linear Gaussian (i.e. a Gaussian of the form N(x; a + B^T * Y, C)) into canonical form.
 *
 * @param meanVector a
 * @param weightMatrix B
 * @param covarianceMatrix C
 */
public static CanonicalGaussianFactor fromConditionalForm(Scope scope, Scope conditioningScope,
        RealVector meanVector, RealMatrix covarianceMatrix, RealMatrix weightMatrix) {

    // TODO: perform cardinality checks etc.

    // the following assumes that the resulting precision matrix (and mean vector) can be restructured as follows:
    // ( SUBMATRIX_XX SUBMATRIX_XY )
    // ( SUBMATRIX_YX SUBMATRIX_YY )
    // where X indicates variables that are part of the prediction scope and Y are variables being part of the conditioning scope

    // assuming
    //   meanVector: a; |x| vector
    //   covarianceMatrix: C; |x| cross |x| matrix
    //   weightMatrix: B;  |x| cross |y| matrix

    // XX = C^-1
    // XY = -C^-1 * B
    // YX = -B^T * C^-1
    // YY = B^T * C^-1 * B

    MathUtil mathUtil = new MathUtil(covarianceMatrix);

    // C^(-1)
    RealMatrix xxMatrix = null;

    xxMatrix = mathUtil.invert();

    //    if (!mathUtil.isZeroMatrix()) {
    //      xxMatrix = mathUtil.invert();
    //    } else {
    //
    //      // this is a special case for convolution in which the "summing" variable has no variance itself
    //      // although a 0 variance is not valid in general
    //      xxMatrix = covarianceMatrix;
    //    }

    // B^T * C^(-1)
    RealMatrix weightedXXMatrix = weightMatrix.transpose().multiply(xxMatrix);

    // -B^T * C^(-1)
    RealMatrix yxMatrix = weightedXXMatrix.scalarMultiply(-1.0d);

    // -C^(-1)^T * B
    RealMatrix xyMatrix = xxMatrix.transpose().multiply(weightMatrix).scalarMultiply(-1.0d);

    // B^T * C^(-1) * B
    RealMatrix yyMatrix = weightedXXMatrix.multiply(weightMatrix);

    // K
    RealMatrix conditionedPrecisionMatrix = new Array2DRowRealMatrix(scope.size(), scope.size());

    // Matrix to generate h
    RealMatrix conditionedMeanTransformationMatrix = new Array2DRowRealMatrix(scope.size(), scope.size());

    Scope predictionScope = scope.reduceBy(conditioningScope);
    int[] predictionMapping = scope.createContinuousVariableMapping(predictionScope);
    int[] conditioningMapping = scope.createContinuousVariableMapping(conditioningScope);

    for (int i = 0; i < scope.size(); i++) {
        RealVector precisionColumn = conditionedPrecisionMatrix.getColumnVector(i);

        if (predictionMapping[i] >= 0) {
            precisionColumn = precisionColumn.add(
                    padVector(xxMatrix.getColumnVector(predictionMapping[i]), scope.size(), predictionMapping));

            conditionedMeanTransformationMatrix.setColumnVector(i, precisionColumn);

            precisionColumn = precisionColumn.add(padVector(yxMatrix.getColumnVector(predictionMapping[i]),
                    scope.size(), conditioningMapping));

            conditionedPrecisionMatrix.setColumnVector(i, precisionColumn);
        }

        if (conditioningMapping[i] >= 0) {
            precisionColumn = precisionColumn.add(padVector(xyMatrix.getColumnVector(conditioningMapping[i]),
                    scope.size(), predictionMapping));

            conditionedMeanTransformationMatrix.setColumnVector(i, precisionColumn);

            precisionColumn = precisionColumn.add(padVector(yyMatrix.getColumnVector(conditioningMapping[i]),
                    scope.size(), conditioningMapping));

            conditionedPrecisionMatrix.setColumnVector(i, precisionColumn);
        }
    }

    // h = (a, 0)^T * (XX, XY; 0, 0)
    RealMatrix scaledMeanMatrix = new Array2DRowRealMatrix(1, scope.size());
    scaledMeanMatrix.setRowVector(0, padVector(meanVector, scope.size(), predictionMapping));

    scaledMeanMatrix = scaledMeanMatrix.multiply(conditionedMeanTransformationMatrix);
    RealVector scaledMeanVector = scaledMeanMatrix.getRowVector(0);

    // g = -0.5 * a^T * C^-1 * a - log((2 * PI)^(m/2) * det(C)^0.5) where m is the size of the prediction scope
    RealMatrix meanMatrix = new Array2DRowRealMatrix(predictionScope.size(), 1);
    meanMatrix.setColumnVector(0, meanVector);
    double normalizationConstant = -0.5d * meanVector.dotProduct(xxMatrix.operate(meanVector)) - Math.log(
            Math.pow(2 * Math.PI, (double) predictionScope.size() / 2.0d) * Math.sqrt(mathUtil.determinant()));

    return new CanonicalGaussianFactor(scope, conditionedPrecisionMatrix, scaledMeanVector,
            normalizationConstant);

}

From source file:ellipsoidFit.FitPoints.java

/**
 * Translates the algebraic form of the ellipsoid to the center.
 *
 * @param center
 *            vector containing the center of the ellipsoid.
 * @param a
 *            the algebraic form of the polynomial.
 * @return the center translated form of the algebraic ellipsoid.
 */
private RealMatrix translateToCenter(RealVector center, RealMatrix a) {
    // Form the corresponding translation matrix.
    RealMatrix t = MatrixUtils.createRealIdentityMatrix(4);

    RealMatrix centerMatrix = new Array2DRowRealMatrix(1, 3);

    centerMatrix.setRowVector(0, center);

    t.setSubMatrix(centerMatrix.getData(), 3, 0);

    // Translate to the center.
    RealMatrix r = t.multiply(a).multiply(t.transpose());

    return r;
}

From source file:edu.stanford.cfuller.imageanalysistools.fitting.NelderMeadMinimizer.java

/**
 * Constructs the initial simplex that is the starting point of the optimization given an initial guess at the minimum and a size scale for each parameter.
 * @param initialPoint      The initial guess at the minimum, one component per parameter.
 * @param componentScales   A size scale for each parameter that is used to set how large the initial simplex is.
 * @return                  A matrix containing p + 1 rows, each of which is one set of p parameters, which specify the simplex.
 */
public RealMatrix generateInitialSimplex(RealVector initialPoint, RealVector componentScales) {
    RealMatrix initialSimplex = new Array2DRowRealMatrix(initialPoint.getDimension() + 1,
            initialPoint.getDimension());

    initialSimplex.setRowVector(0, initialPoint);

    for (int i = 1; i < initialSimplex.getRowDimension(); i++) {
        RealVector newVector = initialPoint.copy();
        newVector.setEntry(i - 1, newVector.getEntry(i - 1) + componentScales.getEntry(i - 1));
        initialSimplex.setRowVector(i, newVector);
    }

    return initialSimplex;
}

From source file:gamlss.utilities.WLSMultipleLinearRegression.java

/**
 * Applies the weights to the rows of the design matrix in place and sets
 * the weighted sample data without copying the original matrix.
 *
 * @param y
 * @param x
 * @param w
 */
private void newSampleDataNoCopy(ArrayRealVector y, RealMatrix x, ArrayRealVector w) {
    for (int row = 0; row < x.getRowDimension(); row++) {
        x.setRowVector(row, x.getRowVector(row).mapMultiplyToSelf(w.getEntry(row)));
    }
    //double[][] xw=x.getData();
    //double[] yw= y.ebeMultiply(w).getDataRef();
    //validateSampleData(xw, yw); //we have already checked this in the gamlss algorithm. 
    newYSampleData(y.ebeMultiply(w));
    newXSampleData(x.getData(), w);
}

From source file:gamlss.utilities.WLSMultipleLinearRegression.java

/**
 * Keeps copies of the original response vector and design matrix (needed for
 * fitted values and residuals), then applies the weights to the rows of x and
 * sets the weighted sample data.
 *
 * @param y
 * @param x
 * @param w
 */
private void newSampleDataCopyOriginal(ArrayRealVector y, RealMatrix x, ArrayRealVector w) {
    this.y = y.copy(); //we need this for the fitted values and residuals.
    if (this.isNoIntercept()) {
        this.X = x.copy();

    } else {
        setDesignMatrix(x);//add 1 for the Intercept;
    }
    for (int row = 0; row < x.getRowDimension(); row++) {
        x.setRowVector(row, x.getRowVector(row).mapMultiplyToSelf(w.getEntry(row)));
    }
    //double[][] xw=x.getData();
    //double[] yw= y.ebeMultiply(w).getDataRef();
    //validateSampleData(xw, yw); //we have already checked this in the gamlss algorithm. 
    newYSampleData(y.ebeMultiply(w));
    newXSampleData(x.getData(), w);
}

From source file:edu.stanford.cfuller.imageanalysistools.fitting.DifferentialEvolutionMinimizer.java

/**
 * Performs a minimization of a function starting with an single initial guess; a full population is generated based on this guess and the parameter bounds.
 *
 * @param f                     The function to be minimized.
 * @param parameterLowerBounds  The lower bounds of each parameter.  Generated parameter values less than these values will be discarded.
 * @param parameterUpperBounds  The upper bounds of each parameter.  Generated parameter values greater than these values will be discarded.
 * @param populationSize        The size of the population of parameters sets to use for optimization.
 * @param scaleFactor           Factor controlling the scale of crossed over entries during crossover events.
 * @param maxIterations         The maximum number of iterations to allow before returning a result.
 * @param crossoverFrequency    The frequency of crossover from 0 to 1.  At any given parameter, this is the probability of initiating a crossover as well as the probability of ending one after it has started.
 * @param tol                   Relative function value tolerance controlling termination; if the maximal and minimal population values differ by less than this factor times the maximal value, optimization will terminate.
 * @param initialGuess          A guess at the value of each parameter at the optimum.
 * @return                      The parameter values at the minimal function value found.
 */
public RealVector minimizeWithInitial(ObjectiveFunction f, RealVector parameterLowerBounds,
        RealVector parameterUpperBounds, int populationSize, double scaleFactor, int maxIterations,
        double crossoverFrequency, double tol, RealVector initialGuess) {

    int numberOfParameters = parameterLowerBounds.getDimension();

    //generate the initial population

    RealMatrix population = new Array2DRowRealMatrix(populationSize, numberOfParameters);

    // Row 0 is reserved for the initial guess, which is set after this loop.
    for (int i = 1; i < populationSize; i++) {

        for (int j = 0; j < numberOfParameters; j++) {

            population.setEntry(i, j,
                    RandomGenerator.rand()
                            * (parameterUpperBounds.getEntry(j) - parameterLowerBounds.getEntry(j))
                            + parameterLowerBounds.getEntry(j));

        }
    }

    population.setRowVector(0, initialGuess);

    return minimizeWithPopulation(population, f, parameterLowerBounds, parameterUpperBounds, populationSize,
            scaleFactor, maxIterations, crossoverFrequency, tol);

}

From source file:edu.stanford.cfuller.colocalization3d.correction.Correction.java

/**
 * Applies an existing correction to a single x-y position in the Image plane.
 *
 * @param x     The x-position at which to apply the correction.
 * @param y     The y-position at which to apply the correction.
 * @return      A RealVector containing 3 elements-- the magnitude of the correction in the x, y, and z dimensions, in that order.
 */
public RealVector correctPosition(double x, double y) throws UnableToCorrectException {

    RealVector corrections = new ArrayRealVector(3, 0.0);

    RealVector distsToCentroids = this.getPositionsForCorrection().getColumnVector(0).mapSubtract(x)
            .mapToSelf(new Power(2));
    distsToCentroids = distsToCentroids
            .add(this.getPositionsForCorrection().getColumnVector(1).mapSubtract(y).mapToSelf(new Power(2)));
    distsToCentroids.mapToSelf(new Sqrt());

    RealVector distRatio = distsToCentroids.ebeDivide(this.getDistanceCutoffs());

    RealVector distRatioBin = new ArrayRealVector(distRatio.getDimension(), 0.0);

    for (int i = 0; i < distRatio.getDimension(); i++) {
        if (distRatio.getEntry(i) <= 1)
            distRatioBin.setEntry(i, 1.0);
    }

    RealVector weights = distRatio.map(new Power(2.0)).mapMultiplyToSelf(-3).mapAddToSelf(1)
            .add(distRatio.map(new Power(3.0)).mapMultiplyToSelf(2));

    weights = weights.ebeMultiply(distRatioBin);

    double sumWeights = 0;

    int countWeights = 0;

    for (int i = 0; i < weights.getDimension(); i++) {
        if (weights.getEntry(i) > 0) {
            sumWeights += weights.getEntry(i);
            countWeights++;
        }
    }

    if (countWeights == 0) { // this means there were no points in the correction dataset near the position being corrected.
        throw (new UnableToCorrectException(
                "Incomplete coverage in correction dataset at (x,y) = (" + x + ", " + y + ")."));
    }

    RealMatrix cX = new Array2DRowRealMatrix(countWeights, this.getCorrectionX().getColumnDimension());
    RealMatrix cY = new Array2DRowRealMatrix(countWeights, this.getCorrectionX().getColumnDimension());
    RealMatrix cZ = new Array2DRowRealMatrix(countWeights, this.getCorrectionX().getColumnDimension());

    RealVector xVec = new ArrayRealVector(countWeights, 0.0);
    RealVector yVec = new ArrayRealVector(countWeights, 0.0);

    RealVector keptWeights = new ArrayRealVector(countWeights, 0.0);

    int keptCounter = 0;

    for (int i = 0; i < weights.getDimension(); i++) {
        if (weights.getEntry(i) > 0) {

            cX.setRowVector(keptCounter, this.getCorrectionX().getRowVector(i));
            cY.setRowVector(keptCounter, this.getCorrectionY().getRowVector(i));
            cZ.setRowVector(keptCounter, this.getCorrectionZ().getRowVector(i));

            xVec.setEntry(keptCounter, x - this.getPositionsForCorrection().getEntry(i, 0));
            yVec.setEntry(keptCounter, y - this.getPositionsForCorrection().getEntry(i, 1));

            keptWeights.setEntry(keptCounter, weights.getEntry(i));

            keptCounter++;
        }
    }

    double xCorr = 0;
    double yCorr = 0;
    double zCorr = 0;

    RealMatrix allCorrectionParameters = new Array2DRowRealMatrix(countWeights, numberOfCorrectionParameters);

    RealVector ones = new ArrayRealVector(countWeights, 1.0);

    allCorrectionParameters.setColumnVector(0, ones);
    allCorrectionParameters.setColumnVector(1, xVec);
    allCorrectionParameters.setColumnVector(2, yVec);
    allCorrectionParameters.setColumnVector(3, xVec.map(new Power(2)));
    allCorrectionParameters.setColumnVector(4, yVec.map(new Power(2)));
    allCorrectionParameters.setColumnVector(5, xVec.ebeMultiply(yVec));

    for (int i = 0; i < countWeights; i++) {

        xCorr += allCorrectionParameters.getRowVector(i).dotProduct(cX.getRowVector(i))
                * keptWeights.getEntry(i);
        yCorr += allCorrectionParameters.getRowVector(i).dotProduct(cY.getRowVector(i))
                * keptWeights.getEntry(i);
        zCorr += allCorrectionParameters.getRowVector(i).dotProduct(cZ.getRowVector(i))
                * keptWeights.getEntry(i);

    }

    xCorr /= sumWeights;
    yCorr /= sumWeights;
    zCorr /= sumWeights;

    corrections.setEntry(0, xCorr);
    corrections.setEntry(1, yCorr);
    corrections.setEntry(2, zCorr);

    return corrections;
}

From source file:lirmm.inria.fr.math.BigSparseRealMatrixTest.java

@Test
public void testSetRowVector() {
    RealMatrix m = new BigSparseRealMatrix(subTestData);
    RealVector mRow3 = new ArrayRealVector(subRow3[0]);
    Assert.assertNotSame(mRow3, m.getRowVector(0));
    m.setRowVector(0, mRow3);
    Assert.assertEquals(mRow3, m.getRowVector(0));
    try {
        m.setRowVector(-1, mRow3);
        Assert.fail("Expecting OutOfRangeException");
    } catch (OutOfRangeException ex) {
        // expected
    }
    try {
        m.setRowVector(0, new ArrayRealVector(5));
        Assert.fail("Expecting MatrixDimensionMismatchException");
    } catch (MatrixDimensionMismatchException ex) {
        // expected
    }
}