Example usage for org.apache.commons.math3.linear RealMatrix operate


Introduction

On this page you can find example usage for org.apache.commons.math3.linear RealMatrix operate.

Prototype

RealVector operate(RealVector v) throws DimensionMismatchException;

Source Link

Document

Returns the result of multiplying this by the vector v.
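
Here is a minimal, self-contained sketch of the call before the project examples listed under Usage (the class name OperateDemo and the numeric values are illustrative, not taken from any of those projects):

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.RealVector;

public class OperateDemo {
    public static void main(String[] args) {
        RealMatrix m = new Array2DRowRealMatrix(new double[][] { { 1, 2 }, { 3, 4 } });
        RealVector v = new ArrayRealVector(new double[] { 5, 6 });
        // operate multiplies the matrix by the vector: [1*5 + 2*6, 3*5 + 4*6] = [17, 39]
        RealVector result = m.operate(v);
        System.out.println(result);
    }
}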

Usage

From source file:com.github.thorbenlindhauer.factor.CanonicalGaussianFactor.java

/**
 * Transforms a conditional linear Gaussian (i.e. a Gaussian of the form N(x; a + B^T * Y, C)) into canonical form.
 *
 * @param meanVector a
 * @param weightMatrix B
 * @param covarianceMatrix C
 */
public static CanonicalGaussianFactor fromConditionalForm(Scope scope, Scope conditioningScope,
        RealVector meanVector, RealMatrix covarianceMatrix, RealMatrix weightMatrix) {

    // TODO: perform cardinality checks etc.

    // the following assumes that the resulting precision matrix (and mean vector) can be restructured as follows:
    // ( SUBMATRIX_XX SUBMATRIX_XY )
    // ( SUBMATRIX_YX SUBMATRIX_YY )
    // where X indicates variables that are part of the prediction scope and Y are variables being part of the conditioning scope

    // assuming
    //   meanVector: a; |x| vector
    //   covarianceMatrix: C; |x| cross |x| matrix
    //   weightMatrix: B;  |x| cross |y| matrix

    // XX = C^-1
    // XY = -C^-1 * B
    // YX = -B^T * C^-1
    // YY = B^T * C^-1 * B

    MathUtil mathUtil = new MathUtil(covarianceMatrix);

    // C^(-1)
    RealMatrix xxMatrix = null;

    xxMatrix = mathUtil.invert();

    //    if (!mathUtil.isZeroMatrix()) {
    //      xxMatrix = mathUtil.invert();
    //    } else {
    //
    //      // this is a special case for convolution in which the "summing" variable has no variance itself
    //      // although a 0 variance is not valid in general
    //      xxMatrix = covarianceMatrix;
    //    }

    // B^T * C^(-1)
    RealMatrix weightedXXMatrix = weightMatrix.transpose().multiply(xxMatrix);

    // -B^T * C^(-1)
    RealMatrix yxMatrix = weightedXXMatrix.scalarMultiply(-1.0d);

    // -C^(-1)^T * B
    RealMatrix xyMatrix = xxMatrix.transpose().multiply(weightMatrix).scalarMultiply(-1.0d);

    // B^T * C^(-1) * B
    RealMatrix yyMatrix = weightedXXMatrix.multiply(weightMatrix);

    // K
    RealMatrix conditionedPrecisionMatrix = new Array2DRowRealMatrix(scope.size(), scope.size());

    // Matrix to generate h
    RealMatrix conditionedMeanTransformationMatrix = new Array2DRowRealMatrix(scope.size(), scope.size());

    Scope predictionScope = scope.reduceBy(conditioningScope);
    int[] predictionMapping = scope.createContinuousVariableMapping(predictionScope);
    int[] conditioningMapping = scope.createContinuousVariableMapping(conditioningScope);

    for (int i = 0; i < scope.size(); i++) {
        RealVector precisionColumn = conditionedPrecisionMatrix.getColumnVector(i);

        if (predictionMapping[i] >= 0) {
            precisionColumn = precisionColumn.add(
                    padVector(xxMatrix.getColumnVector(predictionMapping[i]), scope.size(), predictionMapping));

            conditionedMeanTransformationMatrix.setColumnVector(i, precisionColumn);

            precisionColumn = precisionColumn.add(padVector(yxMatrix.getColumnVector(predictionMapping[i]),
                    scope.size(), conditioningMapping));

            conditionedPrecisionMatrix.setColumnVector(i, precisionColumn);
        }

        if (conditioningMapping[i] >= 0) {
            precisionColumn = precisionColumn.add(padVector(xyMatrix.getColumnVector(conditioningMapping[i]),
                    scope.size(), predictionMapping));

            conditionedMeanTransformationMatrix.setColumnVector(i, precisionColumn);

            precisionColumn = precisionColumn.add(padVector(yyMatrix.getColumnVector(conditioningMapping[i]),
                    scope.size(), conditioningMapping));

            conditionedPrecisionMatrix.setColumnVector(i, precisionColumn);
        }
    }

    // h = (a, 0)^T * (XX, XY; 0, 0)
    RealMatrix scaledMeanMatrix = new Array2DRowRealMatrix(1, scope.size());
    scaledMeanMatrix.setRowVector(0, padVector(meanVector, scope.size(), predictionMapping));

    scaledMeanMatrix = scaledMeanMatrix.multiply(conditionedMeanTransformationMatrix);
    RealVector scaledMeanVector = scaledMeanMatrix.getRowVector(0);

    // g = -0.5 * a^T * C^-1 * a - log((2 * PI)^(m/2) * det(C)^0.5) where m is the size of the prediction scope
    RealMatrix meanMatrix = new Array2DRowRealMatrix(predictionScope.size(), 1);
    meanMatrix.setColumnVector(0, meanVector);
    double normalizationConstant = -0.5d * meanVector.dotProduct(xxMatrix.operate(meanVector)) - Math.log(
            Math.pow(2 * Math.PI, (double) predictionScope.size() / 2.0d) * Math.sqrt(mathUtil.determinant()));

    return new CanonicalGaussianFactor(scope, conditionedPrecisionMatrix, scaledMeanVector,
            normalizationConstant);

}
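
For reference, the block formulas spelled out in the comments above can be collected in one place (a restatement of the snippet's own comments and code, not an independent derivation; X denotes the prediction variables, Y the conditioning variables, and m the prediction scope size):

K = (  C^-1           -C^-1 * B        )
    ( -B^T * C^-1      B^T * C^-1 * B  )

h = ( C^-1 * a,  -B^T * C^-1 * a )^T

g = -0.5 * a^T * C^-1 * a - log((2 * PI)^(m/2) * det(C)^0.5)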

From source file:com.winterwell.maths.stats.algorithms.KalmanFilterTest.java

@Test
public void testConstantAcceleration() {
    // simulates a vehicle accelerating at a constant rate (0.1 m/s^2)

    // discrete time interval
    double dt = 0.1d;
    // position measurement noise (meter)
    double measurementNoise = 10d;
    // acceleration noise (meter/sec^2)
    double accelNoise = 0.2d;

    // A = [ 1 dt ]
    //     [ 0  1 ]
    RealMatrix A = new Array2DRowRealMatrix(new double[][] { { 1, dt }, { 0, 1 } });

    // B = [ dt^2/2 ]
    //     [ dt     ]
    RealMatrix Bnull = new Array2DRowRealMatrix(new double[][] { { FastMath.pow(dt, 2d) / 2d }, { dt } });

    // H = [ 1 0 ]
    RealMatrix H = new Array2DRowRealMatrix(new double[][] { { 1d, 0d } });

    // x = [ 0 0 ]
    RealVector x = new ArrayRealVector(new double[] { 0, 0 });

    RealMatrix tmp = new Array2DRowRealMatrix(
            new double[][] { { FastMath.pow(dt, 4d) / 4d, FastMath.pow(dt, 3d) / 2d },
                    { FastMath.pow(dt, 3d) / 2d, FastMath.pow(dt, 2d) } });

    // Q = [ dt^4/4 dt^3/2 ]
    //     [ dt^3/2 dt^2   ]
    RealMatrix Q = tmp.scalarMultiply(FastMath.pow(accelNoise, 2));

    // P0 = [ 1 1 ]
    //      [ 1 1 ]
    RealMatrix P0 = new Array2DRowRealMatrix(new double[][] { { 1, 1 }, { 1, 1 } });

    // R = [ measurementNoise^2 ]
    RealMatrix R = new Array2DRowRealMatrix(new double[] { FastMath.pow(measurementNoise, 2) });

    // constant control input, increase velocity by 0.1 m/s per cycle
    double uv = 0.1d * dt;
    RealVector u = new ArrayRealVector(new double[] { uv * uv / 2, uv });

    ProcessModel pm = new DefaultProcessModel(A, Bnull, Q, x, P0);
    MeasurementModel mm = new DefaultMeasurementModel(H, R);
    KalmanFilter filter = new KalmanFilter(pm, mm);

    Assert.assertEquals(1, filter.getMeasurementDimension());
    Assert.assertEquals(2, filter.getStateDimension());

    Gaussian state = new Gaussian(mtj(x), mtj(P0));
    MatrixUtils.equals(mtj(P0), state.getCovar());

    // check the initial state
    double[] expectedInitialState = new double[] { 0.0, 0.0 };
    //        assertVectorEquals(expectedInitialState, filter.getStateEstimation());

    RandomGenerator rand = new JDKRandomGenerator();

    RealVector tmpPNoise = new ArrayRealVector(new double[] { FastMath.pow(dt, 2d) / 2d, dt });

    // iterate 60 steps
    for (int i = 0; i < 60; i++) {
        state = filter.predict(state, mtj(u));

        // Simulate the process
        RealVector pNoise = tmpPNoise.mapMultiply(accelNoise * rand.nextGaussian());

        // x = A * x + B * u + pNoise
        x = A.operate(x).add(u).add(pNoise);

        // Simulate the measurement
        double mNoise = measurementNoise * rand.nextGaussian();

        // z = H * x + m_noise
        RealVector z = H.operate(x).mapAdd(mNoise);

        state = filter.correct(state, mtj(z));

        // error of the state estimate shouldn't be larger than the measurement noise
        double diff = FastMath.abs(x.getEntry(0) - state.getMean().get(0));
        Assert.assertTrue(Precision.compareTo(diff, measurementNoise, 1e-6) < 0);
    }

    // error covariance of the velocity should be already very low (< 0.1)
    Assert.assertTrue(Precision.compareTo(state.getCovar().get(1, 1), 0.1d, 1e-6) < 0);
}

From source file:com.joptimizer.optimizers.LPPrimalDualMethodNetlibTest.java

private void checkSolution(LPNetlibProblem problem, LPOptimizationRequest or, LPOptimizationResponse response)
        throws Exception {

    double expectedvalue = problem.optimalValue;
    log.debug("expectedvalue : " + expectedvalue);
    double[] sol = response.getSolution();
    RealVector cVector = new ArrayRealVector(or.getC().toArray());
    RealVector solVector = new ArrayRealVector(sol);
    double value = cVector.dotProduct(solVector);
    log.debug("sol   : " + ArrayUtils.toString(sol));
    log.debug("value : " + value);

    //check constraints
    assertEquals(or.getLb().size(), sol.length);
    assertEquals(or.getUb().size(), sol.length);
    RealVector x = MatrixUtils.createRealVector(sol);

    //x >= lb
    double maxLbmx = -Double.MAX_VALUE;
    for (int i = 0; i < or.getLb().size(); i++) {
        double di = Double.isNaN(or.getLb().getQuick(i)) ? -Double.MAX_VALUE : or.getLb().getQuick(i);
        maxLbmx = Math.max(maxLbmx, di - x.getEntry(i));
        assertTrue(di <= x.getEntry(i) + or.getTolerance());
    }
    log.debug("max(lb - x): " + maxLbmx);

    //x <= ub
    double maxXmub = -Double.MAX_VALUE;
    for (int i = 0; i < or.getUb().size(); i++) {
        double di = Double.isNaN(or.getUb().getQuick(i)) ? Double.MAX_VALUE : or.getUb().getQuick(i);
        maxXmub = Math.max(maxXmub, x.getEntry(i) - di);
        assertTrue(di + or.getTolerance() >= x.getEntry(i));
    }
    log.debug("max(x - ub): " + maxXmub);

    //G.x <= h
    if (or.getG() != null && or.getG().rows() > 0) {
        RealMatrix GMatrix = MatrixUtils.createRealMatrix(or.getG().toArray());
        RealVector hvector = MatrixUtils.createRealVector(or.getH().toArray());
        RealVector Gxh = GMatrix.operate(x).subtract(hvector);
        double maxGxh = -Double.MAX_VALUE;
        for (int i = 0; i < Gxh.getDimension(); i++) {
            maxGxh = Math.max(maxGxh, Gxh.getEntry(i));
            assertTrue(Gxh.getEntry(i) - or.getTolerance() <= 0);
        }
        log.debug("max(G.x - h): " + maxGxh);
    }

    //A.x = b
    if (or.getA() != null && or.getA().rows() > 0) {
        RealMatrix AMatrix = MatrixUtils.createRealMatrix(or.getA().toArray());
        RealVector bvector = MatrixUtils.createRealVector(or.getB().toArray());
        RealVector Axb = AMatrix.operate(x).subtract(bvector);
        double norm = Axb.getNorm();
        log.debug("||A.x -b||: " + norm);
        assertEquals(0., norm, or.getToleranceFeas());
    }

    double percDelta = Math.abs((expectedvalue - value) / expectedvalue);
    log.debug("percDelta: " + percDelta);
    //assertEquals(0., percDelta, or.getTolerance());
    //assertEquals(expectedvalue, value, or.getTolerance());
    assertTrue(value < expectedvalue + or.getTolerance());//can even beat other optimizers! the rebel yell...
}

From source file:edu.oregonstate.eecs.mcplan.abstraction.Experiments.java

private static void writeClustering(final MetricConstrainedKMeans kmeans, final File root, final int iter)
        throws FileNotFoundException {
    Csv.write(new PrintStream(new File(root, "M" + iter + ".csv")), kmeans.metric);
    {
        final Csv.Writer writer = new Csv.Writer(new PrintStream(new File(root, "mu" + iter + ".csv")));
        for (int i = 0; i < kmeans.d; ++i) {
            for (int j = 0; j < kmeans.k; ++j) {
                writer.cell(kmeans.mu()[j].getEntry(i));
            }
            writer.newline();
        }
    }
    // Lt.operate( x ) maps x to the space defined by the metric
    final RealMatrix Lt = new CholeskyDecomposition(kmeans.metric).getLT();
    {
        final Csv.Writer writer = new Csv.Writer(new PrintStream(new File(root, "X" + iter + ".csv")));
        writer.cell("cluster").cell("label");
        for (int i = 0; i < kmeans.metric.getColumnDimension(); ++i) {
            writer.cell("x" + i);
        }
        for (int i = 0; i < kmeans.metric.getColumnDimension(); ++i) {
            writer.cell("Ax" + i);
        }
        writer.newline();
        for (int cluster = 0; cluster < kmeans.k; ++cluster) {
            for (int i = 0; i < kmeans.N; ++i) {
                if (kmeans.assignments()[i] == cluster) {
                    writer.cell(cluster);
                    final RealVector phi = kmeans.X_.get(i); //Phi.get( i );
                    writer.cell("?"); // TODO: write label
                    for (int j = 0; j < phi.getDimension(); ++j) {
                        writer.cell(phi.getEntry(j));
                    }
                    final RealVector trans = Lt.operate(phi);
                    for (int j = 0; j < trans.getDimension(); ++j) {
                        writer.cell(trans.getEntry(j));
                    }
                    writer.newline();
                }
            }
        }
    }
}
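
The comment in the snippet above notes that Lt.operate(x) maps x to the space defined by the metric. Here is a minimal sketch of that property (the helper name metricMappingSketch is hypothetical, the matrix and vector values are illustrative, and the usual org.apache.commons.math3.linear imports are assumed): for a symmetric positive-definite metric M = L * L^T, the squared Euclidean norm of Lt.operate(x) equals the quadratic form x^T * M * x.

private static void metricMappingSketch() {
    RealMatrix metric = new Array2DRowRealMatrix(new double[][] { { 4, 1 }, { 1, 3 } });
    // Lt is the upper-triangular Cholesky factor L^T of the metric
    RealMatrix Lt = new CholeskyDecomposition(metric).getLT();
    RealVector x = new ArrayRealVector(new double[] { 1, 2 });
    double quadraticForm = x.dotProduct(metric.operate(x));      // x^T M x  (= 20 here)
    double mappedNormSq = Math.pow(Lt.operate(x).getNorm(), 2);  // ||Lt x||^2 (= 20 here)
    // the two values agree up to rounding error
}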

From source file:com.joptimizer.optimizers.LPPresolverTest.java

private void doPresolving(double[] c, double[][] A, double[] b, double[] lb, double[] ub, double s,
        double[] expectedSolution, double expectedValue, double expectedTolerance) throws Exception {

    RealMatrix AMatrix = MatrixUtils.createRealMatrix(A);
    SingularValueDecomposition dec = new SingularValueDecomposition(AMatrix);
    int rankA = dec.getRank();
    log.debug("p: " + AMatrix.getRowDimension());
    log.debug("n: " + AMatrix.getColumnDimension());
    log.debug("rank: " + rankA);

    LPPresolver lpPresolver = new LPPresolver();
    lpPresolver.setNOfSlackVariables((short) s);
    lpPresolver.setExpectedSolution(expectedSolution);//this is just for test!
    //lpPresolver.setExpectedTolerance(expectedTolerance);//this is just for test!
    lpPresolver.presolve(c, A, b, lb, ub);
    int n = lpPresolver.getPresolvedN();
    double[] presolvedC = lpPresolver.getPresolvedC().toArray();
    double[][] presolvedA = lpPresolver.getPresolvedA().toArray();
    double[] presolvedB = lpPresolver.getPresolvedB().toArray();
    double[] presolvedLb = lpPresolver.getPresolvedLB().toArray();
    double[] presolvedUb = lpPresolver.getPresolvedUB().toArray();
    double[] presolvedYlb = lpPresolver.getPresolvedYlb().toArray();
    double[] presolvedYub = lpPresolver.getPresolvedYub().toArray();
    double[] presolvedZlb = lpPresolver.getPresolvedZlb().toArray();
    double[] presolvedZub = lpPresolver.getPresolvedZub().toArray();
    log.debug("n  : " + n);
    log.debug("presolvedC  : " + ArrayUtils.toString(presolvedC));
    log.debug("presolvedA  : " + ArrayUtils.toString(presolvedA));
    log.debug("presolvedB  : " + ArrayUtils.toString(presolvedB));
    log.debug("presolvedLb : " + ArrayUtils.toString(presolvedLb));
    log.debug("presolvedUb : " + ArrayUtils.toString(presolvedUb));
    log.debug("presolvedYlb: " + ArrayUtils.toString(presolvedYlb));
    log.debug("presolvedYub: " + ArrayUtils.toString(presolvedYub));
    log.debug("presolvedZlb: " + ArrayUtils.toString(presolvedZlb));
    log.debug("presolvedZub: " + ArrayUtils.toString(presolvedZub));

    //check objective function
    double delta = expectedTolerance;
    RealVector presolvedX = MatrixUtils.createRealVector(lpPresolver.presolve(expectedSolution));
    log.debug("presolved value: " + MatrixUtils.createRealVector(presolvedC).dotProduct(presolvedX));
    RealVector postsolvedX = MatrixUtils.createRealVector(lpPresolver.postsolve(presolvedX.toArray()));
    double value = MatrixUtils.createRealVector(c).dotProduct(postsolvedX);
    assertEquals(expectedValue, value, delta);

    //check postsolved constraints
    for (int i = 0; i < lb.length; i++) {
        double di = Double.isNaN(lb[i]) ? -Double.MAX_VALUE : lb[i];
        assertTrue(di <= postsolvedX.getEntry(i) + delta);
    }
    for (int i = 0; i < ub.length; i++) {
        double di = Double.isNaN(ub[i]) ? Double.MAX_VALUE : ub[i];
        assertTrue(di + delta >= postsolvedX.getEntry(i));
    }
    RealVector Axmb = AMatrix.operate(postsolvedX).subtract(MatrixUtils.createRealVector(b));
    assertEquals(0., Axmb.getNorm(), expectedTolerance);

    //check presolved constraints
    assertEquals(presolvedLb.length, presolvedX.getDimension());
    assertEquals(presolvedUb.length, presolvedX.getDimension());
    AMatrix = MatrixUtils.createRealMatrix(presolvedA);
    RealVector bvector = MatrixUtils.createRealVector(presolvedB);
    for (int i = 0; i < presolvedLb.length; i++) {
        double di = Double.isNaN(presolvedLb[i]) ? -Double.MAX_VALUE : presolvedLb[i];
        assertTrue(di <= presolvedX.getEntry(i) + delta);
    }
    for (int i = 0; i < presolvedUb.length; i++) {
        double di = Double.isNaN(presolvedUb[i]) ? Double.MAX_VALUE : presolvedUb[i];
        assertTrue(di + delta >= presolvedX.getEntry(i));
    }
    Axmb = AMatrix.operate(presolvedX).subtract(bvector);
    assertEquals(0., Axmb.getNorm(), expectedTolerance);

    //check rank(A): A must be p x n with rank(A) = p < n
    AMatrix = MatrixUtils.createRealMatrix(presolvedA);
    dec = new SingularValueDecomposition(AMatrix);
    rankA = dec.getRank();
    log.debug("p: " + AMatrix.getRowDimension());
    log.debug("n: " + AMatrix.getColumnDimension());
    log.debug("rank: " + rankA);
    assertEquals(AMatrix.getRowDimension(), rankA);
    assertTrue(rankA < AMatrix.getColumnDimension());
}

From source file:com.joptimizer.optimizers.LPPresolverNetlibTest.java

/**
 * Tests the presolving of a netlib problem.
 * If checkExpectedSolution is true, the presolving is checked step by step against
 * a previously known solution of the problem.
 * NOTE: this known solution might differ from the solution given by the presolver
 * (e.g. in the presence of a weakly dominated column, see {@link LPPresolver#removeDominatedColumns},
 * or if it was calculated with the simplex method,
 * or if the bounds are not exactly the same), so it is not always a good test.
 */
public void doTesting(String problemName, boolean checkExpectedSolution, double myExpectedTolerance)
        throws Exception {
    log.debug("doTesting: " + problemName);

    int s = (int) Utils.loadDoubleArrayFromFile("lp" + File.separator + "netlib" + File.separator + problemName
            + File.separator + "standardS.txt")[0];
    double[] c = Utils.loadDoubleArrayFromFile(
            "lp" + File.separator + "netlib" + File.separator + problemName + File.separator + "standardC.txt");
    double[][] A = Utils.loadDoubleMatrixFromFile(
            "lp" + File.separator + "netlib" + File.separator + problemName + File.separator + "standardA.csv",
            ",".charAt(0));
    double[] b = Utils.loadDoubleArrayFromFile(
            "lp" + File.separator + "netlib" + File.separator + problemName + File.separator + "standardB.txt");
    double[] lb = Utils.loadDoubleArrayFromFile("lp" + File.separator + "netlib" + File.separator + problemName
            + File.separator + "standardLB.txt");
    double[] ub = Utils.loadDoubleArrayFromFile("lp" + File.separator + "netlib" + File.separator + problemName
            + File.separator + "standardUB.txt");
    double[] expectedSolution = Utils.loadDoubleArrayFromFile("lp" + File.separator + "netlib" + File.separator
            + problemName + File.separator + "standardSolution.txt");
    double expectedValue = Utils.loadDoubleArrayFromFile("lp" + File.separator + "netlib" + File.separator
            + problemName + File.separator + "standardValue.txt")[0];
    double expectedTolerance = Utils.loadDoubleArrayFromFile("lp" + File.separator + "netlib" + File.separator
            + problemName + File.separator + "standardTolerance.txt")[0];

    //in order to compare with the Math results, we must have the same bounds
    for (int i = 0; i < lb.length; i++) {
        if (Double.isNaN(lb[i])) {
            lb[i] = -9999999d; //the same as the notebook value
        }
    }
    for (int i = 0; i < ub.length; i++) {
        if (Double.isNaN(ub[i])) {
            ub[i] = +9999999d; //the same as the notebook value
        }
    }

    RealMatrix AMatrix = MatrixUtils.createRealMatrix(A);
    RealVector bVector = MatrixUtils.createRealVector(b);
    //expectedTolerance = Math.max(expectedTolerance,   AMatrix.operate(MatrixUtils.createRealVector(expectedSolution)).subtract(bVector).getNorm());
    expectedTolerance = Math.max(1.e-9, expectedTolerance);

    // A must be p x n with rank(A) = p < n
    QRSparseFactorization qr = new QRSparseFactorization(new SparseDoubleMatrix2D(A));
    qr.factorize();
    log.debug("p        : " + AMatrix.getRowDimension());
    log.debug("n        : " + AMatrix.getColumnDimension());
    log.debug("full rank: " + qr.hasFullRank());

    LPPresolver lpPresolver = new LPPresolver();
    lpPresolver.setNOfSlackVariables((short) s);
    if (checkExpectedSolution) {
        lpPresolver.setExpectedSolution(expectedSolution);// this is just for test!
        lpPresolver.setExpectedTolerance(myExpectedTolerance);// this is just for test!
    }
    // lpPresolver.setAvoidFillIn(true);
    // lpPresolver.setZeroTolerance(1.e-13);
    lpPresolver.presolve(c, A, b, lb, ub);
    int n = lpPresolver.getPresolvedN();
    DoubleMatrix1D presolvedC = lpPresolver.getPresolvedC();
    DoubleMatrix2D presolvedA = lpPresolver.getPresolvedA();
    DoubleMatrix1D presolvedB = lpPresolver.getPresolvedB();
    DoubleMatrix1D presolvedLb = lpPresolver.getPresolvedLB();
    DoubleMatrix1D presolvedUb = lpPresolver.getPresolvedUB();
    DoubleMatrix1D presolvedYlb = lpPresolver.getPresolvedYlb();
    DoubleMatrix1D presolvedYub = lpPresolver.getPresolvedYub();
    DoubleMatrix1D presolvedZlb = lpPresolver.getPresolvedZlb();
    DoubleMatrix1D presolvedZub = lpPresolver.getPresolvedZub();
    log.debug("n  : " + n);
    if (log.isDebugEnabled() && n > 0) {
        log.debug("presolvedC  : " + ArrayUtils.toString(presolvedC.toArray()));
        log.debug("presolvedA  : " + ArrayUtils.toString(presolvedA.toArray()));
        log.debug("presolvedB  : " + ArrayUtils.toString(presolvedB.toArray()));
        log.debug("presolvedLb : " + ArrayUtils.toString(presolvedLb.toArray()));
        log.debug("presolvedUb : " + ArrayUtils.toString(presolvedUb.toArray()));
        log.debug("presolvedYlb: " + ArrayUtils.toString(presolvedYlb.toArray()));
        log.debug("presolvedYub: " + ArrayUtils.toString(presolvedYub.toArray()));
        log.debug("presolvedZlb: " + ArrayUtils.toString(presolvedZlb.toArray()));
        log.debug("presolvedZub: " + ArrayUtils.toString(presolvedZub.toArray()));
    }

    if (n == 0) {
        // deterministic problem
        double[] sol = lpPresolver.postsolve(new double[] {});
        assertEquals(expectedSolution.length, sol.length);
        for (int i = 0; i < sol.length; i++) {
            // log.debug("i: " + i);
            assertEquals(expectedSolution[i], sol[i], 1.e-9);
        }
    } else {

        Utils.writeDoubleArrayToFile(presolvedC.toArray(),
                "target" + File.separator + "presolvedC_" + problemName + ".txt");
        Utils.writeDoubleMatrixToFile(presolvedA.toArray(),
                "target" + File.separator + "presolvedA_" + problemName + ".csv");
        Utils.writeDoubleArrayToFile(presolvedB.toArray(),
                "target" + File.separator + "presolvedB_" + problemName + ".txt");
        Utils.writeDoubleArrayToFile(presolvedLb.toArray(),
                "target" + File.separator + "presolvedLB_" + problemName + ".txt");
        Utils.writeDoubleArrayToFile(presolvedUb.toArray(),
                "target" + File.separator + "presolvedUB_" + problemName + ".txt");

        // check objective function
        double delta = expectedTolerance;
        RealVector presolvedES = MatrixUtils.createRealVector(lpPresolver.presolve(expectedSolution));
        double presolvedEV = MatrixUtils.createRealVector(presolvedC.toArray()).dotProduct(presolvedES);// in general it is different from the optimal value
        log.debug("presolved expected value: " + presolvedEV);
        RealVector postsolvedES = MatrixUtils.createRealVector(lpPresolver.postsolve(presolvedES.toArray()));
        double postsolvedEV = MatrixUtils.createRealVector(c).dotProduct(postsolvedES);
        //assertEquals(expectedValue, postsolvedEV, delta);
        assertTrue(Math.abs((expectedValue - postsolvedEV) / expectedValue) < delta);

        // check postsolved constraints
        for (int i = 0; i < lb.length; i++) {
            double di = Double.isNaN(lb[i]) ? -Double.MAX_VALUE : lb[i];
            assertTrue(di <= postsolvedES.getEntry(i) + delta);
        }
        for (int i = 0; i < ub.length; i++) {
            double di = Double.isNaN(ub[i]) ? Double.MAX_VALUE : ub[i];
            assertTrue(di + delta >= postsolvedES.getEntry(i));
        }
        RealVector Axmb = AMatrix.operate(postsolvedES).subtract(bVector);
        assertEquals(0., Axmb.getNorm(), 1.5 * expectedTolerance);

        // check presolved constraints
        assertEquals(presolvedLb.size(), presolvedES.getDimension());
        assertEquals(presolvedUb.size(), presolvedES.getDimension());
        AMatrix = MatrixUtils.createRealMatrix(presolvedA.toArray());//reassigned to avoid memory consumption
        bVector = MatrixUtils.createRealVector(presolvedB.toArray());
        for (int i = 0; i < presolvedLb.size(); i++) {
            double di = Double.isNaN(presolvedLb.getQuick(i)) ? -Double.MAX_VALUE : presolvedLb.getQuick(i);
            assertTrue(di <= presolvedES.getEntry(i) + delta);
        }
        for (int i = 0; i < presolvedUb.size(); i++) {
            double di = Double.isNaN(presolvedUb.getQuick(i)) ? Double.MAX_VALUE : presolvedUb.getQuick(i);
            assertTrue(di + delta >= presolvedES.getEntry(i));
        }
        Axmb = AMatrix.operate(presolvedES).subtract(bVector);
        assertEquals(0., Axmb.getNorm(), 1.5 * expectedTolerance);

        //check for 0-rows
        List<Integer> zeroRows = new ArrayList<Integer>();
        for (int i = 0; i < presolvedA.rows(); i++) {
            boolean isNotZero = false;
            for (int j = 0; !isNotZero && j < presolvedA.columns(); j++) {
                isNotZero = Double.compare(0., presolvedA.getQuick(i, j)) != 0;
            }
            if (!isNotZero) {
                zeroRows.add(zeroRows.size(), i);
            }
        }
        if (!zeroRows.isEmpty()) {
            log.debug("All 0 entries in rows " + ArrayUtils.toString(zeroRows));
            fail();
        }

        //check for 0-columns
        List<Integer> zeroCols = new ArrayList<Integer>();
        for (int j = 0; j < presolvedA.columns(); j++) {
            boolean isNotZero = false;
            for (int i = 0; !isNotZero && i < presolvedA.rows(); i++) {
                isNotZero = Double.compare(0., presolvedA.getQuick(i, j)) != 0;
            }
            if (!isNotZero) {
                zeroCols.add(zeroCols.size(), j);
            }
        }
        if (!zeroCols.isEmpty()) {
            log.debug("All 0 entries in columns " + ArrayUtils.toString(zeroCols));
            fail();
        }

        // check rank(A): A must be p x n with rank(A) = p < n
        qr = new QRSparseFactorization(new SparseDoubleMatrix2D(presolvedA.toArray()));
        qr.factorize();
        boolean isFullRank = qr.hasFullRank();
        log.debug("p        : " + AMatrix.getRowDimension());
        log.debug("n        : " + AMatrix.getColumnDimension());
        log.debug("full rank: " + isFullRank);
        assertTrue(AMatrix.getRowDimension() < AMatrix.getColumnDimension());
        assertTrue(isFullRank);
    }
}

From source file:org.grouplens.samantha.modeler.reinforce.LinearUCB.java

public double predict(LearningInstance instance) {
    RealMatrix A = variableSpace.getMatrixVarByName(LinearUCBKey.A.get());
    RealVector B = variableSpace.getScalarVarByName(LinearUCBKey.B.get());
    RealMatrix invA = new LUDecomposition(A).getSolver().getInverse();
    RealVector theta = invA.operate(B);
    RealVector x = extractDenseVector(theta.getDimension(), instance);
    double bound = Math.sqrt(x.dotProduct(invA.operate(x)));
    double mean = x.dotProduct(theta);
    double pred = mean + alpha * bound;
    if (Double.isNaN(pred)) {
        logger.error("Prediction is NaN, model parameter A probably goes wrong.");
        pred = 0.0;//from  w ww .  j  a v a 2 s  . co m
    }
    return pred;
}
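
In equation form, the snippet computes the standard LinUCB score, using the code's own identifiers (B is the vector stored under LinearUCBKey.B):

theta = invA * B            (i.e. A^-1 * B)
pred  = x . theta + alpha * sqrt(x . (invA * x))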

From source file:org.orekit.frames.TransformTest.java

@Test
public void testLinear() {

    RandomGenerator random = new Well19937a(0x14f6411217b148d8l);
    for (int n = 0; n < 100; ++n) {
        Transform t = randomTransform(random);

        // build an equivalent linear transform by extracting raw translation/rotation
        RealMatrix linearA = MatrixUtils.createRealMatrix(3, 4);
        linearA.setSubMatrix(t.getRotation().getMatrix(), 0, 0);
        Vector3D rt = t.getRotation().applyTo(t.getTranslation());
        linearA.setEntry(0, 3, rt.getX());
        linearA.setEntry(1, 3, rt.getY());
        linearA.setEntry(2, 3, rt.getZ());

        // build an equivalent linear transform by observing transformed points
        RealMatrix linearB = MatrixUtils.createRealMatrix(3, 4);
        Vector3D p0 = t.transformPosition(Vector3D.ZERO);
        Vector3D pI = t.transformPosition(Vector3D.PLUS_I).subtract(p0);
        Vector3D pJ = t.transformPosition(Vector3D.PLUS_J).subtract(p0);
        Vector3D pK = t.transformPosition(Vector3D.PLUS_K).subtract(p0);
        linearB.setColumn(0, new double[] { pI.getX(), pI.getY(), pI.getZ() });
        linearB.setColumn(1, new double[] { pJ.getX(), pJ.getY(), pJ.getZ() });
        linearB.setColumn(2, new double[] { pK.getX(), pK.getY(), pK.getZ() });
        linearB.setColumn(3, new double[] { p0.getX(), p0.getY(), p0.getZ() });

        // both linear transforms should be equal
        Assert.assertEquals(0.0, linearB.subtract(linearA).getNorm(), 1.0e-15 * linearA.getNorm());

        for (int i = 0; i < 100; ++i) {
            Vector3D p = randomVector(1.0e3, random);
            Vector3D q = t.transformPosition(p);

            double[] qA = linearA.operate(new double[] { p.getX(), p.getY(), p.getZ(), 1.0 });
            Assert.assertEquals(q.getX(), qA[0], 1.0e-13 * p.getNorm());
            Assert.assertEquals(q.getY(), qA[1], 1.0e-13 * p.getNorm());
            Assert.assertEquals(q.getZ(), qA[2], 1.0e-13 * p.getNorm());

            double[] qB = linearB.operate(new double[] { p.getX(), p.getY(), p.getZ(), 1.0 });
            Assert.assertEquals(q.getX(), qB[0], 1.0e-10 * p.getNorm());
            Assert.assertEquals(q.getY(), qB[1], 1.0e-10 * p.getNorm());
            Assert.assertEquals(q.getZ(), qB[2], 1.0e-10 * p.getNorm());

        }

    }

}
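
Both 3x4 matrices encode the affine transform in homogeneous coordinates: with R = t.getRotation().getMatrix() and rt = R applied to t.getTranslation(), linearA is assembled as [ R | rt ], so

linearA * (p.x, p.y, p.z, 1)^T = R * p + rt

which the inner loop checks against t.transformPosition(p).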

From source file:rcdemo.math.ClosedHermiteSpline.java

public static RealMatrix generate(RealVector p) {
    int n = p.getDimension();
    RealMatrix A = MatrixUtils.createRealMatrix(n, n);
    RealMatrix B = MatrixUtils.createRealMatrix(n, n);
    for (int i = 0; i < n; i++) {
        int ip = (i + 1) % n;
        int im = (i - 1 + n) % n;
        A.setEntry(i, im, 2);
        A.setEntry(i, i, 8);
        A.setEntry(i, ip, 2);
        B.setEntry(i, im, -6);
        B.setEntry(i, ip, 6);
    }
    RealVector Bp = B.operate(p);
    DecompositionSolver solver = new CholeskyDecomposition(A).getSolver();
    RealVector m = solver.solve(Bp);
    RealMatrix a = MatrixUtils.createRealMatrix(4, n);
    for (int i = 0; i < n; i++) {
        int ip = (i + 1) % n;
        a.setEntry(0, i, p.getEntry(i));
        a.setEntry(1, i, m.getEntry(i));
        a.setEntry(2, i, p.getEntry(ip));
        a.setEntry(3, i, m.getEntry(ip));
    }
    return a;
}
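
Row i of the linear system A * m = B * p assembled above encodes the tangent condition of a closed (periodic) C2 cubic Hermite spline:

2 * m[i-1] + 8 * m[i] + 2 * m[i+1] = 6 * (p[i+1] - p[i-1]),   indices taken mod n,

i.e. the usual m[i-1] + 4 * m[i] + m[i+1] = 3 * (p[i+1] - p[i-1]) scaled by two. Solving it via the Cholesky decomposition of A yields the tangents m stored alongside the point values in the 4 x n coefficient matrix.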

From source file:shapeCompare.PairingTools.java

public static RealVector alignPointFromShape2toShape1(RealVector trans2toOrigin, RealVector trans,
        RealMatrix rot, RealVector point) {

    RealVector translatedToOriginPoint = point.add(trans2toOrigin.copy());
    RealVector rotatedPoint = rot.operate(translatedToOriginPoint);
    RealVector translatedBackPoint = rotatedPoint.subtract(trans2toOrigin);

    RealVector translatedToShape1 = translatedBackPoint.add(trans);

    return translatedToShape1;
}
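
Written out with the method's own identifiers, the helper applies the rigid alignment

translatedToShape1 = rot * (point + trans2toOrigin) - trans2toOrigin + trans

i.e. it translates the point to the rotation origin, rotates it with RealMatrix.operate, translates it back, and finally translates it onto shape 1.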