Example usage for org.apache.commons.math3.linear RealMatrix operate

Introduction

On this page you can find example usages of RealMatrix.operate from org.apache.commons.math3.linear.

Prototype

RealVector operate(RealVector v) throws DimensionMismatchException;

Document

Returns the result of multiplying this by the vector v.
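
Before the full examples below, here is a minimal sketch of the call using plain Commons Math types (the matrix and vector values are illustrative only):

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.RealVector;

public class OperateSketch {
    public static void main(String[] args) {
        // a 2x3 matrix times a vector of length 3 yields a vector of length 2
        RealMatrix m = new Array2DRowRealMatrix(new double[][] { { 1, 2, 3 }, { 4, 5, 6 } });
        RealVector v = new ArrayRealVector(new double[] { 1, 0, -1 });
        RealVector result = m.operate(v); // throws DimensionMismatchException if v has the wrong length
        System.out.println(result); // prints {-2; -2}
    }
}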

Usage

From source file:com.joptimizer.optimizers.LPStandardConverterTest.java

/**
 * Standardization (to the strictly standard form) of a problem in the form:
 * min(c) s.t.
 * A.x = b
 * lb <= x <= ub
 * 
 * This is the presolved (with JOptimizer) pilot4 netlib problem.
 * @TODO: the strict conversion is not yet ready.
 */
public void xxxtestCAbLbUb5Strict() throws Exception {
    log.debug("testCAbLbUb5Strict");

    String problemId = "5";

    double[] c = Utils.loadDoubleArrayFromFile(
            "lp" + File.separator + "standardization" + File.separator + "c" + problemId + ".txt");
    double[][] A = Utils.loadDoubleMatrixFromFile(
            "lp" + File.separator + "standardization" + File.separator + "A" + problemId + ".csv",
            ",".charAt(0));
    double[] b = Utils.loadDoubleArrayFromFile(
            "lp" + File.separator + "standardization" + File.separator + "b" + problemId + ".txt");
    double[] lb = Utils.loadDoubleArrayFromFile(
            "lp" + File.separator + "standardization" + File.separator + "lb" + problemId + ".txt");
    double[] ub = Utils.loadDoubleArrayFromFile(
            "lp" + File.separator + "standardization" + File.separator + "ub" + problemId + ".txt");
    double[] expectedSol = Utils.loadDoubleArrayFromFile(
            "lp" + File.separator + "standardization" + File.separator + "sol" + problemId + ".txt");
    double expectedValue = Utils.loadDoubleArrayFromFile(
            "lp" + File.separator + "standardization" + File.separator + "value" + problemId + ".txt")[0];
    double expectedTol = Utils.loadDoubleArrayFromFile(
            "lp" + File.separator + "standardization" + File.separator + "tolerance" + problemId + ".txt")[0];

    int nOfSlackVariables = 0;
    for (int i = 0; i < c.length; i++) {
        double lbi = lb[i];
        int lbCompare = Double.compare(lbi, 0.);
        if (lbCompare != 0 && !Double.isNaN(lbi)) {
            nOfSlackVariables++;
        }
        if (!Double.isNaN(ub[i])) {
            nOfSlackVariables++;
        }
    }
    int expectedS = nOfSlackVariables;

    //standard form conversion
    boolean strictlyStandardForm = true;
    LPStandardConverter lpConverter = new LPStandardConverter(strictlyStandardForm);
    lpConverter.toStandardForm(c, null, null, A, b, lb, ub);

    int n = lpConverter.getStandardN();
    int s = lpConverter.getStandardS();
    c = lpConverter.getStandardC().toArray();
    A = lpConverter.getStandardA().toArray();
    b = lpConverter.getStandardB().toArray();
    lb = lpConverter.getStandardLB().toArray();
    ub = (lpConverter.getStandardUB() == null) ? null : ub;
    log.debug("n : " + n);
    log.debug("s : " + s);
    log.debug("c : " + ArrayUtils.toString(c));
    log.debug("A : " + ArrayUtils.toString(A));
    log.debug("b : " + ArrayUtils.toString(b));
    log.debug("lb : " + ArrayUtils.toString(lb));
    //log.debug("ub : " + ArrayUtils.toString(ub));

    //check consistency
    assertEquals(expectedS, s);
    assertEquals(lb.length, n);
    assertTrue(ub == null);

    //check constraints
    RealMatrix AStandard = new Array2DRowRealMatrix(A);
    RealVector bStandard = new ArrayRealVector(b);
    double[] expectedStandardSol = lpConverter.getStandardComponents(expectedSol);
    RealVector expectedStandardSolVector = new ArrayRealVector(expectedStandardSol);

    for (int i = 0; i < expectedStandardSolVector.getDimension(); i++) {
        assertTrue(expectedStandardSolVector.getEntry(i) + 1.E-8 >= 0.);
    }

    RealVector Axmb = AStandard.operate(expectedStandardSolVector).subtract(bStandard);
    for (int i = 0; i < Axmb.getDimension(); i++) {
        assertEquals(0., Axmb.getEntry(i), expectedTol);
    }

    Utils.writeDoubleArrayToFile(new double[] { s },
            "target" + File.separator + "standardS_" + problemId + ".txt");
    Utils.writeDoubleArrayToFile(c, "target" + File.separator + "standardC_" + problemId + ".txt");
    Utils.writeDoubleMatrixToFile(A, "target" + File.separator + "standardA_" + problemId + ".csv");
    Utils.writeDoubleArrayToFile(b, "target" + File.separator + "standardB_" + problemId + ".txt");
    Utils.writeDoubleArrayToFile(lb, "target" + File.separator + "standardLB_" + problemId + ".txt");
    //ub is null Utils.writeDoubleArrayToFile(ub, "target" + File.separator   + "standardUB_"+problemId+".txt");
}

From source file:com.joptimizer.algebra.CholeskySparseFactorizationTest.java

/**
 * This test shows that the correct check of the inversion accuracy must be done with
 * the scaled residual, not with the simple norm ||A.x-b||
 */
public void testScaledResidual() throws Exception {
    log.debug("testScaledResidual");

    String matrixId = "1";
    double[][] A = Utils
            .loadDoubleMatrixFromFile("factorization" + File.separator + "matrix" + matrixId + ".csv");
    RealMatrix Q = MatrixUtils.createRealMatrix(A);
    int dim = Q.getRowDimension();

    RealVector b = new ArrayRealVector(new double[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 });

    CholeskySparseFactorization cs = new CholeskySparseFactorization(new SparseDoubleMatrix2D(Q.getData()));
    cs.factorize();
    RealVector x = new ArrayRealVector(cs.solve(F1.make(b.toArray())).toArray());

    //scaledResidual = ||Ax-b||_oo/( ||A||_oo . ||x||_oo + ||b||_oo )
    // with ||x||_oo = max(x[i])
    double residual = Utils.calculateScaledResidual(A, x.toArray(), b.toArray());
    log.debug("residual: " + residual);
    assertTrue(residual < Utils.getDoubleMachineEpsilon());

    //b - A.x
    //checking the simple norm, this will fail
    double n1 = b.subtract(Q.operate(x)).getNorm();
    log.debug("||b - A.x||: " + n1);
    //assertTrue(n1 < 1.E-8);
}
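
The scaled residual described in the comment above can also be computed directly with Commons Math norms. The following is a hedged sketch of what Utils.calculateScaledResidual presumably computes (the helper itself belongs to JOptimizer; this standalone version is an assumption based on the formula in the test):

import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.RealVector;

public class ScaledResidualSketch {
    // scaledResidual = ||A.x - b||_oo / (||A||_oo * ||x||_oo + ||b||_oo)
    static double scaledResidual(RealMatrix A, RealVector x, RealVector b) {
        double num = A.operate(x).subtract(b).getLInfNorm();
        // RealMatrix.getNorm() is the maximum absolute row sum, i.e. the induced infinity norm
        double den = A.getNorm() * x.getLInfNorm() + b.getLInfNorm();
        return num / den;
    }
}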

From source file:com.joptimizer.optimizers.LPStandardConverterTest.java

/**
 * Standardization (to the strictly standard form) of a problem in the form:
 * min(c) s.t.
 * G.x < h
 * A.x = b
 * lb <= x <= ub
 * @TODO: the strict conversion is not yet ready.
 */
public void xxxtestCGhAbLbUb1Strict() throws Exception {
    log.debug("testCGhAbLbUb1Strict");

    String problemId = "1";

    double[] c = Utils.loadDoubleArrayFromFile(
            "lp" + File.separator + "standardization" + File.separator + "c" + problemId + ".txt");
    double[][] G = Utils.loadDoubleMatrixFromFile(
            "lp" + File.separator + "standardization" + File.separator + "G" + problemId + ".csv",
            ",".charAt(0));
    double[] h = Utils.loadDoubleArrayFromFile(
            "lp" + File.separator + "standardization" + File.separator + "h" + problemId + ".txt");
    double[][] A = Utils.loadDoubleMatrixFromFile(
            "lp" + File.separator + "standardization" + File.separator + "A" + problemId + ".csv",
            ",".charAt(0));
    double[] b = Utils.loadDoubleArrayFromFile(
            "lp" + File.separator + "standardization" + File.separator + "b" + problemId + ".txt");
    double[] lb = Utils.loadDoubleArrayFromFile(
            "lp" + File.separator + "standardization" + File.separator + "lb" + problemId + ".txt");
    double[] ub = Utils.loadDoubleArrayFromFile(
            "lp" + File.separator + "standardization" + File.separator + "ub" + problemId + ".txt");
    double[] expectedSol = Utils.loadDoubleArrayFromFile(
            "lp" + File.separator + "standardization" + File.separator + "sol" + problemId + ".txt");
    double expectedValue = Utils.loadDoubleArrayFromFile(
            "lp" + File.separator + "standardization" + File.separator + "value" + problemId + ".txt")[0];
    double expectedTolerance = MatrixUtils.createRealMatrix(A)
            .operate(MatrixUtils.createRealVector(expectedSol)).subtract(MatrixUtils.createRealVector(b))
            .getNorm();

    int nOfSlackVariables = 0;
    for (int i = 0; i < c.length; i++) {
        double lbi = lb[i];
        int lbCompare = Double.compare(lbi, 0.);
        if (lbCompare != 0 && !Double.isNaN(lbi)) {
            nOfSlackVariables++;
        }
        if (!Double.isNaN(ub[i])) {
            nOfSlackVariables++;
        }
    }
    int expectedS = G.length + nOfSlackVariables;

    //standard form conversion
    boolean strictlyStandardForm = true;
    LPStandardConverter lpConverter = new LPStandardConverter(strictlyStandardForm);
    lpConverter.toStandardForm(c, G, h, A, b, lb, ub);

    int n = lpConverter.getStandardN();
    int s = lpConverter.getStandardS();
    c = lpConverter.getStandardC().toArray();
    A = lpConverter.getStandardA().toArray();
    b = lpConverter.getStandardB().toArray();
    lb = lpConverter.getStandardLB().toArray();
    ub = (lpConverter.getStandardUB() == null) ? null : ub;
    log.debug("n : " + n);
    log.debug("s : " + s);
    log.debug("c : " + ArrayUtils.toString(c));
    log.debug("A : " + ArrayUtils.toString(A));
    log.debug("b : " + ArrayUtils.toString(b));
    log.debug("lb : " + ArrayUtils.toString(lb));
    //log.debug("ub : " + ArrayUtils.toString(ub));

    //check consistency
    assertEquals(expectedS, s);
    assertEquals(lb.length, n);
    assertTrue(ub == null);

    //check constraints
    RealMatrix GOrig = new Array2DRowRealMatrix(G);
    RealVector hOrig = new ArrayRealVector(h);
    RealMatrix AStandard = new Array2DRowRealMatrix(A);
    RealVector bStandard = new ArrayRealVector(b);
    RealVector expectedSolVector = new ArrayRealVector(expectedSol);
    double[] expectedStandardSol = lpConverter.getStandardComponents(expectedSol);
    RealVector expectedStandardSolVector = new ArrayRealVector(expectedStandardSol);

    for (int i = 0; i < expectedStandardSolVector.getDimension(); i++) {
        assertTrue(expectedStandardSolVector.getEntry(i) >= 0.);
    }

    RealVector Axmb = AStandard.operate(expectedStandardSolVector).subtract(bStandard);
    assertEquals(0., Axmb.getNorm(), expectedTolerance);

    Utils.writeDoubleArrayToFile(new double[] { s },
            "target" + File.separator + "standardS_" + problemId + ".txt");
    Utils.writeDoubleArrayToFile(c, "target" + File.separator + "standardC_" + problemId + ".txt");
    Utils.writeDoubleMatrixToFile(A, "target" + File.separator + "standardA_" + problemId + ".csv");
    Utils.writeDoubleArrayToFile(b, "target" + File.separator + "standardB_" + problemId + ".txt");
    Utils.writeDoubleArrayToFile(lb, "target" + File.separator + "standardLB_" + problemId + ".txt");
    //ub is null Utils.writeDoubleArrayToFile(ub, "target" + File.separator   + "standardUB_"+problemId+".txt");
}

From source file:lirmm.inria.fr.math.BigSparseRealMatrixTest.java

/**
 * test issue MATH-209
 */
@Test
public void testMath209() {
    RealMatrix a = new BigSparseRealMatrix(new double[][] { { 1, 2 }, { 3, 4 }, { 5, 6 } });
    double[] b = a.operate(new double[] { 1, 1 });
    Assert.assertEquals(a.getRowDimension(), b.length);
    Assert.assertEquals(3.0, b[0], 1.0e-12);
    Assert.assertEquals(7.0, b[1], 1.0e-12);
    Assert.assertEquals(11.0, b[2], 1.0e-12);
}

From source file:lirmm.inria.fr.math.BigSparseRealMatrixTest.java

/**
 * test operate
 */
@Test
public void testOperate() {
    RealMatrix m = new BigSparseRealMatrix(id);
    TestUtils.assertEquals("identity operate", testVector, m.operate(testVector), entryTolerance);
    TestUtils.assertEquals("identity operate", testVector, m.operate(new ArrayRealVector(testVector)).toArray(),
            entryTolerance);
    m = new BigSparseRealMatrix(bigSingular);
    try {
        m.operate(testVector);
        Assert.fail("Expecting illegalArgumentException");
    } catch (MathIllegalArgumentException ex) {
        // ignored
    }
}
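
For reference, the dimension check declared in the prototype can be demonstrated with plain Commons Math types as well; this sketch uses made-up values and is independent of the test fixtures above:

import org.apache.commons.math3.exception.DimensionMismatchException;
import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.RealMatrix;

public class DimensionCheckSketch {
    public static void main(String[] args) {
        RealMatrix m = new Array2DRowRealMatrix(new double[][] { { 1, 2 }, { 3, 4 } });
        try {
            m.operate(new double[] { 1, 2, 3 }); // 3 entries against 2 columns
        } catch (DimensionMismatchException ex) {
            System.out.println("vector length must match the column dimension: " + ex.getMessage());
        }
    }
}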

From source file:com.datumbox.framework.machinelearning.regression.MatrixLinearRegression.java

@Override
protected void estimateModelParameters(Dataset trainingData) {

    ModelParameters modelParameters = knowledgeBase.getModelParameters();

    int n = trainingData.size();
    int d = trainingData.getColumnSize() + 1;//plus one for the constant

    //initialization
    modelParameters.setN(n);
    modelParameters.setD(d);

    Map<Object, Double> thitas = modelParameters.getThitas();
    Map<Object, Integer> featureIds = modelParameters.getFeatureIds();

    MatrixDataset matrixDataset = MatrixDataset.newInstance(trainingData, true, featureIds);

    RealVector Y = matrixDataset.getY();
    RealMatrix X = matrixDataset.getX();

    //(X'X)^-1
    RealMatrix Xt = X.transpose();
    LUDecomposition lud = new LUDecomposition(Xt.multiply(X));
    RealMatrix XtXinv = lud.getSolver().getInverse();
    lud = null;

    //(X'X)^-1 * X'Y
    RealVector coefficients = XtXinv.multiply(Xt).operate(Y);
    Xt = null;

    //put the features coefficients in the thita map
    thitas.put(Dataset.constantColumnName, coefficients.getEntry(0));
    for (Map.Entry<Object, Integer> entry : featureIds.entrySet()) {
        Object feature = entry.getKey();
        Integer featureId = entry.getValue();

        thitas.put(feature, coefficients.getEntry(featureId));
    }

    if (knowledgeBase.getTrainingParameters().getCalculatePvalue()) { //calculate them only if requested
        //get the predictions and subtract the Y vector. Sum the squared differences to get the error
        double SSE = 0.0;
        for (double v : X.operate(coefficients).subtract(Y).toArray()) {
            SSE += v * v;
        }
        Y = null;

        //standard error matrix
        double MSE = SSE / (n - d); //mean square error = SSE / dfResidual
        RealMatrix SE = XtXinv.scalarMultiply(MSE);
        XtXinv = null;

        //creating a flipped map of ids to features
        Map<Integer, Object> idsFeatures = PHPfunctions.array_flip(featureIds);

        Map<Object, Double> pvalues = new HashMap<>(); //This is not small, but it does not make sense to store it in the db
        for (int i = 0; i < d; ++i) {
            double error = SE.getEntry(i, i);
            Object feature = idsFeatures.get(i);
            if (error <= 0.0) {
                //double tstat = Double.MAX_VALUE;
                pvalues.put(feature, 0.0);
            } else {
                double tstat = coefficients.getEntry(i) / Math.sqrt(error);
                pvalues.put(feature, 1.0 - ContinuousDistributions.StudentsCdf(tstat, n - d)); //n-d degrees of freedom
            }
        }
        SE = null;
        coefficients = null;
        idsFeatures = null;
        matrixDataset = null;

        modelParameters.setFeaturePvalues(pvalues);
    }
}
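
The estimator above uses operate twice: once to apply (X'X)^-1 X' to Y for the coefficients, and once to compute the fitted values X.operate(coefficients) for the SSE. The same normal-equation steps can be reproduced with plain Commons Math types and toy data, without any Datumbox classes (a sketch, values illustrative):

import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.LUDecomposition;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.RealVector;

public class NormalEquationsSketch {
    public static void main(String[] args) {
        // design matrix with an intercept column and one feature
        RealMatrix X = MatrixUtils.createRealMatrix(new double[][] {
                { 1, 1 }, { 1, 2 }, { 1, 3 }, { 1, 4 } });
        RealVector Y = new ArrayRealVector(new double[] { 2.1, 3.9, 6.2, 7.8 });

        RealMatrix Xt = X.transpose();
        RealMatrix XtXinv = new LUDecomposition(Xt.multiply(X)).getSolver().getInverse();
        RealVector coefficients = XtXinv.multiply(Xt).operate(Y);   // (X'X)^-1 * X'Y

        RealVector residuals = X.operate(coefficients).subtract(Y); // fitted values minus Y
        System.out.println("coefficients: " + coefficients);
        System.out.println("SSE: " + residuals.dotProduct(residuals));
    }
}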

From source file:com.datumbox.framework.core.machinelearning.regression.MatrixLinearRegression.java

/** {@inheritDoc} */
@Override
protected void _fit(Dataframe trainingData) {
    ModelParameters modelParameters = knowledgeBase.getModelParameters();
    int n = trainingData.size();
    int d = trainingData.xColumnSize();

    Map<Object, Double> thitas = modelParameters.getThitas();
    Map<Object, Integer> featureIds = modelParameters.getFeatureIds();
    //Map<Integer, Integer> recordIdsReference = null;
    DataframeMatrix matrixDataset = DataframeMatrix.newInstance(trainingData, true, null, featureIds);

    RealVector Y = matrixDataset.getY();
    RealMatrix X = matrixDataset.getX();

    //(X'X)^-1
    RealMatrix Xt = X.transpose();
    LUDecomposition lud = new LUDecomposition(Xt.multiply(X));
    //W = (X'X)^-1 * X'Y
    RealMatrix XtXinv = lud.getSolver().getInverse();
    RealVector coefficients = XtXinv.multiply(Xt).operate(Y);
    // instead of matrix inversion calculate (X'X) * W = X'Y
    //RealVector coefficients = lud.getSolver().solve(Xt.operate(Y));
    //lud =null;

    //Xt = null;

    //put the features coefficients in the thita map
    thitas.put(Dataframe.COLUMN_NAME_CONSTANT, coefficients.getEntry(0));
    for (Map.Entry<Object, Integer> entry : featureIds.entrySet()) {
        Object feature = entry.getKey();
        Integer featureId = entry.getValue();

        thitas.put(feature, coefficients.getEntry(featureId));
    }

    //get the predictions and subtract the Y vector. Sum the squared differences to get the error
    double SSE = 0.0;
    for (double v : X.operate(coefficients).subtract(Y).toArray()) {
        SSE += v * v;
    }
    //Y = null;

    //standard error matrix
    double MSE = SSE / (n - (d + 1)); //mean square error = SSE / dfResidual
    RealMatrix SE = XtXinv.scalarMultiply(MSE);
    //XtXinv = null;

    //creating a flipped map of ids to features
    Map<Integer, Object> idsFeatures = PHPMethods.array_flip(featureIds);

    Map<Object, Double> pvalues = new HashMap<>(); //This is not small, but it does not make sense to store it in the storage
    for (int i = 0; i < (d + 1); ++i) {
        double error = SE.getEntry(i, i);
        Object feature = idsFeatures.get(i);
        if (error <= 0.0) {
            //double tstat = Double.MAX_VALUE;
            pvalues.put(feature, 0.0);
        } else {
            double tstat = coefficients.getEntry(i) / Math.sqrt(error);
            pvalues.put(feature, 1.0 - ContinuousDistributions.studentsCdf(tstat, n - (d + 1))); //n-d degrees of freedom
        }
    }
    //SE=null;
    //coefficients=null;
    //idsFeatures=null;
    //matrixDataset = null;

    modelParameters.setFeaturePvalues(pvalues);

}
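
The commented-out line in the method above hints at a numerically safer variant: solve the normal equations (X'X) W = X'Y directly instead of forming the inverse. A sketch of that idea with plain Commons Math types (the method name fit is illustrative, not part of the library):

import org.apache.commons.math3.linear.DecompositionSolver;
import org.apache.commons.math3.linear.LUDecomposition;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.RealVector;

public class SolveInsteadOfInvertSketch {
    static RealVector fit(RealMatrix X, RealVector Y) {
        RealMatrix Xt = X.transpose();
        // solve (X'X) * W = X'Y directly; no explicit inverse is formed
        DecompositionSolver solver = new LUDecomposition(Xt.multiply(X)).getSolver();
        return solver.solve(Xt.operate(Y));
    }
}

A QRDecomposition of X would work as well and avoids forming X'X altogether.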

From source file:com.joptimizer.optimizers.LPPrimalDualMethodTest.java

/**
 * Problem in the form
 * min(c.x) s.t.
 * G.x < h
 * The objective function of the presolved problem has a 0-gradient.
 */
public void testCGh4() throws Exception {
    log.debug("testCGh4");

    String problemId = "4";

    //the original problem: ok until precision 1.E-7
    double[] c = Utils.loadDoubleArrayFromFile("lp" + File.separator + "c" + problemId + ".txt");
    double[][] G = Utils.loadDoubleMatrixFromFile("lp" + File.separator + "G" + problemId + ".csv",
            ",".charAt(0));
    double[] h = Utils.loadDoubleArrayFromFile("lp" + File.separator + "h" + problemId + ".txt");
    double[] expectedSol = Utils.loadDoubleArrayFromFile("lp" + File.separator + "sol" + problemId + ".txt");
    double expectedValue = Utils
            .loadDoubleArrayFromFile("lp" + File.separator + "value" + problemId + ".txt")[0];

    //double norm = MatrixUtils.createRealMatrix(A).operate(MatrixUtils.createRealVector(expectedSol)).subtract(MatrixUtils.createRealVector(b)).getNorm();
    //assertTrue(norm < 1.e-10);

    LPOptimizationRequest or = new LPOptimizationRequest();
    or.setC(c);
    or.setG(G);
    or.setH(h);
    or.setCheckKKTSolutionAccuracy(true);
    or.setToleranceKKT(1.E-7);
    or.setToleranceFeas(1.E-6);
    or.setTolerance(1.E-5);
    or.setDumpProblem(true);
    or.setRescalingDisabled(true);//this fails with false

    //optimization
    //LPPrimalDualMethodOLD opt = new LPPrimalDualMethodOLD();
    LPPrimalDualMethod opt = new LPPrimalDualMethod();

    opt.setLPOptimizationRequest(or);
    int returnCode = opt.optimize();

    if (returnCode == OptimizationResponse.FAILED) {
        fail();
    }

    LPOptimizationResponse response = opt.getLPOptimizationResponse();
    double[] sol = response.getSolution();
    RealVector cVector = new ArrayRealVector(c);
    RealVector solVector = new ArrayRealVector(sol);
    double value = cVector.dotProduct(solVector);
    log.debug("sol   : " + ArrayUtils.toString(sol));
    log.debug("value : " + value);

    //check constraints
    RealVector x = MatrixUtils.createRealVector(sol);
    RealMatrix GMatrix = MatrixUtils.createRealMatrix(G);
    RealVector hvector = MatrixUtils.createRealVector(h);
    RealVector Gxh = GMatrix.operate(x).subtract(hvector);
    for (int i = 0; i < Gxh.getDimension(); i++) {
        assertTrue(Gxh.getEntry(i) <= 0);//not strictly because some constraint has been treated as a bound
    }
    //check value
    assertEquals(expectedValue, value, or.getTolerance());

}

From source file:com.joptimizer.optimizers.LPPrimalDualMethodTest.java

/**
 * Problem in the form
 * min(c.x) s.t.
 * G.x < h
 * A.x = b
 * 
 * This is the same as testCGhAbLbUb2, but lb and ub are into G.
 * The presolved problem has a deterministic solution, that is, all the variables have a fixed value.
 */
public void testCGhAb3() throws Exception {
    log.debug("testCGhAb3");

    String problemId = "3";

    log.debug("problemId: " + problemId);
    double[] c = Utils.loadDoubleArrayFromFile("lp" + File.separator + "c" + problemId + ".txt");
    double[][] G = Utils.loadDoubleMatrixFromFile("lp" + File.separator + "G" + problemId + ".csv",
            ",".charAt(0));
    double[] h = Utils.loadDoubleArrayFromFile("lp" + File.separator + "h" + problemId + ".txt");
    double[][] A = Utils.loadDoubleMatrixFromFile("lp" + File.separator + "A" + problemId + ".csv",
            ",".charAt(0));
    double[] b = Utils.loadDoubleArrayFromFile("lp" + File.separator + "b" + problemId + ".txt");
    double[] expectedSol = Utils.loadDoubleArrayFromFile("lp" + File.separator + "sol" + problemId + ".txt");
    double expectedvalue = Utils
            .loadDoubleArrayFromFile("lp" + File.separator + "value" + problemId + ".txt")[0];
    double minLb = 0;
    double maxUb = 1.0E15; //set this high because the elements of h are very large

    LPOptimizationRequest or = new LPOptimizationRequest();
    or.setC(c);
    or.setG(G);
    or.setH(h);
    or.setA(A);
    or.setB(b);
    or.setCheckKKTSolutionAccuracy(true);
    //or.setToleranceKKT(1.e-5);
    //      or.setToleranceFeas(5.E-5);
    //      or.setTolerance(1.E-7);
    or.setDumpProblem(true);
    //or.setPresolvingDisabled(true);

    //optimization
    LPPrimalDualMethod opt = new LPPrimalDualMethod(minLb, maxUb);

    opt.setLPOptimizationRequest(or);
    int returnCode = opt.optimize();

    if (returnCode == OptimizationResponse.FAILED) {
        fail();
    }

    LPOptimizationResponse response = opt.getLPOptimizationResponse();
    double[] sol = response.getSolution();
    RealVector cVector = new ArrayRealVector(c);
    RealVector solVector = new ArrayRealVector(sol);
    double value = cVector.dotProduct(solVector);
    log.debug("sol   : " + ArrayUtils.toString(sol));
    log.debug("value : " + value);

    //check constraints
    RealVector x = MatrixUtils.createRealVector(sol);
    RealMatrix GMatrix = MatrixUtils.createRealMatrix(G);
    RealVector hvector = MatrixUtils.createRealVector(h);
    RealMatrix AMatrix = MatrixUtils.createRealMatrix(A);
    RealVector bvector = MatrixUtils.createRealVector(b);
    RealVector Gxh = GMatrix.operate(x).subtract(hvector);
    for (int i = 0; i < Gxh.getDimension(); i++) {
        assertTrue(Gxh.getEntry(i) <= 0);
    }
    RealVector Axb = AMatrix.operate(x).subtract(bvector);
    assertEquals(0., Axb.getNorm(), or.getToleranceFeas());

    assertEquals(expectedSol.length, sol.length);
    //      for(int i=0; i<sol.length; i++){
    //         assertEquals(expectedSol[0], sol[0], or.getTolerance());
    //      }
    assertEquals(expectedvalue, value, or.getTolerance());

}
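
Both LPPrimalDualMethod tests repeat the same constraint-check pattern: wrap G, h, A, b in Commons Math objects and use operate to verify G.x <= h and A.x = b. The inline checks could be factored into a small helper along these lines (a sketch only; isFeasible is a hypothetical name, not part of JOptimizer):

import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealVector;

public class FeasibilitySketch {
    static boolean isFeasible(double[][] G, double[] h, double[][] A, double[] b,
                              double[] x, double tol) {
        RealVector xv = MatrixUtils.createRealVector(x);
        RealVector Gxh = MatrixUtils.createRealMatrix(G).operate(xv)
                .subtract(MatrixUtils.createRealVector(h));
        for (int i = 0; i < Gxh.getDimension(); i++) {
            if (Gxh.getEntry(i) > tol) {
                return false; // inequality G.x <= h violated
            }
        }
        RealVector Axb = MatrixUtils.createRealMatrix(A).operate(xv)
                .subtract(MatrixUtils.createRealVector(b));
        return Axb.getNorm() <= tol; // equality A.x = b within tolerance
    }
}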

From source file:com.joptimizer.optimizers.LPPrimalDualMethodTest.java

/**
 * Problem in the form
 * min(c.x) s.t.
 * G.x < h
 * A.x = b
 * 
 * This is good for testing with a small-size problem.
 * Submitted 01/09/2013 by Chris Myers.
 * 
 * @see JOptimizerTest#testCGhAb1()
 */
public void testCGhAb1() throws Exception {
    log.debug("testCGhAb1");

    String problemId = "1";

    //the original problem: ok until precision 1.E-7
    double[] c = Utils.loadDoubleArrayFromFile("lp" + File.separator + "c" + problemId + ".txt");
    double[][] G = Utils.loadDoubleMatrixFromFile("lp" + File.separator + "G" + problemId + ".csv",
            ",".charAt(0));
    double[] h = Utils.loadDoubleArrayFromFile("lp" + File.separator + "h" + problemId + ".txt");
    double[][] A = Utils.loadDoubleMatrixFromFile("lp" + File.separator + "A" + problemId + ".csv",
            ",".charAt(0));
    double[] b = Utils.loadDoubleArrayFromFile("lp" + File.separator + "b" + problemId + ".txt");
    double[] expectedSol = Utils.loadDoubleArrayFromFile("lp" + File.separator + "sol" + problemId + ".txt");
    double expectedvalue = Utils
            .loadDoubleArrayFromFile("lp" + File.separator + "value" + problemId + ".txt")[0];

    //double norm = MatrixUtils.createRealMatrix(A).operate(MatrixUtils.createRealVector(expectedSol)).subtract(MatrixUtils.createRealVector(b)).getNorm();
    //assertTrue(norm < 1.e-10);

    LPOptimizationRequest or = new LPOptimizationRequest();
    or.setC(c);
    or.setG(G);
    or.setH(h);
    or.setA(A);
    or.setB(b);
    or.setCheckKKTSolutionAccuracy(true);
    or.setToleranceKKT(1.E-7);
    or.setToleranceFeas(1.E-7);
    or.setTolerance(1.E-7);
    or.setDumpProblem(true);
    or.setAlpha(0.75);
    or.setInitialPoint(new double[] { 0.9999998735888544, -999.0000001264111, 1000.0, 0.9999998735888544, 0.0,
            -999.0000001264111, 0.9999999661257591, 0.9999998735888544, 1000.0, 0.0, 0.9999998735888544, 0.0,
            0.9999998735888544, 0.9999998735888544, 0.9999998735888544, 0.0, 0.0, 0.9999998735888544, -1000.0,
            0.9999999198573067, 9.253690467190285E-8, 1000.0, -999.0000001264111, 0.9999998735888544, -1000.0,
            -1000.0 });

    //optimization
    //LPPrimalDualMethodOLD opt = new LPPrimalDualMethodOLD();
    LPPrimalDualMethod opt = new LPPrimalDualMethod();

    opt.setLPOptimizationRequest(or);
    int returnCode = opt.optimize();

    if (returnCode == OptimizationResponse.FAILED) {
        fail();
    }

    LPOptimizationResponse response = opt.getLPOptimizationResponse();
    double[] sol = response.getSolution();
    RealVector cVector = new ArrayRealVector(c);
    RealVector solVector = new ArrayRealVector(sol);
    double value = cVector.dotProduct(solVector);
    log.debug("sol   : " + ArrayUtils.toString(sol));
    log.debug("value : " + value);

    //check constraints
    RealVector x = MatrixUtils.createRealVector(sol);
    RealMatrix GMatrix = MatrixUtils.createRealMatrix(G);
    RealVector hvector = MatrixUtils.createRealVector(h);
    RealMatrix AMatrix = MatrixUtils.createRealMatrix(A);
    RealVector bvector = MatrixUtils.createRealVector(b);
    RealVector Gxh = GMatrix.operate(x).subtract(hvector);
    for (int i = 0; i < Gxh.getDimension(); i++) {
        assertTrue(Gxh.getEntry(i) <= 0);//not strictly because some constraint has been treated as a bound
    }
    RealVector Axb = AMatrix.operate(x).subtract(bvector);
    assertEquals(0., Axb.getNorm(), or.getToleranceFeas());

    //check value
    assertEquals(expectedvalue, value, or.getTolerance());

}