Example usage for org.apache.commons.math3.linear RealVector toArray

Introduction

On this page you can find example usages of org.apache.commons.math3.linear RealVector.toArray, collected from the source files listed below.

Prototype

public double[] toArray() 

Document

Convert the vector to an array of doubles. The returned array is a copy of the vector's data, so modifying it does not affect the vector.
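Before the project-level examples below, here is a minimal, self-contained sketch of the call (class name and values are made up for illustration):

import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.RealVector;

public class ToArrayExample {
    public static void main(String[] args) {
        RealVector v = new ArrayRealVector(new double[] { 1.0, 2.0, 3.0 });
        double[] values = v.toArray();     // copy of the vector's entries
        values[0] = 99.0;                  // does not modify the vector
        System.out.println(v.getEntry(0)); // prints 1.0
    }
}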

Usage

From source file:com.joptimizer.optimizers.NewtonLEConstrainedISPTest.java

public void testOptimize1() throws Exception {
    log.debug("testOptimize1");

    // START SNIPPET: NewtonLEConstrainedISP-1

    //commons-math client code
    RealMatrix Pmatrix = new Array2DRowRealMatrix(
            new double[][] { { 1.68, 0.34, 0.38 }, { 0.34, 3.09, -1.59 }, { 0.38, -1.59, 1.54 } });
    RealVector qVector = new ArrayRealVector(new double[] { 0.018, 0.025, 0.01 });

    // Objective function
    double theta = 0.01522;
    RealMatrix P = Pmatrix.scalarMultiply(theta);
    RealVector q = qVector.mapMultiply(-1);
    PDQuadraticMultivariateRealFunction objectiveFunction = new PDQuadraticMultivariateRealFunction(P.getData(),
            q.toArray(), 0);

    OptimizationRequest or = new OptimizationRequest();
    or.setF0(objectiveFunction);
    or.setInitialPoint(new double[] { 0.1, 0.1, 0.1 });//LE-infeasible starting point
    or.setA(new double[][] { { 1, 1, 1 } });
    or.setB(new double[] { 1 });

    // optimization
    NewtonLEConstrainedISP opt = new NewtonLEConstrainedISP();
    opt.setOptimizationRequest(or);
    int returnCode = opt.optimize();

    // END SNIPPET: NewtonLEConstrainedISP-1

    if (returnCode == OptimizationResponse.FAILED) {
        fail();
    }

    OptimizationResponse response = opt.getOptimizationResponse();
    double[] sol = response.getSolution();
    log.debug("sol   : " + ArrayUtils.toString(sol));
    log.debug("value : " + objectiveFunction.value(sol));
    assertEquals(0.04632311555988555, sol[0], 0.000000000000001);
    assertEquals(0.5086308460954377, sol[1], 0.000000000000001);
    assertEquals(0.44504603834467693, sol[2], 0.000000000000001);
}

From source file:com.joptimizer.optimizers.NewtonUnconstrainedTest.java

/**
 * Quadratic objective.
 */
public void testOptimize() throws Exception {
    log.debug("testOptimize");
    // START SNIPPET: newtonUnconstrained-1

    RealMatrix PMatrix = new Array2DRowRealMatrix(
            new double[][] { { 1.68, 0.34, 0.38 }, { 0.34, 3.09, -1.59 }, { 0.38, -1.59, 1.54 } });
    RealVector qVector = new ArrayRealVector(new double[] { 0.018, 0.025, 0.01 });

    // Objective function.
    double theta = 0.01522;
    RealMatrix P = PMatrix.scalarMultiply(theta);
    RealVector q = qVector.mapMultiply(-1);
    PDQuadraticMultivariateRealFunction objectiveFunction = new PDQuadraticMultivariateRealFunction(P.getData(),
            q.toArray(), 0);

    OptimizationRequest or = new OptimizationRequest();
    or.setF0(objectiveFunction);
    or.setInitialPoint(new double[] { 0.04, 0.50, 0.46 });
    or.setTolerance(1.e-8);

    //optimization
    NewtonUnconstrained opt = new NewtonUnconstrained();
    opt.setOptimizationRequest(or);
    int returnCode = opt.optimize();

    // END SNIPPET: newtonUnconstrained-1

    if (returnCode == OptimizationResponse.FAILED) {
        fail();
    }

    OptimizationResponse response = opt.getOptimizationResponse();
    double[] sol = response.getSolution();
    log.debug("sol   : " + ArrayUtils.toString(sol));
    log.debug("value : " + objectiveFunction.value(sol));

    // we know the analytic solution of the problem
    // sol = -PInv * q
    CholeskyDecomposition cFact = new CholeskyDecomposition(P);
    RealVector benchSol = cFact.getSolver().solve(q).mapMultiply(-1);
    log.debug("benchSol   : " + ArrayUtils.toString(benchSol.toArray()));
    log.debug("benchValue : " + objectiveFunction.value(benchSol.toArray()));

    assertEquals(benchSol.getEntry(0), sol[0], 0.00000000000001);
    assertEquals(benchSol.getEntry(1), sol[1], 0.00000000000001);
    assertEquals(benchSol.getEntry(2), sol[2], 0.00000000000001);
}

From source file:game.plugins.algorithms.RealFeaturesTree.java

protected double information(Dataset dataset) {
    RealVector prob = getProbabilities(dataset);
    double info = 0;
    for (double p : prob.toArray()) {
        if (p > 0) {
            info -= p * Utils.log2(p);
        }
    }
    return info;
}

From source file:gamlss.utilities.WLSMultipleLinearRegression.java

/**
* {@inheritDoc}
* Returns {@code null} if not applicable.
*/
@Override
public double[] estimateResiduals() {
    if (this.copyOriginal) {
        RealVector e = this.y.subtract(this.X.operate(beta));
        return e.toArray();
    }
    return null;
}

From source file:eagle.security.userprofile.model.eigen.UserProfileEigenModeler.java

private void computeStats(RealMatrix m) {

    if (m.getColumnDimension() != this.cmdTypes.length) {
        LOG.error("Please fix the commands list in config file");
        return;
    }
    statistics = new UserCommandStatistics[m.getColumnDimension()];
    for (int i = 0; i < m.getColumnDimension(); i++) {
        UserCommandStatistics stats = new UserCommandStatistics();
        stats.setCommandName(this.cmdTypes[i]);
        RealVector colData = m.getColumnVector(i);
        StandardDeviation deviation = new StandardDeviation();
        double stddev = deviation.evaluate(colData.toArray());
        //LOG.info("stddev is nan?" + (stddev == Double.NaN? "yes":"no"));
        stats.setLowVariant(stddev <= lowVarianceVal);
        stats.setStddev(stddev);
        Mean mean = new Mean();
        double mu = mean.evaluate(colData.toArray());
        //LOG.info("mu is nan?" + (mu == Double.NaN? "yes":"no"));
        stats.setMean(mu);
        statistics[i] = stats;
    }
}

From source file:com.joptimizer.algebra.CholeskySparseFactorizationTest.java

/**
 * Tests vector and matrix solve method.
 */
public void testFromFile3() throws Exception {
    log.debug("testFromFile3");
    String matrixId = "3";
    double[][] G = Utils
            .loadDoubleMatrixFromFile("factorization" + File.separator + "matrix" + matrixId + ".csv");
    RealMatrix Q = MatrixUtils.createRealMatrix(G);
    int dim = Q.getRowDimension();

    CholeskySparseFactorization myc = new CholeskySparseFactorization(new SparseDoubleMatrix2D(G));
    myc.factorize();

    //solve for a vector
    RealVector b = new ArrayRealVector(Utils.randomValuesVector(dim, -0.5, 0.5, new Long(dim)).toArray());
    RealVector x = new ArrayRealVector(myc.solve(F1.make(b.toArray())).toArray());

    //b - Q.x
    double n1 = b.subtract(Q.operate(x)).getNorm();
    double sr1 = Utils.calculateScaledResidual(G, x.toArray(), b.toArray());
    log.debug("||b - Q.x||: " + n1);
    log.debug("scaled res : " + sr1);
    assertTrue(n1 < 1.E-8);
    assertTrue(sr1 < Utils.getDoubleMachineEpsilon());

    //solve for a matrix
    RealMatrix B = new Array2DRowRealMatrix(
            Utils.randomValuesMatrix(dim, 10, -0.5, 0.5, new Long(dim)).toArray());
    RealMatrix X = new Array2DRowRealMatrix(myc.solve(F2.make(B.getData())).toArray());

    //B - Q.X
    double n2 = B.subtract(Q.multiply(X)).getNorm();
    double sr2 = Utils.calculateScaledResidual(G, X.getData(), B.getData());
    log.debug("||B - Q.X||: " + n2);
    log.debug("scaled res : " + sr2);
    assertTrue(n2 < 1.E-8);
    assertTrue(sr2 < Utils.getDoubleMachineEpsilon());
}

From source file:com.joptimizer.algebra.CholeskyFactorizationTest.java

/**
 * This test shows that inversion accuracy must be checked with the
 * scaled residual, not with the plain norm ||A.x - b||.
 */
public void testScaledResidual() throws Exception {
    log.debug("testScaledResidual");

    String matrixId = "1";
    double[][] A = Utils
            .loadDoubleMatrixFromFile("factorization" + File.separator + "matrix" + matrixId + ".csv");
    RealMatrix Q = MatrixUtils.createRealMatrix(A);
    int dim = Q.getRowDimension();

    RealVector b = new ArrayRealVector(new double[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 });

    CholeskyFactorization cs = new CholeskyFactorization(DoubleFactory2D.dense.make(Q.getData()));
    cs.factorize();
    RealVector x = new ArrayRealVector(cs.solve(DoubleFactory1D.dense.make(b.toArray())).toArray());

    //scaledResidual = ||Ax-b||_oo/( ||A||_oo . ||x||_oo + ||b||_oo )
    // with ||x||_oo = max(x[i])
    double scaledResidual = Utils.calculateScaledResidual(Q.getData(), x.toArray(), b.toArray());
    log.debug("scaledResidual: " + scaledResidual);
    assertTrue(scaledResidual < Utils.getDoubleMachineEpsilon());

    //b - A.x
    //checking the simple norm, this will fail
    double n1 = b.subtract(Q.operate(x)).getNorm();
    log.debug("||b - A.x||: " + n1);
    //assertTrue(n1 < 1.E-8);
}
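For reference, the scaled residual checked above can be computed directly with commons-math primitives. This is a minimal sketch following the formula quoted in the test's comments, not joptimizer's actual Utils.calculateScaledResidual implementation:

import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.RealVector;

public final class ScaledResidual {
    // ||A.x - b||_oo / (||A||_oo * ||x||_oo + ||b||_oo)
    // getNorm() on a RealMatrix is the maximum absolute row sum norm, i.e. ||A||_oo
    public static double of(RealMatrix A, RealVector x, RealVector b) {
        double num = b.subtract(A.operate(x)).getLInfNorm();
        double den = A.getNorm() * x.getLInfNorm() + b.getLInfNorm();
        return num / den;
    }
}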

From source file:com.joptimizer.functions.SOCPLogarithmicBarrier.java

/**
 * Create the barrier function for the Phase I.
 * It is an instance of this class for the constraints: 
 * <br>||Ai.x+bi|| < ci.x+di+t, i=1,...,m
 * @see "S.Boyd and L.Vandenberghe, Convex Optimization, 11.6.2"
 */
public BarrierFunction createPhase1BarrierFunction() {

    final int dimPh1 = dim + 1;
    List<SOCPConstraintParameters> socpConstraintParametersPh1List = new ArrayList<SOCPConstraintParameters>();
    SOCPLogarithmicBarrier bfPh1 = new SOCPLogarithmicBarrier(socpConstraintParametersPh1List, dimPh1);

    for (int i = 0; i < socpConstraintParametersList.size(); i++) {
        SOCPConstraintParameters param = socpConstraintParametersList.get(i);
        RealMatrix A = param.getA();
        RealVector b = param.getB();
        RealVector c = param.getC();
        double d = param.getD();

        RealMatrix APh1 = MatrixUtils.createRealMatrix(A.getRowDimension(), dimPh1);
        APh1.setSubMatrix(A.getData(), 0, 0);
        RealVector bPh1 = b;
        RealVector cPh1 = new ArrayRealVector(c.getDimension() + 1);
        cPh1.setSubVector(0, c);
        cPh1.setEntry(c.getDimension(), 1);
        double dPh1 = d;

        SOCPConstraintParameters paramsPh1 = new SOCPConstraintParameters(APh1.getData(), bPh1.toArray(),
                cPh1.toArray(), dPh1);
        socpConstraintParametersPh1List.add(paramsPh1);
    }

    return bfPh1;
}

From source file:eu.crisis_economics.abm.markets.clearing.heterogeneous.NelderMeadClearingAlgorithm.java

@Override
public double applyToNetwork(final MixedClearingNetwork network) {
    Preconditions.checkNotNull(network);
    final SimplexOptimizer optimizer = new SimplexOptimizer(relErrorTarget, absErrorTarget);

    final ResidualCostFunction aggregateCostFunction = super.getResidualScalarCostFunction(network);
    final RealVector start = new ArrayRealVector(network.getNumberOfEdges());
    for (int i = 0; i < network.getNumberOfEdges(); ++i)
        start.setEntry(i, network.getEdges().get(i).getMaximumRateAdmissibleByBothParties());
    start.set(1.); // note: RealVector.set(double) assigns 1.0 to every entry, overwriting the loop above

    final PointValuePair result = optimizer.optimize(new MaxEval(maximumEvaluations),
            new ObjectiveFunction(aggregateCostFunction), GoalType.MINIMIZE, new InitialGuess(start.toArray()),
            new NelderMeadSimplex(network.getNumberOfEdges()));

    final double residualCost = result.getValue();
    System.out.println("Network cleared: residual cost: " + residualCost + ".");

    return residualCost;
}

From source file:com.github.thorbenlindhauer.factor.GaussianFactorTest.java

@Test
public void testFactorMarginalCase2() {
    Scope variables = newScope(new ContinuousVariable("A"));

    GaussianFactor aMarginal = abcFactor.marginal(variables);

    // then
    Collection<Variable> newVariables = aMarginal.getVariables().getVariables();
    assertThat(newVariables).hasSize(1);
    assertThat(newVariables).contains(new ContinuousVariable("A"));

    // precision matrix: K_xx - K_xy * K_yy^(-1) * K_yx
    RealMatrix precisionMatrix = aMarginal.getPrecisionMatrix();
    assertThat(precisionMatrix.isSquare()).isTrue();
    assertThat(precisionMatrix.getColumnDimension()).isEqualTo(1);

    double precision = precisionMatrix.getRowVector(0).toArray()[0];
    assertThat(precision).isEqualTo(-(14.0d / 3.0d), TestConstants.DOUBLE_VALUE_TOLERANCE);

    // scaled mean vector: h_x - K_xy * K_yy^(-1) * h_y
    RealVector scaledMeanVector = aMarginal.getScaledMeanVector();
    assertThat(scaledMeanVector.getDimension()).isEqualTo(1);

    double meanValue = scaledMeanVector.toArray()[0];
    assertThat(meanValue).isEqualTo(4.0d / 3.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    // normalization constant: g + 0.5 * (log( det( 2 * PI * K_yy^(-1))) + h_y * K_yy^(-1) * h_y)
    assertThat(aMarginal.getNormalizationConstant()).isEqualTo(9.324590408d,
            TestConstants.DOUBLE_VALUE_TOLERANCE);
}