Example usage for org.apache.commons.math3.linear RealMatrix scalarMultiply

Introduction

On this page you can find example usages of org.apache.commons.math3.linear.RealMatrix.scalarMultiply.

Prototype

RealMatrix scalarMultiply(double d);

Document

Returns the result of multiplying each entry of this matrix by d.
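
Before the longer, real-world examples below, here is a minimal self-contained sketch (the matrix values and the ScalarMultiplyDemo class name are arbitrary, chosen only for illustration). Note that scalarMultiply does not modify the receiver; it returns a new matrix.

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.RealMatrix;

public class ScalarMultiplyDemo {
    public static void main(String[] args) {
        RealMatrix m = new Array2DRowRealMatrix(new double[][] { { 1, 2 }, { 3, 4 } });

        // returns a new matrix with every entry multiplied by 0.5;
        // m itself is left unchanged
        RealMatrix half = m.scalarMultiply(0.5);

        System.out.println(half.getEntry(0, 1)); // 1.0
        System.out.println(m.getEntry(0, 1));    // 2.0 (unchanged)
    }
}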

Usage

From source file:edu.cudenver.bios.power.glmm.GLMMTestPillaiBartlett.java

/**
 * Compute a Pillai-Bartlett trace statistic.
 *
 * @param H hypothesis sum of squares matrix
 * @param E error sum of squares matrix
 * @return the Pillai-Bartlett trace statistic
 */
private double getPillaiBartlettTrace(RealMatrix H, RealMatrix E) {
    if (!H.isSquare() || !E.isSquare() || H.getColumnDimension() != E.getRowDimension())
        throw new IllegalArgumentException(
                "Failed to compute Pillai-Bartlett Trace: hypothesis and error matrices must be square and same dimensions");

    double a = C.getRowDimension();
    double b = U.getColumnDimension();
    double s = (a < b) ? a : b;
    double p = beta.getColumnDimension();

    RealMatrix adjustedH = H;
    if ((s == 1 && p > 1) || fMethod == FApproximation.PILLAI_ONE_MOMENT_OMEGA_MULT
            || fMethod == FApproximation.MULLER_TWO_MOMENT_OMEGA_MULT) {
        adjustedH = H.scalarMultiply(((double) (totalN - rank) / (double) totalN));
    }

    RealMatrix T = adjustedH.add(E);
    RealMatrix inverseT = new LUDecomposition(T).getSolver().getInverse();

    RealMatrix HinverseT = adjustedH.multiply(inverseT);

    return HinverseT.getTrace();
}
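
For reference, the quantity returned above is the Pillai-Bartlett trace V = tr(H (H + E)^-1); the scalarMultiply call rescales H by (totalN - rank) / totalN when the chosen F approximation requires it.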

From source file:edu.cudenver.bios.power.glmm.GLMMTestWilksLambda.java

/**
 * Compute a Wilks' Lambda statistic.
 *
 * @param H hypothesis sum of squares matrix
 * @param E error sum of squares matrix
 * @return the Wilks' Lambda statistic
 */
private double getWilksLambda(RealMatrix H, RealMatrix E, DistributionType type)
        throws IllegalArgumentException {
    if (!H.isSquare() || !E.isSquare() || H.getColumnDimension() != E.getRowDimension())
        throw new IllegalArgumentException(
                "Failed to compute Wilks Lambda: hypothesis and error matrices must be square and same dimensions");

    double a = C.getRowDimension();
    double b = U.getColumnDimension();
    double s = (a < b) ? a : b;
    double p = beta.getColumnDimension();

    RealMatrix adjustedH = H;
    if (type != DistributionType.DATA_ANALYSIS_NULL
            && ((s == 1 && p > 1) || fMethod == FApproximation.RAO_TWO_MOMENT_OMEGA_MULT)) {
        // cast to double to avoid integer division, as in the Pillai-Bartlett example above
        adjustedH = H.scalarMultiply(((double) (totalN - rank)) / (double) totalN);
    }

    RealMatrix T = adjustedH.add(E);
    RealMatrix inverseT = new LUDecomposition(T).getSolver().getInverse();

    RealMatrix EinverseT = E.multiply(inverseT);

    double lambda = new LUDecomposition(EinverseT).getDeterminant();
    return lambda;
}
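
Here the statistic is Wilks' Lambda, Lambda = det(E (H + E)^-1) = det(E) / det(H + E); scalarMultiply applies the same (totalN - rank) / totalN rescaling of H as in the Pillai-Bartlett example, and the determinant is obtained from an LU decomposition.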

From source file:com.winterwell.maths.stats.algorithms.KalmanFilterTest.java

@Test
public void testConstantAcceleration() {
    // simulates a vehicle, accelerating at a constant rate (0.1 m/s^2)

    // discrete time interval
    double dt = 0.1d;
    // position measurement noise (meter)
    double measurementNoise = 10d;
    // acceleration noise (meter/sec^2)
    double accelNoise = 0.2d;

    // A = [ 1 dt ]
    //     [ 0  1 ]
    RealMatrix A = new Array2DRowRealMatrix(new double[][] { { 1, dt }, { 0, 1 } });

    // B = [ dt^2/2 ]
    //     [ dt     ]
    RealMatrix Bnull = new Array2DRowRealMatrix(new double[][] { { FastMath.pow(dt, 2d) / 2d }, { dt } });

    // H = [ 1 0 ]
    RealMatrix H = new Array2DRowRealMatrix(new double[][] { { 1d, 0d } });

    // x = [ 0 0 ]
    RealVector x = new ArrayRealVector(new double[] { 0, 0 });

    RealMatrix tmp = new Array2DRowRealMatrix(
            new double[][] { { FastMath.pow(dt, 4d) / 4d, FastMath.pow(dt, 3d) / 2d },
                    { FastMath.pow(dt, 3d) / 2d, FastMath.pow(dt, 2d) } });

    // Q = [ dt^4/4 dt^3/2 ]
    //     [ dt^3/2 dt^2   ]
    RealMatrix Q = tmp.scalarMultiply(FastMath.pow(accelNoise, 2));

    // P0 = [ 1 1 ]
    //      [ 1 1 ]
    RealMatrix P0 = new Array2DRowRealMatrix(new double[][] { { 1, 1 }, { 1, 1 } });

    // R = [ measurementNoise^2 ]
    RealMatrix R = new Array2DRowRealMatrix(new double[] { FastMath.pow(measurementNoise, 2) });

    // constant control input: constant acceleration of 0.1 m/s^2,
    // i.e. the velocity increases by 0.1 * dt per cycle
    double uv = 0.1d * dt;
    RealVector u = new ArrayRealVector(new double[] { uv * uv / 2, uv });

    ProcessModel pm = new DefaultProcessModel(A, Bnull, Q, x, P0);
    MeasurementModel mm = new DefaultMeasurementModel(H, R);
    KalmanFilter filter = new KalmanFilter(pm, mm);

    Assert.assertEquals(1, filter.getMeasurementDimension());
    Assert.assertEquals(2, filter.getStateDimension());

    Gaussian state = new Gaussian(mtj(x), mtj(P0));
    MatrixUtils.equals(mtj(P0), state.getCovar());

    // check the initial state
    double[] expectedInitialState = new double[] { 0.0, 0.0 };
    //        assertVectorEquals(expectedInitialState, filter.getStateEstimation());

    RandomGenerator rand = new JDKRandomGenerator();

    RealVector tmpPNoise = new ArrayRealVector(new double[] { FastMath.pow(dt, 2d) / 2d, dt });

    // iterate 60 steps
    for (int i = 0; i < 60; i++) {
        state = filter.predict(state, mtj(u));

        // Simulate the process
        RealVector pNoise = tmpPNoise.mapMultiply(accelNoise * rand.nextGaussian());

        // x = A * x + B * u + pNoise
        x = A.operate(x).add(u).add(pNoise);

        // Simulate the measurement
        double mNoise = measurementNoise * rand.nextGaussian();

        // z = H * x + m_noise
        RealVector z = H.operate(x).mapAdd(mNoise);

        state = filter.correct(state, mtj(z));

        // state estimate shouldn't be larger than the measurement noise
        double diff = FastMath.abs(x.getEntry(0) - state.getMean().get(0));
        Assert.assertTrue(Precision.compareTo(diff, measurementNoise, 1e-6) < 0);
    }

    // error covariance of the velocity should be already very low (< 0.1)
    Assert.assertTrue(Precision.compareTo(state.getCovar().get(1, 1), 0.1d, 1e-6) < 0);
}
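
In this Kalman filter test, scalarMultiply appears once: the dt-based template matrix tmp is scaled by accelNoise^2 to build the process noise covariance Q. The template equals G G^T for G = [dt^2/2, dt]^T, so Q is the standard piecewise-constant-acceleration noise model.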

From source file:edu.oregonstate.eecs.mcplan.ml.GaussianMixtureModel.java

@Override
public void run() {
    init();
    System.out.println("Init");
    for (int i = 0; i < mu().length; ++i) {
        System.out.println("Mu " + i + ": " + mu()[i]);
        System.out.println("Sigma " + i + ": " + Sigma()[i]);
    }

    int iterations = 0;
    while (!converged_ && iterations++ < max_iterations_) {
        // Expectation
        makeMixture();
        for (int i = 0; i < n_; ++i) {
            for (int j = 0; j < k_; ++j) {
                c_[i][j] = posterior(data_[i], j);
            }
            Fn.normalize_inplace(c_[i]);
        }

        // Maximization
        for (int j = 0; j < k_; ++j) {
            double Z = 0.0;
            final RealVector mu_j = new ArrayRealVector(d_);
            RealMatrix Sigma_j = new Array2DRowRealMatrix(d_, d_);
            for (int i = 0; i < n_; ++i) {
                final double c_ij = c_[i][j];
                Z += c_ij;
                final RealVector x_i = new ArrayRealVector(data_[i]);
                // mu_j += c_ij * x_i
                mu_j.combineToSelf(1.0, 1.0, x_i.mapMultiply(c_ij));
                final RealVector v = x_i.subtract(mu_[j]);
                // Sigma_j += c_ij * |v><v|
                Sigma_j = Sigma_j.add(v.outerProduct(v).scalarMultiply(c_ij));
            }
            final double Zinv = 1.0 / Z;
            final double pi_j = Z / n_;
            mu_j.mapMultiplyToSelf(Zinv);
            Sigma_j = Sigma_j.scalarMultiply(Zinv);
            //            converged &= hasConverged( j, pi_j, mu_j, Sigma_j );
            pi_[j] = pi_j;
            mu_[j] = mu_j;
            Sigma_[j] = Sigma_j;
        }
        //         debug();

        final double log_likelihood = logLikelihood();
        if (Math.abs(log_likelihood - old_log_likelihood_) < epsilon_) {
            converged_ = true;
        }
        old_log_likelihood_ = log_likelihood;
    }
}

From source file:be.ugent.maf.cellmissy.analysis.doseresponse.impl.DoseResponseLMOptimizer.java

/**
 * Copy of super source code. Decompose a matrix A as A.P = Q.R using
 * Householder transforms.
 * <p>
 * As suggested in the P. Lascaux and R. Theodor book
 * <i>Analyse num&eacute;rique matricielle appliqu&eacute;e &agrave; l'art
 * de l'ing&eacute;nieur</i> (Masson, 1986), instead of representing the
 * Householder transforms with u<sub>k</sub> unit vectors such that:
 * <pre>
 * H<sub>k</sub> = I - 2u<sub>k</sub>.u<sub>k</sub><sup>t</sup>
 * </pre> we use v<sub>k</sub> non-unit vectors such that:
 * <pre>
 * H<sub>k</sub> = I - beta<sub>k</sub>v<sub>k</sub>.v<sub>k</sub><sup>t</sup>
 * </pre> where v<sub>k</sub> = a<sub>k</sub> - alpha<sub>k</sub>
 * e<sub>k</sub>. The beta<sub>k</sub> coefficients are provided upon exit
 * as recomputing them from the v<sub>k</sub> vectors would be costly.</p>
 * <p>
 * This decomposition handles rank-deficient cases, since the transformations
 * are performed in order of non-increasing column norms thanks to column
 * pivoting. The diagonal elements of the R matrix are therefore also in
 * non-increasing absolute-value order.</p>
 *
 * @param jacobian Weighted Jacobian matrix at the current point.
 * @param solvedCols Number of solved points.
 * @return data used in other methods of this class.
 * @throws ConvergenceException if the decomposition cannot be performed.
 */
private InternalData qrDecomposition(RealMatrix jacobian, int solvedCols) throws ConvergenceException {
    // Code in this class assumes that the weighted Jacobian is -(W^(1/2) J),
    // hence the multiplication by -1.
    final double[][] weightedJacobian = jacobian.scalarMultiply(-1).getData();

    final int nR = weightedJacobian.length;
    final int nC = weightedJacobian[0].length;

    final int[] permutation = new int[nC];
    final double[] diagR = new double[nC];
    final double[] jacNorm = new double[nC];
    final double[] beta = new double[nC];

    // initializations
    for (int k = 0; k < nC; ++k) {
        permutation[k] = k;
        double norm2 = 0;
        for (int i = 0; i < nR; ++i) {
            double akk = weightedJacobian[i][k];
            norm2 += akk * akk;
        }
        jacNorm[k] = FastMath.sqrt(norm2);
    }

    // transform the matrix column after column
    for (int k = 0; k < nC; ++k) {

        // select the column with the greatest norm on active components
        int nextColumn = -1;
        double ak2 = Double.NEGATIVE_INFINITY;
        for (int i = k; i < nC; ++i) {
            double norm2 = 0;
            for (int j = k; j < nR; ++j) {
                double aki = weightedJacobian[j][permutation[i]];
                norm2 += aki * aki;
            }
            if (Double.isInfinite(norm2) || Double.isNaN(norm2)) {
                throw new ConvergenceException(LocalizedFormats.UNABLE_TO_PERFORM_QR_DECOMPOSITION_ON_JACOBIAN,
                        nR, nC);
            }
            if (norm2 > ak2) {
                nextColumn = i;
                ak2 = norm2;
            }
        }
        if (ak2 <= getRankingThreshold()) {
            return new InternalData(weightedJacobian, permutation, k, diagR, jacNorm, beta);
        }
        int pk = permutation[nextColumn];
        permutation[nextColumn] = permutation[k];
        permutation[k] = pk;

        // choose alpha such that Hk.u = alpha ek
        double akk = weightedJacobian[k][pk];
        double alpha = (akk > 0) ? -FastMath.sqrt(ak2) : FastMath.sqrt(ak2);
        double betak = 1.0 / (ak2 - akk * alpha);
        beta[pk] = betak;

        // transform the current column
        diagR[pk] = alpha;
        weightedJacobian[k][pk] -= alpha;

        // transform the remaining columns
        for (int dk = nC - 1 - k; dk > 0; --dk) {
            double gamma = 0;
            for (int j = k; j < nR; ++j) {
                gamma += weightedJacobian[j][pk] * weightedJacobian[j][permutation[k + dk]];
            }
            gamma *= betak;
            for (int j = k; j < nR; ++j) {
                weightedJacobian[j][permutation[k + dk]] -= gamma * weightedJacobian[j][pk];
            }
        }
    }

    return new InternalData(weightedJacobian, permutation, solvedCols, diagR, jacNorm, beta);
}

From source file:com.itemanalysis.psychometrics.mixture.MvNormalMixtureModel.java

public double mStep() {
    RealMatrix[] M = new Array2DRowRealMatrix[groups];
    RealMatrix[] S = new Array2DRowRealMatrix[groups];
    double[] T1 = new double[groups];
    RealMatrix[] T2 = new Array2DRowRealMatrix[groups];
    RealMatrix[] T3 = new Array2DRowRealMatrix[groups];
    double pp = 0.0;
    RealMatrix rowData = null;
    RealMatrix temp = null;

    try {
        //estimate new means, covariances, and mixing proportions
        for (int g = 0; g < groups; g++) {
            T2[g] = new Array2DRowRealMatrix(dimensions, 1);
            T3[g] = new Array2DRowRealMatrix(dimensions, dimensions);
            for (int i = 0; i < sampleSize; i++) {
                pp = posteriorProbability(g, i);
                rowData = data.getRowMatrix(i).transpose();
                T1[g] += pp;
                T2[g] = T2[g].add(rowData.scalarMultiply(pp));
                temp = rowData.scalarMultiply(pp).multiply(rowData.transpose());
                T3[g] = T3[g].add(temp);
            }

        }

        for (int g = 0; g < groups; g++) {
            M[g] = T2[g].scalarMultiply(1.0 / T1[g]);
            temp = T2[g].scalarMultiply(1.0 / T1[g]).multiply(T2[g].transpose());
            S[g] = T3[g].subtract(temp).scalarMultiply(1.0 / T1[g]);
        }
    } catch (SingularMatrixException ex) {
        statusMessage = "Singular Matrix";
    }

    //apply constraints
    if (sameVarianceWithin)
        setCommonVarianceWithinClass(S);
    if (sameCovarianceWithin && !localIndependence)
        setCommonCovarianceWithinClass(S);
    if (localIndependence)
        setLocalIndependence(S);
    if (sameCovarianceBetween)
        setCommonCovariance(S);

    piKp1 = computeMixingProportions();

    //set values of new estimates for eStep (i.e. posterior probability) and computation of loglikelihood
    MvNormalComponentDistribution dist = null;
    for (int g = 0; g < groups; g++) {
        dist = (MvNormalComponentDistribution) compDistribution[g];
        dist.setMixingProportion(piKp1[g]);
        dist.setMean(M[g]);
        dist.setCovariance(S[g]);
    }

    double ll = loglikelihood();
    return ll;
}
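
In this M-step, scalarMultiply serves two roles: rowData.scalarMultiply(pp) weights each observation by its posterior probability while accumulating the sufficient statistics T2 and T3, and scalarMultiply(1.0 / T1[g]) divides those sums by the total posterior weight to yield the updated mean and covariance for each group.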

From source file:com.datumbox.framework.machinelearning.regression.MatrixLinearRegression.java

@Override
protected void estimateModelParameters(Dataset trainingData) {

    ModelParameters modelParameters = knowledgeBase.getModelParameters();

    int n = trainingData.size();
    int d = trainingData.getColumnSize() + 1;//plus one for the constant

    //initialization
    modelParameters.setN(n);
    modelParameters.setD(d);

    Map<Object, Double> thitas = modelParameters.getThitas();
    Map<Object, Integer> featureIds = modelParameters.getFeatureIds();

    MatrixDataset matrixDataset = MatrixDataset.newInstance(trainingData, true, featureIds);

    RealVector Y = matrixDataset.getY();
    RealMatrix X = matrixDataset.getX();

    //(X'X)^-1
    RealMatrix Xt = X.transpose();
    LUDecomposition lud = new LUDecomposition(Xt.multiply(X));
    RealMatrix XtXinv = lud.getSolver().getInverse();
    lud = null;

    //(X'X)^-1 * X'Y
    RealVector coefficients = XtXinv.multiply(Xt).operate(Y);
    Xt = null;

    //put the features coefficients in the thita map
    thitas.put(Dataset.constantColumnName, coefficients.getEntry(0));
    for (Map.Entry<Object, Integer> entry : featureIds.entrySet()) {
        Object feature = entry.getKey();
        Integer featureId = entry.getValue();

        thitas.put(feature, coefficients.getEntry(featureId));
    }

    if (knowledgeBase.getTrainingParameters().getCalculatePvalue()) { //calculate p-values only if requested
        //get the predictions and subtract the Y vector. Sum the squared differences to get the error
        double SSE = 0.0;
        for (double v : X.operate(coefficients).subtract(Y).toArray()) {
            SSE += v * v;
        }
        Y = null;

        //standard error matrix
        double MSE = SSE / (n - d); //mean square error = SSE / dfResidual
        RealMatrix SE = XtXinv.scalarMultiply(MSE);
        XtXinv = null;

        //creating a flipped map of ids to features
        Map<Integer, Object> idsFeatures = PHPfunctions.array_flip(featureIds);

        Map<Object, Double> pvalues = new HashMap<>(); //This is not small, but it does not make sense to store it in the db
        for (int i = 0; i < d; ++i) {
            double error = SE.getEntry(i, i);
            Object feature = idsFeatures.get(i);
            if (error <= 0.0) {
                //double tstat = Double.MAX_VALUE;
                pvalues.put(feature, 0.0);
            } else {
                double tstat = coefficients.getEntry(i) / Math.sqrt(error);
                pvalues.put(feature, 1.0 - ContinuousDistributions.StudentsCdf(tstat, n - d)); //n-d degrees of freedom
            }
        }
        SE = null;
        coefficients = null;
        idsFeatures = null;
        matrixDataset = null;

        modelParameters.setFeaturePvalues(pvalues);
    }
}
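
The scalarMultiply call above scales (X'X)^-1 by the mean squared error to form the coefficient covariance matrix SE; its diagonal entries are the squared standard errors that feed the t-statistics and p-values.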

From source file:com.datumbox.framework.core.machinelearning.regression.MatrixLinearRegression.java

/** {@inheritDoc} */
@Override
protected void _fit(Dataframe trainingData) {
    ModelParameters modelParameters = knowledgeBase.getModelParameters();
    int n = trainingData.size();
    int d = trainingData.xColumnSize();

    Map<Object, Double> thitas = modelParameters.getThitas();
    Map<Object, Integer> featureIds = modelParameters.getFeatureIds();
    //Map<Integer, Integer> recordIdsReference = null;
    DataframeMatrix matrixDataset = DataframeMatrix.newInstance(trainingData, true, null, featureIds);

    RealVector Y = matrixDataset.getY();
    RealMatrix X = matrixDataset.getX();

    //(X'X)^-1
    RealMatrix Xt = X.transpose();
    LUDecomposition lud = new LUDecomposition(Xt.multiply(X));
    //W = (X'X)^-1 * X'Y
    RealMatrix XtXinv = lud.getSolver().getInverse();
    RealVector coefficients = XtXinv.multiply(Xt).operate(Y);
    // instead of matrix inversion calculate (X'X) * W = X'Y
    //RealVector coefficients = lud.getSolver().solve(Xt.operate(Y));
    //lud =null;

    //Xt = null;

    //put the features coefficients in the thita map
    thitas.put(Dataframe.COLUMN_NAME_CONSTANT, coefficients.getEntry(0));
    for (Map.Entry<Object, Integer> entry : featureIds.entrySet()) {
        Object feature = entry.getKey();
        Integer featureId = entry.getValue();

        thitas.put(feature, coefficients.getEntry(featureId));
    }

    //get the predictions and subtract the Y vector. Sum the squared differences to get the error
    double SSE = 0.0;
    for (double v : X.operate(coefficients).subtract(Y).toArray()) {
        SSE += v * v;
    }
    //Y = null;

    //standard error matrix
    double MSE = SSE / (n - (d + 1)); //mean square error = SSE / dfResidual
    RealMatrix SE = XtXinv.scalarMultiply(MSE);
    //XtXinv = null;

    //creating a flipped map of ids to features
    Map<Integer, Object> idsFeatures = PHPMethods.array_flip(featureIds);

    Map<Object, Double> pvalues = new HashMap<>(); //This is not small, but it does not make sense to store it in the storage
    for (int i = 0; i < (d + 1); ++i) {
        double error = SE.getEntry(i, i);
        Object feature = idsFeatures.get(i);
        if (error <= 0.0) {
            //double tstat = Double.MAX_VALUE;
            pvalues.put(feature, 0.0);
        } else {
            double tstat = coefficients.getEntry(i) / Math.sqrt(error);
            pvalues.put(feature, 1.0 - ContinuousDistributions.studentsCdf(tstat, n - (d + 1))); //n-d degrees of freedom
        }
    }
    //SE=null;
    //coefficients=null;
    //idsFeatures=null;
    //matrixDataset = null;

    modelParameters.setFeaturePvalues(pvalues);

}

From source file:iDynoOptimizer.MOEAFramework26.src.org.moeaframework.algorithm.DBEA.java

/**
 * Updates the ideal point and intercepts given the new solution.
 *
 * @param solution the new solution
 */
void updateIdealPointAndIntercepts(Solution solution) {
    if (!solution.violatesConstraints()) {
        // update the ideal point
        for (int j = 0; j < problem.getNumberOfObjectives(); j++) {
            idealPoint[j] = Math.min(idealPoint[j], solution.getObjective(j));
            intercepts[j] = Math.max(intercepts[j], solution.getObjective(j));
        }

        // compute the axis intercepts
        Population feasibleSolutions = getFeasibleSolutions(population);
        feasibleSolutions.add(solution);

        Population nondominatedSolutions = getNondominatedFront(feasibleSolutions);

        if (!nondominatedSolutions.isEmpty()) {
            // find the points with the largest value in each objective
            Population extremePoints = new Population();

            for (int i = 0; i < problem.getNumberOfObjectives(); i++) {
                extremePoints.add(largestObjectiveValue(i, nondominatedSolutions));
            }

            if (numberOfUniqueSolutions(extremePoints) != problem.getNumberOfObjectives()) {
                for (int i = 0; i < problem.getNumberOfObjectives(); i++) {
                    intercepts[i] = extremePoints.get(i).getObjective(i);
                }
            } else {
                try {
                    RealMatrix b = new Array2DRowRealMatrix(problem.getNumberOfObjectives(), 1);
                    RealMatrix A = new Array2DRowRealMatrix(problem.getNumberOfObjectives(),
                            problem.getNumberOfObjectives());

                    for (int i = 0; i < problem.getNumberOfObjectives(); i++) {
                        b.setEntry(i, 0, 1.0);

                        for (int j = 0; j < problem.getNumberOfObjectives(); j++) {
                            A.setEntry(i, j, extremePoints.get(i).getObjective(j));
                        }
                    }

                    double numerator = new LUDecomposition(A).getDeterminant();
                    // scalarMultiply returns a new matrix; reassign so the scaling is not lost
                    b = b.scalarMultiply(numerator);
                    RealMatrix normal = MatrixUtils.inverse(A).multiply(b);

                    for (int i = 0; i < problem.getNumberOfObjectives(); i++) {
                        intercepts[i] = numerator / normal.getEntry(i, 0);

                        if (intercepts[i] <= 0 || Double.isNaN(intercepts[i])
                                || Double.isInfinite(intercepts[i])) {
                            intercepts[i] = extremePoints.get(i).getObjective(i);
                        }
                    }
                } catch (RuntimeException e) {
                    for (int i = 0; i < problem.getNumberOfObjectives(); i++) {
                        intercepts[i] = extremePoints.get(i).getObjective(i);
                    }
                }
            }
        }
    }
}

From source file:com.joptimizer.optimizers.JOptimizerTest.java

/**
 * Quadratic objective, no constraints.
 */
public void testNewtownUnconstrained() throws Exception {
    Log.d(MainActivity.JOPTIMIZER_LOGTAG, "testNewtownUnconstrained");
    RealMatrix PMatrix = new Array2DRowRealMatrix(
            new double[][] { { 1.68, 0.34, 0.38 }, { 0.34, 3.09, -1.59 }, { 0.38, -1.59, 1.54 } });
    RealVector qVector = new ArrayRealVector(new double[] { 0.018, 0.025, 0.01 });

    // Objective function.
    double theta = 0.01522;
    RealMatrix P = PMatrix.scalarMultiply(theta);
    RealVector q = qVector.mapMultiply(-1);
    PDQuadraticMultivariateRealFunction objectiveFunction = new PDQuadraticMultivariateRealFunction(P.getData(),
            q.toArray(), 0);

    OptimizationRequest or = new OptimizationRequest();
    or.setF0(objectiveFunction);
    or.setInitialPoint(new double[] { 0.04, 0.50, 0.46 });

    //optimization
    JOptimizer opt = new JOptimizer();
    opt.setOptimizationRequest(or);
    int returnCode = opt.optimize();

    if (returnCode == OptimizationResponse.FAILED) {
        fail();
    }

    OptimizationResponse response = opt.getOptimizationResponse();
    double[] sol = response.getSolution();
    Log.d(MainActivity.JOPTIMIZER_LOGTAG, "sol   : " + ArrayUtils.toString(sol));
    Log.d(MainActivity.JOPTIMIZER_LOGTAG, "value : " + objectiveFunction.value(sol));

    // we already know the analytic solution of the problem:
    // sol = -P^-1 * q
    RealMatrix QInv = Utils.squareMatrixInverse(P);
    RealVector benchSol = QInv.operate(q).mapMultiply(-1);
    Log.d(MainActivity.JOPTIMIZER_LOGTAG, "benchSol   : " + ArrayUtils.toString(benchSol.toArray()));
    Log.d(MainActivity.JOPTIMIZER_LOGTAG, "benchValue : " + objectiveFunction.value(benchSol.toArray()));

    assertEquals(benchSol.getEntry(0), sol[0], 0.000000000000001);
    assertEquals(benchSol.getEntry(1), sol[1], 0.000000000000001);
    assertEquals(benchSol.getEntry(2), sol[2], 0.000000000000001);
}