Example usage for org.apache.commons.math3.linear RealMatrix getNorm

Introduction

On this page you can find example usage for org.apache.commons.math3.linear RealMatrix getNorm.

Prototype

double getNorm();

Document

Returns the maximum absolute row sum norm (http://mathworld.wolfram.com/MaximumAbsoluteRowSumNorm.html) of the matrix.
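
For a concrete illustration of this definition, the following standalone sketch (not taken from the examples below; matrix values invented) compares getNorm() with the maximum absolute row sum computed by hand:

import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;

public class GetNormDemo {
    public static void main(String[] args) {
        RealMatrix m = MatrixUtils.createRealMatrix(new double[][] { { 1, -2 }, { -3, 4 } });

        // Maximum absolute row sum computed by hand: max(|1| + |-2|, |-3| + |4|) = 7
        double maxRowSum = 0;
        for (int i = 0; i < m.getRowDimension(); i++) {
            double rowSum = 0;
            for (int j = 0; j < m.getColumnDimension(); j++) {
                rowSum += Math.abs(m.getEntry(i, j));
            }
            maxRowSum = Math.max(maxRowSum, rowSum);
        }

        System.out.println(m.getNorm());  // 7.0
        System.out.println(maxRowSum);    // 7.0
    }
}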

Usage

From source file:com.cloudera.oryx.als.serving.generation.Generation.java

private static Solver recomputeSolver(LongObjectMap<float[]> M, Lock readLock) {
    readLock.lock();
    try {
        if (M == null || M.isEmpty()) {
            return null;
        }
        RealMatrix MTM = MatrixUtils.transposeTimesSelf(M);
        double infNorm = MTM.getNorm();
        if (infNorm < 1.0) {
            log.warn("X'*X or Y'*Y has small inf norm ({}); try decreasing model.lambda", infNorm);
            throw new IllConditionedSolverException("infNorm: " + infNorm);
        }
        return MatrixUtils.getSolver(MTM);
    } finally {
        readLock.unlock();
    }
}

From source file:net.myrrix.online.generation.Generation.java

private static Solver recomputeSolver(FastByIDMap<float[]> M, Lock readLock) {
    readLock.lock();
    try {
        if (M == null || M.isEmpty()) {
            return null;
        }
        RealMatrix MTM = MatrixUtils.transposeTimesSelf(M);
        double infNorm = MTM.getNorm();
        if (infNorm < 1.0) {
            log.warn("X'*X or Y'*Y has small inf norm ({}); try decreasing model.als.lambda", infNorm);
            throw new IllConditionedSolverException("infNorm: " + infNorm);
        }
        return MatrixUtils.getSolver(MTM);
    } finally {
        readLock.unlock();
    }
}
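
Both snippets above use the infinity norm of the Gram matrix X'X or Y'Y as a cheap guard against ill-conditioned systems; note that their MatrixUtils and IllConditionedSolverException are project classes, not Commons Math. A minimal sketch of the same check using only commons-math3 (matrix values invented):

import org.apache.commons.math3.linear.LUDecomposition;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;

public class NormGuardSketch {
    public static void main(String[] args) {
        RealMatrix m = MatrixUtils.createRealMatrix(new double[][] { { 1e-4, 0 }, { 0, 1e-4 } });

        // Gram matrix M' * M (the snippets above delegate this to their own MatrixUtils)
        RealMatrix mtm = m.transpose().multiply(m);

        double infNorm = mtm.getNorm();
        if (infNorm < 1.0) {
            // A very small norm suggests the normal equations are poorly scaled or ill-conditioned
            System.out.println("M'M has small inf norm (" + infNorm + "); solving may be unreliable");
        } else {
            System.out.println("solver usable: " + new LUDecomposition(mtm).getSolver().isNonSingular());
        }
    }
}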

From source file:lirmm.inria.fr.math.TestUtils.java

/** verifies that two matrices are close (maximum absolute row sum norm) */
public static void assertEquals(String msg, RealMatrix expected, RealMatrix observed, double tolerance) {

    Assert.assertNotNull(msg + "\nObserved should not be null", observed);

    if (expected.getColumnDimension() != observed.getColumnDimension()
            || expected.getRowDimension() != observed.getRowDimension()) {
        StringBuilder messageBuffer = new StringBuilder(msg);
        messageBuffer.append("\nObserved has incorrect dimensions.");
        messageBuffer
                .append("\nobserved is " + observed.getRowDimension() + " x " + observed.getColumnDimension());
        messageBuffer
                .append("\nexpected " + expected.getRowDimension() + " x " + expected.getColumnDimension());
        Assert.fail(messageBuffer.toString());
    }

    RealMatrix delta = expected.subtract(observed);
    if (delta.getNorm() >= tolerance) {
        StringBuilder messageBuffer = new StringBuilder(msg);
        messageBuffer.append("\nExpected: " + expected);
        messageBuffer.append("\nObserved: " + observed);
        messageBuffer.append("\nexpected - observed: " + delta);
        Assert.fail(messageBuffer.toString());
    }
}
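
The helper above fails whenever the norm of expected - observed reaches the tolerance. A self-contained sketch of the same criterion (matrix values invented):

import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;

public class MatrixClosenessSketch {
    public static void main(String[] args) {
        RealMatrix expected = MatrixUtils.createRealMatrix(new double[][] { { 1.0, 2.0 }, { 3.0, 4.0 } });
        RealMatrix observed = MatrixUtils.createRealMatrix(new double[][] { { 1.0 + 1e-9, 2.0 }, { 3.0, 4.0 - 1e-9 } });

        // Same criterion as assertEquals above: the norm of the difference must stay below the tolerance
        double tolerance = 1e-6;
        RealMatrix delta = expected.subtract(observed);
        System.out.println(delta.getNorm() < tolerance);  // true: the matrices agree to within 1e-6
    }
}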

From source file:com.analog.lyric.dimple.test.solvers.sumproduct.TestSampledFactors.java

/**
 * Adapted from MATLAB test4 in tests/algoGaussian/testSampledFactors.m
 */
@Test
public void sampledComplexProduct() {
    // NOTE: test may fail if seed is changed! We keep the number of samples down so that the test doesn't
    // take too long. Increasing the samples produces better results.

    testRand.setSeed(42);

    try (CurrentModel cur = using(new FactorGraph())) {
        final Complex a = complex("a");
        final Complex b = complex("b");
        final Complex c = product(a, b);

        double[] aMean = new double[] { 10, 10 };
        RealMatrix aCovariance = randCovariance(2);
        a.setPrior(new MultivariateNormal(aMean, aCovariance.getData()));

        double[] bMean = new double[] { -20, 20 };
        RealMatrix bCovariance = randCovariance(2);
        b.setPrior(new MultivariateNormalParameters(bMean, bCovariance.getData()));

        GaussianRandomGenerator normalGenerator = new GaussianRandomGenerator(testRand);
        CorrelatedRandomVectorGenerator aGenerator = new CorrelatedRandomVectorGenerator(aMean, aCovariance,
                1e-12, normalGenerator);
        CorrelatedRandomVectorGenerator bGenerator = new CorrelatedRandomVectorGenerator(bMean, bCovariance,
                1e-12, normalGenerator);

        StorelessCovariance expectedCov = new StorelessCovariance(2);

        final int nSamples = 10000;

        RealVector expectedMean = MatrixUtils.createRealVector(new double[2]);
        double[] cSample = new double[2];

        for (int i = 0; i < nSamples; ++i) {
            double[] aSample = aGenerator.nextVector();
            double[] bSample = bGenerator.nextVector();

            // Compute complex product
            cSample[0] = aSample[0] * bSample[0] - aSample[1] * bSample[1];
            cSample[1] = aSample[0] * bSample[1] + aSample[1] * bSample[0];

            expectedMean.addToEntry(0, cSample[0]);
            expectedMean.addToEntry(1, cSample[1]);

            expectedCov.increment(cSample);
        }

        expectedMean.mapDivideToSelf(nSamples); // normalize

        SumProductSolverGraph sfg = requireNonNull(cur.graph.setSolverFactory(new SumProductSolver()));
        sfg.setOption(GibbsOptions.numSamples, nSamples);

        sfg.solve();

        MultivariateNormalParameters cBelief = requireNonNull(c.getBelief());

        RealVector observedMean = MatrixUtils.createRealVector(cBelief.getMean());
        double scaledMeanDistance = expectedMean.getDistance(observedMean) / expectedMean.getNorm();

        //         System.out.format("expectedMean = %s\n", expectedMean);
        //         System.out.format("observedMean = %s\n", observedMean);
        //         System.out.println(scaledMeanDistance);

        assertEquals(0.0, scaledMeanDistance, .02);

        RealMatrix expectedCovariance = expectedCov.getCovarianceMatrix();
        RealMatrix observedCovariance = MatrixUtils.createRealMatrix(cBelief.getCovariance());
        RealMatrix diffCovariance = expectedCovariance.subtract(observedCovariance);

        double scaledCovarianceDistance = diffCovariance.getNorm() / expectedCovariance.getNorm();

        //         System.out.println(expectedCovariance);
        //         System.out.println(expectedCovariance.getNorm());
        //         System.out.println(diffCovariance);
        //         System.out.println(diffCovariance.getNorm());
        //         System.out.println(diffCovariance.getNorm() / expectedCovariance.getNorm());

        assertEquals(0.0, scaledCovarianceDistance, .2);
    }
}
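
The test above compares means and covariances with scale-free ratios such as diffCovariance.getNorm() / expectedCovariance.getNorm(), so the tolerance does not depend on the magnitude of the data. A minimal sketch of that relative check (values invented):

import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;

public class RelativeNormSketch {
    public static void main(String[] args) {
        RealMatrix expected = MatrixUtils.createRealMatrix(new double[][] { { 4.0, 1.0 }, { 1.0, 9.0 } });
        RealMatrix observed = MatrixUtils.createRealMatrix(new double[][] { { 4.1, 0.9 }, { 0.9, 9.2 } });

        // Norm of the difference relative to the norm of the reference matrix
        double relativeError = expected.subtract(observed).getNorm() / expected.getNorm();
        System.out.println(relativeError);        // about 0.03 for these values
        System.out.println(relativeError < 0.2);  // true
    }
}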

From source file:inputHandling.DataGenCorrCommons.java

@Override
public TupleList genData(int dimensions, int tupleCount) {
    logger.info("Generating uniform correlated Data with " + tupleCount + " Tuples in " + dimensions
            + " dimensions, Coeff.: -" + this.coeff);
    genMatrices(dimensions);
    RealMatrix covariance = MatrixUtils.createRealMatrix(cov);
    RandomGenerator rg = new JDKRandomGenerator(Math.round(seed));
    UniformRandomGenerator rawGenerator = new UniformRandomGenerator(rg);
    double small = 1.0e-12 * covariance.getNorm();
    CorrelatedRandomVectorGenerator generator = new CorrelatedRandomVectorGenerator(mean, covariance, small,
            rawGenerator);

    // Generate the Tuples
    TupleList tupleList = new TupleList(dimensions);
    for (int j = 0; j < tupleCount; j++) {
        double[] randomVector = generator.nextVector();
        Tuple tuple = new Tuple(randomVector);
        tupleList.add(tuple);
    }
    return tupleList;
}
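
This generator (and the three variants that follow) seeds CorrelatedRandomVectorGenerator with a positivity threshold proportional to covariance.getNorm(): scaling 1.0e-12 by the norm treats eigenvalues that are tiny relative to the matrix scale as zero when the covariance is decomposed. A self-contained version of the pattern (mean, covariance, and seed invented):

import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.random.CorrelatedRandomVectorGenerator;
import org.apache.commons.math3.random.GaussianRandomGenerator;
import org.apache.commons.math3.random.JDKRandomGenerator;

public class CorrelatedSamplesSketch {
    public static void main(String[] args) {
        double[] mean = { 0.0, 0.0 };
        RealMatrix covariance = MatrixUtils.createRealMatrix(new double[][] { { 1.0, 0.8 }, { 0.8, 1.0 } });

        JDKRandomGenerator rg = new JDKRandomGenerator();
        rg.setSeed(42);
        GaussianRandomGenerator rawGenerator = new GaussianRandomGenerator(rg);

        // Eigenvalues below this threshold are treated as zero when the covariance is decomposed
        double small = 1.0e-12 * covariance.getNorm();
        CorrelatedRandomVectorGenerator generator =
                new CorrelatedRandomVectorGenerator(mean, covariance, small, rawGenerator);

        for (int i = 0; i < 3; i++) {
            double[] sample = generator.nextVector();
            System.out.println(sample[0] + ", " + sample[1]);
        }
    }
}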

From source file:inputHandling.DataGenCorrGaussCommons.java

@Override
public TupleList genData(int dimensions, int tupleCount) {
    logger.info("Generating gaussian correlated Data with " + tupleCount + " Tuples in " + dimensions
            + " dimensions, Coeff.: -" + this.coeff);
    genMatrices(dimensions);
    RealMatrix covariance = MatrixUtils.createRealMatrix(cov);
    RandomGenerator rg = new JDKRandomGenerator(Math.round(seed));
    GaussianRandomGenerator rawGenerator = new GaussianRandomGenerator(rg);
    double small = 1.0e-12 * covariance.getNorm();
    CorrelatedRandomVectorGenerator generator = new CorrelatedRandomVectorGenerator(mean, covariance, small,
            rawGenerator);

    // Generate the Tuples
    TupleList tupleList = new TupleList(dimensions);
    for (int j = 0; j < tupleCount; j++) {
        double[] randomVector = generator.nextVector();
        Tuple tuple = new Tuple(randomVector);
        tupleList.add(tuple);
    }
    return tupleList;
}

From source file:inputHandling.DataGenAntiCorrCommons.java

@Override
public TupleList genData(int dimensions, int tupleCount) {
    logger.info("Generating uniform anti-correlated Data with " + tupleCount + " Tuples in " + dimensions
            + " dimensions, Coeff.: -" + this.coeff);
    genMatrices(dimensions);
    RealMatrix covariance = MatrixUtils.createRealMatrix(cov);
    RandomGenerator rg = new JDKRandomGenerator(Math.round(seed));
    UniformRandomGenerator rawGenerator = new UniformRandomGenerator(rg);
    double small = 1.0e-12 * covariance.getNorm();
    CorrelatedRandomVectorGenerator generator = new CorrelatedRandomVectorGenerator(mean, covariance, small,
            rawGenerator);

    TupleList tupleList = new TupleList(dimensions);
    // Invert the values to obtain anti-correlation
    for (int j = 0; j < tupleCount; j++) {
        double[] randomVector = generator.nextVector();
        for (int i = 0; i < dimensions; i++) {
            if (j % 2 == 0 && i % 2 == 0)
                randomVector[i] = 2 * mean[i] - randomVector[i];
            else if (j % 2 != 0 && i % 2 != 0)
                randomVector[i] = 2 * mean[i] - randomVector[i];
            else
                randomVector[i] = randomVector[i];
        }
        Tuple tuple = new Tuple(randomVector);
        tupleList.add(tuple);
    }
    return tupleList;
}

From source file:inputHandling.DataGenAntiCorrGaussCommons.java

@Override
public TupleList genData(int dimensions, int tupleCount) {
    logger.info("Generating gaussian anti-correlated Data with " + tupleCount + " Tuples in " + dimensions
            + " dimensions, Coeff.: -" + this.coeff);
    genMatrices(dimensions);
    RealMatrix covariance = MatrixUtils.createRealMatrix(cov);
    RandomGenerator rg = new JDKRandomGenerator(Math.round(seed));
    GaussianRandomGenerator rawGenerator = new GaussianRandomGenerator(rg);
    double small = 1.0e-12 * covariance.getNorm();
    CorrelatedRandomVectorGenerator generator = new CorrelatedRandomVectorGenerator(mean, covariance, small,
            rawGenerator);

    TupleList tupleList = new TupleList(dimensions);
    // Invert the values to obtain anti-correlation
    for (int j = 0; j < tupleCount; j++) {
        double[] randomVector = generator.nextVector();
        for (int i = 0; i < dimensions; i++) {
            if (j % 2 == 0 && i % 2 == 0)
                randomVector[i] = 2 * mean[i] - randomVector[i];
            else if (j % 2 != 0 && i % 2 != 0)
                randomVector[i] = 2 * mean[i] - randomVector[i];
            else
                randomVector[i] = randomVector[i];
        }
        Tuple tuple = new Tuple(randomVector);
        tupleList.add(tuple);
    }
    return tupleList;
}

From source file:com.joptimizer.util.ColtUtils.java

/**
 * Returns a lower and an upper bound for the condition number
 * <br>kp(A) = Norm[A, p] * Norm[A^-1, p]
 * <br>where
 * <br>      Norm[A, p] = sup ( Norm[A.x, p]/Norm[x, p] , x !=0 )
 * <br>for a matrix and
 * <br>      Norm[x, 1]  := Sum[Math.abs(x[i]), i]
 * <br>      Norm[x, 2]  := Math.sqrt(Sum[Math.pow(x[i], 2), i])
 * <br>      Norm[x, oo] := Max[Math.abs(x[i]), i]
 * <br>for a vector.
 *
 * @param A matrix you want the condition number of
 * @param p norm order (2 or Integer.MAX_VALUE)
 * @return an array with the two bounds (lower and upper bound)
 *
 * @see Ravindra S. Gajulapalli, Leon S. Lasdon "Scaling Sparse Matrices for Optimization Algorithms"
 */
public static double[] getConditionNumberRange(RealMatrix A, int p) {
    double infLimit = Double.NEGATIVE_INFINITY;
    double supLimit = Double.POSITIVE_INFINITY;
    List<Double> columnNormsList = new ArrayList<Double>();
    switch (p) {
    case 2:
        for (int j = 0; j < A.getColumnDimension(); j++) {
            columnNormsList.add(A.getColumnVector(j).getL1Norm());
        }
        Collections.sort(columnNormsList);
        //kp >= Norm[Ai, p]/Norm[Aj, p], for each i, j = 0,1,...,n, Ak columns of A
        infLimit = columnNormsList.get(columnNormsList.size() - 1) / columnNormsList.get(0);
        break;

    case Integer.MAX_VALUE:
        double normAInf = A.getNorm();
        for (int j = 0; j < A.getColumnDimension(); j++) {
            columnNormsList.add(A.getColumnVector(j).getLInfNorm());
        }
        Collections.sort(columnNormsList);
        //k1 >= Norm[A, +oo]/min{ Norm[Aj, +oo], for each j = 0,1,...,n }, Ak columns of A
        infLimit = normAInf / columnNormsList.get(0);
        break;

    default:
        throw new IllegalArgumentException("p must be 2 or Integer.MAX_VALUE");
    }
    return new double[] { infLimit, supLimit };
}
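
Since getNorm() is the operator norm induced by the vector infinity norm, the exact infinity-norm condition number of an invertible matrix can also be obtained directly from getNorm() of the matrix and of its inverse; the method above returns bounds rather than the exact value. A short sketch (matrix values invented):

import org.apache.commons.math3.linear.LUDecomposition;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;

public class ConditionNumberSketch {
    public static void main(String[] args) {
        RealMatrix a = MatrixUtils.createRealMatrix(new double[][] { { 4.0, 1.0 }, { 2.0, 3.0 } });

        // k_inf(A) = Norm[A, oo] * Norm[A^-1, oo]; getNorm() is exactly the infinity-norm operator norm
        RealMatrix aInverse = new LUDecomposition(a).getSolver().getInverse();
        double conditionNumber = a.getNorm() * aInverse.getNorm();
        System.out.println(conditionNumber);  // about 3.0 for this matrix
    }
}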

From source file:org.orekit.frames.TransformTest.java

@Test
public void testLinear() {

    RandomGenerator random = new Well19937a(0x14f6411217b148d8l);
    for (int n = 0; n < 100; ++n) {
        Transform t = randomTransform(random);

        // build an equivalent linear transform by extracting raw translation/rotation
        RealMatrix linearA = MatrixUtils.createRealMatrix(3, 4);
        linearA.setSubMatrix(t.getRotation().getMatrix(), 0, 0);
        Vector3D rt = t.getRotation().applyTo(t.getTranslation());
        linearA.setEntry(0, 3, rt.getX());
        linearA.setEntry(1, 3, rt.getY());
        linearA.setEntry(2, 3, rt.getZ());

        // build an equivalent linear transform by observing transformed points
        RealMatrix linearB = MatrixUtils.createRealMatrix(3, 4);
        Vector3D p0 = t.transformPosition(Vector3D.ZERO);
        Vector3D pI = t.transformPosition(Vector3D.PLUS_I).subtract(p0);
        Vector3D pJ = t.transformPosition(Vector3D.PLUS_J).subtract(p0);
        Vector3D pK = t.transformPosition(Vector3D.PLUS_K).subtract(p0);
        linearB.setColumn(0, new double[] { pI.getX(), pI.getY(), pI.getZ() });
        linearB.setColumn(1, new double[] { pJ.getX(), pJ.getY(), pJ.getZ() });
        linearB.setColumn(2, new double[] { pK.getX(), pK.getY(), pK.getZ() });
        linearB.setColumn(3, new double[] { p0.getX(), p0.getY(), p0.getZ() });

        // both linear transforms should be equal
        Assert.assertEquals(0.0, linearB.subtract(linearA).getNorm(), 1.0e-15 * linearA.getNorm());

        for (int i = 0; i < 100; ++i) {
            Vector3D p = randomVector(1.0e3, random);
            Vector3D q = t.transformPosition(p);

            double[] qA = linearA.operate(new double[] { p.getX(), p.getY(), p.getZ(), 1.0 });
            Assert.assertEquals(q.getX(), qA[0], 1.0e-13 * p.getNorm());
            Assert.assertEquals(q.getY(), qA[1], 1.0e-13 * p.getNorm());
            Assert.assertEquals(q.getZ(), qA[2], 1.0e-13 * p.getNorm());

            double[] qB = linearB.operate(new double[] { p.getX(), p.getY(), p.getZ(), 1.0 });
            Assert.assertEquals(q.getX(), qB[0], 1.0e-10 * p.getNorm());
            Assert.assertEquals(q.getY(), qB[1], 1.0e-10 * p.getNorm());
            Assert.assertEquals(q.getZ(), qB[2], 1.0e-10 * p.getNorm());

        }

    }

}