Example usage for org.apache.commons.math3.linear RealVector add

Introduction

On this page you can find example usages of org.apache.commons.math3.linear.RealVector#add.

Prototype

public RealVector add(RealVector v) throws DimensionMismatchException 

Document

Compute the sum of this vector and v.
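
For orientation before the larger examples below, a minimal standalone sketch of the call (class name is arbitrary; assumes only commons-math3 on the classpath):

import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.RealVector;

public class RealVectorAddDemo {
    public static void main(String[] args) {
        RealVector u = new ArrayRealVector(new double[] { 1.0, 2.0, 3.0 });
        RealVector v = new ArrayRealVector(new double[] { 4.0, 5.0, 6.0 });
        // add() returns a new vector; u and v are left unchanged
        RealVector sum = u.add(v);
        System.out.println(sum); // {5; 7; 9}
        // calling add() with a vector of a different dimension throws DimensionMismatchException
    }
}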

Usage

From source file:edu.byu.nlp.stats.DirichletMLEOptimizableTest.java

/**
 * Computes a Newton-Raphson update for alpha and returns it as a new vector.
 */
private static RealVector newtonRaphsonUpdate(final double[][] data, double[] alpha) {
    // We'll compute the gold-standard value the "long" way (taking the inverse of the Hessian)
    RealMatrix hessian = new Array2DRowRealMatrix(alpha.length, alpha.length);
    for (int r = 0; r < hessian.getRowDimension(); r++) {
        for (int c = 0; c < hessian.getColumnDimension(); c++) {
            hessian.addToEntry(r, c, data.length * Gamma.trigamma(DoubleArrays.sum(alpha)));
            if (r == c) {
                hessian.addToEntry(r, c, -data.length * Gamma.trigamma(alpha[r]));
            }
        }
    }
    RealVector derivative = new ArrayRealVector(alpha.length);
    for (int k = 0; k < alpha.length; k++) {
        derivative.setEntry(k,
                data.length * (Gamma.digamma(DoubleArrays.sum(alpha)) - Gamma.digamma(alpha[k])));
        for (double[] theta : data) {
            derivative.addToEntry(k, theta[k]);
        }
    }

    RealMatrix hessianInverse = new LUDecomposition(hessian).getSolver().getInverse();
    RealVector negDiff = hessianInverse.preMultiply(derivative);
    negDiff.mapMultiplyToSelf(-1.0);

    RealVector expected = new ArrayRealVector(alpha, true);
    return expected.add(negDiff);
}
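
Because the Hessian (and hence its inverse) is symmetric, hessianInverse.preMultiply(derivative) equals hessianInverse.operate(derivative), and the final expected.add(negDiff) applies the Newton step alpha_new = alpha - H^-1 * gradient. The same step can be written without forming the inverse; a sketch under that assumption, using solve() in place of getInverse():

private static RealVector newtonStep(RealMatrix hessian, RealVector gradient, RealVector x) {
    // solve H * step = gradient rather than inverting H explicitly
    RealVector step = new LUDecomposition(hessian).getSolver().solve(gradient);
    return x.add(step.mapMultiply(-1.0)); // x - H^-1 * gradient
}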

From source file:com.cloudera.oryx.kmeans.common.LloydsUpdateStrategy.java

/**
 * Compute the {@code Vector} that is the centroid of the given weighted points.
 *
 * @param points The weighted points
 * @return The centroid of the weighted points
 */
public static <W extends Weighted<RealVector>> RealVector centroid(Iterable<W> points) {
    RealVector center = null;
    double sz = 0.0;
    for (W v : points) {
        RealVector weighted = v.thing().mapMultiply(v.weight());
        if (center == null) {
            center = weighted;
        } else {
            center = center.add(weighted);
        }
        sz += v.weight();
    }
    return center.mapDivide(sz);
}
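
The same accumulate-then-divide pattern, stripped of Oryx's Weighted wrapper, looks roughly like this (the parallel arrays are a hypothetical stand-in for the weighted points):

static RealVector centroid(RealVector[] vectors, double[] weights) {
    RealVector center = null;
    double totalWeight = 0.0;
    for (int i = 0; i < vectors.length; i++) {
        RealVector weighted = vectors[i].mapMultiply(weights[i]);
        center = (center == null) ? weighted : center.add(weighted);
        totalWeight += weights[i];
    }
    return center.mapDivide(totalWeight); // like the original, fails on empty input
}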

From source file:com.github.thorbenlindhauer.factor.CanonicalGaussianFactor.java

/**
 * Transforms a conditional linear Gaussian (i.e. a Gaussian of the form N(x; a + B^T * Y, C)) into canonical form.
 *
 * @param meanVector a
 * @param weightMatrix B
 * @param covarianceMatrix C
 */
public static CanonicalGaussianFactor fromConditionalForm(Scope scope, Scope conditioningScope,
        RealVector meanVector, RealMatrix covarianceMatrix, RealMatrix weightMatrix) {

    // TODO: perform cardinality checks etc.

    // the following assumes that the resulting precision matrix (and mean vector) can be restructured as follows:
    // ( SUBMATRIX_XX SUBMATRIX_XY )
    // ( SUBMATRIX_YX SUBMATRIX_YY )
    // where X indicates variables that are part of the prediction scope and Y are variables being part of the conditioning scope

    // assuming
    //   meanVector: a; |x| vector
    //   covarianceMatrix: C; |x| cross |x| matrix
    //   weightMatrix: B;  |x| cross |y| matrix

    // XX = C^-1
    // XY = -C^-1 * B
    // YX = -B^T * C^-1
    // YY = B^T * C^-1 * B

    MathUtil mathUtil = new MathUtil(covarianceMatrix);

    // C^(-1)
    RealMatrix xxMatrix = mathUtil.invert();

    //    if (!mathUtil.isZeroMatrix()) {
    //      xxMatrix = mathUtil.invert();
    //    } else {
    //
    //      // this is a special case for convolution in which the "summing" variable has no variance itself
    //      // although a 0 variance is not valid in general
    //      xxMatrix = covarianceMatrix;
    //    }

    // B^T * C^(-1)
    RealMatrix weightedXXMatrix = weightMatrix.transpose().multiply(xxMatrix);

    // -B^T * C^(-1)
    RealMatrix yxMatrix = weightedXXMatrix.scalarMultiply(-1.0d);

    // -C^(-1)^T * B
    RealMatrix xyMatrix = xxMatrix.transpose().multiply(weightMatrix).scalarMultiply(-1.0d);

    // B^T * C^(-1) * B
    RealMatrix yyMatrix = weightedXXMatrix.multiply(weightMatrix);

    // K
    RealMatrix conditionedPrecisionMatrix = new Array2DRowRealMatrix(scope.size(), scope.size());

    // Matrix to generate h
    RealMatrix conditionedMeanTransformationMatrix = new Array2DRowRealMatrix(scope.size(), scope.size());

    Scope predictionScope = scope.reduceBy(conditioningScope);
    int[] predictionMapping = scope.createContinuousVariableMapping(predictionScope);
    int[] conditioningMapping = scope.createContinuousVariableMapping(conditioningScope);

    for (int i = 0; i < scope.size(); i++) {
        RealVector precisionColumn = conditionedPrecisionMatrix.getColumnVector(i);

        if (predictionMapping[i] >= 0) {
            precisionColumn = precisionColumn.add(
                    padVector(xxMatrix.getColumnVector(predictionMapping[i]), scope.size(), predictionMapping));

            conditionedMeanTransformationMatrix.setColumnVector(i, precisionColumn);

            precisionColumn = precisionColumn.add(padVector(yxMatrix.getColumnVector(predictionMapping[i]),
                    scope.size(), conditioningMapping));

            conditionedPrecisionMatrix.setColumnVector(i, precisionColumn);
        }

        if (conditioningMapping[i] >= 0) {
            precisionColumn = precisionColumn.add(padVector(xyMatrix.getColumnVector(conditioningMapping[i]),
                    scope.size(), predictionMapping));

            conditionedMeanTransformationMatrix.setColumnVector(i, precisionColumn);

            precisionColumn = precisionColumn.add(padVector(yyMatrix.getColumnVector(conditioningMapping[i]),
                    scope.size(), conditioningMapping));

            conditionedPrecisionMatrix.setColumnVector(i, precisionColumn);
        }
    }

    // h = (a, 0)^T * (XX, XY; 0, 0)
    RealMatrix scaledMeanMatrix = new Array2DRowRealMatrix(1, scope.size());
    scaledMeanMatrix.setRowVector(0, padVector(meanVector, scope.size(), predictionMapping));

    scaledMeanMatrix = scaledMeanMatrix.multiply(conditionedMeanTransformationMatrix);
    RealVector scaledMeanVector = scaledMeanMatrix.getRowVector(0);

    // g = -0.5 * a^T * C^-1 * a - log((2 * PI)^(m/2) * det(C)^0.5) where m is the size of the prediction scope
    RealMatrix meanMatrix = new Array2DRowRealMatrix(predictionScope.size(), 1);
    meanMatrix.setColumnVector(0, meanVector);
    double normalizationConstant = -0.5d * meanVector.dotProduct(xxMatrix.operate(meanVector)) - Math.log(
            Math.pow(2 * Math.PI, (double) predictionScope.size() / 2.0d) * Math.sqrt(mathUtil.determinant()));

    return new CanonicalGaussianFactor(scope, conditionedPrecisionMatrix, scaledMeanVector,
            normalizationConstant);

}
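
The column-wise adds above scatter sub-matrix columns into the full scope through padVector, a helper not shown in this snippet. A plausible sketch of it, assuming mapping[i] holds the source index for position i, or -1 if the variable is absent from the sub-scope:

static RealVector padVector(RealVector source, int size, int[] mapping) {
    RealVector padded = new ArrayRealVector(size); // zero-initialized
    for (int i = 0; i < size; i++) {
        if (mapping[i] >= 0) {
            padded.setEntry(i, source.getEntry(mapping[i]));
        }
    }
    return padded;
}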

From source file:hivemall.utils.math.MatrixUtils.java

/**
 * Lanczos tridiagonalization of a symmetric matrix C into an s x s tridiagonal matrix T.
 *
 * @see http://www.cas.mcmaster.ca/~qiao/publications/spie05.pdf
 * @param C target symmetric matrix
 * @param a initial vector
 * @param T result is stored here
 */
public static void lanczosTridiagonalization(@Nonnull final RealMatrix C, @Nonnull final double[] a,
        @Nonnull final RealMatrix T) {
    Preconditions.checkArgument(Arrays.deepEquals(C.getData(), C.transpose().getData()),
            "Target matrix C must be a symmetric matrix");
    Preconditions.checkArgument(C.getColumnDimension() == a.length,
            "Column size of C and length of a should be the same");
    Preconditions.checkArgument(T.getRowDimension() == T.getColumnDimension(), "T must be a square matrix");

    int s = T.getRowDimension();

    // initialize T with zeros
    T.setSubMatrix(new double[s][s], 0, 0);

    RealVector a0 = new ArrayRealVector(a.length);
    RealVector r = new ArrayRealVector(a);

    double beta0 = 1.d;

    for (int i = 0; i < s; i++) {
        RealVector a1 = r.mapDivide(beta0);
        RealVector Ca1 = C.operate(a1);

        double alpha1 = a1.dotProduct(Ca1);

        r = Ca1.add(a1.mapMultiply(-1.d * alpha1)).add(a0.mapMultiply(-1.d * beta0));

        double beta1 = r.getNorm();

        T.setEntry(i, i, alpha1);
        if (i - 1 >= 0) {
            T.setEntry(i, i - 1, beta0);
        }
        if (i + 1 < s) {
            T.setEntry(i, i + 1, beta1);
        }

        a0 = a1.copy();
        beta0 = beta1;
    }
}
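
A small driver for the routine above (assumes hivemall's MatrixUtils is on the classpath; the symmetric matrix and initial vector are illustrative only):

RealMatrix C = new Array2DRowRealMatrix(new double[][] {
        { 4.0, 1.0, 0.0 },
        { 1.0, 3.0, 1.0 },
        { 0.0, 1.0, 2.0 } });
double[] a = new double[] { 1.0, 0.0, 0.0 };   // initial vector
RealMatrix T = new Array2DRowRealMatrix(3, 3); // receives the tridiagonal result
MatrixUtils.lanczosTridiagonalization(C, a, T);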

From source file:com.cloudera.oryx.kmeans.common.MiniBatchUpdateStrategy.java

@Override
public <W extends Weighted<RealVector>> Centers update(List<W> points, Centers centers) {
    int[] perCenterStepCounts = new int[centers.size()];
    WeightedSampler<RealVector, W> sampler = new WeightedSampler<RealVector, W>(points, random);
    for (int iter = 0; iter < numIterations; iter++) {
        // Compute the closest center for each point in the mini-batch
        List<List<RealVector>> centerAssignments = Lists.newArrayList();
        for (int i = 0; i < centers.size(); i++) {
            centerAssignments.add(Lists.<RealVector>newArrayList());
        }
        for (int i = 0; i < miniBatchSize; i++) {
            RealVector sample = sampler.sample();
            int closestId = centers.getDistance(sample).getClosestCenterId();
            centerAssignments.get(closestId).add(sample);
        }
        // Apply the mini-batch
        List<RealVector> nextCenters = Lists.newArrayList();
        for (int i = 0; i < centerAssignments.size(); i++) {
            RealVector currentCenter = centers.get(i);
            for (int j = 0; j < centerAssignments.get(i).size(); j++) {
                double eta = 1.0 / (++perCenterStepCounts[i] + 1.0);
                currentCenter = currentCenter.mapMultiply(1.0 - eta);
                currentCenter = currentCenter.add(centerAssignments.get(i).get(j).mapMultiply(eta));
            }
            nextCenters.add(currentCenter);
        }
        centers = new Centers(nextCenters);
    }
    return centers;
}
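
The inner update is the usual mini-batch k-means convex combination c := (1 - eta) * c + eta * x, with RealVector.add supplying the second term; in isolation (the values are made up):

double eta = 0.1;
RealVector center = new ArrayRealVector(new double[] { 1.0, 1.0 });
RealVector sample = new ArrayRealVector(new double[] { 3.0, 5.0 });
center = center.mapMultiply(1.0 - eta).add(sample.mapMultiply(eta)); // {1.2; 1.4}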

From source file:hivemall.anomaly.SDAR2D.java

/**
 * @param x series of input in LIFO order
 * @param k AR window size
 * @return x_hat predicted x
 * @see https://en.wikipedia.org/wiki/Matrix_multiplication#Outer_product
 */
@Nonnull
public RealVector update(@Nonnull final ArrayRealVector[] x, final int k) {
    Preconditions.checkArgument(x.length >= 1, "x.length MUST be greater than or equal to 1: " + x.length);
    Preconditions.checkArgument(k >= 0, "k MUST be greater than or equal to 0: ", k);
    Preconditions.checkArgument(k < _C.length,
            "k MUST be less than |C| but " + "k=" + k + ", |C|=" + _C.length);

    final ArrayRealVector x_t = x[0];
    final int dims = x_t.getDimension();

    if (_initialized == false) {
        this._mu = x_t.copy();
        this._sigma = new BlockRealMatrix(dims, dims);
        assert (_sigma.isSquare());
        this._initialized = true;
        return new ArrayRealVector(dims);
    }
    Preconditions.checkArgument(k >= 1, "k MUST be greater than 0: ", k);

    // old parameters are accessible to compute the Hellinger distance
    this._muOld = _mu.copy();
    this._sigmaOld = _sigma.copy();

    // update mean vector
    // \hat{mu} := (1-r) \hat{mu} + r x_t
    this._mu = _mu.mapMultiply(1.d - _r).add(x_t.mapMultiply(_r));

    // compute residuals (x - \hat{mu})
    final RealVector[] xResidual = new RealVector[k + 1];
    for (int j = 0; j <= k; j++) {
        xResidual[j] = x[j].subtract(_mu);
    }

    // update covariance matrices
    // C_j := (1-r) C_j + r (x_t - \hat{mu}) (x_{t-j} - \hat{mu})'
    final RealMatrix[] C = this._C;
    final RealVector rxResidual0 = xResidual[0].mapMultiply(_r); // r (x_t - \hat{mu})
    for (int j = 0; j <= k; j++) {
        RealMatrix Cj = C[j];
        if (Cj == null) {
            C[j] = rxResidual0.outerProduct(x[j].subtract(_mu));
        } else {
            C[j] = Cj.scalarMultiply(1.d - _r).add(rxResidual0.outerProduct(x[j].subtract(_mu)));
        }
    }

    // solve A in the following Yule-Walker equation
    // C_j = \sum_{i=1}^{k} A_i C_{j-i} where j = 1..k, C_{-i} = C_i'
    /*
     * /C_1\     /A_1\  /C_0     |C_1'    |C_2'    | .  .  .   |C_{k-1}' \
     * |---|     |---|  |--------+--------+--------+           +---------|
     * |C_2|     |A_2|  |C_1     |C_0     |C_1'    |               .     |
     * |---|     |---|  |--------+--------+--------+               .     |
     * |C_3|  =  |A_3|  |C_2     |C_1     |C_0     |               .     |
     * | . |     | . |  |--------+--------+--------+                     |
     * | . |     | . |  |   .                            .               |
     * | . |     | . |  |   .                            .               |
     * |---|     |---|  |--------+                              +--------|
     * \C_k/     \A_k/  \C_{k-1} | .  .  .                      |C_0     /
     */
    RealMatrix[][] rhs = MatrixUtils.toeplitz(C, k);
    RealMatrix[] lhs = Arrays.copyOfRange(C, 1, k + 1);
    RealMatrix R = MatrixUtils.combinedMatrices(rhs, dims);
    RealMatrix L = MatrixUtils.combinedMatrices(lhs);
    RealMatrix A = MatrixUtils.solve(L, R, false);

    // estimate x
    // \hat{x} = \hat{mu} + \sum_{i=1}^{k} A_i (x_{t-i} - \hat{mu})
    RealVector x_hat = _mu.copy();
    for (int i = 0; i < k; i++) {
        int offset = i * dims;
        RealMatrix Ai = A.getSubMatrix(offset, offset + dims - 1, 0, dims - 1);
        x_hat = x_hat.add(Ai.operate(xResidual[i + 1]));
    }

    // update model covariance
    // \Sigma := (1-r) \Sigma + r (x - \hat{x}) (x - \hat{x})'
    RealVector xEstimateResidual = x_t.subtract(x_hat);
    this._sigma = _sigma.scalarMultiply(1.d - _r)
            .add(xEstimateResidual.mapMultiply(_r).outerProduct(xEstimateResidual));

    return x_hat;
}
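
The discounted mean update near the top of the method is the same mapMultiply/add pairing; in isolation (r is a hypothetical discounting rate, values are made up):

double r = 0.05;
RealVector mu = new ArrayRealVector(new double[] { 0.0, 0.0 });
RealVector x_t = new ArrayRealVector(new double[] { 2.0, 4.0 });
mu = mu.mapMultiply(1.0 - r).add(x_t.mapMultiply(r)); // mu := (1-r) mu + r x_t = {0.1; 0.2}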

From source file:iDynoOptimizer.MOEAFramework26.src.org.moeaframework.core.operator.real.AdaptiveMetropolis.java

@Override
public Solution[] evolve(Solution[] parents) {
    int k = parents.length;
    int n = parents[0].getNumberOfVariables();
    RealMatrix x = new Array2DRowRealMatrix(k, n);

    for (int i = 0; i < k; i++) {
        x.setRow(i, EncodingUtils.getReal(parents[i]));
    }

    try {
        //perform Cholesky factorization and get the upper triangular matrix
        double jumpRate = Math.pow(jumpRateCoefficient / Math.sqrt(n), 2.0);

        RealMatrix chol = new CholeskyDecomposition(
                new Covariance(x.scalarMultiply(jumpRate)).getCovarianceMatrix()).getLT();

        //produce the offspring
        Solution[] offspring = new Solution[numberOfOffspring];

        for (int i = 0; i < numberOfOffspring; i++) {
            Solution child = parents[PRNG.nextInt(parents.length)].copy();

            //apply adaptive metropolis step to solution
            RealVector muC = new ArrayRealVector(EncodingUtils.getReal(child));
            RealVector ru = new ArrayRealVector(n);

            for (int j = 0; j < n; j++) {
                ru.setEntry(j, PRNG.nextGaussian());
            }

            double[] variables = muC.add(chol.preMultiply(ru)).toArray();

            //assign variables back to solution
            for (int j = 0; j < n; j++) {
                RealVariable variable = (RealVariable) child.getVariable(j);
                double value = variables[j];

                if (value < variable.getLowerBound()) {
                    value = variable.getLowerBound();
                } else if (value > variable.getUpperBound()) {
                    value = variable.getUpperBound();
                }

                variable.setValue(value);
            }

            offspring[i] = child;
        }

        return offspring;
    } catch (Exception e) {
        return new Solution[0];
    }
}
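
The proposal step forms x' = x + L * u, where L is the lower Cholesky factor of the jump covariance and u is a standard-normal draw: chol.preMultiply(ru) computes ru^T * L^T, which equals L * ru. A minimal sketch with a hypothetical 2-D covariance:

RealMatrix cov = new Array2DRowRealMatrix(new double[][] { { 1.0, 0.5 }, { 0.5, 2.0 } });
RealMatrix cholT = new CholeskyDecomposition(cov).getLT(); // upper-triangular L^T
RealVector x = new ArrayRealVector(new double[] { 0.0, 0.0 });
RealVector u = new ArrayRealVector(new double[] { 0.3, -1.1 }); // standard-normal draws
RealVector proposal = x.add(cholT.preMultiply(u)); // x + L * u; the step has covariance cov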

From source file:edu.oregonstate.eecs.mcplan.ml.SequentialProjectionHashLearner.java

@Override
public void run() {
    final RealMatrix cov_reg = MatrixUtils.createRealIdentityMatrix(X.getRowDimension())
            .scalarMultiply(shrinkage);
    for (int k = 0; k < K; ++k) {
        System.out.println("k = " + k);
        System.out.println("\tCovariance");
        final RealMatrix cov = Xi_.multiply(Xi_.transpose()).add(cov_reg);
        //         System.out.println( cov );
        System.out.println("\tM");
        final RealMatrix M = cov; // XL.multiply( Si_ ).multiply( XLt ).add( cov.scalarMultiply( eta ) );
        // TODO: You only need the largest eigenvalue, so the full
        // decomposition is a ton of unnecessary work.
        System.out.println("\tM[" + M.getRowDimension() + "x" + M.getColumnDimension() + "]");
        final EigenDecomposition evd = new EigenDecomposition(M);
        final RealVector w = evd.getEigenvector(0);
        w.mapMultiplyToSelf(1.0 / w.getNorm());
        //         if( Math.abs( w.getNorm() - 1.0 ) > 1e-2 ) {
        //            System.out.println( "! Non-unit eigenvector: ||w|| = " + w.getNorm() );
        //            System.out.println( Math.abs( w.getNorm() - 1.0 ) );
        //            assert( false );
        //         }
        W.add(w);
        final RealMatrix w_wt = w.outerProduct(w);
        final RealMatrix S_tilde = XLt.multiply(w_wt).multiply(XL);
        T(S_tilde, Si_);
        Si_ = Si_.subtract(S_tilde.scalarMultiply(alpha));
        Xi_ = Xi_.subtract(w_wt.multiply(Xi_));
    }
}
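
As the TODO notes, only the dominant eigenvector is needed here, so power iteration would avoid the full decomposition; a minimal sketch (fixed iteration count, assuming M has a dominant eigenvalue):

static RealVector dominantEigenvector(RealMatrix M, int iterations) {
    RealVector w = new ArrayRealVector(M.getColumnDimension(), 1.0);
    for (int i = 0; i < iterations; i++) {
        w = M.operate(w).unitVector(); // multiply and re-normalize
    }
    return w;
}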

From source file:com.github.thorbenlindhauer.factor.CanonicalGaussianFactor.java

@Override
public GaussianFactor product(GaussianFactor other) {
    Scope newScope = scope.union(other.getVariables());

    int newFactorSize = newScope.size();

    int[] thisMapping = newScope.createContinuousVariableMapping(scope);
    int[] otherMapping = newScope.createContinuousVariableMapping(other.getVariables());

    RealMatrix newPrecisionMatrix = new Array2DRowRealMatrix(newScope.size(), newScope.size());

    RealMatrix otherPrecisionMatrix = other.getPrecisionMatrix();

    for (int i = 0; i < newFactorSize; i++) {
        RealVector column = new ArrayRealVector(newFactorSize);
        if (thisMapping[i] >= 0) {
            column = column.add(
                    padVector(precisionMatrix.getColumnVector(thisMapping[i]), newFactorSize, thisMapping));
        }

        if (otherMapping[i] >= 0) {
            column = column.add(padVector(otherPrecisionMatrix.getColumnVector(otherMapping[i]), newFactorSize,
                    otherMapping));
        }
        newPrecisionMatrix.setColumnVector(i, column);
    }

    RealVector newScaledMeanVector = padVector(scaledMeanVector, newScope.size(), thisMapping);
    RealVector otherScaledMeanVector = other.getScaledMeanVector();
    newScaledMeanVector = newScaledMeanVector
            .add(padVector(otherScaledMeanVector, newFactorSize, otherMapping));

    double newNormalizationConstant = normalizationConstant + other.getNormalizationConstant();

    return new CanonicalGaussianFactor(newScope, newPrecisionMatrix, newScaledMeanVector,
            newNormalizationConstant);
}

From source file:edu.utexas.cs.tactex.tariffoptimization.OptimizerWrapperGradientAscent.java

@Override
public TreeMap<Double, TariffSpecification> findOptimum(TariffUtilityEstimate tariffUtilityEstimate,
        int NUM_RATES, int numEval) {

    double[] startingVertex = new double[NUM_RATES]; // start from the fixed-rate tariff's offset
    Arrays.fill(startingVertex, 0.0);

    // temporary solution - getting fixed rate tariff to determine step size and scaling STEP_SIZE proportionally
    TariffSpecification bestFixedRateSpec = tariffUtilityEstimate.getCorrespondingSpec(startingVertex);
    double bestFixedRate = bestFixedRateSpec.getRates().get(0).getValue();
    double rateRatio = bestFixedRate / REFERENCE_RATE;
    // Note: using rateRatio has not been good enough, so trying powers of it (remember it's > 1)
    //STEP_SIZE = Math.max(REFERENCE_STEP_SIZE, (rateRatio * rateRatio) * REFERENCE_STEP_SIZE);
    STEP_SIZE = Math.max(REFERENCE_STEP_SIZE, Math.abs(rateRatio) * REFERENCE_STEP_SIZE);
    log.debug("STEP_SIZE = " + STEP_SIZE + " REFERENCE_STEP_SIZE=" + REFERENCE_STEP_SIZE + " bestFixedRate="
            + bestFixedRate + " REFERENCE_RATE=" + REFERENCE_RATE);

    evaluations = 0;
    TreeMap<Double, TariffSpecification> eval2TOUTariff = new TreeMap<Double, TariffSpecification>();

    // OUTER LOOP
    //for( STEP_SIZE = 0.005; STEP_SIZE < 0.100; STEP_SIZE += 0.005) {
    //  log.info("STARTING A LOOP: STEP_SIZE=" + STEP_SIZE);

    // first compute numerical gradient
    RealVector gradient = new ArrayRealVector(NUM_RATES);
    for (int i = 0; i < NUM_RATES; ++i) {
        gradient.setEntry(i, computePartialDerivative(i, tariffUtilityEstimate, NUM_RATES, eval2TOUTariff));
    }
    gradient = gradient.unitVector();

    // taking steps in the gradient direction
    double previousPointValue = -Double.MAX_VALUE;
    final double alpha = STEP_SIZE;
    RealVector rateOffsets = new ArrayRealVector(NUM_RATES); // ArrayRealVector(int) initializes all entries to 0
    double currentPointValue = evaluatePoint(tariffUtilityEstimate, rateOffsets.toArray());
    eval2TOUTariff.put(currentPointValue, tariffUtilityEstimate.getCorrespondingSpec(rateOffsets.toArray()));
    while (!converged(currentPointValue, previousPointValue) && evaluations < MAX_EVALUATIONS) {
        previousPointValue = currentPointValue;
        rateOffsets = rateOffsets.add(gradient.mapMultiply(alpha));
        currentPointValue = evaluatePoint(tariffUtilityEstimate, rateOffsets.toArray());
        eval2TOUTariff.put(currentPointValue,
                tariffUtilityEstimate.getCorrespondingSpec(rateOffsets.toArray()));
    }

    log.info("gradient ascent finished after " + evaluations + " evaluations");

    //}

    // return map
    return eval2TOUTariff;
}
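
Each pass through the loop above moves the point by a fixed fraction of the unit gradient, x := x + alpha * g; the add/mapMultiply pair in isolation (values are made up):

double alpha = 0.01;
RealVector x = new ArrayRealVector(new double[] { 0.0, 0.0 });
RealVector g = new ArrayRealVector(new double[] { 3.0, 4.0 }).unitVector(); // {0.6; 0.8}
x = x.add(g.mapMultiply(alpha)); // {0.006; 0.008}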