Example usage for org.apache.commons.math3.linear RealVector subtract

Introduction

This page collects example usages of org.apache.commons.math3.linear RealVector.subtract from open-source projects.

Prototype

public RealVector subtract(RealVector v) throws DimensionMismatchException 

Document

Subtracts v from this vector, returning the result as a new vector; both operands are left unchanged.
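Before the project examples below, here is a minimal sketch of the call itself (the vector values are arbitrary). A length mismatch between the two vectors throws DimensionMismatchException.

import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.RealVector;

RealVector a = new ArrayRealVector(new double[] { 3.0, 5.0, 7.0 });
RealVector b = new ArrayRealVector(new double[] { 1.0, 2.0, 3.0 });

RealVector diff = a.subtract(b); // [2.0, 3.0, 4.0]; a and b are not modified
// a.subtract(new ArrayRealVector(2)) would throw DimensionMismatchException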

Usage

From source file:com.bolatu.gezkoncsvlogger.GyroOrientation.RotationKalmanFilter.java

/**
 * Correct the current state estimate with an actual measurement.
 *
 * @param z
 *            the measurement vector
 * @throws NullArgumentException
 *             if the measurement vector is {@code null}
 * @throws DimensionMismatchException
 *             if the dimension of the measurement vector does not fit
 * @throws SingularMatrixException
 *             if the covariance matrix could not be inverted
 */
public void correct(final RealVector z)
        throws NullArgumentException, DimensionMismatchException, SingularMatrixException {

    // sanity checks
    MathUtils.checkNotNull(z);
    if (z.getDimension() != measurementMatrix.getRowDimension()) {
        throw new DimensionMismatchException(z.getDimension(), measurementMatrix.getRowDimension());
    }

    // S = H * P(k) * H' + R
    RealMatrix s = measurementMatrix.multiply(errorCovariance).multiply(measurementMatrixT)
            .add(measurementModel.getMeasurementNoise());

    // Inn = z(k) - H * xHat(k)-
    RealVector innovation = z.subtract(measurementMatrix.operate(stateEstimation));

    // calculate gain matrix
    // K(k) = P(k)- * H' * (H * P(k)- * H' + R)^-1
    // K(k) = P(k)- * H' * S^-1

    // instead of calculating the inverse of S we can rearrange the formula,
    // and then solve the linear equation A x X = B with A = S', X = K' and
    // B = (H * P)'

    // K(k) * S = P(k)- * H'
    // S' * K(k)' = H * P(k)-'
    RealMatrix kalmanGain = new CholeskyDecomposition(s).getSolver()
            .solve(measurementMatrix.multiply(errorCovariance.transpose())).transpose();

    // update estimate with measurement z(k)
    // xHat(k) = xHat(k)- + K * Inn
    stateEstimation = stateEstimation.add(kalmanGain.operate(innovation));

    // update covariance of prediction error
    // P(k) = (I - K * H) * P(k)-
    RealMatrix identity = MatrixUtils.createRealIdentityMatrix(kalmanGain.getRowDimension());
    errorCovariance = identity.subtract(kalmanGain.multiply(measurementMatrix)).multiply(errorCovariance);
}
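In correct() above, subtract forms the innovation z(k) - H * xHat(k)-. A stripped-down sketch of just that step, with a hypothetical 1x2 measurement matrix and hand-picked values (imports as in the sketch in the Document section, plus RealMatrix and MatrixUtils):

RealMatrix h = MatrixUtils.createRealMatrix(new double[][] { { 1.0, 0.0 } });
RealVector xHat = new ArrayRealVector(new double[] { 2.0, 0.5 });
RealVector z = new ArrayRealVector(new double[] { 2.3 });

// Inn = z(k) - H * xHat(k)-
RealVector innovation = z.subtract(h.operate(xHat)); // [0.3] with these values

Note that the dimension check at the top of correct() guarantees z and H * xHat have the same length, which is exactly what subtract requires.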

From source file:edu.stanford.cfuller.imageanalysistools.fitting.BisquareLinearFit.java

/**
* Calculates the standardized adjusted residuals (according to the same definition used by MATLAB) of the data points for fitting.
* 
* @param indVarValues The values of the independent variable used for the fitting.
* @param depVarValues The values of the dependent variable used for the fitting.
* @param leverages the leverages of the independent variables, as computed by {@link #calculateLeverages(RealVector)}
* @param fitParams the results of a (possibly weighted) least squares fit to the data, containing one or two components: a slope and an optional y-intercept.
* @return a RealVector containing an adjusted residual value for each data point
 */
protected RealVector calculateStandardizedAdjustedResiduals(RealVector indVarValues, RealVector depVarValues,
        RealVector leverages, RealVector fitParams) {

    RealVector predictedValues = indVarValues.mapMultiply(fitParams.getEntry(0));

    RealVector denom = leverages.mapMultiply(-1.0).mapAddToSelf(1 + this.CLOSE_TO_ZERO)
            .mapToSelf(new org.apache.commons.math3.analysis.function.Sqrt());

    if (!this.noIntercept) {
        predictedValues = predictedValues.mapAdd(fitParams.getEntry(1));
    }

    double stddev = 0;
    double mean = 0;

    for (int i = 0; i < depVarValues.getDimension(); i++) {
        mean += depVarValues.getEntry(i);
    }

    mean /= depVarValues.getDimension();

    stddev = depVarValues.mapSubtract(mean).getNorm()
            * (depVarValues.getDimension() * 1.0 / (depVarValues.getDimension() - 1));

    RealVector residuals = depVarValues.subtract(predictedValues).ebeDivide(denom);

    RealVector absDev = residuals.map(new org.apache.commons.math3.analysis.function.Abs());

    int smallerDim = 2;

    if (this.noIntercept) {
        smallerDim = 1;
    }

    double[] resArray = absDev.toArray(); // the absolute residuals were already computed above

    java.util.Arrays.sort(resArray);

    RealVector partialRes = new ArrayRealVector(absDev.getDimension() - smallerDim + 1, 0.0);

    for (int i = smallerDim - 1; i < resArray.length; i++) {
        partialRes.setEntry(i - smallerDim + 1, resArray[i]);
    }

    double resMAD = 0;

    if (partialRes.getDimension() % 2 == 0) {
        resMAD = LocalBackgroundEstimationFilter.quickFindKth(partialRes.getDimension() / 2, partialRes)
                + LocalBackgroundEstimationFilter.quickFindKth(partialRes.getDimension() / 2 - 1, partialRes);
        resMAD /= 2.0;
    } else {
        resMAD = LocalBackgroundEstimationFilter.quickFindKth((partialRes.getDimension() - 1) / 2, partialRes);
    }

    resMAD /= 0.6745;

    if (resMAD < stddev * CLOSE_TO_ZERO) {
        resMAD = stddev * CLOSE_TO_ZERO;
    }

    return residuals.mapDivide(DEFAULT_TUNING_CONST * resMAD);

}
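The central subtract call above produces the raw residuals, which are then scaled entry by entry. A small sketch of just that pattern, with made-up values (ebeDivide divides element-wise):

RealVector observed = new ArrayRealVector(new double[] { 1.0, 2.0, 3.0 });
RealVector predicted = new ArrayRealVector(new double[] { 0.8, 2.1, 2.6 });
RealVector denom = new ArrayRealVector(new double[] { 1.0, 1.0, 2.0 });

// raw residuals, then entry-wise scaling
RealVector residuals = observed.subtract(predicted).ebeDivide(denom); // roughly [0.2, -0.1, 0.2]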

From source file:com.analog.lyric.dimple.solvers.core.parameterizedMessages.MultivariateNormalParameters.java

/**
 * {@inheritDoc}
 * <p>
 * For multivariate normal distributions, the formula is given by:
 * 
 * <blockquote>
 * &frac12; {
 * trace(&Sigma;<sub>Q</sub><sup>-1</sup>&Sigma;<sub>P</sub>) +
 * (&mu;<sub>Q</sub>-&mu;<sub>P</sub>)<sup>T</sup>&Sigma;<sub>Q</sub><sup>-1</sup>(&mu;<sub>Q</sub>-&mu;<sub>P</sub>)
 * -K - ln(det(&Sigma;<sub>P</sub>)/det(&Sigma;<sub>Q</sub>))
 * }
 * </blockquote>
 * Note that this assumes that the determinants of the covariance matrices are non-zero.
 */
@Override
public double computeKLDivergence(IParameterizedMessage that) {
    if (that instanceof MultivariateNormalParameters) {
        // http://en.wikipedia.org/wiki/Multivariate_normal_distribution#Kullback.E2.80.93Leibler_divergence
        //
        // K: size of vectors, # rows/columns of matrices
        // up, uq: vectors of means for P and Q
        // CP, CQ: covariance matrices for P and Q
        // inv(x): inverse of x
        // det(x): determinant of x
        // tr(x): trace of x
        // x': transpose of x
        //
        // KL(P|Q) == .5 * ( tr(inv(CQ) * CP) + (uq - up)' * inv(CQ) * (uq - up) - K - ln(det(CP)/det(CQ)) )
        //

        final MultivariateNormalParameters P = this, Q = (MultivariateNormalParameters) that;
        final int K = P.getVectorLength();
        assertSameSize(Q.getVectorLength());

        if (P.isDiagonal() && Q.isDiagonal()) {
            // If both are diagonal, we can simply add up the KL for the univariate cases along the diagonal.

            final double[] Pmeans = P._mean, Qmeans = Q._mean;
            final double[] Pprecisions = P._precision, Qprecisions = Q._precision;

            double kl = 0.0;

            for (int i = 0; i < K; ++i) {
                kl += NormalParameters.computeKLDiverence(Pmeans[i], Pprecisions[i], Qmeans[i], Qprecisions[i]);
            }

            return kl;
        }

        // TODO - if we ever start storing the eigendecomposition in this object, we can simply use
        // the eigenvalues to efficiently compute the trace and determinants. Perhaps it would be worthwhile
        // to save the eigenvalues if nothing else.

        RealVector mP = wrapRealVector(P.getMean());
        RealVector mQ = wrapRealVector(Q.getMean());
        RealMatrix CP = wrapRealMatrix(P.getCovariance());
        RealMatrix CQinv = wrapRealMatrix(Q.getInformationMatrix());

        RealVector mdiff = mQ.subtract(mP);

        // FIXME: do we need to worry about singular covariance matrices?

        double divergence = -K;

        // trace of product of matrices is equivalent to the dot-product of the vectorized versions
        // of the matrices - this is much faster than doing the actual matrix product
        // divergence += CQinv.multiply(CP).trace();
        for (int i = 0; i < K; ++i)
            for (int j = 0; j < K; ++j)
                divergence += CQinv.getEntry(i, j) * CP.getEntry(i, j);

        divergence += CQinv.preMultiply(mdiff).dotProduct(mdiff);
        divergence -= Math.log(
                new EigenDecomposition(CP).getDeterminant() * new EigenDecomposition(CQinv).getDeterminant());
        return Math.abs(divergence / 2); // use abs to guard against precision errors causing this to go negative.
    }

    throw new IllegalArgumentException(
            String.format("Expected '%s' but got '%s'", getClass(), that.getClass()));
}
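The mdiff vector above is a direct subtract of the two means; the quadratic form (uq - up)' * inv(CQ) * (uq - up) then falls out of preMultiply and dotProduct. A tiny worked sketch with invented means and an identity information matrix:

RealVector mP = new ArrayRealVector(new double[] { 0.0, 0.0 });
RealVector mQ = new ArrayRealVector(new double[] { 1.0, -1.0 });
RealMatrix cqInv = MatrixUtils.createRealIdentityMatrix(2);

RealVector mdiff = mQ.subtract(mP);
double quadraticForm = cqInv.preMultiply(mdiff).dotProduct(mdiff); // 1 + 1 = 2.0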

From source file:edu.stanford.cfuller.imageanalysistools.fitting.NelderMeadMinimizer.java

/**
 * Runs the minimization of the specified function starting from an initial simplex.
 * @param f                 The ObjectiveFunction to be minimized.
 * @param initialSimplex    The initial simplex to use to start optimization, as might be returned from {@link #generateInitialSimplex}
 * @return                  The parameters at the function minimum in the same order as specified for each point on the simplex.
 */
public RealVector optimize(ObjectiveFunction f, RealMatrix initialSimplex) {

    RealMatrix currentSimplex = initialSimplex.copy();

    double currTolVal = 1.0e6;

    RealVector values = new ArrayRealVector(initialSimplex.getRowDimension(), 0.0);

    RealVector centerOfMass = new ArrayRealVector(initialSimplex.getColumnDimension(), 0.0);

    boolean shouldEvaluate = false;

    long iterCounter = 0;

    while (Math.abs(currTolVal) > this.tol) {

        int maxIndex = 0;
        int minIndex = 0;
        double maxValue = -1.0 * Double.MAX_VALUE;
        double minValue = Double.MAX_VALUE;
        double secondMaxValue = -1.0 * Double.MAX_VALUE;

        centerOfMass.mapMultiplyToSelf(0.0);

        if (shouldEvaluate) {

            for (int i = 0; i < currentSimplex.getRowDimension(); i++) {
                RealVector currRow = currentSimplex.getRowVector(i);
                values.setEntry(i, f.evaluate(currRow));
            }

        }

        for (int i = 0; i < currentSimplex.getRowDimension(); i++) {

            double currValue = values.getEntry(i);

            if (currValue < minValue) {
                minValue = currValue;
                minIndex = i;
            }
            if (currValue > maxValue) {
                secondMaxValue = maxValue;
                maxValue = currValue;
                maxIndex = i;
            } else if (currValue > secondMaxValue) {
                secondMaxValue = currValue;
            }
        }

        for (int i = 0; i < currentSimplex.getRowDimension(); i++) {
            if (i == maxIndex)
                continue;

            centerOfMass = centerOfMass.add(currentSimplex.getRowVector(i));

        }

        centerOfMass.mapDivideToSelf(currentSimplex.getRowDimension() - 1);

        RealVector oldPoint = currentSimplex.getRowVector(maxIndex);

        RealVector newPoint = centerOfMass.subtract(oldPoint).mapMultiplyToSelf(a).add(centerOfMass); // newpoint = COM + a*(COM-oldpoint)

        double newValue = f.evaluate(newPoint);

        if (newValue < secondMaxValue) { // success

            if (newValue < minValue) { // best found so far

                //expansion

                RealVector expPoint = centerOfMass.subtract(oldPoint).mapMultiplyToSelf(g).add(centerOfMass);

                double expValue = f.evaluate(expPoint);

                if (expValue < newValue) {
                    currentSimplex.setRowVector(maxIndex, expPoint);
                    currTolVal = 2.0 * (expValue - maxValue) / (1.0e-20 + expValue + maxValue);

                    values.setEntry(maxIndex, expValue);
                    shouldEvaluate = false;
                    continue;
                }

            }

            //reflection

            currentSimplex.setRowVector(maxIndex, newPoint);
            currTolVal = 2.0 * (newValue - maxValue) / (1.0e-20 + newValue + maxValue);
            values.setEntry(maxIndex, newValue);
            shouldEvaluate = false;
            continue;

        }

        //contraction

        RealVector conPoint = centerOfMass.subtract(oldPoint).mapMultiplyToSelf(r).add(centerOfMass);
        double conValue = f.evaluate(conPoint);

        if (conValue < maxValue) {
            currentSimplex.setRowVector(maxIndex, conPoint);
            currTolVal = 2.0 * (conValue - maxValue) / (1.0e-20 + conValue + maxValue);
            values.setEntry(maxIndex, conValue);
            shouldEvaluate = false;
            continue;
        }

        //reduction

        for (int i = 0; i < currentSimplex.getRowDimension(); i++) {
            if (i == minIndex)
                continue;

            currentSimplex.setRowVector(i,
                    currentSimplex.getRowVector(i).subtract(currentSimplex.getRowVector(minIndex))
                            .mapMultiplyToSelf(s).add(currentSimplex.getRowVector(minIndex)));

        }

        double redValue = f.evaluate(currentSimplex.getRowVector(maxIndex));

        currTolVal = 2.0 * (redValue - maxValue) / (1.0e-20 + redValue + maxValue);

        shouldEvaluate = true;

        if (iterCounter++ > 100000) {
            System.out.println("stalled?  tol: " + currTolVal + "  minValue: " + minValue);
        }

    }

    double minValue = Double.MAX_VALUE;

    RealVector minVector = null;

    for (int i = 0; i < currentSimplex.getRowDimension(); i++) {
        values.setEntry(i, f.evaluate(currentSimplex.getRowVector(i)));
        if (values.getEntry(i) < minValue) {
            minValue = values.getEntry(i);
            minVector = currentSimplex.getRowVector(i);
        }
    }

    return minVector;

}
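Every geometric move in the loop above (reflection, expansion, contraction, reduction) is the same subtract-scale-add pattern. A sketch of a single reflection through the centroid, with a hypothetical coefficient a = 1.0:

double a = 1.0; // reflection coefficient; hypothetical value
RealVector centerOfMass = new ArrayRealVector(new double[] { 1.0, 1.0 });
RealVector worstPoint = new ArrayRealVector(new double[] { 2.0, 0.0 });

// newPoint = COM + a * (COM - worstPoint)
RealVector reflected = centerOfMass.subtract(worstPoint).mapMultiply(a).add(centerOfMass); // [0.0, 2.0]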

From source file:org.lenskit.mf.BPR.BPRMFModelProvider.java

@Override
public MFModel get() {
    // This will accumulate BPR-Opt (minus the regularization) and will be negated to make an error.
    // -30 is arbitrary, but would indicate a _really_ consistently bad prediction (~ p=1*10^-13),
    // and is therefore a reasonable "max_error".
    RollingWindowMeanAccumulator optAccum = new RollingWindowMeanAccumulator(10000, -30);

    // set up user index and matrix
    int userCount = dao.getEntityIds(CommonTypes.USER).size();
    for (long uid : dao.getEntityIds(CommonTypes.USER)) {
        userIndex.internId(uid);
    }
    RealMatrix userFeatures = MatrixUtils.createRealMatrix(userCount, featureCount);
    initFeatures(userFeatures);

    // set up item index and matrix
    int itemCount = dao.getEntityIds(CommonTypes.ITEM).size();
    for (long iid : dao.getEntityIds(CommonTypes.ITEM)) {
        itemIndex.internId(iid);
    }
    RealMatrix itemFeatures = MatrixUtils.createRealMatrix(itemCount, featureCount);
    initFeatures(itemFeatures);

    logger.debug("Learning rate is {}", learningRate);
    logger.debug("Regularization term is {}", regularization);

    logger.info("Building BPR-MF with {} features for {} users and {} items", featureCount, userCount,
            itemCount);

    TrainingLoopController controller = stoppingCondition.newLoop();

    //REVIEW: because of the nature of training samples (and the point that the BPR paper makes that training
    // by-item or by-user are not optimal) one "iteration" here will be one training update. This leads to _really_
    // big iteration counts, which can actually overflow ints! One suggestion would be to allow the iteration count
    // controller to accept longs, but I don't know if this will introduce backwards compatibility issues (I imagine
    // this depends on the robustness of our type conversion in the configuration).
    while (controller.keepTraining(-optAccum.getMean())) {
        for (TrainingItemPair pair : pairGenerator.nextBatch()) {
            // Note: these variable names break normal Java style to match the BPR paper and keep the implementation easy to follow
            long iid = pair.g;
            int i = itemIndex.internId(iid);
            long jid = pair.l;
            int j = itemIndex.internId(jid);
            long uid = pair.u;
            int u = userIndex.internId(uid);

            RealVector w_u = userFeatures.getRowVector(u);
            RealVector h_i = itemFeatures.getRowVector(i);
            RealVector h_j = itemFeatures.getRowVector(j);

            double xui = w_u.dotProduct(h_i);
            double xuj = w_u.dotProduct(h_j);
            double xuij = xui - xuj;

            double bprTerm = 1 / (1 + exp(xuij));

            // w_u update
            RealVector h_i_j = h_i.subtract(h_j);
            RealVector w_u_update = w_u.mapMultiply(-regularization);
            w_u_update.combineToSelf(1, bprTerm, h_i_j);

            // h_i update
            RealVector h_i_update = h_i.mapMultiply(-regularization);
            h_i_update.combineToSelf(1, bprTerm, w_u);

            // h_j update
            RealVector h_j_update = h_j.mapMultiply(-regularization);
            h_j_update.combineToSelf(1, -bprTerm, w_u);

            // perform updates
            w_u.combineToSelf(1, learningRate, w_u_update);
            h_i.combineToSelf(1, learningRate, h_i_update);
            h_j.combineToSelf(1, learningRate, h_j_update);

            // update the optimization function accumulator (note we are not including the regularization term)
            optAccum.add(Math.log(1 / (1 + Math.exp(-xuij))));
        }
    }

    return new MFModel(userFeatures, itemFeatures, userIndex, itemIndex);
}
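The h_i.subtract(h_j) call above builds the item-factor difference that drives the user-factor gradient, and combineToSelf(a, b, y) computes this = a*this + b*y in place. A sketch of one such update with invented factors and hyperparameters:

double learningRate = 0.05;   // hypothetical
double regularization = 0.01; // hypothetical
double bprTerm = 0.4;         // stands in for 1 / (1 + exp(xuij))

RealVector w_u = new ArrayRealVector(new double[] { 0.3, -0.2 });
RealVector h_i_j = new ArrayRealVector(new double[] { 0.2, 0.4 }); // h_i.subtract(h_j)

RealVector w_u_update = w_u.mapMultiply(-regularization).combine(1.0, bprTerm, h_i_j);
w_u.combineToSelf(1.0, learningRate, w_u_update); // in-place gradient step on w_u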

From source file:org.rhwlab.dispim.datasource.GaussianComponent.java

public RealMatrix precision(RealVector mu) {
    RealMatrix ret = new Array2DRowRealMatrix(mu.getDimension(), mu.getDimension());
    // a new Array2DRowRealMatrix is already zero-filled, so no explicit clearing is needed
    // (the original ret.scalarMultiply(0.0) discarded its result and was a no-op)
    int n = 0;
    for (Integer index : indexes) {
        RealVector x = source.get(index).coords;
        RealVector del = x.subtract(mu);
        ret = ret.add(del.outerProduct(del));
        ++n;
    }

    ret = ret.scalarMultiply(1.0 / n);
    LUDecomposition lud = new LUDecomposition(ret);
    RealMatrix prec = lud.getSolver().getInverse();
    return prec;
}
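Here subtract centers each sample on the mean before outerProduct accumulates the scatter matrix. One sample's contribution, sketched in isolation with invented values:

RealVector x = new ArrayRealVector(new double[] { 1.0, 2.0 });
RealVector mu = new ArrayRealVector(new double[] { 0.5, 1.0 });

RealVector del = x.subtract(mu);                 // [0.5, 1.0]
RealMatrix contribution = del.outerProduct(del); // [[0.25, 0.5], [0.5, 1.0]]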

From source file:org.rhwlab.dispim.datasource.MicroCluster.java

public static RealMatrix precision(List<MicroCluster> data, RealVector mu) {
    RealMatrix ret = new Array2DRowRealMatrix(mu.getDimension(), mu.getDimension());
    RealVector v = new ArrayRealVector(mu.getDimension());
    long n = 0;
    for (MicroCluster micro : data) {
        for (int p = 0; p < micro.points.length; ++p) {
            for (int d = 0; d < mu.getDimension(); ++d) {
                v.setEntry(d, micro.points[p][d]);
            }
            RealVector del = v.subtract(mu);
            ret = ret.add(del.outerProduct(del));
            ++n;
        }

    }
    ret = ret.scalarMultiply(1.0 / n);
    LUDecomposition lud = new LUDecomposition(ret);
    RealMatrix prec = null;
    if (lud.getSolver().isNonSingular()) {
        prec = lud.getSolver().getInverse();
    }
    return prec;
}

From source file:org.rhwlab.variationalbayesian.GaussianMixture.java

public void responsibities() {

    // calculate the r matrix        
    for (int n = 0; n < X.getN(); ++n) {
        RealVector x = X.get(n).coords;
        double intensity = X.get(n).adjustedIntensity;
        for (int k = 0; k < K; ++k) {
            if (inModel[k]) {
                RealVector diff = x.subtract(m[k]);
                double E = X.getD() / beta[k] + nu[k] * W[k].operate(diff).dotProduct(diff);
                lnRho[k] = lnPi[k] + intensity * (0.5 * lnLambdaTilde[k] - Dln2Pi2 - 0.5 * E);
            }
        }
        double[] rs = normalizeLogP(lnRho);
        for (int k = 0; k < K; ++k) {
            r.setEntry(n, k, rs[k]);
        }

    }
}
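The diff vector above centers each point on the component mean m[k] before the W[k]-weighted quadratic form. The same computation sketched in isolation, with an identity weight matrix standing in for W[k]:

RealVector x = new ArrayRealVector(new double[] { 1.0, 0.0 });
RealVector mean = new ArrayRealVector(new double[] { 0.0, 0.0 });
RealMatrix w = MatrixUtils.createRealIdentityMatrix(2);

RealVector diff = x.subtract(mean);
double e = w.operate(diff).dotProduct(diff); // 1.0 with these values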

From source file:org.rhwlab.variationalbayesian.GaussianMixture.java

public void statistics() {
    // calculate N
    calculateN();

    // calculate xBar
    for (int k = 0; k < K; ++k) {
        if (inModel[k]) {
            xBar[k].set(0.0);
            for (int n = 0; n < X.getN(); ++n) {
                Voxel vox = X.get(n);
                double intensity = vox.getAdjusted();
                RealVector x = vox.coords;
                //                       double rnk = (intensity-background)*r.getEntry(n, k);
                double rnk = intensity * r.getEntry(n, k);
                xBar[k] = xBar[k].add(x.mapMultiply(rnk));
            }
            xBar[k].mapDivideToSelf(N[k]);
        }
    }

    // calculate the S
    for (int k = 0; k < K; ++k) {
        if (inModel[k]) {
            for (int row = 0; row < X.getD(); ++row) {
                for (int col = 0; col < X.getD(); ++col) {
                    S[k].setEntry(row, col, 0.0);
                }
            }
            for (int n = 0; n < X.getN(); ++n) {
                Voxel vox = X.get(n);
                RealVector x = vox.coords;
                RealVector del = x.subtract(xBar[k]);
                RealMatrix mat = del.outerProduct(del);
                double rnk = r.getEntry(n, k);
                //                        S[k] = S[k].add(mat.scalarMultiply((intensity-background)*rnk)); 
                S[k] = S[k].add(mat.scalarMultiply(vox.getAdjusted() * rnk));
            }
            S[k] = S[k].scalarMultiply(1.0 / N[k]);
        }
    }
}

From source file:org.rhwlab.variationalbayesian.SuperVoxelGaussianMixture.java

public void responsibities() {

    // calculate the r matrix        
    for (int n = 0; n < X.getN(); ++n) {
        SuperVoxel sv = X.get(n);
        for (int k = 0; k < K; ++k) {
            RealVector[] voxels = sv.getVoxels();
            double E = 0.0;
            for (RealVector vox : voxels) {
                RealVector diff = vox.subtract(m[k]);
                E = E + nu[k] * W[k].operate(diff).dotProduct(diff);
            }
            lnRho[k] = X.getD() / beta[k] + lnPi[k] + 0.5 * lnLambdaTilde[k] - Dln2Pi2 - 0.5 * E;
        }
        double[] rs = normalizeLogP(lnRho);
        for (int k = 0; k < K; ++k) {
            r.setEntry(n, k, rs[k]);
        }
    }
}