Example usage for org.apache.commons.math3.linear ArrayRealVector subtract

Introduction

On this page you can find example usages of org.apache.commons.math3.linear.ArrayRealVector.subtract.

Prototype

@Override
public ArrayRealVector subtract(RealVector v) throws DimensionMismatchException 

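Before the project excerpts below, here is a minimal, self-contained sketch of the call itself (the class name and vector values are made up for illustration). subtract returns the element-wise difference of the two vectors and throws DimensionMismatchException when the operands differ in length:

import java.util.Arrays;

import org.apache.commons.math3.exception.DimensionMismatchException;
import org.apache.commons.math3.linear.ArrayRealVector;

public class SubtractDemo {
    public static void main(String[] args) {
        ArrayRealVector a = new ArrayRealVector(new double[] { 3.0, 5.0, 7.0 });
        ArrayRealVector b = new ArrayRealVector(new double[] { 1.0, 2.0, 3.0 });

        // Element-wise difference: [2.0, 3.0, 4.0]
        ArrayRealVector diff = a.subtract(b);
        System.out.println(Arrays.toString(diff.toArray()));

        // Operands of different lengths raise DimensionMismatchException
        try {
            a.subtract(new ArrayRealVector(new double[] { 1.0, 2.0 }));
        } catch (DimensionMismatchException e) {
            System.out.println("dimension mismatch: " + e.getMessage());
        }
    }
}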

Usage

From source file:fp.overlapr.algorithmen.StressMajorization.java

@Deprecated
private static ArrayRealVector conjugateGradientsMethod(Array2DRowRealMatrix A, ArrayRealVector b,
        ArrayRealVector werte) {

    Array2DRowRealMatrix preJacobi = new Array2DRowRealMatrix(A.getRowDimension(), A.getColumnDimension());

    // Compute the Jacobi preconditioner (reciprocal diagonal of A)
    preJacobi.walkInRowOrder(new DefaultRealMatrixChangingVisitor() {
        @Override
        public double visit(int row, int column, double value) {
            if (row == column) {
                return 1 / A.getEntry(row, column);
            } else {
                return 0.0;
            }
        }
    });

    // Choose an arbitrary starting point x_k
    ArrayRealVector x_k = new ArrayRealVector(werte);

    // Compute the initial residual r_k = b - A * x_k
    ArrayRealVector r_k = b.subtract(A.operate(x_k));

    // Apply the preconditioner: h_k = M^(-1) * r_k
    ArrayRealVector h_k = (ArrayRealVector) preJacobi.operate(r_k);

    // Initial search direction d_k = h_k
    ArrayRealVector d_k = h_k;

    // Declare x_k+1, r_k+1, d_k+1 and h_k+1, as well as alpha, beta and z
    ArrayRealVector x_k1;
    ArrayRealVector r_k1;
    ArrayRealVector d_k1;
    ArrayRealVector h_k1;
    double alpha;
    double beta;
    ArrayRealVector z;

    do {
        // Cache the matrix-vector product so it is computed only once
        z = (ArrayRealVector) A.operate(d_k);

        // From x_k, find the minimizer x_k1 of the objective E along the
        // search direction d_k, then update the gradient/residual
        alpha = r_k.dotProduct(h_k) / d_k.dotProduct(z);
        x_k1 = x_k.add(d_k.mapMultiply(alpha));
        r_k1 = r_k.subtract(z.mapMultiply(alpha));
        h_k1 = (ArrayRealVector) preJacobi.operate(r_k1);

        // Update the search direction d_k1
        beta = r_k1.dotProduct(h_k1) / r_k.dotProduct(h_k);
        d_k1 = h_k1.add(d_k.mapMultiply(beta));

        // Shift the values by one for the next iteration
        x_k = x_k1;
        r_k = r_k1;
        d_k = d_k1;
        h_k = h_k1;

    } while (r_k1.getNorm() > TOL);

    return x_k1;
}
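
As a cross-check on the residual pattern used above (r = b - A x), here is a minimal sketch that solves a small symmetric positive definite system with Commons Math's own ConjugateGradient solver and verifies the answer with subtract; the matrix, right-hand side and tolerance are made-up values:

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.ConjugateGradient;
import org.apache.commons.math3.linear.RealVector;

public class CgResidualCheck {
    public static void main(String[] args) {
        // Small symmetric positive definite system, values chosen for illustration
        Array2DRowRealMatrix A = new Array2DRowRealMatrix(
                new double[][] { { 4.0, 1.0 }, { 1.0, 3.0 } });
        ArrayRealVector b = new ArrayRealVector(new double[] { 1.0, 2.0 });

        RealVector x = new ConjugateGradient(100, 1e-10, false).solve(A, b);

        // Residual r = b - A x, the same subtract pattern as r_k above
        RealVector r = b.subtract(A.operate(x));
        System.out.println("residual norm = " + r.getNorm());
    }
}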

From source file:edu.utexas.cs.tactex.subscriptionspredictors.LWRCustOldAppache.java

/**
  * LWR (locally weighted regression) prediction
  * 
  * @param X input points
  * @param Y target values
  * @param x0 the point at which to predict
  * @param tau kernel bandwidth
  * @return the predicted value at x0, or null when the weighted denominator is zero
  */
public Double LWRPredict(ArrayRealVector X, ArrayRealVector Y, double x0, final double tau) {
    ArrayRealVector X0 = new ArrayRealVector(X.getDimension(), x0);
    ArrayRealVector delta = X.subtract(X0);
    ArrayRealVector sqDists = delta.ebeMultiply(delta);
    UnivariateFunction expTau = new UnivariateFunction() {
        @Override
        public double value(double arg0) {
            //log.info(" cp univariate tau " + tau);
            return Math.exp(-arg0 / (2 * tau));
        }
    };
    ArrayRealVector W = (ArrayRealVector) sqDists.map(expTau);
    double Xt_W_X = X.dotProduct(W.ebeMultiply(X));
    if (Xt_W_X == 0.0) {
        log.error(" cp LWR cannot predict - 0 denominator returning NULL");
        log.error("Xcv is " + X.toString());
        log.error("Ycv is " + Y.toString());
        log.error("x0 is " + x0);
        return null; // <==== NOTE: a caller must be prepared for it
    }
    double theta = (1.0 / Xt_W_X) * X.ebeMultiply(W).dotProduct(Y);

    return theta * x0;
}
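
A hedged sketch of calling this predictor; the data values are made up and a no-argument constructor is assumed:

import org.apache.commons.math3.linear.ArrayRealVector;
import edu.utexas.cs.tactex.subscriptionspredictors.LWRCustOldAppache;

public class LwrPredictDemo {
    public static void main(String[] args) {
        ArrayRealVector X = new ArrayRealVector(new double[] { 1.0, 2.0, 3.0, 4.0 });
        ArrayRealVector Y = new ArrayRealVector(new double[] { 2.1, 3.9, 6.2, 8.1 });

        LWRCustOldAppache lwr = new LWRCustOldAppache(); // assumes a no-arg constructor
        Double yHat = lwr.LWRPredict(X, Y, 2.5, 1.0); // x0 = 2.5, bandwidth tau = 1.0
        if (yHat == null) {
            // LWRPredict returns null when the weighted denominator is zero
            System.out.println("prediction unavailable");
        } else {
            System.out.println("predicted value at x0 = 2.5: " + yHat);
        }
    }
}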

From source file:hivemall.anomaly.SDAR2D.java

/**
 * @param x series of inputs in LIFO order (x[0] is the most recent)
 * @param k AR window size
 * @return x_hat, the predicted x
 * @see <a href="https://en.wikipedia.org/wiki/Matrix_multiplication#Outer_product">outer product</a>
 */
@Nonnull
public RealVector update(@Nonnull final ArrayRealVector[] x, final int k) {
    Preconditions.checkArgument(x.length >= 1, "x.length MUST be greater than or equal to 1: " + x.length);
    Preconditions.checkArgument(k >= 0, "k MUST be greater than or equal to 0: ", k);
    Preconditions.checkArgument(k < _C.length,
            "k MUST be less than |C| but " + "k=" + k + ", |C|=" + _C.length);

    final ArrayRealVector x_t = x[0];
    final int dims = x_t.getDimension();

    if (_initialized == false) {
        this._mu = x_t.copy();
        this._sigma = new BlockRealMatrix(dims, dims);
        assert (_sigma.isSquare());
        this._initialized = true;
        return new ArrayRealVector(dims);
    }
    Preconditions.checkArgument(k >= 1, "k MUST be greater than 0: ", k);

    // old parameters are accessible to compute the Hellinger distance
    this._muOld = _mu.copy();
    this._sigmaOld = _sigma.copy();

    // update mean vector
    // \hat{mu} := (1-r) \hat{mu} + r x_t
    this._mu = _mu.mapMultiply(1.d - _r).add(x_t.mapMultiply(_r));

    // compute residuals (x - \hat{mu})
    final RealVector[] xResidual = new RealVector[k + 1];
    for (int j = 0; j <= k; j++) {
        xResidual[j] = x[j].subtract(_mu);
    }

    // update covariance matrices
    // C_j := (1-r) C_j + r (x_t - \hat{mu}) (x_{t-j} - \hat{mu})'
    final RealMatrix[] C = this._C;
    final RealVector rxResidual0 = xResidual[0].mapMultiply(_r); // r (x_t - \hat{mu})
    for (int j = 0; j <= k; j++) {
        RealMatrix Cj = C[j];
        if (Cj == null) {
            C[j] = rxResidual0.outerProduct(x[j].subtract(_mu));
        } else {
            C[j] = Cj.scalarMultiply(1.d - _r).add(rxResidual0.outerProduct(x[j].subtract(_mu)));
        }
    }

    // solve A in the following Yule-Walker equation
    // C_j = \sum_{i=1}^{k} A_i C_{j-i} where j = 1..k, C_{-i} = C_i'
    /*
     * /C_1\     /A_1\  /C_0     |C_1'    |C_2'    | .  .  .   |C_{k-1}' \
     * |---|     |---|  |--------+--------+--------+           +---------|
     * |C_2|     |A_2|  |C_1     |C_0     |C_1'    |               .     |
     * |---|     |---|  |--------+--------+--------+               .     |
     * |C_3|  =  |A_3|  |C_2     |C_1     |C_0     |               .     |
     * | . |     | . |  |--------+--------+--------+                     |
     * | . |     | . |  |   .                            .               |
     * | . |     | . |  |   .                            .               |
     * |---|     |---|  |--------+                              +--------|
     * \C_k/     \A_k/  \C_{k-1} | .  .  .                      |C_0     /
     */
    RealMatrix[][] rhs = MatrixUtils.toeplitz(C, k);
    RealMatrix[] lhs = Arrays.copyOfRange(C, 1, k + 1);
    RealMatrix R = MatrixUtils.combinedMatrices(rhs, dims);
    RealMatrix L = MatrixUtils.combinedMatrices(lhs);
    RealMatrix A = MatrixUtils.solve(L, R, false);

    // estimate x
    // \hat{x} = \hat{mu} + \sum_{i=1}^k A_i (x_{t-i} - \hat{mu})
    RealVector x_hat = _mu.copy();
    for (int i = 0; i < k; i++) {
        int offset = i * dims;
        RealMatrix Ai = A.getSubMatrix(offset, offset + dims - 1, 0, dims - 1);
        x_hat = x_hat.add(Ai.operate(xResidual[i + 1]));
    }

    // update model covariance
    // \sigma := (1-r) \sigma + r (x - \hat{x}) (x - \hat{x})'
    RealVector xEstimateResidual = x_t.subtract(x_hat);
    this._sigma = _sigma.scalarMultiply(1.d - _r)
            .add(xEstimateResidual.mapMultiply(_r).outerProduct(xEstimateResidual));

    return x_hat;
}
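
The covariance update above combines subtract with mapMultiply and outerProduct; here is a minimal standalone sketch of that rank-one update pattern, with a made-up forgetting factor and made-up vector values:

import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.RealVector;

public class RankOneUpdateSketch {
    public static void main(String[] args) {
        double r = 0.1; // forgetting factor, made-up value
        RealVector x = new ArrayRealVector(new double[] { 1.0, 2.0 });
        RealVector mu = new ArrayRealVector(new double[] { 0.5, 1.5 });

        // residual (x - mu), then the rank-one term r (x - mu)(x - mu)'
        RealVector residual = x.subtract(mu);
        RealMatrix rankOne = residual.mapMultiply(r).outerProduct(residual);
        System.out.println(rankOne);
    }
}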

From source file:gamlss.algorithm.AdditiveFit.java

/**
 * Performs a smoothing process: creates an approximating function
 * that attempts to capture important patterns in the data while
 * leaving out noise and other fine-scale structure.
 * @param y - response variable
 * @param weights - observation weights
 * @param xMat - design matrix
 * @param sMat - smoother matrix
 * @param distParameter - distribution parameter
 * @return matrix of smoothed values
 */
public BlockRealMatrix fitSmoother(final ArrayRealVector y, final ArrayRealVector weights,
        final BlockRealMatrix xMat, final BlockRealMatrix sMat, final int distParameter) {

    this.s = sMat.copy();
    this.w = weights;
    this.whichDistParameter = distParameter;

    //residuals <- as.vector(y - s %*% rep(1, ncol(s)))   
    tempV = new ArrayRealVector(s.getColumnDimension());
    tempV.set(1.0);
    tempV = (ArrayRealVector) s.operate(tempV);
    residuals = MatrixFunctions.vecSub(y, tempV);
    tempV = null;

    //fit <- list(fitted.values = 0)
    fittedValues = new ArrayRealVector(residuals.getDimension());

    //rss <- weighted.mean(residuals^2, w)
    //rss = calculateRss(residuals, w);
    //tempArr = null;

    //rssold <- rss * 10
    //rssOld = rss*10;

    //nit <- 0
    nit = 0;

    //df <- rep(NA, length(who))
    //lambda <- rep(NA, length(who))
    //double[] df = new double[s.getColumnDimension()]; 

    //var <- s
    var = s.copy();

    //if(trace) cat("\nADDITIVE   iter   rss/n     term\n")
    if (Controls.BACKFIT_TRACE) {
        System.out.println("ADDITIVE   iter   rss/n     term");
    }

    //ndig <- -log10(tol) + 1
    //double ndig = -FastMath.log10(tol)+1;

    //RATIO <- tol + 1
    ratio = Controls.BACKFIT_TOL + 1;

    //while(RATIO > tol & nit < maxit) 
    while (ratio > Controls.BACKFIT_TOL && nit < Controls.BACKFIT_CYCLES) {

        //rssold <- rss
        //rssOld = rss;

        //nit <- nit + 1
        nit++;

        //z <- residuals + fit$fitted.values         
        z = residuals.add(fittedValues);

        //org.apache.commons.lang3.time.StopWatch watch = new org.apache.commons.lang3.time.StopWatch();
        //watch.start();         

        //fit <- lm.wfit(x, z, w, method="qr", ...)
        wls.setNoIntercept(Controls.NO_INTERCEPT[whichDistParameter - 1]);
        wls.newSampleData(z, xMat.copy(), w.copy());

        wls.setNoIntercept(false);

        //residuals <- fit$residuals
        fittedValues = (ArrayRealVector) wls.calculateFittedValues(Controls.IS_SVD);
        //watch.stop();
        //System.out.println(watch.getNanoTime()/(1e+9));
        //watch.reset();         

        //residuals = z.subtract(fittedValues);
        //residuals = (ArrayRealVector) wls.calculateResiduals();
        residuals = z.subtract(fittedValues);

        //rss <- weighted.mean(residuals^2, w)
        //rss = calculateRss(residuals, w);

        //if(trace) cat(nit, "   ", 
        //format(round(rss, ndig)), "  Parametric -- lm.wfit\n", sep = "")
        if (Controls.BACKFIT_TRACE) {
            //System.out.println(" " + nit + "  " + 
            //rss + " Parametric -- lm.wfit");
        }

        //deltaf <- 0
        deltaf = 0;

        //for(j in seq(names.calls)) 
        for (colNum = 0; colNum < s.getColumnDimension(); colNum++) {

            //old <- s[, j]
            old = (ArrayRealVector) s.getColumnVector(colNum);

            //z <- residuals + s[, j]
            z = residuals.add(old);

            //fit.call <- eval(smooth.calls[[j]])
            //residuals <- as.double(fit.call$residuals)      
            residuals = smoother.solve(this);

            //if(length(residuals) != n)
            //stop(paste(names.calls[j], "returns a vector 
            //of the wrong length"))
            if (residuals.getDimension() != y.getDimension()) {
                System.err.println("column " + colNum + " of matrix s returned a vector of the wrong length");
            }

            //s[, j] <- z - residual
            s.setColumnVector(colNum, z.subtract(residuals));

            //if (length(fit.call$lambda)>1)
            //{for cases where there are multiple lambdas 
            //ambda[j] <- fit.call$lambda[1] 

            //coefSmo[[j]] <- if(is.null(fit.call$coefSmo))
            //0 else fit.call$coefSmo

            //deltaf <- deltaf + weighted.mean((s[, j] - old)^2, w)
            tempV = MatrixFunctions.vecSub((ArrayRealVector) s.getColumnVector(colNum), old);
            deltaf = deltaf + mean.evaluate(tempV.ebeMultiply(tempV).getDataRef(), w.getDataRef());
            tempV = null;

            //rss <- weighted.mean(residuals^2, w)
            //rss = calculateRss(residuals, w);

            // if(trace)
            if (Controls.BACKFIT_TRACE) {
                //cat(" ", nit, " ", format(round(rss, ndig)), 
                //"  Nonparametric -- ", names.calls[j], "\n", sep = "")
                //System.out.println("   " + nit +"   " + rss + "
                //Nonparametric " + "pb(column "+ i+")");
            }

            //df[j] <- fit.call$nl.df
            //df[i] = pb.getEdf();

            //if(se)
            if (Controls.IS_SE) {
                //var[, j] <- fit.call$var
                var.setColumnVector(colNum, smoother.getVar());
            }
        }

        //RATIO <- sqrt(deltaf/sum(w * apply(s, 1, sum)^2))   
        tempD = 0.0;
        tempM = new BlockRealMatrix(s.getRowDimension(), 1);
        for (int j = 0; j < s.getRowDimension(); j++) {
            for (int k = 0; k < s.getColumnDimension(); k++) {
                tempD = tempD + s.getEntry(j, k);
            }
            tempM.setEntry(j, 0, tempD);
            tempD = 0.0;
        }
        size = tempM.getRowDimension();
        for (int j = 0; j < size; j++) {
            tempD = tempD + tempM.getEntry(j, 0) * tempM.getEntry(j, 0) * w.getEntry(j);
        }
        ratio = FastMath.sqrt(deltaf / tempD);
        tempD = null;
        tempM = null;

        //if(trace)
        //cat("Relative change in functions:", 
        //format(round(RATIO, ndig)), "\n")
        if (Controls.BACKFIT_TRACE) {
            System.out.println("Relative change in functions:  " + ratio);
        }
    }

    //if(nit == maxit && maxit > 1)
    //warning(paste("additive.fit convergence not 
    //obtained in ", maxit, " iterations"))
    if (ratio > Controls.BACKFIT_TOL) {
        System.out.println(
                "AdditiveFit convergence was not obtained in " + Controls.BACKFIT_CYCLES + " iterations");
    }

    //fit$fitted.values <- y - residuals
    fittedValues = y.subtract(residuals);

    //rl <- c(fit, list(smooth = s, nl.df = 
    //sum(df), lambda=lambda, coefSmo=coefSmo))
    return s;
}
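
The heart of the loop above is partial-residual bookkeeping: z = residuals + fitted before each smoother, then residuals = z - fitted after it. Below is a minimal standalone sketch of that add/subtract cycle, with the real smoother replaced by a made-up shrinkage step:

import org.apache.commons.math3.linear.ArrayRealVector;

public class BackfitSketch {

    // Stand-in "smoother" for illustration: shrinks the partial residual toward zero
    static ArrayRealVector smooth(ArrayRealVector z) {
        ArrayRealVector out = z.copy();
        out.mapMultiplyToSelf(0.5);
        return out;
    }

    public static void main(String[] args) {
        ArrayRealVector y = new ArrayRealVector(new double[] { 1.0, 2.0, 3.0 });
        ArrayRealVector fitted = new ArrayRealVector(y.getDimension()); // zeros
        ArrayRealVector residuals = y.subtract(fitted);

        for (int nit = 0; nit < 10; nit++) {
            // Partial residual: add the current component back in...
            ArrayRealVector z = residuals.add(fitted);
            // ...refit it, then subtract the new fit to get fresh residuals
            fitted = smooth(z);
            residuals = z.subtract(fitted);
        }
        System.out.println("final residual norm = " + residuals.getNorm());
    }
}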