Example usage for org.apache.commons.math3.linear RealVector combineToSelf

Introduction

On this page you can find example usage for org.apache.commons.math3.linear.RealVector#combineToSelf.

Prototype

public RealVector combineToSelf(double a, double b, RealVector y) throws DimensionMismatchException 

Document

Updates this vector in place with the linear combination of itself and y: this = a * this + b * y. The modified vector is returned, and a DimensionMismatchException is thrown if y does not have the same dimension as this.
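
A minimal sketch of the semantics (assuming Commons Math 3 on the classpath; the values are illustrative):

import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.RealVector;

public class CombineToSelfDemo {
    public static void main(String[] args) {
        RealVector x = new ArrayRealVector(new double[] { 1.0, 2.0 });
        RealVector y = new ArrayRealVector(new double[] { 10.0, 20.0 });

        // In place: x = 2.0 * x + 0.5 * y; the returned reference is x itself.
        RealVector result = x.combineToSelf(2.0, 0.5, y);

        System.out.println(result);      // {7; 14}
        System.out.println(result == x); // true: no new vector was allocated
    }
}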

Usage

From source file:edu.byu.nlp.math.RealVectors.java

/**
 * Adds vector b to a in place, storing the result in a.
 *
 * @throws NullPointerException if a or b is null
 */
public static void addToSelf(RealVector a, RealVector b) {
    Preconditions.checkNotNull(a);
    Preconditions.checkNotNull(b);
    if (a.getDimension() != b.getDimension()) {
        throw new DimensionMismatchException(b.getDimension(), a.getDimension());
    }

    a.combineToSelf(1.0, 1.0, b);
}
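
A hypothetical call site for this helper (vector values are illustrative):

RealVector a = new ArrayRealVector(new double[] { 1.0, 2.0 });
RealVector b = new ArrayRealVector(new double[] { 3.0, 4.0 });
RealVectors.addToSelf(a, b); // a is now {4; 6}; b is unchanged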

From source file:edu.oregonstate.eecs.mcplan.ml.KMeans.java

private RealVector centerOfMass(final int c) {
    final RealVector com = new ArrayRealVector(m_, 0);
    int nelements = 0;
    for (int i = 0; i < n_; ++i) {
        if (c_[i] == c) {
            nelements += 1;
            com.combineToSelf(1.0, 1.0, data_[i]); // Add in-place
        }
    }
    assert (nelements > 0);
    com.mapDivideToSelf(nelements);
    return com;
}
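
Here combineToSelf(1.0, 1.0, data_[i]) acts as an in-place vector addition, so the loop accumulates the cluster sum without allocating a temporary per element; mapDivideToSelf then turns that sum into the mean. Note the assert: an empty cluster would otherwise lead to a division by zero.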

From source file:edu.oregonstate.eecs.mcplan.ml.GaussianMixtureModel.java

private void init() {
    final int step = Math.max(1, n_ / k_);
    final double unif = 1.0 / k_;
    double acc = 0.0;
    final RandomPermutationIterator<double[]> r = new RandomPermutationIterator<double[]>(data_, rng_);
    final RandomPermutationIterator<double[]> rrepeat = new RandomPermutationIterator<double[]>(data_,
            r.permutation());

    for (int i = 0; i < k_; ++i) {
        final RealVector mu = new ArrayRealVector(d_);
        for (int j = 0; j < step; ++j) {
            final double[] x = r.next();
            final RealVector v = new ArrayRealVector(x);
            mu.combineToSelf(1.0, 1.0, v);
        }
        final double Zinv = 1.0 / step;
        mu.mapMultiplyToSelf(Zinv);

        RealMatrix Sigma = new Array2DRowRealMatrix(d_, d_);
        for (int j = 0; j < step; ++j) {
            final double[] x = rrepeat.next();
            final RealVector v = new ArrayRealVector(x);
            v.combineToSelf(1.0, -1.0, mu);
            Sigma = Sigma.add(v.outerProduct(v));
        }
        Sigma = Sigma.scalarMultiply(Zinv);
        pi_[i] = unif;
        acc += unif;
        mu_[i] = mu;
        Sigma_[i] = Sigma; //MatrixUtils.createRealIdentityMatrix( d_ );
    }
    pi_[k_ - 1] += (1.0 - acc); // Round-off error
}
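
Both loops lean on combineToSelf as an in-place "axpy": mu accumulates the sum of a random slice of roughly n_ / k_ points before being rescaled to its mean, and v.combineToSelf(1.0, -1.0, mu) centers each point in place before the outer product that builds the covariance estimate.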

From source file:edu.oregonstate.eecs.mcplan.ml.GaussianMixtureModel.java

@Override
public void run() {
    init();
    System.out.println("Init");
    for (int i = 0; i < mu().length; ++i) {
        System.out.println("Mu " + i + ": " + mu()[i]);
        System.out.println("Sigma " + i + ": " + Sigma()[i]);
    }

    int iterations = 0;
    while (!converged_ && iterations++ < max_iterations_) {
        // Expectation
        makeMixture();
        for (int i = 0; i < n_; ++i) {
            for (int j = 0; j < k_; ++j) {
                c_[i][j] = posterior(data_[i], j);
            }
            Fn.normalize_inplace(c_[i]);
        }

        // Maximization
        for (int j = 0; j < k_; ++j) {
            double Z = 0.0;
            final RealVector mu_j = new ArrayRealVector(d_);
            RealMatrix Sigma_j = new Array2DRowRealMatrix(d_, d_);
            for (int i = 0; i < n_; ++i) {
                final double c_ij = c_[i][j];
                Z += c_ij;
                final RealVector x_i = new ArrayRealVector(data_[i]);
                // mu_j += c_ij * x_i (scale factor folded into combineToSelf; avoids a temporary vector)
                mu_j.combineToSelf(1.0, c_ij, x_i);
                final RealVector v = x_i.subtract(mu_[j]);
                // Sigma_j += c_ij * |v><v|
                Sigma_j = Sigma_j.add(v.outerProduct(v).scalarMultiply(c_ij));
            }
            final double Zinv = 1.0 / Z;
            final double pi_j = Z / n_;
            mu_j.mapMultiplyToSelf(Zinv);
            Sigma_j = Sigma_j.scalarMultiply(Zinv);
            //            converged &= hasConverged( j, pi_j, mu_j, Sigma_j );
            pi_[j] = pi_j;
            mu_[j] = mu_j;
            Sigma_[j] = Sigma_j;
        }
        //         debug();

        final double log_likelihood = logLikelihood();
        if (Math.abs(log_likelihood - old_log_likelihood_) < epsilon_) {
            converged_ = true;
        }
        old_log_likelihood_ = log_likelihood;
    }
}

From source file:org.apache.predictionio.examples.java.recommendations.tutorial4.FeatureBasedAlgorithm.java

public FeatureBasedModel train(PreparedData data) {
    Map<Integer, RealVector> userFeatures = new HashMap<Integer, RealVector>();
    Map<Integer, Integer> userActions = new HashMap<Integer, Integer>();

    for (Integer uid : data.userInfo.keySet()) {
        userFeatures.put(uid, new ArrayRealVector(data.featureCount));
        userActions.put(uid, 0);
    }

    for (TrainingData.Rating rating : data.ratings) {
        final int uid = rating.uid;
        final int iid = rating.iid;
        final double rate = rating.rating;

        // Skip ratings outside the valid range.
        if (!(params.min <= rate && rate <= params.max))
            continue;

        final double actualRate = (rate - params.drift) * params.scale;
        final RealVector userFeature = userFeatures.get(uid);
        final RealVector itemFeature = data.itemFeatures.get(iid);
        userFeature.combineToSelf(1, actualRate, itemFeature);

        userActions.put(uid, userActions.get(uid) + 1);
    }

    // Normalize userFeatures by l-inf-norm
    for (Integer uid : userFeatures.keySet()) {
        final RealVector feature = userFeatures.get(uid);
        feature.mapDivideToSelf(feature.getLInfNorm());
    }

    // Normalize itemFeatures by weight
    Map<Integer, RealVector> itemFeatures = new HashMap<Integer, RealVector>();
    for (Integer iid : data.itemFeatures.keySet()) {
        final RealVector feature = data.itemFeatures.get(iid);
        final RealVector normalizedFeature = feature.mapDivide(feature.getL1Norm());
        itemFeatures.put(iid, normalizedFeature);
    }

    return new FeatureBasedModel(userFeatures, userActions, itemFeatures);
}
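
In the rating loop, userFeature.combineToSelf(1, actualRate, itemFeature) adds the rating-scaled item feature vector into the user's profile in place, so each user vector ends up as a weighted sum of the features of the items that user rated.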

From source file:org.grouplens.samantha.modeler.featurizer.SVDFeatureFactorExtractor.java

public Map<String, List<Feature>> extract(JsonNode entity, boolean update, IndexSpace indexSpace) {
    int dim = model.getVectorVarDimensionByName(SVDFeatureKey.FACTORS.get());
    Map<String, List<Feature>> svdFeaMap = model.getFeatureMap(entity, false);
    Map<String, List<Feature>> feaMap = new HashMap<>();
    for (Map.Entry<String, List<String>> entry : fea2svdfeas.entrySet()) {
        RealVector vector = MatrixUtils.createRealVector(new double[dim]);
        List<String> svdfeas = entry.getValue();
        boolean hit = false;
        for (String svdfea : svdfeas) {
            if (svdFeaMap.containsKey(svdfea)) {
                List<Feature> features = svdFeaMap.get(svdfea);
                for (Feature feature : features) {
                    vector.combineToSelf(1.0, feature.getValue(),
                            model.getVectorVarByNameIndex(SVDFeatureKey.FACTORS.get(), feature.getIndex()));
                }
                hit = true;
            }
        }
        if (!hit && sparse) {
            continue;
        }
        String feaName = entry.getKey();
        List<Feature> features = new ArrayList<>();
        for (int i = 0; i < dim; i++) {
            String key = FeatureExtractorUtilities.composeKey(feaName, Integer.toString(i));
            FeatureExtractorUtilities.getOrSetIndexSpaceToFeaturize(features, update, indexSpace, indexName,
                    key, vector.getEntry(i));
        }
        feaMap.put(feaName, features);
    }
    return feaMap;
}

From source file:org.grouplens.samantha.modeler.solver.L2Regularizer.java

public RealVector addGradient(RealVector grad, RealVector var, double l2coef) {
    return grad.combineToSelf(1.0, 2 * l2coef, var);
}
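
The gradient of the L2 penalty l2coef * ||var||^2 is 2 * l2coef * var, so this one-liner folds the regularization gradient into grad in place. A quick arithmetic check, equivalent to addGradient(grad, var, 0.1) (values are illustrative):

RealVector grad = new ArrayRealVector(new double[] { 0.5, -1.0 });
RealVector var = new ArrayRealVector(new double[] { 2.0, 4.0 });
grad.combineToSelf(1.0, 2 * 0.1, var); // grad is now {0.9; -0.2}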

From source file:org.grouplens.samantha.modeler.svdfeature.SVDFeature.java

private double predict(SVDFeatureInstance ins, StochasticOracle outOrc, RealVector outUfactSum,
        RealVector outIfactSum) {
    double pred = 0.0;
    for (int i = 0; i < ins.gfeas.size(); i++) {
        int ind = ins.gfeas.get(i).getIndex();
        double val = ins.gfeas.get(i).getValue();
        if (outOrc != null) {
            outOrc.addScalarOracle(SVDFeatureKey.BIASES.get(), ind, val);
        }
        pred += getScalarVarByNameIndex(SVDFeatureKey.BIASES.get(), ind) * val;
    }

    outUfactSum.set(0.0);
    for (int i = 0; i < ins.ufeas.size(); i++) {
        int index = ins.ufeas.get(i).getIndex();
        outUfactSum.combineToSelf(1.0, ins.ufeas.get(i).getValue(),
                getVectorVarByNameIndex(SVDFeatureKey.FACTORS.get(), index));
    }

    outIfactSum.set(0.0);
    for (int i = 0; i < ins.ifeas.size(); i++) {
        int index = ins.ifeas.get(i).getIndex();
        outIfactSum.combineToSelf(1.0, ins.ifeas.get(i).getValue(),
                getVectorVarByNameIndex(SVDFeatureKey.FACTORS.get(), index));
    }

    pred += outUfactSum.dotProduct(outIfactSum);
    return pred;
}
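
Both factor sums are computed as running in-place updates: outUfactSum.combineToSelf(1.0, val, factor) is effectively outUfactSum += val * factor, which avoids allocating a temporary vector for every feature of the instance.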

From source file:org.lenskit.mf.BPR.BPRMFModelProvider.java

@Override
public MFModel get() {
    // This will accumulate BPR-Opt (minus the regularization) and will be negated to make an error.
    // -30 is arbitrary, but would indicate a _really_ consistently bad prediction (~ p=1*10^-13),
    // and is therefore a reasonable "max_error".
    RollingWindowMeanAccumulator optAccum = new RollingWindowMeanAccumulator(10000, -30);

    // set up user index and matrix
    int userCount = dao.getEntityIds(CommonTypes.USER).size();
    for (long uid : dao.getEntityIds(CommonTypes.USER)) {
        userIndex.internId(uid);
    }
    RealMatrix userFeatures = MatrixUtils.createRealMatrix(userCount, featureCount);
    initFeatures(userFeatures);

    // set up item index and matrix
    int itemCount = dao.getEntityIds(CommonTypes.ITEM).size();
    for (long iid : dao.getEntityIds(CommonTypes.ITEM)) {
        itemIndex.internId(iid);
    }
    RealMatrix itemFeatures = MatrixUtils.createRealMatrix(itemCount, featureCount);
    initFeatures(itemFeatures);

    logger.debug("Learning rate is {}", learningRate);
    logger.debug("Regularization term is {}", regularization);

    logger.info("Building MPR-MF with {} features for {} users and {} items", featureCount, userCount,
            itemCount);

    TrainingLoopController controller = stoppingCondition.newLoop();

    //REVIEW: because of the nature of training samples (and the point the BPR paper makes that training
    // by-item or by-user is not optimal), one "iteration" here is one training update. This leads to _really_
    // big iteration counts, which can actually overflow ints! One suggestion would be to allow the iteration
    // count controller to accept longs, but I don't know whether that would introduce backwards-compatibility
    // issues (I imagine it depends on the robustness of our type conversion in the configuration).
    while (controller.keepTraining(-optAccum.getMean())) {
        for (TrainingItemPair pair : pairGenerator.nextBatch()) {
            // Note: the unconventional variable names match the BPR paper to make the implementation easier to follow
            long iid = pair.g;
            int i = itemIndex.internId(iid);
            long jid = pair.l;
            int j = itemIndex.internId(jid);
            long uid = pair.u;
            int u = userIndex.internId(uid);

            RealVector w_u = userFeatures.getRowVector(u);
            RealVector h_i = itemFeatures.getRowVector(i);
            RealVector h_j = itemFeatures.getRowVector(j);

            double xui = w_u.dotProduct(h_i);
            double xuj = w_u.dotProduct(h_j);
            double xuij = xui - xuj;

            double bprTerm = 1 / (1 + exp(xuij));

            // w_u update
            RealVector h_i_j = h_i.subtract(h_j);
            RealVector w_u_update = w_u.mapMultiply(-regularization);
            w_u_update.combineToSelf(1, bprTerm, h_i_j);

            // h_i update
            RealVector h_i_update = h_i.mapMultiply(-regularization);
            h_i_update.combineToSelf(1, bprTerm, w_u);

            // h_j update
            RealVector h_j_update = h_j.mapMultiply(-regularization);
            h_j_update.combineToSelf(1, -bprTerm, w_u);

            // perform updates (in place)
            w_u.combineToSelf(1, learningRate, w_u_update);
            h_i.combineToSelf(1, learningRate, h_i_update);
            h_j.combineToSelf(1, learningRate, h_j_update);

            // Commons Math getRowVector returns a copy of the row, so the updated
            // vectors must be written back for the updates to take effect.
            userFeatures.setRowVector(u, w_u);
            itemFeatures.setRowVector(i, h_i);
            itemFeatures.setRowVector(j, h_j);

            // update the optimization function accumulator (note we are not including the regularization term)
            optAccum.add(Math.log(1 / (1 + Math.exp(-xuij))));
        }
    }

    return new MFModel(userFeatures, itemFeatures, userIndex, itemIndex);
}
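
Each combineToSelf(1, learningRate, ...) call above is an in-place stochastic gradient step (w = w + learningRate * update), so the inner sampling loop only allocates the small per-update vectors built just before it.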