Example usage for org.apache.commons.math3.linear RealVector dotProduct

Introduction

This page collects example usages of org.apache.commons.math3.linear RealVector dotProduct, drawn from open-source projects.

Prototype

public double dotProduct(RealVector v) throws DimensionMismatchException 

Document

Compute the dot product of this vector with v.
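
As a quick, self-contained sketch (not taken from any of the projects below): the call multiplies the two vectors element by element and sums the results, and vectors of different dimensions raise a DimensionMismatchException.

import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.RealVector;

RealVector a = new ArrayRealVector(new double[] { 1.0, 2.0, 3.0 });
RealVector b = new ArrayRealVector(new double[] { 4.0, 5.0, 6.0 });
double dot = a.dotProduct(b); // 1*4 + 2*5 + 3*6 = 32.0
// a.dotProduct(new ArrayRealVector(2)) would throw DimensionMismatchException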

Usage

From source file:com.joptimizer.optimizers.LPPrimalDualMethodNetlibTest.java

private void checkSolution(LPNetlibProblem problem, LPOptimizationRequest or, LPOptimizationResponse response)
        throws Exception {

    double expectedvalue = problem.optimalValue;
    log.debug("expectedvalue : " + expectedvalue);
    double[] sol = response.getSolution();
    RealVector cVector = new ArrayRealVector(or.getC().toArray());
    RealVector solVector = new ArrayRealVector(sol);
    double value = cVector.dotProduct(solVector);
    log.debug("sol   : " + ArrayUtils.toString(sol));
    log.debug("value : " + value);

    //check constraints
    assertEquals(or.getLb().size(), sol.length);
    assertEquals(or.getUb().size(), sol.length);
    RealVector x = MatrixUtils.createRealVector(sol);

    //x >= lb
    double maxLbmx = -Double.MAX_VALUE;
    for (int i = 0; i < or.getLb().size(); i++) {
        double di = Double.isNaN(or.getLb().getQuick(i)) ? -Double.MAX_VALUE : or.getLb().getQuick(i);
        maxLbmx = Math.max(maxLbmx, di - x.getEntry(i));
        assertTrue(di <= x.getEntry(i) + or.getTolerance());
    }
    log.debug("max(lb - x): " + maxLbmx);

    //x <= ub
    double maxXmub = -Double.MAX_VALUE;
    for (int i = 0; i < or.getUb().size(); i++) {
        double di = Double.isNaN(or.getUb().getQuick(i)) ? Double.MAX_VALUE : or.getUb().getQuick(i);
        maxXmub = Math.max(maxXmub, x.getEntry(i) - di);
        assertTrue(di + or.getTolerance() >= x.getEntry(i));
    }
    log.debug("max(x - ub): " + maxXmub);

    //G.x <= h
    if (or.getG() != null && or.getG().rows() > 0) {
        RealMatrix GMatrix = MatrixUtils.createRealMatrix(or.getG().toArray());
        RealVector hvector = MatrixUtils.createRealVector(or.getH().toArray());
        RealVector Gxh = GMatrix.operate(x).subtract(hvector);
        double maxGxh = -Double.MAX_VALUE;
        for (int i = 0; i < Gxh.getDimension(); i++) {
            maxGxh = Math.max(maxGxh, Gxh.getEntry(i));
            assertTrue(Gxh.getEntry(i) - or.getTolerance() <= 0);
        }
        log.debug("max(G.x - h): " + maxGxh);
    }

    //A.x = b
    if (or.getA() != null && or.getA().rows() > 0) {
        RealMatrix AMatrix = MatrixUtils.createRealMatrix(or.getA().toArray());
        RealVector bvector = MatrixUtils.createRealVector(or.getB().toArray());
        RealVector Axb = AMatrix.operate(x).subtract(bvector);
        double norm = Axb.getNorm();
        log.debug("||A.x -b||: " + norm);
        assertEquals(0., norm, or.getToleranceFeas());
    }

    double percDelta = Math.abs((expectedvalue - value) / expectedvalue);
    log.debug("percDelta: " + percDelta);
    //assertEquals(0., percDelta, or.getTolerance());
    //assertEquals(expectedvalue, value, or.getTolerance());
    assertTrue(value < expectedvalue + or.getTolerance());//can even beat other optimizers! the rebel yell...
}

From source file:com.github.thorbenlindhauer.factor.CanonicalGaussianFactor.java

/**
 * Transforms a conditional linear Gaussian (i.e. a Gaussian of the form N(x; a + B^T * Y, C)) into canonical form.
 *
 * @param meanVector a
 * @param weightMatrix B
 * @param covarianceMatrix C
 */
public static CanonicalGaussianFactor fromConditionalForm(Scope scope, Scope conditioningScope,
        RealVector meanVector, RealMatrix covarianceMatrix, RealMatrix weightMatrix) {

    // TODO: perform cardinality checks etc.

    // the following assumes that the resulting precision matrix (and mean vector) can be restructured as follows:
    // ( SUBMATRIX_XX SUBMATRIX_XY )
    // ( SUBMATRIX_YX SUBMATRIX_YY )
    // where X indicates variables that are part of the prediction scope and Y are variables being part of the conditioning scope

    // assuming
    //   meanVector: a; |x| vector
    //   covarianceMatrix: C; |x| cross |x| matrix
    //   weightMatrix: B;  |x| cross |y| matrix

    // XX = C^-1
    // XY = -C^-1 * B
    // YX = -B^T * C^-1
    // YY = B^T * C^-1 * B

    MathUtil mathUtil = new MathUtil(covarianceMatrix);

    // C^(-1)
    RealMatrix xxMatrix = null;

    xxMatrix = mathUtil.invert();

    //    if (!mathUtil.isZeroMatrix()) {
    //      xxMatrix = mathUtil.invert();
    //    } else {
    //
    //      // this is a special case for convolution in which the "summing" variable has no variance itself
    //      // although a 0 variance is not valid in general
    //      xxMatrix = covarianceMatrix;
    //    }

    // B^T * C^(-1)
    RealMatrix weightedXXMatrix = weightMatrix.transpose().multiply(xxMatrix);

    // -B^T * C^(-1)
    RealMatrix yxMatrix = weightedXXMatrix.scalarMultiply(-1.0d);

    // -C^(-1)^T * B
    RealMatrix xyMatrix = xxMatrix.transpose().multiply(weightMatrix).scalarMultiply(-1.0d);

    // B^T * C^(-1) * B
    RealMatrix yyMatrix = weightedXXMatrix.multiply(weightMatrix);

    // K
    RealMatrix conditionedPrecisionMatrix = new Array2DRowRealMatrix(scope.size(), scope.size());

    // Matrix to generate h
    RealMatrix conditionedMeanTransformationMatrix = new Array2DRowRealMatrix(scope.size(), scope.size());

    Scope predictionScope = scope.reduceBy(conditioningScope);
    int[] predictionMapping = scope.createContinuousVariableMapping(predictionScope);
    int[] conditioningMapping = scope.createContinuousVariableMapping(conditioningScope);

    for (int i = 0; i < scope.size(); i++) {
        RealVector precisionColumn = conditionedPrecisionMatrix.getColumnVector(i);

        if (predictionMapping[i] >= 0) {
            precisionColumn = precisionColumn.add(
                    padVector(xxMatrix.getColumnVector(predictionMapping[i]), scope.size(), predictionMapping));

            conditionedMeanTransformationMatrix.setColumnVector(i, precisionColumn);

            precisionColumn = precisionColumn.add(padVector(yxMatrix.getColumnVector(predictionMapping[i]),
                    scope.size(), conditioningMapping));

            conditionedPrecisionMatrix.setColumnVector(i, precisionColumn);
        }

        if (conditioningMapping[i] >= 0) {
            precisionColumn = precisionColumn.add(padVector(xyMatrix.getColumnVector(conditioningMapping[i]),
                    scope.size(), predictionMapping));

            conditionedMeanTransformationMatrix.setColumnVector(i, precisionColumn);

            precisionColumn = precisionColumn.add(padVector(yyMatrix.getColumnVector(conditioningMapping[i]),
                    scope.size(), conditioningMapping));

            conditionedPrecisionMatrix.setColumnVector(i, precisionColumn);
        }
    }

    // h = (a, 0)^T * (XX, XY; 0, 0)
    RealMatrix scaledMeanMatrix = new Array2DRowRealMatrix(1, scope.size());
    scaledMeanMatrix.setRowVector(0, padVector(meanVector, scope.size(), predictionMapping));

    scaledMeanMatrix = scaledMeanMatrix.multiply(conditionedMeanTransformationMatrix);
    RealVector scaledMeanVector = scaledMeanMatrix.getRowVector(0);

    // g = -0.5 * a^T * C^-1 * a - log((2 * PI)^(m/2) * det(C)^0.5) where m is the size of the prediction scope
    RealMatrix meanMatrix = new Array2DRowRealMatrix(predictionScope.size(), 1);
    meanMatrix.setColumnVector(0, meanVector);
    double normalizationConstant = -0.5d * meanVector.dotProduct(xxMatrix.operate(meanVector)) - Math.log(
            Math.pow(2 * Math.PI, (double) predictionScope.size() / 2.0d) * Math.sqrt(mathUtil.determinant()));

    return new CanonicalGaussianFactor(scope, conditionedPrecisionMatrix, scaledMeanVector,
            normalizationConstant);

}
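
Note that the snippet above relies on a padVector helper that is not reproduced on this page. A plausible reconstruction, assuming the mapping array holds, for each position of the enclosing scope, the corresponding index in the smaller vector (or a negative value when the variable is absent); the project's actual helper may differ:

// Hypothetical sketch of the padVector helper used above.
private static RealVector padVector(RealVector vector, int size, int[] mapping) {
    RealVector padded = new ArrayRealVector(size); // zero-filled
    for (int i = 0; i < size; i++) {
        if (mapping[i] >= 0) {
            padded.setEntry(i, vector.getEntry(mapping[i]));
        }
    }
    return padded;
}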

From source file:edu.utexas.cs.tactex.utilityestimation.UtilityEstimatorDefaultForConsumption.java

/**
 * Core method for estimating utility.
 */
public double estimateUtility(HashMap<TariffSpecification, HashMap<CustomerInfo, Integer>> tariffSubscriptions,
        HashMap<TariffSpecification, HashMap<CustomerInfo, Double>> predictedCustomerSubscriptions,
        HashMap<CustomerInfo, HashMap<TariffSpecification, Double>> customer2estimatedTariffCharges,
        HashMap<CustomerInfo, HashMap<TariffSpecification, ShiftedEnergyData>> customerTariff2ShiftedEnergy,
        HashMap<CustomerInfo, ArrayRealVector> customer2NonShiftedEnergy,
        MarketPredictionManager marketPredictionManager, CostCurvesPredictor costCurvesPredictor,
        int currentTimeslot/*, HashMap<CustomerInfo,ArrayRealVector> customer2estimatedEnergy*/) { // <= for print purposes

    log.debug(
            "estimateUtility(): currently assuming competing tariffs are not new and that my subscriptions are not going to change as a result of them");

    // accumulate the final results    
    //int expectedRecordLength = 7 * 24; // TODO use Timeservice here correctly
    double estTariffIncome = 0;
    double estTariffCosts = 0;
    double wholesaleCosts = 0;
    double balancingCosts = 0;
    double distributionCosts = 0;
    double withdrawCosts = 0;
    //double totalConsumption = 0; 
    //double totalProduction = 0; 
    final int predictionRecordLength = BrokerUtils.extractPredictionRecordLength(customerTariff2ShiftedEnergy);
    RealVector predictedEnergyRecord = new ArrayRealVector(predictionRecordLength);
    RealVector totalConsumptionEnergyRecord = new ArrayRealVector(predictionRecordLength);
    RealVector totalProductionEnergyRecord = new ArrayRealVector(predictionRecordLength);
    // for each customer get his usage prediction
    for (Entry<TariffSpecification, HashMap<CustomerInfo, Double>> entry : predictedCustomerSubscriptions
            .entrySet()) {

        TariffSpecification spec = entry.getKey();
        HashMap<CustomerInfo, Double> subscriptions = entry.getValue();

        for (Entry<CustomerInfo, Double> ce : subscriptions.entrySet()) {

            CustomerInfo customerInfo = ce.getKey();
            Double subscribedPopulation = ce.getValue();

            // Predicted total tariff cash flow. Sign is inverted since
            // evaluatedTariffs was computed from customers' perspective
            if (spec.getPowerType().isConsumption()) {
                estTariffIncome += -customer2estimatedTariffCharges.get(customerInfo).get(spec)
                        * subscribedPopulation;
            } else if (spec.getPowerType().isProduction()) {
                estTariffCosts += -customer2estimatedTariffCharges.get(customerInfo).get(spec)
                        * subscribedPopulation;
            } else {
                log.warn("Ignoring unknown powertype when computing tariffs income/costs: "
                        + spec.getPowerType());
            }

            // Predicted total energy
            RealVector energyPrediction = customerTariff2ShiftedEnergy.get(customerInfo).get(spec)
                    .getShiftedEnergy().mapMultiply(subscribedPopulation);
            predictedEnergyRecord = predictedEnergyRecord.add(energyPrediction);

            // Predicted balancing cost 
            balancingCosts += 0;
            log.debug("Ignoring balancing costs - assuming they are 0");

            // Predicted withdraw costs (currently assuming everyone will pay).
            // sign is inverted since withdraw payment is from customer's perspective
            HashMap<CustomerInfo, Integer> cust2subs = tariffSubscriptions.get(spec);
            if (cust2subs != null) {
                Integer currentSubs = cust2subs.get(customerInfo);
                if (currentSubs != null && subscribedPopulation < currentSubs) {
                    double withdraws = currentSubs - subscribedPopulation;
                    withdrawCosts += -(withdraws * spec.getEarlyWithdrawPayment());
                }
            }

            // Predicted total consumption and total production
            if (spec.getPowerType().isConsumption()) {
                totalConsumptionEnergyRecord = totalConsumptionEnergyRecord.add(energyPrediction);
            } else if (spec.getPowerType().isProduction()) {
                totalProductionEnergyRecord = totalProductionEnergyRecord.add(energyPrediction);
            } else {
                log.warn("Ignoring unknown powertype when computing distribution costs");
            }
        }
    }

    // Predicted distribution costs
    log.debug("Ignoring balancing orders and curtailment when computing distribution costs");
    distributionCosts = 0;
    double distributionFee = contextManager.getDistributionFee();
    for (int i = 0; i < totalConsumptionEnergyRecord.getDimension(); ++i) {
        double totalTimeslotConsumption = Math.abs(totalConsumptionEnergyRecord.getEntry(i));
        double totalTimeslotProduction = Math.abs(totalProductionEnergyRecord.getEntry(i));
        distributionCosts += Math.max(totalTimeslotConsumption, totalTimeslotProduction) * distributionFee;
    }

    // Predicted wholesale costs (in one of the following two methods:)
    if (configuratorFactoryService.isUseCostCurves()) {
        // TODO: might not work for non-fixed rate competitor tariffs - 
        // better to send a mapping of competitor tariffs inside
        // predictedCustomerSubscriptions, compute shifted predictions for them,
        // and use these predictions here.

        // compute energy of customers I don't have 
        RealVector predictedCompetitorsEnergyRecord = new ArrayRealVector(predictionRecordLength);
        HashMap<CustomerInfo, HashMap<TariffSpecification, Double>> map = BrokerUtils
                .revertKeyMapping(predictedCustomerSubscriptions);
        for (CustomerInfo cust : map.keySet()) {
            double subsToOthers = cust.getPopulation() - BrokerUtils.sumMapValues(map.get(cust));
            RealVector customerNonShiftedEnergy = customer2NonShiftedEnergy.get(cust).mapMultiply(subsToOthers);
            predictedCompetitorsEnergyRecord = predictedCompetitorsEnergyRecord.add(customerNonShiftedEnergy);
        }

        for (int i = 0; i < predictedEnergyRecord.getDimension(); ++i) {
            int futureTimeslot = currentTimeslot + i;
            double neededKwh = predictedEnergyRecord.getEntry(i);
            double competitorKwh = predictedCompetitorsEnergyRecord.getEntry(i);
            double unitCost = costCurvesPredictor.predictUnitCostKwh(currentTimeslot, futureTimeslot, neededKwh,
                    competitorKwh);
            // NOTE: unitCost is signed (typically negative)
            wholesaleCosts += (unitCost + costCurvesPredictor.getFudgeFactorKwh(currentTimeslot)) * neededKwh;
            log.debug("cost-curve prediction: current " + currentTimeslot + " futureTimeslot " + futureTimeslot
                    + " neededKwh " + neededKwh + " neededKwh + competitorKwh " + (neededKwh + competitorKwh)
                    + " unitCost " + unitCost);
        }
    } else {
        // compute wholesale costs using this consumption and the market manager's market-price prediction
        ArrayRealVector estimatedMarketPrices = marketPredictionManager.getPricePerKwhPredictionForAbout7Days();
        // sanity check
        if (predictedEnergyRecord.getDimension() != estimatedMarketPrices.getDimension()) {
            log.error("Cannot compute utility - prediction periods of market and energy differ);");
            return 0;
        }
        wholesaleCosts = -predictedEnergyRecord.dotProduct(estimatedMarketPrices);
    }

    log.info("estTariffIncome " + estTariffIncome);
    log.info("estTariffCosts " + estTariffCosts);
    log.info("wholesaleCosts " + wholesaleCosts);
    log.info("balancingCosts " + balancingCosts);
    log.info("distributionCosts " + distributionCosts);
    //log.info("old distributionCosts " +  Math.max(totalProduction, totalConsumption) * contextManager.getDistributionFee());
    log.info("withdrawCosts " + withdrawCosts);

    return estTariffIncome + estTariffCosts + wholesaleCosts + balancingCosts + distributionCosts
            + withdrawCosts;
}
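
The wholesale-cost branch above (wholesaleCosts = -predictedEnergyRecord.dotProduct(estimatedMarketPrices)) collapses a per-timeslot multiply-and-accumulate into a single call. A stripped-down sketch of that pattern, with made-up numbers:

RealVector energyKwh = new ArrayRealVector(new double[] { 10.0, 12.0, 8.0 });
RealVector pricePerKwh = new ArrayRealVector(new double[] { -0.05, -0.06, -0.04 });
// equivalent to summing energyKwh[i] * pricePerKwh[i] over all timeslots
double wholesaleCosts = -energyKwh.dotProduct(pricePerKwh); // 1.54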

From source file:org.apache.predictionio.examples.java.recommendations.tutorial4.FeatureBasedAlgorithm.java

public Float predict(FeatureBasedModel model, Query query) {
    final int uid = query.uid;
    final int iid = query.iid;

    if (!model.userFeatures.containsKey(uid)) {
        return Float.NaN;
    }

    if (!model.itemFeatures.containsKey(iid)) {
        return Float.NaN;
    }

    final RealVector userFeature = model.userFeatures.get(uid);
    final RealVector itemFeature = model.itemFeatures.get(iid);

    return (float) userFeature.dotProduct(itemFeature);
}

From source file:org.apache.solr.client.solrj.io.eval.DotProductEvaluator.java

@Override
public Object doWork(Object first, Object second) throws IOException {
    if (null == first) {
        throw new IOException(String.format(Locale.ROOT,
                "Invalid expression %s - null found for the first value", toExpression(constructingFactory)));
    }
    if (null == second) {
        throw new IOException(String.format(Locale.ROOT,
                "Invalid expression %s - null found for the second value", toExpression(constructingFactory)));
    }
    if (!(first instanceof List<?>)) {
        throw new IOException(String.format(Locale.ROOT,
                "Invalid expression %s - found type %s for the first value, expecting a list of numbers",
                toExpression(constructingFactory), first.getClass().getSimpleName()));
    }
    if (!(second instanceof List<?>)) {
        throw new IOException(String.format(Locale.ROOT,
                "Invalid expression %s - found type %s for the second value, expecting a list of numbers",
                toExpression(constructingFactory), second.getClass().getSimpleName()));
    }

    RealVector v = new ArrayRealVector(
            ((List) first).stream().mapToDouble(value -> ((BigDecimal) value).doubleValue()).toArray());
    RealVector v2 = new ArrayRealVector(
            ((List) second).stream().mapToDouble(value -> ((BigDecimal) value).doubleValue()).toArray());

    return v.dotProduct(v2);

}

From source file:org.grouplens.samantha.modeler.reinforce.LinearUCB.java

public double predict(LearningInstance instance) {
    RealMatrix A = variableSpace.getMatrixVarByName(LinearUCBKey.A.get());
    RealVector B = variableSpace.getScalarVarByName(LinearUCBKey.B.get());
    RealMatrix invA = new LUDecomposition(A).getSolver().getInverse();
    RealVector theta = invA.operate(B);
    RealVector x = extractDenseVector(theta.getDimension(), instance);
    double bound = Math.sqrt(x.dotProduct(invA.operate(x)));
    double mean = x.dotProduct(theta);
    double pred = mean + alpha * bound;
    if (Double.isNaN(pred)) {
        logger.error("Prediction is NaN, model parameter A probably goes wrong.");
        pred = 0.0;
    }
    return pred;
}
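
The score is the classic LinUCB form: x . theta plus alpha times sqrt(x^T A^-1 x). The method above forms the explicit inverse; a minimal sketch of the same computation that solves two linear systems instead, which is generally cheaper and numerically safer for small dense matrices (the values here are made up):

RealMatrix A = MatrixUtils.createRealIdentityMatrix(2);
RealVector b = new ArrayRealVector(new double[] { 0.3, 0.5 });
RealVector x = new ArrayRealVector(new double[] { 1.0, 0.0 });
double alpha = 0.1;

DecompositionSolver solver = new LUDecomposition(A).getSolver();
RealVector theta = solver.solve(b);                       // theta = A^-1 * b
double mean = x.dotProduct(theta);                        // exploitation term
double bound = Math.sqrt(x.dotProduct(solver.solve(x)));  // sqrt(x^T A^-1 x)
double score = mean + alpha * bound;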

From source file:org.grouplens.samantha.modeler.svdfeature.SVDFeature.java

private double predict(SVDFeatureInstance ins, StochasticOracle outOrc, RealVector outUfactSum,
        RealVector outIfactSum) {
    double pred = 0.0;
    for (int i = 0; i < ins.gfeas.size(); i++) {
        int ind = ins.gfeas.get(i).getIndex();
        double val = ins.gfeas.get(i).getValue();
        if (outOrc != null) {
            outOrc.addScalarOracle(SVDFeatureKey.BIASES.get(), ind, val);
        }
        pred += getScalarVarByNameIndex(SVDFeatureKey.BIASES.get(), ind) * val;
    }

    outUfactSum.set(0.0);
    for (int i = 0; i < ins.ufeas.size(); i++) {
        int index = ins.ufeas.get(i).getIndex();
        outUfactSum.combineToSelf(1.0, ins.ufeas.get(i).getValue(),
                getVectorVarByNameIndex(SVDFeatureKey.FACTORS.get(), index));
    }

    outIfactSum.set(0.0);
    for (int i = 0; i < ins.ifeas.size(); i++) {
        int index = ins.ifeas.get(i).getIndex();
        outIfactSum.combineToSelf(1.0, ins.ifeas.get(i).getValue(),
                getVectorVarByNameIndex(SVDFeatureKey.FACTORS.get(), index));
    }

    pred += outUfactSum.dotProduct(outIfactSum);
    return pred;
}
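
Both factor sums above are accumulated with combineToSelf: in commons-math, v.combineToSelf(a, b, w) replaces v with a*v + b*w in place. A tiny sketch of that semantics feeding into dotProduct:

RealVector v = new ArrayRealVector(new double[] { 1.0, 1.0 });
RealVector w = new ArrayRealVector(new double[] { 2.0, 4.0 });
v.combineToSelf(1.0, 0.5, w);     // v is now { 2.0, 3.0 }
double selfDot = v.dotProduct(v); // 4.0 + 9.0 = 13.0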

From source file:org.knime.al.util.noveltydetection.knfst.MultiClassKNFST.java

private RealMatrix squared_euclidean_distances(final RealMatrix x, final RealMatrix y) {
    final RealMatrix distmat = MatrixUtils.createRealMatrix(x.getRowDimension(), y.getRowDimension());

    for (int i = 0; i < x.getRowDimension(); i++) {
        for (int j = 0; j < y.getRowDimension(); j++) {
            final RealVector buff = x.getRowVector(i).subtract(y.getRowVector(j));
            distmat.setEntry(i, j, buff.dotProduct(buff));
        }
    }

    return distmat;
}
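
Dotting the difference vector with itself yields its squared Euclidean norm, which is exactly what the distance matrix needs (and avoids a square root followed by a squaring). A short sketch of the identity:

RealVector a = new ArrayRealVector(new double[] { 1.0, 2.0 });
RealVector b = new ArrayRealVector(new double[] { 4.0, 6.0 });
RealVector diff = a.subtract(b);
double squared = diff.dotProduct(diff);           // 9 + 16 = 25.0
double viaNorm = diff.getNorm() * diff.getNorm(); // same value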

From source file:org.lenskit.mf.BiasedMFItemScorer.java

/**
 * Compute the score a user and item using their vectors.
 *
 * @param bias The combined user-item bias term (the baseline score, usually).
 * @param user The user-factor vector.
 * @param item The item-factor vector.
 * @return The kernel function value (combined score).
 * @throws IllegalArgumentException if the user and item vectors have different lengths.
 */
protected double computeScore(double bias, @Nonnull RealVector user, @Nonnull RealVector item) {
    return bias + user.dotProduct(item);
}
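
A hedged usage sketch with made-up factor vectors and a bias of 3.5 (e.g., a global mean rating as the baseline):

RealVector user = new ArrayRealVector(new double[] { 0.2, -0.1, 0.7 });
RealVector item = new ArrayRealVector(new double[] { 0.5, 0.3, 0.4 });
double score = 3.5 + user.dotProduct(item); // 3.5 + 0.35 = 3.85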

From source file:org.lenskit.mf.BPR.BPRMFModelProvider.java

@Override
public MFModel get() {
    // This will accumulate BPR-Opt (minus the regularization) and will be negated to make an error.
    // -30 is arbitrary, but would indicate a _really_ consistently bad prediction (~ p=1*10^-13),
    // and is therefore a reasonable "max_error".
    RollingWindowMeanAccumulator optAccum = new RollingWindowMeanAccumulator(10000, -30);

    // set up user index and matrix
    int userCount = dao.getEntityIds(CommonTypes.USER).size();
    for (long uid : dao.getEntityIds(CommonTypes.USER)) {
        userIndex.internId(uid);
    }
    RealMatrix userFeatures = MatrixUtils.createRealMatrix(userCount, featureCount);
    initFeatures(userFeatures);

    // set up item index and matrix
    int itemCount = dao.getEntityIds(CommonTypes.ITEM).size();
    for (long iid : dao.getEntityIds(CommonTypes.ITEM)) {
        itemIndex.internId(iid);
    }
    RealMatrix itemFeatures = MatrixUtils.createRealMatrix(itemCount, featureCount);
    initFeatures(itemFeatures);

    logger.debug("Learning rate is {}", learningRate);
    logger.debug("Regularization term is {}", regularization);

    logger.info("Building MPR-MF with {} features for {} users and {} items", featureCount, userCount,
            itemCount);

    TrainingLoopController controller = stoppingCondition.newLoop();

    //REVIEW: because of the nature of training samples (and the point that the BPR paper makes that training
    // by-item or by-user is not optimal), one "iteration" here is one training update. This leads to _really_
    // big iteration counts, which can actually overflow ints! One suggestion would be to allow the iteration count
    // controller to accept longs, but I don't know if this will introduce backwards-compatibility issues (I imagine
    // this depends on the robustness of our type conversion in the configuration).
    while (controller.keepTraining(-optAccum.getMean())) {
        for (TrainingItemPair pair : pairGenerator.nextBatch()) {
            // Note: variable names intentionally match the BPR paper to make the implementation easier to follow
            long iid = pair.g;
            int i = itemIndex.internId(iid);
            long jid = pair.l;
            int j = itemIndex.internId(jid);
            long uid = pair.u;
            int u = userIndex.internId(uid);

            RealVector w_u = userFeatures.getRowVector(u);
            RealVector h_i = itemFeatures.getRowVector(i);
            RealVector h_j = itemFeatures.getRowVector(j);

            double xui = w_u.dotProduct(h_i);
            double xuj = w_u.dotProduct(h_j);
            double xuij = xui - xuj;

            double bprTerm = 1 / (1 + exp(xuij));

            // w_u update
            RealVector h_i_j = h_i.subtract(h_j);
            RealVector w_u_update = w_u.mapMultiply(-regularization);
            w_u_update.combineToSelf(1, bprTerm, h_i_j);

            // h_i update
            RealVector h_i_update = h_i.mapMultiply(-regularization);
            h_i_update.combineToSelf(1, bprTerm, w_u);

            // h_j update
            RealVector h_j_update = h_j.mapMultiply(-regularization);
            h_j_update.combineToSelf(1, -bprTerm, w_u);

            // perform updates
            w_u.combineToSelf(1, learningRate, w_u_update);
            h_i.combineToSelf(1, learningRate, h_i_update);
            h_j.combineToSelf(1, learningRate, h_j_update);

            // update the optimization function accumulator (note we are not including the regularization term)
            optAccum.add(Math.log(1 / (1 + Math.exp(-xuij))));
        }
    }

    return new MFModel(userFeatures, itemFeatures, userIndex, itemIndex);
}