List of usage examples for the org.apache.commons.math3.linear ArrayRealVector(ArrayRealVector) constructor
public ArrayRealVector(ArrayRealVector v) throws NullArgumentException
From source file:lambertmrev.conversions.java
public static RealVector kMat2Vec(int[][] in) { int dim = in.length; RealVector vec = new ArrayRealVector(dim); for (int ii = 0; ii < dim; ii++) { for (int jj = 0; jj < dim; jj++) { if (in[ii][jj] == 1) { double tmp = (double) jj; // System.out.println("there is a one on row: " + ii + " and col: " + tmp); vec.setEntry(ii, tmp);//from www . j a v a 2 s. c o m // System.out.println("vector at: " + ii + " is " + vec.getEntry(ii)); } } } return vec; }
From source file:com.joptimizer.optimizers.LPPrimalDualMethodTest.java
/** * Simple problem in the form//from ww w .j a v a2 s . c om * min(c.x) s.t. * A.x = b * x >=0 */ public void testSimple2() throws Exception { log.debug("testSimple2"); double[] c = new double[] { -1, -2 }; double[][] A = new double[][] { { 1, 1 } }; double[] b = new double[] { 1 }; LPOptimizationRequest or = new LPOptimizationRequest(); or.setC(c); or.setA(A); or.setB(b); or.setLb(new double[] { 0, 0 }); //or.setUb(new double[]{Double.NaN, Double.NaN}); //or.setInitialPoint(new double[] { 0.9, 0.1 }); //or.setNotFeasibleInitialPoint(new double[] { -0.5, 1.5 }); or.setCheckKKTSolutionAccuracy(true); or.setToleranceFeas(1.E-7); or.setTolerance(1.E-7); or.setDumpProblem(true); //or.setPresolvingDisabled(true); //optimization LPPrimalDualMethod opt = new LPPrimalDualMethod(); opt.setLPOptimizationRequest(or); int returnCode = opt.optimize(); if (returnCode == OptimizationResponse.FAILED) { fail(); } LPOptimizationResponse response = opt.getLPOptimizationResponse(); double[] sol = response.getSolution(); RealVector cVector = new ArrayRealVector(c); RealVector solVector = new ArrayRealVector(sol); double value = cVector.dotProduct(solVector); log.debug("sol : " + ArrayUtils.toString(sol)); log.debug("value : " + value); assertEquals(2, sol.length); assertEquals(0, sol[0], or.getTolerance()); assertEquals(1, sol[1], or.getTolerance()); assertEquals(-2, value, or.getTolerance()); }
From source file:edu.stanford.cfuller.imageanalysistools.filter.LocalMaximumSeparabilityThresholdingFilter.java
/**
 * Applies the filter to an Image, optionally turning on adaptive determination of the increment size used to find the threshold, and specifying a size for the threshold determination increment.
 * Turning on adaptive determination of the increment will generally make the threshold slightly less optimal, but can sometimes speed up the filtering, especially
 * for images with a large dynamic range.
 * <p>
 * The increment size specified (in greylevels) will be used to determine the threshold only if adaptive determination is off; otherwise this parameter will be ignored.
 * @param im The Image to be thresholded; this will be overwritten by the thresholded Image.
 * @param adaptiveincrement true to turn on adaptive determination of the threshold increment; false to turn it off and use the default value
 * @param increment the increment size (in greylevels) to use for determining the threshold; must be positive.
 */
public void apply_ext(WritableImage im, boolean adaptiveincrement, int increment) {
    Histogram h = new Histogram(im);
    int thresholdValue = 0;
    final int numSteps = 1000;
    double best_eta = 0;
    int best_index = Integer.MAX_VALUE;
    // Otsu-style statistics are computed over nonzero pixels only
    int nonzerocounts = h.getTotalCounts() - h.getCounts(0);
    double meannonzero = h.getMeanNonzero();
    // per-greylevel cumulative fraction (omega), cumulative mean (mu), and separability (eta)
    ArrayRealVector omega_v = new ArrayRealVector(h.getMaxValue());
    ArrayRealVector mu_v = new ArrayRealVector(h.getMaxValue());
    ArrayRealVector eta_v = new ArrayRealVector(h.getMaxValue());
    int c = 0;
    if (adaptiveincrement) {
        // spread ~numSteps evaluation points across the image's grey range
        increment = (int) ((h.getMaxValue() - h.getMinValue() + 1) * 1.0 / numSteps);
        if (increment < 1)
            increment = 1;
    }
    for (int k = h.getMinValue(); k < h.getMaxValue() + 1; k += increment) {
        if (k == 0)
            continue;
        omega_v.setEntry(c, h.getCumulativeCounts(k) * 1.0 / nonzerocounts);
        if (c == 0) {
            mu_v.setEntry(c, k * omega_v.getEntry(c));
        } else {
            mu_v.setEntry(c, mu_v.getEntry(c - 1) + k * h.getCounts(k) * 1.0 / nonzerocounts);
            // fold in the greylevels skipped by a multi-level increment
            for (int i = k - increment + 1; i < k; i++) {
                mu_v.setEntry(c, mu_v.getEntry(c) + h.getCounts(i) * i * 1.0 / nonzerocounts);
            }
        }
        double omega = omega_v.getEntry(c);
        double mu = mu_v.getEntry(c);
        // only evaluate separability away from the degenerate all-background / all-foreground splits
        if (omega > 1e-8 && 1 - omega > 1e-8) {
            double eta = omega * (1 - omega) * Math.pow((meannonzero - mu) / (1 - omega) - mu / omega, 2);
            eta_v.setEntry(c, eta);
            if (eta >= best_eta) {
                best_eta = eta;
                best_index = k;
            }
        } else {
            eta_v.setEntry(c, 0);
        }
        c++;
    }
    // remember the plain global-maximum threshold as a fallback
    int orig_method_best_index = best_index;
    c = 1;
    ArrayList<Integer> maxima = new ArrayList<Integer>();
    Map<Integer, Integer> k_by_c = new HashMap<Integer, Integer>();
    Map<Integer, Integer> c_by_k = new HashMap<Integer, Integer>();
    for (int k = h.getMinValue() + 1; k < h.getMaxValue(); k += increment) {
        // detect if this is a local maximum; runs of equal eta values are skipped
        // over so a plateau edge still counts as a maximum
        k_by_c.put(c, k);
        c_by_k.put(k, c);
        int lastEntryNotEqual = c - 1;
        int nextEntryNotEqual = c + 1;
        while (lastEntryNotEqual > 0 && eta_v.getEntry(lastEntryNotEqual) == eta_v.getEntry(c)) {
            --lastEntryNotEqual;
        }
        while (nextEntryNotEqual < (eta_v.getDimension() - 1)
                && eta_v.getEntry(nextEntryNotEqual) == eta_v.getEntry(c)) {
            ++nextEntryNotEqual;
        }
        if (eta_v.getEntry(c) > eta_v.getEntry(lastEntryNotEqual)
                && eta_v.getEntry(c) > eta_v.getEntry(nextEntryNotEqual)) {
            maxima.add(k);
        }
        c++;
    }
    // now that we have maxima, try doing a gaussian fit to find the positions.
    // If there's only one, we need to guess at a second.
    // parameters: (amp0, pos0, sigma0, amp1, pos1, sigma1) for a double-gaussian model
    RealVector parameters = new ArrayRealVector(6, 0.0);
    int position0 = 0;
    int position1 = h.getMaxValue();
    if (maxima.size() > 1) {
        // pick the two strongest local maxima (by eta) as the initial peak positions
        double best_max = 0;
        double second_best_max = 0;
        int best_pos = 0;
        int second_best_pos = 0;
        for (Integer k : maxima) {
            int ck = c_by_k.get(k);
            double eta_k = eta_v.getEntry(ck);
            if (eta_k > best_max) {
                second_best_max = best_max;
                second_best_pos = best_pos;
                best_max = eta_k;
                best_pos = ck;
            } else if (eta_k > second_best_max) {
                second_best_max = eta_k;
                second_best_pos = ck;
            }
        }
        position0 = best_pos;
        position1 = second_best_pos;
    } else {
        // single maximum: guess the second peak halfway to the end of the eta curve
        position0 = c_by_k.get(maxima.get(0));
        position1 = (eta_v.getDimension() - position0) / 2 + position0;
    }
    // make sure that position 1 is larger than position 0
    if (position1 < position0) {
        int temp = position0;
        position0 = position1;
        position1 = temp;
    }
    // initial width guess: a quarter of the peak separation
    double s = (position1 - position0) / 4.0;
    parameters.setEntry(0, eta_v.getEntry(position0));
    parameters.setEntry(1, position0);
    parameters.setEntry(2, s);
    parameters.setEntry(3, eta_v.getEntry(position1));
    parameters.setEntry(4, position1);
    parameters.setEntry(5, s);
    DoubleGaussianObjectiveFunction dgof = new DoubleGaussianObjectiveFunction();
    dgof.setEta(eta_v);
    NelderMeadMinimizer nmm = new NelderMeadMinimizer();
    RealVector result = nmm.optimize(dgof, parameters);
    // entry 4 is the fitted position of the second (higher-greylevel) gaussian
    best_index = (int) result.getEntry(4);
    if (k_by_c.containsKey(best_index)) {
        best_index = k_by_c.get(best_index);
    } else {
        // fall back to the normal global maximum if the fitting seems to have found an invalid value.
        best_index = orig_method_best_index;
    }
    thresholdValue = best_index;
    if (thresholdValue == Integer.MAX_VALUE) {
        thresholdValue = 0;
    }
    // zero out every pixel below the chosen threshold, overwriting the input image
    for (ImageCoordinate coord : im) {
        if (im.getValue(coord) < thresholdValue)
            im.setValue(coord, 0);
    }
}
From source file:com.gordoni.opal.UtilityJoinSlope.java
/**
 * Joins two utility functions over [c1, c2] by fitting their slope with either a
 * cubic (w*c^3 + x*c^2 + y*c + z, matching slope and slope2 at both endpoints)
 * or a straight line, depending on join_function. When c1 >= c2 the join
 * polynomial is identically zero.
 *
 * @param config        configuration object, stored for later use
 * @param join_function one of "slope-cubic-monotone", "slope-cubic-smooth", "slope-linear"
 * @param utility1      utility applicable at/below c1
 * @param utility2      utility applicable at/above c2
 * @param c1            lower join point
 * @param c2            upper join point
 */
public UtilityJoinSlope(Config config, String join_function, Utility utility1, Utility utility2, double c1,
        double c2) {
    this.config = config;
    this.utility1 = utility1;
    this.utility2 = utility2;
    this.c1 = c1;
    this.c2 = c2;
    if (c1 < c2) {
        if (join_function.equals("slope-cubic-monotone") || join_function.equals("slope-cubic-smooth")) {
            // Hermite-style system: rows 1-2 match the cubic's value (= slope of the
            // joined utility) at c1 and c2, rows 3-4 match its derivative (= slope2).
            RealMatrix a = new Array2DRowRealMatrix(
                    new double[][] { { c1 * c1 * c1, c1 * c1, c1, 1 }, { c2 * c2 * c2, c2 * c2, c2, 1 },
                            { 3 * c1 * c1, 2 * c1, 1, 0 }, { 3 * c2 * c2, 2 * c2, 1, 0 } });
            double s1 = utility1.slope(c1);
            double s2 = utility2.slope(c2);
            double s2_1 = utility1.slope2(c1);
            double s2_2 = utility2.slope2(c2);
            // secant of the slope between the endpoints; negative for a concave utility
            double secant = (s2 - s1) / (c2 - c1);
            assert (secant < 0);
            // normalized endpoint derivatives used by the cubic-monotonicity test
            double alpha = s2_1 / secant;
            double beta = s2_2 / secant;
            if (!monotone(alpha, beta)) {
                assert (!join_function.equals("slope-cubic-smooth"));
                // Guarantee monotonicity at the cost of a discontinuity in slope2 space.
                s2_1 = Math.max(s2_1, 3 * secant); // Can do better (see paper), but this is good enough.
                alpha = s2_1 / secant;
                if (!monotone(alpha, beta)) {
                    s2_2 = 3 * secant;
                    beta = s2_2 / secant;
                }
            }
            RealVector b = new ArrayRealVector(new double[] { s1, s2, s2_1, s2_2 });
            DecompositionSolver solver = new LUDecomposition(a).getSolver();
            RealVector solution = solver.solve(b);
            // cubic coefficients, highest power first
            this.w = solution.getEntry(0);
            this.x = solution.getEntry(1);
            this.y = solution.getEntry(2);
            this.z = solution.getEntry(3);
        } else if (join_function.equals("slope-linear")) {
            // linear interpolation of the slope between the endpoints
            this.w = 0;
            this.x = 0;
            this.y = (utility2.slope(c2) - utility1.slope(c1)) / (c2 - c1);
            this.z = utility1.slope(c1) - this.y * c1;
        } else
            assert (false);
    } else {
        // degenerate interval: no join region
        this.w = 0;
        this.x = 0;
        this.y = 0;
        this.z = 0;
    }
    // offsets that make the joined utility continuous at both endpoints
    this.zero1 = utility(c1) - utility1.utility(c1);
    this.zero2 = utility2.utility(c2) - utility(c2);
    this.u1 = utility(c1);
    this.u2 = utility(c2);
    set_constants();
}
From source file:com.datumbox.common.dataobjects.MatrixDataset.java
public static RealVector parseRecord(Record r, Map<Object, Integer> featureIdsReference) { if (featureIdsReference.isEmpty()) { throw new RuntimeException("The featureIdsReference map should not be empty."); }/*from w w w. j a va 2s . co m*/ int d = featureIdsReference.size(); RealVector v = new ArrayRealVector(d); boolean addConstantColumn = featureIdsReference.containsKey(Dataset.constantColumnName); if (addConstantColumn) { v.setEntry(0, 1.0); //add the constant column } for (Map.Entry<Object, Object> entry : r.getX().entrySet()) { Object feature = entry.getKey(); Double value = Dataset.toDouble(entry.getValue()); if (value != null) { Integer featureId = featureIdsReference.get(feature); if (featureId != null) {//if the feature exists in our database v.setEntry(featureId, value); } } else { //else the X matrix maintains the 0.0 default value } } return v; }
From source file:iDynoOptimizer.MOEAFramework26.src.org.moeaframework.core.operator.real.AdaptiveMetropolis.java
/**
 * Produces offspring via an adaptive Metropolis step: estimates the parents'
 * covariance, Cholesky-factorizes it scaled by the jump rate, and perturbs a
 * randomly chosen parent with correlated Gaussian noise, clipping each variable
 * to its bounds. Returns an empty array if the decomposition fails (e.g. the
 * covariance matrix is not positive definite).
 */
@Override
public Solution[] evolve(Solution[] parents) {
    int k = parents.length;
    int n = parents[0].getNumberOfVariables();

    // stack the parents' real-valued variables into a k x n matrix
    RealMatrix x = new Array2DRowRealMatrix(k, n);
    for (int i = 0; i < k; i++) {
        x.setRow(i, EncodingUtils.getReal(parents[i]));
    }

    try {
        // perform Cholesky factorization and get the upper triangular matrix;
        // jumpRate scales the covariance per the adaptive Metropolis schedule
        double jumpRate = Math.pow(jumpRateCoefficient / Math.sqrt(n), 2.0);
        RealMatrix chol = new CholeskyDecomposition(
                new Covariance(x.scalarMultiply(jumpRate)).getCovarianceMatrix()).getLT();

        // produce the offspring
        Solution[] offspring = new Solution[numberOfOffspring];

        for (int i = 0; i < numberOfOffspring; i++) {
            Solution child = parents[PRNG.nextInt(parents.length)].copy();

            // apply adaptive metropolis step to solution:
            // child + L^T . (standard normal vector) gives correlated noise
            RealVector muC = new ArrayRealVector(EncodingUtils.getReal(child));
            RealVector ru = new ArrayRealVector(n);
            for (int j = 0; j < n; j++) {
                ru.setEntry(j, PRNG.nextGaussian());
            }
            double[] variables = muC.add(chol.preMultiply(ru)).toArray();

            // assign variables back to solution, clipping to each variable's bounds
            for (int j = 0; j < n; j++) {
                RealVariable variable = (RealVariable) child.getVariable(j);
                double value = variables[j];

                if (value < variable.getLowerBound()) {
                    value = variable.getLowerBound();
                } else if (value > variable.getUpperBound()) {
                    value = variable.getUpperBound();
                }

                variable.setValue(value);
            }

            offspring[i] = child;
        }

        return offspring;
    } catch (Exception e) {
        // NOTE(review): broad catch is a deliberate best-effort fallback — a failed
        // Cholesky decomposition simply yields no offspring — but it also hides any
        // other error; consider catching the specific math exceptions instead.
        return new Solution[0];
    }
}
From source file:lambertmrev.Lambert.java
/** Constructs and solves a Lambert problem.
 *
 * \param[in] R1 first cartesian position
 * \param[in] R2 second cartesian position
 * \param[in] tof time of flight
 * \param[in] mu gravity parameter
 * \param[in] cw when 1 a retrograde orbit is assumed
 * \param[in] multi_revs maximum number of multirevolutions to compute
 */
public void lambert_problem(Vector3D r1, Vector3D r2, double tof, double mu, Boolean cw, int multi_revs) {
    // sanity checks (note: these only log; they do not abort the computation)
    if (tof <= 0) {
        System.out.println("ToF is negative! \n");
    }
    if (mu <= 0) {
        System.out.println("mu is below zero");
    }
    // 1 - getting lambda and T
    // chord length between the two position vectors
    double m_c = FastMath.sqrt((r2.getX() - r1.getX()) * (r2.getX() - r1.getX())
            + (r2.getY() - r1.getY()) * (r2.getY() - r1.getY())
            + (r2.getZ() - r1.getZ()) * (r2.getZ() - r1.getZ()));
    double R1 = r1.getNorm();
    double R2 = r2.getNorm();
    // semi-perimeter of the triangle (r1, r2, chord)
    double m_s = (m_c + R1 + R2) / 2.0;
    Vector3D ir1 = r1.normalize();
    Vector3D ir2 = r2.normalize();
    Vector3D ih = Vector3D.crossProduct(ir1, ir2);
    ih = ih.normalize();
    if (ih.getZ() == 0) {
        System.out.println("angular momentum vector has no z component \n");
    }
    double lambda2 = 1.0 - m_c / m_s;
    double m_lambda = FastMath.sqrt(lambda2);
    Vector3D it1 = new Vector3D(0.0, 0.0, 0.0);
    Vector3D it2 = new Vector3D(0.0, 0.0, 0.0);
    if (ih.getZ() < 0.0) { // Transfer angle is larger than 180 degrees as seen from abive the z axis
        m_lambda = -m_lambda;
        it1 = Vector3D.crossProduct(ir1, ih);
        it2 = Vector3D.crossProduct(ir2, ih);
    } else {
        it1 = Vector3D.crossProduct(ih, ir1);
        it2 = Vector3D.crossProduct(ih, ir2);
    }
    // NOTE(review): Vector3D is immutable — normalize() returns a NEW vector, so
    // these two calls discard their results and it1/it2 stay un-normalized.
    // Likely intended: it1 = it1.normalize(); it2 = it2.normalize(); — confirm.
    it1.normalize();
    it2.normalize();
    if (cw) { // Retrograde motion
        m_lambda = -m_lambda;
        // NOTE(review): same issue — negate() returns a new vector; these results
        // are discarded, so it1/it2 are NOT actually negated here. Confirm intent.
        it1.negate();
        it2.negate();
    }
    double lambda3 = m_lambda * lambda2;
    // non-dimensional time of flight
    double T = FastMath.sqrt(2.0 * mu / m_s / m_s / m_s) * tof;
    // 2 - We now hava lambda, T and we will find all x
    // 2.1 - let us first detect the maximum number of revolutions for which there exists a solution
    int m_Nmax = FastMath.toIntExact(FastMath.round(T / FastMath.PI));
    double T00 = FastMath.acos(m_lambda) + m_lambda * FastMath.sqrt(1.0 - lambda2);
    double T0 = (T00 + m_Nmax * FastMath.PI);
    double T1 = 2.0 / 3.0 * (1.0 - lambda3);
    double DT = 0.0;
    double DDT = 0.0;
    double DDDT = 0.0;
    if (m_Nmax > 0) {
        if (T < T0) { // We use Halley iterations to find xM and TM
            int it = 0;
            double err = 1.0;
            double T_min = T0;
            double x_old = 0.0, x_new = 0.0;
            while (true) {
                ArrayRealVector deriv = dTdx(x_old, T_min, m_lambda);
                DT = deriv.getEntry(0);
                DDT = deriv.getEntry(1);
                DDDT = deriv.getEntry(2);
                if (DT != 0.0) {
                    // Halley step on dT/dx = 0
                    x_new = x_old - DT * DDT / (DDT * DDT - DT * DDDT / 2.0);
                }
                err = FastMath.abs(x_old - x_new);
                if ((err < 1e-13) || (it > 12)) {
                    break;
                }
                // NOTE(review): this overwrites the method parameter 'tof' but never
                // updates T_min, so the T_min > T test below always compares the
                // initial T0. Presumably T_min = x2tof(...) was intended — confirm
                // against the original pykep/keptoolbox implementation.
                tof = x2tof(x_new, m_Nmax, m_lambda);
                x_old = x_new;
                it++;
            }
            if (T_min > T) {
                m_Nmax -= 1;
            }
        }
    }
    // We exit this if clause with Mmax being the maximum number of revolutions
    // for which there exists a solution. We crop it to multi_revs
    m_Nmax = FastMath.min(multi_revs, m_Nmax);
    // 2.2 We now allocate the memory for the output variables
    m_v1 = MatrixUtils.createRealMatrix(m_Nmax * 2 + 1, 3);
    RealMatrix m_v2 = MatrixUtils.createRealMatrix(m_Nmax * 2 + 1, 3);
    RealMatrix m_iters = MatrixUtils.createRealMatrix(m_Nmax * 2 + 1, 3);
    ArrayRealVector m_x = new ArrayRealVector(m_Nmax * 2 + 1);
    // 3 - We may now find all solution in x,y
    // 3.1 0 rev solution
    // 3.1.1 initial guess
    if (T >= T00) {
        m_x.setEntry(0, -(T - T00) / (T - T00 + 4));
    } else if (T <= T1) {
        m_x.setEntry(0, T1 * (T1 - T) / (2.0 / 5.0 * (1 - lambda2 * lambda3) * T) + 1);
    } else {
        // 0.69314718... = ln(2)
        m_x.setEntry(0, FastMath.pow((T / T00), 0.69314718055994529 / FastMath.log(T1 / T00)) - 1.0);
    }
    // 3.1.2 Householder iterations
    m_x.setEntry(0, householder(T, m_x.getEntry(0), 0, 1e-5, 15, m_lambda));
    // 3.2 multi rev solutions
    double tmp;
    double x0;
    for (int i = 1; i < m_Nmax + 1; i++) {
        // 3.2.1 left householder iterations
        tmp = FastMath.pow((i * FastMath.PI + FastMath.PI) / (8.0 * T), 2.0 / 3.0);
        m_x.setEntry(2 * i - 1, (tmp - 1) / (tmp + 1));
        x0 = householder(T, m_x.getEntry(2 * i - 1), i, 1e-8, 15, m_lambda);
        m_x.setEntry(2 * i - 1, x0);
        // 3.2.1 right Householder iterations
        tmp = FastMath.pow((8.0 * T) / (i * FastMath.PI), 2.0 / 3.0);
        m_x.setEntry(2 * i, (tmp - 1) / (tmp + 1));
        x0 = householder(T, m_x.getEntry(2 * i), i, 1e-8, 15, m_lambda);
        m_x.setEntry(2 * i, x0);
    }
    // 4 - For each found x value we recontruct the terminal velocities
    double gamma = FastMath.sqrt(mu * m_s / 2.0);
    double rho = (R1 - R2) / m_c;
    double sigma = FastMath.sqrt(1 - rho * rho);
    double vr1, vt1, vr2, vt2, y;
    ArrayRealVector ir1_vec = new ArrayRealVector(3);
    ArrayRealVector ir2_vec = new ArrayRealVector(3);
    ArrayRealVector it1_vec = new ArrayRealVector(3);
    ArrayRealVector it2_vec = new ArrayRealVector(3);
    // set Vector3D values to a mutable type
    ir1_vec.setEntry(0, ir1.getX());
    ir1_vec.setEntry(1, ir1.getY());
    ir1_vec.setEntry(2, ir1.getZ());
    ir2_vec.setEntry(0, ir2.getX());
    ir2_vec.setEntry(1, ir2.getY());
    ir2_vec.setEntry(2, ir2.getZ());
    it1_vec.setEntry(0, it1.getX());
    it1_vec.setEntry(1, it1.getY());
    it1_vec.setEntry(2, it1.getZ());
    it2_vec.setEntry(0, it2.getX());
    it2_vec.setEntry(1, it2.getY());
    it2_vec.setEntry(2, it2.getZ());
    for (int i = 0; i < m_x.getDimension(); i++) {
        y = FastMath.sqrt(1.0 - lambda2 + lambda2 * m_x.getEntry(i) * m_x.getEntry(i));
        // radial and tangential velocity components at each terminal
        vr1 = gamma * ((m_lambda * y - m_x.getEntry(i)) - rho * (m_lambda * y + m_x.getEntry(i))) / R1;
        vr2 = -gamma * ((m_lambda * y - m_x.getEntry(i)) + rho * (m_lambda * y + m_x.getEntry(i))) / R2;
        double vt = gamma * sigma * (y + m_lambda * m_x.getEntry(i));
        vt1 = vt / R1;
        vt2 = vt / R2;
        for (int j = 0; j < 3; ++j)
            m_v1.setEntry(i, j, vr1 * ir1_vec.getEntry(j) + vt1 * it1_vec.getEntry(j));
        for (int j = 0; j < 3; ++j)
            m_v2.setEntry(i, j, vr2 * ir2_vec.getEntry(j) + vt2 * it2_vec.getEntry(j));
    }
}
From source file:com.joptimizer.functions.SOCPLogarithmicBarrier.java
/** * Create the barrier function for the Phase I. * It is an instance of this class for the constraints: * <br>||Ai.x+bi|| < ci.x+di+t, i=1,...,m * @see "S.Boyd and L.Vandenberghe, Convex Optimization, 11.6.2" *///w ww .jav a 2s .co m public BarrierFunction createPhase1BarrierFunction() { final int dimPh1 = dim + 1; List<SOCPConstraintParameters> socpConstraintParametersPh1List = new ArrayList<SOCPConstraintParameters>(); SOCPLogarithmicBarrier bfPh1 = new SOCPLogarithmicBarrier(socpConstraintParametersPh1List, dimPh1); for (int i = 0; i < socpConstraintParametersList.size(); i++) { SOCPConstraintParameters param = socpConstraintParametersList.get(i); RealMatrix A = param.getA(); RealVector b = param.getB(); RealVector c = param.getC(); double d = param.getD(); RealMatrix APh1 = MatrixUtils.createRealMatrix(A.getRowDimension(), dimPh1); APh1.setSubMatrix(A.getData(), 0, 0); RealVector bPh1 = b; RealVector cPh1 = new ArrayRealVector(c.getDimension() + 1); cPh1.setSubVector(0, c); cPh1.setEntry(c.getDimension(), 1); double dPh1 = d; SOCPConstraintParameters paramsPh1 = new SOCPConstraintParameters(APh1.getData(), bPh1.toArray(), cPh1.toArray(), dPh1); socpConstraintParametersPh1List.add(socpConstraintParametersPh1List.size(), paramsPh1); } return bfPh1; }
From source file:edu.utexas.cs.tactex.tariffoptimization.TariffOptimizierTOUFixedMargin.java
/**
 * Aggregates two energy-prediction records over all customers: one for the
 * energy of customers subscribed to our tariffs (shift-adjusted per tariff and
 * weighted by subscriber counts) and one for customers subscribed to
 * competitors (non-shifted estimates weighted by the remaining population).
 */
private TotalEnergyRecords sumTotalEnergy(HashMap<CustomerInfo, ArrayRealVector> customer2estimatedEnergy,
        HashMap<TariffSpecification, HashMap<CustomerInfo, Integer>> tariffSubscriptions,
        int currentTimeslot) {

    List<TariffSpecification> allSpecs = new ArrayList<TariffSpecification>();
    allSpecs.addAll(tariffSubscriptions.keySet());
    HashMap<CustomerInfo, HashMap<TariffSpecification, ShiftedEnergyData>> customer2ShiftedEnergy = estimateShiftedPredictions(
            customer2estimatedEnergy, allSpecs, currentTimeslot);

    int recordLength = customer2estimatedEnergy.values().iterator().next().getDimension();

    // my customers: shifted energy per (tariff, customer) scaled by subscriber count
    // (we currently predict cost by total amount of energy)
    ArrayRealVector myCustomersEnergy = new ArrayRealVector(recordLength);
    for (Entry<TariffSpecification, HashMap<CustomerInfo, Integer>> bySpec : tariffSubscriptions.entrySet()) {
        TariffSpecification spec = bySpec.getKey();
        for (Entry<CustomerInfo, Integer> byCustomer : bySpec.getValue().entrySet()) {
            CustomerInfo customer = byCustomer.getKey();
            int subscribers = byCustomer.getValue();
            RealVector scaledEnergy = customer2ShiftedEnergy.get(customer).get(spec).getShiftedEnergy()
                    .mapMultiply(subscribers);
            myCustomersEnergy = myCustomersEnergy.add(scaledEnergy);
        }
    }

    // competitor energy prediction: remaining population times non-shifted estimate
    ArrayRealVector competitorsEnergy = new ArrayRealVector(recordLength);
    HashMap<CustomerInfo, HashMap<TariffSpecification, Integer>> customerSubscriptions = BrokerUtils
            .revertKeyMapping(tariffSubscriptions);
    for (CustomerInfo cust : customerSubscriptions.keySet()) {
        double subsToOthers = cust.getPopulation()
                - BrokerUtils.sumMapValues(customerSubscriptions.get(cust));
        RealVector nonShiftedEnergy = customer2estimatedEnergy.get(cust).mapMultiply(subsToOthers);
        competitorsEnergy = competitorsEnergy.add(nonShiftedEnergy);
    }

    log.debug("predictedMyCustomersEnergy =" + myCustomersEnergy.toString());
    log.debug("predictedCompetitorsEnergyRecord =" + competitorsEnergy.toString());
    return new TotalEnergyRecords(myCustomersEnergy, competitorsEnergy);
}
From source file:edu.duke.cs.osprey.tupexp.IterativeCGTupleFitter.java
RealVector calcRHS() {//Calculate right-hand side vector of normal equations double atb[] = new double[numTup]; //apply A^T to true vals for (int s = 0; s < numSamp; s++) { double curTarget = getCurTarget(s); if (!Double.isNaN(curTarget)) {//restraint active for sample ArrayList<Integer> sampTup = tupIndMat.calcSampleTuples(samples.get(s)); for (int t : sampTup) atb[t] += curTarget;//from w w w . j a v a2 s.c o m } } //damping. Slightly penalizes changes from curCoeffs if (curCoeffs != null) { for (int t = 0; t < numTup; t++) atb[t] += damperLambda * curCoeffs.getEntry(t); } Atb = new ArrayRealVector(atb); return Atb; }