Example usage for the org.apache.commons.math3.fitting.WeightedObservedPoint constructor

Introduction

On this page you can find example usages of the org.apache.commons.math3.fitting.WeightedObservedPoint constructor, collected from open-source projects.

Prototype

public WeightedObservedPoint(final double weight, final double x, final double y) 

Documentation

Simple constructor. The arguments are the weight of the observation in the fitting process, the abscissa x of the measurement, and the observed value y.
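
Before the project examples, here is a minimal self-contained sketch of typical use (assuming Commons Math 3.3 or later; the class name and data values below are invented for illustration): each observation wraps a weight, an x value and a y value, and the resulting collection is passed to one of the curve fitters in the same package.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.math3.fitting.PolynomialCurveFitter;
import org.apache.commons.math3.fitting.WeightedObservedPoint;

public class WeightedObservedPointDemo {

    public static void main(String[] args) {
        // build a few observations with unit weight; x and y are arbitrary example data
        double[] x = { 0.0, 1.0, 2.0, 3.0 };
        double[] y = { 1.1, 2.9, 9.2, 19.5 };
        List<WeightedObservedPoint> points = new ArrayList<>();
        for (int i = 0; i < x.length; i++) {
            points.add(new WeightedObservedPoint(1.0, x[i], y[i]));
        }

        // fit a quadratic polynomial to the weighted observations
        double[] coefficients = PolynomialCurveFitter.create(2).fit(points);
        System.out.println(Arrays.toString(coefficients));
    }
}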

Usage

From source file:be.ugent.maf.cellmissy.analysis.doseresponse.impl.SigmoidFitterImpl.java

@Override
public void fitTopConstrain(List<DoseResponsePair> dataToFit, SigmoidFittingResultsHolder resultsHolder,
        Double topConstrain, int standardHillslope) {
    final Double top = topConstrain;

    //initial parameter values for fitting: lowest y, middle x and standard hillslope
    double[] yValues = AnalysisUtils.generateYValues(dataToFit);
    double[] xValues = AnalysisUtils.generateXValues(dataToFit);
    double initialBottom = yValues[0];
    double initialLogEC50;
    double maxX = xValues[0];
    double minX = xValues[0];
    for (int i = 0; i < yValues.length; i++) {
        if (yValues[i] < initialBottom) {
            initialBottom = yValues[i];
        }
        if (xValues[i] < minX) {
            minX = xValues[i];
        } else if (xValues[i] > maxX) {
            maxX = xValues[i];
        }
    }
    initialLogEC50 = (maxX + minX) / 2;

    final double[] initialGuesses = new double[] { initialBottom, initialLogEC50, standardHillslope };

    //add all data points to the collection with standard weight 1.0
    Collection<WeightedObservedPoint> observations = new ArrayList<>();
    for (int i = 0; i < xValues.length; i++) {
        observations.add(new WeightedObservedPoint(1.0, xValues[i], yValues[i]));
    }

    final ParametricUnivariateFunction function = new ParametricUnivariateFunction() {
        /**
         * @param conc The concentration of the drug, log transformed
         * @param parameters The fitted parameters (bottom, logEC50 and
         * hillslope)
         * @return The velocity
         */
        @Override
        public double value(double conc, double[] parameters) {
            double bottom = parameters[0];
            double logEC50 = parameters[1];
            double hillslope = parameters[2];

            return (bottom + (top - bottom) / (1 + Math.pow(10, (logEC50 - conc) * hillslope)));
        }

        @Override
        public double[] gradient(double conc, double[] parameters) {
            double bottom = parameters[0];
            double logEC50 = parameters[1];
            double hillslope = parameters[2];

            return new double[] { 1 - (1 / ((Math.pow(10, (logEC50 - conc) * hillslope)) + 1)),
                    (hillslope * Math.log(10) * Math.pow(10, hillslope * (logEC50 + conc)) * (bottom - top))
                            / (Math.pow(Math.pow(10, hillslope * conc) + Math.pow(10, hillslope * logEC50), 2)),
                    (Math.log(10) * (logEC50 - conc) * (bottom - top)
                            * Math.pow(10, (logEC50 + conc) * hillslope))
                            / Math.pow((Math.pow(10, logEC50 * hillslope) + Math.pow(10, hillslope * conc)), 2)

            };

        }

    };

    //set up the fitter with the observations and function created above
    DoseResponseAbstractCurveFitter fitter = new DoseResponseAbstractCurveFitter() {

        @Override
        protected LeastSquaresProblem getProblem(Collection<WeightedObservedPoint> observations) {
            // Prepare least-squares problem.
            final int len = observations.size();
            final double[] target = new double[len];
            final double[] weights = new double[len];

            int i = 0;
            for (final WeightedObservedPoint obs : observations) {
                target[i] = obs.getY();
                weights[i] = obs.getWeight();
                ++i;
            }

            final AbstractCurveFitter.TheoreticalValuesFunction model = new AbstractCurveFitter.TheoreticalValuesFunction(
                    function, observations);

            // build a least-squares problem set up to fit the dose-response curve to the observed points
            return new LeastSquaresBuilder().maxEvaluations(Integer.MAX_VALUE).maxIterations(Integer.MAX_VALUE)
                    .start(initialGuesses).target(target).weight(new DiagonalMatrix(weights))
                    .model(model.getModelFunction(), model.getModelFunctionJacobian()).build();
        }
    };

    OptimumImpl bestFit = fitter.performRegression(observations);
    double[] params = bestFit.getPoint().toArray();
    double bottom = params[0];
    double logEC50 = params[1];
    double hillslope = params[2];

    resultsHolder.setBottom(bottom);
    resultsHolder.setTop(top);
    resultsHolder.setLogEC50(logEC50);
    resultsHolder.setHillslope(hillslope);
    //top parameter was constrained
    List<String> constrainedParam = new ArrayList<>();
    constrainedParam.add("top");
    resultsHolder.setConstrainedParameters(constrainedParam);
    resultsHolder.setCovariances(bestFit.getCovariances(0).getData());
}
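
The project-specific DoseResponseAbstractCurveFitter and OptimumImpl used above are not part of Commons Math. When no custom LeastSquaresProblem is required, roughly the same top-constrained fit can be run with the stock SimpleCurveFitter (available since Commons Math 3.4). The sketch below is a self-contained variant under that assumption: the dose-response data are invented, and the model and gradient are the same as in the example above.

import java.util.ArrayList;
import java.util.List;

import org.apache.commons.math3.analysis.ParametricUnivariateFunction;
import org.apache.commons.math3.fitting.SimpleCurveFitter;
import org.apache.commons.math3.fitting.WeightedObservedPoint;

public class SigmoidSimpleCurveFitterSketch {

    public static void main(String[] args) {
        final double top = 100.0; // constrained top, as in fitTopConstrain above

        // hypothetical dose-response data: log-transformed concentration (x) and response (y)
        double[] logConc = { -9.0, -8.0, -7.0, -6.0, -5.0 };
        double[] response = { 4.0, 12.0, 48.0, 88.0, 97.0 };

        // wrap every data point with standard weight 1.0, as in the example above
        List<WeightedObservedPoint> observations = new ArrayList<>();
        for (int i = 0; i < logConc.length; i++) {
            observations.add(new WeightedObservedPoint(1.0, logConc[i], response[i]));
        }

        // same top-constrained sigmoid (value and gradient) as in the example above
        ParametricUnivariateFunction sigmoid = new ParametricUnivariateFunction() {
            @Override
            public double value(double conc, double[] p) {
                double bottom = p[0], logEC50 = p[1], hillslope = p[2];
                return bottom + (top - bottom) / (1 + Math.pow(10, (logEC50 - conc) * hillslope));
            }

            @Override
            public double[] gradient(double conc, double[] p) {
                double bottom = p[0], logEC50 = p[1], hillslope = p[2];
                double denom = Math.pow(Math.pow(10, hillslope * conc) + Math.pow(10, hillslope * logEC50), 2);
                return new double[] {
                        1 - 1 / (Math.pow(10, (logEC50 - conc) * hillslope) + 1),
                        hillslope * Math.log(10) * Math.pow(10, hillslope * (logEC50 + conc)) * (bottom - top) / denom,
                        Math.log(10) * (logEC50 - conc) * (bottom - top) * Math.pow(10, (logEC50 + conc) * hillslope) / denom };
            }
        };

        // initial guesses: bottom, logEC50, hillslope
        double[] initialGuesses = { 0.0, -7.0, 1.0 };
        double[] bestFit = SimpleCurveFitter.create(sigmoid, initialGuesses).fit(observations);
        System.out.printf("bottom=%.3f logEC50=%.3f hillslope=%.3f%n", bestFit[0], bestFit[1], bestFit[2]);
    }
}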

From source file:ffx.utilities.BlockAverager.java

/**
 * Compute the statistical uncertainty of G in each histogram bin and
 * overall. Loop over increasing values of block size. For each, calculate
 * the block means and their standard deviation. Then limit(blockStdErr,
 * blockSize to entireTraj) == trajStdErr.
 *
 * @return aggregate standard error of the total free energy change
 */
public double[] computeBinUncertainties() {
    double[][] sems = new double[numBins][maxBlockSize + 1];
    BinDev[][] binStDevs = new BinDev[numBins][maxBlockSize + 1];

    List<WeightedObservedPoint>[] obsDev = new ArrayList[numBins];
    List<WeightedObservedPoint>[] obsErr = new ArrayList[numBins];

    for (int binIndex = 0; binIndex < numBins; binIndex++) {
        logger.info(format(" Computing stdError for bin %d...", binIndex));
        obsDev[binIndex] = new ArrayList<>();
        obsErr[binIndex] = new ArrayList<>();
        for (int blockSize = 1; blockSize <= maxBlockSize; blockSize += blockSizeStep) {
            int numBlocks = (int) Math.floor(numObs / blockSize);
            binStDevs[binIndex][blockSize] = new BinDev(binIndex, blockSize);
            sems[binIndex][blockSize] = binStDevs[binIndex][blockSize].stdev / Math.sqrt(numBlocks - 1);
            obsDev[binIndex]
                    .add(new WeightedObservedPoint(1.0, blockSize, binStDevs[binIndex][blockSize].stdev));
            obsErr[binIndex].add(new WeightedObservedPoint(1.0, blockSize, sems[binIndex][blockSize]));
            if (TEST) {
                logger.info(format("  bin,blockSize,stdev,sem: %d %d %.6g %.6g", binIndex, blockSize,
                        binStDevs[binIndex][blockSize].stdev, sems[binIndex][blockSize]));
            }
        }
    }

    // Fit a function to (blockSize v. stdError) and extrapolate to blockSize == entire trajectory.
    // This is our correlation-corrected estimate of the std error for this lambda bin.
    stdError = new double[numBins];
    for (int binIndex = 0; binIndex < numBins; binIndex++) {
        logger.info(format("\n Bin %d : fitting & extrapolating blockSize v. stdError", binIndex));
        if (fitter == FITTER.POLYNOMIAL) {
            // Fit a polynomial (shitty).
            double[] coeffsPoly = PolynomialCurveFitter.create(polyDegree).fit(obsErr[binIndex]);
            PolynomialFunction poly = new PolynomialFunction(coeffsPoly);
            logger.info(format("    Poly %d:   %12.10g     %s", polyDegree, poly.value(numObs),
                    Arrays.toString(poly.getCoefficients())));
        } else if (fitter == FITTER.POWER) {
            // Fit a power function (better).
            double[] coeffsPow = powerFit(obsErr[binIndex]);
            double powerExtrapolated = coeffsPow[0] * pow(numObs, coeffsPow[1]);
            logger.info(format("    Power:     %12.10g     %s", powerExtrapolated, Arrays.toString(coeffsPow)));
        }
        // Fit a log function (best).
        double[] logCoeffs = logFit(obsErr[binIndex]);
        double logExtrap = logCoeffs[0] + logCoeffs[1] * log(numObs);
        logger.info(format("    Log sem:   %12.10g     Residual: %12.10g     Coeffs: %6.4f, %6.4f \n",
                logExtrap, logCoeffs[0], logCoeffs[1]));

        // Also try fitting a linear function for the case of uncorrelated or extremely well-converged data.
        double[] linearCoef = linearFit(obsErr[binIndex]);
        double linearExtrap = linearCoef[0] + linearCoef[1] * numObs;
        logger.info(format("    Lin. sem:  %12.10g     Residual: %12.10g     Coeffs: %6.4f, %6.4f \n",
                linearExtrap, linearCoef[0], linearCoef[1]));

        stdError[binIndex] = logExtrap;
    }
    return stdError;
}
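
powerFit, logFit, and linearFit above are BlockAverager helpers whose implementations are not shown on this page. For the log model y = a + b * ln(x) used in the extrapolation, one possible shape for such a helper, built on the same Commons Math fitting API (a sketch assuming Commons Math 3.4+ for SimpleCurveFitter, not the project's actual code), is:

import java.util.List;

import org.apache.commons.math3.analysis.ParametricUnivariateFunction;
import org.apache.commons.math3.fitting.SimpleCurveFitter;
import org.apache.commons.math3.fitting.WeightedObservedPoint;

public final class LogFitSketch {

    /** Fits y = a + b * ln(x) to the observations and returns {a, b}. */
    public static double[] logFit(List<WeightedObservedPoint> points) {
        ParametricUnivariateFunction logModel = new ParametricUnivariateFunction() {
            @Override
            public double value(double x, double[] p) {
                return p[0] + p[1] * Math.log(x);
            }

            @Override
            public double[] gradient(double x, double[] p) {
                // partial derivatives with respect to a and b
                return new double[] { 1.0, Math.log(x) };
            }
        };
        // start from a = 0, b = 1; block sizes start at 1, so ln(x) is always defined
        return SimpleCurveFitter.create(logModel, new double[] { 0.0, 1.0 }).fit(points);
    }
}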

From source file:be.ugent.maf.cellmissy.analysis.doseresponse.impl.SigmoidFitterImpl.java

@Override
public void fitBotTopConstrain(List<DoseResponsePair> dataToFit, SigmoidFittingResultsHolder resultsHolder,
        Double bottomConstrain, Double topConstrain, int standardHillslope) {

    final Double bottom = bottomConstrain;
    final Double top = topConstrain;

    //initial parameter values for fitting: middle x and standard hillslope
    double[] xValues = AnalysisUtils.generateXValues(dataToFit);
    double[] yValues = AnalysisUtils.generateYValues(dataToFit);
    double initialLogEC50;
    double maxX = xValues[0];
    double minX = xValues[0];
    for (int i = 0; i < xValues.length; i++) {
        if (xValues[i] < minX) {
            minX = xValues[i];
        } else if (xValues[i] > maxX) {
            maxX = xValues[i];
        }
    }
    initialLogEC50 = (maxX + minX) / 2;

    final double[] initialGuesses = new double[] { initialLogEC50, standardHillslope };

    //add all data points to the collection with standard weight 1.0
    Collection<WeightedObservedPoint> observations = new ArrayList<>();
    for (int i = 0; i < xValues.length; i++) {
        observations.add(new WeightedObservedPoint(1.0, xValues[i], yValues[i]));
    }

    final ParametricUnivariateFunction function = new ParametricUnivariateFunction() {
        /**
         * @param conc The concentration of the drug, log transformed
         * @param parameters The fitted parameters (logEC50 and
         * hillslope)
         * @return The velocity
         */
        @Override
        public double value(double conc, double[] parameters) {
            double logEC50 = parameters[0];
            double hillslope = parameters[1];

            return (bottom + (top - bottom) / (1 + Math.pow(10, (logEC50 - conc) * hillslope)));
        }

        @Override
        public double[] gradient(double conc, double[] parameters) {
            double logEC50 = parameters[0];
            double hillslope = parameters[1];

            return new double[] {
                    (hillslope * Math.log(10) * Math.pow(10, hillslope * (logEC50 + conc)) * (bottom - top))
                            / (Math.pow(Math.pow(10, hillslope * conc) + Math.pow(10, hillslope * logEC50), 2)),
                    (Math.log(10) * (logEC50 - conc) * (bottom - top)
                            * Math.pow(10, (logEC50 + conc) * hillslope))
                            / Math.pow((Math.pow(10, logEC50 * hillslope) + Math.pow(10, hillslope * conc)), 2)

            };

        }

    };

    //set up the fitter with the observations and function created above
    DoseResponseAbstractCurveFitter fitter = new DoseResponseAbstractCurveFitter() {

        @Override
        protected LeastSquaresProblem getProblem(Collection<WeightedObservedPoint> observations) {
            // Prepare least-squares problem.
            final int len = observations.size();
            final double[] target = new double[len];
            final double[] weights = new double[len];

            int i = 0;
            for (final WeightedObservedPoint obs : observations) {
                target[i] = obs.getY();
                weights[i] = obs.getWeight();
                ++i;
            }

            final AbstractCurveFitter.TheoreticalValuesFunction model = new AbstractCurveFitter.TheoreticalValuesFunction(
                    function, observations);

            // build a least-squares problem set up to fit the dose-response curve to the observed points
            return new LeastSquaresBuilder().maxEvaluations(Integer.MAX_VALUE).maxIterations(Integer.MAX_VALUE)
                    .start(initialGuesses).target(target).weight(new DiagonalMatrix(weights))
                    .model(model.getModelFunction(), model.getModelFunctionJacobian()).build();
        }
    };

    OptimumImpl bestFit = fitter.performRegression(observations);
    //get best-fit parameters
    double[] params = bestFit.getPoint().toArray();
    double logEC50 = params[0];
    double hillslope = params[1];

    //set the values in the fitting results holder
    resultsHolder.setBottom(bottom);
    resultsHolder.setTop(top);
    resultsHolder.setLogEC50(logEC50);
    resultsHolder.setHillslope(hillslope);
    //bottom and top parameter were constrained
    List<String> constrainedParam = new ArrayList<>();
    constrainedParam.add("bottom");
    constrainedParam.add("top");
    resultsHolder.setConstrainedParameters(constrainedParam);
    resultsHolder.setCovariances(bestFit.getCovariances(0).getData());
}

From source file:io.warp10.continuum.gts.GTSHelper.java

/**
 * Compute local weighted regression at given tick
 *
 * @param gts      : input GTS
 * @param idx      : index of the first non-null neighbour to the right
 * @param tick     : tick at which lowess is achieved
 * @param q        : bandwidth, i.e. number of nearest neighbours to consider
 * @param p        : degree of polynomial fit
 * @param weights  : optional array that stores the weights
 * @param rho      : optional array that stores the robustness weights
 * @param beta     : optional array that stores the regression parameters
 * @param reversed : should idx be considered to be at the left instead
 */
public static double pointwise_lowess(GeoTimeSerie gts, int idx, long tick, int q, int p, double[] weights,
        double[] rho, double[] beta, boolean reversed) throws WarpScriptException {

    if (null != weights && q > weights.length || (null != rho && gts.values > rho.length)
            || (null != beta && p + 1 > beta.length)) {
        throw new WarpScriptException("Incoherent array lengths as input of pointwise_lowess");
    }
    /**
     * FIXME(JCV):
     * q = 3: 22 errors out of 100 points (at these points the value is almost equal to the value at q=2)
     * q = 4: 2 errors out of 100 points (at these points the value is almost equal to the value at q=3)
     * other q: 0 error
     * But it's not meant to be used with low value of q anyway.
     */

    //
    // Determine the 'q' closest values
    // We use two indices i and j for that, i moves forward, j moves backward from idx, until we
    // identified 'q' values (or 'n' if q > n)
    //

    int i = idx;
    int j = idx - 1;

    if (reversed) {
        i += 1;
        j += 1;
    }

    int count = 0;

    while (count < q) {
        long idist = Long.MAX_VALUE;
        long jdist = Long.MAX_VALUE;

        if (i < gts.values) {
            idist = Math.abs(tickAtIndex(gts, i) - tick);
        }

        if (j >= 0) {
            jdist = Math.abs(tickAtIndex(gts, j) - tick);
        }

        // If we exhausted the possible values
        if (Long.MAX_VALUE == idist && Long.MAX_VALUE == jdist) {
            break;
        }

        if (idist < jdist) {
            i++;
        } else {
            j--;
        }

        count++;
    }

    // The 'q' nearest values are between indices j and i (excluded)

    // Compute the maximum distance from 'tick'

    double maxdist = Math.max(j < -1 ? 0.0D : Math.abs(tickAtIndex(gts, j + 1) - tick),
            i <= 0 ? 0.0D : Math.abs(tickAtIndex(gts, i - 1) - tick));

    // Adjust maxdist if q > gts.values

    if (q > gts.values) {
        maxdist = (maxdist * q) / gts.values;
    }

    // Compute the weights

    // Reset the weights array
    if (null == weights) {
        weights = new double[q];
    } else {
        Arrays.fill(weights, 0.0D);
    }

    int widx = 0;

    double wsum = 0.0D;

    for (int k = j + 1; k < i; k++) {
        if (0 == maxdist) {
            weights[widx] = 1.0D;
        } else {
            double u = Math.abs(gts.ticks[k] - tick) / maxdist;

            if (u >= 1.0) {
                weights[widx] = 0.0D;
            } else {

                weights[widx] = 1.0D - u * u * u;

                double rho_ = 1.0D;
                if (null != rho) {
                    // In some cases all rho values are 0.0, which should be treated as "all rho are equal" (otherwise there is no regression at all).
                    // So when rho is zero we substitute a value small enough not to bias the result when the rho values are not all equal.
                    rho_ = 0.0D != rho[k] ? rho[k] : 0.000001D;
                }
                weights[widx] = rho_ * weights[widx] * weights[widx] * weights[widx];
            }
        }
        wsum += weights[widx];
        widx++;
    }

    // Regression parameters
    if (null == beta) {
        beta = new double[p + 1];
    }

    //
    // Linear polynomial fit
    //

    if (1 == p) {

        //
        // Compute weighted centroids for ticks and values
        //

        widx = 0;

        double ctick = 0.0D;
        double cvalue = 0.0D;

        for (int k = j + 1; k < i; k++) {
            ctick = ctick + weights[widx] * gts.ticks[k];
            cvalue = cvalue + weights[widx] * ((Number) valueAtIndex(gts, k)).doubleValue();
            widx++;
        }

        ctick = ctick / wsum;
        cvalue = cvalue / wsum;

        //
        // Compute weighted covariance and variance
        //

        double covar = 0.0D;
        double var = 0.0D;

        widx = 0;

        for (int k = j + 1; k < i; k++) {
            covar = covar + weights[widx] * (gts.ticks[k] - ctick)
                    * (((Number) valueAtIndex(gts, k)).doubleValue() - cvalue);
            var = var + weights[widx] * (gts.ticks[k] - ctick) * (gts.ticks[k] - ctick);
            widx++;
        }

        covar = covar / wsum;
        var = var / wsum;

        //
        // Compute regression parameters
        //

        beta[1] = 0 == var ? 0.0D : covar / var;
        beta[0] = cvalue - ctick * beta[1];

    } else {

        //
        // Quadratic-or-more polynomial fit
        //

        // filling the container with the points

        List<WeightedObservedPoint> observations = new ArrayList<WeightedObservedPoint>();

        widx = 0;
        for (int k = j + 1; k < i; k++) {
            WeightedObservedPoint point = new WeightedObservedPoint(weights[widx], (double) gts.ticks[k],
                    ((Number) valueAtIndex(gts, k)).doubleValue());
            observations.add(point);
            widx++;
        }

        PolynomialCurveFitter fitter = PolynomialCurveFitter.create(p);
        beta = fitter.fit(observations);
        observations.clear();

    }

    //
    // Compute value at 'tick'
    //

    double estimated = beta[0];
    double tmp = 1.0D;
    for (int u = 1; u < p + 1; u++) {
        tmp *= tick;
        estimated += tmp * beta[u];
    }

    return estimated;
}

From source file:org.orekit.attitudes.BodyCenterPointingTest.java

@Test
public void testQDot() throws OrekitException {

    Utils.setDataRoot("regular-data");
    final double ehMu = 3.9860047e14;
    final double ae = 6.378137e6;
    final double c20 = -1.08263e-3;
    final double c30 = 2.54e-6;
    final double c40 = 1.62e-6;
    final double c50 = 2.3e-7;
    final double c60 = -5.5e-7;
    final AbsoluteDate date = AbsoluteDate.J2000_EPOCH.shiftedBy(584.);
    final Vector3D position = new Vector3D(3220103., 69623., 6449822.);
    final Vector3D velocity = new Vector3D(6414.7, -2006., -3180.);
    final CircularOrbit initialOrbit = new CircularOrbit(new PVCoordinates(position, velocity),
            FramesFactory.getEME2000(), date, ehMu);

    EcksteinHechlerPropagator propagator = new EcksteinHechlerPropagator(initialOrbit, ae, ehMu, c20, c30, c40,
            c50, c60);
    propagator.setAttitudeProvider(earthCenterAttitudeLaw);

    List<WeightedObservedPoint> w0 = new ArrayList<WeightedObservedPoint>();
    List<WeightedObservedPoint> w1 = new ArrayList<WeightedObservedPoint>();
    List<WeightedObservedPoint> w2 = new ArrayList<WeightedObservedPoint>();
    List<WeightedObservedPoint> w3 = new ArrayList<WeightedObservedPoint>();
    for (double dt = -1; dt < 1; dt += 0.01) {
        Rotation rP = propagator.propagate(date.shiftedBy(dt)).getAttitude().getRotation();
        w0.add(new WeightedObservedPoint(1, dt, rP.getQ0()));
        w1.add(new WeightedObservedPoint(1, dt, rP.getQ1()));
        w2.add(new WeightedObservedPoint(1, dt, rP.getQ2()));
        w3.add(new WeightedObservedPoint(1, dt, rP.getQ3()));
    }

    double q0DotRef = PolynomialCurveFitter.create(2).fit(w0)[1];
    double q1DotRef = PolynomialCurveFitter.create(2).fit(w1)[1];
    double q2DotRef = PolynomialCurveFitter.create(2).fit(w2)[1];
    double q3DotRef = PolynomialCurveFitter.create(2).fit(w3)[1];

    Attitude a0 = propagator.propagate(date).getAttitude();
    double q0 = a0.getRotation().getQ0();
    double q1 = a0.getRotation().getQ1();
    double q2 = a0.getRotation().getQ2();
    double q3 = a0.getRotation().getQ3();
    double oX = a0.getSpin().getX();
    double oY = a0.getSpin().getY();
    double oZ = a0.getSpin().getZ();

    // first time-derivatives of the quaternion
    double q0Dot = 0.5 * MathArrays.linearCombination(-q1, oX, -q2, oY, -q3, oZ);
    double q1Dot = 0.5 * MathArrays.linearCombination(q0, oX, -q3, oY, q2, oZ);
    double q2Dot = 0.5 * MathArrays.linearCombination(q3, oX, q0, oY, -q1, oZ);
    double q3Dot = 0.5 * MathArrays.linearCombination(-q2, oX, q1, oY, q0, oZ);

    Assert.assertEquals(q0DotRef, q0Dot, 5.0e-9);
    Assert.assertEquals(q1DotRef, q1Dot, 5.0e-9);
    Assert.assertEquals(q2DotRef, q2Dot, 5.0e-9);
    Assert.assertEquals(q3DotRef, q3Dot, 5.0e-9);

}

From source file:org.orekit.utils.SecularAndHarmonic.java

/** Add a fitting point.
 * @param date date of the point
 * @param osculatingValue osculating value
 */
public void addPoint(final AbsoluteDate date, final double osculatingValue) {
    observedPoints.add(new WeightedObservedPoint(1.0, date.durationFrom(reference), osculatingValue));
}
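
When the caller does not want to manage the List<WeightedObservedPoint> itself, Commons Math also provides the WeightedObservedPoints accumulator, which builds the same list behind the scenes. A minimal sketch (the sample values are hypothetical):

import org.apache.commons.math3.fitting.PolynomialCurveFitter;
import org.apache.commons.math3.fitting.WeightedObservedPoints;

public class WeightedObservedPointsSketch {

    public static void main(String[] args) {
        WeightedObservedPoints obs = new WeightedObservedPoints();
        obs.add(0.0, 1.1);        // add(x, y) stores the point with weight 1.0
        obs.add(1.0, 3.0);
        obs.add(0.5, 2.0, 4.8);   // add(weight, x, y) for an explicitly weighted point

        // toList() yields the List<WeightedObservedPoint> expected by the fitters
        double[] lineCoefficients = PolynomialCurveFitter.create(1).fit(obs.toList());
        System.out.println(lineCoefficients[0] + " + " + lineCoefficients[1] + " * x");
    }
}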

From source file:org.orekit.utils.SecularAndHarmonic.java

/** Approximate an already fitted model to polynomial only terms.
 * <p>
 * This method is mainly used in order to combine the large amplitude long
 * periods with the secular part as a new approximate polynomial model over
 * some time range. This should be used rather than simply extracting the
 * polynomial coefficients from {@link #getFittedParameters()} when some
 * periodic terms amplitudes are large (for example Sun resonance effects
 * on local solar time in sun synchronous orbits). In these cases, the pure
 * polynomial secular part in the coefficients may be far from the mean model.
 * </p>
 * @param combinedDegree desired degree for the combined polynomial
 * @param combinedReference desired reference date for the combined polynomial
 * @param meanDegree degree of polynomial secular part to consider
 * @param meanHarmonics number of harmonics terms to consider
 * @param start start date of the approximation time range
 * @param end end date of the approximation time range
 * @param step sampling step
 * @return coefficients of the approximate polynomial (in increasing degree order),
 * using the user provided reference date
 */
public double[] approximateAsPolynomialOnly(final int combinedDegree, final AbsoluteDate combinedReference,
        final int meanDegree, final int meanHarmonics, final AbsoluteDate start, final AbsoluteDate end,
        final double step) {
    final List<WeightedObservedPoint> points = new ArrayList<WeightedObservedPoint>();
    for (AbsoluteDate date = start; date.compareTo(end) < 0; date = date.shiftedBy(step)) {
        points.add(new WeightedObservedPoint(1.0, date.durationFrom(combinedReference),
                meanValue(date, meanDegree, meanHarmonics)));
    }
    return PolynomialCurveFitter.create(combinedDegree).fit(points);
}

From source file:org.terracotta.statistics.derived.histogram.HistogramFittingTest.java

public static double[] fit(Histogram histogram, AbstractCurveFitter fitter) {
    return histogram.getBuckets().stream().map(b -> new WeightedObservedPoint(1.0,
            (b.maximum() + b.minimum()) / 2.0, b.count() / (b.maximum() - b.minimum())))
            .collect(collectingAndThen(toList(), fitter::fit));
}
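
A hypothetical call site for the helper above (histogram is assumed to be an existing Histogram instance, and fit is the method just shown): any Commons Math fitter extending AbstractCurveFitter can be passed in, for example a Gaussian or polynomial fitter.

// hypothetical usage of the fit(...) helper above
double[] gaussianParams = fit(histogram, GaussianCurveFitter.create());     // {norm, mean, sigma}
double[] quadraticParams = fit(histogram, PolynomialCurveFitter.create(2)); // polynomial coefficients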

From source file:ro.hasna.ts.math.representation.PiecewiseCurveFitterApproximation.java

@Override
public double[][] transform(double[] values) {
    int len = values.length;
    if (len < segments) {
        throw new ArrayLengthIsTooSmallException(len, segments, true);
    }

    int modulo = len % segments;
    double[][] reducedValues = new double[segments][];
    if (modulo == 0) {
        int segmentSize = len / segments;
        List<WeightedObservedPoint> segment = new ArrayList<>(segmentSize);
        for (int i = 0; i < segmentSize; i++) {
            segment.add(null);
        }

        int n = 0;
        for (int i = 0; i < len; i++) {
            int index = i % segmentSize;
            segment.set(index, new WeightedObservedPoint(1, index, values[i]));
            if (index + 1 == segmentSize) {
                reducedValues[n++] = curveFitter.fit(segment);
                if (n == segments)
                    break;
            }
        }
    } else {
        double segmentSize = len * 1.0 / segments;
        int k = 0;
        int segmentSizeReal = (int) FastMath.ceil(segmentSize) + 1;
        int index = 0;
        List<WeightedObservedPoint> segment = new ArrayList<>(segmentSizeReal);

        for (int i = 0; i < segments - 1; i++) {
            double x = (i + 1) * segmentSize - 1;

            for (; k < x; k++) {
                segment.add(new WeightedObservedPoint(1, index, values[k]));
                index++;
            }

            double delta = x - (int) x;
            if (!Precision.equals(delta, 0, TimeSeriesPrecision.EPSILON)) {
                segment.add(new WeightedObservedPoint(delta, index, values[k]));
            }
            reducedValues[i] = curveFitter.fit(segment);
            segment = new ArrayList<>(segmentSizeReal);
            index = 0;

            if (!Precision.equals(delta, 1, TimeSeriesPrecision.EPSILON)) {
                segment.add(new WeightedObservedPoint(1 - delta, index, values[k]));
                index++;
            }
            k++;
        }

        for (; k < len; k++) {
            segment.add(new WeightedObservedPoint(1, index, values[k]));
            index++;
        }
        reducedValues[segments - 1] = curveFitter.fit(segment);
    }

    return reducedValues;
}
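
The interesting detail above is the fractional-weight handling at segment boundaries: a sample that straddles two segments contributes to the left segment with weight delta and to the right segment with weight 1 - delta. A small self-contained sketch of that idea with made-up values:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.math3.fitting.PolynomialCurveFitter;
import org.apache.commons.math3.fitting.WeightedObservedPoint;

public class BoundaryWeightSketch {

    public static void main(String[] args) {
        // hypothetical segment boundary falling at 40% of a sample
        double delta = 0.4;
        double boundaryValue = 7.5;

        List<WeightedObservedPoint> leftSegment = new ArrayList<>();
        leftSegment.add(new WeightedObservedPoint(1, 0, 6.8));
        leftSegment.add(new WeightedObservedPoint(1, 1, 7.1));
        // the straddling sample contributes to the left segment with weight delta...
        leftSegment.add(new WeightedObservedPoint(delta, 2, boundaryValue));

        List<WeightedObservedPoint> rightSegment = new ArrayList<>();
        // ...and to the right segment with weight 1 - delta, re-indexed from 0
        rightSegment.add(new WeightedObservedPoint(1 - delta, 0, boundaryValue));
        rightSegment.add(new WeightedObservedPoint(1, 1, 8.0));
        rightSegment.add(new WeightedObservedPoint(1, 2, 8.4));

        // each segment is reduced to the coefficients of a degree-1 polynomial fit
        PolynomialCurveFitter fitter = PolynomialCurveFitter.create(1);
        System.out.println("left:  " + Arrays.toString(fitter.fit(leftSegment)));
        System.out.println("right: " + Arrays.toString(fitter.fit(rightSegment)));
    }
}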