Example usage for org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian: the ModelFunctionJacobian(MultivariateMatrixFunction) constructor

Introduction

On this page you can find example usage for the org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian constructor, ModelFunctionJacobian(MultivariateMatrixFunction).

Prototype

public ModelFunctionJacobian(MultivariateMatrixFunction j) 
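
The constructor wraps any MultivariateMatrixFunction whose value(double[]) method returns the Jacobian of the model at the given point. As a minimal, self-contained sketch (the two-parameter model below is illustrative only, not taken from the examples on this page):

import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
import org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian;

public class JacobianConstructionSketch {
    // Jacobian of the illustrative two-value model f(a, b) = [a + b, a * b]
    public static ModelFunctionJacobian twoParameterJacobian() {
        return new ModelFunctionJacobian(new MultivariateMatrixFunction() {
            public double[][] value(double[] point) {
                final double a = point[0];
                final double b = point[1];
                // Row i holds the partial derivatives of the i-th model value
                // with respect to each parameter.
                return new double[][] { { 1, 1 }, { b, a } };
            }
        });
    }
}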

Usage

From source file:de.tuberlin.uebb.jdae.solvers.OptimalitySolver.java

@Override
public double[] solve(int maxEval, MultivariateDifferentiableVectorFunction f, double[] startValue) {
    final double[] zeros = startValue.clone();
    final double[] ones = startValue.clone();
    Arrays.fill(zeros, 0.0);
    Arrays.fill(ones, 1.0);

    return optim.optimize(new MaxEval(maxEval), new InitialGuess(startValue), new Target(zeros),
            new Weight(ones), new ModelFunction(f), new ModelFunctionJacobian(new JacobianFunction(f)))
            .getPoint();
}

From source file:de.tuberlin.uebb.jdae.solvers.OptimalitySolver.java

public double[] solve(int maxEval, MultivariateVectorFunction residual, MultivariateMatrixFunction jacobian,
        double[] startValue) {
    final double[] zeros = startValue.clone();
    final double[] ones = startValue.clone();
    Arrays.fill(zeros, 0.0);
    Arrays.fill(ones, 1.0);

    return optim.optimize(new MaxEval(maxEval), new InitialGuess(startValue), new Target(zeros),
            new Weight(ones), new ModelFunction(residual), new ModelFunctionJacobian(jacobian)).getPoint();
}
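
Both overloads above solve f(x) = 0 by least squares: the zero-filled Target combined with a unit Weight makes the optimizer minimise the sum of squared residuals of f. A self-contained sketch of the same pattern with an explicit Jacobian (the 2x2 system and class name below are illustrative, not taken from OptimalitySolver):

import java.util.Arrays;

import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
import org.apache.commons.math3.analysis.MultivariateVectorFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointVectorValuePair;
import org.apache.commons.math3.optim.nonlinear.vector.ModelFunction;
import org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian;
import org.apache.commons.math3.optim.nonlinear.vector.Target;
import org.apache.commons.math3.optim.nonlinear.vector.Weight;
import org.apache.commons.math3.optim.nonlinear.vector.jacobian.LevenbergMarquardtOptimizer;

public class ZeroTargetSketch {
    public static void main(String[] args) {
        // Residuals of the system x^2 + y^2 - 4 = 0 and x - y = 0 (root near x = y = sqrt(2))
        MultivariateVectorFunction residual = new MultivariateVectorFunction() {
            public double[] value(double[] p) {
                return new double[] { p[0] * p[0] + p[1] * p[1] - 4, p[0] - p[1] };
            }
        };
        // Analytic Jacobian of the residuals
        MultivariateMatrixFunction jacobian = new MultivariateMatrixFunction() {
            public double[][] value(double[] p) {
                return new double[][] { { 2 * p[0], 2 * p[1] }, { 1, -1 } };
            }
        };

        LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
        PointVectorValuePair optimum = optimizer.optimize(new MaxEval(1000),
                new Target(new double[] { 0, 0 }), new Weight(new double[] { 1, 1 }),
                new InitialGuess(new double[] { 1, 1 }), new ModelFunction(residual),
                new ModelFunctionJacobian(jacobian));

        System.out.println(Arrays.toString(optimum.getPoint()));
    }
}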

From source file:gdsc.smlm.fitting.nonlinear.ApacheLVMFitter.java

public FitStatus fit(int n, double[] y, double[] y_fit, double[] a, double[] a_dev, double[] error,
        double noise) {
    numberOfFittedPoints = n;

    try {
        // Different convergence thresholds seem to have no effect on the resulting fit, only the number of
        // iterations for convergence
        final double initialStepBoundFactor = 100;
        final double costRelativeTolerance = 1e-10;
        final double parRelativeTolerance = 1e-10;
        final double orthoTolerance = 1e-10;
        final double threshold = Precision.SAFE_MIN;

        // Extract the parameters to be fitted
        final double[] initialSolution = getInitialSolution(a);

        // TODO - Pass in more advanced stopping criteria.

        // Create the target and weight arrays
        final double[] yd = new double[n];
        final double[] w = new double[n];
        for (int i = 0; i < n; i++) {
            yd[i] = y[i];
            w[i] = 1;
        }

        LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer(initialStepBoundFactor,
                costRelativeTolerance, parRelativeTolerance, orthoTolerance, threshold);
        PointVectorValuePair optimum = optimizer.optimize(new MaxIter(getMaxEvaluations()),
                new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunctionWrapper(f, a, n)),
                new ModelFunction(new MultivariateVectorFunctionWrapper(f, a, n)), new Target(yd),
                new Weight(w), new InitialGuess(initialSolution));

        final double[] parameters = optimum.getPoint();
        setSolution(a, parameters);
        iterations = optimizer.getIterations();
        evaluations = optimizer.getEvaluations();
        if (a_dev != null) {
            double[][] covar = optimizer.computeCovariances(parameters, threshold);
            setDeviations(a_dev, covar);
        }
        // Compute fitted function if desired 
        if (y_fit != null) {
            f.initialise(a);
            for (int i = 0; i < n; i++)
                y_fit[i] = f.eval(i);
        }

        residualSumOfSquares = error[0] = optimizer.getChiSquare();
        totalSumOfSquares = getSumOfSquares(n, y);
    } catch (TooManyEvaluationsException e) {
        return FitStatus.FAILED_TO_CONVERGE;
    } catch (ConvergenceException e) {
        // Occurs when QR decomposition fails - mark as a singular non-linear model (no solution)
        return FitStatus.SINGULAR_NON_LINEAR_MODEL;
    } catch (Exception e) {
        // TODO - Find out the other exceptions from the fitter and add return values to match. 
        return FitStatus.UNKNOWN;
    }

    return FitStatus.OK;
}

From source file:de.thkwalter.et.ortskurve.Ausgleichsproblem.java

/**
 * This method solves the least-squares problem.
 * 
 * @param startpunkt The starting point of the least-squares fit
 * @param ausgleichsproblemtyp The type of least-squares problem
 * 
 * @return The computed locus curve
 */
public Ortskurve ausgleichsproblemLoesen(double[] startpunkt, Ausgleichsproblemtyp ausgleichsproblemtyp) {
    // Declare the references to the model equations and the Jacobian matrix.
    MultivariateVectorFunction modellgleichungen = null;
    MultivariateMatrixFunction jakobiMatrix = null;

    // If the two-dimensional least-squares problem is to be solved, ...
    if (ausgleichsproblemtyp == Ausgleichsproblemtyp.ORTSKURVE_2d) {
        // Create the model equations (the circle equations).
        modellgleichungen = new Modellgleichungen2d(this.messpunkte);

        // Create the Jacobian matrix of the model equations (the circle equations).
        jakobiMatrix = new Jakobimatrix2d(this.messpunkte);
    }

    // If the three-dimensional least-squares problem is to be solved, ...
    else if (ausgleichsproblemtyp == Ausgleichsproblemtyp.ORTSKURVE_3d) {
        // Create the model equations (the circle equations).
        modellgleichungen = new Modellgleichungen(this.messpunkte);

        // Create the Jacobian matrix of the model equations (the circle equations).
        jakobiMatrix = new Jakobimatrix(this.messpunkte);
    }

    // Solve the least-squares problem, performing at most 200 iteration steps.
    double[] ortskurvenparameter = null;
    try {
        PointVectorValuePair endParameter = this.gaussNewtonOptimizer.optimize(new Weight(gewichte),
                new Target(zielwerte), new InitialGuess(startpunkt), new MaxEval(200),
                new ModelFunction(modellgleichungen), new ModelFunctionJacobian(jakobiMatrix));

        ortskurvenparameter = endParameter.getPoint();
    }

    // If the algorithm did not converge, catch the corresponding exception.
    catch (TooManyEvaluationsException e) {
        // Create and log the error message for the developer.
        String fehlermeldung = "Der Gauss-Newton-Algorithmus konvergiert nicht!";
        Ausgleichsproblem.logger.severe(fehlermeldung);

        // Create the exception and initialise it with the error message for the user.
        String jsfMeldung = "Der Gauss-Newton-Algorithmus zur Berechnung der Ortskurve konvergiert nicht! "
                + "Überprüfen Sie bitte, ob die eingegebenen Punkte annähernd auf einem Kreis liegen.";
        ApplicationRuntimeException applicationRuntimeException = new ApplicationRuntimeException(jsfMeldung);

        throw applicationRuntimeException;
    }

    // Declare the reference to the locus curve.
    Ortskurve ortskurve = null;

    // If the two-dimensional least-squares problem was solved, ...
    if (ausgleichsproblemtyp == Ausgleichsproblemtyp.ORTSKURVE_2d) {
        ortskurve = new Ortskurve(new Vector2D(ortskurvenparameter[0], 0.0), ortskurvenparameter[1]);
    }

    // If the three-dimensional least-squares problem was solved, ...
    else if (ausgleichsproblemtyp == Ausgleichsproblemtyp.ORTSKURVE_3d) {
        ortskurve = new Ortskurve(new Vector2D(ortskurvenparameter[0], ortskurvenparameter[1]),
                ortskurvenparameter[2]);
    }

    // Return the computed locus curve.
    return ortskurve;
}
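
The example above relies on a preconfigured gaussNewtonOptimizer field whose construction is not shown. One way to create such an optimizer in Commons Math 3, assuming a simple value-based convergence check (the tolerances below are illustrative, not taken from the original class):

import org.apache.commons.math3.optim.SimpleVectorValueChecker;
import org.apache.commons.math3.optim.nonlinear.vector.jacobian.GaussNewtonOptimizer;

public class GaussNewtonSketch {
    public static GaussNewtonOptimizer createOptimizer() {
        // useLU = true solves the normal equations with an LU decomposition;
        // the checker stops when successive model values agree to the given tolerances.
        return new GaussNewtonOptimizer(true, new SimpleVectorValueChecker(1e-10, 1e-10));
    }
}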

From source file:gdsc.smlm.fitting.BinomialFitter.java

/**
 * Fit the binomial distribution (n,p) to the cumulative histogram. Performs fitting assuming a fixed n value and
 * attempts to optimise p.
 * 
 * @param histogram
 *            The input histogram
 * @param mean
 *            The histogram mean (used to estimate p). Calculated if NaN.
 * @param n
 *            The n to evaluate
 * @param zeroTruncated
 *            True if the model should ignore n=0 (zero-truncated binomial)
 * @return The best fit (n, p)
 * @throws IllegalArgumentException
 *             If any of the input data values are negative
 * @throws IllegalArgumentException
 *             If fitting a zero-truncated binomial and there are no values above zero
 */
public PointValuePair fitBinomial(double[] histogram, double mean, int n, boolean zeroTruncated) {
    if (Double.isNaN(mean))
        mean = getMean(histogram);

    if (zeroTruncated && histogram[0] > 0) {
        log("Fitting zero-truncated histogram but there are zero values - Renormalising to ignore zero");
        double cumul = 0;
        for (int i = 1; i < histogram.length; i++)
            cumul += histogram[i];
        if (cumul == 0)
            throw new IllegalArgumentException(
                    "Fitting zero-truncated histogram but there are no non-zero values");
        histogram[0] = 0;
        for (int i = 1; i < histogram.length; i++)
            histogram[i] /= cumul;
    }

    int nFittedPoints = Math.min(histogram.length, n + 1) - ((zeroTruncated) ? 1 : 0);
    if (nFittedPoints < 1) {
        log("No points to fit (%d): Histogram.length = %d, n = %d, zero-truncated = %b", nFittedPoints,
                histogram.length, n, zeroTruncated);
        return null;
    }

    // The model is only fitting the probability p
    // For a binomial n*p = mean => p = mean/n
    double[] initialSolution = new double[] { FastMath.min(mean / n, 1) };

    // Create the function
    BinomialModelFunction function = new BinomialModelFunction(histogram, n, zeroTruncated);

    double[] lB = new double[1];
    double[] uB = new double[] { 1 };
    SimpleBounds bounds = new SimpleBounds(lB, uB);

    // Fit
    // CMAESOptimizer or BOBYQAOptimizer support bounds

    // CMAESOptimiser based on Matlab code:
    // https://www.lri.fr/~hansen/cmaes.m
    // Take the defaults from the Matlab documentation
    int maxIterations = 2000;
    double stopFitness = 0; //Double.NEGATIVE_INFINITY;
    boolean isActiveCMA = true;
    int diagonalOnly = 0;
    int checkFeasableCount = 1;
    RandomGenerator random = new Well19937c();
    boolean generateStatistics = false;
    ConvergenceChecker<PointValuePair> checker = new SimpleValueChecker(1e-6, 1e-10);
    // The sigma determines the search range for the variables. It should be 1/3 of the initial search region.
    OptimizationData sigma = new CMAESOptimizer.Sigma(new double[] { (uB[0] - lB[0]) / 3 });
    OptimizationData popSize = new CMAESOptimizer.PopulationSize((int) (4 + Math.floor(3 * Math.log(2))));

    try {
        PointValuePair solution = null;
        boolean noRefit = maximumLikelihood;
        if (n == 1 && zeroTruncated) {
            // No need to fit
            solution = new PointValuePair(new double[] { 1 }, 0);
            noRefit = true;
        } else {
            GoalType goalType = (maximumLikelihood) ? GoalType.MAXIMIZE : GoalType.MINIMIZE;

            // Iteratively fit
            CMAESOptimizer opt = new CMAESOptimizer(maxIterations, stopFitness, isActiveCMA, diagonalOnly,
                    checkFeasableCount, random, generateStatistics, checker);
            for (int iteration = 0; iteration <= fitRestarts; iteration++) {
                try {
                    // Start from the initial solution
                    PointValuePair result = opt.optimize(new InitialGuess(initialSolution),
                            new ObjectiveFunction(function), goalType, bounds, sigma, popSize,
                            new MaxIter(maxIterations), new MaxEval(maxIterations * 2));
                    //System.out.printf("CMAES Iter %d initial = %g (%d)\n", iteration, result.getValue(),
                    //      opt.getEvaluations());
                    if (solution == null || result.getValue() < solution.getValue()) {
                        solution = result;
                    }
                } catch (TooManyEvaluationsException e) {
                } catch (TooManyIterationsException e) {
                }
                if (solution == null)
                    continue;
                try {
                    // Also restart from the current optimum
                    PointValuePair result = opt.optimize(new InitialGuess(solution.getPointRef()),
                            new ObjectiveFunction(function), goalType, bounds, sigma, popSize,
                            new MaxIter(maxIterations), new MaxEval(maxIterations * 2));
                    //System.out.printf("CMAES Iter %d restart = %g (%d)\n", iteration, result.getValue(),
                    //      opt.getEvaluations());
                    if (result.getValue() < solution.getValue()) {
                        solution = result;
                    }
                } catch (TooManyEvaluationsException e) {
                } catch (TooManyIterationsException e) {
                }
            }
            if (solution == null)
                return null;
        }

        if (noRefit) {
            // Although we fit the log-likelihood, return the sum-of-squares to allow 
            // comparison across different n
            double p = solution.getPointRef()[0];
            double ss = 0;
            double[] obs = function.p;
            double[] exp = function.getP(p);
            for (int i = 0; i < obs.length; i++)
                ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
            return new PointValuePair(solution.getPointRef(), ss);
        }
        // We can do a LVM refit if the number of fitted points is more than 1
        else if (nFittedPoints > 1) {
            // Improve SS fit with a gradient based LVM optimizer
            LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
            try {
                final BinomialModelFunctionGradient gradientFunction = new BinomialModelFunctionGradient(
                        histogram, n, zeroTruncated);
                PointVectorValuePair lvmSolution = optimizer.optimize(new MaxIter(3000),
                        new MaxEval(Integer.MAX_VALUE),
                        new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                            public double[][] value(double[] point) throws IllegalArgumentException {
                                return gradientFunction.jacobian(point);
                            }
                        }), new ModelFunction(gradientFunction), new Target(gradientFunction.p),
                        new Weight(gradientFunction.getWeights()), new InitialGuess(solution.getPointRef()));

                double ss = 0;
                double[] obs = gradientFunction.p;
                double[] exp = lvmSolution.getValue();
                for (int i = 0; i < obs.length; i++)
                    ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
                // Check the pValue is valid since the LVM is not bounded.
                double p = lvmSolution.getPointRef()[0];
                if (ss < solution.getValue() && p <= 1 && p >= 0) {
                    //log("Re-fitting improved the SS from %s to %s (-%s%%)",
                    //      Utils.rounded(solution.getValue(), 4), Utils.rounded(ss, 4),
                    //      Utils.rounded(100 * (solution.getValue() - ss) / solution.getValue(), 4));
                    return new PointValuePair(lvmSolution.getPoint(), ss);
                }
            } catch (TooManyIterationsException e) {
                log("Failed to re-fit: Too many iterations (%d)", optimizer.getIterations());
            } catch (ConvergenceException e) {
                log("Failed to re-fit: %s", e.getMessage());
            } catch (Exception e) {
                // Ignore this ...
            }
        }

        return solution;
    } catch (Exception e) {
        log("Failed to fit Binomial distribution with N=%d : %s", n, e.getMessage());
    }
    return null;
}

From source file:gdsc.smlm.ij.plugins.BlinkEstimator.java

/**
 * Fit the dark time to counts of molecules curve. Only use the first n fitted points.
 * <p>
 * Calculates:<br/>
 * N = The number of photoblinking molecules in the sample<br/>
 * nBlink = The average number of blinks per fluorophore<br/>
 * tOff = The off-time
 * 
 * @param td
 *            The dark time
 * @param ntd
 *            The counts of molecules
 * @param nFittedPoints
 * @param log
 *            Write the fitting results to the ImageJ log window
 * @return The fitted parameters [N, nBlink, tOff], or null if no fit was possible
 */
public double[] fit(double[] td, double[] ntd, int nFittedPoints, boolean log) {
    blinkingModel = new BlinkingFunction();
    blinkingModel.setLogging(true);
    for (int i = 0; i < nFittedPoints; i++)
        blinkingModel.addPoint(td[i], ntd[i]);

    // Different convergence thresholds seem to have no effect on the resulting fit, only the number of
    // iterations for convergence
    double initialStepBoundFactor = 100;
    double costRelativeTolerance = 1e-6;
    double parRelativeTolerance = 1e-6;
    double orthoTolerance = 1e-6;
    double threshold = Precision.SAFE_MIN;
    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer(initialStepBoundFactor,
            costRelativeTolerance, parRelativeTolerance, orthoTolerance, threshold);
    try {
        double[] obs = blinkingModel.getY();

        PointVectorValuePair optimum = optimizer.optimize(new MaxIter(1000), new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                    public double[][] value(double[] point) throws IllegalArgumentException {
                        return blinkingModel.jacobian(point);
                    }
                }), new ModelFunction(blinkingModel), new Target(obs), new Weight(blinkingModel.getWeights()),
                new InitialGuess(new double[] { ntd[0], 0.1, td[1] }));

        blinkingModel.setLogging(false);

        double[] parameters = optimum.getPoint();

        double[] exp = optimum.getValue();
        double mean = 0;
        for (double d : obs)
            mean += d;
        mean /= obs.length;
        double ssResiduals = 0, ssTotal = 0;
        for (int i = 0; i < obs.length; i++) {
            ssResiduals += (obs[i] - exp[i]) * (obs[i] - exp[i]);
            ssTotal += (obs[i] - mean) * (obs[i] - mean);
        }

        r2 = 1 - ssResiduals / ssTotal;
        adjustedR2 = getAdjustedCoefficientOfDetermination(ssResiduals, ssTotal, obs.length, parameters.length);

        if (log) {
            Utils.log("  Fit %d points. R^2 = %s. Adjusted R^2 = %s", obs.length, Utils.rounded(r2, 4),
                    Utils.rounded(adjustedR2, 4));
            Utils.log("  N=%s, nBlink=%s, tOff=%s (%s frames)", Utils.rounded(parameters[0], 4),
                    Utils.rounded(parameters[1], 4), Utils.rounded(parameters[2], 4),
                    Utils.rounded(parameters[2] / msPerFrame, 4));
        }

        return parameters;
    } catch (TooManyIterationsException e) {
        if (log)
            Utils.log("  Failed to fit %d points: Too many iterations (%d)", blinkingModel.size(),
                    optimizer.getIterations());
        return null;
    } catch (ConvergenceException e) {
        if (log)
            Utils.log("  Failed to fit %d points", blinkingModel.size());
        return null;
    }
}

From source file:gdsc.smlm.ij.plugins.pcpalm.PCPALMMolecules.java

private double[] optimiseLeastSquares(float[] x, float[] y, double[] initialSolution) {
    // Least-squares optimisation using numerical gradients
    final SkewNormalDifferentiableFunction myFunction = new SkewNormalDifferentiableFunction(initialSolution);
    myFunction.addData(x, y);
    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();

    PointVectorValuePair optimum = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
            new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                public double[][] value(double[] point) throws IllegalArgumentException {
                    return myFunction.jacobian(point);
                }
            }), new ModelFunction(myFunction), new Target(myFunction.calculateTarget()),
            new Weight(myFunction.calculateWeights()), new InitialGuess(initialSolution));

    double[] skewParameters = optimum.getPoint();
    return skewParameters;
}

From source file:gdsc.smlm.ij.plugins.pcpalm.PCPALMFitting.java

/**
 * Fits the correlation curve with r>0 to the random model using the estimated density and precision. Parameters
 * must be fit within a tolerance of the starting values.
 * 
 * @param gr
 * @param sigmaS
 *            The estimated precision
 * @param proteinDensity
 *            The estimated protein density
 * @return The fitted parameters [precision, density]
 */
private double[] fitRandomModel(double[][] gr, double sigmaS, double proteinDensity, String resultColour) {
    final RandomModelFunction myFunction = new RandomModelFunction();
    randomModel = myFunction;

    log("Fitting %s: Estimated precision = %f nm, estimated protein density = %g um^-2", randomModel.getName(),
            sigmaS, proteinDensity * 1e6);

    randomModel.setLogging(true);

    for (int i = offset; i < gr[0].length; i++) {
        // Only fit the curve above the estimated resolution (points below it will be subject to error)
        if (gr[0][i] > sigmaS * fitAboveEstimatedPrecision)
            randomModel.addPoint(gr[0][i], gr[1][i]);
    }
    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();

    PointVectorValuePair optimum;
    try {
        optimum = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                    public double[][] value(double[] point) throws IllegalArgumentException {
                        return myFunction.jacobian(point);
                    }
                }), new ModelFunction(myFunction), new Target(myFunction.getY()),
                new Weight(myFunction.getWeights()), new InitialGuess(new double[] { sigmaS, proteinDensity }));
    } catch (TooManyIterationsException e) {
        log("Failed to fit %s: Too many iterations (%d)", randomModel.getName(), optimizer.getIterations());
        return null;
    } catch (ConvergenceException e) {
        log("Failed to fit %s: %s", randomModel.getName(), e.getMessage());
        return null;
    }

    randomModel.setLogging(false);

    double[] parameters = optimum.getPoint();
    // Ensure the width is positive
    parameters[0] = Math.abs(parameters[0]);

    double ss = 0;
    double[] obs = randomModel.getY();
    double[] exp = optimum.getValue();
    for (int i = 0; i < obs.length; i++)
        ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
    ic1 = Maths.getInformationCriterion(ss, randomModel.size(), parameters.length);

    final double fitSigmaS = parameters[0];
    final double fitProteinDensity = parameters[1];

    // Check the fitted parameters are within tolerance of the initial estimates
    double e1 = parameterDrift(sigmaS, fitSigmaS);
    double e2 = parameterDrift(proteinDensity, fitProteinDensity);

    log("  %s fit: SS = %f. cAIC = %f. %d evaluations", randomModel.getName(), ss, ic1,
            optimizer.getEvaluations());
    log("  %s parameters:", randomModel.getName());
    log("    Average precision = %s nm (%s%%)", Utils.rounded(fitSigmaS, 4), Utils.rounded(e1, 4));
    log("    Average protein density = %s um^-2 (%s%%)", Utils.rounded(fitProteinDensity * 1e6, 4),
            Utils.rounded(e2, 4));

    valid1 = true;
    if (fittingTolerance > 0 && (Math.abs(e1) > fittingTolerance || Math.abs(e2) > fittingTolerance)) {
        log("  Failed to fit %s within tolerance (%s%%): Average precision = %f nm (%s%%), average protein density = %g um^-2 (%s%%)",
                randomModel.getName(), Utils.rounded(fittingTolerance, 4), fitSigmaS, Utils.rounded(e1, 4),
                fitProteinDensity * 1e6, Utils.rounded(e2, 4));
        valid1 = false;
    }

    if (valid1) {
        // ---------
        // TODO - My data does not comply with this criterion. 
        // This could be due to the PC-PALM Molecule code limiting the nmPerPixel to fit the images in memory 
        // thus removing correlations at small r.
        // It could also be due to the nature of the random simulations being 3D not 2D membranes 
        // as per the PC-PALM paper. 
        // ---------
        // Evaluate g(r)protein where:
        // g(r)peaks = g(r)protein + g(r)stoch
        // g(r)peaks ~ 1           + g(r)stoch
        // Verify g(r)protein should be <1.5 for all r>0
        double[] gr_stoch = randomModel.value(parameters);
        double[] gr_peaks = randomModel.getY();
        double[] gr_ = randomModel.getX();

        //SummaryStatistics stats = new SummaryStatistics();
        for (int i = 0; i < gr_peaks.length; i++) {
            // Only evaluate above the fitted average precision 
            if (gr_[i] < fitSigmaS)
                continue;

            // Note the RandomModelFunction evaluates g(r)stoch + 1;
            double gr_protein_i = gr_peaks[i] - (gr_stoch[i] - 1);

            if (gr_protein_i > gr_protein_threshold) {
                // Failed fit
                log("  Failed to fit %s: g(r)protein %s > %s @ r=%s", randomModel.getName(),
                        Utils.rounded(gr_protein_i, 4), Utils.rounded(gr_protein_threshold, 4),
                        Utils.rounded(gr_[i], 4));
                valid1 = false;
            }
            //stats.addValue(gr_i);
            //System.out.printf("g(r)protein @ %f = %f\n", gr[0][i], gr_protein_i);
        }
    }

    addResult(randomModel.getName(), resultColour, valid1, fitSigmaS, fitProteinDensity, 0, 0, 0, 0, ic1);

    return parameters;
}

From source file:gdsc.smlm.ij.plugins.pcpalm.PCPALMFitting.java

/**
 * Fits the correlation curve with r>0 to the clustered model using the estimated density and precision. Parameters
 * must be fit within a tolerance of the starting values.
 *
 * @param gr
 * @param sigmaS
 *            The estimated precision
 * @param proteinDensity
 *            The estimated protein density
 * @return The fitted parameters [precision, density, clusterRadius, clusterDensity]
 */
private double[] fitClusteredModel(double[][] gr, double sigmaS, double proteinDensity, String resultColour) {
    final ClusteredModelFunctionGradient myFunction = new ClusteredModelFunctionGradient();
    clusteredModel = myFunction;
    log("Fitting %s: Estimated precision = %f nm, estimated protein density = %g um^-2",
            clusteredModel.getName(), sigmaS, proteinDensity * 1e6);

    clusteredModel.setLogging(true);
    for (int i = offset; i < gr[0].length; i++) {
        // Only fit the curve above the estimated resolution (points below it will be subject to error)
        if (gr[0][i] > sigmaS * fitAboveEstimatedPrecision)
            clusteredModel.addPoint(gr[0][i], gr[1][i]);
    }

    double[] parameters;
    // The model is: sigma, density, range, amplitude
    double[] initialSolution = new double[] { sigmaS, proteinDensity, sigmaS * 5, 1 };
    int evaluations = 0;

    // Constrain the fitting to be close to the estimated precision (sigmaS) and protein density.
    // LVM fitting does not support constrained fitting so use a bounded optimiser.
    SumOfSquaresModelFunction clusteredModelMulti = new SumOfSquaresModelFunction(clusteredModel);
    double[] x = clusteredModelMulti.x;

    // Put some bounds around the initial guess. Use the fitting tolerance (in %) if provided.
    double limit = (fittingTolerance > 0) ? 1 + fittingTolerance / 100 : 2;
    double[] lB = new double[] { initialSolution[0] / limit, initialSolution[1] / limit, 0, 0 };
    // The amplitude and range should not extend beyond the limits of the g(r) curve.
    double[] uB = new double[] { initialSolution[0] * limit, initialSolution[1] * limit, Maths.max(x),
            Maths.max(gr[1]) };
    log("Fitting %s using a bounded search: %s < precision < %s & %s < density < %s", clusteredModel.getName(),
            Utils.rounded(lB[0], 4), Utils.rounded(uB[0], 4), Utils.rounded(lB[1] * 1e6, 4),
            Utils.rounded(uB[1] * 1e6, 4));

    PointValuePair constrainedSolution = runBoundedOptimiser(gr, initialSolution, lB, uB, clusteredModelMulti);

    if (constrainedSolution == null)
        return null;

    parameters = constrainedSolution.getPointRef();
    evaluations = boundedEvaluations;

    // Refit using an LVM
    if (useLSE) {
        log("Re-fitting %s using a gradient optimisation", clusteredModel.getName());
        LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
        PointVectorValuePair lvmSolution;
        try {
            lvmSolution = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
                    new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                        public double[][] value(double[] point) throws IllegalArgumentException {
                            return myFunction.jacobian(point);
                        }
                    }), new ModelFunction(myFunction), new Target(myFunction.getY()),
                    new Weight(myFunction.getWeights()), new InitialGuess(parameters));
            evaluations += optimizer.getEvaluations();

            double ss = 0;
            double[] obs = clusteredModel.getY();
            double[] exp = lvmSolution.getValue();
            for (int i = 0; i < obs.length; i++)
                ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
            if (ss < constrainedSolution.getValue()) {
                log("Re-fitting %s improved the SS from %s to %s (-%s%%)", clusteredModel.getName(),
                        Utils.rounded(constrainedSolution.getValue(), 4), Utils.rounded(ss, 4),
                        Utils.rounded(
                                100 * (constrainedSolution.getValue() - ss) / constrainedSolution.getValue(),
                                4));
                parameters = lvmSolution.getPoint();
            }
        } catch (TooManyIterationsException e) {
            log("Failed to re-fit %s: Too many iterations (%d)", clusteredModel.getName(),
                    optimizer.getIterations());
        } catch (ConvergenceException e) {
            log("Failed to re-fit %s: %s", clusteredModel.getName(), e.getMessage());
        }
    }

    clusteredModel.setLogging(false);

    // Ensure the width is positive
    parameters[0] = Math.abs(parameters[0]);
    //parameters[2] = Math.abs(parameters[2]);

    double ss = 0;
    double[] obs = clusteredModel.getY();
    double[] exp = clusteredModel.value(parameters);
    for (int i = 0; i < obs.length; i++)
        ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
    ic2 = Maths.getInformationCriterion(ss, clusteredModel.size(), parameters.length);

    final double fitSigmaS = parameters[0];
    final double fitProteinDensity = parameters[1];
    final double domainRadius = parameters[2]; //The radius of the cluster domain
    final double domainDensity = parameters[3]; //The density of the cluster domain

    // This is from the PC-PALM paper. However that paper fits the g(r)protein exponential convolved in 2D
    // with the g(r)PSF. In this method we have just fit the exponential
    final double nCluster = 2 * domainDensity * Math.PI * domainRadius * domainRadius * fitProteinDensity;

    double e1 = parameterDrift(sigmaS, fitSigmaS);
    double e2 = parameterDrift(proteinDensity, fitProteinDensity);

    log("  %s fit: SS = %f. cAIC = %f. %d evaluations", clusteredModel.getName(), ss, ic2, evaluations);
    log("  %s parameters:", clusteredModel.getName());
    log("    Average precision = %s nm (%s%%)", Utils.rounded(fitSigmaS, 4), Utils.rounded(e1, 4));
    log("    Average protein density = %s um^-2 (%s%%)", Utils.rounded(fitProteinDensity * 1e6, 4),
            Utils.rounded(e2, 4));
    log("    Domain radius = %s nm", Utils.rounded(domainRadius, 4));
    log("    Domain density = %s", Utils.rounded(domainDensity, 4));
    log("    nCluster = %s", Utils.rounded(nCluster, 4));

    // Check the fitted parameters are within tolerance of the initial estimates
    valid2 = true;
    if (fittingTolerance > 0 && (Math.abs(e1) > fittingTolerance || Math.abs(e2) > fittingTolerance)) {
        log("  Failed to fit %s within tolerance (%s%%): Average precision = %f nm (%s%%), average protein density = %g um^-2 (%s%%)",
                clusteredModel.getName(), Utils.rounded(fittingTolerance, 4), fitSigmaS, Utils.rounded(e1, 4),
                fitProteinDensity * 1e6, Utils.rounded(e2, 4));
        valid2 = false;
    }

    // Check extra parameters. Domain radius should be higher than the precision. Density should be positive
    if (domainRadius < fitSigmaS) {
        log("  Failed to fit %s: Domain radius is smaller than the average precision (%s < %s)",
                clusteredModel.getName(), Utils.rounded(domainRadius, 4), Utils.rounded(fitSigmaS, 4));
        valid2 = false;
    }
    if (domainDensity < 0) {
        log("  Failed to fit %s: Domain density is negative (%s)", clusteredModel.getName(),
                Utils.rounded(domainDensity, 4));
        valid2 = false;
    }

    if (ic2 > ic1) {
        log("  Failed to fit %s - Information Criterion has increased %s%%", clusteredModel.getName(),
                Utils.rounded((100 * (ic2 - ic1) / ic1), 4));
        valid2 = false;
    }

    addResult(clusteredModel.getName(), resultColour, valid2, fitSigmaS, fitProteinDensity, domainRadius,
            domainDensity, nCluster, 0, ic2);

    return parameters;
}

From source file:gdsc.smlm.ij.plugins.TraceDiffusion.java

/**
 * Fit the MSD using a linear fit that must pass through 0,0.
 * <p>
 * Update the plot by adding the fit line.
 * 
 * @param x
 * @param y
 * @param title
 * @param plot
 * @return The fitted diffusion coefficient D
 */
private double fitMSD(double[] x, double[] y, String title, Plot2 plot) {
    double D = 0;
    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
    PointVectorValuePair lvmSolution;
    try {
        final LinearFunction function = new LinearFunction(x, y, settings.fitLength);
        double[] parameters = new double[] { function.guess() };
        lvmSolution = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                    public double[][] value(double[] point) throws IllegalArgumentException {
                        return function.jacobian(point);
                    }
                }), new ModelFunction(function), new Target(function.getY()), new Weight(function.getWeights()),
                new InitialGuess(parameters));

        double ss = 0;
        double[] obs = function.getY();
        double[] exp = lvmSolution.getValue();
        for (int i = 0; i < obs.length; i++)
            ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);

        D = lvmSolution.getPoint()[0] / 4;
        Utils.log("Linear fit (%d points) : Gradient = %s, D = %s um^2/s, SS = %f (%d evaluations)", obs.length,
                Utils.rounded(lvmSolution.getPoint()[0], 4), Utils.rounded(D, 4), ss,
                optimizer.getEvaluations());

        // Add the fit to the plot
        plot.setColor(Color.magenta);
        plot.drawLine(0, 0, x[x.length - 1], x[x.length - 1] * 4 * D);
        display(title, plot);
    } catch (TooManyIterationsException e) {
        Utils.log("Failed to fit : Too many iterations (%d)", optimizer.getIterations());
    } catch (ConvergenceException e) {
        Utils.log("Failed to fit : %s", e.getMessage());
    }
    return D;
}