Example usage for org.apache.commons.math3.analysis.MultivariateMatrixFunction

Introduction

This page collects example usages of org.apache.commons.math3.analysis.MultivariateMatrixFunction. In each example the interface is implemented as an anonymous class that supplies an analytical Jacobian, typically to a least-squares optimizer.

Prototype

MultivariateMatrixFunction
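
MultivariateMatrixFunction is a single-method interface; every example below implements it as an anonymous class whose method returns the Jacobian matrix evaluated at the given parameter point:

    double[][] value(double[] point) throws IllegalArgumentException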

Usage
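
Every example below follows the same pattern: an anonymous MultivariateMatrixFunction wraps a model's jacobian(double[]) method and is handed, via ModelFunctionJacobian, to a LevenbergMarquardtOptimizer together with the model function, target data and weights (or, in the CircleCoordinatesFunction case, it is returned from a jacobian() factory method). The following minimal, self-contained sketch illustrates that pattern on made-up straight-line data; the class name, data and model are illustrative only and are not taken from any of the source files below.

import java.util.Arrays;

import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
import org.apache.commons.math3.analysis.MultivariateVectorFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.MaxIter;
import org.apache.commons.math3.optim.PointVectorValuePair;
import org.apache.commons.math3.optim.nonlinear.vector.ModelFunction;
import org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian;
import org.apache.commons.math3.optim.nonlinear.vector.Target;
import org.apache.commons.math3.optim.nonlinear.vector.Weight;
import org.apache.commons.math3.optim.nonlinear.vector.jacobian.LevenbergMarquardtOptimizer;

public class LineFitExample {
    public static void main(String[] args) {
        // Illustrative data for a straight line y = a*x + b
        final double[] x = { 0, 1, 2, 3, 4 };
        final double[] y = { 1.1, 2.9, 5.2, 7.1, 8.8 };

        // The model: f_i(a, b) = a*x_i + b
        final MultivariateVectorFunction model = new MultivariateVectorFunction() {
            public double[] value(double[] p) {
                double[] values = new double[x.length];
                for (int i = 0; i < x.length; i++)
                    values[i] = p[0] * x[i] + p[1];
                return values;
            }
        };

        // The Jacobian: df_i/da = x_i, df_i/db = 1
        MultivariateMatrixFunction jacobian = new MultivariateMatrixFunction() {
            public double[][] value(double[] p) throws IllegalArgumentException {
                double[][] j = new double[x.length][2];
                for (int i = 0; i < x.length; i++) {
                    j[i][0] = x[i];
                    j[i][1] = 1;
                }
                return j;
            }
        };

        // Unit weights for all observations
        double[] weights = new double[x.length];
        Arrays.fill(weights, 1);

        LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
        PointVectorValuePair fit = optimizer.optimize(new MaxIter(1000), new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(jacobian), new ModelFunction(model), new Target(y),
                new Weight(weights), new InitialGuess(new double[] { 1, 0 }));

        System.out.printf("a = %f, b = %f%n", fit.getPoint()[0], fit.getPoint()[1]);
    }
}

Only the model and its Jacobian change from example to example; the surrounding optimizer call is essentially the same throughout.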

From source file:gdsc.smlm.fitting.BinomialFitter.java

/**
 * Fit the binomial distribution (n,p) to the cumulative histogram. Performs fitting assuming a fixed n value and
 * attempts to optimise p.
 * 
 * @param histogram
 *            The input histogram
 * @param mean
 *            The histogram mean (used to estimate p). Calculated if NaN.
 * @param n
 *            The n to evaluate
 * @param zeroTruncated
 *            True if the model should ignore n=0 (zero-truncated binomial)
 * @return The best fit (n, p)
 * @throws IllegalArgumentException
 *             If any of the input data values are negative
 * @throws IllegalArgumentException
 *             If fitting a zero-truncated binomial and there are no values above zero
 */
public PointValuePair fitBinomial(double[] histogram, double mean, int n, boolean zeroTruncated) {
    if (Double.isNaN(mean))
        mean = getMean(histogram);

    if (zeroTruncated && histogram[0] > 0) {
        log("Fitting zero-truncated histogram but there are zero values - Renormalising to ignore zero");
        double cumul = 0;
        for (int i = 1; i < histogram.length; i++)
            cumul += histogram[i];
        if (cumul == 0)
            throw new IllegalArgumentException(
                    "Fitting zero-truncated histogram but there are no non-zero values");
        histogram[0] = 0;
        for (int i = 1; i < histogram.length; i++)
            histogram[i] /= cumul;
    }

    int nFittedPoints = Math.min(histogram.length, n + 1) - ((zeroTruncated) ? 1 : 0);
    if (nFittedPoints < 1) {
        log("No points to fit (%d): Histogram.length = %d, n = %d, zero-truncated = %b", nFittedPoints,
                histogram.length, n, zeroTruncated);
        return null;
    }

    // The model is only fitting the probability p
    // For a binomial n*p = mean => p = mean/n
    double[] initialSolution = new double[] { FastMath.min(mean / n, 1) };

    // Create the function
    BinomialModelFunction function = new BinomialModelFunction(histogram, n, zeroTruncated);

    double[] lB = new double[1];
    double[] uB = new double[] { 1 };
    SimpleBounds bounds = new SimpleBounds(lB, uB);

    // Fit
    // CMAESOptimizer or BOBYQAOptimizer support bounds

    // CMAESOptimizer based on Matlab code:
    // https://www.lri.fr/~hansen/cmaes.m
    // Take the defaults from the Matlab documentation
    int maxIterations = 2000;
    double stopFitness = 0; //Double.NEGATIVE_INFINITY;
    boolean isActiveCMA = true;
    int diagonalOnly = 0;
    int checkFeasableCount = 1;
    RandomGenerator random = new Well19937c();
    boolean generateStatistics = false;
    ConvergenceChecker<PointValuePair> checker = new SimpleValueChecker(1e-6, 1e-10);
    // The sigma determines the search range for the variables. It should be 1/3 of the initial search region.
    OptimizationData sigma = new CMAESOptimizer.Sigma(new double[] { (uB[0] - lB[0]) / 3 });
    OptimizationData popSize = new CMAESOptimizer.PopulationSize((int) (4 + Math.floor(3 * Math.log(2))));

    try {
        PointValuePair solution = null;
        boolean noRefit = maximumLikelihood;
        if (n == 1 && zeroTruncated) {
            // No need to fit
            solution = new PointValuePair(new double[] { 1 }, 0);
            noRefit = true;
        } else {
            GoalType goalType = (maximumLikelihood) ? GoalType.MAXIMIZE : GoalType.MINIMIZE;

            // Iteratively fit
            CMAESOptimizer opt = new CMAESOptimizer(maxIterations, stopFitness, isActiveCMA, diagonalOnly,
                    checkFeasableCount, random, generateStatistics, checker);
            for (int iteration = 0; iteration <= fitRestarts; iteration++) {
                try {
                    // Start from the initial solution
                    PointValuePair result = opt.optimize(new InitialGuess(initialSolution),
                            new ObjectiveFunction(function), goalType, bounds, sigma, popSize,
                            new MaxIter(maxIterations), new MaxEval(maxIterations * 2));
                    //System.out.printf("CMAES Iter %d initial = %g (%d)\n", iteration, result.getValue(),
                    //      opt.getEvaluations());
                    if (solution == null || result.getValue() < solution.getValue()) {
                        solution = result;
                    }
                } catch (TooManyEvaluationsException e) {
                } catch (TooManyIterationsException e) {
                }
                if (solution == null)
                    continue;
                try {
                    // Also restart from the current optimum
                    PointValuePair result = opt.optimize(new InitialGuess(solution.getPointRef()),
                            new ObjectiveFunction(function), goalType, bounds, sigma, popSize,
                            new MaxIter(maxIterations), new MaxEval(maxIterations * 2));
                    //System.out.printf("CMAES Iter %d restart = %g (%d)\n", iteration, result.getValue(),
                    //      opt.getEvaluations());
                    if (result.getValue() < solution.getValue()) {
                        solution = result;
                    }
                } catch (TooManyEvaluationsException e) {
                } catch (TooManyIterationsException e) {
                }
            }
            if (solution == null)
                return null;
        }

        if (noRefit) {
            // Although we fit the log-likelihood, return the sum-of-squares to allow 
            // comparison across different n
            double p = solution.getPointRef()[0];
            double ss = 0;
            double[] obs = function.p;
            double[] exp = function.getP(p);
            for (int i = 0; i < obs.length; i++)
                ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
            return new PointValuePair(solution.getPointRef(), ss);
        }
        // We can do an LVM refit if the number of fitted points is more than 1
        else if (nFittedPoints > 1) {
            // Improve SS fit with a gradient based LVM optimizer
            LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
            try {
                final BinomialModelFunctionGradient gradientFunction = new BinomialModelFunctionGradient(
                        histogram, n, zeroTruncated);
                PointVectorValuePair lvmSolution = optimizer.optimize(new MaxIter(3000),
                        new MaxEval(Integer.MAX_VALUE),
                        new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                            public double[][] value(double[] point) throws IllegalArgumentException {
                                return gradientFunction.jacobian(point);
                            }
                        }), new ModelFunction(gradientFunction), new Target(gradientFunction.p),
                        new Weight(gradientFunction.getWeights()), new InitialGuess(solution.getPointRef()));

                double ss = 0;
                double[] obs = gradientFunction.p;
                double[] exp = lvmSolution.getValue();
                for (int i = 0; i < obs.length; i++)
                    ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
                // Check the fitted p value is valid since the LVM fit is not bounded.
                double p = lvmSolution.getPointRef()[0];
                if (ss < solution.getValue() && p <= 1 && p >= 0) {
                    //log("Re-fitting improved the SS from %s to %s (-%s%%)",
                    //      Utils.rounded(solution.getValue(), 4), Utils.rounded(ss, 4),
                    //      Utils.rounded(100 * (solution.getValue() - ss) / solution.getValue(), 4));
                    return new PointValuePair(lvmSolution.getPoint(), ss);
                }
            } catch (TooManyIterationsException e) {
                log("Failed to re-fit: Too many iterations (%d)", optimizer.getIterations());
            } catch (ConvergenceException e) {
                log("Failed to re-fit: %s", e.getMessage());
            } catch (Exception e) {
                // Ignore this ...
            }
        }

        return solution;
    } catch (Exception e) {
        log("Failed to fit Binomial distribution with N=%d : %s", n, e.getMessage());
    }
    return null;
}

From source file:gdsc.smlm.ij.plugins.BlinkEstimator.java

/**
 * Fit the curve of molecule counts against dark time. Only the first nFittedPoints points are used.
 * <p>
 * Calculates:<br/>
 * N = The number of photoblinking molecules in the sample<br/>
 * nBlink = The average number of blinks per fluorophore<br/>
 * tOff = The off-time
 * 
 * @param td
 *            The dark time
 * @param ntd
 *            The counts of molecules
 * @param nFittedPoints
 * @param log
 *            Write the fitting results to the ImageJ log window
 * @return The fitted parameters [N, nBlink, tOff], or null if no fit was possible
 */
public double[] fit(double[] td, double[] ntd, int nFittedPoints, boolean log) {
    blinkingModel = new BlinkingFunction();
    blinkingModel.setLogging(true);
    for (int i = 0; i < nFittedPoints; i++)
        blinkingModel.addPoint(td[i], ntd[i]);

    // Different convergence thresholds seem to have no effect on the resulting fit, only the number of
    // iterations for convergence
    double initialStepBoundFactor = 100;
    double costRelativeTolerance = 1e-6;
    double parRelativeTolerance = 1e-6;
    double orthoTolerance = 1e-6;
    double threshold = Precision.SAFE_MIN;
    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer(initialStepBoundFactor,
            costRelativeTolerance, parRelativeTolerance, orthoTolerance, threshold);
    try {
        double[] obs = blinkingModel.getY();

        PointVectorValuePair optimum = optimizer.optimize(new MaxIter(1000), new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                    public double[][] value(double[] point) throws IllegalArgumentException {
                        return blinkingModel.jacobian(point);
                    }
                }), new ModelFunction(blinkingModel), new Target(obs), new Weight(blinkingModel.getWeights()),
                new InitialGuess(new double[] { ntd[0], 0.1, td[1] }));

        blinkingModel.setLogging(false);

        double[] parameters = optimum.getPoint();

        double[] exp = optimum.getValue();
        double mean = 0;
        for (double d : obs)
            mean += d;
        mean /= obs.length;
        double ssResiduals = 0, ssTotal = 0;
        for (int i = 0; i < obs.length; i++) {
            ssResiduals += (obs[i] - exp[i]) * (obs[i] - exp[i]);
            ssTotal += (obs[i] - mean) * (obs[i] - mean);
        }

        r2 = 1 - ssResiduals / ssTotal;
        adjustedR2 = getAdjustedCoefficientOfDetermination(ssResiduals, ssTotal, obs.length, parameters.length);

        if (log) {
            Utils.log("  Fit %d points. R^2 = %s. Adjusted R^2 = %s", obs.length, Utils.rounded(r2, 4),
                    Utils.rounded(adjustedR2, 4));
            Utils.log("  N=%s, nBlink=%s, tOff=%s (%s frames)", Utils.rounded(parameters[0], 4),
                    Utils.rounded(parameters[1], 4), Utils.rounded(parameters[2], 4),
                    Utils.rounded(parameters[2] / msPerFrame, 4));
        }

        return parameters;
    } catch (TooManyIterationsException e) {
        if (log)
            Utils.log("  Failed to fit %d points: Too many iterations (%d)", blinkingModel.size(),
                    optimizer.getIterations());
        return null;
    } catch (ConvergenceException e) {
        if (log)
            Utils.log("  Failed to fit %d points", blinkingModel.size());
        return null;
    }
}

From source file:gdsc.smlm.ij.plugins.pcpalm.PCPALMMolecules.java

private double[] optimiseLeastSquares(float[] x, float[] y, double[] initialSolution) {
    // Least-squares optimisation using numerical gradients
    final SkewNormalDifferentiableFunction myFunction = new SkewNormalDifferentiableFunction(initialSolution);
    myFunction.addData(x, y);
    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();

    PointVectorValuePair optimum = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
            new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                public double[][] value(double[] point) throws IllegalArgumentException {
                    return myFunction.jacobian(point);
                }
            }), new ModelFunction(myFunction), new Target(myFunction.calculateTarget()),
            new Weight(myFunction.calculateWeights()), new InitialGuess(initialSolution));

    double[] skewParameters = optimum.getPoint();
    return skewParameters;
}

From source file:gdsc.smlm.ij.plugins.pcpalm.PCPALMFitting.java

/**
 * Fits the correlation curve with r>0 to the random model using the estimated density and precision. Parameters
 * must be fit within a tolerance of the starting values.
 *
 * @param gr
 * @param sigmaS
 *            The estimated precision
 * @param proteinDensity
 *            The estimated protein density
 * @return The fitted parameters [precision, density]
 */
private double[] fitRandomModel(double[][] gr, double sigmaS, double proteinDensity, String resultColour) {
    final RandomModelFunction myFunction = new RandomModelFunction();
    randomModel = myFunction;

    log("Fitting %s: Estimated precision = %f nm, estimated protein density = %g um^-2", randomModel.getName(),
            sigmaS, proteinDensity * 1e6);

    randomModel.setLogging(true);

    for (int i = offset; i < gr[0].length; i++) {
        // Only fit the curve above the estimated resolution (points below it will be subject to error)
        if (gr[0][i] > sigmaS * fitAboveEstimatedPrecision)
            randomModel.addPoint(gr[0][i], gr[1][i]);
    }
    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();

    PointVectorValuePair optimum;
    try {
        optimum = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                    public double[][] value(double[] point) throws IllegalArgumentException {
                        return myFunction.jacobian(point);
                    }
                }), new ModelFunction(myFunction), new Target(myFunction.getY()),
                new Weight(myFunction.getWeights()), new InitialGuess(new double[] { sigmaS, proteinDensity }));
    } catch (TooManyIterationsException e) {
        log("Failed to fit %s: Too many iterations (%d)", randomModel.getName(), optimizer.getIterations());
        return null;
    } catch (ConvergenceException e) {
        log("Failed to fit %s: %s", randomModel.getName(), e.getMessage());
        return null;
    }

    randomModel.setLogging(false);

    double[] parameters = optimum.getPoint();
    // Ensure the width is positive
    parameters[0] = Math.abs(parameters[0]);

    double ss = 0;
    double[] obs = randomModel.getY();
    double[] exp = optimum.getValue();
    for (int i = 0; i < obs.length; i++)
        ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
    ic1 = Maths.getInformationCriterion(ss, randomModel.size(), parameters.length);

    final double fitSigmaS = parameters[0];
    final double fitProteinDensity = parameters[1];

    // Check the fitted parameters are within tolerance of the initial estimates
    double e1 = parameterDrift(sigmaS, fitSigmaS);
    double e2 = parameterDrift(proteinDensity, fitProteinDensity);

    log("  %s fit: SS = %f. cAIC = %f. %d evaluations", randomModel.getName(), ss, ic1,
            optimizer.getEvaluations());
    log("  %s parameters:", randomModel.getName());
    log("    Average precision = %s nm (%s%%)", Utils.rounded(fitSigmaS, 4), Utils.rounded(e1, 4));
    log("    Average protein density = %s um^-2 (%s%%)", Utils.rounded(fitProteinDensity * 1e6, 4),
            Utils.rounded(e2, 4));

    valid1 = true;
    if (fittingTolerance > 0 && (Math.abs(e1) > fittingTolerance || Math.abs(e2) > fittingTolerance)) {
        log("  Failed to fit %s within tolerance (%s%%): Average precision = %f nm (%s%%), average protein density = %g um^-2 (%s%%)",
                randomModel.getName(), Utils.rounded(fittingTolerance, 4), fitSigmaS, Utils.rounded(e1, 4),
                fitProteinDensity * 1e6, Utils.rounded(e2, 4));
        valid1 = false;
    }

    if (valid1) {
        // ---------
        // TODO - My data does not comply with this criterion.
        // This could be due to the PC-PALM Molecule code limiting the nmPerPixel to fit the images in memory 
        // thus removing correlations at small r.
        // It could also be due to the nature of the random simulations being 3D not 2D membranes 
        // as per the PC-PALM paper. 
        // ---------
        // Evaluate g(r)protein where:
        // g(r)peaks = g(r)protein + g(r)stoch
        // g(r)peaks ~ 1           + g(r)stoch
        // Verify g(r)protein should be <1.5 for all r>0
        double[] gr_stoch = randomModel.value(parameters);
        double[] gr_peaks = randomModel.getY();
        double[] gr_ = randomModel.getX();

        //SummaryStatistics stats = new SummaryStatistics();
        for (int i = 0; i < gr_peaks.length; i++) {
            // Only evaluate above the fitted average precision 
            if (gr_[i] < fitSigmaS)
                continue;

            // Note the RandomModelFunction evaluates g(r)stoch + 1;
            double gr_protein_i = gr_peaks[i] - (gr_stoch[i] - 1);

            if (gr_protein_i > gr_protein_threshold) {
                // Failed fit
                log("  Failed to fit %s: g(r)protein %s > %s @ r=%s", randomModel.getName(),
                        Utils.rounded(gr_protein_i, 4), Utils.rounded(gr_protein_threshold, 4),
                        Utils.rounded(gr_[i], 4));
                valid1 = false;
            }
            //stats.addValue(gr_i);
            //System.out.printf("g(r)protein @ %f = %f\n", gr[0][i], gr_protein_i);
        }
    }

    addResult(randomModel.getName(), resultColour, valid1, fitSigmaS, fitProteinDensity, 0, 0, 0, 0, ic1);

    return parameters;
}

From source file:gdsc.smlm.ij.plugins.pcpalm.PCPALMFitting.java

/**
 * Fits the correlation curve with r>0 to the clustered model using the estimated density and precision. Parameters
 * must be fit within a tolerance of the starting values.
 *
 * @param gr
 * @param sigmaS
 *            The estimated precision
 * @param proteinDensity
 *            The estimated protein density
 * @return The fitted parameters [precision, density, clusterRadius, clusterDensity]
 */
private double[] fitClusteredModel(double[][] gr, double sigmaS, double proteinDensity, String resultColour) {
    final ClusteredModelFunctionGradient myFunction = new ClusteredModelFunctionGradient();
    clusteredModel = myFunction;
    log("Fitting %s: Estimated precision = %f nm, estimated protein density = %g um^-2",
            clusteredModel.getName(), sigmaS, proteinDensity * 1e6);

    clusteredModel.setLogging(true);
    for (int i = offset; i < gr[0].length; i++) {
        // Only fit the curve above the estimated resolution (points below it will be subject to error)
        if (gr[0][i] > sigmaS * fitAboveEstimatedPrecision)
            clusteredModel.addPoint(gr[0][i], gr[1][i]);
    }

    double[] parameters;
    // The model is: sigma, density, range, amplitude
    double[] initialSolution = new double[] { sigmaS, proteinDensity, sigmaS * 5, 1 };
    int evaluations = 0;

    // Constrain the fitting to be close to the estimated precision (sigmaS) and protein density.
    // LVM fitting does not support constrained fitting so use a bounded optimiser.
    SumOfSquaresModelFunction clusteredModelMulti = new SumOfSquaresModelFunction(clusteredModel);
    double[] x = clusteredModelMulti.x;

    // Put some bounds around the initial guess. Use the fitting tolerance (in %) if provided.
    double limit = (fittingTolerance > 0) ? 1 + fittingTolerance / 100 : 2;
    double[] lB = new double[] { initialSolution[0] / limit, initialSolution[1] / limit, 0, 0 };
    // The amplitude and range should not extend beyond the limits of the g(r) curve.
    double[] uB = new double[] { initialSolution[0] * limit, initialSolution[1] * limit, Maths.max(x),
            Maths.max(gr[1]) };
    log("Fitting %s using a bounded search: %s < precision < %s & %s < density < %s", clusteredModel.getName(),
            Utils.rounded(lB[0], 4), Utils.rounded(uB[0], 4), Utils.rounded(lB[1] * 1e6, 4),
            Utils.rounded(uB[1] * 1e6, 4));

    PointValuePair constrainedSolution = runBoundedOptimiser(gr, initialSolution, lB, uB, clusteredModelMulti);

    if (constrainedSolution == null)
        return null;

    parameters = constrainedSolution.getPointRef();
    evaluations = boundedEvaluations;

    // Refit using an LVM
    if (useLSE) {
        log("Re-fitting %s using a gradient optimisation", clusteredModel.getName());
        LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
        PointVectorValuePair lvmSolution;
        try {
            lvmSolution = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
                    new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                        public double[][] value(double[] point) throws IllegalArgumentException {
                            return myFunction.jacobian(point);
                        }
                    }), new ModelFunction(myFunction), new Target(myFunction.getY()),
                    new Weight(myFunction.getWeights()), new InitialGuess(parameters));
            evaluations += optimizer.getEvaluations();

            double ss = 0;
            double[] obs = clusteredModel.getY();
            double[] exp = lvmSolution.getValue();
            for (int i = 0; i < obs.length; i++)
                ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
            if (ss < constrainedSolution.getValue()) {
                log("Re-fitting %s improved the SS from %s to %s (-%s%%)", clusteredModel.getName(),
                        Utils.rounded(constrainedSolution.getValue(), 4), Utils.rounded(ss, 4),
                        Utils.rounded(
                                100 * (constrainedSolution.getValue() - ss) / constrainedSolution.getValue(),
                                4));
                parameters = lvmSolution.getPoint();
            }
        } catch (TooManyIterationsException e) {
            log("Failed to re-fit %s: Too many iterations (%d)", clusteredModel.getName(),
                    optimizer.getIterations());
        } catch (ConvergenceException e) {
            log("Failed to re-fit %s: %s", clusteredModel.getName(), e.getMessage());
        }
    }

    clusteredModel.setLogging(false);

    // Ensure the width is positive
    parameters[0] = Math.abs(parameters[0]);
    //parameters[2] = Math.abs(parameters[2]);

    double ss = 0;
    double[] obs = clusteredModel.getY();
    double[] exp = clusteredModel.value(parameters);
    for (int i = 0; i < obs.length; i++)
        ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
    ic2 = Maths.getInformationCriterion(ss, clusteredModel.size(), parameters.length);

    final double fitSigmaS = parameters[0];
    final double fitProteinDensity = parameters[1];
    final double domainRadius = parameters[2]; //The radius of the cluster domain
    final double domainDensity = parameters[3]; //The density of the cluster domain

    // This is from the PC-PALM paper. However that paper fits the g(r)protein exponential convolved in 2D
    // with the g(r)PSF. In this method we have just fit the exponential
    final double nCluster = 2 * domainDensity * Math.PI * domainRadius * domainRadius * fitProteinDensity;

    double e1 = parameterDrift(sigmaS, fitSigmaS);
    double e2 = parameterDrift(proteinDensity, fitProteinDensity);

    log("  %s fit: SS = %f. cAIC = %f. %d evaluations", clusteredModel.getName(), ss, ic2, evaluations);
    log("  %s parameters:", clusteredModel.getName());
    log("    Average precision = %s nm (%s%%)", Utils.rounded(fitSigmaS, 4), Utils.rounded(e1, 4));
    log("    Average protein density = %s um^-2 (%s%%)", Utils.rounded(fitProteinDensity * 1e6, 4),
            Utils.rounded(e2, 4));
    log("    Domain radius = %s nm", Utils.rounded(domainRadius, 4));
    log("    Domain density = %s", Utils.rounded(domainDensity, 4));
    log("    nCluster = %s", Utils.rounded(nCluster, 4));

    // Check the fitted parameters are within tolerance of the initial estimates
    valid2 = true;
    if (fittingTolerance > 0 && (Math.abs(e1) > fittingTolerance || Math.abs(e2) > fittingTolerance)) {
        log("  Failed to fit %s within tolerance (%s%%): Average precision = %f nm (%s%%), average protein density = %g um^-2 (%s%%)",
                clusteredModel.getName(), Utils.rounded(fittingTolerance, 4), fitSigmaS, Utils.rounded(e1, 4),
                fitProteinDensity * 1e6, Utils.rounded(e2, 4));
        valid2 = false;
    }

    // Check extra parameters. Domain radius should be higher than the precision. Density should be positive
    if (domainRadius < fitSigmaS) {
        log("  Failed to fit %s: Domain radius is smaller than the average precision (%s < %s)",
                clusteredModel.getName(), Utils.rounded(domainRadius, 4), Utils.rounded(fitSigmaS, 4));
        valid2 = false;
    }
    if (domainDensity < 0) {
        log("  Failed to fit %s: Domain density is negative (%s)", clusteredModel.getName(),
                Utils.rounded(domainDensity, 4));
        valid2 = false;
    }

    if (ic2 > ic1) {
        log("  Failed to fit %s - Information Criterion has increased %s%%", clusteredModel.getName(),
                Utils.rounded((100 * (ic2 - ic1) / ic1), 4));
        valid2 = false;
    }

    addResult(clusteredModel.getName(), resultColour, valid2, fitSigmaS, fitProteinDensity, domainRadius,
            domainDensity, nCluster, 0, ic2);

    return parameters;
}

From source file:gdsc.smlm.ij.plugins.TraceDiffusion.java

/**
 * Fit the MSD using a linear fit that must pass through 0,0.
 * <p>
 * Update the plot by adding the fit line.
 * 
 * @param x
 * @param y
 * @param title
 * @param plot
 * @return
 */
private double fitMSD(double[] x, double[] y, String title, Plot2 plot) {
    double D = 0;
    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
    PointVectorValuePair lvmSolution;
    try {
        final LinearFunction function = new LinearFunction(x, y, settings.fitLength);
        double[] parameters = new double[] { function.guess() };
        lvmSolution = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                    public double[][] value(double[] point) throws IllegalArgumentException {
                        return function.jacobian(point);
                    }
                }), new ModelFunction(function), new Target(function.getY()), new Weight(function.getWeights()),
                new InitialGuess(parameters));

        double ss = 0;
        double[] obs = function.getY();
        double[] exp = lvmSolution.getValue();
        for (int i = 0; i < obs.length; i++)
            ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);

        D = lvmSolution.getPoint()[0] / 4;
        Utils.log("Linear fit (%d points) : Gradient = %s, D = %s um^2/s, SS = %f (%d evaluations)", obs.length,
                Utils.rounded(lvmSolution.getPoint()[0], 4), Utils.rounded(D, 4), ss,
                optimizer.getEvaluations());

        // Add the fit to the plot
        plot.setColor(Color.magenta);
        plot.drawLine(0, 0, x[x.length - 1], x[x.length - 1] * 4 * D);
        display(title, plot);
    } catch (TooManyIterationsException e) {
        Utils.log("Failed to fit : Too many iterations (%d)", optimizer.getIterations());
    } catch (ConvergenceException e) {
        Utils.log("Failed to fit : %s", e.getMessage());
    }
    return D;
}

From source file:gdsc.utils.Cell_Outliner.java

/**
 * Find an ellipse that optimises the fit to the polygon detected edges.
 *
 * @param roi
 * @param params
 * @param weightMap
 * @param angle
 * @return
 */
private double[] fitPolygonalCell(PolygonRoi roi, double[] params, FloatProcessor weightMap,
        FloatProcessor angle) {
    // Get an estimate of the starting parameters using the current polygon
    double[] startPoint = params;
    startPoint = estimateStartPoint(roi, weightMap.getWidth(), weightMap.getHeight());

    int maxEval = 2000;
    final DifferentiableEllipticalFitFunction func = new DifferentiableEllipticalFitFunction(roi, weightMap);

    double relativeThreshold = 100 * Precision.EPSILON;
    double absoluteThreshold = 100 * Precision.SAFE_MIN;
    ConvergenceChecker<PointVectorValuePair> checker = new SimplePointChecker<PointVectorValuePair>(
            relativeThreshold, absoluteThreshold);
    double initialStepBoundFactor = 10;
    double costRelativeTolerance = 1e-10;
    double parRelativeTolerance = 1e-10;
    double orthoTolerance = 1e-10;
    double threshold = Precision.SAFE_MIN;

    LevenbergMarquardtOptimizer optimiser = new LevenbergMarquardtOptimizer(initialStepBoundFactor, checker,
            costRelativeTolerance, parRelativeTolerance, orthoTolerance, threshold);

    try {
        PointVectorValuePair solution = optimiser.optimize(new MaxIter(maxEval), new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                    public double[][] value(double[] point) throws IllegalArgumentException {
                        return func.jacobian(point);
                    }
                }), new ModelFunction(func), new Target(func.calculateTarget()),
                new Weight(func.calculateWeights()), new InitialGuess(startPoint));

        if (debug)
            IJ.log(String.format("Eval = %d (Iter = %d), RMS = %f", optimiser.getEvaluations(),
                    optimiser.getIterations(), optimiser.getRMS()));
        return solution.getPointRef();
    } catch (Exception e) {
        IJ.log("Failed to find an elliptical solution, defaulting to polygon");
        e.printStackTrace();
    }

    return null;
}

From source file:gdsc.smlm.ij.plugins.TraceDiffusion.java

/**
 * Fit the jump distance histogram using a cumulative sum as detailed in
 * <p>
 * Update the plot by adding the fit line.
 * 
 * @param jumpDistances
 * @param jdHistogram
 * @param title
 * @param plot
 * @return
 */
private double[][] fitJumpDistance(StoredDataStatistics jumpDistances, double[][] jdHistogram, String title,
        Plot2 plot) {
    final double meanDistance = Math.sqrt(jumpDistances.getMean()) * 1e3;
    final double beta = meanDistance / precision;
    Utils.log(
            "Jump Distance analysis : N = %d, Time = %d frames (%s seconds). Mean Distance = %s nm, Precision = %s nm, Beta = %s",
            jumpDistances.getN(), settings.jumpDistance, Utils.rounded(settings.jumpDistance * exposureTime, 4),
            Utils.rounded(meanDistance, 4), Utils.rounded(precision, 4), Utils.rounded(beta, 4));
    int n = 0;
    int N = 10;
    double[] SS = new double[N];
    Arrays.fill(SS, -1);
    double[] ic = new double[N];
    Arrays.fill(ic, Double.POSITIVE_INFINITY);
    double[][] coefficients = new double[N][];
    double[][] fractions = new double[N][];
    double[][] fitParams = new double[N][];
    double bestIC = Double.POSITIVE_INFINITY;
    int best = -1;

    // Guess the D
    final double estimatedD = jumpDistances.getMean() / 4;
    Utils.log("Estimated D = %s um^2/s", Utils.rounded(estimatedD, 4));

    // Fit using a single population model
    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
    try {
        final JumpDistanceFunction function = new JumpDistanceFunction(jdHistogram[0], jdHistogram[1],
                estimatedD);
        PointVectorValuePair lvmSolution = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                    public double[][] value(double[] point) throws IllegalArgumentException {
                        return function.jacobian(point);
                    }
                }), new ModelFunction(function), new Target(function.getY()), new Weight(function.getWeights()),
                new InitialGuess(function.guess()));

        fitParams[n] = lvmSolution.getPointRef();
        SS[n] = calculateSumOfSquares(function.getY(), lvmSolution.getValueRef());
        ic[n] = Maths.getInformationCriterion(SS[n], function.x.length, 1);
        coefficients[n] = fitParams[n];
        fractions[n] = new double[] { 1 };

        Utils.log("Fit Jump distance (N=%d) : D = %s um^2/s, SS = %f, IC = %s (%d evaluations)", n + 1,
                Utils.rounded(fitParams[n][0], 4), SS[n], Utils.rounded(ic[n], 4), optimizer.getEvaluations());

        bestIC = ic[n];
        best = 0;

        addToPlot(function, fitParams[n], jdHistogram, title, plot, Color.magenta);
    } catch (TooManyIterationsException e) {
        Utils.log("Failed to fit : Too many iterations (%d)", optimizer.getIterations());
    } catch (ConvergenceException e) {
        Utils.log("Failed to fit : %s", e.getMessage());
    }

    n++;

    // Fit using a mixed population model. 
    // Vary n from 2 to N. Stop when the fit fails or the fit is worse.
    int bestMulti = -1;
    double bestMultiIC = Double.POSITIVE_INFINITY;
    while (n < N) {
        // Uses a weighted sum of n exponential functions; each function models a fraction of the particles.
        // An LVM fit cannot constrain the parameters to keep the fractions above zero,
        // so use the CMAESOptimizer, which supports bounded fitting.

        MixedJumpDistanceFunctionMultivariate mixedFunction = new MixedJumpDistanceFunctionMultivariate(
                jdHistogram[0], jdHistogram[1], estimatedD, n + 1);

        double[] lB = mixedFunction.getLowerBounds();
        double[] uB = mixedFunction.getUpperBounds();
        SimpleBounds bounds = new SimpleBounds(lB, uB);

        int maxIterations = 2000;
        double stopFitness = 0; //Double.NEGATIVE_INFINITY;
        boolean isActiveCMA = true;
        int diagonalOnly = 20;
        int checkFeasableCount = 1;
        RandomGenerator random = new Well19937c();
        boolean generateStatistics = false;
        ConvergenceChecker<PointValuePair> checker = new SimpleValueChecker(1e-6, 1e-10);
        // The sigma determines the search range for the variables. It should be 1/3 of the initial search region.
        double[] s = new double[lB.length];
        for (int i = 0; i < s.length; i++)
            s[i] = (uB[i] - lB[i]) / 3;
        OptimizationData sigma = new CMAESOptimizer.Sigma(s);
        OptimizationData popSize = new CMAESOptimizer.PopulationSize(
                (int) (4 + Math.floor(3 * Math.log(mixedFunction.x.length))));

        // Iterate this for stability in the initial guess
        CMAESOptimizer opt = new CMAESOptimizer(maxIterations, stopFitness, isActiveCMA, diagonalOnly,
                checkFeasableCount, random, generateStatistics, checker);

        int evaluations = 0;
        PointValuePair constrainedSolution = null;

        for (int i = 0; i <= settings.fitRestarts; i++) {
            // Try from the initial guess
            try {
                PointValuePair solution = opt.optimize(new InitialGuess(mixedFunction.guess()),
                        new ObjectiveFunction(mixedFunction), GoalType.MINIMIZE, bounds, sigma, popSize,
                        new MaxIter(maxIterations), new MaxEval(maxIterations * 2));
                if (constrainedSolution == null || solution.getValue() < constrainedSolution.getValue()) {
                    evaluations = opt.getEvaluations();
                    constrainedSolution = solution;
                    //Utils.log("[%da] Fit Jump distance (N=%d) : SS = %f (%d evaluations)", i, n + 1,
                    //      solution.getValue(), evaluations);
                }
            } catch (TooManyEvaluationsException e) {
            }

            if (constrainedSolution == null)
                continue;

            // Try from the current optimum
            try {
                PointValuePair solution = opt.optimize(new InitialGuess(constrainedSolution.getPointRef()),
                        new ObjectiveFunction(mixedFunction), GoalType.MINIMIZE, bounds, sigma, popSize,
                        new MaxIter(maxIterations), new MaxEval(maxIterations * 2));
                if (constrainedSolution == null || solution.getValue() < constrainedSolution.getValue()) {
                    evaluations = opt.getEvaluations();
                    constrainedSolution = solution;
                    //Utils.log("[%db] Fit Jump distance (N=%d) : SS = %f (%d evaluations)", i, n + 1,
                    //      solution.getValue(), evaluations);
                }
            } catch (TooManyEvaluationsException e) {
            }
        }

        if (constrainedSolution == null) {
            Utils.log("Failed to fit N=%d", n + 1);
            break;
        }

        fitParams[n] = constrainedSolution.getPointRef();
        SS[n] = constrainedSolution.getValue();

        // TODO - Try a bounded BFGS optimiser

        // Try and improve using a LVM fit
        final MixedJumpDistanceFunctionGradient mixedFunctionGradient = new MixedJumpDistanceFunctionGradient(
                jdHistogram[0], jdHistogram[1], estimatedD, n + 1);

        PointVectorValuePair lvmSolution;
        try {
            lvmSolution = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
                    new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                        public double[][] value(double[] point) throws IllegalArgumentException {
                            return mixedFunctionGradient.jacobian(point);
                        }
                    }), new ModelFunction(mixedFunctionGradient), new Target(mixedFunctionGradient.getY()),
                    new Weight(mixedFunctionGradient.getWeights()), new InitialGuess(fitParams[n]));
            double ss = calculateSumOfSquares(mixedFunctionGradient.getY(), lvmSolution.getValue());
            // All fitted parameters must be above zero
            if (ss < SS[n] && Maths.min(lvmSolution.getPoint()) > 0) {
                //Utils.log("  Re-fitting improved the SS from %s to %s (-%s%%)", Utils.rounded(SS[n], 4),
                //      Utils.rounded(ss, 4), Utils.rounded(100 * (SS[n] - ss) / SS[n], 4));
                fitParams[n] = lvmSolution.getPoint();
                SS[n] = ss;
                evaluations += optimizer.getEvaluations();
            }
        } catch (TooManyIterationsException e) {
            //Utils.log("Failed to re-fit : Too many evaluations (%d)", optimizer.getEvaluations());
        } catch (ConvergenceException e) {
            //Utils.log("Failed to re-fit : %s", e.getMessage());
        }

        // Since the fractions must sum to one we subtract 1 degree of freedom from the number of parameters
        ic[n] = Maths.getInformationCriterion(SS[n], mixedFunction.x.length, fitParams[n].length - 1);

        double[] d = new double[n + 1];
        double[] f = new double[n + 1];
        double sum = 0;
        for (int i = 0; i < d.length; i++) {
            f[i] = fitParams[n][i * 2];
            sum += f[i];
            d[i] = fitParams[n][i * 2 + 1];
        }
        for (int i = 0; i < f.length; i++)
            f[i] /= sum;
        // Sort by coefficient size
        sort(d, f);
        coefficients[n] = d;
        fractions[n] = f;

        Utils.log("Fit Jump distance (N=%d) : D = %s um^2/s (%s), SS = %f, IC = %s (%d evaluations)", n + 1,
                format(d), format(f), SS[n], Utils.rounded(ic[n], 4), evaluations);

        boolean valid = true;
        for (int i = 0; i < f.length; i++) {
            // Check the fit has fractions above the minimum fraction
            if (f[i] < minFraction) {
                Utils.log("Fraction is less than the minimum fraction: %s < %s", Utils.rounded(f[i]),
                        Utils.rounded(minFraction));
                valid = false;
                break;
            }
            // Check the coefficients are different
            if (i + 1 < f.length && d[i] / d[i + 1] < minDifference) {
                Utils.log("Coefficients are not different: %s / %s = %s < %s", Utils.rounded(d[i]),
                        Utils.rounded(d[i + 1]), Utils.rounded(d[i] / d[i + 1]), Utils.rounded(minDifference));
                valid = false;
                break;
            }
        }

        if (!valid)
            break;

        // Store the best model
        if (bestIC > ic[n]) {
            bestIC = ic[n];
            best = n;
        }

        // Store the best multi model
        if (bestMultiIC < ic[n]) {
            break;
        }

        bestMultiIC = ic[n];
        bestMulti = n;

        n++;
    }

    // Add the best fit to the plot and return the parameters.
    if (bestMulti > -1) {
        Function function = new MixedJumpDistanceFunctionMultivariate(jdHistogram[0], jdHistogram[1], 0,
                bestMulti + 1);
        addToPlot(function, fitParams[bestMulti], jdHistogram, title, plot, Color.yellow);
    }

    if (best > -1) {
        Utils.log("Best fit achieved using %d population%s: D = %s um^2/s, Fractions = %s", best + 1,
                (best == 0) ? "" : "s", format(coefficients[best]), format(fractions[best]));
    }

    return (best > -1) ? new double[][] { coefficients[best], fractions[best] } : null;
}

From source file:gdsc.smlm.ij.plugins.pcpalm.PCPALMFitting.java

/**
 * Fits the correlation curve with r>0 to the emulsion model using the estimated density and precision. Parameters
 * must be fit within a tolerance of the starting values.
 *
 * @param gr
 * @param sigmaS
 *            The estimated precision
 * @param proteinDensity
 *            The estimated protein density
 * @return The fitted parameters [precision, density, clusterRadius, clusterDensity, coherence]
 */
private double[] fitEmulsionModel(double[][] gr, double sigmaS, double proteinDensity, String resultColour) {
    final EmulsionModelFunctionGradient myFunction = new EmulsionModelFunctionGradient();
    emulsionModel = myFunction;
    log("Fitting %s: Estimated precision = %f nm, estimated protein density = %g um^-2",
            emulsionModel.getName(), sigmaS, proteinDensity * 1e6);

    emulsionModel.setLogging(true);
    for (int i = offset; i < gr[0].length; i++) {
        // Only fit the curve above the estimated resolution (points below it will be subject to error)
        if (gr[0][i] > sigmaS * fitAboveEstimatedPrecision)
            emulsionModel.addPoint(gr[0][i], gr[1][i]);
    }

    double[] parameters;
    // The model is: sigma, density, range, amplitude, alpha
    double[] initialSolution = new double[] { sigmaS, proteinDensity, sigmaS * 5, 1, sigmaS * 5 };
    int evaluations = 0;

    // Constrain the fitting to be close to the estimated precision (sigmaS) and protein density.
    // LVM fitting does not support constrained fitting so use a bounded optimiser.
    SumOfSquaresModelFunction emulsionModelMulti = new SumOfSquaresModelFunction(emulsionModel);
    double[] x = emulsionModelMulti.x;
    double[] y = emulsionModelMulti.y;

    // Range should be equal to the first time the g(r) curve crosses 1
    for (int i = 0; i < x.length; i++)
        if (y[i] < 1) {
            initialSolution[4] = initialSolution[2] = (i > 0) ? (x[i - 1] + x[i]) * 0.5 : x[i];
            break;
        }

    // Put some bounds around the initial guess. Use the fitting tolerance (in %) if provided.
    double limit = (fittingTolerance > 0) ? 1 + fittingTolerance / 100 : 2;
    double[] lB = new double[] { initialSolution[0] / limit, initialSolution[1] / limit, 0, 0, 0 };
    // The amplitude and range should not extend beyond the limits of the g(r) curve.
    // TODO - Find out the expected range for the alpha parameter.  
    double[] uB = new double[] { initialSolution[0] * limit, initialSolution[1] * limit, Maths.max(x),
            Maths.max(gr[1]), Maths.max(x) * 2 };
    log("Fitting %s using a bounded search: %s < precision < %s & %s < density < %s", emulsionModel.getName(),
            Utils.rounded(lB[0], 4), Utils.rounded(uB[0], 4), Utils.rounded(lB[1] * 1e6, 4),
            Utils.rounded(uB[1] * 1e6, 4));

    PointValuePair constrainedSolution = runBoundedOptimiser(gr, initialSolution, lB, uB, emulsionModelMulti);

    if (constrainedSolution == null)
        return null;

    parameters = constrainedSolution.getPointRef();
    evaluations = boundedEvaluations;

    // Refit using an LVM
    if (useLSE) {
        log("Re-fitting %s using a gradient optimisation", emulsionModel.getName());
        LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
        PointVectorValuePair lvmSolution;
        try {
            lvmSolution = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
                    new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                        public double[][] value(double[] point) throws IllegalArgumentException {
                            return myFunction.jacobian(point);
                        }
                    }), new ModelFunction(myFunction), new Target(myFunction.getY()),
                    new Weight(myFunction.getWeights()), new InitialGuess(parameters));
            evaluations += optimizer.getEvaluations();

            double ss = 0;
            double[] obs = emulsionModel.getY();
            double[] exp = lvmSolution.getValue();
            for (int i = 0; i < obs.length; i++)
                ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
            if (ss < constrainedSolution.getValue()) {
                log("Re-fitting %s improved the SS from %s to %s (-%s%%)", emulsionModel.getName(),
                        Utils.rounded(constrainedSolution.getValue(), 4), Utils.rounded(ss, 4),
                        Utils.rounded(
                                100 * (constrainedSolution.getValue() - ss) / constrainedSolution.getValue(),
                                4));
                parameters = lvmSolution.getPoint();
            }
        } catch (TooManyIterationsException e) {
            log("Failed to re-fit %s: Too many iterations (%d)", emulsionModel.getName(),
                    optimizer.getIterations());
        } catch (ConvergenceException e) {
            log("Failed to re-fit %s: %s", emulsionModel.getName(), e.getMessage());
        }
    }

    emulsionModel.setLogging(false);

    // Ensure the width is positive
    parameters[0] = Math.abs(parameters[0]);
    //parameters[2] = Math.abs(parameters[2]);

    double ss = 0;
    double[] obs = emulsionModel.getY();
    double[] exp = emulsionModel.value(parameters);
    for (int i = 0; i < obs.length; i++)
        ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
    ic3 = Maths.getInformationCriterion(ss, emulsionModel.size(), parameters.length);

    final double fitSigmaS = parameters[0];
    final double fitProteinDensity = parameters[1];
    final double domainRadius = parameters[2]; //The radius of the cluster domain
    final double domainDensity = parameters[3]; //The density of the cluster domain
    final double coherence = parameters[4]; //The coherence length between circles

    // This is from the PC-PALM paper. It may not be correct for the emulsion model.
    final double nCluster = 2 * domainDensity * Math.PI * domainRadius * domainRadius * fitProteinDensity;

    double e1 = parameterDrift(sigmaS, fitSigmaS);
    double e2 = parameterDrift(proteinDensity, fitProteinDensity);

    log("  %s fit: SS = %f. cAIC = %f. %d evaluations", emulsionModel.getName(), ss, ic3, evaluations);
    log("  %s parameters:", emulsionModel.getName());
    log("    Average precision = %s nm (%s%%)", Utils.rounded(fitSigmaS, 4), Utils.rounded(e1, 4));
    log("    Average protein density = %s um^-2 (%s%%)", Utils.rounded(fitProteinDensity * 1e6, 4),
            Utils.rounded(e2, 4));
    log("    Domain radius = %s nm", Utils.rounded(domainRadius, 4));
    log("    Domain density = %s", Utils.rounded(domainDensity, 4));
    log("    Domain coherence = %s", Utils.rounded(coherence, 4));
    log("    nCluster = %s", Utils.rounded(nCluster, 4));

    // Check the fitted parameters are within tolerance of the initial estimates
    valid2 = true;
    if (fittingTolerance > 0 && (Math.abs(e1) > fittingTolerance || Math.abs(e2) > fittingTolerance)) {
        log("  Failed to fit %s within tolerance (%s%%): Average precision = %f nm (%s%%), average protein density = %g um^-2 (%s%%)",
                emulsionModel.getName(), Utils.rounded(fittingTolerance, 4), fitSigmaS, Utils.rounded(e1, 4),
                fitProteinDensity * 1e6, Utils.rounded(e2, 4));
        valid2 = false;
    }

    // Check extra parameters. Domain radius should be higher than the precision. Density should be positive
    if (domainRadius < fitSigmaS) {
        log("  Failed to fit %s: Domain radius is smaller than the average precision (%s < %s)",
                emulsionModel.getName(), Utils.rounded(domainRadius, 4), Utils.rounded(fitSigmaS, 4));
        valid2 = false;
    }
    if (domainDensity < 0) {
        log("  Failed to fit %s: Domain density is negative (%s)", emulsionModel.getName(),
                Utils.rounded(domainDensity, 4));
        valid2 = false;
    }

    if (ic3 > ic1) {
        log("  Failed to fit %s - Information Criterion has increased %s%%", emulsionModel.getName(),
                Utils.rounded((100 * (ic3 - ic1) / ic1), 4));
        valid2 = false;
    }

    addResult(emulsionModel.getName(), resultColour, valid2, fitSigmaS, fitProteinDensity, domainRadius,
            domainDensity, nCluster, coherence, ic3);

    return parameters;
}

From source file:org.eclipse.dawnsci.analysis.dataset.roi.fitting.CircleCoordinatesFunction.java

@Override
public MultivariateMatrixFunction jacobian() {
    return new MultivariateMatrixFunction() {
        @Override
        public double[][] value(double[] p) throws IllegalArgumentException {
            calculateJacobian(p);
            return j;
        }
    };
}