List of usage examples for org.apache.commons.math3.optim.MaxIter
public MaxIter(int max)
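MaxIter is an OptimizationData marker that caps the number of iterations an optimizer may perform; exceeding the cap raises TooManyIterationsException. Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects) of the typical pattern of passing MaxIter alongside MaxEval to a commons-math3 optimizer; the quadratic objective, tolerances and caps are illustrative assumptions.

import java.util.Arrays;
import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.MaxIter;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.NelderMeadSimplex;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.SimplexOptimizer;

public class MaxIterSketch {
    public static void main(String[] args) {
        // Illustrative objective: a quadratic bowl with its minimum at (1, 2).
        MultivariateFunction f = p -> (p[0] - 1) * (p[0] - 1) + (p[1] - 2) * (p[1] - 2);

        SimplexOptimizer optimizer = new SimplexOptimizer(1e-10, 1e-12);
        PointValuePair result = optimizer.optimize(
                new MaxIter(500),   // iteration cap; TooManyIterationsException if exceeded
                new MaxEval(1000),  // separate cap on objective-function evaluations
                new ObjectiveFunction(f),
                GoalType.MINIMIZE,
                new InitialGuess(new double[] { 0, 0 }),
                new NelderMeadSimplex(2));

        System.out.println(Arrays.toString(result.getPoint())); // approximately [1.0, 2.0]
    }
}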
From source file: com.google.cloud.genomics.dataflow.functions.verifybamid.Solver.java
/**
 * Maximizes a univariate function using a grid search followed by Brent's algorithm.
 *
 * @param fn the likelihood function to maximize
 * @param gridStart the lower bound for the grid search
 * @param gridEnd the upper bound for the grid search
 * @param gridStep step size for the grid search
 * @param relErr relative error tolerance for Brent's algorithm
 * @param absErr absolute error tolerance for Brent's algorithm
 * @param maxIter maximum number of iterations to perform in Brent's algorithm
 * @param maxEval maximum number of likelihood function evaluations in Brent's algorithm
 *
 * @return the value of the parameter that maximizes the function
 */
public static double maximize(UnivariateFunction fn, double gridStart, double gridEnd, double gridStep,
        double relErr, double absErr, int maxIter, int maxEval) {
    Interval interval = gridSearch(fn, gridStart, gridEnd, gridStep);
    BrentOptimizer bo = new BrentOptimizer(relErr, absErr);
    UnivariatePointValuePair max = bo.optimize(new MaxIter(maxIter), new MaxEval(maxEval),
            new SearchInterval(interval.getInf(), interval.getSup()),
            new UnivariateObjectiveFunction(fn), GoalType.MAXIMIZE);
    return max.getPoint();
}
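For context, a call to this helper might look like the following sketch; the lambda objective, tolerances, grid bounds and caps are illustrative assumptions, not part of the original source.

import org.apache.commons.math3.analysis.UnivariateFunction;

public class SolverUsageSketch {
    public static void main(String[] args) {
        // Hypothetical objective: smooth and unimodal on [0, 1], peak at x = 0.3.
        UnivariateFunction fn = x -> -(x - 0.3) * (x - 0.3);
        double argmax = Solver.maximize(fn,
                0.0, 1.0,     // gridStart, gridEnd
                0.01,         // gridStep
                1e-8, 1e-10,  // relErr, absErr for BrentOptimizer
                100,          // maxIter, forwarded as new MaxIter(maxIter)
                1000);        // maxEval, forwarded as new MaxEval(maxEval)
        System.out.println(argmax); // expected to be close to 0.3
    }
}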
From source file: gdsc.smlm.fitting.nonlinear.ApacheLVMFitter.java
public FitStatus fit(int n, double[] y, double[] y_fit, double[] a, double[] a_dev, double[] error, double noise) {
    numberOfFittedPoints = n;
    try {
        // Different convergence thresholds seem to have no effect on the resulting fit, only the number of
        // iterations for convergence
        final double initialStepBoundFactor = 100;
        final double costRelativeTolerance = 1e-10;
        final double parRelativeTolerance = 1e-10;
        final double orthoTolerance = 1e-10;
        final double threshold = Precision.SAFE_MIN;

        // Extract the parameters to be fitted
        final double[] initialSolution = getInitialSolution(a);

        // TODO - Pass in more advanced stopping criteria.

        // Create the target and weight arrays
        final double[] yd = new double[n];
        final double[] w = new double[n];
        for (int i = 0; i < n; i++) {
            yd[i] = y[i];
            w[i] = 1;
        }

        LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer(initialStepBoundFactor,
                costRelativeTolerance, parRelativeTolerance, orthoTolerance, threshold);

        PointVectorValuePair optimum = optimizer.optimize(new MaxIter(getMaxEvaluations()),
                new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunctionWrapper(f, a, n)),
                new ModelFunction(new MultivariateVectorFunctionWrapper(f, a, n)), new Target(yd),
                new Weight(w), new InitialGuess(initialSolution));

        final double[] parameters = optimum.getPoint();
        setSolution(a, parameters);
        iterations = optimizer.getIterations();
        evaluations = optimizer.getEvaluations();
        if (a_dev != null) {
            double[][] covar = optimizer.computeCovariances(parameters, threshold);
            setDeviations(a_dev, covar);
        }
        // Compute fitted function if desired
        if (y_fit != null) {
            f.initialise(a);
            for (int i = 0; i < n; i++)
                y_fit[i] = f.eval(i);
        }

        residualSumOfSquares = error[0] = optimizer.getChiSquare();
        totalSumOfSquares = getSumOfSquares(n, y);
    } catch (TooManyEvaluationsException e) {
        return FitStatus.FAILED_TO_CONVERGE;
    } catch (ConvergenceException e) {
        // Occurs when QR decomposition fails - mark as a singular non-linear model (no solution)
        return FitStatus.SINGULAR_NON_LINEAR_MODEL;
    } catch (Exception e) {
        // TODO - Find out the other exceptions from the fitter and add return values to match.
        return FitStatus.UNKNOWN;
    }
    return FitStatus.OK;
}
From source file: de.bund.bfr.math.MultivariateOptimization.java
@Override
public Result optimize(int nParameterSpace, int nOptimizations, boolean stopWhenSuccessful,
        Map<String, Double> minStartValues, Map<String, Double> maxStartValues, int maxIterations,
        DoubleConsumer progessListener, ExecutionContext exec) throws CanceledExecutionException {
    if (exec != null) {
        exec.checkCanceled();
    }

    progessListener.accept(0.0);

    List<ParamRange> ranges = MathUtils.getParamRanges(parameters, minStartValues, maxStartValues,
            nParameterSpace);

    ranges.set(parameters.indexOf(sdParam), new ParamRange(1.0, 1, 1.0));

    List<StartValues> startValuesList = MathUtils.createStartValuesList(ranges, nOptimizations,
            values -> optimizerFunction.value(Doubles.toArray(values)),
            progress -> progessListener.accept(0.5 * progress), exec);

    Result result = new Result();
    AtomicInteger currentIteration = new AtomicInteger();
    SimplexOptimizer optimizer = new SimplexOptimizer(new SimpleValueChecker(1e-10, 1e-10) {

        @Override
        public boolean converged(int iteration, PointValuePair previous, PointValuePair current) {
            if (super.converged(iteration, previous, current)) {
                return true;
            }

            return currentIteration.incrementAndGet() >= maxIterations;
        }
    });
    int count = 0;

    for (StartValues startValues : startValuesList) {
        if (exec != null) {
            exec.checkCanceled();
        }

        progessListener.accept(0.5 * count++ / startValuesList.size() + 0.5);

        try {
            PointValuePair optimizerResults = optimizer.optimize(new MaxEval(Integer.MAX_VALUE),
                    new MaxIter(maxIterations), new InitialGuess(Doubles.toArray(startValues.getValues())),
                    new ObjectiveFunction(optimizerFunction), GoalType.MAXIMIZE,
                    new NelderMeadSimplex(parameters.size()));
            double logLikelihood = optimizerResults.getValue() != null ? optimizerResults.getValue()
                    : Double.NaN;

            if (result.logLikelihood == null || logLikelihood > result.logLikelihood) {
                result = getResults(optimizerResults);

                if (result.logLikelihood == 0.0 || stopWhenSuccessful) {
                    break;
                }
            }
        } catch (TooManyEvaluationsException | TooManyIterationsException | ConvergenceException e) {
        }
    }

    return result;
}
From source file: edu.mit.isos.app.water.WaterControllerState.java
@Override
public void iterateTick(ElementImpl element, long duration) {
    Set<WaterPlant> plants = new HashSet<WaterPlant>();
    Set<WaterPipeline> pipelines = new HashSet<WaterPipeline>();
    Set<WaterElementImpl> systems = new HashSet<WaterElementImpl>();
    for (ElementImpl e : elements) {
        if (e instanceof WaterPlant && ((WaterPlant) e).isOperating()) {
            plants.add((WaterPlant) e);
        }
        if (e instanceof WaterPipeline) {
            pipelines.add((WaterPipeline) e);
        }
        if (e instanceof WaterElementImpl) {
            systems.add((WaterElementImpl) e);
        }
    }

    List<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
    double[] costCoefficients = new double[elements.size()];
    double[] initialValues = new double[elements.size()];

    for (WaterElementImpl e : systems) {
        // cost of lifting aquifer is 1
        costCoefficients[elements.indexOf(e)] = 1;
        initialValues[elements.indexOf(e)] = ((WaterElementState) e.getState()).getProduced(e, duration)
                .getQuantity(ResourceType.WATER);
    }

    for (WaterPlant e : plants) {
        initialValues[elements.indexOf(e)] = e.getOperatingState().getProduced(e, duration)
                .getQuantity(ResourceType.WATER);
        double[] productionCoefficients = new double[elements.size()];
        productionCoefficients[elements.indexOf(e)] = 1;
        constraints.add(new LinearConstraint(productionCoefficients, Relationship.LEQ,
                e.getOperatingState().productionCapacity.multiply(duration).getQuantity(ResourceType.WATER)));
    }

    for (WaterPipeline e : pipelines) {
        initialValues[elements.indexOf(e)] = e.getOperatingState().getOutput(e, duration)
                .getQuantity(ResourceType.WATER);
        double[] outputCoefficients = new double[elements.size()];
        outputCoefficients[elements.indexOf(e)] = 1;
        constraints.add(new LinearConstraint(outputCoefficients, Relationship.LEQ,
                e.getOperatingState().outputCapacity.multiply(duration).getQuantity(ResourceType.WATER)));
    }

    for (WaterElementImpl e : systems) {
        double[] flowCoefficients = new double[elements.size()];
        flowCoefficients[elements.indexOf(e)] = 1; // system production
        for (WaterPlant plant : plants) {
            if (plant.getLocation().equals(e.getLocation())) {
                flowCoefficients[elements.indexOf(plant)] = 1; // plant production
            }
        }
        for (WaterPipeline pipeline : pipelines) {
            if (pipeline.getLocation().getOrigin().equals(e.getLocation().getOrigin())) {
                flowCoefficients[elements.indexOf(pipeline)] = -1 / pipeline.getOperatingState().eta; // pipeline input
            } else if (pipeline.getLocation().getDestination().equals(e.getLocation().getOrigin())) {
                flowCoefficients[elements.indexOf(pipeline)] = 1; // pipeline output
            }
        }
        constraints.add(new LinearConstraint(flowCoefficients, Relationship.EQ,
                ((WaterElementState) e.getState()).getSent(e, duration).getQuantity(ResourceType.WATER)));
    }

    try {
        // Run optimization and get results.
        PointValuePair output = new SimplexSolver().optimize(GoalType.MINIMIZE, new MaxIter(1000),
                new NonNegativeConstraint(true), new LinearConstraintSet(constraints),
                new LinearObjectiveFunction(costCoefficients, 0d), new InitialGuess(initialValues));
        for (WaterElementImpl e : systems) {
            e.getOperatingState().setProduced(e,
                    ResourceFactory.create(ResourceType.WATER, output.getPoint()[elements.indexOf(e)]), duration);
        }
        for (WaterPlant e : plants) {
            e.getOperatingState().setProduced(e,
                    ResourceFactory.create(ResourceType.WATER, output.getPoint()[elements.indexOf(e)]), duration);
        }
        for (WaterPipeline e : pipelines) {
            e.getOperatingState().setOutput(e,
                    ResourceFactory.create(ResourceType.WATER, output.getPoint()[elements.indexOf(e)]), duration);
        }
    } catch (TooManyIterationsException ignore) {
        // Don't overwrite existing values.
        ignore.printStackTrace();
    } catch (NoFeasibleSolutionException ignore) {
        // Don't overwrite existing values.
        ignore.printStackTrace();
    }

    for (WaterElementImpl system : systems) {
        Resource received = system.getOperatingState().getConsumed(system, duration)
                .get(ResourceType.ELECTRICITY);
        for (WaterPlant plant : plants) {
            if (plant.getLocation().equals(system.getLocation())) {
                received = received.add(
                        plant.getOperatingState().getConsumed(plant, duration).get(ResourceType.ELECTRICITY));
            }
        }
        for (WaterPipeline pipeline : pipelines) {
            if (pipeline.getLocation().getOrigin().equals(system.getLocation().getOrigin())) {
                received = received.add(pipeline.getOperatingState().getInput(pipeline, duration)
                        .get(ResourceType.ELECTRICITY));
            }
        }
        system.getOperatingState().setReceived(system, received, duration);
    }
}
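The SimplexSolver call above is the linear-programming flavour of this API: here MaxIter bounds the number of simplex iterations rather than objective-function evaluations. A stripped-down sketch of the same pattern, assuming a toy two-variable problem (the cost coefficients and constraint are illustrative only):

import java.util.Arrays;
import org.apache.commons.math3.optim.MaxIter;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.linear.LinearConstraint;
import org.apache.commons.math3.optim.linear.LinearConstraintSet;
import org.apache.commons.math3.optim.linear.LinearObjectiveFunction;
import org.apache.commons.math3.optim.linear.NonNegativeConstraint;
import org.apache.commons.math3.optim.linear.Relationship;
import org.apache.commons.math3.optim.linear.SimplexSolver;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;

public class LpMaxIterSketch {
    public static void main(String[] args) {
        // Minimize x + 2y subject to x + y >= 4, with x, y >= 0.
        LinearObjectiveFunction cost = new LinearObjectiveFunction(new double[] { 1, 2 }, 0);
        LinearConstraintSet constraints = new LinearConstraintSet(
                new LinearConstraint(new double[] { 1, 1 }, Relationship.GEQ, 4));

        PointValuePair solution = new SimplexSolver().optimize(
                new MaxIter(100), // bound the number of simplex iterations
                cost, constraints,
                GoalType.MINIMIZE,
                new NonNegativeConstraint(true));

        System.out.println(Arrays.toString(solution.getPoint())); // expected [4.0, 0.0]
    }
}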
From source file: com.insightml.math.optimization.AbstractOptimizable.java
private PointValuePair optimize(final MultivariateOptimizer optimizer, final double[] initialValues,
        final OptimizationData... data) {
    final OptimizationData[] d = new OptimizationData[5 + data.length];
    d[0] = new MaxIter(convergence.maxIt + 1);
    d[1] = new MaxEval(convergence.maxIt * 2);
    d[2] = new ObjectiveFunction(this);
    d[3] = GoalType.MAXIMIZE;
    d[4] = new InitialGuess(fixBounds(initialValues));
    for (int i = 0; i < data.length; ++i) {
        d[5 + i] = data[i];
    }
    return optimizer.optimize(d);
}
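Because MultivariateOptimizer.optimize(OptimizationData...) is a varargs method, building the array up front (as above) is interchangeable with passing the same items inline. The fragment below is a hypothetical equivalent call for a concrete optimizer; the objective, tolerances and iteration count are illustrative, and the NelderMeadSimplex stands in for the extra data a SimplexOptimizer needs (supplied via the data parameter in the method above).

import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.MaxIter;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.SimpleValueChecker;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.NelderMeadSimplex;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.SimplexOptimizer;

public class InlineOptimizationDataSketch {
    public static void main(String[] args) {
        int maxIt = 200; // stands in for convergence.maxIt above (hypothetical value)
        SimplexOptimizer optimizer = new SimplexOptimizer(new SimpleValueChecker(1e-8, 1e-8));
        PointValuePair best = optimizer.optimize(
                new MaxIter(maxIt + 1),
                new MaxEval(maxIt * 2),
                new ObjectiveFunction(p -> -(p[0] * p[0] + p[1] * p[1])), // toy concave objective
                GoalType.MAXIMIZE,
                new InitialGuess(new double[] { 1, 1 }),
                new NelderMeadSimplex(2)); // extra OptimizationData required by SimplexOptimizer
        System.out.println(best.getValue()); // approximately 0.0, attained near the origin
    }
}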
From source file: gdsc.smlm.fitting.BinomialFitter.java
/** * Fit the binomial distribution (n,p) to the cumulative histogram. Performs fitting assuming a fixed n value and * attempts to optimise p./*from ww w . j a va 2 s .c om*/ * * @param histogram * The input histogram * @param mean * The histogram mean (used to estimate p). Calculated if NaN. * @param n * The n to evaluate * @param zeroTruncated * True if the model should ignore n=0 (zero-truncated binomial) * @return The best fit (n, p) * @throws IllegalArgumentException * If any of the input data values are negative * @throws IllegalArgumentException * If any fitting a zero truncated binomial and there are no values above zero */ public PointValuePair fitBinomial(double[] histogram, double mean, int n, boolean zeroTruncated) { if (Double.isNaN(mean)) mean = getMean(histogram); if (zeroTruncated && histogram[0] > 0) { log("Fitting zero-truncated histogram but there are zero values - Renormalising to ignore zero"); double cumul = 0; for (int i = 1; i < histogram.length; i++) cumul += histogram[i]; if (cumul == 0) throw new IllegalArgumentException( "Fitting zero-truncated histogram but there are no non-zero values"); histogram[0] = 0; for (int i = 1; i < histogram.length; i++) histogram[i] /= cumul; } int nFittedPoints = Math.min(histogram.length, n + 1) - ((zeroTruncated) ? 1 : 0); if (nFittedPoints < 1) { log("No points to fit (%d): Histogram.length = %d, n = %d, zero-truncated = %b", nFittedPoints, histogram.length, n, zeroTruncated); return null; } // The model is only fitting the probability p // For a binomial n*p = mean => p = mean/n double[] initialSolution = new double[] { FastMath.min(mean / n, 1) }; // Create the function BinomialModelFunction function = new BinomialModelFunction(histogram, n, zeroTruncated); double[] lB = new double[1]; double[] uB = new double[] { 1 }; SimpleBounds bounds = new SimpleBounds(lB, uB); // Fit // CMAESOptimizer or BOBYQAOptimizer support bounds // CMAESOptimiser based on Matlab code: // https://www.lri.fr/~hansen/cmaes.m // Take the defaults from the Matlab documentation int maxIterations = 2000; double stopFitness = 0; //Double.NEGATIVE_INFINITY; boolean isActiveCMA = true; int diagonalOnly = 0; int checkFeasableCount = 1; RandomGenerator random = new Well19937c(); boolean generateStatistics = false; ConvergenceChecker<PointValuePair> checker = new SimpleValueChecker(1e-6, 1e-10); // The sigma determines the search range for the variables. It should be 1/3 of the initial search region. OptimizationData sigma = new CMAESOptimizer.Sigma(new double[] { (uB[0] - lB[0]) / 3 }); OptimizationData popSize = new CMAESOptimizer.PopulationSize((int) (4 + Math.floor(3 * Math.log(2)))); try { PointValuePair solution = null; boolean noRefit = maximumLikelihood; if (n == 1 && zeroTruncated) { // No need to fit solution = new PointValuePair(new double[] { 1 }, 0); noRefit = true; } else { GoalType goalType = (maximumLikelihood) ? 
GoalType.MAXIMIZE : GoalType.MINIMIZE; // Iteratively fit CMAESOptimizer opt = new CMAESOptimizer(maxIterations, stopFitness, isActiveCMA, diagonalOnly, checkFeasableCount, random, generateStatistics, checker); for (int iteration = 0; iteration <= fitRestarts; iteration++) { try { // Start from the initial solution PointValuePair result = opt.optimize(new InitialGuess(initialSolution), new ObjectiveFunction(function), goalType, bounds, sigma, popSize, new MaxIter(maxIterations), new MaxEval(maxIterations * 2)); //System.out.printf("CMAES Iter %d initial = %g (%d)\n", iteration, result.getValue(), // opt.getEvaluations()); if (solution == null || result.getValue() < solution.getValue()) { solution = result; } } catch (TooManyEvaluationsException e) { } catch (TooManyIterationsException e) { } if (solution == null) continue; try { // Also restart from the current optimum PointValuePair result = opt.optimize(new InitialGuess(solution.getPointRef()), new ObjectiveFunction(function), goalType, bounds, sigma, popSize, new MaxIter(maxIterations), new MaxEval(maxIterations * 2)); //System.out.printf("CMAES Iter %d restart = %g (%d)\n", iteration, result.getValue(), // opt.getEvaluations()); if (result.getValue() < solution.getValue()) { solution = result; } } catch (TooManyEvaluationsException e) { } catch (TooManyIterationsException e) { } } if (solution == null) return null; } if (noRefit) { // Although we fit the log-likelihood, return the sum-of-squares to allow // comparison across different n double p = solution.getPointRef()[0]; double ss = 0; double[] obs = function.p; double[] exp = function.getP(p); for (int i = 0; i < obs.length; i++) ss += (obs[i] - exp[i]) * (obs[i] - exp[i]); return new PointValuePair(solution.getPointRef(), ss); } // We can do a LVM refit if the number of fitted points is more than 1 else if (nFittedPoints > 1) { // Improve SS fit with a gradient based LVM optimizer LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer(); try { final BinomialModelFunctionGradient gradientFunction = new BinomialModelFunctionGradient( histogram, n, zeroTruncated); PointVectorValuePair lvmSolution = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE), new ModelFunctionJacobian(new MultivariateMatrixFunction() { public double[][] value(double[] point) throws IllegalArgumentException { return gradientFunction.jacobian(point); } }), new ModelFunction(gradientFunction), new Target(gradientFunction.p), new Weight(gradientFunction.getWeights()), new InitialGuess(solution.getPointRef())); double ss = 0; double[] obs = gradientFunction.p; double[] exp = lvmSolution.getValue(); for (int i = 0; i < obs.length; i++) ss += (obs[i] - exp[i]) * (obs[i] - exp[i]); // Check the pValue is valid since the LVM is not bounded. double p = lvmSolution.getPointRef()[0]; if (ss < solution.getValue() && p <= 1 && p >= 0) { //log("Re-fitting improved the SS from %s to %s (-%s%%)", // Utils.rounded(solution.getValue(), 4), Utils.rounded(ss, 4), // Utils.rounded(100 * (solution.getValue() - ss) / solution.getValue(), 4)); return new PointValuePair(lvmSolution.getPoint(), ss); } } catch (TooManyIterationsException e) { log("Failed to re-fit: Too many iterations (%d)", optimizer.getIterations()); } catch (ConvergenceException e) { log("Failed to re-fit: %s", e.getMessage()); } catch (Exception e) { // Ignore this ... } } return solution; } catch (Exception e) { log("Failed to fit Binomial distribution with N=%d : %s", n, e.getMessage()); } return null; }
From source file: gdsc.smlm.fitting.nonlinear.MaximumLikelihoodFitter.java
public FitStatus fit(int n, double[] y, double[] y_fit, double[] a, double[] a_dev, double[] error, double noise) { numberOfFittedPoints = n;/* ww w. j av a2 s . c o m*/ LikelihoodWrapper maximumLikelihoodFunction; // We can use different likelihood wrapper functions: switch (likelihoodFunction) { case POISSON_GAMMA_GAUSSIAN: // Poisson-Gamma-Gaussian - EM-CCD data if (alpha > 0 && sigma > 0) { maximumLikelihoodFunction = new PoissonGammaGaussianLikelihoodWrapper(f, a, y, n, alpha, sigma); break; } case POISSON_GAUSSIAN: // Poisson-Gaussian - CCD data if (sigma > 0) { maximumLikelihoodFunction = new PoissonGaussianLikelihoodWrapper(f, a, y, n, sigma); break; } case POISSON: default: // Poisson - most counting data maximumLikelihoodFunction = new PoissonLikelihoodWrapper(f, a, y, n); } // Check if the method requires the gradient but it cannot be computed if (searchMethod.usesGradient && !maximumLikelihoodFunction.canComputeGradient()) { maximumLikelihoodFunction = new PoissonLikelihoodWrapper(f, a, y, n); } try { double[] startPoint = getInitialSolution(a); PointValuePair optimum = null; if (searchMethod == SearchMethod.POWELL || searchMethod == SearchMethod.POWELL_BOUNDED) { // Non-differentiable version using Powell Optimiser // This is as per the method in Numerical Recipes 10.5 (Direction Set (Powell's) method) // I could extend the optimiser and implement bounds on the directions moved. However the mapping // adapter seems to work OK. final boolean basisConvergence = false; // Perhaps these thresholds should be tighter? // The default is to use the sqrt() of the overall tolerance //final double lineRel = FastMath.sqrt(relativeThreshold); //final double lineAbs = FastMath.sqrt(absoluteThreshold); //final double lineRel = relativeThreshold * 1e2; //final double lineAbs = absoluteThreshold * 1e2; // Since we are fitting only a small number of parameters then just use the same tolerance // for each search direction final double lineRel = relativeThreshold; final double lineAbs = absoluteThreshold; CustomPowellOptimizer o = new CustomPowellOptimizer(relativeThreshold, absoluteThreshold, lineRel, lineAbs, null, basisConvergence); OptimizationData maxIterationData = null; if (getMaxIterations() > 0) maxIterationData = new MaxIter(getMaxIterations()); if (searchMethod == SearchMethod.POWELL) { if (powellFunction == null) { // We must map all the parameters into the same range. This is done in the Mortensen MLE // Python code by using the sqrt of the number of photons and background. 
if (mapGaussian) { Gaussian2DFunction gf = (Gaussian2DFunction) f; // Re-map signal and background using the sqrt int[] indices = gf.gradientIndices(); int[] map = new int[indices.length]; int count = 0; // Background is always first if (indices[0] == Gaussian2DFunction.BACKGROUND) { map[count++] = 0; } // Look for the Signal in multiple peak 2D Gaussians for (int i = 1; i < indices.length; i++) if (indices[i] % 6 == Gaussian2DFunction.SIGNAL) { map[count++] = i; } if (count > 0) { powellFunction = new MappedMultivariateLikelihood(maximumLikelihoodFunction, Arrays.copyOf(map, count)); } } if (powellFunction == null) { powellFunction = new MultivariateLikelihood(maximumLikelihoodFunction); } } // Update the maximum likelihood function in the Powell function wrapper powellFunction.fun = maximumLikelihoodFunction; OptimizationData positionChecker = null; // new org.apache.commons.math3.optim.PositionChecker(relativeThreshold, absoluteThreshold); if (powellFunction.isMapped()) { MappedMultivariateLikelihood adapter = (MappedMultivariateLikelihood) powellFunction; optimum = o.optimize(maxIterationData, new MaxEval(getMaxEvaluations()), new ObjectiveFunction(powellFunction), GoalType.MINIMIZE, new InitialGuess(adapter.map(startPoint)), positionChecker); double[] solution = adapter.unmap(optimum.getPointRef()); optimum = new PointValuePair(solution, optimum.getValue()); } else { optimum = o.optimize(maxIterationData, new MaxEval(getMaxEvaluations()), new ObjectiveFunction(powellFunction), GoalType.MINIMIZE, new InitialGuess(startPoint), positionChecker); } } else { // Try using the mapping adapter for a bounded Powell search MultivariateFunctionMappingAdapter adapter = new MultivariateFunctionMappingAdapter( new MultivariateLikelihood(maximumLikelihoodFunction), lower, upper); optimum = o.optimize(maxIterationData, new MaxEval(getMaxEvaluations()), new ObjectiveFunction(adapter), GoalType.MINIMIZE, new InitialGuess(adapter.boundedToUnbounded(startPoint))); double[] solution = adapter.unboundedToBounded(optimum.getPointRef()); optimum = new PointValuePair(solution, optimum.getValue()); } iterations = o.getIterations(); evaluations = o.getEvaluations(); } else if (searchMethod == SearchMethod.BOBYQA) { // Differentiable approximation using Powell's BOBYQA algorithm. // This is slower than the Powell optimiser and requires a high number of evaluations. int numberOfInterpolationPoints = this.getNumberOfFittedParameters() + 2; BOBYQAOptimizer o = new BOBYQAOptimizer(numberOfInterpolationPoints); optimum = o.optimize(new MaxEval(getMaxEvaluations()), new ObjectiveFunction(new MultivariateLikelihood(maximumLikelihoodFunction)), GoalType.MINIMIZE, new InitialGuess(startPoint), new SimpleBounds(lower, upper)); iterations = o.getIterations(); evaluations = o.getEvaluations(); } else if (searchMethod == SearchMethod.CMAES) { // TODO - Understand why the CMAES optimiser does not fit very well on test data. It appears // to converge too early and the likelihood scores are not as low as the other optimisers. // CMAESOptimiser based on Matlab code: // https://www.lri.fr/~hansen/cmaes.m // Take the defaults from the Matlab documentation double stopFitness = 0; //Double.NEGATIVE_INFINITY; boolean isActiveCMA = true; int diagonalOnly = 0; int checkFeasableCount = 1; RandomGenerator random = new Well19937c(); boolean generateStatistics = false; // The sigma determines the search range for the variables. It should be 1/3 of the initial search region. 
double[] sigma = new double[lower.length]; for (int i = 0; i < sigma.length; i++) sigma[i] = (upper[i] - lower[i]) / 3; int popSize = (int) (4 + Math.floor(3 * Math.log(sigma.length))); // The CMAES optimiser is random and restarting can overcome problems with quick convergence. // The Apache commons documentations states that convergence should occur between 30N and 300N^2 // function evaluations final int n30 = FastMath.min(sigma.length * sigma.length * 30, getMaxEvaluations() / 2); evaluations = 0; OptimizationData[] data = new OptimizationData[] { new InitialGuess(startPoint), new CMAESOptimizer.PopulationSize(popSize), new MaxEval(getMaxEvaluations()), new CMAESOptimizer.Sigma(sigma), new ObjectiveFunction(new MultivariateLikelihood(maximumLikelihoodFunction)), GoalType.MINIMIZE, new SimpleBounds(lower, upper) }; // Iterate to prevent early convergence int repeat = 0; while (evaluations < n30) { if (repeat++ > 1) { // Update the start point and population size data[0] = new InitialGuess(optimum.getPointRef()); popSize *= 2; data[1] = new CMAESOptimizer.PopulationSize(popSize); } CMAESOptimizer o = new CMAESOptimizer(getMaxIterations(), stopFitness, isActiveCMA, diagonalOnly, checkFeasableCount, random, generateStatistics, new SimpleValueChecker(relativeThreshold, absoluteThreshold)); PointValuePair result = o.optimize(data); iterations += o.getIterations(); evaluations += o.getEvaluations(); //System.out.printf("CMAES [%d] i=%d [%d], e=%d [%d]\n", repeat, o.getIterations(), iterations, // o.getEvaluations(), totalEvaluations); if (optimum == null || result.getValue() < optimum.getValue()) { optimum = result; } } } else if (searchMethod == SearchMethod.BFGS) { // BFGS can use an approximate line search minimisation where as Powell and conjugate gradient // methods require a more accurate line minimisation. The BFGS search does not do a full // minimisation but takes appropriate steps in the direction of the current gradient. // Do not use the convergence checker on the value of the function. Use the convergence on the // point coordinate and gradient //BFGSOptimizer o = new BFGSOptimizer(new SimpleValueChecker(rel, abs)); BFGSOptimizer o = new BFGSOptimizer(); // Configure maximum step length for each dimension using the bounds double[] stepLength = new double[lower.length]; for (int i = 0; i < stepLength.length; i++) { stepLength[i] = (upper[i] - lower[i]) * 0.3333333; if (stepLength[i] <= 0) stepLength[i] = Double.POSITIVE_INFINITY; } // The GoalType is always minimise so no need to pass this in OptimizationData positionChecker = null; //new org.apache.commons.math3.optim.PositionChecker(relativeThreshold, absoluteThreshold); optimum = o.optimize(new MaxEval(getMaxEvaluations()), new ObjectiveFunctionGradient(new MultivariateVectorLikelihood(maximumLikelihoodFunction)), new ObjectiveFunction(new MultivariateLikelihood(maximumLikelihoodFunction)), new InitialGuess(startPoint), new SimpleBounds(lowerConstraint, upperConstraint), new BFGSOptimizer.GradientTolerance(relativeThreshold), positionChecker, new BFGSOptimizer.StepLength(stepLength)); iterations = o.getIterations(); evaluations = o.getEvaluations(); } else { // The line search algorithm often fails. This is due to searching into a region where the // function evaluates to a negative so has been clipped. This means the upper bound of the line // cannot be found. 
// Note that running it on an easy problem (200 photons with fixed fitting (no background)) the algorithm // does sometimes produces results better than the Powell algorithm but it is slower. BoundedNonLinearConjugateGradientOptimizer o = new BoundedNonLinearConjugateGradientOptimizer( (searchMethod == SearchMethod.CONJUGATE_GRADIENT_FR) ? Formula.FLETCHER_REEVES : Formula.POLAK_RIBIERE, new SimpleValueChecker(relativeThreshold, absoluteThreshold)); // Note: The gradients may become unstable at the edge of the bounds. Or they will not change // direction if the true solution is on the bounds since the gradient will always continue // towards the bounds. This is key to the conjugate gradient method. It searches along a vector // until the direction of the gradient is in the opposite direction (using dot products, i.e. // cosine of angle between them) // NR 10.7 states there is no advantage of the variable metric DFP or BFGS methods over // conjugate gradient methods. So I will try these first. // Try this: // Adapt the conjugate gradient optimiser to use the gradient to pick the search direction // and then for the line minimisation. However if the function is out of bounds then clip the // variables at the bounds and continue. // If the current point is at the bounds and the gradient is to continue out of bounds then // clip the gradient too. // Or: just use the gradient for the search direction then use the line minimisation/rest // as per the Powell optimiser. The bounds should limit the search. // I tried a Bounded conjugate gradient optimiser with clipped variables: // This sometimes works. However when the variables go a long way out of the expected range the gradients // can have vastly different magnitudes. This results in the algorithm stalling since the gradients // can be close to zero and the some of the parameters are no longer adjusted. // Perhaps this can be looked for and the algorithm then gives up and resorts to a Powell optimiser from // the current point. // Changed the bracketing step to very small (default is 1, changed to 0.001). This improves the // performance. The gradient direction is very sensitive to small changes in the coordinates so a // tighter bracketing of the line search helps. // Tried using a non-gradient method for the line search copied from the Powell optimiser: // This also works when the bracketing step is small but the number of iterations is higher. // 24.10.2014: I have tried to get conjugate gradient to work but the gradient function // must not behave suitably for the optimiser. In the current state both methods of using a // Bounded Conjugate Gradient Optimiser perform poorly relative to other optimisers: // Simulated : n=1000, signal=200, x=0.53, y=0.47 // LVM : n=1000, signal=171, x=0.537, y=0.471 (1.003s) // Powell : n=1000, signal=187, x=0.537, y=0.48 (1.238s) // Gradient based PR (constrained): n=858, signal=161, x=0.533, y=0.474 (2.54s) // Gradient based PR (bounded): n=948, signal=161, x=0.533, y=0.473 (2.67s) // Non-gradient based : n=1000, signal=151.47, x=0.535, y=0.474 (1.626s) // The conjugate optimisers are slower, under predict the signal by the most and in the case of // the gradient based optimiser, fail to converge on some problems. This is worse when constrained // fitting is used and not tightly bounded fitting. // I will leave the code in as an option but would not recommend using it. I may remove it in the // future. // Note: It is strange that the non-gradient based line minimisation is more successful. 
// It may be that the gradient function is not accurate (due to round off error) or that it is // simply wrong when far from the optimum. My JUnit tests only evaluate the function within the // expected range of the answer. // Note the default step size on the Powell optimiser is 1 but the initial directions are unit vectors. // So our bracketing step should be a minimum of 1 / average length of the first gradient vector to prevent // the first step being too large when bracketing. final double gradient[] = new double[startPoint.length]; maximumLikelihoodFunction.likelihood(startPoint, gradient); double l = 0; for (double d : gradient) l += d * d; final double bracketingStep = FastMath.min(0.001, ((l > 1) ? 1.0 / l : 1)); //System.out.printf("Bracketing step = %f (length=%f)\n", bracketingStep, l); o.setUseGradientLineSearch(gradientLineMinimisation); optimum = o.optimize(new MaxEval(getMaxEvaluations()), new ObjectiveFunctionGradient(new MultivariateVectorLikelihood(maximumLikelihoodFunction)), new ObjectiveFunction(new MultivariateLikelihood(maximumLikelihoodFunction)), GoalType.MINIMIZE, new InitialGuess(startPoint), new SimpleBounds(lowerConstraint, upperConstraint), new BoundedNonLinearConjugateGradientOptimizer.BracketingStep(bracketingStep)); iterations = o.getIterations(); evaluations = o.getEvaluations(); //maximumLikelihoodFunction.value(solution, gradient); //System.out.printf("Iter = %d, %g @ %s : %s\n", iterations, ll, Arrays.toString(solution), // Arrays.toString(gradient)); } final double[] solution = optimum.getPointRef(); setSolution(a, solution); //System.out.printf("Iter = %d, Eval = %d, %g @ %s\n", iterations, evaluations, optimum.getValue(), // java.util.Arrays.toString(solution)); // Compute residuals for the FunctionSolver interface if (y_fit == null || y_fit.length < n) y_fit = new double[n]; f.initialise(a); residualSumOfSquares = 0; for (int i = 0; i < n; i++) { y_fit[i] = f.eval(i); final double residual = y[i] - y_fit[i]; residualSumOfSquares += residual * residual; } if (a_dev != null) { // Assume the Maximum Likelihood estimator returns the optimum fit (achieves the Cramer Roa // lower bounds) and so the covariance can be obtained from the Fisher Information Matrix. 
final int[] gradientIndices = f.gradientIndices(); final int nparams = gradientIndices.length; GradientCalculator calculator = GradientCalculatorFactory.newCalculator(nparams); final double[] I = calculator.fisherInformationDiagonal(n, a, f); for (int i = 0; i < gradientIndices.length; i++) a_dev[gradientIndices[i]] = 1.0 / Math.sqrt(I[i]); } error[0] = NonLinearFit.getError(residualSumOfSquares, noise, n, f.gradientIndices().length); totalSumOfSquares = getSumOfSquares(n, y); } catch (TooManyIterationsException e) { //System.out.printf("Too many iterations = %d\n", e.getMax()); //e.printStackTrace(); return FitStatus.FAILED_TO_CONVERGE; } catch (TooManyEvaluationsException e) { //System.out.printf("Too many evaluations = %d\n", e.getMax()); //e.printStackTrace(); return FitStatus.FAILED_TO_CONVERGE; } catch (ConvergenceException e) { // Occurs when QR decomposition fails - mark as a singular non-linear model (no solution) //System.out.printf("Singular non linear model = %s\n", e.getMessage()); return FitStatus.SINGULAR_NON_LINEAR_MODEL; } catch (BFGSOptimizer.LineSearchRoundoffException e) { //System.out.println("BFGS error: " + e.getMessage()); //e.printStackTrace(); return FitStatus.FAILED_TO_CONVERGE; } catch (Exception e) { //System.out.printf("Unknown error = %s\n", e.getMessage()); e.printStackTrace(); return FitStatus.UNKNOWN; } return FitStatus.OK; }
From source file: gdsc.smlm.ij.plugins.BlinkEstimator.java
/**
 * Fit the dark time to counts of molecules curve. Only use the first n fitted points.
 * <p>
 * Calculates:<br/>
 * N = The number of photoblinking molecules in the sample<br/>
 * nBlink = The average number of blinks per fluorophore<br/>
 * tOff = The off-time
 *
 * @param td
 *            The dark time
 * @param ntd
 *            The counts of molecules
 * @param nFittedPoints
 * @param log
 *            Write the fitting results to the ImageJ log window
 * @return The fitted parameters [N, nBlink, tOff], or null if no fit was possible
 */
public double[] fit(double[] td, double[] ntd, int nFittedPoints, boolean log) {
    blinkingModel = new BlinkingFunction();
    blinkingModel.setLogging(true);
    for (int i = 0; i < nFittedPoints; i++)
        blinkingModel.addPoint(td[i], ntd[i]);

    // Different convergence thresholds seem to have no effect on the resulting fit, only the number of
    // iterations for convergence
    double initialStepBoundFactor = 100;
    double costRelativeTolerance = 1e-6;
    double parRelativeTolerance = 1e-6;
    double orthoTolerance = 1e-6;
    double threshold = Precision.SAFE_MIN;
    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer(initialStepBoundFactor,
            costRelativeTolerance, parRelativeTolerance, orthoTolerance, threshold);
    try {
        double[] obs = blinkingModel.getY();

        PointVectorValuePair optimum = optimizer.optimize(new MaxIter(1000), new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                    public double[][] value(double[] point) throws IllegalArgumentException {
                        return blinkingModel.jacobian(point);
                    }
                }), new ModelFunction(blinkingModel), new Target(obs),
                new Weight(blinkingModel.getWeights()),
                new InitialGuess(new double[] { ntd[0], 0.1, td[1] }));

        blinkingModel.setLogging(false);

        double[] parameters = optimum.getPoint();
        double[] exp = optimum.getValue();

        double mean = 0;
        for (double d : obs)
            mean += d;
        mean /= obs.length;

        double ssResiduals = 0, ssTotal = 0;
        for (int i = 0; i < obs.length; i++) {
            ssResiduals += (obs[i] - exp[i]) * (obs[i] - exp[i]);
            ssTotal += (obs[i] - mean) * (obs[i] - mean);
        }

        r2 = 1 - ssResiduals / ssTotal;
        adjustedR2 = getAdjustedCoefficientOfDetermination(ssResiduals, ssTotal, obs.length, parameters.length);

        if (log) {
            Utils.log(" Fit %d points. R^2 = %s. Adjusted R^2 = %s", obs.length, Utils.rounded(r2, 4),
                    Utils.rounded(adjustedR2, 4));
            Utils.log(" N=%s, nBlink=%s, tOff=%s (%s frames)", Utils.rounded(parameters[0], 4),
                    Utils.rounded(parameters[1], 4), Utils.rounded(parameters[2], 4),
                    Utils.rounded(parameters[2] / msPerFrame, 4));
        }

        return parameters;
    } catch (TooManyIterationsException e) {
        if (log)
            Utils.log(" Failed to fit %d points: Too many iterations (%d)", blinkingModel.size(),
                    optimizer.getIterations());
        return null;
    } catch (ConvergenceException e) {
        if (log)
            Utils.log(" Failed to fit %d points", blinkingModel.size());
        return null;
    }
}
From source file: gdsc.smlm.ij.plugins.pcpalm.PCPALMMolecules.java
private double[] optimiseLeastSquares(float[] x, float[] y, double[] initialSolution) {
    // Least-squares optimisation using numerical gradients
    final SkewNormalDifferentiableFunction myFunction = new SkewNormalDifferentiableFunction(initialSolution);
    myFunction.addData(x, y);

    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();

    PointVectorValuePair optimum = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
            new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                public double[][] value(double[] point) throws IllegalArgumentException {
                    return myFunction.jacobian(point);
                }
            }), new ModelFunction(myFunction), new Target(myFunction.calculateTarget()),
            new Weight(myFunction.calculateWeights()), new InitialGuess(initialSolution));

    double[] skewParameters = optimum.getPoint();
    return skewParameters;
}
From source file: gdsc.smlm.ij.plugins.pcpalm.PCPALMFitting.java
/** * Fits the correlation curve with r>0 to the random model using the estimated density and precision. Parameters * must be fit within a tolerance of the starting values. * /*from w w w. j a v a 2 s .com*/ * @param gr * @param sigmaS * The estimated precision * @param proteinDensity * The estimate protein density * @return The fitted parameters [precision, density] */ private double[] fitRandomModel(double[][] gr, double sigmaS, double proteinDensity, String resultColour) { final RandomModelFunction myFunction = new RandomModelFunction(); randomModel = myFunction; log("Fitting %s: Estimated precision = %f nm, estimated protein density = %g um^-2", randomModel.getName(), sigmaS, proteinDensity * 1e6); randomModel.setLogging(true); for (int i = offset; i < gr[0].length; i++) { // Only fit the curve above the estimated resolution (points below it will be subject to error) if (gr[0][i] > sigmaS * fitAboveEstimatedPrecision) randomModel.addPoint(gr[0][i], gr[1][i]); } LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer(); PointVectorValuePair optimum; try { optimum = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE), new ModelFunctionJacobian(new MultivariateMatrixFunction() { public double[][] value(double[] point) throws IllegalArgumentException { return myFunction.jacobian(point); } }), new ModelFunction(myFunction), new Target(myFunction.getY()), new Weight(myFunction.getWeights()), new InitialGuess(new double[] { sigmaS, proteinDensity })); } catch (TooManyIterationsException e) { log("Failed to fit %s: Too many iterations (%d)", randomModel.getName(), optimizer.getIterations()); return null; } catch (ConvergenceException e) { log("Failed to fit %s: %s", randomModel.getName(), e.getMessage()); return null; } randomModel.setLogging(false); double[] parameters = optimum.getPoint(); // Ensure the width is positive parameters[0] = Math.abs(parameters[0]); double ss = 0; double[] obs = randomModel.getY(); double[] exp = optimum.getValue(); for (int i = 0; i < obs.length; i++) ss += (obs[i] - exp[i]) * (obs[i] - exp[i]); ic1 = Maths.getInformationCriterion(ss, randomModel.size(), parameters.length); final double fitSigmaS = parameters[0]; final double fitProteinDensity = parameters[1]; // Check the fitted parameters are within tolerance of the initial estimates double e1 = parameterDrift(sigmaS, fitSigmaS); double e2 = parameterDrift(proteinDensity, fitProteinDensity); log(" %s fit: SS = %f. cAIC = %f. %d evaluations", randomModel.getName(), ss, ic1, optimizer.getEvaluations()); log(" %s parameters:", randomModel.getName()); log(" Average precision = %s nm (%s%%)", Utils.rounded(fitSigmaS, 4), Utils.rounded(e1, 4)); log(" Average protein density = %s um^-2 (%s%%)", Utils.rounded(fitProteinDensity * 1e6, 4), Utils.rounded(e2, 4)); valid1 = true; if (fittingTolerance > 0 && (Math.abs(e1) > fittingTolerance || Math.abs(e2) > fittingTolerance)) { log(" Failed to fit %s within tolerance (%s%%): Average precision = %f nm (%s%%), average protein density = %g um^-2 (%s%%)", randomModel.getName(), Utils.rounded(fittingTolerance, 4), fitSigmaS, Utils.rounded(e1, 4), fitProteinDensity * 1e6, Utils.rounded(e2, 4)); valid1 = false; } if (valid1) { // --------- // TODO - My data does not comply with this criteria. // This could be due to the PC-PALM Molecule code limiting the nmPerPixel to fit the images in memory // thus removing correlations at small r. 
// It could also be due to the nature of the random simulations being 3D not 2D membranes // as per the PC-PALM paper. // --------- // Evaluate g(r)protein where: // g(r)peaks = g(r)protein + g(r)stoch // g(r)peaks ~ 1 + g(r)stoch // Verify g(r)protein should be <1.5 for all r>0 double[] gr_stoch = randomModel.value(parameters); double[] gr_peaks = randomModel.getY(); double[] gr_ = randomModel.getX(); //SummaryStatistics stats = new SummaryStatistics(); for (int i = 0; i < gr_peaks.length; i++) { // Only evaluate above the fitted average precision if (gr_[i] < fitSigmaS) continue; // Note the RandomModelFunction evaluates g(r)stoch + 1; double gr_protein_i = gr_peaks[i] - (gr_stoch[i] - 1); if (gr_protein_i > gr_protein_threshold) { // Failed fit log(" Failed to fit %s: g(r)protein %s > %s @ r=%s", randomModel.getName(), Utils.rounded(gr_protein_i, 4), Utils.rounded(gr_protein_threshold, 4), Utils.rounded(gr_[i], 4)); valid1 = false; } //stats.addValue(gr_i); //System.out.printf("g(r)protein @ %f = %f\n", gr[0][i], gr_protein_i); } } addResult(randomModel.getName(), resultColour, valid1, fitSigmaS, fitProteinDensity, 0, 0, 0, 0, ic1); return parameters; }