Example usage for org.apache.commons.math3.optim.InitialGuess InitialGuess(double[])

Introduction

This page collects example usages of the org.apache.commons.math3.optim.InitialGuess constructor, InitialGuess(double[] startPoint), drawn from open-source projects.

Prototype

public InitialGuess(double[] startPoint) 
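
A minimal, self-contained sketch (not taken from the examples below) of how InitialGuess is typically passed to an optimizer: here it seeds a PowellOptimizer minimising an assumed toy quadratic. The objective, tolerances and start point are illustrative only, not from any of the projects listed under Usage.

import java.util.Arrays;

import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.PowellOptimizer;

public class InitialGuessSketch {
    public static void main(String[] args) {
        // Assumed toy objective: (x - 1)^2 + (y + 2)^2, minimised at (1, -2).
        MultivariateFunction f = new MultivariateFunction() {
            public double value(double[] p) {
                double dx = p[0] - 1, dy = p[1] + 2;
                return dx * dx + dy * dy;
            }
        };

        PowellOptimizer optimizer = new PowellOptimizer(1e-7, 1e-7);
        // InitialGuess supplies the start point of the search as OptimizationData.
        PointValuePair result = optimizer.optimize(new MaxEval(10000), new ObjectiveFunction(f),
                GoalType.MINIMIZE, new InitialGuess(new double[] { 0, 0 }));

        System.out.println("Minimum at " + Arrays.toString(result.getPoint()));
    }
}

The same OptimizationData-varargs pattern recurs in every example below; only the optimizer and the accompanying data (bounds, model functions, weights) change.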

Usage

From source file:gdsc.smlm.ij.plugins.pcpalm.PCPALMFitting.java

/**
 * Fits the correlation curve with r>0 to the random model using the estimated density and precision. Parameters
 * must be fit within a tolerance of the starting values.
 *
 * @param gr
 * @param sigmaS
 *            The estimated precision
 * @param proteinDensity
 *            The estimated protein density
 * @return The fitted parameters [precision, density]
 */
private double[] fitRandomModel(double[][] gr, double sigmaS, double proteinDensity, String resultColour) {
    final RandomModelFunction myFunction = new RandomModelFunction();
    randomModel = myFunction;

    log("Fitting %s: Estimated precision = %f nm, estimated protein density = %g um^-2", randomModel.getName(),
            sigmaS, proteinDensity * 1e6);

    randomModel.setLogging(true);

    for (int i = offset; i < gr[0].length; i++) {
        // Only fit the curve above the estimated resolution (points below it will be subject to error)
        if (gr[0][i] > sigmaS * fitAboveEstimatedPrecision)
            randomModel.addPoint(gr[0][i], gr[1][i]);
    }
    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();

    PointVectorValuePair optimum;
    try {
        optimum = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                    public double[][] value(double[] point) throws IllegalArgumentException {
                        return myFunction.jacobian(point);
                    }
                }), new ModelFunction(myFunction), new Target(myFunction.getY()),
                new Weight(myFunction.getWeights()), new InitialGuess(new double[] { sigmaS, proteinDensity }));
    } catch (TooManyIterationsException e) {
        log("Failed to fit %s: Too many iterations (%d)", randomModel.getName(), optimizer.getIterations());
        return null;
    } catch (ConvergenceException e) {
        log("Failed to fit %s: %s", randomModel.getName(), e.getMessage());
        return null;
    }

    randomModel.setLogging(false);

    double[] parameters = optimum.getPoint();
    // Ensure the width is positive
    parameters[0] = Math.abs(parameters[0]);

    double ss = 0;
    double[] obs = randomModel.getY();
    double[] exp = optimum.getValue();
    for (int i = 0; i < obs.length; i++)
        ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
    ic1 = Maths.getInformationCriterion(ss, randomModel.size(), parameters.length);

    final double fitSigmaS = parameters[0];
    final double fitProteinDensity = parameters[1];

    // Check the fitted parameters are within tolerance of the initial estimates
    double e1 = parameterDrift(sigmaS, fitSigmaS);
    double e2 = parameterDrift(proteinDensity, fitProteinDensity);

    log("  %s fit: SS = %f. cAIC = %f. %d evaluations", randomModel.getName(), ss, ic1,
            optimizer.getEvaluations());
    log("  %s parameters:", randomModel.getName());
    log("    Average precision = %s nm (%s%%)", Utils.rounded(fitSigmaS, 4), Utils.rounded(e1, 4));
    log("    Average protein density = %s um^-2 (%s%%)", Utils.rounded(fitProteinDensity * 1e6, 4),
            Utils.rounded(e2, 4));

    valid1 = true;
    if (fittingTolerance > 0 && (Math.abs(e1) > fittingTolerance || Math.abs(e2) > fittingTolerance)) {
        log("  Failed to fit %s within tolerance (%s%%): Average precision = %f nm (%s%%), average protein density = %g um^-2 (%s%%)",
                randomModel.getName(), Utils.rounded(fittingTolerance, 4), fitSigmaS, Utils.rounded(e1, 4),
                fitProteinDensity * 1e6, Utils.rounded(e2, 4));
        valid1 = false;
    }

    if (valid1) {
        // ---------
        // TODO - My data does not comply with this criterion.
        // This could be due to the PC-PALM Molecule code limiting the nmPerPixel to fit the images in memory 
        // thus removing correlations at small r.
        // It could also be due to the nature of the random simulations being 3D not 2D membranes 
        // as per the PC-PALM paper. 
        // ---------
        // Evaluate g(r)protein where:
        // g(r)peaks = g(r)protein + g(r)stoch
        // g(r)peaks ~ 1           + g(r)stoch
        // Verify g(r)protein should be <1.5 for all r>0
        double[] gr_stoch = randomModel.value(parameters);
        double[] gr_peaks = randomModel.getY();
        double[] gr_ = randomModel.getX();

        //SummaryStatistics stats = new SummaryStatistics();
        for (int i = 0; i < gr_peaks.length; i++) {
            // Only evaluate above the fitted average precision 
            if (gr_[i] < fitSigmaS)
                continue;

            // Note the RandomModelFunction evaluates g(r)stoch + 1;
            double gr_protein_i = gr_peaks[i] - (gr_stoch[i] - 1);

            if (gr_protein_i > gr_protein_threshold) {
                // Failed fit
                log("  Failed to fit %s: g(r)protein %s > %s @ r=%s", randomModel.getName(),
                        Utils.rounded(gr_protein_i, 4), Utils.rounded(gr_protein_threshold, 4),
                        Utils.rounded(gr_[i], 4));
                valid1 = false;
            }
            //stats.addValue(gr_i);
            //System.out.printf("g(r)protein @ %f = %f\n", gr[0][i], gr_protein_i);
        }
    }

    addResult(randomModel.getName(), resultColour, valid1, fitSigmaS, fitProteinDensity, 0, 0, 0, 0, ic1);

    return parameters;
}
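
The plugin above feeds InitialGuess into the vector least-squares API (the org.apache.commons.math3.optim.nonlinear.vector classes, deprecated in later 3.x releases) together with ModelFunction, ModelFunctionJacobian, Target and Weight. A stripped-down sketch of the same pattern, fitting an assumed straight line y = a * x through the origin to three made-up points; the data, weights and start value are illustrative only:

import java.util.Arrays;

import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
import org.apache.commons.math3.analysis.MultivariateVectorFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.MaxIter;
import org.apache.commons.math3.optim.PointVectorValuePair;
import org.apache.commons.math3.optim.nonlinear.vector.ModelFunction;
import org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian;
import org.apache.commons.math3.optim.nonlinear.vector.Target;
import org.apache.commons.math3.optim.nonlinear.vector.Weight;
import org.apache.commons.math3.optim.nonlinear.vector.jacobian.LevenbergMarquardtOptimizer;

public class LinearLvmSketch {
    public static void main(String[] args) {
        final double[] x = { 1, 2, 3 };
        final double[] y = { 2.1, 3.9, 6.2 };

        // Model values: f_i(a) = a * x_i
        MultivariateVectorFunction model = new MultivariateVectorFunction() {
            public double[] value(double[] p) {
                double[] out = new double[x.length];
                for (int i = 0; i < x.length; i++)
                    out[i] = p[0] * x[i];
                return out;
            }
        };
        // Jacobian: df_i/da = x_i
        MultivariateMatrixFunction jacobian = new MultivariateMatrixFunction() {
            public double[][] value(double[] p) {
                double[][] j = new double[x.length][1];
                for (int i = 0; i < x.length; i++)
                    j[i][0] = x[i];
                return j;
            }
        };

        LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
        PointVectorValuePair fit = optimizer.optimize(new MaxIter(1000), new MaxEval(10000),
                new ModelFunction(model), new ModelFunctionJacobian(jacobian), new Target(y),
                new Weight(new double[] { 1, 1, 1 }), new InitialGuess(new double[] { 0 }));

        System.out.println("Fitted gradient a = " + fit.getPoint()[0]);
    }
}

Here InitialGuess carries a single starting coefficient; in the plugin it carries the estimated precision and protein density so the fit starts near physically plausible values.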

From source file:edu.cmu.tetrad.search.BinaryTetradTest.java

private void estimateTwoFactorModel(double params[]) {
    double bestScore = Double.MAX_VALUE;
    double bestParams[] = new double[params.length];
    for (int i = 0; i < 5; i++) {
        for (int c = 0; c < 11; c++) {
            params[c] = RandomUtil.getInstance().nextDouble() / 2. + 0.2;
        }

        MultivariateOptimizer search = new PowellOptimizer(1e-7, 1e-7);

        PointValuePair pair = search.optimize(new InitialGuess(params),
                new ObjectiveFunction(new FittingFunction(this)), GoalType.MINIMIZE, new MaxEval(100000));

        double newScore = scoreParams(pair.getPoint());

        if (newScore < bestScore) {
            System.arraycopy(params, 0, bestParams, 0, params.length);
            bestScore = newScore;
        }
        //System.out.println(scoreParams(params));
        //for (int c = 0; c < 11; c++)
        //    System.out.println(params[c]); System.exit(0);
    }
    System.arraycopy(bestParams, 0, params, 0, params.length);
    //System.out.println();
}

From source file:com.itemanalysis.psychometrics.irt.equating.HaebaraMethodTest.java

@Test
public void stockingLordTestMixedFormat5() {
    System.out.println("StockingLordMethod Test 6: Mixed format test, backwards, Graded Response 2");
    LinkedHashMap<String, ItemResponseModel> irmX = new LinkedHashMap<String, ItemResponseModel>();
    LinkedHashMap<String, ItemResponseModel> irmY = new LinkedHashMap<String, ItemResponseModel>();

    //3pl items
    irmX.put("v1", new Irm3PL(1.0755, -1.8758, 0.1240, 1.7));
    irmX.put("v2", new Irm3PL(0.6428, -0.9211, 0.1361, 1.7));
    irmX.put("v3", new Irm3PL(0.6198, -1.3362, 0.1276, 1.7));
    irmX.put("v4", new Irm3PL(0.6835, -1.8967, 0.1619, 1.7));
    irmX.put("v5", new Irm3PL(0.9892, -0.6427, 0.2050, 1.7));
    irmX.put("v6", new Irm3PL(0.5784, -0.8181, 0.1168, 1.7));
    irmX.put("v7", new Irm3PL(0.9822, -0.9897, 0.1053, 1.7));
    irmX.put("v8", new Irm3PL(1.6026, -1.2382, 0.1202, 1.7));
    irmX.put("v9", new Irm3PL(0.8988, -0.5180, 0.1320, 1.7));
    irmX.put("v10", new Irm3PL(1.2525, -0.7164, 0.1493, 1.7));

    //gpcm items
    double[] step1 = { -2.1415, 0.0382, 0.6551 };
    irmX.put("v11", new IrmGRM(1.1196, step1, 1.7));

    double[] step2 = { -1.7523, -1.0660, 0.3533 };
    irmX.put("v12", new IrmGRM(1.2290, step2, 1.7));

    double[] step3 = { -2.3126, -1.8816, 0.7757 };
    irmX.put("v13", new IrmGRM(0.6405, step3, 1.7));

    double[] step4 = { -1.9728, -0.2810, 1.1387 };
    irmX.put("v14", new IrmGRM(1.1622, step4, 1.7));

    double[] step5 = { -2.2207, -0.8252, 0.9702 };
    irmX.put("v15", new IrmGRM(1.2249, step5, 1.7));

    //3pl items
    irmY.put("v1", new Irm3PL(0.7444, -1.5617, 0.1609, 1.7));
    irmY.put("v2", new Irm3PL(0.5562, -0.1031, 0.1753, 1.7));
    irmY.put("v3", new Irm3PL(0.5262, -1.0676, 0.1602, 1.7));
    irmY.put("v4", new Irm3PL(0.6388, -1.3880, 0.1676, 1.7));
    irmY.put("v5", new Irm3PL(0.8793, -0.2051, 0.1422, 1.7));
    irmY.put("v6", new Irm3PL(0.4105, 0.0555, 0.2120, 1.7));
    irmY.put("v7", new Irm3PL(0.7686, -0.3800, 0.2090, 1.7));
    irmY.put("v8", new Irm3PL(1.0539, -0.7570, 0.1270, 1.7));
    irmY.put("v9", new Irm3PL(0.7400, 0.0667, 0.1543, 1.7));
    irmY.put("v10", new Irm3PL(0.7479, 0.0281, 0.1489, 1.7));

    //gpcm items
    double[] step6 = { -1.7786, 0.7177, 1.45011 };
    irmY.put("v11", new IrmGRM(0.9171, step6, 1.7));

    double[] step7 = { -1.4115, -0.4946, 1.15969 };
    irmY.put("v12", new IrmGRM(0.9751, step7, 1.7));

    double[] step8 = { -1.8478, -1.4078, 1.51339 };
    irmY.put("v13", new IrmGRM(0.5890, step8, 1.7));

    double[] step9 = { -1.6151, 0.3002, 2.04728 };
    irmY.put("v14", new IrmGRM(0.9804, step9, 1.7));

    double[] step10 = { -1.9355, -0.2267, 1.88991 };
    irmY.put("v15", new IrmGRM(1.0117, step10, 1.7));

    UserSuppliedDistributionApproximation distX = new UserSuppliedDistributionApproximation(points, xDensity);
    UserSuppliedDistributionApproximation distY = new UserSuppliedDistributionApproximation(points, yDensity);

    HaebaraMethod hb = new HaebaraMethod(irmX, irmY, distX, distY, EquatingCriterionType.Q1);
    hb.setPrecision(4);
    double[] startValues = { 0, 1 };

    int numIterpolationPoints = 2 * 2;//two dimensions A and B
    BOBYQAOptimizer underlying = new BOBYQAOptimizer(numIterpolationPoints);
    RandomGenerator g = new JDKRandomGenerator();
    RandomVectorGenerator generator = new UncorrelatedRandomVectorGenerator(2, new GaussianRandomGenerator(g));
    MultiStartMultivariateOptimizer optimizer = new MultiStartMultivariateOptimizer(underlying, 10, generator);
    org.apache.commons.math3.optim.PointValuePair optimum = optimizer.optimize(new MaxEval(1000),
            new ObjectiveFunction(hb), org.apache.commons.math3.optim.nonlinear.scalar.GoalType.MINIMIZE,
            SimpleBounds.unbounded(2), new InitialGuess(startValues));

    double[] hbCoefficients = optimum.getPoint();
    hb.setIntercept(hbCoefficients[0]);
    hb.setScale(hbCoefficients[1]);

    System.out.println("  Iterations: " + optimizer.getEvaluations());
    System.out.println("  fmin: " + optimum.getValue());
    System.out.println("  B = " + hbCoefficients[0] + "  A = " + hbCoefficients[1]);

    assertEquals("  Intercept test", 0.731176, hb.getIntercept(), 1e-4);
    assertEquals("  Scale test", 1.209901, hb.getScale(), 1e-4);

    System.out.println();

}
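
The bounded, multi-start pattern used in this test (a BOBYQAOptimizer wrapped in MultiStartMultivariateOptimizer and seeded through InitialGuess) reduces to the following self-contained sketch; the toy objective, the unbounded SimpleBounds and the evaluation budget are assumptions made for illustration:

import java.util.Arrays;

import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.SimpleBounds;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.MultiStartMultivariateOptimizer;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.BOBYQAOptimizer;
import org.apache.commons.math3.random.GaussianRandomGenerator;
import org.apache.commons.math3.random.JDKRandomGenerator;
import org.apache.commons.math3.random.RandomVectorGenerator;
import org.apache.commons.math3.random.UncorrelatedRandomVectorGenerator;

public class MultiStartBobyqaSketch {
    public static void main(String[] args) {
        // Assumed toy objective: (x - 0.5)^2 + (y - 1.5)^2.
        MultivariateFunction f = new MultivariateFunction() {
            public double value(double[] p) {
                double dx = p[0] - 0.5, dy = p[1] - 1.5;
                return dx * dx + dy * dy;
            }
        };

        // BOBYQA needs between n+2 and (n+1)(n+2)/2 interpolation points; 4 is the minimum for n = 2.
        BOBYQAOptimizer underlying = new BOBYQAOptimizer(4);
        RandomVectorGenerator generator = new UncorrelatedRandomVectorGenerator(2,
                new GaussianRandomGenerator(new JDKRandomGenerator()));
        MultiStartMultivariateOptimizer optimizer = new MultiStartMultivariateOptimizer(underlying, 10,
                generator);

        // The first start uses the InitialGuess; the remaining starts come from the random vector generator.
        PointValuePair optimum = optimizer.optimize(new MaxEval(1000), new ObjectiveFunction(f),
                GoalType.MINIMIZE, SimpleBounds.unbounded(2), new InitialGuess(new double[] { 0, 1 }));

        System.out.println("Minimum at " + Arrays.toString(optimum.getPoint()));
    }
}

Running the local optimiser from several random starts in addition to the supplied guess is presumably what protects the equating tests above against BOBYQA settling in a local minimum of the criterion function.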

From source file:com.itemanalysis.psychometrics.irt.equating.StockingLordMethodTest.java

@Test
public void stockingLordTestMixedFormatGradedResponse2() {
    System.out.println("StockingLordMethod Test 5: Mixed format test, symmetric, Graded Response 2");
    LinkedHashMap<String, ItemResponseModel> irmX = new LinkedHashMap<String, ItemResponseModel>();
    LinkedHashMap<String, ItemResponseModel> irmY = new LinkedHashMap<String, ItemResponseModel>();

    //3pl items
    irmX.put("v1", new Irm3PL(1.0755, -1.8758, 0.1240, 1.7));
    irmX.put("v2", new Irm3PL(0.6428, -0.9211, 0.1361, 1.7));
    irmX.put("v3", new Irm3PL(0.6198, -1.3362, 0.1276, 1.7));
    irmX.put("v4", new Irm3PL(0.6835, -1.8967, 0.1619, 1.7));
    irmX.put("v5", new Irm3PL(0.9892, -0.6427, 0.2050, 1.7));
    irmX.put("v6", new Irm3PL(0.5784, -0.8181, 0.1168, 1.7));
    irmX.put("v7", new Irm3PL(0.9822, -0.9897, 0.1053, 1.7));
    irmX.put("v8", new Irm3PL(1.6026, -1.2382, 0.1202, 1.7));
    irmX.put("v9", new Irm3PL(0.8988, -0.5180, 0.1320, 1.7));
    irmX.put("v10", new Irm3PL(1.2525, -0.7164, 0.1493, 1.7));

    //gpcm items
    double[] step1 = { -2.1415, 0.0382, 0.6551 };
    irmX.put("v11", new IrmGRM(1.1196, step1, 1.7));

    double[] step2 = { -1.7523, -1.0660, 0.3533 };
    irmX.put("v12", new IrmGRM(1.2290, step2, 1.7));

    double[] step3 = { -2.3126, -1.8816, 0.7757 };
    irmX.put("v13", new IrmGRM(0.6405, step3, 1.7));

    double[] step4 = { -1.9728, -0.2810, 1.1387 };
    irmX.put("v14", new IrmGRM(1.1622, step4, 1.7));

    double[] step5 = { -2.2207, -0.8252, 0.9702 };
    irmX.put("v15", new IrmGRM(1.2249, step5, 1.7));

    //3pl items
    irmY.put("v1", new Irm3PL(0.7444, -1.5617, 0.1609, 1.7));
    irmY.put("v2", new Irm3PL(0.5562, -0.1031, 0.1753, 1.7));
    irmY.put("v3", new Irm3PL(0.5262, -1.0676, 0.1602, 1.7));
    irmY.put("v4", new Irm3PL(0.6388, -1.3880, 0.1676, 1.7));
    irmY.put("v5", new Irm3PL(0.8793, -0.2051, 0.1422, 1.7));
    irmY.put("v6", new Irm3PL(0.4105, 0.0555, 0.2120, 1.7));
    irmY.put("v7", new Irm3PL(0.7686, -0.3800, 0.2090, 1.7));
    irmY.put("v8", new Irm3PL(1.0539, -0.7570, 0.1270, 1.7));
    irmY.put("v9", new Irm3PL(0.7400, 0.0667, 0.1543, 1.7));
    irmY.put("v10", new Irm3PL(0.7479, 0.0281, 0.1489, 1.7));

    //gpcm items
    double[] step6 = { -1.7786, 0.7177, 1.45011 };
    irmY.put("v11", new IrmGRM(0.9171, step6, 1.7));

    double[] step7 = { -1.4115, -0.4946, 1.15969 };
    irmY.put("v12", new IrmGRM(0.9751, step7, 1.7));

    double[] step8 = { -1.8478, -1.4078, 1.51339 };
    irmY.put("v13", new IrmGRM(0.5890, step8, 1.7));

    double[] step9 = { -1.6151, 0.3002, 2.04728 };
    irmY.put("v14", new IrmGRM(0.9804, step9, 1.7));

    double[] step10 = { -1.9355, -0.2267, 1.88991 };
    irmY.put("v15", new IrmGRM(1.0117, step10, 1.7));

    UserSuppliedDistributionApproximation distX = new UserSuppliedDistributionApproximation(points, xDensity);
    UserSuppliedDistributionApproximation distY = new UserSuppliedDistributionApproximation(points, yDensity);

    StockingLordMethod sl = new StockingLordMethod(irmX, irmY, distX, distY, EquatingCriterionType.Q1Q2);
    sl.setPrecision(4);
    double[] startValues = { 0, 1 };

    int numIterpolationPoints = 2 * 2;//two dimensions A and B
    BOBYQAOptimizer underlying = new BOBYQAOptimizer(numIterpolationPoints);
    RandomGenerator g = new JDKRandomGenerator();
    RandomVectorGenerator generator = new UncorrelatedRandomVectorGenerator(2, new GaussianRandomGenerator(g));
    MultiStartMultivariateOptimizer optimizer = new MultiStartMultivariateOptimizer(underlying, 10, generator);
    PointValuePair optimum = optimizer.optimize(new MaxEval(1000), new ObjectiveFunction(sl), GoalType.MINIMIZE,
            SimpleBounds.unbounded(2), new InitialGuess(startValues));

    double[] slCoefficients = optimum.getPoint();
    sl.setIntercept(slCoefficients[0]);
    sl.setScale(slCoefficients[1]);

    System.out.println("  Iterations: " + optimizer.getEvaluations());
    System.out.println("  fmin: " + optimum.getValue());
    System.out.println("  B = " + slCoefficients[0] + "  A = " + slCoefficients[1]);

    assertEquals("  Intercept test", 0.735177, sl.getIntercept(), 1e-4);
    assertEquals("  Scale test", 1.213684, sl.getScale(), 1e-4);

    System.out.println();

}

From source file:gdsc.smlm.ij.plugins.pcpalm.PCPALMFitting.java

/**
 * Fits the correlation curve with r>0 to the clustered model using the estimated density and precision. Parameters
 * must be fit within a tolerance of the starting values.
 *
 * @param gr
 * @param sigmaS
 *            The estimated precision
 * @param proteinDensity
 *            The estimated protein density
 * @return The fitted parameters [precision, density, clusterRadius, clusterDensity]
 */
private double[] fitClusteredModel(double[][] gr, double sigmaS, double proteinDensity, String resultColour) {
    final ClusteredModelFunctionGradient myFunction = new ClusteredModelFunctionGradient();
    clusteredModel = myFunction;
    log("Fitting %s: Estimated precision = %f nm, estimated protein density = %g um^-2",
            clusteredModel.getName(), sigmaS, proteinDensity * 1e6);

    clusteredModel.setLogging(true);
    for (int i = offset; i < gr[0].length; i++) {
        // Only fit the curve above the estimated resolution (points below it will be subject to error)
        if (gr[0][i] > sigmaS * fitAboveEstimatedPrecision)
            clusteredModel.addPoint(gr[0][i], gr[1][i]);
    }

    double[] parameters;
    // The model is: sigma, density, range, amplitude
    double[] initialSolution = new double[] { sigmaS, proteinDensity, sigmaS * 5, 1 };
    int evaluations = 0;

    // Constrain the fitting to be close to the estimated precision (sigmaS) and protein density.
    // LVM fitting does not support constrained fitting so use a bounded optimiser.
    SumOfSquaresModelFunction clusteredModelMulti = new SumOfSquaresModelFunction(clusteredModel);
    double[] x = clusteredModelMulti.x;

    // Put some bounds around the initial guess. Use the fitting tolerance (in %) if provided.
    double limit = (fittingTolerance > 0) ? 1 + fittingTolerance / 100 : 2;
    double[] lB = new double[] { initialSolution[0] / limit, initialSolution[1] / limit, 0, 0 };
    // The amplitude and range should not extend beyond the limits of the g(r) curve.
    double[] uB = new double[] { initialSolution[0] * limit, initialSolution[1] * limit, Maths.max(x),
            Maths.max(gr[1]) };
    log("Fitting %s using a bounded search: %s < precision < %s & %s < density < %s", clusteredModel.getName(),
            Utils.rounded(lB[0], 4), Utils.rounded(uB[0], 4), Utils.rounded(lB[1] * 1e6, 4),
            Utils.rounded(uB[1] * 1e6, 4));

    PointValuePair constrainedSolution = runBoundedOptimiser(gr, initialSolution, lB, uB, clusteredModelMulti);

    if (constrainedSolution == null)
        return null;

    parameters = constrainedSolution.getPointRef();
    evaluations = boundedEvaluations;

    // Refit using an LVM
    if (useLSE) {
        log("Re-fitting %s using a gradient optimisation", clusteredModel.getName());
        LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
        PointVectorValuePair lvmSolution;
        try {
            lvmSolution = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
                    new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                        public double[][] value(double[] point) throws IllegalArgumentException {
                            return myFunction.jacobian(point);
                        }
                    }), new ModelFunction(myFunction), new Target(myFunction.getY()),
                    new Weight(myFunction.getWeights()), new InitialGuess(parameters));
            evaluations += optimizer.getEvaluations();

            double ss = 0;
            double[] obs = clusteredModel.getY();
            double[] exp = lvmSolution.getValue();
            for (int i = 0; i < obs.length; i++)
                ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
            if (ss < constrainedSolution.getValue()) {
                log("Re-fitting %s improved the SS from %s to %s (-%s%%)", clusteredModel.getName(),
                        Utils.rounded(constrainedSolution.getValue(), 4), Utils.rounded(ss, 4),
                        Utils.rounded(
                                100 * (constrainedSolution.getValue() - ss) / constrainedSolution.getValue(),
                                4));
                parameters = lvmSolution.getPoint();
            }
        } catch (TooManyIterationsException e) {
            log("Failed to re-fit %s: Too many iterations (%d)", clusteredModel.getName(),
                    optimizer.getIterations());
        } catch (ConvergenceException e) {
            log("Failed to re-fit %s: %s", clusteredModel.getName(), e.getMessage());
        }
    }

    clusteredModel.setLogging(false);

    // Ensure the width is positive
    parameters[0] = Math.abs(parameters[0]);
    //parameters[2] = Math.abs(parameters[2]);

    double ss = 0;
    double[] obs = clusteredModel.getY();
    double[] exp = clusteredModel.value(parameters);
    for (int i = 0; i < obs.length; i++)
        ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
    ic2 = Maths.getInformationCriterion(ss, clusteredModel.size(), parameters.length);

    final double fitSigmaS = parameters[0];
    final double fitProteinDensity = parameters[1];
    final double domainRadius = parameters[2]; //The radius of the cluster domain
    final double domainDensity = parameters[3]; //The density of the cluster domain

    // This is from the PC-PALM paper. However that paper fits the g(r)protein exponential convolved in 2D
    // with the g(r)PSF. In this method we have just fit the exponential
    final double nCluster = 2 * domainDensity * Math.PI * domainRadius * domainRadius * fitProteinDensity;

    double e1 = parameterDrift(sigmaS, fitSigmaS);
    double e2 = parameterDrift(proteinDensity, fitProteinDensity);

    log("  %s fit: SS = %f. cAIC = %f. %d evaluations", clusteredModel.getName(), ss, ic2, evaluations);
    log("  %s parameters:", clusteredModel.getName());
    log("    Average precision = %s nm (%s%%)", Utils.rounded(fitSigmaS, 4), Utils.rounded(e1, 4));
    log("    Average protein density = %s um^-2 (%s%%)", Utils.rounded(fitProteinDensity * 1e6, 4),
            Utils.rounded(e2, 4));
    log("    Domain radius = %s nm", Utils.rounded(domainRadius, 4));
    log("    Domain density = %s", Utils.rounded(domainDensity, 4));
    log("    nCluster = %s", Utils.rounded(nCluster, 4));

    // Check the fitted parameters are within tolerance of the initial estimates
    valid2 = true;
    if (fittingTolerance > 0 && (Math.abs(e1) > fittingTolerance || Math.abs(e2) > fittingTolerance)) {
        log("  Failed to fit %s within tolerance (%s%%): Average precision = %f nm (%s%%), average protein density = %g um^-2 (%s%%)",
                clusteredModel.getName(), Utils.rounded(fittingTolerance, 4), fitSigmaS, Utils.rounded(e1, 4),
                fitProteinDensity * 1e6, Utils.rounded(e2, 4));
        valid2 = false;
    }

    // Check extra parameters. Domain radius should be higher than the precision. Density should be positive
    if (domainRadius < fitSigmaS) {
        log("  Failed to fit %s: Domain radius is smaller than the average precision (%s < %s)",
                clusteredModel.getName(), Utils.rounded(domainRadius, 4), Utils.rounded(fitSigmaS, 4));
        valid2 = false;
    }
    if (domainDensity < 0) {
        log("  Failed to fit %s: Domain density is negative (%s)", clusteredModel.getName(),
                Utils.rounded(domainDensity, 4));
        valid2 = false;
    }

    if (ic2 > ic1) {
        log("  Failed to fit %s - Information Criterion has increased %s%%", clusteredModel.getName(),
                Utils.rounded((100 * (ic2 - ic1) / ic1), 4));
        valid2 = false;
    }

    addResult(clusteredModel.getName(), resultColour, valid2, fitSigmaS, fitProteinDensity, domainRadius,
            domainDensity, nCluster, 0, ic2);

    return parameters;
}

From source file:com.itemanalysis.psychometrics.irt.equating.StockingLordMethodTest.java

@Test
public void stockingLordTestMixedFormatGradedResponse3() {
    System.out.println("StockingLordMethod Test 5: Mixed format test, backwards, Graded Response 3");
    LinkedHashMap<String, ItemResponseModel> irmX = new LinkedHashMap<String, ItemResponseModel>();
    LinkedHashMap<String, ItemResponseModel> irmY = new LinkedHashMap<String, ItemResponseModel>();

    //3pl items
    irmX.put("v1", new Irm3PL(1.0755, -1.8758, 0.1240, 1.7));
    irmX.put("v2", new Irm3PL(0.6428, -0.9211, 0.1361, 1.7));
    irmX.put("v3", new Irm3PL(0.6198, -1.3362, 0.1276, 1.7));
    irmX.put("v4", new Irm3PL(0.6835, -1.8967, 0.1619, 1.7));
    irmX.put("v5", new Irm3PL(0.9892, -0.6427, 0.2050, 1.7));
    irmX.put("v6", new Irm3PL(0.5784, -0.8181, 0.1168, 1.7));
    irmX.put("v7", new Irm3PL(0.9822, -0.9897, 0.1053, 1.7));
    irmX.put("v8", new Irm3PL(1.6026, -1.2382, 0.1202, 1.7));
    irmX.put("v9", new Irm3PL(0.8988, -0.5180, 0.1320, 1.7));
    irmX.put("v10", new Irm3PL(1.2525, -0.7164, 0.1493, 1.7));

    //gpcm items
    double[] step1 = { -2.1415, 0.0382, 0.6551 };
    irmX.put("v11", new IrmGRM(1.1196, step1, 1.7));

    double[] step2 = { -1.7523, -1.0660, 0.3533 };
    irmX.put("v12", new IrmGRM(1.2290, step2, 1.7));

    double[] step3 = { -2.3126, -1.8816, 0.7757 };
    irmX.put("v13", new IrmGRM(0.6405, step3, 1.7));

    double[] step4 = { -1.9728, -0.2810, 1.1387 };
    irmX.put("v14", new IrmGRM(1.1622, step4, 1.7));

    double[] step5 = { -2.2207, -0.8252, 0.9702 };
    irmX.put("v15", new IrmGRM(1.2249, step5, 1.7));

    //3pl items
    irmY.put("v1", new Irm3PL(0.7444, -1.5617, 0.1609, 1.7));
    irmY.put("v2", new Irm3PL(0.5562, -0.1031, 0.1753, 1.7));
    irmY.put("v3", new Irm3PL(0.5262, -1.0676, 0.1602, 1.7));
    irmY.put("v4", new Irm3PL(0.6388, -1.3880, 0.1676, 1.7));
    irmY.put("v5", new Irm3PL(0.8793, -0.2051, 0.1422, 1.7));
    irmY.put("v6", new Irm3PL(0.4105, 0.0555, 0.2120, 1.7));
    irmY.put("v7", new Irm3PL(0.7686, -0.3800, 0.2090, 1.7));
    irmY.put("v8", new Irm3PL(1.0539, -0.7570, 0.1270, 1.7));
    irmY.put("v9", new Irm3PL(0.7400, 0.0667, 0.1543, 1.7));
    irmY.put("v10", new Irm3PL(0.7479, 0.0281, 0.1489, 1.7));

    //gpcm items
    double[] step6 = { -1.7786, 0.7177, 1.45011 };
    irmY.put("v11", new IrmGRM(0.9171, step6, 1.7));

    double[] step7 = { -1.4115, -0.4946, 1.15969 };
    irmY.put("v12", new IrmGRM(0.9751, step7, 1.7));

    double[] step8 = { -1.8478, -1.4078, 1.51339 };
    irmY.put("v13", new IrmGRM(0.5890, step8, 1.7));

    double[] step9 = { -1.6151, 0.3002, 2.04728 };
    irmY.put("v14", new IrmGRM(0.9804, step9, 1.7));

    double[] step10 = { -1.9355, -0.2267, 1.88991 };
    irmY.put("v15", new IrmGRM(1.0117, step10, 1.7));

    UserSuppliedDistributionApproximation distX = new UserSuppliedDistributionApproximation(points, xDensity);
    UserSuppliedDistributionApproximation distY = new UserSuppliedDistributionApproximation(points, yDensity);

    StockingLordMethod sl = new StockingLordMethod(irmX, irmY, distX, distY, EquatingCriterionType.Q1);
    sl.setPrecision(4);
    double[] startValues = { 0, 1 };

    int numIterpolationPoints = 2 * 2;//two dimensions A and B
    BOBYQAOptimizer underlying = new BOBYQAOptimizer(numIterpolationPoints);
    RandomGenerator g = new JDKRandomGenerator();
    RandomVectorGenerator generator = new UncorrelatedRandomVectorGenerator(2, new GaussianRandomGenerator(g));
    MultiStartMultivariateOptimizer optimizer = new MultiStartMultivariateOptimizer(underlying, 10, generator);
    PointValuePair optimum = optimizer.optimize(new MaxEval(1000), new ObjectiveFunction(sl), GoalType.MINIMIZE,
            SimpleBounds.unbounded(2), new InitialGuess(startValues));

    double[] slCoefficients = optimum.getPoint();
    sl.setIntercept(slCoefficients[0]);
    sl.setScale(slCoefficients[1]);

    System.out.println("  Iterations: " + optimizer.getEvaluations());
    System.out.println("  fmin: " + optimum.getValue());
    System.out.println("  B = " + slCoefficients[0] + "  A = " + slCoefficients[1]);

    assertEquals("  Intercept test", 0.739394, sl.getIntercept(), 1e-4);
    assertEquals("  Scale test", 1.218372, sl.getScale(), 1e-4);

    System.out.println();

}

From source file:edu.cmu.tetrad.search.Lofs2.java

private void optimizeRow(final int rowIndex, final TetradMatrix data, final double range,
        final List<List<Integer>> rows, final List<List<Double>> parameters) {
    System.out.println("A");

    final int numParams = rows.get(rowIndex).size();

    final double[] dLeftMin = new double[numParams];
    final double[] dRightMin = new double[numParams];

    double[] values = new double[numParams];
    double delta = 0.1;

    if (false) { //isEdgeCorrected()) {
        double min = -2;
        double max = 2;

        int[] dims = new int[values.length];

        int numBins = 5;
        for (int i = 0; i < values.length; i++)
            dims[i] = numBins;

        CombinationGenerator gen = new CombinationGenerator(dims);
        int[] comb;
        List<Double> maxParams = new ArrayList<Double>();

        for (int i = 0; i < values.length; i++)
            maxParams.add(0.0);

        double maxV = Double.NEGATIVE_INFINITY;

        while ((comb = gen.next()) != null) {
            List<Double> params = new ArrayList<Double>();

            for (int i = 0; i < values.length; i++) {
                params.add(min + (max - min) * (comb[i] / (double) numBins));
            }

            parameters.set(rowIndex, params);

            double v = scoreRow(rowIndex, data, rows, parameters);

            if (v > maxV) {
                maxV = v;
                maxParams = params;
            }
        }

        System.out.println("maxparams = " + maxParams);

        parameters.set(rowIndex, maxParams);

        for (int i = 0; i < values.length; i++) {
            dLeftMin[i] = -range;
            dRightMin[i] = range;
            values[i] = maxParams.get(i);
        }
    } else if (false) {
        for (int i = 0; i < numParams; i++) {
            parameters.get(rowIndex).set(i, -range);
            double vLeft = scoreRow(rowIndex, data, rows, parameters);
            double dLeft = -range;

            // Search from the left for the first valley; mark that as dleft.
            for (double d = -range + delta; d < range; d += delta) {
                parameters.get(rowIndex).set(i, d);
                double v = scoreRow(rowIndex, data, rows, parameters);
                if (Double.isNaN(v))
                    continue;
                if (v > vLeft)
                    break;
                vLeft = v;
                dLeft = d;
            }

            parameters.get(rowIndex).set(i, range);
            double vRight = scoreRow(rowIndex, data, rows, parameters);
            double dRight = range;

            // Similarly for dright. Will take dleft and dright to be bounds for the parameter,
            // to avoid high scores at the boundaries.
            for (double d = range - delta; d > -range; d -= delta) {
                parameters.get(rowIndex).set(i, d);
                double v = scoreRow(rowIndex, data, rows, parameters);
                if (Double.isNaN(v))
                    continue;
                if (v > vRight)
                    break;
                vRight = v;
                dRight = d;
            }

            // If dLeft and dRight ended up reversed, swap them back.
            if (dLeft > dRight) {
                double temp = dLeft;
                dLeft = dRight;
                dRight = temp;
            }

            System.out.println("dLeft = " + dLeft + " dRight = " + dRight);

            dLeftMin[i] = dLeft;
            dRightMin[i] = dRight;

            values[i] = (dLeft + dRight) / 2.0;
        }
    } else {

        System.out.println("B");

        // Default case: search for the maximum score over the entire range.
        for (int i = 0; i < numParams; i++) {
            dLeftMin[i] = -range;
            dRightMin[i] = range;

            values[i] = 0;
        }
    }

    MultivariateFunction function = new MultivariateFunction() {
        public double value(double[] values) {
            System.out.println(Arrays.toString(values));

            for (int i = 0; i < values.length; i++) {
                parameters.get(rowIndex).set(i, values[i]);
            }

            double v = scoreRow(rowIndex, data, rows, parameters);

            if (Double.isNaN(v)) {
                return Double.POSITIVE_INFINITY; // was 10000
            }

            return -v;
        }
    };

    try {
        MultivariateOptimizer search = new PowellOptimizer(1e-7, 1e-7);

        PointValuePair pair = search.optimize(new InitialGuess(values), new ObjectiveFunction(function),
                GoalType.MINIMIZE, new MaxEval(100000));

        values = pair.getPoint();
    } catch (Exception e) {
        e.printStackTrace();

        for (int i = 0; i < values.length; i++) {
            parameters.get(rowIndex).set(i, Double.NaN);
        }
    }
}

From source file:gdsc.smlm.ij.plugins.TraceDiffusion.java

/**
 * Fit the MSD using a linear fit that must pass through 0,0.
 * <p>
 * Update the plot by adding the fit line.
 * 
 * @param x
 * @param y
 * @param title
 * @param plot
 * @return
 */
private double fitMSD(double[] x, double[] y, String title, Plot2 plot) {
    double D = 0;
    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
    PointVectorValuePair lvmSolution;
    try {
        final LinearFunction function = new LinearFunction(x, y, settings.fitLength);
        double[] parameters = new double[] { function.guess() };
        lvmSolution = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                    public double[][] value(double[] point) throws IllegalArgumentException {
                        return function.jacobian(point);
                    }
                }), new ModelFunction(function), new Target(function.getY()), new Weight(function.getWeights()),
                new InitialGuess(parameters));

        double ss = 0;
        double[] obs = function.getY();
        double[] exp = lvmSolution.getValue();
        for (int i = 0; i < obs.length; i++)
            ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);

        D = lvmSolution.getPoint()[0] / 4;
        Utils.log("Linear fit (%d points) : Gradient = %s, D = %s um^2/s, SS = %f (%d evaluations)", obs.length,
                Utils.rounded(lvmSolution.getPoint()[0], 4), Utils.rounded(D, 4), ss,
                optimizer.getEvaluations());

        // Add the fit to the plot
        plot.setColor(Color.magenta);
        plot.drawLine(0, 0, x[x.length - 1], x[x.length - 1] * 4 * D);
        display(title, plot);
    } catch (TooManyIterationsException e) {
        Utils.log("Failed to fit : Too many iterations (%d)", optimizer.getIterations());
    } catch (ConvergenceException e) {
        Utils.log("Failed to fit : %s", e.getMessage());
    }
    return D;
}

From source file:gdsc.utils.Cell_Outliner.java

/**
 * Find an ellipse that optimises the fit to the polygon detected edges.
 *
 * @param roi
 * @param params
 * @param weightMap
 * @param angle
 * @return
 */
private double[] fitPolygonalCell(PolygonRoi roi, double[] params, FloatProcessor weightMap,
        FloatProcessor angle) {
    // Get an estimate of the starting parameters using the current polygon
    double[] startPoint = params;
    startPoint = estimateStartPoint(roi, weightMap.getWidth(), weightMap.getHeight());

    int maxEval = 2000;
    final DifferentiableEllipticalFitFunction func = new DifferentiableEllipticalFitFunction(roi, weightMap);

    double relativeThreshold = 100 * Precision.EPSILON;
    double absoluteThreshold = 100 * Precision.SAFE_MIN;
    ConvergenceChecker<PointVectorValuePair> checker = new SimplePointChecker<PointVectorValuePair>(
            relativeThreshold, absoluteThreshold);
    double initialStepBoundFactor = 10;
    double costRelativeTolerance = 1e-10;
    double parRelativeTolerance = 1e-10;
    double orthoTolerance = 1e-10;
    double threshold = Precision.SAFE_MIN;

    LevenbergMarquardtOptimizer optimiser = new LevenbergMarquardtOptimizer(initialStepBoundFactor, checker,
            costRelativeTolerance, parRelativeTolerance, orthoTolerance, threshold);

    try {
        PointVectorValuePair solution = optimiser.optimize(new MaxIter(maxEval), new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                    public double[][] value(double[] point) throws IllegalArgumentException {
                        return func.jacobian(point);
                    }
                }), new ModelFunction(func), new Target(func.calculateTarget()),
                new Weight(func.calculateWeights()), new InitialGuess(startPoint));

        if (debug)
            IJ.log(String.format("Eval = %d (Iter = %d), RMS = %f", optimiser.getEvaluations(),
                    optimiser.getIterations(), optimiser.getRMS()));
        return solution.getPointRef();
    } catch (Exception e) {
        IJ.log("Failed to find an elliptical solution, defaulting to polygon");
        e.printStackTrace();
    }

    return null;
}

From source file:gdsc.smlm.ij.plugins.pcpalm.PCPALMFitting.java

private PointValuePair runBoundedOptimiser(double[][] gr, double[] initialSolution, double[] lB, double[] uB,
        SumOfSquaresModelFunction function) {
    // Create the functions to optimise
    ObjectiveFunction objective = new ObjectiveFunction(new SumOfSquaresMultivariateFunction(function));
    ObjectiveFunctionGradient gradient = new ObjectiveFunctionGradient(
            new SumOfSquaresMultivariateVectorFunction(function));

    final boolean debug = false;

    // Try a BFGS optimiser since this will produce a deterministic solution and can respect bounds.
    PointValuePair optimum = null;
    boundedEvaluations = 0;
    final MaxEval maxEvaluations = new MaxEval(2000);
    MultivariateOptimizer opt = null;
    for (int iteration = 0; iteration <= fitRestarts; iteration++) {
        try {
            opt = new BFGSOptimizer();
            final double relativeThreshold = 1e-6;

            // Configure maximum step length for each dimension using the bounds
            double[] stepLength = new double[lB.length];
            for (int i = 0; i < stepLength.length; i++)
                stepLength[i] = (uB[i] - lB[i]) * 0.3333333;

            // The GoalType is always minimise so no need to pass this in
            optimum = opt.optimize(maxEvaluations, gradient, objective,
                    new InitialGuess((optimum == null) ? initialSolution : optimum.getPointRef()),
                    new SimpleBounds(lB, uB), new BFGSOptimizer.GradientTolerance(relativeThreshold),
                    new BFGSOptimizer.StepLength(stepLength));
            if (debug)
                System.out.printf("BFGS Iter %d = %g (%d)\n", iteration, optimum.getValue(),
                        opt.getEvaluations());
        } catch (TooManyEvaluationsException e) {
            break; // No need to restart
        } catch (RuntimeException e) {
            break; // No need to restart
        } finally {
            boundedEvaluations += opt.getEvaluations();
        }
    }

    // Try a CMAES optimiser which is non-deterministic. To overcome this we perform restarts.

    // CMAESOptimiser based on Matlab code:
    // https://www.lri.fr/~hansen/cmaes.m
    // Take the defaults from the Matlab documentation
    double stopFitness = 0; //Double.NEGATIVE_INFINITY;
    boolean isActiveCMA = true;
    int diagonalOnly = 0;
    int checkFeasableCount = 1;
    RandomGenerator random = new Well44497b(); //Well19937c();
    boolean generateStatistics = false;
    ConvergenceChecker<PointValuePair> checker = new SimpleValueChecker(1e-6, 1e-10);
    // The sigma determines the search range for the variables. It should be 1/3 of the initial search region.
    double[] range = new double[lB.length];
    for (int i = 0; i < lB.length; i++)
        range[i] = (uB[i] - lB[i]) / 3;
    OptimizationData sigma = new CMAESOptimizer.Sigma(range);
    OptimizationData popSize = new CMAESOptimizer.PopulationSize(
            (int) (4 + Math.floor(3 * Math.log(initialSolution.length))));
    SimpleBounds bounds = new SimpleBounds(lB, uB);

    opt = new CMAESOptimizer(maxEvaluations.getMaxEval(), stopFitness, isActiveCMA, diagonalOnly,
            checkFeasableCount, random, generateStatistics, checker);
    // Restart the optimiser several times and take the best answer.
    for (int iteration = 0; iteration <= fitRestarts; iteration++) {
        try {
            // Start from the initial solution
            PointValuePair constrainedSolution = opt.optimize(new InitialGuess(initialSolution), objective,
                    GoalType.MINIMIZE, bounds, sigma, popSize, maxEvaluations);
            if (debug)
                System.out.printf("CMAES Iter %d initial = %g (%d)\n", iteration,
                        constrainedSolution.getValue(), opt.getEvaluations());
            boundedEvaluations += opt.getEvaluations();
            if (optimum == null || constrainedSolution.getValue() < optimum.getValue()) {
                optimum = constrainedSolution;
            }
        } catch (TooManyEvaluationsException e) {
        } catch (TooManyIterationsException e) {
        } finally {
            boundedEvaluations += maxEvaluations.getMaxEval();
        }
        if (optimum == null)
            continue;
        try {
            // Also restart from the current optimum
            PointValuePair constrainedSolution = opt.optimize(new InitialGuess(optimum.getPointRef()),
                    objective, GoalType.MINIMIZE, bounds, sigma, popSize, maxEvaluations);
            if (debug)
                System.out.printf("CMAES Iter %d restart = %g (%d)\n", iteration,
                        constrainedSolution.getValue(), opt.getEvaluations());
            if (constrainedSolution.getValue() < optimum.getValue()) {
                optimum = constrainedSolution;
            }
        } catch (TooManyEvaluationsException e) {
        } catch (TooManyIterationsException e) {
        } finally {
            boundedEvaluations += maxEvaluations.getMaxEval();
        }
    }
    return optimum;
}
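
For the CMAES restarts above, the key pieces of OptimizationData are the Sigma (per-variable search range, about one third of each bounded interval), the PopulationSize, and an InitialGuess that is replaced by the best point found so far on each restart. A self-contained sketch of that loop on an assumed toy objective and bounds:

import java.util.Arrays;

import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.optim.ConvergenceChecker;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.SimpleBounds;
import org.apache.commons.math3.optim.SimpleValueChecker;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.CMAESOptimizer;
import org.apache.commons.math3.random.Well44497b;

public class CmaesRestartSketch {
    public static void main(String[] args) {
        // Assumed toy objective on [0, 10] x [0, 10], minimised at (3, 7).
        MultivariateFunction f = new MultivariateFunction() {
            public double value(double[] p) {
                double dx = p[0] - 3, dy = p[1] - 7;
                return dx * dx + dy * dy;
            }
        };

        double[] lB = { 0, 0 };
        double[] uB = { 10, 10 };
        double[] sigma = new double[2];
        for (int i = 0; i < sigma.length; i++)
            sigma[i] = (uB[i] - lB[i]) / 3; // search range ~1/3 of each bounded interval

        int popSize = 4 + (int) Math.floor(3 * Math.log(2));
        ConvergenceChecker<PointValuePair> checker = new SimpleValueChecker(1e-6, 1e-10);
        CMAESOptimizer opt = new CMAESOptimizer(2000, 0, true, 0, 1, new Well44497b(), false, checker);

        PointValuePair best = null;
        for (int restart = 0; restart < 3; restart++) {
            // Seed the first run from a fixed guess, later runs from the best point so far.
            PointValuePair run = opt.optimize(new MaxEval(2000), new ObjectiveFunction(f), GoalType.MINIMIZE,
                    new SimpleBounds(lB, uB), new CMAESOptimizer.Sigma(sigma),
                    new CMAESOptimizer.PopulationSize(popSize),
                    new InitialGuess(best == null ? new double[] { 5, 5 } : best.getPointRef()));
            if (best == null || run.getValue() < best.getValue())
                best = run;
        }
        System.out.println("Minimum at " + Arrays.toString(best.getPoint()));
    }
}

Because CMAES is stochastic, restarting from both the original solution and the current optimum (as the plugin does) trades extra evaluations for a more reproducible result.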