Example usage for org.apache.commons.math3.optim MaxEval MaxEval

Introduction

On this page you can find example usage for the org.apache.commons.math3.optim.MaxEval constructor.

Prototype

public MaxEval(int max) 
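
MaxEval wraps the maximum number of objective-function evaluations permitted in a single call to an optimizer's optimize(...) method. The constructor rejects a non-positive max with a NotStrictlyPositiveException, and optimizers abort with a TooManyEvaluationsException once the cap is exceeded. Below is a minimal sketch using a Brent line search; the quadratic objective and search interval are illustrative only:

import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.univariate.BrentOptimizer;
import org.apache.commons.math3.optim.univariate.SearchInterval;
import org.apache.commons.math3.optim.univariate.UnivariateObjectiveFunction;
import org.apache.commons.math3.optim.univariate.UnivariatePointValuePair;

BrentOptimizer optimizer = new BrentOptimizer(1e-10, 1e-14);
UnivariatePointValuePair result = optimizer.optimize(
        new MaxEval(200), // abort with TooManyEvaluationsException after 200 evaluations
        new UnivariateObjectiveFunction(x -> (x - 2.0) * (x - 2.0)),
        GoalType.MINIMIZE,
        new SearchInterval(-10, 10));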

Usage

From source file:gdsc.smlm.ij.plugins.pcpalm.PCPALMFitting.java

/**
 * Fits the correlation curve with r>0 to the clustered model using the estimated density and precision. Parameters
 * must be fit within a tolerance of the starting values.
 *
 * @param gr
 *            The correlation curve: gr[0] contains the radius values and gr[1] the g(r) values
 * @param sigmaS
 *            The estimated precision
 * @param proteinDensity
 *            The estimated protein density
 * @param resultColour
 *            The colour used when reporting the result
 * @return The fitted parameters [precision, density, clusterRadius, clusterDensity]
 */
private double[] fitClusteredModel(double[][] gr, double sigmaS, double proteinDensity, String resultColour) {
    final ClusteredModelFunctionGradient myFunction = new ClusteredModelFunctionGradient();
    clusteredModel = myFunction;
    log("Fitting %s: Estimated precision = %f nm, estimated protein density = %g um^-2",
            clusteredModel.getName(), sigmaS, proteinDensity * 1e6);

    clusteredModel.setLogging(true);
    for (int i = offset; i < gr[0].length; i++) {
        // Only fit the curve above the estimated resolution (points below it will be subject to error)
        if (gr[0][i] > sigmaS * fitAboveEstimatedPrecision)
            clusteredModel.addPoint(gr[0][i], gr[1][i]);
    }

    double[] parameters;
    // The model is: sigma, density, range, amplitude
    double[] initialSolution = new double[] { sigmaS, proteinDensity, sigmaS * 5, 1 };
    int evaluations = 0;

    // Constrain the fitting to be close to the estimated precision (sigmaS) and protein density.
    // LVM fitting does not support constrained fitting so use a bounded optimiser.
    SumOfSquaresModelFunction clusteredModelMulti = new SumOfSquaresModelFunction(clusteredModel);
    double[] x = clusteredModelMulti.x;

    // Put some bounds around the initial guess. Use the fitting tolerance (in %) if provided.
    double limit = (fittingTolerance > 0) ? 1 + fittingTolerance / 100 : 2;
    double[] lB = new double[] { initialSolution[0] / limit, initialSolution[1] / limit, 0, 0 };
    // The amplitude and range should not extend beyond the limits of the g(r) curve.
    double[] uB = new double[] { initialSolution[0] * limit, initialSolution[1] * limit, Maths.max(x),
            Maths.max(gr[1]) };
    log("Fitting %s using a bounded search: %s < precision < %s & %s < density < %s", clusteredModel.getName(),
            Utils.rounded(lB[0], 4), Utils.rounded(uB[0], 4), Utils.rounded(lB[1] * 1e6, 4),
            Utils.rounded(uB[1] * 1e6, 4));

    PointValuePair constrainedSolution = runBoundedOptimiser(gr, initialSolution, lB, uB, clusteredModelMulti);

    if (constrainedSolution == null)
        return null;

    parameters = constrainedSolution.getPointRef();
    evaluations = boundedEvaluations;

    // Refit using an LVM optimiser
    if (useLSE) {
        log("Re-fitting %s using a gradient optimisation", clusteredModel.getName());
        LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
        PointVectorValuePair lvmSolution;
        try {
            lvmSolution = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
                    new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                        public double[][] value(double[] point) throws IllegalArgumentException {
                            return myFunction.jacobian(point);
                        }
                    }), new ModelFunction(myFunction), new Target(myFunction.getY()),
                    new Weight(myFunction.getWeights()), new InitialGuess(parameters));
            evaluations += optimizer.getEvaluations();

            double ss = 0;
            double[] obs = clusteredModel.getY();
            double[] exp = lvmSolution.getValue();
            for (int i = 0; i < obs.length; i++)
                ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
            if (ss < constrainedSolution.getValue()) {
                log("Re-fitting %s improved the SS from %s to %s (-%s%%)", clusteredModel.getName(),
                        Utils.rounded(constrainedSolution.getValue(), 4), Utils.rounded(ss, 4),
                        Utils.rounded(
                                100 * (constrainedSolution.getValue() - ss) / constrainedSolution.getValue(),
                                4));
                parameters = lvmSolution.getPoint();
            }
        } catch (TooManyIterationsException e) {
            log("Failed to re-fit %s: Too many iterations (%d)", clusteredModel.getName(),
                    optimizer.getIterations());
        } catch (ConvergenceException e) {
            log("Failed to re-fit %s: %s", clusteredModel.getName(), e.getMessage());
        }
    }

    clusteredModel.setLogging(false);

    // Ensure the width is positive
    parameters[0] = Math.abs(parameters[0]);
    //parameters[2] = Math.abs(parameters[2]);

    double ss = 0;
    double[] obs = clusteredModel.getY();
    double[] exp = clusteredModel.value(parameters);
    for (int i = 0; i < obs.length; i++)
        ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
    ic2 = Maths.getInformationCriterion(ss, clusteredModel.size(), parameters.length);

    final double fitSigmaS = parameters[0];
    final double fitProteinDensity = parameters[1];
    final double domainRadius = parameters[2]; //The radius of the cluster domain
    final double domainDensity = parameters[3]; //The density of the cluster domain

    // This is from the PC-PALM paper. However that paper fits the g(r)protein exponential convolved in 2D
    // with the g(r)PSF. In this method we have just fit the exponential.
    final double nCluster = 2 * domainDensity * Math.PI * domainRadius * domainRadius * fitProteinDensity;

    double e1 = parameterDrift(sigmaS, fitSigmaS);
    double e2 = parameterDrift(proteinDensity, fitProteinDensity);

    log("  %s fit: SS = %f. cAIC = %f. %d evaluations", clusteredModel.getName(), ss, ic2, evaluations);
    log("  %s parameters:", clusteredModel.getName());
    log("    Average precision = %s nm (%s%%)", Utils.rounded(fitSigmaS, 4), Utils.rounded(e1, 4));
    log("    Average protein density = %s um^-2 (%s%%)", Utils.rounded(fitProteinDensity * 1e6, 4),
            Utils.rounded(e2, 4));
    log("    Domain radius = %s nm", Utils.rounded(domainRadius, 4));
    log("    Domain density = %s", Utils.rounded(domainDensity, 4));
    log("    nCluster = %s", Utils.rounded(nCluster, 4));

    // Check the fitted parameters are within tolerance of the initial estimates
    valid2 = true;
    if (fittingTolerance > 0 && (Math.abs(e1) > fittingTolerance || Math.abs(e2) > fittingTolerance)) {
        log("  Failed to fit %s within tolerance (%s%%): Average precision = %f nm (%s%%), average protein density = %g um^-2 (%s%%)",
                clusteredModel.getName(), Utils.rounded(fittingTolerance, 4), fitSigmaS, Utils.rounded(e1, 4),
                fitProteinDensity * 1e6, Utils.rounded(e2, 4));
        valid2 = false;
    }

    // Check extra parameters. Domain radius should be higher than the precision. Density should be positive
    if (domainRadius < fitSigmaS) {
        log("  Failed to fit %s: Domain radius is smaller than the average precision (%s < %s)",
                clusteredModel.getName(), Utils.rounded(domainRadius, 4), Utils.rounded(fitSigmaS, 4));
        valid2 = false;
    }
    if (domainDensity < 0) {
        log("  Failed to fit %s: Domain density is negative (%s)", clusteredModel.getName(),
                Utils.rounded(domainDensity, 4));
        valid2 = false;
    }

    if (ic2 > ic1) {
        log("  Failed to fit %s - Information Criterion has increased %s%%", clusteredModel.getName(),
                Utils.rounded((100 * (ic2 - ic1) / ic1), 4));
        valid2 = false;
    }

    addResult(clusteredModel.getName(), resultColour, valid2, fitSigmaS, fitProteinDensity, domainRadius,
            domainDensity, nCluster, 0, ic2);

    return parameters;
}
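
A note on the pattern above: the Levenberg-Marquardt re-fit is bounded by MaxIter(3000), and passing MaxEval(Integer.MAX_VALUE) effectively disables the evaluation cap so that only the iteration limit is binding. The MaxEval class also offers a static factory expressing the same intent (assuming a commons-math3 version that includes it):

    // Equivalent to new MaxEval(Integer.MAX_VALUE); only the MaxIter limit binds.
    MaxEval noEvalCap = MaxEval.unlimited();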

From source file:com.itemanalysis.psychometrics.irt.equating.StockingLordMethodTest.java

@Test
public void stockingLordTestMixedFormatGradedResponse3() {
    System.out.println("StockingLordMethod Test 5: Mixed format test, backwards, Graded Response 3");
    LinkedHashMap<String, ItemResponseModel> irmX = new LinkedHashMap<String, ItemResponseModel>();
    LinkedHashMap<String, ItemResponseModel> irmY = new LinkedHashMap<String, ItemResponseModel>();

    //3pl items
    irmX.put("v1", new Irm3PL(1.0755, -1.8758, 0.1240, 1.7));
    irmX.put("v2", new Irm3PL(0.6428, -0.9211, 0.1361, 1.7));
    irmX.put("v3", new Irm3PL(0.6198, -1.3362, 0.1276, 1.7));
    irmX.put("v4", new Irm3PL(0.6835, -1.8967, 0.1619, 1.7));
    irmX.put("v5", new Irm3PL(0.9892, -0.6427, 0.2050, 1.7));
    irmX.put("v6", new Irm3PL(0.5784, -0.8181, 0.1168, 1.7));
    irmX.put("v7", new Irm3PL(0.9822, -0.9897, 0.1053, 1.7));
    irmX.put("v8", new Irm3PL(1.6026, -1.2382, 0.1202, 1.7));
    irmX.put("v9", new Irm3PL(0.8988, -0.5180, 0.1320, 1.7));
    irmX.put("v10", new Irm3PL(1.2525, -0.7164, 0.1493, 1.7));

    //graded response items
    double[] step1 = { -2.1415, 0.0382, 0.6551 };
    irmX.put("v11", new IrmGRM(1.1196, step1, 1.7));

    double[] step2 = { -1.7523, -1.0660, 0.3533 };
    irmX.put("v12", new IrmGRM(1.2290, step2, 1.7));

    double[] step3 = { -2.3126, -1.8816, 0.7757 };
    irmX.put("v13", new IrmGRM(0.6405, step3, 1.7));

    double[] step4 = { -1.9728, -0.2810, 1.1387 };
    irmX.put("v14", new IrmGRM(1.1622, step4, 1.7));

    double[] step5 = { -2.2207, -0.8252, 0.9702 };
    irmX.put("v15", new IrmGRM(1.2249, step5, 1.7));

    //3pl items
    irmY.put("v1", new Irm3PL(0.7444, -1.5617, 0.1609, 1.7));
    irmY.put("v2", new Irm3PL(0.5562, -0.1031, 0.1753, 1.7));
    irmY.put("v3", new Irm3PL(0.5262, -1.0676, 0.1602, 1.7));
    irmY.put("v4", new Irm3PL(0.6388, -1.3880, 0.1676, 1.7));
    irmY.put("v5", new Irm3PL(0.8793, -0.2051, 0.1422, 1.7));
    irmY.put("v6", new Irm3PL(0.4105, 0.0555, 0.2120, 1.7));
    irmY.put("v7", new Irm3PL(0.7686, -0.3800, 0.2090, 1.7));
    irmY.put("v8", new Irm3PL(1.0539, -0.7570, 0.1270, 1.7));
    irmY.put("v9", new Irm3PL(0.7400, 0.0667, 0.1543, 1.7));
    irmY.put("v10", new Irm3PL(0.7479, 0.0281, 0.1489, 1.7));

    //graded response items
    double[] step6 = { -1.7786, 0.7177, 1.45011 };
    irmY.put("v11", new IrmGRM(0.9171, step6, 1.7));

    double[] step7 = { -1.4115, -0.4946, 1.15969 };
    irmY.put("v12", new IrmGRM(0.9751, step7, 1.7));

    double[] step8 = { -1.8478, -1.4078, 1.51339 };
    irmY.put("v13", new IrmGRM(0.5890, step8, 1.7));

    double[] step9 = { -1.6151, 0.3002, 2.04728 };
    irmY.put("v14", new IrmGRM(0.9804, step9, 1.7));

    double[] step10 = { -1.9355, -0.2267, 1.88991 };
    irmY.put("v15", new IrmGRM(1.0117, step10, 1.7));

    UserSuppliedDistributionApproximation distX = new UserSuppliedDistributionApproximation(points, xDensity);
    UserSuppliedDistributionApproximation distY = new UserSuppliedDistributionApproximation(points, yDensity);

    StockingLordMethod sl = new StockingLordMethod(irmX, irmY, distX, distY, EquatingCriterionType.Q1);
    sl.setPrecision(4);
    double[] startValues = { 0, 1 };

    int numInterpolationPoints = 2 * 2; // two dimensions: A and B
    BOBYQAOptimizer underlying = new BOBYQAOptimizer(numInterpolationPoints);
    RandomGenerator g = new JDKRandomGenerator();
    RandomVectorGenerator generator = new UncorrelatedRandomVectorGenerator(2, new GaussianRandomGenerator(g));
    MultiStartMultivariateOptimizer optimizer = new MultiStartMultivariateOptimizer(underlying, 10, generator);
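    // Note: the MaxEval(1000) below is the total evaluation budget shared across
    // all 10 starts, not a per-start limit.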
    PointValuePair optimum = optimizer.optimize(new MaxEval(1000), new ObjectiveFunction(sl), GoalType.MINIMIZE,
            SimpleBounds.unbounded(2), new InitialGuess(startValues));

    double[] slCoefficients = optimum.getPoint();
    sl.setIntercept(slCoefficients[0]);
    sl.setScale(slCoefficients[1]);

    System.out.println("  Evaluations: " + optimizer.getEvaluations());
    System.out.println("  fmin: " + optimum.getValue());
    System.out.println("  B = " + slCoefficients[0] + "  A = " + slCoefficients[1]);

    assertEquals("  Intercept test", 0.739394, sl.getIntercept(), 1e-4);
    assertEquals("  Scale test", 1.218372, sl.getScale(), 1e-4);

    System.out.println();

}

From source file:edu.cmu.tetrad.search.Lofs2.java

private void optimizeRow(final int rowIndex, final TetradMatrix data, final double range,
        final List<List<Integer>> rows, final List<List<Double>> parameters) {
    System.out.println("A");

    final int numParams = rows.get(rowIndex).size();

    final double[] dLeftMin = new double[numParams];
    final double[] dRightMin = new double[numParams];

    double[] values = new double[numParams];
    double delta = 0.1;

    if (false) { // disabled; was: if (isEdgeCorrected()) {
        double min = -2;
        double max = 2;

        int[] dims = new int[values.length];

        int numBins = 5;
        for (int i = 0; i < values.length; i++)
            dims[i] = numBins;

        CombinationGenerator gen = new CombinationGenerator(dims);
        int[] comb;
        List<Double> maxParams = new ArrayList<Double>();

        for (int i = 0; i < values.length; i++)
            maxParams.add(0.0);

        double maxV = Double.NEGATIVE_INFINITY;

        while ((comb = gen.next()) != null) {
            List<Double> params = new ArrayList<Double>();

            for (int i = 0; i < values.length; i++) {
                params.add(min + (max - min) * (comb[i] / (double) numBins));
            }

            parameters.set(rowIndex, params);

            double v = scoreRow(rowIndex, data, rows, parameters);

            if (v > maxV) {
                maxV = v;
                maxParams = params;
            }
        }

        System.out.println("maxparams = " + maxParams);

        parameters.set(rowIndex, maxParams);

        for (int i = 0; i < values.length; i++) {
            dLeftMin[i] = -range;
            dRightMin[i] = range;
            values[i] = maxParams.get(i);
        }
    } else if (false) {
        for (int i = 0; i < numParams; i++) {
            parameters.get(rowIndex).set(i, -range);
            double vLeft = scoreRow(rowIndex, data, rows, parameters);
            double dLeft = -range;

            // Search from the left for the first valley; mark that as dleft.
            for (double d = -range + delta; d < range; d += delta) {
                parameters.get(rowIndex).set(i, d);
                double v = scoreRow(rowIndex, data, rows, parameters);
                if (Double.isNaN(v))
                    continue;
                if (v > vLeft)
                    break;
                vLeft = v;
                dLeft = d;
            }

            parameters.get(rowIndex).set(i, range);
            double vRight = scoreRow(rowIndex, data, rows, parameters);
            double dRight = range;

            // Similarly for dright. Will take dleft and dright to be bounds for the parameter,
            // to avoid high scores at the boundaries.
            for (double d = range - delta; d > -range; d -= delta) {
                parameters.get(rowIndex).set(i, d);
                double v = scoreRow(rowIndex, data, rows, parameters);
                if (Double.isNaN(v))
                    continue;
                if (v > vRight)
                    break;
                vRight = v;
                dRight = d;
            }

            // If dLeft and dRight ended up reversed, swap them.
            if (dLeft > dRight) {
                double temp = dLeft;
                dLeft = dRight;
                dRight = temp;
            }

            System.out.println("dLeft = " + dLeft + " dRight = " + dRight);

            dLeftMin[i] = dLeft;
            dRightMin[i] = dRight;

            values[i] = (dLeft + dRight) / 2.0;
        }
    } else {

        System.out.println("B");

        // Default case: search for the maximum score over the entire range.
        for (int i = 0; i < numParams; i++) {
            dLeftMin[i] = -range;
            dRightMin[i] = range;

            values[i] = 0;
        }
    }

    MultivariateFunction function = new MultivariateFunction() {
        public double value(double[] values) {
            System.out.println(Arrays.toString(values));

            for (int i = 0; i < values.length; i++) {
                parameters.get(rowIndex).set(i, values[i]);
            }

            double v = scoreRow(rowIndex, data, rows, parameters);

            if (Double.isNaN(v)) {
                return Double.POSITIVE_INFINITY; // was 10000
            }

            return -v;
        }
    };

    try {
        MultivariateOptimizer search = new PowellOptimizer(1e-7, 1e-7);

        PointValuePair pair = search.optimize(new InitialGuess(values), new ObjectiveFunction(function),
                GoalType.MINIMIZE, new MaxEval(100000));

        values = pair.getPoint();
    } catch (Exception e) {
        e.printStackTrace();

        for (int i = 0; i < values.length; i++) {
            parameters.get(rowIndex).set(i, Double.NaN);
        }
    }
}
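
When the PowellOptimizer above exhausts its MaxEval(100000) budget it aborts with a TooManyEvaluationsException, which the generic catch above swallows. A minimal sketch, reusing the names from the example, that handles the budget overrun explicitly:

    try {
        PointValuePair pair = search.optimize(new InitialGuess(values), new ObjectiveFunction(function),
                GoalType.MINIMIZE, new MaxEval(100000));
        values = pair.getPoint();
    } catch (TooManyEvaluationsException e) {
        // The evaluation budget ran out before the optimiser converged
        System.err.println("Gave up after " + e.getMax() + " evaluations");
    }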

From source file:gdsc.smlm.ij.plugins.TraceDiffusion.java

/**
 * Fit the MSD using a linear fit that must pass through 0,0.
 * <p>
 * Update the plot by adding the fit line.
 * 
 * @param x
 *            The x-data (time)
 * @param y
 *            The y-data (MSD)
 * @param title
 *            The plot title
 * @param plot
 *            The plot to update with the fit line
 * @return The fitted diffusion coefficient D (um^2/s)
 */
private double fitMSD(double[] x, double[] y, String title, Plot2 plot) {
    double D = 0;
    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
    PointVectorValuePair lvmSolution;
    try {
        final LinearFunction function = new LinearFunction(x, y, settings.fitLength);
        double[] parameters = new double[] { function.guess() };
        lvmSolution = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                    public double[][] value(double[] point) throws IllegalArgumentException {
                        return function.jacobian(point);
                    }
                }), new ModelFunction(function), new Target(function.getY()), new Weight(function.getWeights()),
                new InitialGuess(parameters));

        double ss = 0;
        double[] obs = function.getY();
        double[] exp = lvmSolution.getValue();
        for (int i = 0; i < obs.length; i++)
            ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);

        D = lvmSolution.getPoint()[0] / 4;
        Utils.log("Linear fit (%d points) : Gradient = %s, D = %s um^2/s, SS = %f (%d evaluations)", obs.length,
                Utils.rounded(lvmSolution.getPoint()[0], 4), Utils.rounded(D, 4), ss,
                optimizer.getEvaluations());

        // Add the fit to the plot
        plot.setColor(Color.magenta);
        plot.drawLine(0, 0, x[x.length - 1], x[x.length - 1] * 4 * D);
        display(title, plot);
    } catch (TooManyIterationsException e) {
        Utils.log("Failed to fit : Too many iterations (%d)", optimizer.getIterations());
    } catch (ConvergenceException e) {
        Utils.log("Failed to fit : %s", e.getMessage());
    }
    return D;
}

From source file:gdsc.utils.Cell_Outliner.java

/**
 * Find an ellipse that optimises the fit to the polygon detected edges.
 *
 * @param roi
 *            The polygon ROI outlining the cell
 * @param params
 *            The current ellipse parameters (superseded by a fresh start-point estimate)
 * @param weightMap
 *            The edge weight map
 * @param angle
 *            The edge angle map
 * @return The fitted ellipse parameters, or null if the fit failed
 */
private double[] fitPolygonalCell(PolygonRoi roi, double[] params, FloatProcessor weightMap,
        FloatProcessor angle) {
    // Get an estimate of the starting parameters using the current polygon
    // (the passed-in params are not used; a fresh estimate is computed)
    double[] startPoint = estimateStartPoint(roi, weightMap.getWidth(), weightMap.getHeight());

    int maxEval = 2000; // used below as the MaxIter limit; MaxEval itself is left unbounded
    final DifferentiableEllipticalFitFunction func = new DifferentiableEllipticalFitFunction(roi, weightMap);

    double relativeThreshold = 100 * Precision.EPSILON;
    double absoluteThreshold = 100 * Precision.SAFE_MIN;
    ConvergenceChecker<PointVectorValuePair> checker = new SimplePointChecker<PointVectorValuePair>(
            relativeThreshold, absoluteThreshold);
    double initialStepBoundFactor = 10;
    double costRelativeTolerance = 1e-10;
    double parRelativeTolerance = 1e-10;
    double orthoTolerance = 1e-10;
    double threshold = Precision.SAFE_MIN;

    LevenbergMarquardtOptimizer optimiser = new LevenbergMarquardtOptimizer(initialStepBoundFactor, checker,
            costRelativeTolerance, parRelativeTolerance, orthoTolerance, threshold);

    try {
        PointVectorValuePair solution = optimiser.optimize(new MaxIter(maxEval), new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                    public double[][] value(double[] point) throws IllegalArgumentException {
                        return func.jacobian(point);
                    }
                }), new ModelFunction(func), new Target(func.calculateTarget()),
                new Weight(func.calculateWeights()), new InitialGuess(startPoint));

        if (debug)
            IJ.log(String.format("Eval = %d (Iter = %d), RMS = %f", optimiser.getEvaluations(),
                    optimiser.getIterations(), optimiser.getRMS()));
        return solution.getPointRef();
    } catch (Exception e) {
        IJ.log("Failed to find an elliptical solution, defaulting to polygon");
        e.printStackTrace();
    }

    return null;
}

From source file:com.itemanalysis.psychometrics.irt.equating.StockingLordMethodTest.java

/**
 * This test uses Rasch model items.
 *
 *
 */
@Test
public void raschModelTest() {
    System.out.println("Stocking-Lord test: Rasch model");

    LinkedHashMap<String, ItemResponseModel> itemFormX = new LinkedHashMap<String, ItemResponseModel>();
    LinkedHashMap<String, ItemResponseModel> itemFormY = new LinkedHashMap<String, ItemResponseModel>();

    itemFormX.put("Item1", new Irm3PL(-3.188047976, 1.0));
    itemFormX.put("Item2", new Irm3PL(1.031760328, 1.0));
    itemFormX.put("Item3", new Irm3PL(0.819040914, 1.0));
    itemFormX.put("Item4", new Irm3PL(-2.706947360, 1.0));
    itemFormX.put("Item5", new Irm3PL(-0.094527077, 1.0));
    itemFormX.put("Item6", new Irm3PL(0.689697135, 1.0));
    itemFormX.put("Item7", new Irm3PL(-0.551837153, 1.0));
    itemFormX.put("Item8", new Irm3PL(-0.359559276, 1.0));

    itemFormY.put("Item1", new Irm3PL(-3.074599226, 1.0));
    itemFormY.put("Item2", new Irm3PL(1.012824350, 1.0));
    itemFormY.put("Item3", new Irm3PL(0.868538408, 1.0));
    itemFormY.put("Item4", new Irm3PL(-2.404483603, 1.0));
    itemFormY.put("Item5", new Irm3PL(0.037402866, 1.0));
    itemFormY.put("Item6", new Irm3PL(0.700747420, 1.0));
    itemFormY.put("Item7", new Irm3PL(-0.602555046, 1.0));
    itemFormY.put("Item8", new Irm3PL(-0.350426446, 1.0));

    double f = 0;
    double[] param1 = new double[2];
    double[] param2 = new double[2];

    UniformDistributionApproximation distX = new UniformDistributionApproximation(-4.0, 4.0, 161);//plink default
    UniformDistributionApproximation distY = new UniformDistributionApproximation(-4.0, 4.0, 161);//plink default

    StockingLordMethod sl = new StockingLordMethod(itemFormX, itemFormY, distX, distY,
            EquatingCriterionType.Q1Q2);
    sl.setPrecision(6);
    double[] startValues = { 0 };//Using a start value for the intercept only for the Rasch family of models.

    DefaultUncminOptimizer optimizer = new DefaultUncminOptimizer();

    try {
        optimizer.minimize(sl, startValues);
        double[] r = optimizer.getParameters();
        param1[0] = r[0];
        param1[1] = 1;
        f = optimizer.getFunctionValue();
        sl.setIntercept(param1[0]);
        sl.setScale(param1[1]);

    } catch (UncminException ex) {
        ex.printStackTrace();
    }

    //Check UNCMIN optimizer values against results from plink.
    System.out.println("  UNCMIN Optimization");
    System.out.println("  Iterations: ");
    System.out.println("  fmin: " + f);
    System.out.println("  B = " + sl.getIntercept() + "  A = " + sl.getScale());

    assertEquals("  Intercept test", 0.057566, sl.getIntercept(), 1e-3);//true result from plink package in R
    assertEquals("  Scale test", 1.0, sl.getScale(), 1e-4);

    BrentOptimizer brentOptimizer = new BrentOptimizer(1e-8, 1e-8);
    UnivariatePointValuePair pair = brentOptimizer.optimize(new MaxEval(200),
            new UnivariateObjectiveFunction(sl), GoalType.MINIMIZE, new SearchInterval(-3, 3));

    param2 = new double[2];
    param2[0] = pair.getPoint();
    param2[1] = 1;
    sl.setIntercept(param2[0]);
    sl.setScale(param2[1]);
    f = pair.getValue();

    //Check Brent results against plink results.
    System.out.println();
    System.out.println("  Brent Optimization");
    System.out.println("  Iterations: ");
    System.out.println("  fmin: " + f);
    System.out.println("  B = " + sl.getIntercept() + "  A = " + sl.getScale());

    assertEquals("  Intercept test", 0.057566, sl.getIntercept(), 1e-3);//true result from plink package in R
    assertEquals("  Scale test", 1.0, sl.getScale(), 1e-4);

    //Compare UNCMIN and Brent optimizer results.
    assertEquals("  UNCMIN/Brent intercept test", param1[0], param2[0], 1e-6);
    assertEquals("  UNCMIN/Brent slope test", param1[1], param2[1], 1e-6);

}

From source file:com.itemanalysis.psychometrics.irt.equating.HaebaraMethodTest.java

/**
 * This test uses a combination of 3PL and PCM items. Results compared to plink
 *
 * plink code:
 *
 * library(plink)
 *
 * fX<-matrix(c(
 * 1, -3.188047976, 0,NA,NA,
 * 1,  1.031760328, 0,NA,NA,
 * 1,  0.819040914, 0,NA,NA,
 * 1, -2.706947360, 0,NA,NA,
 * 1, -0.094527077, 0,NA,NA,
 * 1,  0.689697135, 0,NA,NA,
 * 1, -0.551837153, 0,NA,NA,
 * 1, -0.359559276, 0,NA,NA,
 * 1, -1.451470831, -0.146619694, -0.636399040, 0.783018734),
 * nrow=9, byrow=TRUE)
 * fX<-as.data.frame(fX)
 * names(fX)<-c("aparam", "bparam","cparam","s1","s2")
 *
 * fY<-matrix(c(
 * 1,-3.074599226,0,NA,NA,
 * 1,1.01282435,0,NA,NA,
 * 1,0.868538408,0,NA,NA,
 * 1,-2.404483603,0,NA,NA,
 * 1,0.037402866,0,NA,NA,
 * 1,0.70074742,0,NA,NA,
 * 1,-0.602555046,0,NA,NA,
 * 1,-0.350426446,0,NA,NA,
 * 1,-1.267744832,-0.185885988,-0.61535623,0.801242218),
 * nrow=9, byrow=TRUE)
 * fY<-as.data.frame(fY)
 * names(fY)<-c("aparam", "bparam","cparam","s1","s2")
 *
 * common<-cbind(1:9, 1:9)
 * cat<-c(rep(2,8),4)
 *
 * pmX <- as.poly.mod(9,c("drm","gpcm"),list(1:8,9))
 * pmY <- as.poly.mod(9,c("drm","gpcm"),list(1:8,9))
 *
 * pars <- as.irt.pars(list(fx=fX,fy=fY), common, cat=list(fx=cat,fy=cat),
 * poly.mod=list(pmX,pmY), location=c(TRUE,TRUE))
 *
 * plink(pars, startvals=c(1,0), rescale="SL", base.grp=2, D=1.0, symmetric=TRUE)
 *
 *
 */
@Test
public void mixedFormatRaschPCMTest() {
    System.out.println("Mixed format Haebara test: Rasch and PCM");

    LinkedHashMap<String, ItemResponseModel> itemFormX = new LinkedHashMap<String, ItemResponseModel>();
    LinkedHashMap<String, ItemResponseModel> itemFormY = new LinkedHashMap<String, ItemResponseModel>();

    itemFormX.put("Item1", new Irm3PL(-3.188047976, 1.0));
    itemFormX.put("Item2", new Irm3PL(1.031760328, 1.0));
    itemFormX.put("Item3", new Irm3PL(0.819040914, 1.0));
    itemFormX.put("Item4", new Irm3PL(-2.706947360, 1.0));
    itemFormX.put("Item5", new Irm3PL(-0.094527077, 1.0));
    itemFormX.put("Item6", new Irm3PL(0.689697135, 1.0));
    itemFormX.put("Item7", new Irm3PL(-0.551837153, 1.0));
    itemFormX.put("Item8", new Irm3PL(-0.359559276, 1.0));
    double[] step1x = { -0.146619694, -0.636399040, 0.783018734 };
    itemFormX.put("Item9", new IrmPCM(-1.451470831, step1x, 1.0));

    itemFormY.put("Item1", new Irm3PL(-3.074599226, 1.0));
    itemFormY.put("Item2", new Irm3PL(1.012824350, 1.0));
    itemFormY.put("Item3", new Irm3PL(0.868538408, 1.0));
    itemFormY.put("Item4", new Irm3PL(-2.404483603, 1.0));
    itemFormY.put("Item5", new Irm3PL(0.037402866, 1.0));
    itemFormY.put("Item6", new Irm3PL(0.700747420, 1.0));
    itemFormY.put("Item7", new Irm3PL(-0.602555046, 1.0));
    itemFormY.put("Item8", new Irm3PL(-0.350426446, 1.0));
    double[] step1y = { -0.185885988, -0.61535623, 0.801242218 };
    itemFormY.put("Item9", new IrmPCM(-1.267744832, step1y, 1.0));

    double f = 0;
    double[] param1 = new double[2];
    double[] param2 = new double[2];

    UniformDistributionApproximation distX = new UniformDistributionApproximation(-4.0, 4.0, 161);//plink default
    UniformDistributionApproximation distY = new UniformDistributionApproximation(-4.0, 4.0, 161);//plink default

    HaebaraMethod hb = new HaebaraMethod(itemFormX, itemFormY, distX, distY, EquatingCriterionType.Q1Q2);
    hb.setPrecision(6);
    double[] startValues = { 0.0 };//Using a start value for the intercept only for the Rasch family of models.

    DefaultUncminOptimizer optimizer = new DefaultUncminOptimizer();

    try {
        optimizer.minimize(hb, startValues);
        double[] r = optimizer.getParameters();
        param1[0] = r[0];
        param1[1] = 1.0;
        f = optimizer.getFunctionValue();
        hb.setIntercept(param1[0]);
        hb.setScale(param1[1]);

    } catch (UncminException ex) {
        ex.printStackTrace();
    }

    //Check UNCMIN values against results from plink.
    System.out.println("  UNCMIN Optimization");
    System.out.println("  Iterations: ");
    System.out.println("  fmin: " + f);
    System.out.println("  B = " + hb.getIntercept() + "  A = " + hb.getScale());

    //TODO there's something wrong with plink. I'm getting different results, but sirt agrees with mine (at least for the Rasch model).

    //        assertEquals("  Intercept test", 0.105526, hb.getIntercept(), 1e-4);//True results from plink package in R
    //        assertEquals("  Scale test", 1.0, hb.getScale(), 1e-4);

    //Check Brent optimizer values against plink
    BrentOptimizer brentOptimizer = new BrentOptimizer(1e-8, 1e-8);
    UnivariatePointValuePair pair = brentOptimizer.optimize(new MaxEval(200),
            new UnivariateObjectiveFunction(hb),
            org.apache.commons.math3.optim.nonlinear.scalar.GoalType.MINIMIZE, new SearchInterval(-3, 3));
    param2[0] = pair.getPoint();
    param2[1] = 1.0;
    hb.setIntercept(param2[0]);
    hb.setScale(param2[1]);
    f = pair.getValue();

    System.out.println();
    System.out.println("  Brent Optimization");
    System.out.println("  Iterations: ");
    System.out.println("  fmin: " + f);
    System.out.println("  B = " + hb.getIntercept() + "  A = " + hb.getScale());

    //Check UNCMIN values against results from plink.
    //        assertEquals("  Intercept test", 0.105526, hb.getIntercept(), 1e-4);
    //        assertEquals("  Scale test", 1.0, hb.getScale(), 1e-4);
    //
    //
    //        //Compare UNCMIN and Brent optimizer results.
    //        assertEquals("  UNCMIN/Brent intercept test", param1[0], param2[0], 1e-6);
    //        assertEquals("  UNCMIN/Brent slope test", param1[1], param2[1], 1e-6);

}

From source file:gdsc.smlm.ij.plugins.pcpalm.PCPALMFitting.java

private PointValuePair runBoundedOptimiser(double[][] gr, double[] initialSolution, double[] lB, double[] uB,
        SumOfSquaresModelFunction function) {
    // Create the functions to optimise
    ObjectiveFunction objective = new ObjectiveFunction(new SumOfSquaresMultivariateFunction(function));
    ObjectiveFunctionGradient gradient = new ObjectiveFunctionGradient(
            new SumOfSquaresMultivariateVectorFunction(function));

    final boolean debug = false;

    // Try a BFGS optimiser since this will produce a deterministic solution and can respect bounds.
    PointValuePair optimum = null;
    boundedEvaluations = 0;
    final MaxEval maxEvaluations = new MaxEval(2000);
    MultivariateOptimizer opt = null;
    for (int iteration = 0; iteration <= fitRestarts; iteration++) {
        try {
            opt = new BFGSOptimizer();
            final double relativeThreshold = 1e-6;

            // Configure maximum step length for each dimension using the bounds
            double[] stepLength = new double[lB.length];
            for (int i = 0; i < stepLength.length; i++)
                stepLength[i] = (uB[i] - lB[i]) * 0.3333333;

            // The GoalType is always minimise so no need to pass this in
            optimum = opt.optimize(maxEvaluations, gradient, objective,
                    new InitialGuess((optimum == null) ? initialSolution : optimum.getPointRef()),
                    new SimpleBounds(lB, uB), new BFGSOptimizer.GradientTolerance(relativeThreshold),
                    new BFGSOptimizer.StepLength(stepLength));
            if (debug)
                System.out.printf("BFGS Iter %d = %g (%d)\n", iteration, optimum.getValue(),
                        opt.getEvaluations());
        } catch (TooManyEvaluationsException e) {
            break; // No need to restart
        } catch (RuntimeException e) {
            break; // No need to restart
        } finally {
            boundedEvaluations += opt.getEvaluations();
        }
    }

    // Try a CMAES optimiser which is non-deterministic. To overcome this we perform restarts.

    // CMAESOptimiser based on Matlab code:
    // https://www.lri.fr/~hansen/cmaes.m
    // Take the defaults from the Matlab documentation
    double stopFitness = 0; //Double.NEGATIVE_INFINITY;
    boolean isActiveCMA = true;
    int diagonalOnly = 0;
    int checkFeasableCount = 1;
    RandomGenerator random = new Well44497b(); //Well19937c();
    boolean generateStatistics = false;
    ConvergenceChecker<PointValuePair> checker = new SimpleValueChecker(1e-6, 1e-10);
    // The sigma determines the search range for the variables. It should be 1/3 of the initial search region.
    double[] range = new double[lB.length];
    for (int i = 0; i < lB.length; i++)
        range[i] = (uB[i] - lB[i]) / 3;
    OptimizationData sigma = new CMAESOptimizer.Sigma(range);
    OptimizationData popSize = new CMAESOptimizer.PopulationSize(
            (int) (4 + Math.floor(3 * Math.log(initialSolution.length))));
    SimpleBounds bounds = new SimpleBounds(lB, uB);

    opt = new CMAESOptimizer(maxEvaluations.getMaxEval(), stopFitness, isActiveCMA, diagonalOnly,
            checkFeasableCount, random, generateStatistics, checker);
    // Restart the optimiser several times and take the best answer.
    for (int iteration = 0; iteration <= fitRestarts; iteration++) {
        try {
            // Start from the initial solution
            PointValuePair constrainedSolution = opt.optimize(new InitialGuess(initialSolution), objective,
                    GoalType.MINIMIZE, bounds, sigma, popSize, maxEvaluations);
            if (debug)
                System.out.printf("CMAES Iter %d initial = %g (%d)\n", iteration,
                        constrainedSolution.getValue(), opt.getEvaluations());
            boundedEvaluations += opt.getEvaluations();
            if (optimum == null || constrainedSolution.getValue() < optimum.getValue()) {
                optimum = constrainedSolution;
            }
        } catch (TooManyEvaluationsException e) {
            // Ignore and continue with the next restart
        } catch (TooManyIterationsException e) {
            // Ignore and continue with the next restart
        } finally {
            boundedEvaluations += maxEvaluations.getMaxEval();
        }
        if (optimum == null)
            continue;
        try {
            // Also restart from the current optimum
            PointValuePair constrainedSolution = opt.optimize(new InitialGuess(optimum.getPointRef()),
                    objective, GoalType.MINIMIZE, bounds, sigma, popSize, maxEvaluations);
            if (debug)
                System.out.printf("CMAES Iter %d restart = %g (%d)\n", iteration,
                        constrainedSolution.getValue(), opt.getEvaluations());
            if (constrainedSolution.getValue() < optimum.getValue()) {
                optimum = constrainedSolution;
            }
        } catch (TooManyEvaluationsException e) {
            // Ignore and continue with the next restart
        } catch (TooManyIterationsException e) {
            // Ignore and continue with the next restart
        } finally {
            boundedEvaluations += maxEvaluations.getMaxEval();
        }
    }
    return optimum;
}
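
MaxEval is an immutable holder for a single int, so the method above can safely reuse one instance across every BFGS and CMAES optimize(...) call and read the cap back with getMaxEval(). A minimal sketch of that reuse; the CMAESOptimizer arguments mirror the defaults used above:

    final MaxEval maxEvaluations = new MaxEval(2000);
    // The same instance can be passed to any number of optimize(...) calls and queried,
    // e.g. to size the CMAESOptimizer iteration limit as the method above does.
    CMAESOptimizer cmaes = new CMAESOptimizer(maxEvaluations.getMaxEval(), 0, true, 0, 1,
            new Well44497b(), false, new SimpleValueChecker(1e-6, 1e-10));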

From source file:gdsc.smlm.ij.plugins.TraceDiffusion.java

/**
 * Fit the jump distance histogram using a cumulative sum as detailed in
 * <p>
 * Update the plot by adding the fit line.
 * 
 * @param jumpDistances
 *            The raw jump distances
 * @param jdHistogram
 *            The cumulative jump distance histogram as [distance, cumulative probability]
 * @param title
 *            The plot title
 * @param plot
 *            The plot to update with the fit line
 * @return The fitted coefficients and fractions, or null if the fit failed
 */
private double[][] fitJumpDistance(StoredDataStatistics jumpDistances, double[][] jdHistogram, String title,
        Plot2 plot) {
    final double meanDistance = Math.sqrt(jumpDistances.getMean()) * 1e3;
    final double beta = meanDistance / precision;
    Utils.log(
            "Jump Distance analysis : N = %d, Time = %d frames (%s seconds). Mean Distance = %s nm, Precision = %s nm, Beta = %s",
            jumpDistances.getN(), settings.jumpDistance, Utils.rounded(settings.jumpDistance * exposureTime, 4),
            Utils.rounded(meanDistance, 4), Utils.rounded(precision, 4), Utils.rounded(beta, 4));
    int n = 0;
    int N = 10;
    double[] SS = new double[N];
    Arrays.fill(SS, -1);
    double[] ic = new double[N];
    Arrays.fill(ic, Double.POSITIVE_INFINITY);
    double[][] coefficients = new double[N][];
    double[][] fractions = new double[N][];
    double[][] fitParams = new double[N][];
    double bestIC = Double.POSITIVE_INFINITY;
    int best = -1;

    // Guess the D
    final double estimatedD = jumpDistances.getMean() / 4;
    Utils.log("Estimated D = %s um^2/s", Utils.rounded(estimatedD, 4));

    // Fit using a single population model
    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
    try {
        final JumpDistanceFunction function = new JumpDistanceFunction(jdHistogram[0], jdHistogram[1],
                estimatedD);
        PointVectorValuePair lvmSolution = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                    public double[][] value(double[] point) throws IllegalArgumentException {
                        return function.jacobian(point);
                    }
                }), new ModelFunction(function), new Target(function.getY()), new Weight(function.getWeights()),
                new InitialGuess(function.guess()));

        fitParams[n] = lvmSolution.getPointRef();
        SS[n] = calculateSumOfSquares(function.getY(), lvmSolution.getValueRef());
        ic[n] = Maths.getInformationCriterion(SS[n], function.x.length, 1);
        coefficients[n] = fitParams[n];
        fractions[n] = new double[] { 1 };

        Utils.log("Fit Jump distance (N=%d) : D = %s um^2/s, SS = %f, IC = %s (%d evaluations)", n + 1,
                Utils.rounded(fitParams[n][0], 4), SS[n], Utils.rounded(ic[n], 4), optimizer.getEvaluations());

        bestIC = ic[n];
        best = 0;

        addToPlot(function, fitParams[n], jdHistogram, title, plot, Color.magenta);
    } catch (TooManyIterationsException e) {
        Utils.log("Failed to fit : Too many iterations (%d)", optimizer.getIterations());
    } catch (ConvergenceException e) {
        Utils.log("Failed to fit : %s", e.getMessage());
    }

    n++;

    // Fit using a mixed population model. 
    // Vary n from 2 to N. Stop when the fit fails or the fit is worse.
    int bestMulti = -1;
    double bestMultiIC = Double.POSITIVE_INFINITY;
    while (n < N) {
        // Uses a weighted sum of n exponential functions; each function models a fraction of the particles.
        // An LVM fit cannot constrain the parameters to keep the fractions above zero.
        // Use the CMAESOptimizer, which supports bounded fitting.

        MixedJumpDistanceFunctionMultivariate mixedFunction = new MixedJumpDistanceFunctionMultivariate(
                jdHistogram[0], jdHistogram[1], estimatedD, n + 1);

        double[] lB = mixedFunction.getLowerBounds();
        double[] uB = mixedFunction.getUpperBounds();
        SimpleBounds bounds = new SimpleBounds(lB, uB);

        int maxIterations = 2000;
        double stopFitness = 0; //Double.NEGATIVE_INFINITY;
        boolean isActiveCMA = true;
        int diagonalOnly = 20;
        int checkFeasableCount = 1;
        RandomGenerator random = new Well19937c();
        boolean generateStatistics = false;
        ConvergenceChecker<PointValuePair> checker = new SimpleValueChecker(1e-6, 1e-10);
        // The sigma determines the search range for the variables. It should be 1/3 of the initial search region.
        double[] s = new double[lB.length];
        for (int i = 0; i < s.length; i++)
            s[i] = (uB[i] - lB[i]) / 3;
        OptimizationData sigma = new CMAESOptimizer.Sigma(s);
        OptimizationData popSize = new CMAESOptimizer.PopulationSize(
                (int) (4 + Math.floor(3 * Math.log(mixedFunction.x.length))));

        // Iterate this for stability in the initial guess
        CMAESOptimizer opt = new CMAESOptimizer(maxIterations, stopFitness, isActiveCMA, diagonalOnly,
                checkFeasableCount, random, generateStatistics, checker);

        int evaluations = 0;
        PointValuePair constrainedSolution = null;

        for (int i = 0; i <= settings.fitRestarts; i++) {
            // Try from the initial guess
            try {
                PointValuePair solution = opt.optimize(new InitialGuess(mixedFunction.guess()),
                        new ObjectiveFunction(mixedFunction), GoalType.MINIMIZE, bounds, sigma, popSize,
                        new MaxIter(maxIterations), new MaxEval(maxIterations * 2));
                if (constrainedSolution == null || solution.getValue() < constrainedSolution.getValue()) {
                    evaluations = opt.getEvaluations();
                    constrainedSolution = solution;
                    //Utils.log("[%da] Fit Jump distance (N=%d) : SS = %f (%d evaluations)", i, n + 1,
                    //      solution.getValue(), evaluations);
                }
            } catch (TooManyEvaluationsException e) {
                // Ignore and retry from the current optimum
            }

            if (constrainedSolution == null)
                continue;

            // Try from the current optimum
            try {
                PointValuePair solution = opt.optimize(new InitialGuess(constrainedSolution.getPointRef()),
                        new ObjectiveFunction(mixedFunction), GoalType.MINIMIZE, bounds, sigma, popSize,
                        new MaxIter(maxIterations), new MaxEval(maxIterations * 2));
                if (constrainedSolution == null || solution.getValue() < constrainedSolution.getValue()) {
                    evaluations = opt.getEvaluations();
                    constrainedSolution = solution;
                    //Utils.log("[%db] Fit Jump distance (N=%d) : SS = %f (%d evaluations)", i, n + 1,
                    //      solution.getValue(), evaluations);
                }
            } catch (TooManyEvaluationsException e) {
                // Ignore; keep the best solution found so far
            }
        }

        if (constrainedSolution == null) {
            Utils.log("Failed to fit N=%d", n + 1);
            break;
        }

        fitParams[n] = constrainedSolution.getPointRef();
        SS[n] = constrainedSolution.getValue();

        // TODO - Try a bounded BFGS optimiser

        // Try and improve using a LVM fit
        final MixedJumpDistanceFunctionGradient mixedFunctionGradient = new MixedJumpDistanceFunctionGradient(
                jdHistogram[0], jdHistogram[1], estimatedD, n + 1);

        PointVectorValuePair lvmSolution;
        try {
            lvmSolution = optimizer.optimize(new MaxIter(3000), new MaxEval(Integer.MAX_VALUE),
                    new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                        public double[][] value(double[] point) throws IllegalArgumentException {
                            return mixedFunctionGradient.jacobian(point);
                        }
                    }), new ModelFunction(mixedFunctionGradient), new Target(mixedFunctionGradient.getY()),
                    new Weight(mixedFunctionGradient.getWeights()), new InitialGuess(fitParams[n]));
            double ss = calculateSumOfSquares(mixedFunctionGradient.getY(), lvmSolution.getValue());
            // All fitted parameters must be above zero
            if (ss < SS[n] && Maths.min(lvmSolution.getPoint()) > 0) {
                //Utils.log("  Re-fitting improved the SS from %s to %s (-%s%%)", Utils.rounded(SS[n], 4),
                //      Utils.rounded(ss, 4), Utils.rounded(100 * (SS[n] - ss) / SS[n], 4));
                fitParams[n] = lvmSolution.getPoint();
                SS[n] = ss;
                evaluations += optimizer.getEvaluations();
            }
        } catch (TooManyIterationsException e) {
            //Utils.log("Failed to re-fit : Too many evaluations (%d)", optimizer.getEvaluations());
        } catch (ConvergenceException e) {
            //Utils.log("Failed to re-fit : %s", e.getMessage());
        }

        // Since the fractions must sum to one we subtract 1 degree of freedom from the number of parameters
        ic[n] = Maths.getInformationCriterion(SS[n], mixedFunction.x.length, fitParams[n].length - 1);

        double[] d = new double[n + 1];
        double[] f = new double[n + 1];
        double sum = 0;
        for (int i = 0; i < d.length; i++) {
            f[i] = fitParams[n][i * 2];
            sum += f[i];
            d[i] = fitParams[n][i * 2 + 1];
        }
        for (int i = 0; i < f.length; i++)
            f[i] /= sum;
        // Sort by coefficient size
        sort(d, f);
        coefficients[n] = d;
        fractions[n] = f;

        Utils.log("Fit Jump distance (N=%d) : D = %s um^2/s (%s), SS = %f, IC = %s (%d evaluations)", n + 1,
                format(d), format(f), SS[n], Utils.rounded(ic[n], 4), evaluations);

        boolean valid = true;
        for (int i = 0; i < f.length; i++) {
            // Check the fit has fractions above the minimum fraction
            if (f[i] < minFraction) {
                Utils.log("Fraction is less than the minimum fraction: %s < %s", Utils.rounded(f[i]),
                        Utils.rounded(minFraction));
                valid = false;
                break;
            }
            // Check the coefficients are different
            if (i + 1 < f.length && d[i] / d[i + 1] < minDifference) {
                Utils.log("Coefficients are not different: %s / %s = %s < %s", Utils.rounded(d[i]),
                        Utils.rounded(d[i + 1]), Utils.rounded(d[i] / d[i + 1]), Utils.rounded(minDifference));
                valid = false;
                break;
            }
        }

        if (!valid)
            break;

        // Store the best model
        if (bestIC > ic[n]) {
            bestIC = ic[n];
            best = n;
        }

        // Stop when the fit is worse than the best multi model so far
        if (bestMultiIC < ic[n]) {
            break;
        }

        // Store the best multi model
        bestMultiIC = ic[n];
        bestMulti = n;

        n++;
    }

    // Add the best fit to the plot and return the parameters.
    if (bestMulti > -1) {
        Function function = new MixedJumpDistanceFunctionMultivariate(jdHistogram[0], jdHistogram[1], 0,
                bestMulti + 1);
        addToPlot(function, fitParams[bestMulti], jdHistogram, title, plot, Color.yellow);
    }

    if (best > -1) {
        Utils.log("Best fit achieved using %d population%s: D = %s um^2/s, Fractions = %s", best + 1,
                (best == 0) ? "" : "s", format(coefficients[best]), format(fractions[best]));
    }

    return (best > -1) ? new double[][] { coefficients[best], fractions[best] } : null;
}

From source file:com.itemanalysis.psychometrics.irt.equating.StockingLordMethodTest.java

/**
 * This test uses a combination of 3PL and PCM items. Results compared to plink
 *
 * plink code:
 *
 * library(plink)
 *
 * fX<-matrix(c(
 * 1, -3.188047976, 0,NA,NA,
 * 1,  1.031760328, 0,NA,NA,
 * 1,  0.819040914, 0,NA,NA,
 * 1, -2.706947360, 0,NA,NA,
 * 1, -0.094527077, 0,NA,NA,
 * 1,  0.689697135, 0,NA,NA,
 * 1, -0.551837153, 0,NA,NA,
 * 1, -0.359559276, 0,NA,NA,
 * 1, -1.451470831, -0.146619694, -0.636399040, 0.783018734),
 * nrow=9, byrow=TRUE)
 * fX<-as.data.frame(fX)
 * names(fX)<-c("aparam", "bparam","cparam","s1","s2")
 *
 * fY<-matrix(c(
 * 1,-3.074599226,0,NA,NA,
 * 1,1.01282435,0,NA,NA,
 * 1,0.868538408,0,NA,NA,
 * 1,-2.404483603,0,NA,NA,
 * 1,0.037402866,0,NA,NA,
 * 1,0.70074742,0,NA,NA,
 * 1,-0.602555046,0,NA,NA,
 * 1,-0.350426446,0,NA,NA,
 * 1,-1.267744832,-0.185885988,-0.61535623,0.801242218),
 * nrow=9, byrow=TRUE)
 * fY<-as.data.frame(fY)
 * names(fY)<-c("aparam", "bparam","cparam","s1","s2")
 *
 * common<-cbind(1:9, 1:9)
 * cat<-c(rep(2,8),4)
 *
 * pmX <- as.poly.mod(9,c("drm","gpcm"),list(1:8,9))
 * pmY <- as.poly.mod(9,c("drm","gpcm"),list(1:8,9))
 *
 * pars <- as.irt.pars(list(fx=fX,fy=fY), common, cat=list(fx=cat,fy=cat),
 * poly.mod=list(pmX,pmY), location=c(TRUE,TRUE))
 *
 * plink(pars, startvals=c(1,0), rescale="SL", base.grp=2, D=1.0, symmetric=TRUE)
 *
 *
 */
@Test
public void mixedFormatRaschPCMTest() {
    System.out.println("Mixed format Stocking-Lord test: Rasch and PCM");

    LinkedHashMap<String, ItemResponseModel> itemFormX = new LinkedHashMap<String, ItemResponseModel>();
    LinkedHashMap<String, ItemResponseModel> itemFormY = new LinkedHashMap<String, ItemResponseModel>();

    itemFormX.put("Item1", new Irm3PL(-3.188047976, 1.0));
    itemFormX.put("Item2", new Irm3PL(1.031760328, 1.0));
    itemFormX.put("Item3", new Irm3PL(0.819040914, 1.0));
    itemFormX.put("Item4", new Irm3PL(-2.706947360, 1.0));
    itemFormX.put("Item5", new Irm3PL(-0.094527077, 1.0));
    itemFormX.put("Item6", new Irm3PL(0.689697135, 1.0));
    itemFormX.put("Item7", new Irm3PL(-0.551837153, 1.0));
    itemFormX.put("Item8", new Irm3PL(-0.359559276, 1.0));
    double[] step1x = { -0.146619694, -0.636399040, 0.783018734 };
    itemFormX.put("Item9", new IrmPCM(-1.451470831, step1x, 1.0));

    itemFormY.put("Item1", new Irm3PL(-3.074599226, 1.0));
    itemFormY.put("Item2", new Irm3PL(1.012824350, 1.0));
    itemFormY.put("Item3", new Irm3PL(0.868538408, 1.0));
    itemFormY.put("Item4", new Irm3PL(-2.404483603, 1.0));
    itemFormY.put("Item5", new Irm3PL(0.037402866, 1.0));
    itemFormY.put("Item6", new Irm3PL(0.700747420, 1.0));
    itemFormY.put("Item7", new Irm3PL(-0.602555046, 1.0));
    itemFormY.put("Item8", new Irm3PL(-0.350426446, 1.0));
    double[] step1y = { -0.185885988, -0.61535623, 0.801242218 };
    itemFormY.put("Item9", new IrmPCM(-1.267744832, step1y, 1.0));

    double f = 0;
    double[] param1 = new double[2];
    double[] param2 = new double[2];

    UniformDistributionApproximation distX = new UniformDistributionApproximation(-4.0, 4.0, 161);//plink default
    UniformDistributionApproximation distY = new UniformDistributionApproximation(-4.0, 4.0, 161);//plink default

    StockingLordMethod sl = new StockingLordMethod(itemFormX, itemFormY, distX, distY,
            EquatingCriterionType.Q1Q2);
    sl.setPrecision(6);
    double[] startValues = { 0.0 };//Using a start value for the intercept only for the Rasch family of models.

    DefaultUncminOptimizer optimizer = new DefaultUncminOptimizer();

    try {
        optimizer.minimize(sl, startValues);
        double[] r = optimizer.getParameters();
        param1[0] = r[0];
        param1[1] = 1.0;
        f = optimizer.getFunctionValue();
        sl.setIntercept(param1[0]);
        sl.setScale(param1[1]);

    } catch (UncminException ex) {
        ex.printStackTrace();
    }

    //Check UNCMIN values against results from plink.
    System.out.println("  UNCMIN Optimization");
    System.out.println("  Iterations: ");
    System.out.println("  fmin: " + f);
    System.out.println("  B = " + sl.getIntercept() + "  A = " + sl.getScale());

    assertEquals("  Intercept test", 0.101388, sl.getIntercept(), 1e-4);//True results from plink package in R
    assertEquals("  Scale test", 1.0, sl.getScale(), 1e-4);

    //Check Brent optimizer values against plink
    BrentOptimizer brentOptimizer = new BrentOptimizer(1e-8, 1e-8);
    UnivariatePointValuePair pair = brentOptimizer.optimize(new MaxEval(200),
            new UnivariateObjectiveFunction(sl), GoalType.MINIMIZE, new SearchInterval(-3, 3));
    param2[0] = pair.getPoint();
    param2[1] = 1.0;
    sl.setIntercept(param2[0]);
    sl.setScale(param2[1]);
    f = pair.getValue();

    System.out.println();
    System.out.println("  Brent Optimization");
    System.out.println("  Iterations: ");
    System.out.println("  fmin: " + f);
    System.out.println("  B = " + sl.getIntercept() + "  A = " + sl.getScale());

    //Check UNCMIN values against results from plink.
    assertEquals("  Intercept test", 0.101388, sl.getIntercept(), 1e-4);
    assertEquals("  Scale test", 1.0, sl.getScale(), 1e-4);

    //Compare UNCMIN and Brent optimizer results.
    assertEquals("  UNCMIN/Brent intercept test", param1[0], param2[0], 1e-6);
    assertEquals("  UNCMIN/Brent slope test", param1[1], param2[1], 1e-6);

}