Example usage for org.apache.commons.math3.util Precision SAFE_MIN

List of usage examples for org.apache.commons.math3.util Precision SAFE_MIN

Introduction

On this page you can find example usage for org.apache.commons.math3.util Precision SAFE_MIN.

Prototype

double SAFE_MIN

Document

Safe minimum, such that 1 / SAFE_MIN does not overflow.
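
A quick sketch of what this guarantee means in practice (a standalone illustration, not part of the library documentation): SAFE_MIN is the smallest positive normalized double, so its reciprocal is a large but still finite double.

import org.apache.commons.math3.util.Precision;

public class SafeMinDemo {
    public static void main(String[] args) {
        // SAFE_MIN is the smallest positive normalized double (2^-1022),
        // so dividing by it never overflows to infinity.
        System.out.println(Precision.SAFE_MIN);                           // ~2.2250738585072014E-308
        System.out.println(1.0 / Precision.SAFE_MIN);                     // finite, ~4.49E307
        System.out.println(Double.isInfinite(1.0 / Precision.SAFE_MIN));  // false
    }
}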

Usage

From source file:gdsc.smlm.fitting.nonlinear.ApacheLVMFitter.java

public FitStatus fit(int n, double[] y, double[] y_fit, double[] a, double[] a_dev, double[] error,
        double noise) {
    numberOfFittedPoints = n;

    try {
        // Different convergence thresholds seem to have no effect on the resulting fit, only the number of
        // iterations for convergence
        final double initialStepBoundFactor = 100;
        final double costRelativeTolerance = 1e-10;
        final double parRelativeTolerance = 1e-10;
        final double orthoTolerance = 1e-10;
        final double threshold = Precision.SAFE_MIN;

        // Extract the parameters to be fitted
        final double[] initialSolution = getInitialSolution(a);

        // TODO - Pass in more advanced stopping criteria.

        // Create the target and weight arrays
        final double[] yd = new double[n];
        final double[] w = new double[n];
        for (int i = 0; i < n; i++) {
            yd[i] = y[i];
            w[i] = 1;
        }

        LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer(initialStepBoundFactor,
                costRelativeTolerance, parRelativeTolerance, orthoTolerance, threshold);
        PointVectorValuePair optimum = optimizer.optimize(new MaxIter(getMaxEvaluations()),
                new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunctionWrapper(f, a, n)),
                new ModelFunction(new MultivariateVectorFunctionWrapper(f, a, n)), new Target(yd),
                new Weight(w), new InitialGuess(initialSolution));

        final double[] parameters = optimum.getPoint();
        setSolution(a, parameters);
        iterations = optimizer.getIterations();
        evaluations = optimizer.getEvaluations();
        if (a_dev != null) {
            double[][] covar = optimizer.computeCovariances(parameters, threshold);
            setDeviations(a_dev, covar);
        }
        // Compute fitted function if desired 
        if (y_fit != null) {
            f.initialise(a);
            for (int i = 0; i < n; i++)
                y_fit[i] = f.eval(i);
        }

        residualSumOfSquares = error[0] = optimizer.getChiSquare();
        totalSumOfSquares = getSumOfSquares(n, y);
    } catch (TooManyEvaluationsException e) {
        return FitStatus.FAILED_TO_CONVERGE;
    } catch (ConvergenceException e) {
        // Occurs when QR decomposition fails - mark as a singular non-linear model (no solution)
        return FitStatus.SINGULAR_NON_LINEAR_MODEL;
    } catch (Exception e) {
        // TODO - Find out the other exceptions from the fitter and add return values to match. 
        return FitStatus.UNKNOWN;
    }

    return FitStatus.OK;
}
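
The pattern above can be reduced to a self-contained sketch. The example below is not part of ApacheLVMFitter; it fits a straight line with the same (deprecated) commons-math3 optimizer API and, like the code above, passes Precision.SAFE_MIN as the singularity threshold used for QR ranking and, if requested, covariance computation.

import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
import org.apache.commons.math3.analysis.MultivariateVectorFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.MaxIter;
import org.apache.commons.math3.optim.PointVectorValuePair;
import org.apache.commons.math3.optim.nonlinear.vector.ModelFunction;
import org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian;
import org.apache.commons.math3.optim.nonlinear.vector.Target;
import org.apache.commons.math3.optim.nonlinear.vector.Weight;
import org.apache.commons.math3.optim.nonlinear.vector.jacobian.LevenbergMarquardtOptimizer;
import org.apache.commons.math3.util.Precision;

public class LineFitSketch {
    public static void main(String[] args) {
        final double[] x = { 0, 1, 2, 3, 4 };
        final double[] y = { 1.1, 3.0, 4.9, 7.2, 8.8 }; // roughly y = 2x + 1

        // Model: value[i] = p[0] * x[i] + p[1]
        MultivariateVectorFunction model = new MultivariateVectorFunction() {
            public double[] value(double[] p) {
                double[] v = new double[x.length];
                for (int i = 0; i < x.length; i++)
                    v[i] = p[0] * x[i] + p[1];
                return v;
            }
        };
        // Jacobian: d value[i] / d p[0] = x[i], d value[i] / d p[1] = 1
        MultivariateMatrixFunction jacobian = new MultivariateMatrixFunction() {
            public double[][] value(double[] p) {
                double[][] j = new double[x.length][2];
                for (int i = 0; i < x.length; i++) {
                    j[i][0] = x[i];
                    j[i][1] = 1;
                }
                return j;
            }
        };

        double[] weights = new double[x.length];
        java.util.Arrays.fill(weights, 1);

        // SAFE_MIN as the rank/singularity threshold, as in the examples on this page
        LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer(100, 1e-10, 1e-10, 1e-10,
                Precision.SAFE_MIN);
        PointVectorValuePair optimum = optimizer.optimize(new MaxIter(1000), new MaxEval(Integer.MAX_VALUE),
                new ModelFunction(model), new ModelFunctionJacobian(jacobian), new Target(y),
                new Weight(weights), new InitialGuess(new double[] { 1, 0 }));

        System.out.println("slope=" + optimum.getPoint()[0] + ", intercept=" + optimum.getPoint()[1]);
    }
}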

From source file:gdsc.smlm.ij.plugins.BlinkEstimator.java

/**
 * Fit the dark time to counts of molecules curve. Only use the first n fitted points.
 * <p>
 * Calculates:<br/>
 * N = The number of photoblinking molecules in the sample<br/>
 * nBlink = The average number of blinks per fluorophore<br/>
 * tOff = The off-time
 * 
 * @param td
 *            The dark time
 * @param ntd
 *            The counts of molecules
 * @param nFittedPoints
 *            The number of points to fit
 * @param log
 *            Write the fitting results to the ImageJ log window
 * @return The fitted parameters [N, nBlink, tOff], or null if no fit was possible
 */
public double[] fit(double[] td, double[] ntd, int nFittedPoints, boolean log) {
    blinkingModel = new BlinkingFunction();
    blinkingModel.setLogging(true);
    for (int i = 0; i < nFittedPoints; i++)
        blinkingModel.addPoint(td[i], ntd[i]);

    // Different convergence thresholds seem to have no effect on the resulting fit, only the number of
    // iterations for convergence
    double initialStepBoundFactor = 100;
    double costRelativeTolerance = 1e-6;
    double parRelativeTolerance = 1e-6;
    double orthoTolerance = 1e-6;
    double threshold = Precision.SAFE_MIN;
    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer(initialStepBoundFactor,
            costRelativeTolerance, parRelativeTolerance, orthoTolerance, threshold);
    try {
        double[] obs = blinkingModel.getY();

        PointVectorValuePair optimum = optimizer.optimize(new MaxIter(1000), new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                    public double[][] value(double[] point) throws IllegalArgumentException {
                        return blinkingModel.jacobian(point);
                    }
                }), new ModelFunction(blinkingModel), new Target(obs), new Weight(blinkingModel.getWeights()),
                new InitialGuess(new double[] { ntd[0], 0.1, td[1] }));

        blinkingModel.setLogging(false);

        double[] parameters = optimum.getPoint();

        double[] exp = optimum.getValue();
        double mean = 0;
        for (double d : obs)
            mean += d;
        mean /= obs.length;
        double ssResiduals = 0, ssTotal = 0;
        for (int i = 0; i < obs.length; i++) {
            ssResiduals += (obs[i] - exp[i]) * (obs[i] - exp[i]);
            ssTotal += (obs[i] - mean) * (obs[i] - mean);
        }

        r2 = 1 - ssResiduals / ssTotal;
        adjustedR2 = getAdjustedCoefficientOfDetermination(ssResiduals, ssTotal, obs.length, parameters.length);

        if (log) {
            Utils.log("  Fit %d points. R^2 = %s. Adjusted R^2 = %s", obs.length, Utils.rounded(r2, 4),
                    Utils.rounded(adjustedR2, 4));
            Utils.log("  N=%s, nBlink=%s, tOff=%s (%s frames)", Utils.rounded(parameters[0], 4),
                    Utils.rounded(parameters[1], 4), Utils.rounded(parameters[2], 4),
                    Utils.rounded(parameters[2] / msPerFrame, 4));
        }

        return parameters;
    } catch (TooManyIterationsException e) {
        if (log)
            Utils.log("  Failed to fit %d points: Too many iterations (%d)", blinkingModel.size(),
                    optimizer.getIterations());
        return null;
    } catch (ConvergenceException e) {
        if (log)
            Utils.log("  Failed to fit %d points", blinkingModel.size());
        return null;
    }
}
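
For reference, the adjusted coefficient of determination reported above is conventionally computed from the same sums of squares as follows. This helper is only a sketch of the standard formula; the body of getAdjustedCoefficientOfDetermination is not shown on this page, so treating it as equivalent is an assumption.

// Standard adjusted R^2: penalises R^2 for the number of fitted parameters p
// given n observations (assumed equivalent to getAdjustedCoefficientOfDetermination).
static double adjustedR2(double ssResiduals, double ssTotal, int n, int p) {
    return 1 - (ssResiduals / (n - p - 1)) / (ssTotal / (n - 1));
}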

From source file:edu.cudenver.bios.power.glmm.GLMMTestUnivariateRepeatedMeasures.java

/**
 * Ensure that the within subject contrast is orthonormal for all
 * UNIREP tests
 */
protected void createOrthonormalU() {
    RealMatrix UtU = U.transpose().multiply(U);
    double upperLeft = UtU.getEntry(0, 0);
    if (upperLeft != 0)
        UtU = UtU.scalarMultiply(1 / upperLeft);

    RealMatrix diffFromIdentity = UtU.subtract(
            org.apache.commons.math3.linear.MatrixUtils.createRealIdentityMatrix(UtU.getRowDimension()));

    // get maximum absolute value in U'U
    double maxValue = Double.NEGATIVE_INFINITY;
    for (int r = 0; r < diffFromIdentity.getRowDimension(); r++) {
        for (int c = 0; c < diffFromIdentity.getColumnDimension(); c++) {
            double entryVal = Math.abs(diffFromIdentity.getEntry(r, c));
            if (entryVal > maxValue)
                maxValue = entryVal;
        }
    }

    if (maxValue > Precision.SAFE_MIN) {
        // U'U matrix deviates from identity, so create a U matrix that is orthonormal
        // TODO: thus UNIREP tests may use a different U matrix than HLT/PBT/WLR tests???
        // TODO: displayed matrix results are incorrect now?
        U = new GramSchmidtOrthonormalization(U).getQ();
        debug("U replaced by orthonormal", U);
    }
}
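
If only commons-math3 is available (GramSchmidtOrthonormalization comes from the separate JavaStatistics library), the same "deviation from identity" test and re-orthonormalization can be sketched with a QR decomposition. This is an assumption of equivalence (the Q factor matches a Gram-Schmidt basis only up to column signs) and assumes U has full column rank and at least as many rows as columns.

import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.QRDecomposition;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.util.Precision;

// Sketch: if the columns of U deviate from orthonormality by more than SAFE_MIN,
// replace U by an orthonormal basis of its column space.
static RealMatrix orthonormalizeIfNeeded(RealMatrix U) {
    RealMatrix UtU = U.transpose().multiply(U);
    RealMatrix diff = UtU.subtract(MatrixUtils.createRealIdentityMatrix(UtU.getRowDimension()));
    double maxAbs = 0;
    for (int r = 0; r < diff.getRowDimension(); r++)
        for (int c = 0; c < diff.getColumnDimension(); c++)
            maxAbs = Math.max(maxAbs, Math.abs(diff.getEntry(r, c)));
    if (maxAbs > Precision.SAFE_MIN) {
        // keep only the first n columns of the full Q factor
        return new QRDecomposition(U).getQ().getSubMatrix(0, U.getRowDimension() - 1, 0,
                U.getColumnDimension() - 1);
    }
    return U;
}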

From source file:be.ugent.maf.cellmissy.analysis.doseresponse.impl.DoseResponseLMOptimizer.java

/**
 * Copy of the superclass source code, required because the superclass's
 * InternalData class is inaccessible. Determines the Levenberg-Marquardt parameter.
 *
 * <p>
 * This implementation is a translation in Java of the MINPACK
 * <a href="http://www.netlib.org/minpack/lmpar.f">lmpar</a>
 * routine.</p>
 * <p>
 * This method sets the lmPar and lmDir attributes.</p>
 * <p>
 * The authors of the original fortran function are:</p>
 * <ul>
 * <li>Argonne National Laboratory. MINPACK project. March 1980</li>
 * <li>Burton S. Garbow</li>
 * <li>Kenneth E. Hillstrom</li>
 * <li>Jorge J. More</li>
 * </ul>
 * <p>
 * Luc Maisonobe did the Java translation.</p>
 *
 * @param qy Array containing qTy.
 * @param delta Upper bound on the euclidean norm of diagR * lmDir.
 * @param diag Diagonal matrix.
 * @param internalData Data (modified in-place in this method).
 * @param solvedCols Number of solved points.
 * @param work1 work array
 * @param work2 work array
 * @param work3 work array
 * @param lmDir the "returned" LM direction will be stored in this array.
 * @param lmPar the value of the LM parameter from the previous iteration.
 * @return the new LM parameter
 */
private double determineLMParameter(double[] qy, double delta, double[] diag, InternalData internalData,
        int solvedCols, double[] work1, double[] work2, double[] work3, double[] lmDir, double lmPar) {
    final double[][] weightedJacobian = internalData.weightedJacobian;
    final int[] permutation = internalData.permutation;
    final int rank = internalData.rank;
    final double[] diagR = internalData.diagR;

    final int nC = weightedJacobian[0].length;

    // compute and store in x the gauss-newton direction, if the
    // jacobian is rank-deficient, obtain a least squares solution
    for (int j = 0; j < rank; ++j) {
        lmDir[permutation[j]] = qy[j];
    }
    for (int j = rank; j < nC; ++j) {
        lmDir[permutation[j]] = 0;
    }
    for (int k = rank - 1; k >= 0; --k) {
        int pk = permutation[k];
        double ypk = lmDir[pk] / diagR[pk];
        for (int i = 0; i < k; ++i) {
            lmDir[permutation[i]] -= ypk * weightedJacobian[i][pk];
        }
        lmDir[pk] = ypk;
    }

    // evaluate the function at the origin, and test
    // for acceptance of the Gauss-Newton direction
    double dxNorm = 0;
    for (int j = 0; j < solvedCols; ++j) {
        int pj = permutation[j];
        double s = diag[pj] * lmDir[pj];
        work1[pj] = s;
        dxNorm += s * s;
    }
    dxNorm = FastMath.sqrt(dxNorm);
    double fp = dxNorm - delta;
    if (fp <= 0.1 * delta) {
        lmPar = 0;
        return lmPar;
    }

    // if the jacobian is not rank deficient, the Newton step provides
    // a lower bound, parl, for the zero of the function,
    // otherwise set this bound to zero
    double sum2;
    double parl = 0;
    if (rank == solvedCols) {
        for (int j = 0; j < solvedCols; ++j) {
            int pj = permutation[j];
            work1[pj] *= diag[pj] / dxNorm;
        }
        sum2 = 0;
        for (int j = 0; j < solvedCols; ++j) {
            int pj = permutation[j];
            double sum = 0;
            for (int i = 0; i < j; ++i) {
                sum += weightedJacobian[i][pj] * work1[permutation[i]];
            }
            double s = (work1[pj] - sum) / diagR[pj];
            work1[pj] = s;
            sum2 += s * s;
        }
        parl = fp / (delta * sum2);
    }

    // calculate an upper bound, paru, for the zero of the function
    sum2 = 0;
    for (int j = 0; j < solvedCols; ++j) {
        int pj = permutation[j];
        double sum = 0;
        for (int i = 0; i <= j; ++i) {
            sum += weightedJacobian[i][pj] * qy[i];
        }
        sum /= diag[pj];
        sum2 += sum * sum;
    }
    double gNorm = FastMath.sqrt(sum2);
    double paru = gNorm / delta;
    if (paru == 0) {
        paru = Precision.SAFE_MIN / FastMath.min(delta, 0.1);
    }

    // if the input par lies outside of the interval (parl,paru),
    // set par to the closer endpoint
    lmPar = FastMath.min(paru, FastMath.max(lmPar, parl));
    if (lmPar == 0) {
        lmPar = gNorm / dxNorm;
    }

    for (int countdown = 10; countdown >= 0; --countdown) {

        // evaluate the function at the current value of lmPar
        if (lmPar == 0) {
            lmPar = FastMath.max(Precision.SAFE_MIN, 0.001 * paru);
        }
        double sPar = FastMath.sqrt(lmPar);
        for (int j = 0; j < solvedCols; ++j) {
            int pj = permutation[j];
            work1[pj] = sPar * diag[pj];
        }
        determineLMDirection(qy, work1, work2, internalData, solvedCols, work3, lmDir);

        dxNorm = 0;
        for (int j = 0; j < solvedCols; ++j) {
            int pj = permutation[j];
            double s = diag[pj] * lmDir[pj];
            work3[pj] = s;
            dxNorm += s * s;
        }
        dxNorm = FastMath.sqrt(dxNorm);
        double previousFP = fp;
        fp = dxNorm - delta;

        // if the function is small enough, accept the current value
        // of lmPar, also test for the exceptional cases where parl is zero
        if (FastMath.abs(fp) <= 0.1 * delta || (parl == 0 && fp <= previousFP && previousFP < 0)) {
            return lmPar;
        }

        // compute the Newton correction
        for (int j = 0; j < solvedCols; ++j) {
            int pj = permutation[j];
            work1[pj] = work3[pj] * diag[pj] / dxNorm;
        }
        for (int j = 0; j < solvedCols; ++j) {
            int pj = permutation[j];
            work1[pj] /= work2[j];
            double tmp = work1[pj];
            for (int i = j + 1; i < solvedCols; ++i) {
                work1[permutation[i]] -= weightedJacobian[i][pj] * tmp;
            }
        }
        sum2 = 0;
        for (int j = 0; j < solvedCols; ++j) {
            double s = work1[permutation[j]];
            sum2 += s * s;
        }
        double correction = fp / (delta * sum2);

        // depending on the sign of the function, update parl or paru.
        if (fp > 0) {
            parl = FastMath.max(parl, lmPar);
        } else if (fp < 0) {
            paru = FastMath.min(paru, lmPar);
        }

        // compute an improved estimate for lmPar
        lmPar = FastMath.max(parl, lmPar + correction);
    }

    return lmPar;
}
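
In MINPACK terms, the routine above searches for the Levenberg-Marquardt parameter \lambda (lmPar) so that the resulting step matches the trust-region radius. A compact statement of the problem it solves (a summary of the MINPACK lmpar documentation, not text taken from this class) is

\[
  (J^T J + \lambda\, D^T D)\, p = J^T r, \qquad \text{find } \lambda \ge 0 \text{ such that } \lVert D\, p \rVert \approx \Delta ,
\]

where J is the weighted Jacobian, D is the diagonal scaling matrix (the diag argument), r is the current residual vector (the qy argument holds Qᵀy, per the javadoc above), and \Delta is the trust-region radius (delta). Precision.SAFE_MIN only appears as a floor that keeps the upper bound paru and the parameter itself strictly positive when they would otherwise be zero.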

From source file:experiment.SimpleRegression_bug.java

/**
 * Performs a regression on data present in buffers and outputs a RegressionResults object.
 *
 * <p>If there are fewer than 3 observations in the model and {@code hasIntercept} is true
 * a {@code NoDataException} is thrown.  If there is no intercept term, the model must
 * contain at least 2 observations.</p>
 *
 * @return RegressionResults acts as a container of regression output
 * @throws ModelSpecificationException if the model is not correctly specified
 * @throws NoDataException if there is not sufficient data in the model to
 * estimate the regression parameters
 */
public RegressionResults regress() throws ModelSpecificationException, NoDataException {
    if (hasIntercept) {
        if (n < 3) {
            throw new NoDataException(LocalizedFormats.NOT_ENOUGH_DATA_REGRESSION);
        }
        if (FastMath.abs(sumXX) > Precision.SAFE_MIN) {
            final double[] params = new double[] { getIntercept(), getSlope() };
            final double mse = getMeanSquareError();
            final double _syy = sumYY + sumY * sumY / n;
            final double[] vcv = new double[] { mse * (xbar * xbar / sumXX + 1.0 / n), -xbar * mse / sumXX,
                    mse / sumXX };
            return new RegressionResults(params, new double[][] { vcv }, true, n, 2, sumY, _syy,
                    getSumSquaredErrors(), true, false);
        } else {
            final double[] params = new double[] { sumY / n, Double.NaN };
            //final double mse = getMeanSquareError();
            final double[] vcv = new double[] { ybar / (n - 1.0), Double.NaN, Double.NaN };
            return new RegressionResults(params, new double[][] { vcv }, true, n, 1, sumY, sumYY,
                    getSumSquaredErrors(), true, false);
        }
    } else {
        if (n < 2) {
            throw new NoDataException(LocalizedFormats.NOT_ENOUGH_DATA_REGRESSION);
        }
        if (!Double.isNaN(sumXX)) {
            final double[] vcv = new double[] { getMeanSquareError() / sumXX };
            final double[] params = new double[] { sumXY / sumXX };
            return new RegressionResults(params, new double[][] { vcv }, true, n, 1, sumY, sumYY,
                    getSumSquaredErrors(), false, false);
        } else {
            final double[] vcv = new double[] { Double.NaN };
            final double[] params = new double[] { Double.NaN };
            return new RegressionResults(params, new double[][] { vcv }, true, n, 1, Double.NaN, Double.NaN,
                    Double.NaN, false, false);
        }
    }
}
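
For context, the stock org.apache.commons.math3.stat.regression.SimpleRegression, which this experimental copy mirrors, is typically driven as below; the data points are illustrative only. As shown above, regress() compares sumXX against Precision.SAFE_MIN before dividing by it.

import org.apache.commons.math3.stat.regression.RegressionResults;
import org.apache.commons.math3.stat.regression.SimpleRegression;

public class RegressDemo {
    public static void main(String[] args) {
        SimpleRegression regression = new SimpleRegression(true); // model with an intercept term
        regression.addData(1, 2.1);
        regression.addData(2, 3.9);
        regression.addData(3, 6.2);

        // regress() packages the parameter estimates and their variance-covariance matrix
        RegressionResults results = regression.regress();
        System.out.println("intercept = " + results.getParameterEstimate(0));
        System.out.println("slope     = " + results.getParameterEstimate(1));
        System.out.println("R^2       = " + results.getRSquared());
    }
}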

From source file:gdsc.utils.Cell_Outliner.java

/**
 * Find an ellipse that optimises the fit to the polygon detected edges.
 *
 * @param roi
 *            The polygon ROI outlining the detected cell edges
 * @param params
 *            The initial estimate of the ellipse parameters
 * @param weightMap
 *            The weight map used to weight the fit
 * @param angle
 * @return The fitted ellipse parameters, or null if no elliptical solution was found
 */
private double[] fitPolygonalCell(PolygonRoi roi, double[] params, FloatProcessor weightMap,
        FloatProcessor angle) {
    // Get an estimate of the starting parameters using the current polygon
    double[] startPoint = params;
    startPoint = estimateStartPoint(roi, weightMap.getWidth(), weightMap.getHeight());

    int maxEval = 2000;
    final DifferentiableEllipticalFitFunction func = new DifferentiableEllipticalFitFunction(roi, weightMap);

    double relativeThreshold = 100 * Precision.EPSILON;
    double absoluteThreshold = 100 * Precision.SAFE_MIN;
    ConvergenceChecker<PointVectorValuePair> checker = new SimplePointChecker<PointVectorValuePair>(
            relativeThreshold, absoluteThreshold);
    double initialStepBoundFactor = 10;
    double costRelativeTolerance = 1e-10;
    double parRelativeTolerance = 1e-10;
    double orthoTolerance = 1e-10;
    double threshold = Precision.SAFE_MIN;

    LevenbergMarquardtOptimizer optimiser = new LevenbergMarquardtOptimizer(initialStepBoundFactor, checker,
            costRelativeTolerance, parRelativeTolerance, orthoTolerance, threshold);

    try {
        PointVectorValuePair solution = optimiser.optimize(new MaxIter(maxEval), new MaxEval(Integer.MAX_VALUE),
                new ModelFunctionJacobian(new MultivariateMatrixFunction() {
                    public double[][] value(double[] point) throws IllegalArgumentException {
                        return func.jacobian(point);
                    }
                }), new ModelFunction(func), new Target(func.calculateTarget()),
                new Weight(func.calculateWeights()), new InitialGuess(startPoint));

        if (debug)
            IJ.log(String.format("Eval = %d (Iter = %d), RMS = %f", optimiser.getEvaluations(),
                    optimiser.getIterations(), optimiser.getRMS()));
        return solution.getPointRef();
    } catch (Exception e) {
        IJ.log("Failed to find an elliptical solution, defaulting to polygon");
        e.printStackTrace();
    }

    return null;
}

From source file:org.drugis.mtc.yadas.MultivariateGaussian.java

public double compute(double[] x, double[] mu, double[][] sigma) {
    int d = x.length;
    if (d != mu.length || d != sigma.length) {
        throw new IllegalArgumentException("All arguments need to be of equal length");
    }

    Array2DRowRealMatrix sigmaM = new Array2DRowRealMatrix(sigma);
    Array2DRowRealMatrix xM = new Array2DRowRealMatrix(x);
    Array2DRowRealMatrix muM = new Array2DRowRealMatrix(mu);

    // Return the log of:
    // 1/sqrt(2pi^d * det(sigma)) * e^(-.5 (x - mu)' inv(sigma) (x - mu))
    // Which is:
    // -log(sqrt(2pi^d * det(sigma))) + -.5 (x - mu)' inv(sigma) (x - mu) 
    Array2DRowRealMatrix dM = xM.subtract(muM);
    LUDecomposition sigmaD = new LUDecomposition(sigmaM, Precision.SAFE_MIN);
    try {
        RealMatrix sigmaInv = sigmaD.getSolver().getInverse();
        return -0.5 * (Math.log(2 * Math.PI) * d + Math.log(sigmaD.getDeterminant())
                + dM.transpose().multiply(sigmaInv).multiply(dM).getEntry(0, 0));
    } catch (RuntimeException e) {
        System.out.println(sigmaM);
        throw e;
    }
    /*      RealMatrix sigmaInv = sigmaM.inverse();
          return -0.5 * (
             Math.log(2 * Math.PI) * d + Math.log(sigmaM.getDeterminant()) +
             dM.transpose().multiply(sigmaInv).multiply(dM).getEntry(0, 0)); */
}
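
The quantity returned above is the log-density of the multivariate normal distribution, i.e. (standard result, matching the inline comment)

\[
  \log p(x) = -\tfrac{1}{2}\left( d \log(2\pi) + \log \det \Sigma + (x-\mu)^T \Sigma^{-1} (x-\mu) \right).
\]

Passing Precision.SAFE_MIN as the LUDecomposition singularity threshold means the decomposition only treats the covariance matrix as singular when a pivot is essentially exactly zero, so nearly-singular covariance matrices are still inverted rather than rejected.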

From source file:org.orekit.bodies.Ellipsoid.java

/** Compute the 2D ellipse at the intersection of the 3D ellipsoid and a plane.
 * @param planePoint point belonging to the plane, in the ellipsoid frame
 * @param planeNormal normal of the plane, in the ellipsoid frame
 * @return plane section or null if there are no intersections
 */
public Ellipse getPlaneSection(final Vector3D planePoint, final Vector3D planeNormal) {

    // we define the points Q in the plane using two free variables τ and ν as:
    // Q = P + τ u + ν v
    // where u and v are two unit vectors belonging to the plane
    // Q belongs to the 3D ellipsoid so:
    // (xQ / a)² + (yQ / b)² + (zQ / c)² = 1
    // combining both equations, we get:
    //   (xP² + 2 xP (τ xU + ν xV) + (τ xU + ν xV)²) / a²
    // + (yP² + 2 yP (τ yU + ν yV) + (τ yU + ν yV)²) / b²
    // + (zP² + 2 zP (τ zU + ν zV) + (τ zU + ν zV)²) / c²
    // = 1
    // which can be rewritten:
    // α τ² + β ν² + 2 γ τν + 2 δ τ + 2 ε ν + ζ = 0
    // with
    // α =  xU²  / a² +  yU²  / b² +  zU²  / c² > 0
    // β =  xV²  / a² +  yV²  / b² +  zV²  / c² > 0
    // γ = xU xV / a² + yU yV / b² + zU zV / c²
    // δ = xP xU / a² + yP yU / b² + zP zU / c²
    // ε = xP xV / a² + yP yV / b² + zP zV / c²
    // ζ =  xP²  / a² +  yP²  / b² +  zP²  / c² - 1
    // this is the equation of a conic (here an ellipse)
    // Of course, we note that if the point P belongs to the ellipsoid
    // then ζ = 0 and the equation holds at point P since τ = 0 and ν = 0
    final Vector3D u = planeNormal.orthogonal();
    final Vector3D v = Vector3D.crossProduct(planeNormal, u).normalize();
    final double xUOa = u.getX() / a;
    final double yUOb = u.getY() / b;
    final double zUOc = u.getZ() / c;
    final double xVOa = v.getX() / a;
    final double yVOb = v.getY() / b;
    final double zVOc = v.getZ() / c;
    final double xPOa = planePoint.getX() / a;
    final double yPOb = planePoint.getY() / b;
    final double zPOc = planePoint.getZ() / c;
    final double alpha = xUOa * xUOa + yUOb * yUOb + zUOc * zUOc;
    final double beta = xVOa * xVOa + yVOb * yVOb + zVOc * zVOc;
    final double gamma = MathArrays.linearCombination(xUOa, xVOa, yUOb, yVOb, zUOc, zVOc);
    final double delta = MathArrays.linearCombination(xPOa, xUOa, yPOb, yUOb, zPOc, zUOc);
    final double epsilon = MathArrays.linearCombination(xPOa, xVOa, yPOb, yVOb, zPOc, zVOc);
    final double zeta = MathArrays.linearCombination(xPOa, xPOa, yPOb, yPOb, zPOc, zPOc, 1, -1);

    // reduce the general equation α τ² + β ν² + 2 γ τν + 2 δ τ + 2 ε ν + ζ = 0
    // to canonical form (λ/l)² + (μ/m)² = 1
    // using a coordinates change
    //       τ = τC + λ cosθ - μ sinθ
    //       ν = νC + λ sinθ + μ cosθ
    // or equivalently
    //       λ =   (τ - τC) cosθ + (ν - νC) sinθ
    //       μ = - (τ - τC) sinθ + (ν - νC) cosθ
    // τC and νC are the coordinates of the 2D ellipse center with respect to P
    // 2l and 2m are the axes lengths (major or minor depending on which one is greatest)
    // θ is the angle of the 2D ellipse axis corresponding to the axis with length 2l

    // choose θ in order to cancel the coupling term in λμ
    // expanding the general equation, we get: A λ² + B μ² + C λμ + D λ + E μ + F = 0
    // with C = 2[(β - α) cosθ sinθ + γ (cos²θ - sin²θ)]
    // hence the term is cancelled when θ = arctan(t), with γ t² + (α - β) t - γ = 0
    // As the two solutions of the quadratic equation obey t1 · t2 = -1, they correspond to
    // angles θ in quadrature to each other. Selecting one solution or the other simply
    // exchanges the principal axes. As we don't care about which axis we want as the
    // first one, we select an arbitrary solution
    final double tanTheta;
    if (FastMath.abs(gamma) < Precision.SAFE_MIN) {
        tanTheta = 0.0;
    } else {
        final double bMA = beta - alpha;
        tanTheta = (bMA >= 0) ? (-2 * gamma / (bMA + FastMath.sqrt(bMA * bMA + 4 * gamma * gamma)))
                : (-2 * gamma / (bMA - FastMath.sqrt(bMA * bMA + 4 * gamma * gamma)));
    }
    final double tan2 = tanTheta * tanTheta;
    final double cos2 = 1 / (1 + tan2);
    final double sin2 = tan2 * cos2;
    final double cosSin = tanTheta * cos2;
    final double cos = FastMath.sqrt(cos2);
    final double sin = tanTheta * cos;

    // choose τC and νC in order to cancel the linear terms in λ and μ
    // expanding the general equation, we get: A λ² + B μ² + C λμ + D λ + E μ + F = 0
    // with D = 2[ (α τC + γ νC + δ) cosθ + (γ τC + β νC + ε) sinθ]
    //      E = 2[-(α τC + γ νC + δ) sinθ + (γ τC + β νC + ε) cosθ]
    // θ can be eliminated by combining the equations
    // D cosθ - E sinθ = 2[α τC + γ νC + δ]
    // E cosθ + D sinθ = 2[γ τC + β νC + ε]
    // hence the terms D and E are both cancelled (regardless of θ) when
    //     τC = (γ ε - β δ) / (α β - γ²)
    //     νC = (γ δ - α ε) / (α β - γ²)
    final double denom = MathArrays.linearCombination(gamma, gamma, -alpha, beta);
    final double tauC = MathArrays.linearCombination(beta, delta, -gamma, epsilon) / denom;
    final double nuC = MathArrays.linearCombination(alpha, epsilon, -gamma, delta) / denom;

    // compute l and m
    // expanding the general equation, we get: A λ² + B μ² + C λμ + D λ + E μ + F = 0
    // with A = α cos²θ + β sin²θ + 2 γ cosθ sinθ
    //      B = α sin²θ + β cos²θ - 2 γ cosθ sinθ
    //      F = α τC² + β νC² + 2 γ τC νC + 2 δ τC + 2 ε νC + ζ
    // hence we compute directly l = √(-F/A) and m = √(-F/B)
    final double twogcs = 2 * gamma * cosSin;
    final double bigA = alpha * cos2 + beta * sin2 + twogcs;
    final double bigB = alpha * sin2 + beta * cos2 - twogcs;
    final double bigF = (alpha * tauC + 2 * (gamma * nuC + delta)) * tauC + (beta * nuC + 2 * epsilon) * nuC
            + zeta;
    final double l = FastMath.sqrt(-bigF / bigA);
    final double m = FastMath.sqrt(-bigF / bigB);
    if (Double.isNaN(l + m)) {
        // the plane does not intersect the ellipsoid
        return null;
    }

    if (l > m) {
        return new Ellipse(new Vector3D(1, planePoint, tauC, u, nuC, v), new Vector3D(cos, u, sin, v),
                new Vector3D(-sin, u, cos, v), l, m, frame);
    } else {
        return new Ellipse(new Vector3D(1, planePoint, tauC, u, nuC, v), new Vector3D(sin, u, -cos, v),
                new Vector3D(cos, u, sin, v), m, l, frame);
    }

}

From source file:org.orekit.forces.BoxAndSolarArraySpacecraft.java

/** {@inheritDoc} */
public Vector3D radiationPressureAcceleration(final AbsoluteDate date, final Frame frame,
        final Vector3D position, final Rotation rotation, final double mass, final Vector3D flux)
        throws OrekitException {

    if (flux.getNormSq() < Precision.SAFE_MIN) {
        // null illumination (we are probably in umbra)
        return Vector3D.ZERO;
    }

    // radiation flux in spacecraft frame
    final Vector3D fluxSat = rotation.applyTo(flux);

    // solar array contribution
    Vector3D normal = getNormal(date, frame, position, rotation);
    double dot = Vector3D.dotProduct(normal, fluxSat);
    if (dot > 0) {
        // the solar array is illuminated backward,
        // fix signs to compute contribution correctly
        dot = -dot;
        normal = normal.negate();
    }
    Vector3D force = facetRadiationAcceleration(normal, solarArrayArea, fluxSat, dot);

    // body facets contribution
    for (final Facet bodyFacet : facets) {
        normal = bodyFacet.getNormal();
        dot = Vector3D.dotProduct(normal, fluxSat);
        if (dot < 0) {
            // the facet intercepts the incoming flux
            force = force.add(facetRadiationAcceleration(normal, bodyFacet.getArea(), fluxSat, dot));
        }
    }

    // convert to inertial frame
    return rotation.applyInverseTo(new Vector3D(1.0 / mass, force));

}
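
The guard at the top of this method is a reusable pattern: a flux vector whose squared norm is below SAFE_MIN is treated as exactly zero (the spacecraft is most likely in umbra), which avoids pointless work further down. A minimal standalone sketch of the same test (the method name is mine):

import org.apache.commons.math3.geometry.euclidean.threed.Vector3D;
import org.apache.commons.math3.util.Precision;

// Returns true when the vector is numerically indistinguishable from the null vector.
static boolean isEffectivelyZero(Vector3D flux) {
    return flux.getNormSq() < Precision.SAFE_MIN;
}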

From source file:org.orekit.forces.BoxAndSolarArraySpacecraft.java

/** {@inheritDoc} */
public FieldVector3D<DerivativeStructure> radiationPressureAcceleration(final AbsoluteDate date,
        final Frame frame, final FieldVector3D<DerivativeStructure> position,
        final FieldRotation<DerivativeStructure> rotation, final DerivativeStructure mass,
        final FieldVector3D<DerivativeStructure> flux) throws OrekitException {

    if (flux.getNormSq().getValue() < Precision.SAFE_MIN) {
        // null illumination (we are probably in umbra)
        return new FieldVector3D<DerivativeStructure>(0.0, flux);
    }

    // radiation flux in spacecraft frame
    final FieldVector3D<DerivativeStructure> fluxSat = rotation.applyTo(flux);

    // solar array contribution
    FieldVector3D<DerivativeStructure> normal = getNormal(date, frame, position, rotation);
    DerivativeStructure dot = FieldVector3D.dotProduct(normal, fluxSat);
    if (dot.getValue() > 0) {
        // the solar array is illuminated backward,
        // fix signs to compute contribution correctly
        dot = dot.negate();
        normal = normal.negate();
    }
    FieldVector3D<DerivativeStructure> force = facetRadiationAcceleration(normal, solarArrayArea, fluxSat, dot);

    // body facets contribution
    for (final Facet bodyFacet : facets) {
        normal = new FieldVector3D<DerivativeStructure>(mass.getField().getOne(), bodyFacet.getNormal());
        dot = FieldVector3D.dotProduct(normal, fluxSat);
        if (dot.getValue() < 0) {
            // the facet intercepts the incoming flux
            force = force.add(facetRadiationAcceleration(normal, bodyFacet.getArea(), fluxSat, dot));
        }
    }

    // convert to inertial frame
    return rotation.applyInverseTo(new FieldVector3D<DerivativeStructure>(mass.reciprocal(), force));

}