Example usage for org.apache.commons.math3.linear DiagonalMatrix DiagonalMatrix

Introduction

On this page you can find example usages of the constructor org.apache.commons.math3.linear.DiagonalMatrix.DiagonalMatrix(double[]).

Prototype

public DiagonalMatrix(final double[] d) 

Document

Creates a matrix using the input array as the underlying data.
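
A minimal, self-contained sketch of the constructor on its own, before the project examples below (the class name is illustrative; only Commons Math 3 is assumed on the classpath):

import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.DiagonalMatrix;
import org.apache.commons.math3.linear.RealVector;

public class DiagonalMatrixDemo {
    public static void main(String[] args) {
        // 3x3 diagonal matrix with 2, 3, 4 on the diagonal
        DiagonalMatrix d = new DiagonalMatrix(new double[] { 2.0, 3.0, 4.0 });

        // operating on a vector scales each component by the matching diagonal entry
        RealVector v = new ArrayRealVector(new double[] { 1.0, 1.0, 1.0 });
        System.out.println(d.operate(v)); // {2; 3; 4}
        System.out.println(d.getEntry(1, 1)); // 3.0
    }
}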

Usage

From source file:com.opengamma.strata.math.impl.regression.WeightedLeastSquaresRegression.java

public LeastSquaresRegressionResult regress(double[][] x, double[] weights, double[] y, boolean useIntercept) {
    if (weights == null) {
        throw new IllegalArgumentException("Cannot perform WLS regression without an array of weights");
    }
    checkData(x, weights, y);
    double[][] dep = addInterceptVariable(x, useIntercept);
    double[] w = new double[weights.length];
    for (int i = 0; i < y.length; i++) {
        w[i] = weights[i];
    }
    DoubleMatrix matrix = DoubleMatrix.copyOf(dep);
    DoubleArray vector = DoubleArray.copyOf(y);
    RealMatrix wDiag = new DiagonalMatrix(w);
    DoubleMatrix transpose = s_algebra.getTranspose(matrix);

    DoubleMatrix wDiagTimesMatrix = DoubleMatrix
            .ofUnsafe(wDiag.multiply(new Array2DRowRealMatrix(matrix.toArrayUnsafe())).getData());
    DoubleMatrix tmp = (DoubleMatrix) s_algebra
            .multiply(s_algebra.getInverse(s_algebra.multiply(transpose, wDiagTimesMatrix)), transpose);

    DoubleMatrix wTmpTimesDiag = DoubleMatrix
            .copyOf(wDiag.preMultiply(new Array2DRowRealMatrix(tmp.toArrayUnsafe())).getData());
    DoubleMatrix betasVector = (DoubleMatrix) s_algebra.multiply(wTmpTimesDiag, vector);
    double[] yModel = super.writeArrayAsVector(
            ((DoubleMatrix) s_algebra.multiply(matrix, betasVector)).toArray());
    double[] betas = super.writeArrayAsVector(betasVector.toArray());
    return getResultWithStatistics(x, convertArray(wDiag.getData()), y, betas, yModel, transpose, matrix,
            useIntercept);
}
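
In the example above, the DiagonalMatrix built from the weight array plays the role of the weight matrix W in the weighted least-squares normal equations, beta = (X^T W X)^-1 X^T W y. A stripped-down sketch of the same computation using only Commons Math types (the class and method names are illustrative, not part of the OpenGamma code):

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.DiagonalMatrix;
import org.apache.commons.math3.linear.LUDecomposition;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.RealVector;

public class WlsSketch {
    // beta = (X^T W X)^-1 X^T W y, with W a DiagonalMatrix of observation weights
    static RealVector wlsBetas(double[][] x, double[] weights, double[] y) {
        RealMatrix X = new Array2DRowRealMatrix(x, false);
        RealMatrix W = new DiagonalMatrix(weights);
        RealVector Y = new ArrayRealVector(y, false);

        RealMatrix xtW = X.transpose().multiply(W); // X^T W
        RealVector xtWy = xtW.operate(Y);           // X^T W y
        RealMatrix xtWx = xtW.multiply(X);          // X^T W X

        return new LUDecomposition(xtWx).getSolver().solve(xtWy);
    }
}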

From source file:iocomms.subpos.NonLinearLeastSquaresSolver.java

public Optimum solve(double[] target, double[] weights, double[] initialPoint, boolean debugInfo) {
    if (debugInfo) {
        System.out.println("Max Number of Iterations : " + MAXNUMBEROFITERATIONS);
    }

    LeastSquaresProblem leastSquaresProblem = LeastSquaresFactory.create(
            // function to be optimized
            function,
            // target values at optimal point in least square equation
            // (x0+xi)^2 + (y0+yi)^2 + ri^2 = target[i]
            new ArrayRealVector(target, false), new ArrayRealVector(initialPoint, false),
            new DiagonalMatrix(weights), null, MAXNUMBEROFITERATIONS, MAXNUMBEROFITERATIONS);

    return leastSquaresOptimizer.optimize(leastSquaresProblem);
}

From source file:com.cloudera.hts.utils.math.MyFunc2.java

protected LeastSquaresProblem getProblem(Collection<WeightedObservedPoint> points) {

    final int len = points.size();

    final double[] target = new double[len];
    final double[] weights = new double[len];

    final double[] initialGuess = { 1.0, 1.0, 1.0 };

    int i = 0;
    for (WeightedObservedPoint point : points) {
        target[i] = point.getY();
        weights[i] = point.getWeight();
        i += 1;
    }

    final AbstractCurveFitter.TheoreticalValuesFunction model = new AbstractCurveFitter.TheoreticalValuesFunction(
            new MyFunc(), points);

    return new LeastSquaresBuilder().maxEvaluations(Integer.MAX_VALUE).maxIterations(Integer.MAX_VALUE)
            .start(initialGuess).target(target).weight(new DiagonalMatrix(weights))
            .model(model.getModelFunction(), model.getModelFunctionJacobian()).build();
}

From source file:edu.ucsf.valelab.saim.calculations.SaimFunctionFitter.java

@Override
protected LeastSquaresProblem getProblem(Collection<WeightedObservedPoint> points) {
    final int len = points.size();
    final double[] target = new double[len];
    final double[] weights = new double[len];

    int i = 0;
    for (WeightedObservedPoint point : points) {
        target[i] = point.getY();
        weights[i] = point.getWeight();
        i += 1;
    }

    final AbstractCurveFitter.TheoreticalValuesFunction model = new AbstractCurveFitter.TheoreticalValuesFunction(
            saimFunction_, points);

    ConvergenceChecker<PointVectorValuePair> checker = new SimpleVectorValueChecker(1.0e-6, 1.0e-10);

    // this parameter validator appears to have the same effect
    // as using the SaimFunctionFitterWithBounds
    double[] lowerBounds = { 0.0, 0.0, 0.0 };
    double[] upperBounds = { 64000, 64000, 1000 };
    ParameterValidator spv = new SaimParameterValidator(lowerBounds, upperBounds);

    return new LeastSquaresBuilder().maxEvaluations(Integer.MAX_VALUE).maxIterations(maxIterations_)
            .lazyEvaluation(true).
            //checker(checker).
            start(guess_).target(target).parameterValidator(spv).weight(new DiagonalMatrix(weights))
            .model(model.getModelFunction(), model.getModelFunctionJacobian()).build();
}

From source file:be.ugent.maf.cellmissy.analysis.doseresponse.impl.SigmoidFitterImpl.java

@Override
public void fitNoConstrain(List<DoseResponsePair> dataToFit, SigmoidFittingResultsHolder resultsHolder,
        int standardHillslope) {
    //initial parameter values for fitting: lowest y, highest y, middle x and standard hillslope
    double[] yValues = AnalysisUtils.generateYValues(dataToFit);
    double[] xValues = AnalysisUtils.generateXValues(dataToFit);

    double initialTop = yValues[0];
    double initialBottom = yValues[0];
    double initialLogEC50;
    double maxX = xValues[0];
    double minX = xValues[0];
    for (int i = 0; i < yValues.length; i++) {
        if (yValues[i] < initialBottom) {
            initialBottom = yValues[i];
        } else if (yValues[i] > initialTop) {
            initialTop = yValues[i];
        }
        if (xValues[i] < minX) {
            minX = xValues[i];
        } else if (xValues[i] > maxX) {
            maxX = xValues[i];
        }
    }
    initialLogEC50 = (maxX + minX) / 2;
    final double[] initialGuesses = new double[] { initialBottom, initialTop, initialLogEC50,
            standardHillslope };

    //add all datapoint to collection with standard weight 1.0
    Collection<WeightedObservedPoint> observations = new ArrayList<>();
    for (int i = 0; i < xValues.length; i++) {
        observations.add(new WeightedObservedPoint(1.0, xValues[i], yValues[i]));
    }

    final ParametricUnivariateFunction function = new ParametricUnivariateFunction() {
        /**
         * @param conc The concentration of the drug, log transformed
         * @param parameters The fitted parameters (bottom, top, logEC50 and
         * hillslope)
         * @return The velocity
         */
        @Override
        public double value(double conc, double[] parameters) {
            double bottom = parameters[0];
            double top = parameters[1];
            double logEC50 = parameters[2];
            double hillslope = parameters[3];

            return (bottom + (top - bottom) / (1 + Math.pow(10, (logEC50 - conc) * hillslope)));
        }

        @Override
        public double[] gradient(double conc, double[] parameters) {
            double bottom = parameters[0];
            double top = parameters[1];
            double logEC50 = parameters[2];
            double hillslope = parameters[3];

            return new double[] { 1 - (1 / ((Math.pow(10, (logEC50 - conc) * hillslope)) + 1)),
                    1 / ((Math.pow(10, (logEC50 - conc) * hillslope)) + 1),
                    (hillslope * Math.log(10) * Math.pow(10, hillslope * (logEC50 + conc)) * (bottom - top))
                            / (Math.pow(Math.pow(10, hillslope * conc) + Math.pow(10, hillslope * logEC50), 2)),
                    (Math.log(10) * (logEC50 - conc) * (bottom - top)
                            * Math.pow(10, (logEC50 + conc) * hillslope))
                            / Math.pow((Math.pow(10, logEC50 * hillslope) + Math.pow(10, hillslope * conc)), 2)

            };

        }

    };

    //set up the fitter with the observations and function created above
    DoseResponseAbstractCurveFitter fitter = new DoseResponseAbstractCurveFitter() {

        @Override
        protected LeastSquaresProblem getProblem(Collection<WeightedObservedPoint> observations) {
            // Prepare least-squares problem.
            final int len = observations.size();
            final double[] target = new double[len];
            final double[] weights = new double[len];

            int i = 0;
            for (final WeightedObservedPoint obs : observations) {
                target[i] = obs.getY();
                weights[i] = obs.getWeight();
                ++i;
            }

            final AbstractCurveFitter.TheoreticalValuesFunction model = new AbstractCurveFitter.TheoreticalValuesFunction(
                    function, observations);

            // build a new least-squares problem set up to fit the sigmoid model to the observed points
            return new LeastSquaresBuilder().maxEvaluations(Integer.MAX_VALUE).maxIterations(Integer.MAX_VALUE)
                    .start(initialGuesses).target(target).weight(new DiagonalMatrix(weights))
                    .model(model.getModelFunction(), model.getModelFunctionJacobian()).build();
        }
    };

    OptimumImpl bestFit = fitter.performRegression(observations);
    //get the best-fit parameters
    double[] params = bestFit.getPoint().toArray();
    double bottom = params[0];
    double top = params[1];
    double logEC50 = params[2];
    double hillslope = params[3];

    //set the fields of the fitting results holder
    resultsHolder.setBottom(bottom);
    resultsHolder.setTop(top);
    resultsHolder.setLogEC50(logEC50);
    resultsHolder.setHillslope(hillslope);
    //no parameters were constrained
    resultsHolder.setConstrainedParameters(new ArrayList<String>());
    //TEST: what is the effect of the singularity threshold argument?
    resultsHolder.setCovariances(bestFit.getCovariances(0).getData());
}
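
The anonymous ParametricUnivariateFunction above encodes the four-parameter logistic (sigmoid) dose-response model, which the fitter then solves with unit weights in the DiagonalMatrix. Written out, the model is

y(x) = \mathrm{bottom} + \frac{\mathrm{top} - \mathrm{bottom}}{1 + 10^{(\log EC_{50} - x)\,h}}

where x is the log-transformed concentration and h is the hill slope; the gradient array returned above holds the partial derivatives with respect to bottom, top, logEC50 and hillslope, in that order.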

From source file:com.datumbox.framework.core.machinelearning.featureselection.PCA.java

/** {@inheritDoc} */
@Override
protected void _fit(Dataframe trainingData) {
    ModelParameters modelParameters = knowledgeBase.getModelParameters();

    int n = trainingData.size();
    int d = trainingData.xColumnSize();

    //convert data into matrix
    Map<Object, Integer> featureIds = modelParameters.getFeatureIds();
    DataframeMatrix matrixDataset = DataframeMatrix.newInstance(trainingData, false, null, featureIds);
    RealMatrix X = matrixDataset.getX();

    //calculate means and subtract them from data
    RealVector meanValues = new OpenMapRealVector(d);
    for (Integer columnId : featureIds.values()) {
        double mean = 0.0;
        for (int row = 0; row < n; row++) {
            mean += X.getEntry(row, columnId);
        }
        mean /= n;

        for (int row = 0; row < n; row++) {
            X.addToEntry(row, columnId, -mean);
        }

        meanValues.setEntry(columnId, mean);
    }
    modelParameters.setMean(meanValues);

    //dxd matrix
    RealMatrix covarianceDD = (X.transpose().multiply(X)).scalarMultiply(1.0 / (n - 1.0));

    EigenDecomposition decomposition = new EigenDecomposition(covarianceDD);
    RealVector eigenValues = new ArrayRealVector(decomposition.getRealEigenvalues(), false);

    RealMatrix components = decomposition.getV();

    //Whiten Components W = U*L^0.5; To whiten them we multiply with L^0.5.
    if (knowledgeBase.getTrainingParameters().isWhitened()) {

        RealMatrix sqrtEigenValues = new DiagonalMatrix(d);
        for (int i = 0; i < d; i++) {
            sqrtEigenValues.setEntry(i, i, FastMath.sqrt(eigenValues.getEntry(i)));
        }

        components = components.multiply(sqrtEigenValues);
    }

    //the eigenvalues and their components are sorted by descending order no need to resort them
    Integer maxDimensions = knowledgeBase.getTrainingParameters().getMaxDimensions();
    Double variancePercentageThreshold = knowledgeBase.getTrainingParameters().getVariancePercentageThreshold();
    if (variancePercentageThreshold != null && variancePercentageThreshold <= 1) {
        double totalVariance = 0.0;
        for (int i = 0; i < d; i++) {
            totalVariance += eigenValues.getEntry(i);
        }

        double sum = 0.0;
        int varCounter = 0;
        for (int i = 0; i < d; i++) {
            sum += eigenValues.getEntry(i) / totalVariance;
            varCounter++;
            if (sum >= variancePercentageThreshold) {
                break;
            }
        }

        if (maxDimensions == null || maxDimensions > varCounter) {
            maxDimensions = varCounter;
        }
    }

    if (maxDimensions != null && maxDimensions < d) {
        //keep only the maximum selected eigenvalues
        eigenValues = eigenValues.getSubVector(0, maxDimensions);

        //keep only the maximum selected eigenvectors
        components = components.getSubMatrix(0, components.getRowDimension() - 1, 0, maxDimensions - 1);
    }

    modelParameters.setEigenValues(eigenValues);
    modelParameters.setComponents(components);
}
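
Here the diagonal of square-rooted eigenvalues is filled entry by entry through the size-based DiagonalMatrix(int) constructor; the PCA variants further down build the same whitening matrix directly through the double[] constructor documented on this page. A sketch of that direct form as a standalone helper (class and method names are illustrative):

import org.apache.commons.math3.linear.DiagonalMatrix;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.util.FastMath;

public class WhiteningSketch {
    // W = U * L^0.5 : scale each eigenvector column by the square root of its eigenvalue
    static RealMatrix whiten(RealMatrix components, double[] eigenValues) {
        double[] sqrtEigenValues = new double[eigenValues.length];
        for (int i = 0; i < eigenValues.length; i++) {
            sqrtEigenValues[i] = FastMath.sqrt(eigenValues[i]);
        }
        return components.multiply(new DiagonalMatrix(sqrtEigenValues));
    }
}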

From source file:com.datumbox.framework.machinelearning.featureselection.continuous.PCA.java

@Override
protected void estimateModelParameters(Dataset originaldata) {
    ModelParameters modelParameters = knowledgeBase.getModelParameters();

    int n = originaldata.size();
    int d = originaldata.getColumnSize();

    //convert data into matrix
    Map<Object, Integer> feature2ColumnId = modelParameters.getFeature2ColumnId();
    MatrixDataset matrixDataset = MatrixDataset.newInstance(originaldata, false, feature2ColumnId);
    RealMatrix X = matrixDataset.getX();

    //calculate means and subtract them from data
    double[] meanValues = new double[d];
    for (Map.Entry<Object, Integer> entry : feature2ColumnId.entrySet()) {
        Object feature = entry.getKey();
        Integer columnId = entry.getValue();

        meanValues[columnId] = Descriptives
                .mean(originaldata.extractColumnValues(feature).toFlatDataCollection());

        for (int row = 0; row < n; ++row) {
            X.addToEntry(row, columnId, -meanValues[columnId]); //inplace subtraction!!!
        }
    }
    modelParameters.setMean(meanValues);

    RealMatrix components;
    double[] eigenValues;
    /*
    if(d>n) { //turned off because of the algorithm could not be validated
    //Karhunen Lowe Transform to speed up calculations
            
    //nxn matrix
    RealMatrix covarianceNN = (X.multiply(X.transpose())).scalarMultiply(1.0/(n-1.0)); 
            
    EigenDecomposition decomposition = new EigenDecomposition(covarianceNN);
    eigenValues = decomposition.getRealEigenvalues();
            
            
    RealMatrix eigenVectors = decomposition.getV();
            
    double[] sqrtInverseEigenValues = new double[eigenValues.length];
    for(int i=0;i<eigenValues.length;++i) {
        if(eigenValues[i]==0.0) {
            sqrtInverseEigenValues[i] = 0.0;
        }
        else {
            sqrtInverseEigenValues[i] = 1.0/Math.sqrt(eigenValues[i]);
        }
    }
            
    components = X.transpose().multiply(eigenVectors);
    //Components = X'*V*L^-0.5; To whiten them we multiply with L^0.5 which 
    //cancels out the previous multiplication. So below we multiply by
    //L^-0.5 ONLY if we don't whiten.
    if(!knowledgeBase.getTrainingParameters().isWhitened()) { 
        components = components.multiply(new DiagonalMatrix(sqrtInverseEigenValues));
    }
    }
    else {
    //Normal PCA goes here
    }
    */
    //dxd matrix
    RealMatrix covarianceDD = (X.transpose().multiply(X)).scalarMultiply(1.0 / (n - 1.0));

    EigenDecomposition decomposition = new EigenDecomposition(covarianceDD);
    eigenValues = decomposition.getRealEigenvalues();

    components = decomposition.getV();

    //Whiten Components W = U*L^0.5; To whiten them we multiply with L^0.5.
    if (knowledgeBase.getTrainingParameters().isWhitened()) {

        double[] sqrtEigenValues = new double[eigenValues.length];
        for (int i = 0; i < eigenValues.length; ++i) {
            sqrtEigenValues[i] = Math.sqrt(eigenValues[i]);
        }

        components = components.multiply(new DiagonalMatrix(sqrtEigenValues));
    }

    //the eigenvalues and their components are sorted by descending order no need to resort them
    Integer maxDimensions = knowledgeBase.getTrainingParameters().getMaxDimensions();
    Double varianceThreshold = knowledgeBase.getTrainingParameters().getVarianceThreshold();
    if (varianceThreshold != null && varianceThreshold <= 1) {
        double sum = 0.0;
        double totalVariance = StatUtils.sum(eigenValues);
        int varCounter = 0;
        for (double l : eigenValues) {
            sum += l / totalVariance;
            ++varCounter;
            if (sum >= varianceThreshold) {
                break;
            }
        }

        if (maxDimensions == null || maxDimensions > varCounter) {
            maxDimensions = varCounter;
        }
    }

    if (maxDimensions != null && maxDimensions < d) {
        //keep only the maximum selected eigenvalues
        double[] newEigenValues = new double[maxDimensions];
        System.arraycopy(eigenValues, 0, newEigenValues, 0, maxDimensions);
        eigenValues = newEigenValues;

        //keep only the maximum selected eigenvectors
        components = components.getSubMatrix(0, components.getRowDimension() - 1, 0, maxDimensions - 1);
    }

    modelParameters.setRows(components.getRowDimension());
    modelParameters.setCols(components.getColumnDimension());

    modelParameters.setEigenValues(eigenValues);
    modelParameters.setComponents(components.getData());
}

From source file:be.ugent.maf.cellmissy.analysis.doseresponse.impl.SigmoidFitterImpl.java

@Override
public void fitBotConstrain(List<DoseResponsePair> dataToFit, SigmoidFittingResultsHolder resultsHolder,
        Double bottomConstrain, int standardHillslope) {

    final Double bottom = bottomConstrain;

    //initial parameter values for fitting: highest y, middle x and standard hillslope
    double[] yValues = AnalysisUtils.generateYValues(dataToFit);
    double[] xValues = AnalysisUtils.generateXValues(dataToFit);
    double initialTop = yValues[0];
    double initialLogEC50;
    double maxX = xValues[0];
    double minX = xValues[0];
    for (int i = 0; i < yValues.length; i++) {
        if (yValues[i] > initialTop) {
            initialTop = yValues[i];
        }
        if (xValues[i] < minX) {
            minX = xValues[i];
        } else if (xValues[i] > maxX) {
            maxX = xValues[i];
        }
    }
    initialLogEC50 = (maxX + minX) / 2;

    final double[] initialGuesses = new double[] { initialTop, initialLogEC50, standardHillslope };

    //add all datapoint to collection with standard weight 1.0
    Collection<WeightedObservedPoint> observations = new ArrayList<>();
    for (int i = 0; i < xValues.length; i++) {
        observations.add(new WeightedObservedPoint(1.0, xValues[i], yValues[i]));
    }

    final ParametricUnivariateFunction function = new ParametricUnivariateFunction() {
        /**
         * @param conc The concentration of the drug, log transformed
         * @param parameters The fitted parameters (top, logEC50 and
         * hillslope)
         * @return The velocity
         */
        @Override
        public double value(double conc, double[] parameters) {
            double top = parameters[0];
            double logEC50 = parameters[1];
            double hillslope = parameters[2];

            return (bottom + (top - bottom) / (1 + Math.pow(10, (logEC50 - conc) * hillslope)));
        }

        @Override
        public double[] gradient(double conc, double[] parameters) {
            double top = parameters[0];
            double logEC50 = parameters[1];
            double hillslope = parameters[2];

            return new double[] { 1 / ((Math.pow(10, (logEC50 - conc) * hillslope)) + 1),
                    (hillslope * Math.log(10) * Math.pow(10, hillslope * (logEC50 + conc)) * (bottom - top))
                            / (Math.pow(Math.pow(10, hillslope * conc) + Math.pow(10, hillslope * logEC50), 2)),
                    (Math.log(10) * (logEC50 - conc) * (bottom - top)
                            * Math.pow(10, (logEC50 + conc) * hillslope))
                            / Math.pow((Math.pow(10, logEC50 * hillslope) + Math.pow(10, hillslope * conc)), 2)

            };

        }

    };

    //set up the fitter with the observations and function created above
    DoseResponseAbstractCurveFitter fitter = new DoseResponseAbstractCurveFitter() {

        @Override
        protected LeastSquaresProblem getProblem(Collection<WeightedObservedPoint> observations) {
            // Prepare least-squares problem.
            final int len = observations.size();
            final double[] target = new double[len];
            final double[] weights = new double[len];

            int i = 0;
            for (final WeightedObservedPoint obs : observations) {
                target[i] = obs.getY();
                weights[i] = obs.getWeight();
                ++i;
            }

            final AbstractCurveFitter.TheoreticalValuesFunction model = new AbstractCurveFitter.TheoreticalValuesFunction(
                    function, observations);

            // build a new least-squares problem set up to fit the sigmoid model to the observed points
            return new LeastSquaresBuilder().maxEvaluations(Integer.MAX_VALUE).maxIterations(Integer.MAX_VALUE)
                    .start(initialGuesses).target(target).weight(new DiagonalMatrix(weights))
                    .model(model.getModelFunction(), model.getModelFunctionJacobian()).build();
        }
    };

    OptimumImpl bestFit = fitter.performRegression(observations);
    double[] params = bestFit.getPoint().toArray();
    double top = params[0];
    double logEC50 = params[1];
    double hillslope = params[2];

    resultsHolder.setBottom(bottom);
    resultsHolder.setTop(top);
    resultsHolder.setLogEC50(logEC50);
    resultsHolder.setHillslope(hillslope);
    //bottom parameter was constrained
    List<String> constrainedParam = new ArrayList<>();
    constrainedParam.add("bottom");
    resultsHolder.setConstrainedParameters(constrainedParam);
    resultsHolder.setCovariances(bestFit.getCovariances(0).getData());
}

From source file:com.datumbox.framework.core.machinelearning.featureselection.continuous.PCA.java

/** {@inheritDoc} */
@Override
protected void _fit(Dataframe originalData) {
    ModelParameters modelParameters = kb().getModelParameters();

    int n = modelParameters.getN();
    int d = modelParameters.getD();

    //convert data into matrix
    Map<Object, Integer> featureIds = modelParameters.getFeatureIds();
    MatrixDataframe matrixDataset = MatrixDataframe.newInstance(originalData, false, null, featureIds);
    RealMatrix X = matrixDataset.getX();

    //calculate means and subtract them from data
    double[] meanValues = new double[d];
    for (Integer columnId : featureIds.values()) {

        meanValues[columnId] = 0.0;
        for (double v : X.getColumn(columnId)) {
            meanValues[columnId] += v;
        }
        meanValues[columnId] /= n;

        for (int row = 0; row < n; row++) {
            X.addToEntry(row, columnId, -meanValues[columnId]);
        }
    }
    modelParameters.setMean(meanValues);

    RealMatrix components;
    double[] eigenValues;

    //dxd matrix
    RealMatrix covarianceDD = (X.transpose().multiply(X)).scalarMultiply(1.0 / (n - 1.0));

    EigenDecomposition decomposition = new EigenDecomposition(covarianceDD);
    eigenValues = decomposition.getRealEigenvalues();

    components = decomposition.getV();

    //Whiten Components W = U*L^0.5; To whiten them we multiply with L^0.5.
    if (kb().getTrainingParameters().isWhitened()) {

        double[] sqrtEigenValues = new double[eigenValues.length];
        for (int i = 0; i < eigenValues.length; i++) {
            sqrtEigenValues[i] = Math.sqrt(eigenValues[i]);
        }

        components = components.multiply(new DiagonalMatrix(sqrtEigenValues));
    }

    //the eigenvalues and their components are sorted by descending order no need to resort them
    Integer maxDimensions = kb().getTrainingParameters().getMaxDimensions();
    Double variancePercentageThreshold = kb().getTrainingParameters().getVariancePercentageThreshold();
    if (variancePercentageThreshold != null && variancePercentageThreshold <= 1) {
        double sum = 0.0;
        double totalVariance = StatUtils.sum(eigenValues);
        int varCounter = 0;
        for (double l : eigenValues) {
            sum += l / totalVariance;
            varCounter++;
            if (sum >= variancePercentageThreshold) {
                break;
            }
        }

        if (maxDimensions == null || maxDimensions > varCounter) {
            maxDimensions = varCounter;
        }
    }

    if (maxDimensions != null && maxDimensions < d) {
        //keep only the maximum selected eigenvalues
        double[] newEigenValues = new double[maxDimensions];
        System.arraycopy(eigenValues, 0, newEigenValues, 0, maxDimensions);
        eigenValues = newEigenValues;

        //keep only the maximum selected eigenvectors
        components = components.getSubMatrix(0, components.getRowDimension() - 1, 0, maxDimensions - 1);
    }

    modelParameters.setRows(components.getRowDimension());
    modelParameters.setCols(components.getColumnDimension());

    modelParameters.setEigenValues(eigenValues);
    modelParameters.setComponents(components.getData());
}

From source file:be.ugent.maf.cellmissy.analysis.doseresponse.impl.SigmoidFitterImpl.java

@Override
public void fitTopConstrain(List<DoseResponsePair> dataToFit, SigmoidFittingResultsHolder resultsHolder,
        Double topConstrain, int standardHillslope) {
    final Double top = topConstrain;

    //initial parameter values for fitting: lowest y, middle x and standard hillslope
    double[] yValues = AnalysisUtils.generateYValues(dataToFit);
    double[] xValues = AnalysisUtils.generateXValues(dataToFit);
    double initialBottom = yValues[0];
    double initialLogEC50;
    double maxX = xValues[0];
    double minX = xValues[0];
    for (int i = 0; i < yValues.length; i++) {
        if (yValues[i] < initialBottom) {
            initialBottom = yValues[i];
        }
        if (xValues[i] < minX) {
            minX = xValues[i];
        } else if (xValues[i] > maxX) {
            maxX = xValues[i];
        }
    }
    initialLogEC50 = (maxX + minX) / 2;

    final double[] initialGuesses = new double[] { initialBottom, initialLogEC50, standardHillslope };

    //add all datapoint to collection with standard weight 1.0
    Collection<WeightedObservedPoint> observations = new ArrayList<>();
    for (int i = 0; i < xValues.length; i++) {
        observations.add(new WeightedObservedPoint(1.0, xValues[i], yValues[i]));
    }

    final ParametricUnivariateFunction function = new ParametricUnivariateFunction() {
        /**
         * @param conc The concentration of the drug, log transformed
         * @param parameters The fitted parameters (bottom, logEC50 and
         * hillslope)
         * @return The velocity
         */
        @Override
        public double value(double conc, double[] parameters) {
            double bottom = parameters[0];
            double logEC50 = parameters[1];
            double hillslope = parameters[2];

            return (bottom + (top - bottom) / (1 + Math.pow(10, (logEC50 - conc) * hillslope)));
        }

        @Override
        public double[] gradient(double conc, double[] parameters) {
            double bottom = parameters[0];
            double logEC50 = parameters[1];
            double hillslope = parameters[2];

            return new double[] { 1 - (1 / ((Math.pow(10, (logEC50 - conc) * hillslope)) + 1)),
                    (hillslope * Math.log(10) * Math.pow(10, hillslope * (logEC50 + conc)) * (bottom - top))
                            / (Math.pow(Math.pow(10, hillslope * conc) + Math.pow(10, hillslope * logEC50), 2)),
                    (Math.log(10) * (logEC50 - conc) * (bottom - top)
                            * Math.pow(10, (logEC50 + conc) * hillslope))
                            / Math.pow((Math.pow(10, logEC50 * hillslope) + Math.pow(10, hillslope * conc)), 2)

            };

        }

    };

    //set up the fitter with the observations and function created above
    DoseResponseAbstractCurveFitter fitter = new DoseResponseAbstractCurveFitter() {

        @Override
        protected LeastSquaresProblem getProblem(Collection<WeightedObservedPoint> observations) {
            // Prepare least-squares problem.
            final int len = observations.size();
            final double[] target = new double[len];
            final double[] weights = new double[len];

            int i = 0;
            for (final WeightedObservedPoint obs : observations) {
                target[i] = obs.getY();
                weights[i] = obs.getWeight();
                ++i;
            }

            final AbstractCurveFitter.TheoreticalValuesFunction model = new AbstractCurveFitter.TheoreticalValuesFunction(
                    function, observations);

            // build a new least-squares problem set up to fit the sigmoid model to the observed points
            return new LeastSquaresBuilder().maxEvaluations(Integer.MAX_VALUE).maxIterations(Integer.MAX_VALUE)
                    .start(initialGuesses).target(target).weight(new DiagonalMatrix(weights))
                    .model(model.getModelFunction(), model.getModelFunctionJacobian()).build();
        }
    };

    OptimumImpl bestFit = fitter.performRegression(observations);
    double[] params = bestFit.getPoint().toArray();
    double bottom = params[0];
    double logEC50 = params[1];
    double hillslope = params[2];

    resultsHolder.setBottom(bottom);
    resultsHolder.setTop(top);
    resultsHolder.setLogEC50(logEC50);
    resultsHolder.setHillslope(hillslope);
    //top parameter was constrained
    List<String> constrainedParam = new ArrayList<>();
    constrainedParam.add("top");
    resultsHolder.setConstrainedParameters(constrainedParam);
    resultsHolder.setCovariances(bestFit.getCovariances(0).getData());
}