List of usage examples for org.apache.commons.math3.linear.RealMatrix#getRowDimension()
int getRowDimension();
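Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the 3 x 2 data and class name are illustrative only) showing how getRowDimension() is typically queried and how it pairs with getColumnDimension() when iterating over all entries:

import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;

public class GetRowDimensionExample {
    public static void main(String[] args) {
        // a 3 x 2 matrix: getRowDimension() returns 3, getColumnDimension() returns 2
        RealMatrix m = MatrixUtils.createRealMatrix(new double[][] {
                { 1.0, 2.0 },
                { 3.0, 4.0 },
                { 5.0, 6.0 } });
        System.out.println(m.getRowDimension() + " x " + m.getColumnDimension());

        // a common pattern from the examples below: iterate over every entry
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                System.out.println("m[" + r + "][" + c + "] = " + m.getEntry(r, c));
            }
        }
    }
}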
From source file:com.joptimizer.algebra.CholeskyFactorizationTest.java
public void testInvert1() throws Exception {
    log.debug("testInvert1");
    double[][] QData = new double[][] {
            { 1, .12, .13, .14, .15 },
            { .12, 2, .23, .24, .25 },
            { .13, .23, 3, 0, 0 },
            { .14, .24, 0, 4, 0 },
            { .15, .25, 0, 0, 5 } };
    RealMatrix Q = MatrixUtils.createRealMatrix(QData);

    CholeskyFactorization myc = new CholeskyFactorization(DoubleFactory2D.dense.make(QData));
    myc.factorize();
    RealMatrix L = new Array2DRowRealMatrix(myc.getL().toArray());
    RealMatrix LT = new Array2DRowRealMatrix(myc.getLT().toArray());
    log.debug("L: " + ArrayUtils.toString(L.getData()));
    log.debug("LT: " + ArrayUtils.toString(LT.getData()));
    log.debug("L.LT: " + ArrayUtils.toString(L.multiply(LT).getData()));
    log.debug("LT.L: " + ArrayUtils.toString(LT.multiply(L).getData()));

    // check Q = L.LT
    double norm = L.multiply(LT).subtract(Q).getNorm();
    log.debug("norm: " + norm);
    assertTrue(norm < 1.E-15);

    RealMatrix LInv = new SingularValueDecomposition(L).getSolver().getInverse();
    log.debug("LInv: " + ArrayUtils.toString(LInv.getData()));
    RealMatrix LInvT = LInv.transpose();
    log.debug("LInvT: " + ArrayUtils.toString(LInvT.getData()));
    RealMatrix LTInv = new SingularValueDecomposition(LT).getSolver().getInverse();
    log.debug("LTInv: " + ArrayUtils.toString(LTInv.getData()));
    RealMatrix LTInvT = LTInv.transpose();
    log.debug("LTInvT: " + ArrayUtils.toString(LTInvT.getData()));
    log.debug("LInv.LInvT: " + ArrayUtils.toString(LInv.multiply(LInvT).getData()));
    log.debug("LTInv.LTInvT: " + ArrayUtils.toString(LTInv.multiply(LTInvT).getData()));

    RealMatrix Id = MatrixUtils.createRealIdentityMatrix(Q.getRowDimension());
    // check Q.(LTInv * LInv) = 1
    norm = Q.multiply(LTInv.multiply(LInv)).subtract(Id).getNorm();
    log.debug("norm: " + norm);
    assertTrue(norm < 5.E-15);

    // check Q.QInv = 1
    RealMatrix QInv = MatrixUtils.createRealMatrix(myc.getInverse().toArray());
    norm = Q.multiply(QInv).subtract(Id).getNorm();
    log.debug("norm: " + norm);
    assertTrue(norm < 1.E-15);
}
From source file:edu.cudenver.bios.power.GLMMPowerCalculator.java
/**
 * Returns true if there is no mean difference, i.e. every entry of the
 * hypothesis sum of squares matrix is within EPSILON of zero.
 * @param test the GLMM test
 * @return true if no mean difference, false otherwise
 */
private boolean noMeanDifference(GLMMTest test) {
    // get the difference between theta null and the alternative
    RealMatrix sumSqHypothesis = test.getHypothesisSumOfSquares();
    // check if there is at least one non-zero value
    if (sumSqHypothesis != null) {
        for (int r = 0; r < sumSqHypothesis.getRowDimension(); r++) {
            for (int c = 0; c < sumSqHypothesis.getColumnDimension(); c++) {
                if (Math.abs(sumSqHypothesis.getEntry(r, c)) > EPSILON) {
                    return false;
                }
            }
        }
    }
    return true;
}
From source file:com.joptimizer.algebra.CholeskyFactorizationTest.java
/**
 * The same as before, but with rescaling.
 */
public void testInvert2() throws Exception {
    log.debug("testInvert2");
    double[][] QData = new double[][] {
            { 1, .12, .13, .14, .15 },
            { .12, 2, .23, .24, .25 },
            { .13, .23, 3, 0, 0 },
            { .14, .24, 0, 4, 0 },
            { .15, .25, 0, 0, 5 } };
    RealMatrix Q = MatrixUtils.createRealMatrix(QData);

    CholeskyFactorization myc = new CholeskyFactorization(DoubleFactory2D.dense.make(QData),
            new Matrix1NormRescaler());
    myc.factorize();
    RealMatrix L = new Array2DRowRealMatrix(myc.getL().toArray());
    RealMatrix LT = new Array2DRowRealMatrix(myc.getLT().toArray());
    log.debug("L: " + ArrayUtils.toString(L.getData()));
    log.debug("LT: " + ArrayUtils.toString(LT.getData()));
    log.debug("L.LT: " + ArrayUtils.toString(L.multiply(LT).getData()));
    log.debug("LT.L: " + ArrayUtils.toString(LT.multiply(L).getData()));

    // check Q = L.LT
    double norm = L.multiply(LT).subtract(Q).getNorm();
    log.debug("norm: " + norm);
    assertTrue(norm < 1.E-15);

    RealMatrix LInv = new SingularValueDecomposition(L).getSolver().getInverse();
    log.debug("LInv: " + ArrayUtils.toString(LInv.getData()));
    RealMatrix LInvT = LInv.transpose();
    log.debug("LInvT: " + ArrayUtils.toString(LInvT.getData()));
    RealMatrix LTInv = new SingularValueDecomposition(LT).getSolver().getInverse();
    log.debug("LTInv: " + ArrayUtils.toString(LTInv.getData()));
    RealMatrix LTInvT = LTInv.transpose();
    log.debug("LTInvT: " + ArrayUtils.toString(LTInvT.getData()));
    log.debug("LInv.LInvT: " + ArrayUtils.toString(LInv.multiply(LInvT).getData()));
    log.debug("LTInv.LTInvT: " + ArrayUtils.toString(LTInv.multiply(LTInvT).getData()));

    RealMatrix Id = MatrixUtils.createRealIdentityMatrix(Q.getRowDimension());
    // check Q.(LTInv * LInv) = 1
    norm = Q.multiply(LTInv.multiply(LInv)).subtract(Id).getNorm();
    log.debug("norm: " + norm);
    assertTrue(norm < 5.E-15);

    // check Q.QInv = 1
    RealMatrix QInv = MatrixUtils.createRealMatrix(myc.getInverse().toArray());
    norm = Q.multiply(QInv).subtract(Id).getNorm();
    log.debug("norm: " + norm);
    assertTrue(norm < 1.E-15);
}
From source file:edu.cudenver.bios.power.GLMMPowerCalculator.java
/**
 * Ensure that all required matrices are specified, and that conformance is correct
 * @param params GLMM input parameters
 * @throws PowerException
 */
protected void validateMatrices(GLMMPowerParameters params) throws PowerException {
    if (params.getConfidenceIntervalType() != ConfidenceIntervalType.NONE
            && params.getSampleSizeForEstimates() <= params.getDesignMatrixRankForEstimates()) {
        throw new PowerException(NEGATIVE_NU_EST_MESSAGE, PowerErrorEnum.POWER_CI_NEGATIVE_NU_EST);
    }

    // convenience variables
    RealMatrix beta = params.getBeta().getCombinedMatrix();
    RealMatrix theta0 = params.getTheta();
    RealMatrix XEssence = params.getDesignEssence();
    RealMatrix C = params.getBetweenSubjectContrast().getCombinedMatrix();
    RealMatrix U = params.getWithinSubjectContrast();
    RealMatrix sigmaE = params.getSigmaError();
    RealMatrix sigmaG = params.getSigmaGaussianRandom();
    RealMatrix sigmaY = params.getSigmaOutcome();
    RealMatrix sigmaYG = params.getSigmaOutcomeGaussianRandom();
    int numRandom = sigmaG != null ? sigmaG.getRowDimension() : 0;

    // only allow at most 1 random predictor
    // TODO: handle multiple random predictors
    if (numRandom > 1)
        throw new PowerException("Too many random predictors - at most 1 is allowed",
                PowerErrorEnum.MAX_RANDOM_PREDICTORS_EXCEEDED);

    // make sure all required matrices have been specified
    // note, we don't check U (within subject contrast), since it may be null in univariate cases
    if (beta == null)
        throw new PowerException("No beta (regression coefficients) matrix specified",
                PowerErrorEnum.MISSING_MATRIX_BETA);
    if (XEssence == null)
        throw new PowerException("No design essence matrix specified",
                PowerErrorEnum.MISSING_MATRIX_DESIGN);
    if (C == null)
        throw new PowerException("No between subject contrast (C) matrix specified",
                PowerErrorEnum.MISSING_MATRIX_C);
    if (theta0 == null)
        throw new PowerException("No theta_null (null hypothesis) matrix specified",
                PowerErrorEnum.MISSING_MATRIX_THETA_NULL);

    // create a default U if not specified
    if (U == null) {
        U = org.apache.commons.math3.linear.MatrixUtils.createRealIdentityMatrix(beta.getColumnDimension());
        params.setWithinSubjectContrast(U);
    }

    // different variance/covariance matrices are specified depending on the presence
    // of random covariate
    if (numRandom == 0) {
        if (sigmaE == null)
            throw new PowerException("No sigma (error) matrix specified",
                    PowerErrorEnum.MISSING_MATRIX_SIGMA_E);
        if (!sigmaE.isSquare())
            throw new PowerException("Sigma error matrix must be square",
                    PowerErrorEnum.MATRIX_NONSQUARE_SIGMA_E);
        if (U.getRowDimension() != sigmaE.getRowDimension())
            throw new PowerException("Within subject contrast matrix "
                    + "(" + U.getRowDimension() + " x " + U.getColumnDimension() + ")"
                    + " does not conform with " + "sigma matrix "
                    + "(" + sigmaE.getRowDimension() + " x " + sigmaE.getColumnDimension() + ")",
                    PowerErrorEnum.MATRIX_CONFORMANCE_U_SIGMA_E);
    } else if (numRandom == 1) {
        // covariate (results not published for Wilk's Lambda or Pillai-Bartlett
        for (Test test : params.getTestList()) {
            if (test != Test.HOTELLING_LAWLEY_TRACE && test != Test.UNIREP && test != Test.UNIREP_BOX
                    && test != Test.UNIREP_GEISSER_GREENHOUSE && test != Test.UNIREP_HUYNH_FELDT)
                throw new PowerException("With a random covariate, "
                        + "only Hotelling-Lawley and Unirep test statistics are supported",
                        PowerErrorEnum.UNKNOWN_TEST_REQUESTED_RANDOM);
        }

        if (sigmaG == null)
            throw new PowerException("No variance/covariance matrix specified for gaussian predictors",
                    PowerErrorEnum.MISSING_MATRIX_SIGMA_G);
        if (sigmaY == null)
            throw new PowerException("No variance/covariance matrix specified for response variables",
                    PowerErrorEnum.MISSING_MATRIX_SIGMA_Y);
        if (sigmaYG == null)
            throw new PowerException("No outcome / gaussian predictor covariance matrix specified",
                    PowerErrorEnum.MISSING_MATRIX_SIGMA_YG);

        // check conformance
        if (U.getRowDimension() != sigmaY.getRowDimension())
            throw new PowerException("Within subject contrast matrix "
                    + "(" + U.getRowDimension() + " x " + U.getColumnDimension() + ")"
                    + " does not conform with " + "sigma matrix "
                    + "(" + sigmaY.getRowDimension() + " x " + sigmaY.getColumnDimension() + ")",
                    PowerErrorEnum.MATRIX_CONFORMANCE_U_SIGMA_Y);
        if (sigmaG.getRowDimension() != sigmaYG.getColumnDimension())
            throw new PowerException("Outcome / Gaussian predictor covariance "
                    + "matrix does not conform with variance matrix for the gaussian predictor",
                    PowerErrorEnum.MATRIX_CONFORMANCE_SIGMA_G_SIGMA_YG);
        if (!sigmaY.isSquare())
            throw new PowerException("Variance/covariance matrix for "
                    + "response variables must be square", PowerErrorEnum.MATRIX_NONSQUARE_SIGMA_Y);
        if (!sigmaG.isSquare())
            throw new PowerException("Variance/covariance matrix "
                    + "for gaussian predictors must be square", PowerErrorEnum.MATRIX_NONSQUARE_SIGMA_G);
    }

    // check dimensionality
    if (C.getColumnDimension() != beta.getRowDimension())
        throw new PowerException("Between subject contrast matrix does not conform with beta matrix",
                PowerErrorEnum.MATRIX_CONFORMANCE_C_BETA);
    if (beta.getColumnDimension() != U.getRowDimension())
        throw new PowerException("Within subject contrast matrix does not conform with beta matrix",
                PowerErrorEnum.MATRIX_CONFORMANCE_BETA_U);
    if ((XEssence.getColumnDimension() != beta.getRowDimension() && numRandom == 0)
            || (XEssence.getColumnDimension() + 1 != beta.getRowDimension() && numRandom > 0))
        throw new PowerException("Design matrix does not conform with beta matrix",
                PowerErrorEnum.MATRIX_CONFORMANCE_X_BETA);
    if (C.getRowDimension() > C.getColumnDimension())
        throw new PowerException("Number of rows in between subject "
                + "contrast must be less than or equal to the number of columns",
                PowerErrorEnum.MATRIX_DIMENSION_C_TOO_MANY_ROWS);
    if (U.getColumnDimension() > U.getRowDimension())
        throw new PowerException("Number of columns in within "
                + "subject contrast must be less than or equal to the number of rows",
                PowerErrorEnum.MATRIX_DIMENSION_U_TOO_MANY_COLUMNS);
    if (theta0.getRowDimension() != C.getRowDimension())
        throw new PowerException("Number of rows in theta null "
                + "must equal number of rows in between subject contrast",
                PowerErrorEnum.MATRIX_CONFORMANCE_C_THETA_NULL);
    if (theta0.getColumnDimension() != U.getColumnDimension())
        throw new PowerException("Number of columns in theta null "
                + "must equal number of columns in within subject contrast",
                PowerErrorEnum.MATRIX_CONFORMANCE_U_THETA_NULL);

    // check rank of the design matrix
    int rankX = new SingularValueDecomposition(XEssence).getRank();
    if (rankX != Math.min(XEssence.getColumnDimension(), XEssence.getRowDimension()))
        throw new PowerException("Design matrix is not full rank: it is of rank " + rankX,
                PowerErrorEnum.MATRIX_RANK_DESIGN_LTFR);

    // make sure design matrix is symmetric and positive definite
    // TODO: how to check this?
}
From source file:com.datumbox.framework.core.machinelearning.featureselection.PCA.java
/** {@inheritDoc} */
@Override
protected void _fit(Dataframe trainingData) {
    ModelParameters modelParameters = knowledgeBase.getModelParameters();

    int n = trainingData.size();
    int d = trainingData.xColumnSize();

    // convert data into matrix
    Map<Object, Integer> featureIds = modelParameters.getFeatureIds();
    DataframeMatrix matrixDataset = DataframeMatrix.newInstance(trainingData, false, null, featureIds);
    RealMatrix X = matrixDataset.getX();

    // calculate means and subtract them from data
    RealVector meanValues = new OpenMapRealVector(d);
    for (Integer columnId : featureIds.values()) {
        double mean = 0.0;
        for (int row = 0; row < n; row++) {
            mean += X.getEntry(row, columnId);
        }
        mean /= n;

        for (int row = 0; row < n; row++) {
            X.addToEntry(row, columnId, -mean);
        }

        meanValues.setEntry(columnId, mean);
    }
    modelParameters.setMean(meanValues);

    // dxd matrix
    RealMatrix covarianceDD = (X.transpose().multiply(X)).scalarMultiply(1.0 / (n - 1.0));

    EigenDecomposition decomposition = new EigenDecomposition(covarianceDD);
    RealVector eigenValues = new ArrayRealVector(decomposition.getRealEigenvalues(), false);
    RealMatrix components = decomposition.getV();

    // Whiten Components W = U*L^0.5; To whiten them we multiply with L^0.5.
    if (knowledgeBase.getTrainingParameters().isWhitened()) {
        RealMatrix sqrtEigenValues = new DiagonalMatrix(d);
        for (int i = 0; i < d; i++) {
            sqrtEigenValues.setEntry(i, i, FastMath.sqrt(eigenValues.getEntry(i)));
        }
        components = components.multiply(sqrtEigenValues);
    }

    // the eigenvalues and their components are sorted by descending order, no need to resort them
    Integer maxDimensions = knowledgeBase.getTrainingParameters().getMaxDimensions();
    Double variancePercentageThreshold = knowledgeBase.getTrainingParameters().getVariancePercentageThreshold();
    if (variancePercentageThreshold != null && variancePercentageThreshold <= 1) {
        double totalVariance = 0.0;
        for (int i = 0; i < d; i++) {
            totalVariance += eigenValues.getEntry(i);
        }

        double sum = 0.0;
        int varCounter = 0;
        for (int i = 0; i < d; i++) {
            sum += eigenValues.getEntry(i) / totalVariance;
            varCounter++;
            if (sum >= variancePercentageThreshold) {
                break;
            }
        }

        if (maxDimensions == null || maxDimensions > varCounter) {
            maxDimensions = varCounter;
        }
    }

    if (maxDimensions != null && maxDimensions < d) {
        // keep only the maximum selected eigenvalues
        eigenValues = eigenValues.getSubVector(0, maxDimensions);

        // keep only the maximum selected eigenvectors
        components = components.getSubMatrix(0, components.getRowDimension() - 1, 0, maxDimensions - 1);
    }

    modelParameters.setEigenValues(eigenValues);
    modelParameters.setComponents(components);
}
From source file:com.datumbox.framework.core.machinelearning.featureselection.continuous.PCA.java
/** {@inheritDoc} */
@Override
protected void _fit(Dataframe originalData) {
    ModelParameters modelParameters = kb().getModelParameters();

    int n = modelParameters.getN();
    int d = modelParameters.getD();

    // convert data into matrix
    Map<Object, Integer> featureIds = modelParameters.getFeatureIds();
    MatrixDataframe matrixDataset = MatrixDataframe.newInstance(originalData, false, null, featureIds);
    RealMatrix X = matrixDataset.getX();

    // calculate means and subtract them from data
    double[] meanValues = new double[d];
    for (Integer columnId : featureIds.values()) {
        meanValues[columnId] = 0.0;
        for (double v : X.getColumn(columnId)) {
            meanValues[columnId] += v;
        }
        meanValues[columnId] /= n;

        for (int row = 0; row < n; row++) {
            X.addToEntry(row, columnId, -meanValues[columnId]);
        }
    }
    modelParameters.setMean(meanValues);

    RealMatrix components;
    double[] eigenValues;

    // dxd matrix
    RealMatrix covarianceDD = (X.transpose().multiply(X)).scalarMultiply(1.0 / (n - 1.0));

    EigenDecomposition decomposition = new EigenDecomposition(covarianceDD);
    eigenValues = decomposition.getRealEigenvalues();
    components = decomposition.getV();

    // Whiten Components W = U*L^0.5; To whiten them we multiply with L^0.5.
    if (kb().getTrainingParameters().isWhitened()) {
        double[] sqrtEigenValues = new double[eigenValues.length];
        for (int i = 0; i < eigenValues.length; i++) {
            sqrtEigenValues[i] = Math.sqrt(eigenValues[i]);
        }
        components = components.multiply(new DiagonalMatrix(sqrtEigenValues));
    }

    // the eigenvalues and their components are sorted by descending order, no need to resort them
    Integer maxDimensions = kb().getTrainingParameters().getMaxDimensions();
    Double variancePercentageThreshold = kb().getTrainingParameters().getVariancePercentageThreshold();
    if (variancePercentageThreshold != null && variancePercentageThreshold <= 1) {
        double sum = 0.0;
        double totalVariance = StatUtils.sum(eigenValues);
        int varCounter = 0;
        for (double l : eigenValues) {
            sum += l / totalVariance;
            varCounter++;
            if (sum >= variancePercentageThreshold) {
                break;
            }
        }

        if (maxDimensions == null || maxDimensions > varCounter) {
            maxDimensions = varCounter;
        }
    }

    if (maxDimensions != null && maxDimensions < d) {
        // keep only the maximum selected eigenvalues
        double[] newEigenValues = new double[maxDimensions];
        System.arraycopy(eigenValues, 0, newEigenValues, 0, maxDimensions);
        eigenValues = newEigenValues;

        // keep only the maximum selected eigenvectors
        components = components.getSubMatrix(0, components.getRowDimension() - 1, 0, maxDimensions - 1);
    }

    modelParameters.setRows(components.getRowDimension());
    modelParameters.setCols(components.getColumnDimension());

    modelParameters.setEigenValues(eigenValues);
    modelParameters.setComponents(components.getData());
}
From source file:edu.oregonstate.eecs.mcplan.ml.InformationTheoreticMetricLearner.java
/**
 * Execute the algorithm.
 * @see java.lang.Runnable#run()
 */
@Override
public void run() {
    System.out.println("\tITML run()");

    final RealMatrix A = A0_.copy();
    final int[] idx = Fn.range(0, Nc_);

    int iter_count = 0;
    final int logging_interval = 1;
    double cumulative_update = 0.0;
    while (iter_count++ < iteration_limit_) {
        if (iter_count % logging_interval == 0) {
            System.out.println("\tIteration " + iter_count);
        }
        Fn.shuffle(rng_, idx);
        double update_magnitude = 0.0;
        for (int c = 0; c < Nc_; ++c) {
            final int[] constraint;
            final RealVector xdiff;
            final int delta;
            if (c < S_.size()) {
                // constraint = S_.get( c );
                xdiff = new ArrayRealVector(S_.get(c));
                delta = 1;
            } else {
                // constraint = D_.get( c - S_.size() );
                xdiff = new ArrayRealVector(D_.get(c - S_.size()));
                delta = -1;
            }
            // final int i = constraint[0];
            // final int j = constraint[1];
            // final RealVector xdiff = X_.get( i ).subtract( X_.get( j ) );
            // final double p = xdiff.dotProduct( A.operate( xdiff ) );
            // if( p == 0.0 ) {
            //     System.out.println( "! p == 0.0" );
            // }
            // if( xi_[c] == 0.0 ) {
            //     System.out.println( "! xi_[c] == 0.0" );
            // }
            final double p = HilbertSpace.inner_prod(xdiff, A, xdiff);
            final double alpha;
            if (p == 0.0) {
                alpha = lambda_[c];
            } else {
                alpha = Math.min(lambda_[c], (delta / 2.0) * ((1.0 / p) - (gamma_ / xi_[c])));
            }
            final double beta = (delta * alpha) / (1 - delta * alpha * p);
            xi_[c] = (gamma_ * xi_[c]) / (gamma_ + delta * alpha * xi_[c]);
            lambda_[c] -= alpha;
            // This implements: A += beta * A xdiff xdiff^T A
            final RealVector Ax = A.operate(xdiff);
            // final RealMatrix outer_prod = Ax.outerProduct( Ax );
            for (int row = 0; row < A.getRowDimension(); ++row) {
                final double axi = Ax.getEntry(row);
                for (int col = 0; col < A.getColumnDimension(); ++col) {
                    final double a = axi * Ax.getEntry(col);
                    A.addToEntry(row, col, a * beta);
                }
            }
            // final RealMatrix outer_prod = xdiff.outerProduct( xdiff );
            // final RealMatrix update = A.multiply( outer_prod ).multiply( A ).scalarMultiply( beta );
            // A = A.add( update );
            update_magnitude += alpha * alpha; //update.getFrobeniusNorm();
        }
        cumulative_update += update_magnitude;
        if (iter_count % logging_interval == 0) {
            System.out.println("\tupdate = " + (cumulative_update / logging_interval));
            cumulative_update = 0.0;
        }
        // Declare convergence if all updates were small.
        if (Math.sqrt(update_magnitude) < convergence_tolerance_) {
            break;
        }
    }
    A_ = A;
    // A_ = MatrixAlgorithms.makePositiveDefinite( A, 1e-4 );

    // System.out.println( "Metric:" );
    // for( int i = 0; i < A_.getRowDimension(); ++i ) {
    //     System.out.println( A_.getRowVector( i ) );
    // }

    // Check for positive-definiteness
    // final EigenDecomposition eig = new EigenDecomposition( A_ );
    // final double det = eig.getDeterminant();
    // assert( det > 0.0 );
}
From source file:com.github.tteofili.looseen.yay.SGM.java
/**
 * Perform weights learning from the training examples using (configurable) mini batch gradient descent algorithm.
 *
 * @param samples the training examples
 * @return the final cost with the updated weights
 * @throws Exception if BGD fails to converge or any numerical error happens
 */
private double learnWeights(Sample... samples) throws Exception {
    int iterations = 0;
    double cost = Double.MAX_VALUE;
    int j = 0;

    // momentum
    RealMatrix vb = MatrixUtils.createRealMatrix(biases[0].getRowDimension(), biases[0].getColumnDimension());
    RealMatrix vb2 = MatrixUtils.createRealMatrix(biases[1].getRowDimension(), biases[1].getColumnDimension());
    RealMatrix vw = MatrixUtils.createRealMatrix(weights[0].getRowDimension(), weights[0].getColumnDimension());
    RealMatrix vw2 = MatrixUtils.createRealMatrix(weights[1].getRowDimension(), weights[1].getColumnDimension());

    long start = System.currentTimeMillis();
    int c = 1;
    RealMatrix x = MatrixUtils.createRealMatrix(configuration.batchSize, samples[0].getInputs().length);
    RealMatrix y = MatrixUtils.createRealMatrix(configuration.batchSize, samples[0].getOutputs().length);
    while (true) {
        int i = 0;
        for (int k = j * configuration.batchSize; k < j * configuration.batchSize + configuration.batchSize; k++) {
            Sample sample = samples[k % samples.length];
            x.setRow(i, sample.getInputs());
            y.setRow(i, sample.getOutputs());
            i++;
        }
        j++;

        long time = (System.currentTimeMillis() - start) / 1000;
        if (iterations % (1 + (configuration.maxIterations / 100)) == 0 && time > 60 * c) {
            c += 1;
            // System.out.println("cost: " + cost + ", accuracy: " + evaluate(this) + " after " + iterations + " iterations in " + (time / 60) + " minutes (" + ((double) iterations / time) + " ips)");
        }

        RealMatrix w0t = weights[0].transpose();
        RealMatrix w1t = weights[1].transpose();

        RealMatrix hidden = rectifierFunction.applyMatrix(x.multiply(w0t));
        hidden.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
            @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
            @Override public double visit(int row, int column, double value) { return value + biases[0].getEntry(0, column); }
            @Override public double end() { return 0; }
        });

        RealMatrix scores = hidden.multiply(w1t);
        scores.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
            @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
            @Override public double visit(int row, int column, double value) { return value + biases[1].getEntry(0, column); }
            @Override public double end() { return 0; }
        });

        RealMatrix probs = scores.copy();
        int len = scores.getColumnDimension() - 1;
        for (int d = 0; d < configuration.window - 1; d++) {
            int startColumn = d * len / (configuration.window - 1);
            RealMatrix subMatrix = scores.getSubMatrix(0, scores.getRowDimension() - 1, startColumn,
                    startColumn + x.getColumnDimension());
            for (int sm = 0; sm < subMatrix.getRowDimension(); sm++) {
                probs.setSubMatrix(softmaxActivationFunction.applyMatrix(subMatrix.getRowMatrix(sm)).getData(),
                        sm, startColumn);
            }
        }

        RealMatrix correctLogProbs = MatrixUtils.createRealMatrix(x.getRowDimension(), 1);
        correctLogProbs.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
            @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
            @Override public double visit(int row, int column, double value) { return -Math.log(probs.getEntry(row, getMaxIndex(y.getRow(row)))); }
            @Override public double end() { return 0; }
        });

        double dataLoss = correctLogProbs.walkInOptimizedOrder(new RealMatrixPreservingVisitor() {
            private double d = 0;
            @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
            @Override public void visit(int row, int column, double value) { d += value; }
            @Override public double end() { return d; }
        }) / samples.length;

        double reg = 0d;
        reg += weights[0].walkInOptimizedOrder(new RealMatrixPreservingVisitor() {
            private double d = 0d;
            @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
            @Override public void visit(int row, int column, double value) { d += Math.pow(value, 2); }
            @Override public double end() { return d; }
        });
        reg += weights[1].walkInOptimizedOrder(new RealMatrixPreservingVisitor() {
            private double d = 0d;
            @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
            @Override public void visit(int row, int column, double value) { d += Math.pow(value, 2); }
            @Override public double end() { return d; }
        });

        double regLoss = 0.5 * configuration.regularizationLambda * reg;
        double newCost = dataLoss + regLoss;
        if (iterations == 0) {
            // System.out.println("started with cost = " + dataLoss + " + " + regLoss + " = " + newCost);
        }

        if (Double.POSITIVE_INFINITY == newCost) {
            throw new Exception("failed to converge at iteration " + iterations + " with alpha "
                    + configuration.alpha + " : cost going from " + cost + " to " + newCost);
        } else if (iterations > 1
                && (newCost < configuration.threshold || iterations > configuration.maxIterations)) {
            cost = newCost;
            // System.out.println("successfully converged after " + (iterations - 1) + " iterations (alpha:" + configuration.alpha + ",threshold:" + configuration.threshold + ") with cost " + newCost);
            break;
        } else if (Double.isNaN(newCost)) {
            throw new Exception("failed to converge at iteration " + iterations + " with alpha "
                    + configuration.alpha + " : cost calculation underflow");
        }

        // update registered cost
        cost = newCost;

        // calculate the derivatives to update the parameters
        RealMatrix dscores = probs.copy();
        dscores.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
            @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
            @Override public double visit(int row, int column, double value) { return (y.getEntry(row, column) == 1 ? (value - 1) : value) / samples.length; }
            @Override public double end() { return 0; }
        });

        // get derivative on second layer
        RealMatrix dW2 = hidden.transpose().multiply(dscores);

        // regularize dw2
        dW2.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
            @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
            @Override public double visit(int row, int column, double value) { return value + configuration.regularizationLambda * w1t.getEntry(row, column); }
            @Override public double end() { return 0; }
        });

        RealMatrix db2 = MatrixUtils.createRealMatrix(biases[1].getRowDimension(), biases[1].getColumnDimension());
        dscores.walkInOptimizedOrder(new RealMatrixPreservingVisitor() {
            @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
            @Override public void visit(int row, int column, double value) { db2.setEntry(0, column, db2.getEntry(0, column) + value); }
            @Override public double end() { return 0; }
        });

        RealMatrix dhidden = dscores.multiply(weights[1]);
        dhidden.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
            @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
            @Override public double visit(int row, int column, double value) { return value < 0 ? 0 : value; }
            @Override public double end() { return 0; }
        });

        RealMatrix db = MatrixUtils.createRealMatrix(biases[0].getRowDimension(), biases[0].getColumnDimension());
        dhidden.walkInOptimizedOrder(new RealMatrixPreservingVisitor() {
            @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
            @Override public void visit(int row, int column, double value) { db.setEntry(0, column, db.getEntry(0, column) + value); }
            @Override public double end() { return 0; }
        });

        // get derivative on first layer
        RealMatrix dW = x.transpose().multiply(dhidden);

        // regularize
        dW.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
            @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
            @Override public double visit(int row, int column, double value) { return value + configuration.regularizationLambda * w0t.getEntry(row, column); }
            @Override public double end() { return 0; }
        });

        RealMatrix dWt = dW.transpose();
        RealMatrix dWt2 = dW2.transpose();

        if (configuration.useNesterovMomentum) {
            // update nesterov momentum
            final RealMatrix vbPrev = vb.copy();
            final RealMatrix vb2Prev = vb2.copy();
            final RealMatrix vwPrev = vw.copy();
            final RealMatrix vw2Prev = vw2.copy();

            vb.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return configuration.mu * value - configuration.alpha * db.getEntry(row, column); }
                @Override public double end() { return 0; }
            });
            vb2.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return configuration.mu * value - configuration.alpha * db2.getEntry(row, column); }
                @Override public double end() { return 0; }
            });
            vw.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return configuration.mu * value - configuration.alpha * dWt.getEntry(row, column); }
                @Override public double end() { return 0; }
            });
            vw2.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return configuration.mu * value - configuration.alpha * dWt2.getEntry(row, column); }
                @Override public double end() { return 0; }
            });

            // update bias
            biases[0].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return value - configuration.mu * vbPrev.getEntry(row, column) + (1 + configuration.mu) * vb.getEntry(row, column); }
                @Override public double end() { return 0; }
            });
            biases[1].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return value - configuration.mu * vb2Prev.getEntry(row, column) + (1 + configuration.mu) * vb2.getEntry(row, column); }
                @Override public double end() { return 0; }
            });

            // update the weights
            weights[0].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return value - configuration.mu * vwPrev.getEntry(row, column) + (1 + configuration.mu) * vw.getEntry(row, column); }
                @Override public double end() { return 0; }
            });
            weights[1].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return value - configuration.mu * vw2Prev.getEntry(row, column) + (1 + configuration.mu) * vw2.getEntry(row, column); }
                @Override public double end() { return 0; }
            });
        } else if (configuration.useMomentum) {
            // update momentum
            vb.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return configuration.mu * value - configuration.alpha * db.getEntry(row, column); }
                @Override public double end() { return 0; }
            });
            vb2.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return configuration.mu * value - configuration.alpha * db2.getEntry(row, column); }
                @Override public double end() { return 0; }
            });
            vw.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return configuration.mu * value - configuration.alpha * dWt.getEntry(row, column); }
                @Override public double end() { return 0; }
            });
            vw2.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return configuration.mu * value - configuration.alpha * dWt2.getEntry(row, column); }
                @Override public double end() { return 0; }
            });

            // update bias
            biases[0].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return value + vb.getEntry(row, column); }
                @Override public double end() { return 0; }
            });
            biases[1].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return value + vb2.getEntry(row, column); }
                @Override public double end() { return 0; }
            });

            // update the weights
            weights[0].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return value + vw.getEntry(row, column); }
                @Override public double end() { return 0; }
            });
            weights[1].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return value + vw2.getEntry(row, column); }
                @Override public double end() { return 0; }
            });
        } else {
            // standard parameter update
            // update bias
            biases[0].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return value - configuration.alpha * db.getEntry(row, column); }
                @Override public double end() { return 0; }
            });
            biases[1].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return value - configuration.alpha * db2.getEntry(row, column); }
                @Override public double end() { return 0; }
            });

            // update the weights
            weights[0].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return value - configuration.alpha * dWt.getEntry(row, column); }
                @Override public double end() { return 0; }
            });
            weights[1].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) { }
                @Override public double visit(int row, int column, double value) { return value - configuration.alpha * dWt2.getEntry(row, column); }
                @Override public double end() { return 0; }
            });
        }

        iterations++;
    }
    return cost;
}
From source file:edu.cudenver.bios.power.GLMMPowerCalculator.java
/**
 * Generate a sample of powers for a design with a random covariate
 * @param params power parameters
 * @param iterations size of the sample
 * @return
 */
private SimulatedPower[] getSimulatedPowerSampleValue(GLMMPowerParameters params, Test test, PowerMethod method,
        double alpha, double sigmaScale, double betaScale, int sampleSize, double quantile, int iterations) {
    // scale beta and sigma
    RealMatrix scaledBeta = params.getBeta().scalarMultiply(betaScale, true);
    RealMatrix scaledSigmaError = params.getSigmaError().scalarMultiply(sigmaScale);

    // get random errors
    RandomErrorMatrix randomErrors = new RandomErrorMatrix(
            MatrixUtils.getTotalSampleSize(params.getDesignEssence(), sampleSize),
            scaledBeta.getColumnDimension(), scaledSigmaError);
    randomErrors.setPositivityThreshold(positivityThreshold);
    randomErrors.setSymmetryThreshold(symmetryThreshold);

    SimulatedPower[] simPower = new SimulatedPower[iterations];
    for (int gInstance = 0; gInstance < iterations; gInstance++) {
        // force a new realization of the design matrix (i.e. a new covariate column)
        RealMatrix X = MatrixUtils.getFullDesignMatrix(params.getDesignEssence(),
                params.getSigmaGaussianRandom(), sampleSize, seed);
        RealMatrix XtXinverse = new LUDecomposition(X.transpose().multiply(X)).getSolver().getInverse();

        int rejectionCount = 0;
        simPower[gInstance] = new SimulatedPower(scaledBeta.getRowDimension(), scaledBeta.getColumnDimension());
        for (int i = 0; i < SIMULATION_ITERATIONS_QUANTILE_UNCONDITIONAL; i++) {
            SimulationFit fit = simulateAndFitModel(params, test, X, XtXinverse, scaledBeta, randomErrors,
                    sampleSize, alpha);
            if (fit.Pvalue <= alpha)
                rejectionCount++;
            simPower[gInstance].accumulateBeta(fit.beta);
        }
        simPower[gInstance]
                .setPower(((double) rejectionCount) / ((double) SIMULATION_ITERATIONS_QUANTILE_UNCONDITIONAL));
    }

    return simPower;
}
From source file:com.bolatu.gezkoncsvlogger.GyroOrientation.RotationKalmanFilter.java
/**
 * Creates a new Kalman filter with the given process and measurement
 * models.
 *
 * @param process
 *            the model defining the underlying process dynamics
 * @param measurement
 *            the model defining the given measurement characteristics
 * @throws NullArgumentException
 *             if any of the given inputs is null (except for the control
 *             matrix)
 * @throws NonSquareMatrixException
 *             if the transition matrix is non square
 * @throws DimensionMismatchException
 *             if the column dimension of the transition matrix does not
 *             match the dimension of the initial state estimation vector
 * @throws MatrixDimensionMismatchException
 *             if the matrix dimensions do not fit together
 */
public RotationKalmanFilter(final ProcessModel process, final MeasurementModel measurement)
        throws NullArgumentException, NonSquareMatrixException, DimensionMismatchException,
        MatrixDimensionMismatchException {
    MathUtils.checkNotNull(process);
    MathUtils.checkNotNull(measurement);

    this.processModel = process;
    this.measurementModel = measurement;

    transitionMatrix = processModel.getStateTransitionMatrix();
    MathUtils.checkNotNull(transitionMatrix);
    transitionMatrixT = transitionMatrix.transpose();

    // create an empty matrix if no control matrix was given
    if (processModel.getControlMatrix() == null) {
        controlMatrix = new Array2DRowRealMatrix();
    } else {
        controlMatrix = processModel.getControlMatrix();
    }

    measurementMatrix = measurementModel.getMeasurementMatrix();
    MathUtils.checkNotNull(measurementMatrix);
    measurementMatrixT = measurementMatrix.transpose();

    // check that the process and measurement noise matrices are not null
    // they will be directly accessed from the model as they may change
    // over time
    RealMatrix processNoise = processModel.getProcessNoise();
    MathUtils.checkNotNull(processNoise);
    RealMatrix measNoise = measurementModel.getMeasurementNoise();
    MathUtils.checkNotNull(measNoise);

    // set the initial state estimate to a zero vector if it is not
    // available from the process model
    if (processModel.getInitialStateEstimate() == null) {
        stateEstimation = new ArrayRealVector(transitionMatrix.getColumnDimension());
    } else {
        stateEstimation = processModel.getInitialStateEstimate();
    }

    if (transitionMatrix.getColumnDimension() != stateEstimation.getDimension()) {
        throw new DimensionMismatchException(transitionMatrix.getColumnDimension(),
                stateEstimation.getDimension());
    }

    // initialize the error covariance to the process noise if it is not
    // available from the process model
    if (processModel.getInitialErrorCovariance() == null) {
        errorCovariance = processNoise.copy();
    } else {
        errorCovariance = processModel.getInitialErrorCovariance();
    }

    // sanity checks, the control matrix B may be null
    // A must be a square matrix
    if (!transitionMatrix.isSquare()) {
        throw new NonSquareMatrixException(transitionMatrix.getRowDimension(),
                transitionMatrix.getColumnDimension());
    }

    // row dimension of B must be equal to A
    // if no control matrix is available, the row and column dimension will
    // be 0
    if (controlMatrix != null && controlMatrix.getRowDimension() > 0 && controlMatrix.getColumnDimension() > 0
            && controlMatrix.getRowDimension() != transitionMatrix.getRowDimension()) {
        throw new MatrixDimensionMismatchException(controlMatrix.getRowDimension(),
                controlMatrix.getColumnDimension(), transitionMatrix.getRowDimension(),
                controlMatrix.getColumnDimension());
    }

    // Q must be equal to A
    MatrixUtils.checkAdditionCompatible(transitionMatrix, processNoise);

    // column dimension of H must be equal to row dimension of A
    if (measurementMatrix.getColumnDimension() != transitionMatrix.getRowDimension()) {
        throw new MatrixDimensionMismatchException(measurementMatrix.getRowDimension(),
                measurementMatrix.getColumnDimension(), measurementMatrix.getRowDimension(),
                transitionMatrix.getRowDimension());
    }

    // row dimension of R must be equal to row dimension of H
    if (measNoise.getRowDimension() != measurementMatrix.getRowDimension()) {
        throw new MatrixDimensionMismatchException(measNoise.getRowDimension(), measNoise.getColumnDimension(),
                measurementMatrix.getRowDimension(), measNoise.getColumnDimension());
    }
}