List of usage examples for the org.apache.commons.math3.linear.BlockRealMatrix constructor BlockRealMatrix(double[][] rawData)
public BlockRealMatrix(final double[][] rawData) throws DimensionMismatchException, NotStrictlyPositiveException
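Before the project snippets below, a minimal self-contained sketch of this constructor may help (class and variable names here are illustrative, not taken from any of the source files). The raw array is copied into the matrix's internal block layout, and the two declared exceptions guard against ragged and empty input:

import org.apache.commons.math3.linear.BlockRealMatrix;

public class BlockRealMatrixConstructorDemo {
    public static void main(String[] args) {
        // rawData is copied, so later edits to the array do not affect the matrix
        double[][] rawData = {
                { 1.0, 2.0, 3.0 },
                { 4.0, 5.0, 6.0 }
        };
        BlockRealMatrix m = new BlockRealMatrix(rawData);

        System.out.println(m.getRowDimension());    // 2
        System.out.println(m.getColumnDimension()); // 3
        System.out.println(m.getEntry(1, 2));       // 6.0

        // Per the declared exceptions: ragged input (rows of different
        // lengths) yields DimensionMismatchException, and a zero-size
        // array yields NotStrictlyPositiveException.
    }
}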
From source file:edu.cmu.tetrad.util.TetradMatrix.java
public TetradMatrix(double[][] data) {
    if (data.length == 0) {
        this.apacheData = new Array2DRowRealMatrix();
    } else {
        // this.apacheData = new OpenMapRealMatrix(data.length, data[0].length);
        //
        // for (int i = 0; i < data.length; i++) {
        //     for (int j = 0; j < data[0].length; j++) {
        //         apacheData.setEntry(i, j, data[i][j]);
        //     }
        // }
        this.apacheData = new BlockRealMatrix(data);
    }

    this.m = data.length;
    this.n = m == 0 ? 0 : data[0].length;
}
From source file:edu.cmu.tetrad.util.TetradMatrix1.java
public TetradMatrix1(double[][] data) {
    if (data.length == 0) {
        this.apacheData = new Array2DRowRealMatrix();
    } else {
        // this.apacheData = new OpenMapRealMatrix(data.length, data[0].length);
        //
        // for (int i = 0; i < data.length; i++) {
        //     for (int j = 0; j < data[0].length; j++) {
        //         apacheData.setEntry(i, j, data[i][j]);
        //     }
        // }
        this.apacheData = new BlockRealMatrix(data);
    }

    this.m = data.length;
    this.n = m == 0 ? 0 : data[0].length;
}
From source file:com.datumbox.opensource.examples.DPMMExample.java
/**
 * Demo of Dirichlet Process Mixture Model with Gaussian components.
 */
public static void GDPMM() {
    System.out.println("Gaussian DPMM");

    //Data points to cluster
    List<Point> pointList = new ArrayList<>();
    //cluster 1
    pointList.add(new Point(0, new ArrayRealVector(new double[] { 5.0, 1.0 })));
    pointList.add(new Point(1, new ArrayRealVector(new double[] { 5.1, 1.1 })));
    pointList.add(new Point(2, new ArrayRealVector(new double[] { 4.9, 0.9 })));
    //cluster 2
    pointList.add(new Point(3, new ArrayRealVector(new double[] { 15.0, 11.0 })));
    pointList.add(new Point(4, new ArrayRealVector(new double[] { 15.1, 11.1 })));
    pointList.add(new Point(5, new ArrayRealVector(new double[] { 14.9, 10.9 })));
    //cluster 3
    pointList.add(new Point(6, new ArrayRealVector(new double[] { 1.0, 5.0 })));
    pointList.add(new Point(7, new ArrayRealVector(new double[] { 1.1, 5.1 })));
    pointList.add(new Point(8, new ArrayRealVector(new double[] { 0.9, 4.9 })));

    //Dirichlet Process parameter
    Integer dimensionality = 2;
    double alpha = 1.0;

    //Hyperparameters of the base function
    int kappa0 = 0;
    int nu0 = 1;
    RealVector mu0 = new ArrayRealVector(new double[] { 0.0, 0.0 });
    RealMatrix psi0 = new BlockRealMatrix(new double[][] { { 1.0, 0.0 }, { 0.0, 1.0 } });

    //Create a DPMM object
    DPMM dpmm = new GaussianDPMM(dimensionality, alpha, kappa0, nu0, mu0, psi0);

    int maxIterations = 100;
    int performedIterations = dpmm.cluster(pointList, maxIterations);
    if (performedIterations < maxIterations) {
        System.out.println("Converged in " + String.valueOf(performedIterations));
    } else {
        System.out.println("Max iterations of " + String.valueOf(performedIterations)
                + " reached. Possibly did not converge.");
    }

    //get a list with the point ids and their assignments
    Map<Integer, Integer> zi = dpmm.getPointAssignments();
    System.out.println(zi.toString());
}
From source file:com.clust4j.algo.preprocess.PCA.java
@Override
public double[][] transform(double[][] data) {
    checkFit();
    MatUtils.checkDimsForUniformity(data);

    double[][] x = this.centerer.transform(data);

    // use block because it's faster for multiplication of potentially large matrices
    BlockRealMatrix X = new BlockRealMatrix(x);
    BlockRealMatrix transformed = X.multiply(this.components.transpose());

    return transformed.getData();
}
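The snippet above chooses BlockRealMatrix precisely because its blocked storage is cache-friendly when multiplying large matrices. A minimal standalone sketch of the same project-and-extract pattern, with all data and names illustrative rather than taken from clust4j:

import org.apache.commons.math3.linear.BlockRealMatrix;

public class ProjectionSketch {
    public static void main(String[] args) {
        // 3 samples x 2 features, assumed already centered
        BlockRealMatrix X = new BlockRealMatrix(new double[][] {
                { 1.0, 0.0 },
                { 0.0, 1.0 },
                { -1.0, -1.0 }
        });
        // 2 components x 2 features, stored row-wise; here simply the identity basis
        BlockRealMatrix components = new BlockRealMatrix(new double[][] {
                { 1.0, 0.0 },
                { 0.0, 1.0 }
        });

        // project: scores = X * components^T, then extract the raw array
        double[][] scores = X.multiply(components.transpose()).getData();
        System.out.println(scores[2][0] + ", " + scores[2][1]); // -1.0, -1.0
    }
}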
From source file:edu.cmu.tetrad.util.MatrixUtils1.java
public static double[][] pseudoInverse(double[][] x) {
    SingularValueDecomposition svd = new SingularValueDecomposition(new BlockRealMatrix(x));

    RealMatrix U = svd.getU();
    RealMatrix V = svd.getV();
    RealMatrix S = svd.getS();

    for (int i = 0; i < S.getRowDimension(); i++) {
        for (int j = 0; j < S.getColumnDimension(); j++) {
            double v = S.getEntry(i, j);
            S.setEntry(i, j, v == 0 ? 0.0 : 1.0 / v);
        }
    }

    return V.multiply(S.multiply(U.transpose())).getData();
}
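A quick sanity check for a helper like this is the Moore-Penrose identity A·A⁺·A = A. The sketch below assumes the pseudoInverse method above is in scope (e.g., via static import); since the helper only zeroes singular values that are exactly 0.0, a full-column-rank input is used so no near-zero value gets inverted:

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.RealMatrix;

public class PseudoInverseCheck {
    public static void main(String[] args) {
        // full column rank, so all singular values are comfortably nonzero
        double[][] a = { { 1.0, 2.0 }, { 3.0, 4.0 }, { 5.0, 6.0 } };

        RealMatrix A = new Array2DRowRealMatrix(a);
        RealMatrix Aplus = new Array2DRowRealMatrix(pseudoInverse(a));

        // Moore-Penrose identity: A * A+ * A should reproduce A
        RealMatrix residual = A.multiply(Aplus).multiply(A).subtract(A);
        System.out.println(residual.getNorm()); // effectively zero, up to rounding
    }
}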
From source file:com.itemanalysis.psychometrics.mixture.MvNormalComponentDistributionTest.java
/**
 * Fits a two-component mixture model to data with two variables. The
 * estimated means and covariances should match the sample mean and
 * covariance of each group.
 */
// @Test
public void mixModel2Test() {
    System.out.println("Mixture model test with two variables and two components");

    /*
     * x contains 2,000 cases and three variables. The first two variables
     * are continuous data. The last variable is a group indicator variable.
     */
    double[][] x = testData.getMixtureData2();
    BlockRealMatrix allData = new BlockRealMatrix(x);
    int nrow = allData.getRowDimension();

    BlockRealMatrix group1Data = allData.getSubMatrix(0, 799, 0, 1);
    BlockRealMatrix group2Data = allData.getSubMatrix(800, 1999, 0, 1);

    VectorialMean trueMean1 = new VectorialMean(2);
    VectorialMean trueMean2 = new VectorialMean(2);
    Covariance trueCov1 = new Covariance(group1Data);
    Covariance trueCov2 = new Covariance(group2Data);

    for (int i = 0; i < group1Data.getRowDimension(); i++) {
        trueMean1.increment(group1Data.getRow(i));
    }
    for (int i = 0; i < group2Data.getRowDimension(); i++) {
        trueMean2.increment(group2Data.getRow(i));
    }
    double[] trueMeanGroup1 = trueMean1.getResult();
    double[] trueMeanGroup2 = trueMean2.getResult();

    // actual data in first two columns
    BlockRealMatrix data = allData.getSubMatrix(0, nrow - 1, 0, 1);

    // run two-group mixture model
    int startGroup = 2;
    int endGroup = 2;
    int maxIter = 500;
    double converge = 1e-12;
    int starts = 100;

    MvNormalMixtureModel model = new MvNormalMixtureModel(data, endGroup);
    model.setModelConstraints(true, true, true, true);
    model.setEmOptions(maxIter, converge, starts);
    model.call();
    // System.out.println(model.printResults());

    double[] pi = new double[2];
    pi[0] = model.getMixingProportion(0);
    pi[1] = model.getMixingProportion(1);
    int[] index = { 0, 1 };

    // group 1 should be the smaller group. If not, reverse indexes.
    if (pi[0] > pi[1]) {
        index[0] = 1;
        index[1] = 0;
    }

    // check convergence
    System.out.println("    Testing convergence...");
    assertTrue("Convergence test", model.converged());

    // check mixing proportions
    System.out.println("    Testing proportions...");
    assertEquals("Proportion 1 test: ", 0.40, pi[index[0]], 5e-2);
    assertEquals("Proportion 2 test: ", 0.60, pi[index[1]], 5e-2);

    // get estimated parameters
    double[] estMeanGroup1 = model.getMean(index[0]);
    double[] estMeanGroup2 = model.getMean(index[1]);
    double[][] estCovGroup1 = model.getCov(index[0]);
    double[][] estCovGroup2 = model.getCov(index[1]);

    // check mean vector for each group
    System.out.println("    Testing means...");
    assertEquals("Mean (1,1) test", trueMeanGroup1[index[0]], estMeanGroup1[index[0]], 5e-2);
    assertEquals("Mean (1,2) test", trueMeanGroup1[index[1]], estMeanGroup1[index[1]], 5e-2);
    assertEquals("Mean (2,1) test", trueMeanGroup2[index[0]], estMeanGroup2[index[0]], 5e-2);
    assertEquals("Mean (2,2) test", trueMeanGroup2[index[1]], estMeanGroup2[index[1]], 5e-2);

    // check covariance matrix for group 1
    System.out.println("    Testing covariances...");
    double[][] trueCovGroup1 = trueCov1.getCovarianceMatrix().getData();
    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 2; j++) {
            assertEquals("CovGroup1[" + i + "," + j + "] test:", trueCovGroup1[i][j], estCovGroup1[i][j], 5e-2);
        }
    }

    // check covariance matrix for group 2
    double[][] trueCovGroup2 = trueCov2.getCovarianceMatrix().getData();
    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 2; j++) {
            assertEquals("CovGroup2[" + i + "," + j + "] test:", trueCovGroup2[i][j], estCovGroup2[i][j], 5e-2);
        }
    }
}
From source file:edu.cmu.tetrad.util.ApacheTetradMatrix.java
/**
 * Adds semantic checks to the default deserialization method. This method
 * must have the standard signature for a readObject method, and the body
 * of the method must begin with "s.defaultReadObject();". Other than that,
 * any semantic checks can be specified and do not need to stay the same
 * from version to version. A readObject method of this form may be added
 * to any class, even if Tetrad sessions were previously saved out using a
 * version of the class that didn't include it. (That's what the
 * "s.defaultReadObject();" is for. See J. Bloch, Effective Java, for help.)
 *
 * @throws java.io.IOException
 * @throws ClassNotFoundException
 */
private void readObject(ObjectInputStream s) throws IOException, ClassNotFoundException {
    s.defaultReadObject();

    if (this.data != null) {
        double[][] d = data.toArray();

        if (d.length == 0) {
            this.apacheData = new Array2DRowRealMatrix();
        } else {
            this.apacheData = new BlockRealMatrix(d);
        }

        this.data = null;
    }
}
From source file:com.datumbox.framework.machinelearning.featureselection.continuous.PCA.java
@Override
protected void filterFeatures(Dataset newdata) {
    ModelParameters modelParameters = knowledgeBase.getModelParameters();

    //convert data into matrix
    Map<Object, Integer> feature2ColumnId = modelParameters.getFeature2ColumnId();
    MatrixDataset matrixDataset = MatrixDataset.parseDataset(newdata, feature2ColumnId);
    RealMatrix X = matrixDataset.getX();

    /*
    //subtracting means
    double[] meanValues = modelParameters.getMean();

    int n = newdata.size();
    int cols = feature2ColumnId.size();
    for(int row=0;row<n;++row) {
        for(int columnId=0;columnId<cols;++columnId) {
            X.addToEntry(row, columnId, -meanValues[columnId]); //inplace subtraction!!!
        }
    }
    */

    RealMatrix components = new BlockRealMatrix(modelParameters.getComponents());

    //multiplying the data with components
    X = X.multiply(components);

    Dataset transformedDataset = new Dataset();

    for (Record r : newdata) {
        Integer recordId = r.getId();

        Record newR = new Record();
        int componentId = 0;
        for (double value : X.getRow(recordId)) {
            newR.getX().put(componentId, value);
            ++componentId;
        }
        newR.setY(r.getY());

        transformedDataset.add(newR);
    }

    newdata.clear();
    newdata.merge(transformedDataset);
}
From source file:gamlss.smoothing.PB.java
/**
 * The main fitting method: initiates the appropriate smoothing
 * function according to the incoming parameters.
 * @param additiveFit object of the AdditiveFit class
 * @return residuals
 */
//gamlss.pb <- function(x, y, w, xeval = NULL, ...)
public ArrayRealVector solve(final AdditiveFit additiveFit) {
    Double lambda = Controls.LAMBDAS_USER_DEFINED;

    colNum = additiveFit.getColNum();
    y = additiveFit.getZ();
    w = additiveFit.getW();
    whichDistParameter = additiveFit.getWhichDistParameter();
    basisM = (BlockRealMatrix) getBasisM().get(colNum);
    penaltyM = (BlockRealMatrix) getPenaltyM().get(colNum);
    df = (Integer) getDfValues().get(colNum);

    //n <- nrow(X) # the no of observations
    n = basisM.getRowDimension();

    //lambdaS <- get(startLambdaName, envir=gamlss.env)
    //getting the starting value
    lambdaS = getLambdas().get(whichDistParameter);

    //if (lambdaS>=1e+07) lambda <- 1e+07
    if (lambdaS >= 1e+07) {
        lambda = 1e+07;
    }
    //if (lambdaS<=1e-07) lambda <- 1e-07
    if (lambdaS <= 1e-07) {
        lambda = 1e-07;
    }

    //if (is.null(df)&&!is.null(lambda)||!is.null(df)&&!is.null(lambda))
    if (lambda != null) {

        //fit <- regpen(y, X, w, lambda, D)
        beta = regpen(lambda);

        //fv <- X %*% fit$beta
        fv = (ArrayRealVector) basisM.operate(beta);

    //else if (is.null(df)&&is.null(lambda))
    } else if (df == null) {

        //lambda <- lambdaS
        lambda = lambdaS;

        //switch(control$c," ML"={
        switch (Controls.SMOOTH_METHOD) {
        case DistributionSettings.GAIC:
            fv = functionGAIC(lambda);
            break;
        case DistributionSettings.ML:
            fv = functionML(lambda);
            break;
        case DistributionSettings.ML1:
            fv = functionML1(lambda);
            break;
        case DistributionSettings.EM:
            System.err.println("EM has not been implemented yet");
            break;
        case DistributionSettings.GCV:
            fv = functionGCV(lambda);
            break;
        default:
            System.err.println("Cannot recognise the smoothing method, "
                    + "or it has not been implemented yet");
        }
    } else {

        //QR <- qr(sqrt(w)*X)
        //Rinv <- solve(qr.R(QR))
        rM = (BlockRealMatrix) new QRDecomposition(
                MatrixFunctions.multVectorMatrix(MatrixFunctions.sqrtVec(w), basisM)).getR();
        rM = rM.getSubMatrix(0, rM.getColumnDimension() - 1, 0, rM.getColumnDimension() - 1);
        rMinv = (BlockRealMatrix) new QRDecomposition(rM).getSolver().getInverse();

        //S <- t(D)%*%D
        sM = penaltyM.transpose().multiply(penaltyM);

        //UDU <- eigen(t(Rinv)%*%S%*%Rinv)
        uduV = new EigenDecomposition(rMinv.transpose().multiply(sM).multiply(rMinv)).getRealEigenvalues();

        //lambda <- if (sign(edf1_df(0))==sign(edf1_df(100000))) 100000
        //in case they have the same sign
        edfTemp1 = edf1_df(0, df);
        edfTemp2 = edf1_df(100000.0, df);

        if (FastMath.signum(edfTemp1) == FastMath.signum(edfTemp2)) {
            lambda = 100000.0;
        } else {
            //else uniroot(edf1_df, c(0,100000))$root
            uniRootObj.setDf(df);
            final double relativeAccuracy = 1.0e-12;
            final double absoluteAccuracy = 1.0e-8;
            UnivariateSolver uniRootSolver = new BrentSolver(relativeAccuracy, absoluteAccuracy);
            lambda = uniRootSolver.solve(1000, uniRootObj, 0.0, 100000.0);
        }

        //fit <- regpen(y, X, w, lambda, D)
        beta = regpen(lambda);
        fv = (ArrayRealVector) basisM.operate(beta);
    }

    if (Controls.IF_NEED_THIS) {

        //waug <- as.vector(c(w, rep(1,nrow(D))))
        waug = w.append(MatrixFunctions.repV(1.0, penaltyM.getRowDimension()));

        //xaug <- as.matrix(rbind(X,sqrt(lambda)*D))
        xaug = MatrixFunctions.appendMatricesRows(basisM,
                (BlockRealMatrix) penaltyM.scalarMultiply(FastMath.sqrt(lambda)));

        //lev <- hat(sqrt(waug)*xaug,intercept=FALSE)[1:n]
        //get the hat matrix
        lev = (ArrayRealVector) MatrixFunctions.getMainDiagonal(new BlockRealMatrix(MatrixFunctions
                .calculateHat(MatrixFunctions.multVectorMatrix(MatrixFunctions.sqrtVec(waug), xaug)).getData()))
                .getSubVector(0, n);

        //lev <- (lev-.hat.WX(w,x))
        //subtract the linear part, since it is already fitted
        lev = lev.subtract(
                hatWX((ArrayRealVector) getSmootherMatrices().get(whichDistParameter).getColumnVector(colNum)));

        //var <- lev/w
        //the variance of the smoother
        var = lev.ebeDivide(w);
    }

    //if (is.null(xeval)) # if no prediction
    if (Controls.XEVAL_USER_DEFINED == null) {
        // Residuals
        return y.subtract(fv);
    //else # for prediction
    } else {
        //ll <- dim(as.matrix(attr(x,"X")))[1]
        //nx <- as.matrix(attr(x,"X"))[seq(length(y)+1,ll),]
        tempArr = ArithmeticSeries.getSeries(y.getDimension() + 1, basisM.getRowDimension(), 1);
        BlockRealMatrix nx = new BlockRealMatrix(tempArr.length, basisM.getColumnDimension());
        for (int i = 0; i < tempArr.length; i++) {
            nx.setRowVector(i, basisM.getRowVector((int) tempArr[i]));
        }

        //pred <- drop(nx %*% fit$beta)
        ArrayRealVector pred = (ArrayRealVector) nx.operate(beta);

        // Residuals (pred is computed above but not used in the returned value)
        return y.subtract(fv);
    }
}
From source file:com.datumbox.framework.core.machinelearning.featureselection.continuous.PCA.java
/** {@inheritDoc} */
@Override
protected void filterFeatures(Dataframe dataset) {
    ModelParameters modelParameters = kb().getModelParameters();

    //convert data into matrix
    Map<Object, Integer> featureIds = modelParameters.getFeatureIds();
    Map<Integer, Integer> recordIdsReference = new HashMap<>();
    MatrixDataframe matrixDataset = MatrixDataframe.parseDataset(dataset, recordIdsReference, featureIds);

    RealMatrix components = new BlockRealMatrix(modelParameters.getComponents());

    //multiplying the data with components
    final RealMatrix X = matrixDataset.getX().multiply(components);

    streamExecutor.forEach(StreamMethods.stream(dataset.entries(), isParallelized()), e -> {
        Integer rId = e.getKey();
        Record r = e.getValue();
        int rowId = recordIdsReference.get(rId);

        AssociativeArray xData = new AssociativeArray();
        int componentId = 0;
        for (double value : X.getRow(rowId)) {
            xData.put(componentId++, value);
        }

        Record newR = new Record(xData, r.getY(), r.getYPredicted(), r.getYPredictedProbabilities());

        //we call below the recalculateMeta()
        dataset._unsafe_set(rId, newR);
    });

    //recordIdsReference = null;
    //matrixDataset = null;

    dataset.recalculateMeta();
}