List of usage examples for weka.core.matrix Matrix set
public void set(int i, int j, double s)
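Before the project examples below, a minimal self-contained sketch of set(i, j, s), which assigns the value s to row i, column j (0-based indices). The class name and values here are illustrative only and are not taken from any of the projects listed:

import weka.core.matrix.Matrix;

public class MatrixSetExample {
    public static void main(String[] args) {
        Matrix m = new Matrix(2, 3);                        // 2x3 matrix, initialized to zeros
        for (int i = 0; i < m.getRowDimension(); i++) {
            for (int j = 0; j < m.getColumnDimension(); j++) {
                m.set(i, j, i * 10 + j);                    // fill cell (i, j)
            }
        }
        m.print(6, 1);                                      // column width 6, 1 decimal place
    }
}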
From source file:adams.core.discovery.genetic.WekaGeneticHelper.java
License:Open Source License
/**
 * Convert bit string into weka Matrix
 *
 * @param bits
 * @param min
 * @param max
 * @param numBits
 * @param splits
 * @param rows
 * @param columns
 * @return
 */
public static Matrix bitsToMatrix(String bits, double min, double max, int numBits, int splits, int rows, int columns) {
    Matrix m = new Matrix(rows, columns);
    for (int row = 0; row < rows; row++) {
        for (int column = 0; column < columns; column++) {
            int start = (row * columns * numBits) + (column * numBits);
            double j = 0;
            for (int i = start; i < start + numBits; i++) {
                if (bits.charAt(i) == '1') {
                    j = j + Math.pow(2, start + numBits - i - 1);
                }
            }
            j = Math.min(j, splits);
            double val = (min + j * ((max - min) / (double) (splits - 1)));
            m.set(row, column, val);
        }
    }
    return m;
}
From source file:adams.data.instancesanalysis.pls.PLS1.java
License:Open Source License
/**
 * Transforms the data, initializes if necessary.
 *
 * @param data the data to use
 */
protected Instances doTransform(Instances data, Map<String, Object> params) throws Exception {
    Matrix X, X_trans;
    Matrix y;
    Matrix W, w;
    Matrix T, t, t_trans;
    Matrix P, p, p_trans;
    double b;
    Matrix b_hat;
    int j;
    Matrix tmp;
    Instances result;

    // initialization
    if (!isInitialized()) {
        // split up data
        X = MatrixHelper.getX(data);
        y = MatrixHelper.getY(data);
        X_trans = X.transpose();

        // init
        W = new Matrix(data.numAttributes() - 1, getNumComponents());
        P = new Matrix(data.numAttributes() - 1, getNumComponents());
        T = new Matrix(data.numInstances(), getNumComponents());
        b_hat = new Matrix(getNumComponents(), 1);

        for (j = 0; j < getNumComponents(); j++) {
            // 1. step: wj
            w = X_trans.times(y);
            MatrixHelper.normalizeVector(w);
            MatrixHelper.setVector(w, W, j);

            // 2. step: tj
            t = X.times(w);
            t_trans = t.transpose();
            MatrixHelper.setVector(t, T, j);

            // 3. step: ^bj
            b = t_trans.times(y).get(0, 0) / t_trans.times(t).get(0, 0);
            b_hat.set(j, 0, b);

            // 4. step: pj
            p = X_trans.times(t).times(1 / t_trans.times(t).get(0, 0));
            p_trans = p.transpose();
            MatrixHelper.setVector(p, P, j);

            // 5. step: Xj+1
            X = X.minus(t.times(p_trans));
            y = y.minus(t.times(b));
        }

        // W*(P^T*W)^-1
        tmp = W.times(((P.transpose()).times(W)).inverse());

        // factor = W*(P^T*W)^-1 * b_hat
        m_r_hat = tmp.times(b_hat);

        // save matrices
        m_P = P;
        m_W = W;
        m_b_hat = b_hat;

        result = predict(data);
    }
    // prediction
    else {
        result = predict(data);
    }

    return result;
}
From source file:Classifier.supervised.LinearRegression.java
License:Open Source License
/**
 * Calculate a linear regression using the selected attributes
 *
 * @param selectedAttributes an array of booleans where each element
 * is true if the corresponding attribute should be included in the
 * regression.
 * @return an array of coefficients for the linear regression model.
 * @throws Exception if an error occurred during the regression.
 */
protected double[] doRegression(boolean[] selectedAttributes) throws Exception {
    if (m_Debug) {
        System.out.print("doRegression(");
        for (int i = 0; i < selectedAttributes.length; i++) {
            System.out.print(" " + selectedAttributes[i]);
        }
        System.out.println(" )");
    }
    int numAttributes = 0;
    for (int i = 0; i < selectedAttributes.length; i++) {
        if (selectedAttributes[i]) {
            numAttributes++;
        }
    }

    // Check whether there are still attributes left
    Matrix independent = null, dependent = null;
    if (numAttributes > 0) {
        independent = new Matrix(m_TransformedData.numInstances(), numAttributes);
        dependent = new Matrix(m_TransformedData.numInstances(), 1);
        for (int i = 0; i < m_TransformedData.numInstances(); i++) {
            Instance inst = m_TransformedData.instance(i);
            double sqrt_weight = Math.sqrt(inst.weight());
            int column = 0;
            for (int j = 0; j < m_TransformedData.numAttributes(); j++) {
                if (j == m_ClassIndex) {
                    dependent.set(i, 0, inst.classValue() * sqrt_weight);
                } else {
                    if (selectedAttributes[j]) {
                        double value = inst.value(j) - m_Means[j];

                        // We only need to do this if we want to
                        // scale the input
                        if (!m_checksTurnedOff) {
                            value /= m_StdDevs[j];
                        }
                        independent.set(i, column, value * sqrt_weight);
                        column++;
                    }
                }
            }
        }
    }

    // Compute coefficients (note that we have to treat the
    // intercept separately so that it doesn't get affected
    // by the ridge constant.)
    double[] coefficients = new double[numAttributes + 1];
    if (numAttributes > 0) {
        double[] coeffsWithoutIntercept = independent.regression(dependent, m_Ridge).getCoefficients();
        System.arraycopy(coeffsWithoutIntercept, 0, coefficients, 0, numAttributes);
    }
    coefficients[numAttributes] = m_ClassMean;

    // Convert coefficients into original scale
    int column = 0;
    for (int i = 0; i < m_TransformedData.numAttributes(); i++) {
        if ((i != m_TransformedData.classIndex()) && (selectedAttributes[i])) {
            // We only need to do this if we have scaled the
            // input.
            if (!m_checksTurnedOff) {
                coefficients[column] /= m_StdDevs[i];
            }

            // We have centred the input
            coefficients[coefficients.length - 1] -= coefficients[column] * m_Means[i];
            column++;
        }
    }
    return coefficients;
}
From source file:cyber009.ann.ANN.java
public void weightFindMatrix() {
    Matrix X = new Matrix(v.X);
    Matrix Y = new Matrix(v.D, 1);
    Matrix W = new Matrix(v.N, 1);
    for (int d = 0; d < v.D; d++) {
        Y.set(d, 0, v.TARGET[d]);
    }
    for (int n = 0; n < v.N; n++) {
        W.set(n, 0, 0.0);
        //W.set(n, 0, v.WEIGHT[n]);
    }
    Matrix temp = X.transpose().times(X);
    // System.out.println(temp.toString());
    temp = temp.inverse().times(X.transpose());
    // System.out.println(temp.toString());
    temp = temp.times(Y);
    //System.out.println(temp.toString());
    W = temp;
    for (int n = 0; n <= v.N; n++) {
        v.WEIGHT[n] = W.get(n, 0);
    }
    //System.out.println(YI.toString());
}
From source file:meka.classifiers.multilabel.PLST.java
License:Open Source License
/**
 * The method to transform the labels into another set of latent labels,
 * typically a compression method is used, e.g., Boolean matrix decomposition
 * in the case of MLC-BMaD, or matrix multiplication based on SVD for PLST.
 *
 * @param D the instances to transform into new instances with transformed labels. The
 * Instances consist of features and original labels.
 * @return The resulting instances. Instances consist of features and transformed labels.
 */
@Override
public Instances transformLabels(Instances D) throws Exception {
    Instances features = this.extractPart(D, false);
    Instances labels = this.extractPart(D, true);

    Matrix labelMatrix = MatrixUtils.instancesToMatrix(labels);

    // first, lets do the preprocessing as in the original implementation
    double[] averages = new double[labels.numAttributes()];
    for (int i = 0; i < labels.numAttributes(); i++) {
        double[] column = labels.attributeToDoubleArray(i);
        double sum = 0.0;
        for (int j = 0; j < column.length; j++) {
            if (column[j] == 1.0) {
                sum += 1.0;
            } else {
                sum += -1;
                // The algorithm needs 1/-1 coding, so let's
                // change the matrix here
                labelMatrix.set(j, i, -1.0);
            }
        }
        averages[i] = sum / column.length;
    }

    double[][] shiftMatrix = new double[1][labels.numAttributes()];
    shiftMatrix[0] = averages;

    // remember shift for prediction
    this.m_Shift = new Matrix(shiftMatrix);

    double[][] shiftTrainMatrix = new double[labels.numInstances()][labels.numAttributes()];
    for (int i = 0; i < labels.numInstances(); i++) {
        shiftTrainMatrix[i] = averages;
    }

    Matrix trainShift = new Matrix(shiftTrainMatrix);

    SingularValueDecomposition svd = new SingularValueDecomposition(labelMatrix.minus(trainShift));

    // The paper uses U here, but the implementation by the authors uses V, so
    // we used V here too.
    m_v = svd.getV();

    // remove columns so only size are left
    double[][] newArr = new double[m_v.getRowDimension()][this.getSize()];
    for (int i = 0; i < newArr.length; i++) {
        for (int j = 0; j < newArr[i].length; j++) {
            newArr[i][j] = m_v.getArray()[i][j];
        }
    }
    m_v = new Matrix(newArr);

    // now the multiplication (last step of the algorithm)
    Matrix compressed = MatrixUtils.instancesToMatrix(labels).times(this.m_v);

    // and transform it to Instances
    ArrayList<Attribute> attinfos = new ArrayList<Attribute>();
    for (int i = 0; i < compressed.getColumnDimension(); i++) {
        Attribute att = new Attribute("att" + i);
        attinfos.add(att);
    }

    // create pattern instances (also used in prediction) note: this is a regression
    // problem now, labels are not binary
    this.m_PatternInstances = new Instances("compressedlabels", attinfos, compressed.getRowDimension());

    // fill result Instances
    Instances result = Instances.mergeInstances(MatrixUtils.matrixToInstances(compressed, m_PatternInstances), features);

    result.setClassIndex(this.getSize());
    return result;
}
From source file:mulan.classifier.meta.MLCSSP.java
License:Open Source License
@Override
protected MultiLabelOutput makePredictionInternal(Instance instance) {
    try {
        Instance transformed = css.transformInstance(instance);
        MultiLabelOutput out = baseLearner.makePrediction(transformed);
        double[] confidences = out.getConfidences();

        // make response matrix
        Matrix conf = new Matrix(kappa, 1);
        for (int i = 0; i < kappa; i++) {
            conf.set(i, 0, confidences[i]);
        }

        // compute projected classifier response
        Matrix projectedResponse = conf.transpose().times(css.getProjectionMatrix());
        boolean[] projected_bipartition = new boolean[projectedResponse.getColumnDimension()];
        double[] projected_confidences = new double[projectedResponse.getColumnDimension()];
        for (int i = 0; i < projectedResponse.getColumnDimension(); i++) {
            projected_confidences[i] = projectedResponse.get(0, i);
            projected_bipartition[i] = (Math.ceil(projected_confidences[i]) == 1) ? true : false;
        }

        // return mlo
        MultiLabelOutput mlo = new MultiLabelOutput(projected_bipartition, projected_confidences);
        return mlo;
    } catch (InvalidDataException ex) {
        Logger.getLogger(MLCSSP.class.getName()).log(Level.SEVERE, null, ex);
    } catch (ModelInitializationException ex) {
        Logger.getLogger(MLCSSP.class.getName()).log(Level.SEVERE, null, ex);
    } catch (Exception ex) {
        Logger.getLogger(MLCSSP.class.getName()).log(Level.SEVERE, null, ex);
    }
    return null;
}
From source file:mulan.transformations.ColumnSubsetSelection.java
License:Open Source License
public MultiLabelInstances transform(MultiLabelInstances data, int kappa, long seed) {
    try {
        if (kappa >= data.getNumLabels()) {
            throw new MulanRuntimeException(
                    "Dimensionality reduction parameter should not exceed or be equal to the total count of labels!");
        }

        // integer indices of physical label assignments
        int[] labelIndices = data.getLabelIndices();
        int[] indices = new int[labelIndices.length];
        System.arraycopy(labelIndices, 0, indices, 0, labelIndices.length);

        // load label indicator matrix in a Matrix object
        double[][] datmatrix = new double[data.getDataSet().numInstances()][labelIndices.length];
        Matrix mat = new Matrix(datmatrix);
        for (int i = 0; i < data.getDataSet().numInstances(); i++) {
            Instance instance = data.getDataSet().instance(i);
            for (int j = 0; j < labelIndices.length; j++) {
                mat.set(i, j, Double.parseDouble(instance.toString(labelIndices[j])));
                //DEBUG: System.out.print("" + Double.parseDouble(instance.toString(labelIndices[j])) + ",");
            }
        }

        // make private copy of the label matrix
        this.Y = mat;

        // compute eigenvalue analysis of label indicator matrix
        SingularValueDecomposition svd = new SingularValueDecomposition(mat);
        //DEBUG: System.out.println("rows = " + svd.getV().getRowDimension() + ", cols = " + svd.getV().getColumnDimension());
        assert (svd.getV().getRowDimension() == svd.getV().getColumnDimension());
        Matrix rVec = svd.getV();
        Matrix Vk = new Matrix(new double[svd.getV().getRowDimension()][kappa]);

        // snippet (2)
        for (int i = 0; i < kappa; i++) {
            for (int j = 0; j < svd.getV().getColumnDimension(); j++) {
                Vk.set(j, i, rVec.get(i, j));
            }
        }

        // compute column selection probabilities
        double[] selectionProbabilities = new double[Vk.getRowDimension()];
        double[] selectionProbabilitiesCDF = new double[Vk.getRowDimension()];
        for (int i = 0; i < Vk.getRowDimension(); i++) {
            selectionProbabilities[i] = 0.0;
            for (int j = 0; j < kappa; j++) {
                selectionProbabilities[i] += Math.pow(Vk.get(i, j), 2);
            }
            selectionProbabilities[i] = Math.sqrt(selectionProbabilities[i]);
        }

        // normalize probabilities
        double psum = 0.0;
        for (int i = 0; i < Vk.getRowDimension(); i++) {
            psum += selectionProbabilities[i];
            //System.out.println("psum = " + psum);
        }
        //System.out.println("psum = " + psum);
        //assert (psum != 0 && psum == 1.0); // must be non-zero and unitary
        for (int i = 0; i < Vk.getRowDimension(); i++) {
            selectionProbabilities[i] /= psum;
        }
        psum = 0.0;
        for (int i = 0; i < Vk.getRowDimension(); i++) {
            psum += selectionProbabilities[i];
            selectionProbabilitiesCDF[i] = psum;
        }

        // add selected columns on a linked list
        sampledIndiceSet = new java.util.HashSet();

        // run column-sampling loop
        int sampling_count = 0;
        Random generator = new Random(seed);
        while (sampledIndiceSet.size() < kappa) { // ...loop until knapsack gets filled...
            // pick a random number
            //DEBUG:
            //double roulette = generator.nextDouble() * 0.5;
            double roulette = generator.nextDouble();

            // seek closest match according to sampling probabilities
            int closest_match = -1;

            // iterate label cols
            for (int i = 0; i < Vk.getRowDimension(); i++) {
                if (roulette < selectionProbabilitiesCDF[i]) { // ...spot a possible match...
                    // ...if so, select and quit scope...
                    closest_match = i; // BEWARE! "i" is an index over the label enumeration, not an ordering index!
                    break;
                }
            }

            // if we stepped on the flag, something serious is going on!
            assert (closest_match != -1);

            // see if column was selected; if not, add it
            if (!sampledIndiceSet.contains((Object) closest_match)) {
                sampledIndiceSet.add((Object) closest_match);
                //System.out.println("DEBUG(CSSP): Added column " + closest_match + " to the sampled column set!");
            }
            sampling_count += 1;
        }
        System.out.println("Sampling loop completed in " + sampling_count + " runs.");

        // compute indices-to-remove array
        indicesToRemove = new int[labelIndices.length - sampledIndiceSet.size()];

        // compute all **PHYSICAL** (not VIRTUAL) indices of label columns for CSSP to remove
        int idx = 0;
        for (int i = 0; i < labelIndices.length; i++) {
            if (!sampledIndiceSet.contains((Object) i)) {
                indicesToRemove[idx] = indices[i];
                idx += 1;
            }
        }

        // apply CSSP: select columns to remove
        int[] selectedIndicesObj = indicesToRemove.clone();
        selectedIndicesInt = new int[selectedIndicesObj.length];
        for (int i = 0; i < selectedIndicesObj.length; i++) {
            selectedIndicesInt[i] = (int) selectedIndicesObj[i];
        }

        // compute Moore-Penrose pseudo-inverse matrix of the column-reduced label indicator matrix
        double[][] datmatrix2 = new double[data.getDataSet().numInstances()][labelIndices.length - selectedIndicesInt.length];
        Matrix matC = new Matrix(datmatrix2);
        //DEBUG: System.out.println("Selecting only " + matC.getColumnDimension() + " columns; removing " + selectedIndicesInt.length + " columns out of an original total of " + data.getLabelIndices().length + " labels!");

        // compute indices to keep
        java.util.LinkedList<Integer> indicesToKeep = new java.util.LinkedList();
        for (int i = 0; i < labelIndices.length; i++) {
            boolean keep = true;
            // see if this col has to be removed
            for (int k = 0; k < selectedIndicesInt.length; k++) {
                if (selectedIndicesInt[k] == labelIndices[i]) {
                    keep = false;
                    break;
                }
            }
            // add if we actually should keep this...
            if (keep) {
                indicesToKeep.add(labelIndices[i]);
            }
        }
        assert (indicesToKeep.size() == matC.getColumnDimension());

        for (int i = 0; i < matC.getRowDimension(); i++) {
            // get data instance
            Instance instance = data.getDataSet().instance(i);
            // replicate data from ALL columns that WOULD not be removed by CSSP
            for (int j = 0; j < matC.getColumnDimension(); j++) {
                // get label indice
                int corrIdx = (int) indicesToKeep.get(j);
                // update matC
                matC.set(i, j, Double.parseDouble(instance.toString(corrIdx)));
            }
        }
        //DEBUG: System.out.println("matC rows = " + matC.getRowDimension() + ", cols = " + matC.getColumnDimension() + "\n data original label cols # = " + data.getLabelIndices().length);

        // make private copy of projection matrices
        // Moore-Penrose pseudo-inverse of the label matrix matC
        // see http://robotics.caltech.edu/~jwb/courses/ME115/handouts/pseudo.pdf for an SVD-based workaround for MP-inverse
        // Moore-Penrose pseudoinverse computation based on Singular Value Decomposition (SVD)
        /*
        SingularValueDecomposition decomp = Vk.svd();
        Matrix S = decomp.getS();
        Matrix Scross = new Matrix(selectedIndicesInt.length, selectedIndicesInt.length);
        for (int i = 0; i < selectedIndicesInt.length; i++) {
            for (int j = 0; j < selectedIndicesInt.length; j++) {
                if (i == j) {
                    if (S.get(i, j) == 0) {
                        Scross.set(i, j, 0.0);
                    } else {
                        Scross.set(i, j, 1 / S.get(i, j));
                    }
                } else {
                    Scross.set(i, j, 0.0);
                }
            }
        }
        this.Yc = decomp.getV().times(Scross).times(decomp.getU().transpose());
        */

        // DEBUG: traditional way of computing the Moore-Penrose pseudoinverse
        if (matC.getRowDimension() >= matC.getColumnDimension()) {
            this.Yc = ((matC.transpose().times(matC)).inverse()).times(matC.transpose());
        } else {
            this.Yc = matC.transpose().times((matC.times(matC.transpose()).inverse()));
        }
        //System.out.println("Yc rows: " + Yc.getRowDimension() + "\nYc cols: " + Yc.getColumnDimension() + "\n Y rows: " + Y.getRowDimension() + "\nY cols: " + Y.getColumnDimension());

        // compute projection matrix
        this.ProjectionMatrix = Yc.times(Y);

        // add sampled indices to Remove object
        remove = new Remove();
        remove.setAttributeIndicesArray(selectedIndicesInt);
        remove.setInvertSelection(false);
        remove.setInputFormat(data.getDataSet());

        // apply remove filter on the labels
        transformed = Filter.useFilter(data.getDataSet(), remove);
        this.sampledIndicesObj = indicesToKeep.toArray();
        return data.reintegrateModifiedDataSet(transformed);
    } catch (Exception ex) {
        // do nothing
        //Logger.getLogger(BinaryRelevanceTransformation.class.getName()).log(Level.SEVERE, null, ex);
        return null;
    }
}
From source file:net.sf.jclal.activelearning.singlelabel.querystrategy.VarianceReductionQueryStrategy.java
License:Open Source License
/**
 * Compute the product among the matrix and the vector
 *
 * @param current The matrix.
 * @param vectorX The vector.
 */
public void productVector(Matrix current, double[] vectorX) {
    for (int m = 0; m < vectorX.length; m++) {
        for (int nn = 0; nn < vectorX.length; nn++) {
            current.set(m, nn, vectorX[m] * vectorX[nn]);
        }
    }
}
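The method overwrites current with the outer product of vectorX with itself, so current must already be sized vectorX.length x vectorX.length. A minimal usage sketch (fragment only; strategy is a hypothetical instance of this query strategy class, and the vector values are illustrative):

double[] x = { 1.0, 2.0, 3.0 };
Matrix outer = new Matrix(x.length, x.length);  // square matrix to receive the outer product
strategy.productVector(outer, x);
// outer now holds x * x^T, e.g. outer.get(0, 2) == 3.0 and outer.get(2, 2) == 9.0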
From source file:org.knime.knip.suise.node.boundarymodel.BoundaryModel.java
License:Open Source License
private double calcLMDL(double epsilon) {
    // create sample matrix
    Matrix V = new Matrix(m_contourData.numFeatures(), m_contourData.numVectors());
    double[] vec;
    for (int i = 0; i < m_contourData.numVectors(); i++) {
        vec = m_contourData.getVector(i);
        for (int j = 0; j < vec.length; j++) {
            V.set(j, i, vec[j]);
        }
    }

    // estimate of the covariance matrix
    Matrix W = V.times(V.transpose());
    // times() returns a new matrix, so keep the scaled result
    W = W.times(m_contourData.numFeatures() / (epsilon * epsilon * m_contourData.numVectors()));
    W = Matrix.identity(m_contourData.numFeatures(), m_contourData.numFeatures()).plus(W);
    return Utils.log2(W.det()) * (m_contourData.numFeatures() + m_contourData.numVectors()) / 2;
}
From source file:org.knime.knip.suise.node.boundarymodel.contourdata.ContourCluster.java
License:Open Source License
private double calcLMDL() {
    // create sample matrix
    Matrix V = new Matrix(m_cdata.numFeatures(), m_samples.size());
    double[] vec;
    for (int i = 0; i < m_samples.size(); i++) {
        vec = m_cdata.get(m_samples.get(i)[0], m_samples.get(i)[1]);
        for (int j = 0; j < vec.length; j++) {
            V.set(j, i, vec[j]);
        }
    }
    double epsilon = 5;

    // estimate of the covariance matrix
    Matrix W = V.times(V.transpose());
    // times() returns a new matrix, so keep the scaled result
    W = W.times(m_cdata.numFeatures() / (epsilon * epsilon * m_samples.size()));
    W = Matrix.identity(m_cdata.numFeatures(), m_cdata.numFeatures()).plus(W);
    return Utils.log2(W.det()) * (m_cdata.numFeatures() + m_samples.size()) / 2;
}