Example usage for weka.core Matrix Matrix

List of usage examples for weka.core Matrix Matrix

Introduction

On this page you can find example usages of the weka.core Matrix constructor Matrix(Reader r).

Prototype

public Matrix(Reader r) throws Exception 

Source Link

Document

Reads a matrix from a reader.

Usage

From source file:data.generation.target.utils.PrincipalComponents.java

License: Open Source License

/**
 * Builds the PCA attribute constructor from the supplied training data.
 * Pipeline: copy the data, replace missing values, binarize nominal
 * attributes, delete useless columns and the class column, then
 * eigen-decompose the covariance matrix and cache the sorted
 * eigenvalues/eigenvectors for later transformation.
 *
 * @param data the training instances
 * @throws Exception if a filter rejects the data or the capabilities test fails
 */
private void buildAttributeConstructor(Instances data) throws Exception {
    // reset any state left over from a previous build
    m_eigenvalues = null;
    m_outputNumAtts = -1;
    m_attributeFilter = null;
    m_nominalToBinFilter = null;
    m_sumOfEigenValues = 0.0;
    m_trainInstances = new Instances(data);

    // make a copy of the training data so that we can get the class
    // column to append to the transformed data (if necessary)
    m_trainHeader = new Instances(m_trainInstances, 0);

    m_replaceMissingFilter = new ReplaceMissingValues();
    m_replaceMissingFilter.setInputFormat(m_trainInstances);
    m_trainInstances = Filter.useFilter(m_trainInstances, m_replaceMissingFilter);

    /*if (m_normalize) {
      m_normalizeFilter = new Normalize();
      m_normalizeFilter.setInputFormat(m_trainInstances);
      m_trainInstances = Filter.useFilter(m_trainInstances, m_normalizeFilter);
    } */

    m_nominalToBinFilter = new NominalToBinary();
    m_nominalToBinFilter.setInputFormat(m_trainInstances);
    m_trainInstances = Filter.useFilter(m_trainInstances, m_nominalToBinFilter);

    // delete any attributes with only one distinct value or are all missing
    Vector deleteCols = new Vector();
    for (int i = 0; i < m_trainInstances.numAttributes(); i++) {
        if (m_trainInstances.numDistinctValues(i) <= 1) {
            deleteCols.addElement(new Integer(i));
        }
    }

    if (m_trainInstances.classIndex() >= 0) {
        // get rid of the class column
        m_hasClass = true;
        m_classIndex = m_trainInstances.classIndex();
        deleteCols.addElement(new Integer(m_classIndex));
    }

    // remove columns from the data if necessary
    if (deleteCols.size() > 0) {
        m_attributeFilter = new Remove();
        int[] todelete = new int[deleteCols.size()];
        for (int i = 0; i < deleteCols.size(); i++) {
            todelete[i] = ((Integer) (deleteCols.elementAt(i))).intValue();
        }
        m_attributeFilter.setAttributeIndicesArray(todelete);
        m_attributeFilter.setInvertSelection(false);
        m_attributeFilter.setInputFormat(m_trainInstances);
        m_trainInstances = Filter.useFilter(m_trainInstances, m_attributeFilter);
    }

    // can evaluator handle the processed data ? e.g., enough attributes?
    getCapabilities().testWithFail(m_trainInstances);

    m_numInstances = m_trainInstances.numInstances();
    m_numAttribs = m_trainInstances.numAttributes();

    //fillCorrelation();
    fillCovariance();

    double[] d = new double[m_numAttribs];
    double[][] v = new double[m_numAttribs][m_numAttribs];

    // NOTE(review): m_correlation is filled by fillCovariance() above —
    // the field name presumably predates the switch from correlation to
    // covariance; confirm against the field's declaration.
    Matrix corr = new Matrix(m_correlation);
    corr.eigenvalueDecomposition(v, d); // v: eigenvectors, d: eigenvalues
    m_eigenvectors = (double[][]) v.clone();
    m_eigenvalues = (double[]) d.clone();

    /*for (int i = 0; i < m_numAttribs; i++) {
      for (int j = 0; j < m_numAttribs; j++) {
        System.err.println(v[i][j] + " ");
      }
      System.err.println(d[i]);
    } */

    // any eigenvalues less than 0 are not worth anything --- change to 0
    for (int i = 0; i < m_eigenvalues.length; i++) {
        if (m_eigenvalues[i] < 0) {
            m_eigenvalues[i] = 0.0;
        }
    }
    // m_sortedEigens holds indices in ascending eigenvalue order
    m_sortedEigens = Utils.sort(m_eigenvalues);
    m_sumOfEigenValues = Utils.sum(m_eigenvalues);

    m_transformedFormat = setOutputFormat();
    if (m_transBackToOriginal) {
        m_originalSpaceFormat = setOutputFormatOriginal();

        // new ordered eigenvector matrix
        int numVectors = (m_transformedFormat.classIndex() < 0) ? m_transformedFormat.numAttributes()
                : m_transformedFormat.numAttributes() - 1;

        double[][] orderedVectors = new double[m_eigenvectors.length][numVectors + 1];

        // try converting back to the original space
        // copy the numVectors largest eigenvectors into columns 1..numVectors
        // of orderedVectors (column 0 is left unused — presumably reserved;
        // verify against the transform code that consumes m_eTranspose)
        for (int i = m_numAttribs - 1; i > (m_numAttribs - numVectors - 1); i--) {
            for (int j = 0; j < m_numAttribs; j++) {
                orderedVectors[j][m_numAttribs - i] = m_eigenvectors[j][m_sortedEigens[i]];
            }
        }

        // transpose the matrix
        int nr = orderedVectors.length;
        int nc = orderedVectors[0].length;
        m_eTranspose = new double[nc][nr];
        for (int i = 0; i < nc; i++) {
            for (int j = 0; j < nr; j++) {
                m_eTranspose[i][j] = orderedVectors[j][i];
            }
        }
    }
}

From source file:rbms.RBM.java

License:Open Source License

/**
 * Hidden Activation Probability - returns P(Z|X).
 * A Bias column added (and removed) automatically.
 * @param   X_   X (without bias)/* w w w. j  a va  2 s  . c o  m*/
 * @return   P(Z|X) 
 */
public double[][] prob_Z(double X_[][]) {
    Matrix X = new Matrix(MatrixUtils.addBias(X_));
    return MatrixUtils.removeBias(prob_Z(X).getArray());
}

From source file:rbms.RBM.java

License:Open Source License

/**
 * Visible Activation Probability - returns P(X|Z).
 * A bias column is assumed to be included in Z.
 * @param   Z   z (bias included)
 * @return  P(X|Z)
 */
public Matrix prob_X(Matrix Z) {
    // sigmoid activation of Z * W^T
    double[][] activations = MatrixUtils.sigma(Z.times(W.transpose()).getArray());
    Matrix X = new Matrix(activations);
    // fix the bias: force the first column back to 1.0
    MatrixUtils.fillCol(X.getArray(), 0, 1.0);
    return X;
}

From source file:rbms.RBM.java

License:Open Source License

/**
 * Make W matrix of dimensions d+1 and h+1 (+1 for biases).
 * Initialized from ~N(0,0.2) (seems to work better than ~N(0.0.01)) -- except biases (set to 0)
 * @param    d   number of rows    (visible units)
 * @param   h   number of columns (hidden units)
 * @param   r    for getting random rumbers
 * @return   W/*from www  .j  a v  a  2  s. c o m*/
 */
public static Matrix makeW(int d, int h, Random r) {
    double W_[][] = MatrixUtils.multiply(MatrixUtils.randn(d + 1, h + 1, r), 0.20); // ~ N(0.0,0.01)
    MatrixUtils.fillRow(W_, 0, 0.0); // set the first row to 0 for bias
    MatrixUtils.fillCol(W_, 0, 0.0); // set the first col to 0 for bias
    return new Matrix(W_);
}

From source file:rbms.RBM.java

License:Open Source License

/**
 * Update - On raw data (with no bias column).
 * Appends the bias column and delegates to update(Matrix).
 * @param   X_   raw double[][] data (with no bias column)
 */
public void update(double X_[][]) {
    update(new Matrix(MatrixUtils.addBias(X_)));
}

From source file:rbms.RBM.java

License:Open Source License

/**
 * Update - On a single raw instance (with no bias column).
 * Wraps the instance as a one-row matrix, appends the bias column,
 * and delegates to update(Matrix, double).
 * @param   x_   raw double[] data (with no bias column)
 * @param   s    multiply the gradient by this scalar
 */
public void update(double x_[], double s) {
    double[][] asRow = new double[][] { x_ };
    update(new Matrix(MatrixUtils.addBias(asRow)), s);
}

From source file:rbms.RBM.java

License:Open Source License

/**
 * Train - Setup and train the RBM on X, over m_E epochs.
 * @param   X_   X
 * @return  the error (@TODO unnecessary)
 */
public double train(double X_[][]) throws Exception {

    initWeights(X_);

    final Matrix X = new Matrix(MatrixUtils.addBias(X_));

    // previous epoch's error; only consulted when m_V is set
    double prevError = Double.MAX_VALUE;

    // train for m_E epochs
    for (int epoch = 0; epoch < m_E; epoch++) {

        if (m_V) {
            // early stopping: break out as soon as the error stops decreasing
            final double currentError = calculateError(X);
            if (prevError < currentError) {
                System.out.println("broken out @" + epoch);
                break;
            }
            prevError = currentError;
        }

        // the update
        update(X);
    }

    return prevError;
}

From source file:rbms.RBM.java

License:Open Source License

/**
 * Train - Setup and batch-train the RBM on X.
 * <br>
 * TODO, above function train(X_) could really be trained with train(X_,N), so, should share code with train(X)
 * <br>
 * TODO, divide gradient by the size of the batch! (doing already? .. no)
 * @param   X_         X
 * @param   batchSize   the batch size
 * @return  1.0 (no error is tracked in batch mode)
 * @throws  Exception if weight initialization or an update fails
 */
public double train(double X_[][], int batchSize) throws Exception {

    final int N = X_.length; // N (number of instances; addBias adds a column, not rows)

    // BUGFIX: delegate to full-batch training *before* adding the bias
    // column. Previously the shortcut ran after addBias, so train(X_)
    // appended a second bias column and initWeights was run twice (the
    // second time on already-biased data).
    if (batchSize == N)
        return train(X_);

    initWeights(X_);

    X_ = MatrixUtils.addBias(X_);

    int N_n = (int) Math.ceil(N * 1. / batchSize); // number of batches

    Matrix X_n[] = new Matrix[N_n];
    for (int n = 0, i = 0; n < N; n += batchSize, i++) {
        // @TODO, could save some small-time memory/speed here
        // each batch is a row-slice of X_, last batch may be short
        X_n[i] = new Matrix(Arrays.copyOfRange(X_, n, Math.min(n + batchSize, N)));
    }

    for (int e = 0; e < m_E; e++) {

        // @TODO could be random, see function below
        // scale each batch's gradient by 1/N_n
        for (Matrix X : X_n) {
            update(X, 1. / N_n);
        }
    }

    return 1.0;
}

From source file:rbms.RBM.java

License:Open Source License

/**
 * Train - Setup and batch-train the RBM on X, with some random sampling involved.
 * <br>/*from   ww w.j  a  va 2s  .c  om*/
 * TODO should share code with train(X)
 * @param   X_         X
 * @param   batchSize   the batch size
 * @param   r         the randomness
 */
public double train(double X_[][], int batchSize, Random r) throws Exception {

    initWeights(X_);

    X_ = MatrixUtils.addBias(X_);
    int N = X_.length; // N
    int N_n = (int) Math.ceil(N * 1. / batchSize);// Number of batches

    // @TODO select the batches randomly at each epoch
    Matrix X_n[] = new Matrix[N_n];
    for (int n = 0, i = 0; n < N; n += batchSize, i++) {
        X_n[i] = new Matrix(Arrays.copyOfRange(X_, n, Math.min(n + batchSize, N)));
    }

    for (int e = 0; e < m_E; e++) {
        for (int i = 0; i < N_n; i++) {
            update(X_n[r.nextInt(N_n)]);
        }
    }

    return 1.0;
}