Example usage for weka.core.matrix Matrix get

List of usage examples for weka.core.matrix Matrix get

Introduction

On this page you can find example usage for weka.core.matrix Matrix get.

Prototype

public double get(int i, int j) 

Source Link

Document

Get a single element.

Usage

From source file:adams.core.discovery.genetic.WekaGeneticHelper.java

License:Open Source License

/**
 * Encodes a weka Matrix as a single concatenated bit string, one
 * discretized value per matrix cell (row-major order).
 *
 * @param ina     the matrix to encode
 * @param min     the minimum of the value range used for discretization
 * @param max     the maximum of the value range used for discretization
 * @param numBits the number of bits per encoded value
 * @param splits  the number of splits used for discretization
 * @param rows    the expected number of rows (a warning is logged on mismatch)
 * @param columns the expected number of columns (a warning is logged on mismatch)
 * @return the concatenated bit string
 */
public static String matrixToBits(Matrix ina, double min, double max, int numBits, int splits, int rows,
        int columns) {
    int numRows = ina.getRowDimension();
    int numCols = ina.getColumnDimension();

    // sanity checks: only warn, the actual matrix dimensions win
    if (rows != numRows)
        LOGGER.warning("[matrixToBits] rows parameter differs from rows in matrix: " + rows + " != "
                + numRows);
    if (columns != numCols)
        LOGGER.warning("[matrixToBits] columns parameter differs from columns in matrix: " + columns + " != "
                + numCols);

    StringBuilder result = new StringBuilder();
    for (int r = 0; r < numRows; r++)
        for (int c = 0; c < numCols; c++)
            result.append(doubleToBits(ina.get(r, c), min, max, numBits, splits));

    return result.toString();
}

From source file:adams.data.instancesanalysis.pls.MatrixHelper.java

License:Open Source License

/**
 * Turns the matrix into a spreadsheet.
 *
 * @param matrix    the matrix to convert, may be null
 * @param colPrefix the prefix for the column names
 * @return the generated spreadsheet, or null if the matrix was null
 */
public static SpreadSheet matrixToSpreadSheet(Matrix matrix, String colPrefix) {
    if (matrix == null)
        return null;

    SpreadSheet result = new DefaultSpreadSheet();

    // header row: one column per matrix column, names are 1-based
    Row header = result.getHeaderRow();
    for (int col = 0; col < matrix.getColumnDimension(); col++)
        header.addCell("" + col).setContent(colPrefix + (col + 1));

    // data rows: copy the matrix cell by cell
    for (int rowIdx = 0; rowIdx < matrix.getRowDimension(); rowIdx++) {
        Row dataRow = result.addRow();
        for (int col = 0; col < matrix.getColumnDimension(); col++)
            dataRow.addCell("" + col).setContent(matrix.get(rowIdx, col));
    }

    return result;
}

From source file:adams.data.instancesanalysis.pls.PLS1.java

License:Open Source License

/**
 * Performs predictions on the data./*www  .j a  va2 s.c  o m*/
 *
 * @param data   the input data
 * @return      the predicted data
 */
protected Instances predict(Instances data) {
    Instances result;
    Instances tmpInst;
    int i;
    int j;
    Matrix x;
    Matrix X;
    Matrix T;
    Matrix t;

    result = new Instances(getOutputFormat());

    for (i = 0; i < data.numInstances(); i++) {
        // work on each instance
        tmpInst = new Instances(data, 0);
        tmpInst.add((Instance) data.instance(i).copy());
        x = MatrixHelper.getX(tmpInst);
        X = new Matrix(1, getNumComponents());
        T = new Matrix(1, getNumComponents());

        for (j = 0; j < getNumComponents(); j++) {
            MatrixHelper.setVector(x, X, j);
            // 1. step: tj = xj * wj
            t = x.times(MatrixHelper.getVector(m_W, j));
            MatrixHelper.setVector(t, T, j);
            // 2. step: xj+1 = xj - tj*pj^T (tj is 1x1 matrix!)
            x = x.minus(MatrixHelper.getVector(m_P, j).transpose().times(t.get(0, 0)));
        }

        switch (m_PredictionType) {
        case ALL:
            tmpInst = MatrixHelper.toInstances(getOutputFormat(), T, T.times(m_b_hat));
            break;
        case NONE:
        case EXCEPT_CLASS:
            tmpInst = MatrixHelper.toInstances(getOutputFormat(), T, MatrixHelper.getY(tmpInst));
            break;
        default:
            throw new IllegalStateException("Unhandled prediction type: " + m_PredictionType);
        }

        result.add(tmpInst.instance(0));

    }

    return result;
}

From source file:adams.data.instancesanalysis.pls.SIMPLS.java

License:Open Source License

/**
 * Transforms the data, initializes if necessary.
 * <p>
 * On the first call the SIMPLS weight matrix m_W and regression matrix m_B
 * are computed from the data via the deflation loop below and cached;
 * later calls only project the data using the cached matrices.
 *
 * @param data   the data to use
 * @param params not used by this implementation
 * @return the transformed instances
 * @throws Exception if the transformation fails
 */
protected Instances doTransform(Instances data, Map<String, Object> params) throws Exception {
    Matrix A, A_trans;
    Matrix M;
    Matrix X, X_trans;
    Matrix X_new;
    Matrix Y, y;
    Matrix C, c;
    Matrix Q, q;
    Matrix W, w;
    Matrix P, p, p_trans;
    Matrix v, v_trans;
    Matrix T;
    Instances result;
    int h;

    if (!isInitialized()) {
        // init: A = X'Y, M = X'X, C starts as identity
        X = MatrixHelper.getX(data);
        X_trans = X.transpose();
        Y = MatrixHelper.getY(data);
        A = X_trans.times(Y);
        M = X_trans.times(X);
        C = Matrix.identity(data.numAttributes() - 1, data.numAttributes() - 1);
        W = new Matrix(data.numAttributes() - 1, getNumComponents());
        P = new Matrix(data.numAttributes() - 1, getNumComponents());
        Q = new Matrix(1, getNumComponents());

        // SIMPLS deflation loop: one component per iteration
        for (h = 0; h < getNumComponents(); h++) {
            // 1. qh as dominant EigenVector of Ah'*Ah
            A_trans = A.transpose();
            q = MatrixHelper.getDominantEigenVector(A_trans.times(A));

            // 2. wh=Ah*qh, ch=wh'*Mh*wh, wh=wh/sqrt(ch), store wh in W as column
            w = A.times(q);
            c = w.transpose().times(M).times(w);
            w = w.times(1.0 / StrictMath.sqrt(c.get(0, 0)));
            MatrixHelper.setVector(w, W, h);

            // 3. ph=Mh*wh, store ph in P as column
            p = M.times(w);
            p_trans = p.transpose();
            MatrixHelper.setVector(p, P, h);

            // 4. qh=Ah'*wh, store qh in Q as column
            q = A_trans.times(w);
            MatrixHelper.setVector(q, Q, h);

            // 5. vh=Ch*ph, vh=vh/||vh||
            v = C.times(p);
            MatrixHelper.normalizeVector(v);
            v_trans = v.transpose();

            // 6. Ch+1=Ch-vh*vh', Mh+1=Mh-ph*ph'
            C = C.minus(v.times(v_trans));
            M = M.minus(p.times(p_trans));

            // 7. Ah+1=ChAh (actually Ch+1)
            A = C.times(A);
        }

        // finish: optionally slim W, then cache model matrices
        if (getNumCoefficients() > 0)
            slim(W);
        m_W = W;
        T = X.times(m_W);
        X_new = T;
        m_B = W.times(Q.transpose());

        switch (m_PredictionType) {
        case ALL:
            // predicted class values from the fitted model
            y = T.times(P.transpose()).times(m_B);
            break;
        case NONE:
        case EXCEPT_CLASS:
            // keep the original class values
            y = MatrixHelper.getY(data);
            break;
        default:
            throw new IllegalStateException("Unhandled prediction type: " + m_PredictionType);
        }

        result = MatrixHelper.toInstances(getOutputFormat(), X_new, y);
    } else {
        // already initialized: project with the cached weight matrix
        X = MatrixHelper.getX(data);
        X_new = X.times(m_W);

        switch (m_PredictionType) {
        case ALL:
            y = X.times(m_B);
            break;
        case NONE:
        case EXCEPT_CLASS:
            y = MatrixHelper.getY(data);
            break;
        default:
            throw new IllegalStateException("Unhandled prediction type: " + m_PredictionType);
        }

        result = MatrixHelper.toInstances(getOutputFormat(), X_new, y);
    }

    return result;
}

From source file:adams.data.instancesanalysis.PLS.java

License:Open Source License

/**
 * Performs the actual analysis.
 *
 * @param data   the data to analyze
 * @return      null if successful, otherwise error message
 * @throws Exception   if analysis fails
 */
@Override
protected String doAnalyze(Instances data) throws Exception {
    m_Loadings = null;
    m_Scores = null;

    data = new Instances(data);
    data.deleteWithMissingClass();

    // optionally restrict the attributes to the configured range
    if (!m_AttributeRange.isAllRange()) {
        if (isLoggingEnabled())
            getLogger().info("Filtering attribute range: " + m_AttributeRange.getRange());
        Remove remove = new Remove();
        remove.setAttributeIndicesArray(m_AttributeRange.getIntIndices());
        remove.setInvertSelection(true);
        remove.setInputFormat(data);
        data = Filter.useFilter(data, remove);
    }
    if (isLoggingEnabled())
        getLogger().info("Performing PLS...");

    // run the PLS filter
    weka.filters.supervised.attribute.PLS pls = new weka.filters.supervised.attribute.PLS();
    pls.setAlgorithm(m_Algorithm);
    pls.setInputFormat(data);
    data = Filter.useFilter(data, pls);

    // convert the transformed data to a spreadsheet
    WekaInstancesToSpreadSheet conv = new WekaInstancesToSpreadSheet();
    conv.setInput(data);
    String result = conv.convert();
    if (result == null) {
        SpreadSheet transformed = (SpreadSheet) conv.getOutput();
        // build the loadings spreadsheet from the PLS loadings matrix
        Matrix matrix = pls.getLoadings();
        SpreadSheet loadings = new DefaultSpreadSheet();
        for (int col = 0; col < matrix.getColumnDimension(); col++)
            loadings.getHeaderRow().addCell("L-" + (col + 1)).setContentAsString("Loading-" + (col + 1));
        for (int rowIdx = 0; rowIdx < matrix.getRowDimension(); rowIdx++) {
            Row row = loadings.addRow();
            for (int col = 0; col < matrix.getColumnDimension(); col++)
                row.addCell("L-" + (col + 1)).setContent(matrix.get(rowIdx, col));
        }
        m_Loadings = loadings;
        m_Scores = transformed;
    }

    return result;
}

From source file:cyber009.ann.ANN.java

/**
 * Computes the weight vector via ordinary least squares by solving the
 * normal equations W = (X^T X)^{-1} X^T Y and stores the solution in
 * {@code v.WEIGHT}.
 * <p>
 * NOTE(review): the original copy loop ran {@code n <= v.N}, which reads
 * one row past a {@code v.N x 1} solution matrix; the loop below is
 * bounded by the actual solution size instead.
 */
public void weightFindMatrix() {
    Matrix X = new Matrix(v.X);
    Matrix Y = new Matrix(v.D, 1);

    // target vector
    for (int d = 0; d < v.D; d++) {
        Y.set(d, 0, v.TARGET[d]);
    }

    // W = (X^T X)^{-1} X^T Y
    // (the original also zero-initialized a W matrix here, but it was
    // immediately overwritten by the solution, so that dead code is gone)
    Matrix W = X.transpose().times(X).inverse().times(X.transpose()).times(Y);

    // copy the solution into the weight array, bounded by both the
    // solution's row count and the destination array's length
    int numWeights = Math.min(W.getRowDimension(), v.WEIGHT.length);
    for (int n = 0; n < numWeights; n++) {
        v.WEIGHT[n] = W.get(n, 0);
    }
}

From source file:mulan.classifier.meta.MLCSSP.java

License:Open Source License

/**
 * Makes a prediction for a single instance: transforms the instance,
 * obtains the base learner's confidences in the reduced (kappa-dim)
 * label space, and projects them back onto the full label space.
 *
 * @param instance the instance to predict
 * @return the multi-label output, or null if prediction failed
 */
@Override
protected MultiLabelOutput makePredictionInternal(Instance instance) {
    try {

        Instance transformed = css.transformInstance(instance);
        MultiLabelOutput out = baseLearner.makePrediction(transformed);

        double[] confidences = out.getConfidences();

        // make response matrix (kappa x 1)
        Matrix conf = new Matrix(kappa, 1);
        for (int i = 0; i < kappa; i++) {
            conf.set(i, 0, confidences[i]);
        }

        // compute projected classifier response
        Matrix projectedResponse = conf.transpose().times(css.getProjectionMatrix());

        boolean[] projected_bipartition = new boolean[projectedResponse.getColumnDimension()];
        double[] projected_confidences = new double[projectedResponse.getColumnDimension()];

        for (int i = 0; i < projectedResponse.getColumnDimension(); i++) {
            projected_confidences[i] = projectedResponse.get(0, i);
            // relevant when the projected confidence rounds up to exactly 1
            // (redundant "? true : false" ternary removed)
            projected_bipartition[i] = Math.ceil(projected_confidences[i]) == 1;
        }

        // return mlo
        return new MultiLabelOutput(projected_bipartition, projected_confidences);

    } catch (InvalidDataException ex) {
        Logger.getLogger(MLCSSP.class.getName()).log(Level.SEVERE, null, ex);

    } catch (ModelInitializationException ex) {
        Logger.getLogger(MLCSSP.class.getName()).log(Level.SEVERE, null, ex);

    } catch (Exception ex) {
        Logger.getLogger(MLCSSP.class.getName()).log(Level.SEVERE, null, ex);
    }

    return null;
}

From source file:mulan.transformations.ColumnSubsetSelection.java

License:Open Source License

/**
 * Applies column subset selection (CSSP) to the label space: samples
 * kappa label columns with probabilities derived from the SVD of the
 * label indicator matrix, removes the unsampled labels, and computes the
 * projection matrix used to map reduced predictions back to all labels.
 *
 * @param data  the multi-label data to transform
 * @param kappa the target number of label columns (must be less than the
 *              total number of labels)
 * @param seed  the seed for the column-sampling random generator
 * @return the transformed dataset, or null if the transformation failed
 */
public MultiLabelInstances transform(MultiLabelInstances data, int kappa, long seed) {
    try {

        if (kappa >= data.getNumLabels()) {
            throw new MulanRuntimeException(
                    "Dimensionality reduction parameter should not exceed or be equal to the total count of labels!");
        }

        // integer indices of physical label assignments
        int[] labelIndices = data.getLabelIndices();
        int[] indices = new int[labelIndices.length];

        System.arraycopy(labelIndices, 0, indices, 0, labelIndices.length);

        // load label indicator matrix in a Matrix object
        double[][] datmatrix = new double[data.getDataSet().numInstances()][labelIndices.length];
        Matrix mat = new Matrix(datmatrix);

        for (int i = 0; i < data.getDataSet().numInstances(); i++) {
            Instance instance = data.getDataSet().instance(i);
            for (int j = 0; j < labelIndices.length; j++) {
                mat.set(i, j, Double.parseDouble(instance.toString(labelIndices[j])));
                //DEBUG: System.out.print("" + Double.parseDouble(instance.toString(labelIndices[j])) + ",");
            }
        }

        // make private copy of the label matrix
        this.Y = mat;

        // compute eigenvalue analysis of label indicator matrix
        SingularValueDecomposition svd = new SingularValueDecomposition(mat);

        //DEBUG: System.out.println("rows = " + svd.getV().getRowDimension() + ", cols = " + svd.getV().getColumnDimension());

        assert (svd.getV().getRowDimension() == svd.getV().getColumnDimension());

        Matrix rVec = svd.getV();
        Matrix Vk = new Matrix(new double[svd.getV().getRowDimension()][kappa]);

        // snippet (2): copy the first kappa singular vectors into Vk
        // NOTE(review): this copies ROWS of V into columns of Vk
        // (rVec.get(i, j)); the right singular vectors are usually V's
        // COLUMNS, which would be rVec.get(j, i) — confirm against the
        // CSSP paper this implements before changing anything.
        for (int i = 0; i < kappa; i++) {
            for (int j = 0; j < svd.getV().getColumnDimension(); j++) {
                Vk.set(j, i, rVec.get(i, j));
            }
        }

        // compute column selection probabilities (row norms of Vk)
        double[] selectionProbabilities = new double[Vk.getRowDimension()];
        double[] selectionProbabilitiesCDF = new double[Vk.getRowDimension()];

        for (int i = 0; i < Vk.getRowDimension(); i++) {
            selectionProbabilities[i] = 0.0;
            for (int j = 0; j < kappa; j++) {
                selectionProbabilities[i] += Math.pow(Vk.get(i, j), 2);
            }
            selectionProbabilities[i] = Math.sqrt(selectionProbabilities[i]);
        }

        // normalize probabilities
        double psum = 0.0;
        for (int i = 0; i < Vk.getRowDimension(); i++) {
            psum += selectionProbabilities[i];
            //System.out.println("psum = " + psum);
        }
        //System.out.println("psum = " + psum);
        //assert (psum != 0 && psum == 1.0); // must be non-zero and unitary

        for (int i = 0; i < Vk.getRowDimension(); i++) {
            selectionProbabilities[i] /= psum;
        }

        // build the cumulative distribution for roulette-wheel sampling
        psum = 0.0;
        for (int i = 0; i < Vk.getRowDimension(); i++) {
            psum += selectionProbabilities[i];
            selectionProbabilitiesCDF[i] = psum;
        }

        // add selected columns on a linked list
        sampledIndiceSet = new java.util.HashSet();

        // run column-sampling loop
        int sampling_count = 0;

        Random generator = new Random(seed);
        while (sampledIndiceSet.size() < kappa) // ...loop until knapsack gets filled...
        {
            // pick a random number

            //DEBUG:
            //double roulette = generator.nextDouble() * 0.5;
            double roulette = generator.nextDouble();

            // seek closest match according to sampling probabilities
            int closest_match = -1;

            // iterate label cols
            for (int i = 0; i < Vk.getRowDimension(); i++) {
                if (roulette < selectionProbabilitiesCDF[i]) // ...spot a possible match...
                {
                    // ...if so, select and quit scope...
                    closest_match = i; // BEWARE! "i" is an index over the label enumeration, not an ordering index!
                    break;
                }
            }

            // if we stepped on the flag, something serious is going on!
            assert (closest_match != -1);

            // see if column was selected; if not, add it
            if (!sampledIndiceSet.contains((Object) closest_match)) {
                sampledIndiceSet.add((Object) closest_match);
                //System.out.println("DEBUG(CSSP): Added column " + closest_match + " to the sampled column set!");
            }

            sampling_count += 1;
        }

        System.out.println("Sampling loop completed in " + sampling_count + " runs.");

        // compute indices-to-remove array
        indicesToRemove = new int[labelIndices.length - sampledIndiceSet.size()];

        // compute all **PHYSICAL** (not VIRTUAL) indices of label columns for CSSP to remove
        int idx = 0;
        for (int i = 0; i < labelIndices.length; i++) {
            if (!sampledIndiceSet.contains((Object) i)) {
                indicesToRemove[idx] = indices[i];
                idx += 1;
            }
        }

        // apply CSSP: select columns to remove
        int[] selectedIndicesObj = indicesToRemove.clone();
        selectedIndicesInt = new int[selectedIndicesObj.length];
        for (int i = 0; i < selectedIndicesObj.length; i++) {
            selectedIndicesInt[i] = (int) selectedIndicesObj[i];
        }

        // compute Moore-Penrose pseudo-inverse matrix of the column-reduced label indicator matrix
        double[][] datmatrix2 = new double[data.getDataSet().numInstances()][labelIndices.length
                - selectedIndicesInt.length];
        Matrix matC = new Matrix(datmatrix2);

        //DEBUG:
        //System.out.println("Selecting only " + matC.getColumnDimension() + " columns; removing " + selectedIndicesInt.length + " columns out of an original total of " + data.getLabelIndices().length + " labels!");

        // compute indices to keep
        java.util.LinkedList<Integer> indicesToKeep = new java.util.LinkedList();
        for (int i = 0; i < labelIndices.length; i++) {
            boolean keep = true;

            // see if this col has to be removed
            for (int k = 0; k < selectedIndicesInt.length; k++) {
                if (selectedIndicesInt[k] == labelIndices[i]) {
                    keep = false;
                    break;
                }
            }

            // add if we actually should keep this...
            if (keep) {
                indicesToKeep.add(labelIndices[i]);
            }
        }

        assert (indicesToKeep.size() == matC.getColumnDimension());

        // fill matC with the label values of the kept columns
        for (int i = 0; i < matC.getRowDimension(); i++) {
            // get data instance
            Instance instance = data.getDataSet().instance(i);

            // replicate data from ALL columns that WOULD not be removed by CSSP           
            for (int j = 0; j < matC.getColumnDimension(); j++) {
                // get label indice
                int corrIdx = (int) indicesToKeep.get(j);

                // update matC
                matC.set(i, j, Double.parseDouble(instance.toString(corrIdx)));
            }
        }

        //DEBUG: System.out.println("matC rows = " + matC.getRowDimension() + ", cols = " + matC.getColumnDimension() + "\n data original label cols # = " + data.getLabelIndices().length);

        // make private copy of projection matrices

        // Moore-Penrose pseudo-inverse of the label matrix matC
        // see http://robotics.caltech.edu/~jwb/courses/ME115/handouts/pseudo.pdf for an SVD-based workaround for MP-inverse

        // Moore-Penrose pseudoinverse computation based on Singular Value Decomposition (SVD)
        /*
         SingularValueDecomposition decomp = Vk.svd();
                
         Matrix S = decomp.getS();
         Matrix Scross = new Matrix(selectedIndicesInt.length,selectedIndicesInt.length);
         for(int i = 0; i < selectedIndicesInt.length; i++) {
         for(int j = 0; j < selectedIndicesInt.length; j++) {
         if(i == j) {
         if(S.get(i, j) == 0) {
         Scross.set(i, j, 0.0);
         } else {
         Scross.set(i, j, 1 / S.get(i, j));
         }
         } else {
         Scross.set(i, j, 0.0);
         }
         }
         }
                
         this.Yc = decomp.getV().times(Scross).times(decomp.getU().transpose());
         */

        // traditional normal-equations pseudoinverse; the branch picks the
        // formula that keeps the inverted matrix small and full-rank
        if (matC.getRowDimension() >= matC.getColumnDimension()) {
            this.Yc = ((matC.transpose().times(matC)).inverse()).times(matC.transpose());
        } else {
            this.Yc = matC.transpose().times((matC.times(matC.transpose()).inverse()));
        }

        //System.out.println("Yc rows: " + Yc.getRowDimension() + "\nYc cols: " + Yc.getColumnDimension() + "\n Y rows: " + Y.getRowDimension() + "\nY cols: " + Y.getColumnDimension());

        this.ProjectionMatrix = Yc.times(Y); // compute projection matrix

        // add sampled indices to Remove object
        remove = new Remove();
        remove.setAttributeIndicesArray(selectedIndicesInt);
        remove.setInvertSelection(false);
        remove.setInputFormat(data.getDataSet());

        // apply remove filter on the labels
        transformed = Filter.useFilter(data.getDataSet(), remove);

        this.sampledIndicesObj = indicesToKeep.toArray();

        return data.reintegrateModifiedDataSet(transformed);

    } catch (Exception ex) {
        // NOTE(review): failures are silently mapped to null; callers must
        // check the return value, and diagnosis requires enabling logging here
        //Logger.getLogger(BinaryRelevanceTransformation.class.getName()).log(Level.SEVERE, null, ex);
        return null;
    }
}

From source file:net.sf.jclal.activelearning.singlelabel.querystrategy.VarianceReductionQueryStrategy.java

License:Open Source License

/**
 * Analyzes how informative an instance is (variance-reduction criterion).
 * <p>
 * When the unlabelled pool changed size since the last call, the
 * per-instance selection weights q are recomputed by the iterative
 * fixed-point procedure below (based on the Fisher information matrix
 * and its eigendecomposition) and cached in tempValues; the utility of
 * the queried instance is then simply its cached weight.
 *
 * @param instance The instance to query.
 * @return The utility of the instance.
 */
@Override
public double utilityInstance(Instance instance) {

    Instances unlabeled = getUnlabelledData().getDataset();

    // recompute the cached weights only when the pool size changed
    if (unlabelledSize != unlabeled.numInstances()) {
        unlabelledSize = unlabeled.numInstances();

        //it is initialized q_sub_i
        int n = unlabeled.numInstances();
        double[] q = new double[n];
        //1. q_sub_i = 1/n, i = 1, 2, ..., n
        //Arrays.fill(q, 1.0 / n);
        // (the 1/n fill happens further down, on copiaQ, to save one pass)

        //it is initialized pi_sub_i
        //2. pi_sub_i
        double[] piSubI = getPiSubI(unlabeled);

        //to create the Fisher matrix (features only, class excluded)
        int dimensionMatrix = unlabeled.numAttributes() - 1;
        int classIndex = unlabeled.classIndex();

        Matrix matrixFisher = null;
        try {
            matrixFisher = new Matrix(dimensionMatrix, dimensionMatrix);
        } catch (Exception ex) {
            Logger.getLogger(VarianceReductionQueryStrategy.class.getName()).log(Level.SEVERE, null, ex);
        }

        // accumulate pi_i*(1-pi_i) * x_i*x_i' over all unlabelled instances
        for (int i = 0; i < piSubI.length; i++) {
            double mult = piSubI[i] * (1 - piSubI[i]);

            //the values of the instance are had
            double[] atributos = unlabeled.instance(i).toDoubleArray();

            //the attribute class is eliminated, only the features are left
            double[] vectorX = DatasetUtils.copyFeatures(atributos, classIndex);

            Matrix current = null;
            try {
                current = new Matrix(vectorX.length, vectorX.length);
            } catch (Exception ex) {
                Logger.getLogger(VarianceReductionQueryStrategy.class.getName()).log(Level.SEVERE, null, ex);
            }

            // current = x_i * x_i' (outer product)
            productVector(current, vectorX);

            //it multiplies current * multi
            current.timesEquals(mult);

            //it adds current to matrixFisher
            //plusEquals saves the result in matrixFisher
            matrixFisher.plusEquals(current);

        }

        // ridge-style regularization: add factor * identity
        double factorRegularizationValue = getFactorRegularization();

        Matrix identity = Matrix.identity(dimensionMatrix, dimensionMatrix);

        identity.timesEquals(factorRegularizationValue);

        //the result joins to matrixFisher
        matrixFisher.plusEquals(identity);

        //do eigen decomposition
        EigenvalueDecomposition eigen = matrixFisher.eig();

        //in case of file, the matrix v takes the matrix file from eigen
        //in this case eigen cant not be destroy for the moment
        Matrix v = eigen.getV();

        double[] landa = eigen.getRealEigenvalues();

        double epsilonValue = getEpsilon();

        //working copy of q, used to detect convergence between iterations
        double[] copiaQ = new double[q.length];
        Arrays.fill(copiaQ, 1.0 / n);

        //while it finds change in q, it keeps on iterating
        currentEpsilonIteration = 0;
        do {
            ++currentEpsilonIteration;
            //the value of q is updated
            //in the first iteration it fills with 1.0/n
            System.arraycopy(copiaQ, 0, q, 0, q.length);

            //process of finding f_sub_i
            double[] f = new double[landa.length];
            for (int j = 0; j < f.length; j++) {
                f[j] = 0;

                for (int i = 0; i < n; i++) {
                    double mult = q[i] * piSubI[i] * (1 - piSubI[i]);

                    //the values of the instance are had
                    double[] atributos = unlabeled.instance(i).toDoubleArray();

                    //the attribute class is eliminated, only the features are left
                    double[] vectorX = DatasetUtils.copyFeatures(atributos, classIndex);

                    //it multiplies vector_x with vector_columna of V
                    //vector_x it is: 1 X n
                    //vector_de_V it is: n X 1
                    //result: a number
                    double multVectores = 0;
                    for (int k = 0; k < vectorX.length; k++) {
                        multVectores += vectorX[k] * v.get(k, j);
                    }

                    //the result rises up to the square
                    multVectores *= multVectores;

                    //it joins to f[j]
                    f[j] += mult * multVectores;
                }
            }

            //the first process of finding q of the current iteration       
            for (int i = 0; i < n; i++) {
                double mult = copiaQ[i] * copiaQ[i] * piSubI[i] * (1 - piSubI[i]);

                //the values of the instance are had
                double[] atributos = unlabeled.instance(i).toDoubleArray();

                //the attribute class is eliminated, only the features are left
                double[] vectorX = DatasetUtils.copyFeatures(atributos, classIndex);

                //the following  is realized
                double sumatoria = 0;
                for (int j = 0; j < landa.length; j++) {

                    //it multiplies vector_x with vector_columna of V
                    //vector_x is: 1 X n
                    //vector_de_V is: n X 1
                    //result: a number
                    double multVectores = 0;
                    for (int k = 0; k < vectorX.length; k++) {
                        multVectores += vectorX[k] * v.get(k, j);
                    }

                    //the result multiplies with landa[j]
                    multVectores *= landa[j];

                    //it rises up to the square
                    multVectores *= multVectores;

                    //it splits between the square of f [j]
                    multVectores /= f[j] * f[j];

                    //add the term to the running sum
                    sumatoria += multVectores;
                }

                //the value of copia_q [i] is: mult * sumatoria
                copiaQ[i] = mult * sumatoria;
            }

            //the second step to find q in the iteration

            /*the sum must be out, if it was inside and with copia_q then 
             *one would give priority to the last instance and the last one 
             * would be always chosen
             */
            double suma = 0;
            for (int j = 0; j < n; j++) {
                suma += copiaQ[j];
            }

            // normalize so the weights sum to 1
            for (int i = 0; i < n; i++) {
                copiaQ[i] = copiaQ[i] / suma;
            }

        } while (change(q, copiaQ, epsilonValue));

        //cache the converged weights for subsequent calls
        tempValues = new double[copiaQ.length];

        System.arraycopy(copiaQ, 0, tempValues, 0, copiaQ.length);

    }

    // utility is the cached weight of the queried instance
    int indice = unlabeled.indexOf(instance);

    return tempValues[indice];
}

From source file:org.mitre.ccv.CompleteCompositionVectorMain.java

License:Open Source License

/**
 * Performs Affinity Propagation clustering on a distance matrix.
 *
 * @param dm   the distance matrix to cluster
 * @param type the preference type handed to getPreference
 * @return the configured AffinityPropagation instance
 */
public AffinityPropagation cluster(DistanceMatrix dm, int type) {
    double[][] mVals = dm.getClonedDistances();
    Matrix m = new Matrix(mVals);

    // convert distances to similarities, depending on the distance measure
    if (distCalc == 1 || distCalc == 3) {
        m = m.times(-1.0);
    } else if (distCalc == 2) {
        Matrix o = new Matrix(m.getRowDimension(), m.getColumnDimension(), 1.0);
        m = o.minus(m);
    }

    // collect the strict upper triangle of the similarity matrix.
    // BUGFIX: the original loops ran i from 1 and j < total - 1, skipping
    // row 0 and the last column; the tail of "values" stayed 0.0 and
    // skewed the preference statistic computed from it.
    int total = m.getColumnDimension();
    double[] values = new double[(total * total - total) / 2];
    int count = 0;
    for (int i = 0; i < total; i++) {
        for (int j = i + 1; j < total; j++) {
            values[count] = m.get(i, j);
            count++;
        }
    }

    double preference = getPreference(values, type);

    return new AffinityPropagation(m, 5000, 300, 0.9, preference);
}