Example usage for weka.core Instances numInstances

List of usage examples for weka.core Instances numInstances

Introduction

In this page you can find the example usage for weka.core Instances numInstances.

Prototype


public int numInstances()

Source Link

Document

Returns the number of instances in the dataset.

Usage

From source file:br.fapesp.myutils.MyUtils.java

License:Open Source License

/**
 * Generates a Gaussian data set whose cluster spreads change halfway through:
 * the first half of the points is drawn with {@code sigmas}, the second half
 * with {@code sigmas2}, and both halves are appended into a single data set.
 *
 * @param centers          cluster centers, one row per cluster
 * @param sigmas           per-cluster spreads for the first half
 * @param sigmas2          per-cluster spreads for the second half
 * @param pointsPerCluster number of points generated per cluster per half
 * @param seed             RNG seed (second half uses seed + 59387 so the halves differ)
 * @param randomize        whether each half is shuffled by the generator
 * @return the concatenated data set
 */
public static Instances genGaussianDatasetWithSigmaEvolution(double[][] centers, double[][] sigmas,
        double[][] sigmas2, int pointsPerCluster, long seed, boolean randomize) {
    Instances firstHalf = genGaussianDataset(centers, sigmas, pointsPerCluster, seed, randomize, false);
    Instances secondHalf = genGaussianDataset(centers, sigmas2, pointsPerCluster, seed + 59387, randomize, false);

    int extra = secondHalf.numInstances();
    for (int idx = 0; idx < extra; idx++) {
        firstHalf.add(secondHalf.instance(idx));
    }
    return firstHalf;
}

From source file:br.fapesp.myutils.MyUtils.java

License:Open Source License

/**
 * Converts an Instances data set to a matrix of doubles, one row per
 * instance and one column per attribute.
 *
 * @param data the data set to convert
 * @return the data as an N x numAttributes double matrix
 */
public static double[][] convertInstancesToDoubleMatrix(Instances data) {
    int N = data.numInstances();
    double[][] ddata = new double[N][];

    for (int i = 0; i < N; i++) {
        // toDoubleArray() returns a fresh array for each call, so the row can
        // be stored directly instead of being copied element by element.
        ddata[i] = data.instance(i).toDoubleArray();
    }

    return ddata;
}

From source file:br.ufrn.ia.core.clustering.EMIaProject.java

License:Open Source License

/**
 * Chooses the number of clusters by cross-validated log-likelihood.
 * Starting from one cluster, it runs (up to) 10-fold cross-validation,
 * training EM on each fold and scoring on the held-out part; the cluster
 * count is incremented while the mean held-out log-likelihood keeps
 * improving. On exit {@code m_num_clusters} holds the best count found.
 *
 * @throws Exception if training or evaluation fails irrecoverably
 */
private void CVClusters() throws Exception {
    double CVLogLikely = -Double.MAX_VALUE; // best mean log-likelihood seen so far
    double templl, tll;
    boolean CVincreased = true;
    m_num_clusters = 1;
    int num_clusters = m_num_clusters;
    int i;
    Random cvr;
    Instances trainCopy;
    // Use fewer folds than 10 when there are fewer than 10 instances.
    int numFolds = (m_theInstances.numInstances() < 10) ? m_theInstances.numInstances() : 10;

    boolean ok = true;
    int seed = getSeed();
    int restartCount = 0;
    CLUSTER_SEARCH: while (CVincreased) {
        CVincreased = false;
        cvr = new Random(getSeed());
        trainCopy = new Instances(m_theInstances);
        trainCopy.randomize(cvr);
        templl = 0.0;
        for (i = 0; i < numFolds; i++) {
            Instances cvTrain = trainCopy.trainCV(numFolds, i, cvr);
            // Cannot fit more clusters than training instances in a fold.
            if (num_clusters > cvTrain.numInstances()) {
                break CLUSTER_SEARCH;
            }
            Instances cvTest = trainCopy.testCV(numFolds, i);
            m_rr = new Random(seed);
            // Burn a few values so initialization differs from the fold RNG.
            for (int z = 0; z < 10; z++)
                m_rr.nextDouble();
            m_num_clusters = num_clusters;
            EM_Init(cvTrain);
            try {
                iterate(cvTrain, false);
            } catch (Exception ex) {
                // Training failure (e.g. empty clusters): retry with a new
                // seed, giving up entirely after 5 failed restarts.
                ex.printStackTrace();
                seed++;
                restartCount++;
                ok = false;
                if (restartCount > 5) {
                    break CLUSTER_SEARCH;
                }
                break;
            }
            try {
                // Held-out log-likelihood for this fold (weights unchanged).
                tll = E(cvTest, false);
            } catch (Exception ex) {
                // Evaluation failure: same restart policy as training failure.
                ex.printStackTrace();
                seed++;
                restartCount++;
                ok = false;
                if (restartCount > 5) {
                    break CLUSTER_SEARCH;
                }
                break;
            }

            if (m_verbose) {
                System.out.println("# clust: " + num_clusters + " Fold: " + i + " Loglikely: " + tll);
            }
            templl += tll;
        }

        if (ok) {
            restartCount = 0;
            seed = getSeed();
            templl /= (double) numFolds; // mean held-out log-likelihood

            if (m_verbose) {
                System.out.println("===================================" + "==============\n# clust: "
                        + num_clusters + " Mean Loglikely: " + templl + "\n================================"
                        + "=================");
            }

            // Keep growing the cluster count while the CV score improves.
            if (templl > CVLogLikely) {
                CVLogLikely = templl;
                CVincreased = true;
                num_clusters++;
            }
        }
    }

    if (m_verbose) {
        System.out.println("Number of clusters: " + (num_clusters - 1));
    }

    // num_clusters was incremented once past the last improving value.
    m_num_clusters = num_clusters - 1;
}

From source file:br.ufrn.ia.core.clustering.EMIaProject.java

License:Open Source License

/**
 * Expectation step: computes the weighted mean log-density of the data under
 * the current model and, optionally, refreshes the per-instance cluster
 * membership weights and the cluster priors.
 *
 * @param inst           the instances to evaluate
 * @param change_weights if true, update m_weights and re-estimate priors
 * @return the weight-normalized log-likelihood of {@code inst}
 * @throws Exception if density or distribution computation fails
 */
private double E(Instances inst, boolean change_weights) throws Exception {

    double logLikelihood = 0.0;
    double weightSum = 0.0;

    int n = inst.numInstances();
    for (int idx = 0; idx < n; idx++) {
        Instance current = inst.instance(idx);
        double w = current.weight();

        logLikelihood += w * logDensityForInstance(current);
        weightSum += w;

        if (change_weights) {
            m_weights[idx] = distributionForInstance(current);
        }
    }

    // Priors must track the freshly updated membership weights.
    if (change_weights) {
        estimate_priors(inst);
    }
    return logLikelihood / weightSum;
}

From source file:br.ufrn.ia.core.clustering.EMIaProject.java

License:Open Source License

/**
 * Initializes the EM model parameters: runs SimpleKMeans 10 times with
 * different seeds, keeps the lowest squared-error solution, and seeds the
 * discrete estimators, Gaussian means/std-devs, and cluster priors from it.
 *
 * @param inst the training instances
 * @throws Exception if k-means clustering fails
 */
private void EM_Init(Instances inst) throws Exception {
    int i, j, k;

    // run k means 10 times and choose best solution
    SimpleKMeans bestK = null;
    double bestSqE = Double.MAX_VALUE;
    for (i = 0; i < 10; i++) {
        SimpleKMeans sk = new SimpleKMeans();
        sk.setSeed(m_rr.nextInt());
        sk.setNumClusters(m_num_clusters);
        sk.setDisplayStdDevs(true);
        sk.buildClusterer(inst);
        if (sk.getSquaredError() < bestSqE) {
            bestSqE = sk.getSquaredError();
            bestK = sk;
        }
    }

    // initialize with best k-means solution
    m_num_clusters = bestK.numberOfClusters();
    m_weights = new double[inst.numInstances()][m_num_clusters];
    m_model = new DiscreteEstimator[m_num_clusters][m_num_attribs];
    // m_modelNormal[cluster][attribute]: [0]=mean, [1]=std dev, [2]=weight sum
    m_modelNormal = new double[m_num_clusters][m_num_attribs][3];
    m_priors = new double[m_num_clusters];
    Instances centers = bestK.getClusterCentroids();
    Instances stdD = bestK.getClusterStandardDevs();
    double[][][] nominalCounts = bestK.getClusterNominalCounts();
    double[] clusterSizes = bestK.getClusterSizes();

    for (i = 0; i < m_num_clusters; i++) {
        Instance center = centers.instance(i);
        for (j = 0; j < m_num_attribs; j++) {
            if (inst.attribute(j).isNominal()) {
                // Nominal attribute: discrete estimator seeded with the
                // per-cluster value counts from k-means.
                m_model[i][j] = new DiscreteEstimator(m_theInstances.attribute(j).numValues(), true);
                for (k = 0; k < inst.attribute(j).numValues(); k++) {
                    m_model[i][j].addValue(k, nominalCounts[i][j][k]);
                }
            } else {
                // Numeric attribute: Gaussian seeded from the k-means centroid.
                double minStdD = (m_minStdDevPerAtt != null) ? m_minStdDevPerAtt[j] : m_minStdDev;
                double mean = (center.isMissing(j)) ? inst.meanOrMode(j) : center.value(j);
                m_modelNormal[i][j][0] = mean;
                // Missing std dev: spread the attribute's range over the clusters.
                double stdv = (stdD.instance(i).isMissing(j))
                        ? ((m_maxValues[j] - m_minValues[j]) / (2 * m_num_clusters))
                        : stdD.instance(i).value(j);
                // Enforce the minimum std dev, falling back to the attribute's
                // global std dev when the cluster estimate is too small.
                if (stdv < minStdD) {
                    stdv = inst.attributeStats(j).numericStats.stdDev;
                    if (Double.isInfinite(stdv)) {
                        stdv = minStdD;
                    }
                    if (stdv < minStdD) {
                        stdv = minStdD;
                    }
                }
                if (stdv <= 0) {
                    stdv = m_minStdDev;
                }

                m_modelNormal[i][j][1] = stdv;
                m_modelNormal[i][j][2] = 1.0;
            }
        }
    }

    // Priors proportional to the k-means cluster sizes.
    for (j = 0; j < m_num_clusters; j++) {
        m_priors[j] = clusterSizes[j];
    }
    Utils.normalize(m_priors);
}

From source file:br.ufrn.ia.core.clustering.EMIaProject.java

License:Open Source License

/**
 * Prints a report of the current EM model: for each cluster/attribute pair
 * the fitted distribution (discrete estimator or Gaussian parameters), then
 * for each instance its most likely cluster and full membership weights.
 *
 * @param inst the instances whose memberships are reported
 */
private void EM_Report(Instances inst) {
    System.out.println("======================================");

    // Per-cluster, per-attribute model parameters.
    for (int cluster = 0; cluster < m_num_clusters; cluster++) {
        for (int att = 0; att < m_num_attribs; att++) {
            System.out.println("Clust: " + cluster + " att: " + att + "\n");

            if (m_theInstances.attribute(att).isNominal()) {
                if (m_model[cluster][att] != null) {
                    System.out.println(m_model[cluster][att].toString());
                }
            } else {
                System.out.println(
                        "Normal Distribution. Mean = " + Utils.doubleToString(m_modelNormal[cluster][att][0], 8, 4)
                                + " StandardDev = " + Utils.doubleToString(m_modelNormal[cluster][att][1], 8, 4)
                                + " WeightSum = " + Utils.doubleToString(m_modelNormal[cluster][att][2], 8, 4));
            }
        }
    }

    // Per-instance hard assignment followed by the soft membership weights.
    for (int row = 0; row < inst.numInstances(); row++) {
        int best = Utils.maxIndex(m_weights[row]);
        System.out.print("Inst " + Utils.doubleToString((double) row, 5, 0) + " Class " + best + "\t");
        for (int cluster = 0; cluster < m_num_clusters; cluster++) {
            System.out.print(Utils.doubleToString(m_weights[row][cluster], 7, 5) + "  ");
        }
        System.out.println();
    }
}

From source file:br.ufrn.ia.core.clustering.EMIaProject.java

License:Open Source License

/**
 * Re-estimates the cluster prior probabilities from the current membership
 * weights: each prior is the normalized, instance-weighted sum of that
 * cluster's membership column.
 *
 * @param inst the instances whose weights contribute to the priors
 * @throws Exception if normalization fails (e.g. all-zero priors)
 */
private void estimate_priors(Instances inst) throws Exception {

    // Reset before accumulating.
    for (int cluster = 0; cluster < m_num_clusters; cluster++) {
        m_priors[cluster] = 0.0;
    }

    // Accumulate weighted membership mass per cluster.
    int n = inst.numInstances();
    for (int row = 0; row < n; row++) {
        double w = inst.instance(row).weight();
        for (int cluster = 0; cluster < m_num_clusters; cluster++) {
            m_priors[cluster] += w * m_weights[row][cluster];
        }
    }

    Utils.normalize(m_priors);
}

From source file:br.ufrn.ia.core.clustering.EMIaProject.java

License:Open Source License

/**
 * Maximization step: re-estimates all per-cluster model parameters from the
 * current membership weights (m_weights). Nominal attributes update their
 * discrete estimators; numeric attributes accumulate weighted sufficient
 * statistics in m_modelNormal and then convert them to mean and std dev.
 *
 * @param inst the training instances
 * @throws Exception if estimation fails
 */
private void M(Instances inst) throws Exception {

    int i, j, l;

    new_estimators();

    // Accumulate sufficient statistics for every cluster/attribute pair.
    // For numeric attributes the slots are reused as accumulators here:
    // [0] = weighted sum, [1] = weighted sum of squares, [2] = weight sum.
    for (i = 0; i < m_num_clusters; i++) {
        for (j = 0; j < m_num_attribs; j++) {
            for (l = 0; l < inst.numInstances(); l++) {
                Instance in = inst.instance(l);
                if (!in.isMissing(j)) {
                    if (inst.attribute(j).isNominal()) {
                        m_model[i][j].addValue(in.value(j), in.weight() * m_weights[l][i]);
                    } else {
                        m_modelNormal[i][j][0] += (in.value(j) * in.weight() * m_weights[l][i]);
                        m_modelNormal[i][j][2] += in.weight() * m_weights[l][i];
                        m_modelNormal[i][j][1] += (in.value(j) * in.value(j) * in.weight() * m_weights[l][i]);
                    }
                }
            }
        }
    }

    // calculate mean and std deviation for numeric attributes
    for (j = 0; j < m_num_attribs; j++) {
        if (!inst.attribute(j).isNominal()) {
            for (i = 0; i < m_num_clusters; i++) {
                if (m_modelNormal[i][j][2] <= 0) {
                    // Cluster received no weight for this attribute.
                    m_modelNormal[i][j][1] = Double.MAX_VALUE;
                    m_modelNormal[i][j][0] = m_minStdDev;
                } else {

                    // variance: E[x^2] - E[x]^2 computed from the accumulators
                    m_modelNormal[i][j][1] = (m_modelNormal[i][j][1]
                            - (m_modelNormal[i][j][0] * m_modelNormal[i][j][0] / m_modelNormal[i][j][2]))
                            / (m_modelNormal[i][j][2]);

                    // Guard against a slightly negative variance from rounding.
                    if (m_modelNormal[i][j][1] < 0) {
                        m_modelNormal[i][j][1] = 0;
                    }

                    // std dev
                    double minStdD = (m_minStdDevPerAtt != null) ? m_minStdDevPerAtt[j] : m_minStdDev;

                    m_modelNormal[i][j][1] = Math.sqrt(m_modelNormal[i][j][1]);

                    // Enforce the minimum std dev, falling back to the
                    // attribute's global std dev when the estimate is too small.
                    if ((m_modelNormal[i][j][1] <= minStdD)) {
                        m_modelNormal[i][j][1] = inst.attributeStats(j).numericStats.stdDev;
                        if ((m_modelNormal[i][j][1] <= minStdD)) {
                            m_modelNormal[i][j][1] = minStdD;
                        }
                    }
                    if ((m_modelNormal[i][j][1] <= 0)) {
                        m_modelNormal[i][j][1] = m_minStdDev;
                    }
                    if (Double.isInfinite(m_modelNormal[i][j][1])) {
                        m_modelNormal[i][j][1] = m_minStdDev;
                    }

                    // mean: weighted sum divided by total weight
                    m_modelNormal[i][j][0] /= m_modelNormal[i][j][2];
                }
            }
        }
    }
}

From source file:br.ufrn.ia.core.clustering.SimpleKMeansIaProject.java

License:Open Source License

/**
 * Builds the k-means clusterer: optionally replaces missing values, collects
 * full-data statistics, picks distinct random instances as initial centroids,
 * then iterates assign/recompute until assignments stop changing (or the
 * iteration limit is hit), dropping empty clusters along the way.
 *
 * @param data the training instances
 * @throws Exception if the data cannot be handled or filtering fails
 */
public void buildClusterer(Instances data) throws Exception {

    // can clusterer handle the data?
    getCapabilities().testWithFail(data);

    m_Iterations = 0;

    m_ReplaceMissingFilter = new ReplaceMissingValues();
    Instances instances = new Instances(data);

    instances.setClassIndex(-1);
    if (!m_dontReplaceMissing) {
        m_ReplaceMissingFilter.setInputFormat(instances);
        instances = Filter.useFilter(instances, m_ReplaceMissingFilter);
    }

    // Full-data statistics used when reporting and when marking missing.
    m_FullMissingCounts = new int[instances.numAttributes()];
    if (m_displayStdDevs) {
        m_FullStdDevs = new double[instances.numAttributes()];
    }
    m_FullNominalCounts = new int[instances.numAttributes()][0];

    m_FullMeansOrMediansOrModes = moveCentroid(0, instances, false);
    for (int i = 0; i < instances.numAttributes(); i++) {
        m_FullMissingCounts[i] = instances.attributeStats(i).missingCount;
        if (instances.attribute(i).isNumeric()) {
            if (m_displayStdDevs) {
                m_FullStdDevs[i] = Math.sqrt(instances.variance(i));
            }
            if (m_FullMissingCounts[i] == instances.numInstances()) {
                m_FullMeansOrMediansOrModes[i] = Double.NaN; // mark missing
                // as mean
            }
        } else {
            m_FullNominalCounts[i] = instances.attributeStats(i).nominalCounts;
            if (m_FullMissingCounts[i] > m_FullNominalCounts[i][Utils.maxIndex(m_FullNominalCounts[i])]) {
                m_FullMeansOrMediansOrModes[i] = -1; // mark missing as most
                // common value
            }
        }
    }

    m_ClusterCentroids = new Instances(instances, m_NumClusters);
    int[] clusterAssignments = new int[instances.numInstances()];

    if (m_PreserveOrder)
        m_Assignments = clusterAssignments;

    m_DistanceFunction.setInstances(instances);

    Random RandomO = new Random(getSeed());
    int instIndex;
    HashMap initC = new HashMap();
    DecisionTableHashKey hk = null;

    // When order must be preserved, shuffle a copy instead of the data itself.
    Instances initInstances = null;
    if (m_PreserveOrder)
        initInstances = new Instances(instances);
    else
        initInstances = instances;

    // Fisher-Yates-style pass that picks distinct instances (deduplicated via
    // DecisionTableHashKey) as the initial centroids.
    for (int j = initInstances.numInstances() - 1; j >= 0; j--) {
        instIndex = RandomO.nextInt(j + 1);
        hk = new DecisionTableHashKey(initInstances.instance(instIndex), initInstances.numAttributes(), true);
        if (!initC.containsKey(hk)) {
            m_ClusterCentroids.add(initInstances.instance(instIndex));
            initC.put(hk, null);
        }
        initInstances.swap(j, instIndex);

        if (m_ClusterCentroids.numInstances() == m_NumClusters) {
            break;
        }
    }

    // Fewer distinct instances than requested clusters shrinks the count.
    m_NumClusters = m_ClusterCentroids.numInstances();

    // removing reference
    initInstances = null;

    int i;
    boolean converged = false;
    int emptyClusterCount;
    Instances[] tempI = new Instances[m_NumClusters];
    m_squaredErrors = new double[m_NumClusters];
    m_ClusterNominalCounts = new int[m_NumClusters][instances.numAttributes()][0];
    m_ClusterMissingCounts = new int[m_NumClusters][instances.numAttributes()];
    while (!converged) {
        emptyClusterCount = 0;
        m_Iterations++;
        converged = true;
        // Assignment step: converged only if no instance changes cluster.
        for (i = 0; i < instances.numInstances(); i++) {
            Instance toCluster = instances.instance(i);
            int newC = clusterProcessedInstance(toCluster, true);
            if (newC != clusterAssignments[i]) {
                converged = false;
            }
            clusterAssignments[i] = newC;
        }

        // update centroids
        m_ClusterCentroids = new Instances(instances, m_NumClusters);
        for (i = 0; i < m_NumClusters; i++) {
            tempI[i] = new Instances(instances, 0);
        }
        for (i = 0; i < instances.numInstances(); i++) {
            tempI[clusterAssignments[i]].add(instances.instance(i));
        }
        for (i = 0; i < m_NumClusters; i++) {
            if (tempI[i].numInstances() == 0) {
                // empty cluster
                emptyClusterCount++;
            } else {
                moveCentroid(i, tempI[i], true);
            }
        }

        // Drop empty clusters; if already converged, compact tempI so the
        // final statistics below only cover the surviving clusters.
        if (emptyClusterCount > 0) {
            m_NumClusters -= emptyClusterCount;
            if (converged) {
                Instances[] t = new Instances[m_NumClusters];
                int index = 0;
                for (int k = 0; k < tempI.length; k++) {
                    if (tempI[k].numInstances() > 0) {
                        t[index++] = tempI[k];
                    }
                }
                tempI = t;
            } else {
                tempI = new Instances[m_NumClusters];
            }
        }

        if (m_Iterations == m_MaxIterations)
            converged = true;

        if (!converged) {
            m_squaredErrors = new double[m_NumClusters];
            m_ClusterNominalCounts = new int[m_NumClusters][instances.numAttributes()][0];
        }
    }

    // Final per-cluster statistics (std devs and sizes).
    if (m_displayStdDevs) {
        m_ClusterStdDevs = new Instances(instances, m_NumClusters);
    }
    m_ClusterSizes = new int[m_NumClusters];
    for (i = 0; i < m_NumClusters; i++) {
        if (m_displayStdDevs) {
            double[] vals2 = new double[instances.numAttributes()];
            for (int j = 0; j < instances.numAttributes(); j++) {
                if (instances.attribute(j).isNumeric()) {
                    vals2[j] = Math.sqrt(tempI[i].variance(j));
                } else {
                    vals2[j] = Utils.missingValue();
                }
            }
            m_ClusterStdDevs.add(new DenseInstance(1.0, vals2));
        }
        m_ClusterSizes[i] = tempI[i].numInstances();
    }
}

From source file:br.ufrn.ia.core.clustering.SimpleKMeansIaProject.java

License:Open Source License

/**
 * Computes the centroid of a cluster's members: the mean for Euclidean
 * distance, the median for Manhattan distance, and the mode for nominal
 * attributes. Optionally records per-cluster statistics and appends the
 * centroid to m_ClusterCentroids.
 *
 * @param centroidIndex     index of the cluster being updated
 * @param members           the instances assigned to this cluster
 * @param updateClusterInfo if true, update missing/nominal counts and add
 *                          the centroid to m_ClusterCentroids
 * @return the centroid's attribute values
 */
protected double[] moveCentroid(int centroidIndex, Instances members, boolean updateClusterInfo) {
    double[] vals = new double[members.numAttributes()];

    // used only for Manhattan Distance
    Instances sortedMembers = null;
    int middle = 0;
    boolean dataIsEven = false;

    if (m_DistanceFunction instanceof ManhattanDistance) {
        middle = (members.numInstances() - 1) / 2;
        dataIsEven = ((members.numInstances() % 2) == 0);
        if (m_PreserveOrder) {
            sortedMembers = members;
        } else {
            sortedMembers = new Instances(members);
        }
    }

    for (int j = 0; j < members.numAttributes(); j++) {

        // in case of Euclidian distance the centroid is the mean point
        // in case of Manhattan distance the centroid is the median point
        // in both cases, if the attribute is nominal, the centroid is the
        // mode
        if (m_DistanceFunction instanceof EuclideanDistance || members.attribute(j).isNominal()) {
            vals[j] = members.meanOrMode(j);
        } else if (m_DistanceFunction instanceof ManhattanDistance) {
            // singleton special case
            if (members.numInstances() == 1) {
                vals[j] = members.instance(0).value(j);
            } else {
                // kthSmallestValue partially sorts sortedMembers so the
                // element at position 'middle' is the median candidate.
                sortedMembers.kthSmallestValue(j, middle + 1);
                vals[j] = sortedMembers.instance(middle).value(j);
                if (dataIsEven) {
                    // Even count: median is the average of the two middle values.
                    sortedMembers.kthSmallestValue(j, middle + 2);
                    vals[j] = (vals[j] + sortedMembers.instance(middle + 1).value(j)) / 2;
                }
            }
        }

        if (updateClusterInfo) {
            m_ClusterMissingCounts[centroidIndex][j] = members.attributeStats(j).missingCount;
            m_ClusterNominalCounts[centroidIndex][j] = members.attributeStats(j).nominalCounts;
            if (members.attribute(j).isNominal()) {
                if (m_ClusterMissingCounts[centroidIndex][j] > m_ClusterNominalCounts[centroidIndex][j][Utils
                        .maxIndex(m_ClusterNominalCounts[centroidIndex][j])]) {
                    vals[j] = Utils.missingValue(); // mark mode as missing
                }
            } else {
                if (m_ClusterMissingCounts[centroidIndex][j] == members.numInstances()) {
                    vals[j] = Utils.missingValue(); // mark mean as missing
                }
            }
        }
    }
    if (updateClusterInfo)
        m_ClusterCentroids.add(new DenseInstance(1.0, vals));
    return vals;
}