Example usage for org.apache.commons.math3.ml.clustering DoublePoint DoublePoint

Introduction

This page collects example usages of the DoublePoint constructor from org.apache.commons.math3.ml.clustering, drawn from open-source projects.

Prototype

public DoublePoint(final double[] point)

Document

Build an instance wrapping a double array; the wrapped array is referenced, not copied. (An overload taking an int[] also exists; it copies the values into an internal double array. All examples below use the double[] form.)
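
A minimal, self-contained sketch of constructing DoublePoint instances and clustering them with DBSCAN; the coordinates and parameters below are made up for illustration:

import java.util.Arrays;
import java.util.List;

import org.apache.commons.math3.ml.clustering.Cluster;
import org.apache.commons.math3.ml.clustering.DBSCANClusterer;
import org.apache.commons.math3.ml.clustering.DoublePoint;

public class DoublePointDemo {
    public static void main(String[] args) {
        // DoublePoint wraps a coordinate array and implements Clusterable.
        List<DoublePoint> points = Arrays.asList(
                new DoublePoint(new double[] { 1.0, 1.1 }),
                new DoublePoint(new double[] { 1.2, 0.9 }),
                new DoublePoint(new double[] { 9.0, 9.2 }));

        // eps = 2.0, minPts = 1: the two nearby points form one cluster;
        // the outlier has no neighbor within eps and is dropped as noise.
        DBSCANClusterer<DoublePoint> clusterer = new DBSCANClusterer<>(2.0, 1);
        for (Cluster<DoublePoint> c : clusterer.cluster(points)) {
            System.out.println(c.getPoints().size() + " point(s) in cluster");
        }
    }
}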

Usage

From source file:DBClust.java

public static void Ident(double[] eps, int[] minPts, int[] pixelSize, boolean[] doCluster) {
    ArrayList<Particle> InpParticle = TableIO.Load(); // Get current table data.

    ij.measure.ResultsTable tab = Analyzer.getResultsTable();
    double width = tab.getValue("width", 0);
    double height = tab.getValue("height", 0);
    tab.reset();

    for (int Ch = 1; Ch <= InpParticle.get(InpParticle.size() - 1).channel; Ch++) {
        if (doCluster[Ch - 1]) {
            List<DoublePoint> points = new ArrayList<DoublePoint>();

            for (int i = 0; i < InpParticle.size(); i++) {
                double[] p = new double[2];
                if (InpParticle.get(i).include == 1 && InpParticle.get(i).channel == Ch) {
                    p[0] = InpParticle.get(i).x;
                    p[1] = InpParticle.get(i).y;
                    points.add(new DoublePoint(p));
                }
            }
            DBSCANClusterer<DoublePoint> DB = new DBSCANClusterer<DoublePoint>(eps[Ch - 1], minPts[Ch - 1]);
            List<Cluster<DoublePoint>> cluster = DB.cluster(points);
            int ClustIdx = 1;
            int[] IndexList = new int[InpParticle.size()];
            for (Cluster<DoublePoint> c : cluster) {
                for (int j = 0; j < c.getPoints().size(); j++) {
                    DoublePoint p = c.getPoints().get(j);
                    double[] Coord = p.getPoint();
                    for (int i = 0; i < InpParticle.size(); i++) {
                        Particle tempParticle = InpParticle.get(i);
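                        // Exact == comparison works here because the coordinates were
                        // copied unchanged from the particle list into the DoublePoints.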
                        if (tempParticle.x == Coord[0] && tempParticle.y == Coord[1]) {
                            IndexList[i] = ClustIdx;
                        }
                    }
                }
                ClustIdx++;
            }
            boolean first = true;
            for (int i = 0; i < InpParticle.size(); i++) {

                if (InpParticle.get(i).channel == Ch) {
                    tab.incrementCounter();
                    tab.addValue("Cluster", IndexList[i]);
                    tab.addValue("x0", InpParticle.get(i).x);
                    tab.addValue("y0", InpParticle.get(i).y);
                    tab.addValue("z0", InpParticle.get(i).z);
                    tab.addValue("frame", InpParticle.get(i).frame);
                    tab.addValue("channel", InpParticle.get(i).channel);
                    tab.addValue("sigma_x", InpParticle.get(i).sigma_x);
                    tab.addValue("sigma_y", InpParticle.get(i).sigma_y);
                    tab.addValue("precision_x", InpParticle.get(i).precision_x);
                    tab.addValue("precision_y", InpParticle.get(i).precision_y);
                    tab.addValue("precision_z", InpParticle.get(i).precision_z);
                    tab.addValue("r_square", InpParticle.get(i).r_square);
                    tab.addValue("photons", InpParticle.get(i).photons);
                    if (IndexList[i] > 0)
                        tab.addValue("include", 1);
                    else
                        tab.addValue("include", 0);
                    if (first) {
                        first = false;
                        tab.addValue("width", width);
                        tab.addValue("height", height);
                    }
                }
            }
        }
    } // Channel loop.
    tab.show("Results");

    RenderIm.run(doCluster, pixelSize, false);

}

From source file:Data.Utilities.java

public static List<Cluster<DoublePoint>> DBSCANClusterer(ArrayList<StayPoint> stayPoints, Double eParam,
        Integer minClust) {

    DBSCANClusterer<DoublePoint> dbscan = new DBSCANClusterer<>(eParam, minClust);

    ArrayList<DoublePoint> input = new ArrayList<DoublePoint>();

    for (StayPoint st : stayPoints) {
        double[] coord = new double[2];
        coord[0] = Double.parseDouble(st.getLatitude());
        coord[1] = Double.parseDouble(st.getLongitude());
        input.add(new DoublePoint(coord));
    }

    List<Cluster<DoublePoint>> cluster = dbscan.cluster(input);

    return cluster;
}

From source file:Data.Utilities.java

public static DoublePoint getCentroid(Cluster<DoublePoint> dp) {

    double[] coords = new double[2];

    List<DoublePoint> clPoints = dp.getPoints();

    for (DoublePoint p : clPoints) {
        coords[0] += p.getPoint()[0];
        coords[1] += p.getPoint()[1];
    }

    coords[0] /= clPoints.size();
    coords[1] /= clPoints.size();

    return new DoublePoint(coords);
}
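
A sketch of how the two helpers above might be combined; loadStayPoints() and the eps/minPts values are hypothetical stand-ins:

ArrayList<StayPoint> stayPoints = loadStayPoints(); // assumed data source
List<Cluster<DoublePoint>> clusters = Utilities.DBSCANClusterer(stayPoints, 0.01, 5);
for (Cluster<DoublePoint> c : clusters) {
    DoublePoint centroid = Utilities.getCentroid(c);
    System.out.println("centroid: " + Arrays.toString(centroid.getPoint()));
}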

From source file:bigdataproject.ReadDataSet.java

List<DoublePoint> getCollection(HashMap<Integer, double[]> map) {
    List<DoublePoint> list = new ArrayList<>();
    map.values().stream().map(DoublePoint::new).forEach(list::add);
    return list;
}

From source file:net.semanticmetadata.lire.imageanalysis.bovw.LocalFeatureHistogramBuilderKmeansPlusPlus.java

/**
 * Uses an existing index, where each and every document should have a set of local features. A number of
 * random images (numDocsForVocabulary) is selected and clustered to get a vocabulary of visual words
 * (the cluster means). For all images a histogram on the visual words is created and added to the documents.
 * Pre-existing histograms are deleted, so this method can be used for re-indexing.
 *
 * @throws java.io.IOException
 */
public void index() throws IOException {
    df.setMaximumFractionDigits(3);
    // find the documents for building the vocabulary:
    HashSet<Integer> docIDs = selectVocabularyDocs();
    System.out.println("Using " + docIDs.size() + " documents to build the vocabulary.");
    KMeansPlusPlusClusterer<DoublePoint> kpp = new KMeansPlusPlusClusterer<>(numClusters, 15);
    // fill the KMeans object:
    LinkedList<DoublePoint> features = new LinkedList<DoublePoint>();
    // Needed for check whether the document is deleted.
    Bits liveDocs = MultiFields.getLiveDocs(reader);
    for (Iterator<Integer> iterator = docIDs.iterator(); iterator.hasNext();) {
        int nextDoc = iterator.next();
        if (reader.hasDeletions() && !liveDocs.get(nextDoc))
            continue; // if it is deleted, just ignore it.
        Document d = reader.document(nextDoc);
        //            features.clear();
        IndexableField[] fields = d.getFields(localFeatureFieldName);
        String file = d.getValues(DocumentBuilder.FIELD_NAME_IDENTIFIER)[0];
        for (int j = 0; j < fields.length; j++) {
            LireFeature f = getFeatureInstance();
            f.setByteArrayRepresentation(fields[j].binaryValue().bytes, fields[j].binaryValue().offset,
                    fields[j].binaryValue().length);
            // copy the data over to a new array; DoublePoint only references the
            // array it is given, so each feature needs its own copy.
            double[] feat = new double[f.getDoubleHistogram().length];
            System.arraycopy(f.getDoubleHistogram(), 0, feat, 0, feat.length);
            features.add(new DoublePoint(feat));
        }
    }
    if (features.size() < numClusters) {
        // this cannot work. You need more data points than clusters.
        throw new UnsupportedOperationException("Only " + features.size() + " features found to cluster into "
                + numClusters + " clusters. Try to use fewer clusters or more images.");
    }
    // do the clustering:
    System.out.println("Number of local features: " + df.format(features.size()));
    System.out.println("Starting clustering ...");
    List<CentroidCluster<DoublePoint>> clusterList = kpp.cluster(features);
    // TODO: Serializing clusters to a file on the disk ...
    System.out.println("Clustering finished, " + clusterList.size() + " clusters found");
    clusters = new LinkedList<double[]>();
    for (Iterator<CentroidCluster<DoublePoint>> iterator = clusterList.iterator(); iterator.hasNext();) {
        CentroidCluster<DoublePoint> centroidCluster = iterator.next();
        clusters.add(centroidCluster.getCenter().getPoint());
    }
    System.out.println("Creating histograms ...");
    int[] tmpHist = new int[numClusters];
    IndexWriter iw = LuceneUtils.createIndexWriter(((DirectoryReader) reader).directory(), true,
            LuceneUtils.AnalyzerType.WhitespaceAnalyzer, 256d);

    // careful: copy reader to RAM for faster access when reading ...
    //        reader = IndexReader.open(new RAMDirectory(reader.directory()), true);
    LireFeature f = getFeatureInstance();
    for (int i = 0; i < reader.maxDoc(); i++) {
        try {
            if (reader.hasDeletions() && !liveDocs.get(i))
                continue;
            for (int j = 0; j < tmpHist.length; j++) {
                tmpHist[j] = 0;
            }
            Document d = reader.document(i);
            IndexableField[] fields = d.getFields(localFeatureFieldName);
            // remove the fields if they are already there ...
            d.removeField(visualWordsFieldName);
            d.removeField(localFeatureHistFieldName);

            // find the appropriate cluster for each feature:
            for (int j = 0; j < fields.length; j++) {
                f.setByteArrayRepresentation(fields[j].binaryValue().bytes, fields[j].binaryValue().offset,
                        fields[j].binaryValue().length);
                tmpHist[clusterForFeature(f, clusters)]++;
            }
            //                System.out.println(Arrays.toString(tmpHist));
            d.add(new StoredField(localFeatureHistFieldName,
                    SerializationUtils.toByteArray(normalize(tmpHist))));
            quantize(tmpHist);
            d.add(new TextField(visualWordsFieldName, arrayToVisualWordString(tmpHist), Field.Store.YES));

            // remove local features to save some space if requested:
            if (DELETE_LOCAL_FEATURES) {
                d.removeFields(localFeatureFieldName);
            }
            // now write the new one. we use the identifier to update ;)
            iw.updateDocument(new Term(DocumentBuilder.FIELD_NAME_IDENTIFIER,
                    d.getValues(DocumentBuilder.FIELD_NAME_IDENTIFIER)[0]), d);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    iw.commit();
    // this one does the "old" commit(), it removes the deleted local features.
    iw.forceMerge(1);
    iw.close();
    System.out.println("Finished.");
}
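
The method above calls clusterForFeature(...), which is not part of this excerpt. A minimal sketch of such a nearest-centroid lookup, assuming plain Euclidean distance (the actual LIRE implementation may differ):

// Hypothetical nearest-centroid assignment over the cluster means.
private static int clusterForFeature(LireFeature f, List<double[]> clusters) {
    double[] hist = f.getDoubleHistogram();
    int best = 0;
    double bestDist = Double.MAX_VALUE;
    for (int i = 0; i < clusters.size(); i++) {
        double[] c = clusters.get(i);
        double d = 0;
        for (int k = 0; k < hist.length; k++) {
            double diff = hist[k] - c[k];
            d += diff * diff;
        }
        if (d < bestDist) {
            bestDist = d;
            best = i;
        }
    }
    return best;
}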

From source file:edu.nyu.vida.data_polygamy.ctdata.TopologicalIndex.java

public double getThreshold(Feature[] f) {

    KMeansPlusPlusClusterer<DoublePoint> kmeans = new KMeansPlusPlusClusterer<DoublePoint>(2, 1000);
    ArrayList<DoublePoint> pts = new ArrayList<DoublePoint>();

    if (f.length < 2) {
        return f[0].wt * 0.4;
    }
    for (int i = 0; i < f.length; i++) {
        DoublePoint dpt = new DoublePoint(new double[] { f[i].wt });
        pts.add(dpt);
    }
    List<CentroidCluster<DoublePoint>> clusters = kmeans.cluster(pts);

    double maxp = 0;
    double minp = 0;
    int ct = 0;
    for (CentroidCluster<DoublePoint> c : clusters) {
        double mp = 0;
        double mnp = Double.MAX_VALUE;
        for (DoublePoint dpt : c.getPoints()) {
            double[] pt = dpt.getPoint();
            mp = Math.max(mp, pt[0]);
            mnp = Math.min(mnp, pt[0]);
        }
        if (mp > maxp) {
            maxp = mp;
            minp = mnp;
        }
        ct++;
    }
    if (ct > 2) {
        Utilities.er("Can there be > 2 clusters?");
    }
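    // minp is now the smallest weight in the cluster that contains the largest
    // weight, i.e. a threshold separating the high-weight features from the rest.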
    return minp;
}

From source file:Clustering.technique.KMeansPlusPlusClusterer.java

/**
 * Computes the centroid for a set of points.
 * @param clusterable
 *
 * @param points the set of points
 * @param dimension the point dimension
 * @return the computed centroid for the set of points
 */
protected Clusterable centroidOf(Clusterable clusterable, final Collection<T> points, final int dimension) {
    final double[] centroid = new double[dimension];
    for (final T p : points) {
        final double[] point = p.getPoint();
        for (int i = 0; i < centroid.length; i++) {
            centroid[i] += point[i];
        }
    }
    for (int i = 0; i < centroid.length; i++) {
        centroid[i] /= points.size();
    }
    return new DoublePoint(centroid);
}

From source file:KMeansRecommender.MyKMeansPlusPlusClusterer.java

/**
 * Computes the medoid of a set of points: the point of the set whose summed
 * distance to all other points is minimal (not the arithmetic mean).
 *
 * @param points the set of points
 * @param dimension the point dimension
 * @return a new DoublePoint holding the coordinates of the medoid
 */
private Clusterable centroidOf(final Collection<T> points, final int dimension) {
    final double[] centroid = new double[dimension];
    DistanceMeasure measure = getDistanceMeasure();
    double minSumDistance = -1;
    T minPoint = null;

    for (final T p : points) {
        final double[] tempCenterPoint = p.getPoint();
        double tempSumDistance = 0;
        for (final T q : points) {
            tempSumDistance += measure.compute(tempCenterPoint, q.getPoint());
        }
        if (tempSumDistance < minSumDistance || minSumDistance == -1) {
            minSumDistance = tempSumDistance;
            minPoint = p;
        }
    }

    final double[] centerPoint = minPoint.getPoint();
    for (int i = 0; i < centroid.length; i++) {
        centroid[i] = centerPoint[i];
    }
    return new DoublePoint(centroid);
}

From source file:org.esa.s2tbx.s2msi.idepix.operators.cloudshadow.MyClustering.java

/**
 * Computes the centroid for a set of points.
 *
 * @param points    the set of points
 * @param dimension the point dimension
 * @return the computed centroid for the set of points
 */
private Clusterable centroidOf(final Collection<T> points, final int dimension) {
    final double[] centroid = new double[dimension];
    for (final T p : points) {
        final double[] point = p.getPoint();
        for (int i = 0; i < centroid.length; i++) {
            centroid[i] += point[i];
        }
    }
    for (int i = 0; i < centroid.length; i++) {
        centroid[i] /= points.size();
    }
    return new DoublePoint(centroid);
}

From source file:org.rhwlab.variationalbayesian.OldFaithfulDataSource.java

public List<CentroidCluster<Clusterable>> cluster(int K) {
    KMeansPlusPlusClusterer<Clusterable> clusterer = new KMeansPlusPlusClusterer<>(K);
    ArrayList<Clusterable> points = new ArrayList<>();
    for (RealVector v : data) {
        points.add(new DoublePoint(v.toArray()));
    }
    return clusterer.cluster(points);
}