Example usage for java.lang Double POSITIVE_INFINITY


Introduction

This page collects usage examples for java.lang Double.POSITIVE_INFINITY from open-source projects.

Prototype

public static final double POSITIVE_INFINITY


Document

A constant holding the positive infinity of type double. It is equal to the value returned by Double.longBitsToDouble(0x7ff0000000000000L).
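
The snippet below is a minimal standalone sketch (my own illustration, not from the projects listed here) of how the constant behaves: it compares greater than every finite double, arithmetic overflow saturates to it, and subtracting it from itself is indeterminate.

// Illustrative sketch of Double.POSITIVE_INFINITY semantics.
public class PositiveInfinityDemo {
    public static void main(String[] args) {
        double inf = Double.POSITIVE_INFINITY;
        System.out.println(inf > Double.MAX_VALUE);      // true: beyond every finite double
        System.out.println(1.0 / 0.0 == inf);            // true: division overflow, no exception
        System.out.println(Double.MAX_VALUE * 2 == inf); // true: multiplication overflow saturates
        System.out.println(inf + 1.0 == inf);            // true: infinity absorbs finite addends
        System.out.println(inf - inf);                   // NaN: indeterminate form
        System.out.println(Double.isInfinite(inf));      // true
    }
}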

Usage

From source file:com.github.rinde.rinsim.scenario.measure.MetricsTest.java

/**
 * Infinity is not accepted.
 */
@Test(expected = IllegalArgumentException.class)
public void testHistogramInvalidInput2() {
    Metrics.computeHistogram(asList(0d, Double.POSITIVE_INFINITY), 3);
}
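
A plausible reason for the rejection (my illustration, not the library's documented rationale): fixed-width binning of a range that includes infinity yields an infinite bin width, so no finite bin boundaries exist.

// Why an infinite value breaks fixed-width binning (illustrative only).
public class InfiniteBinDemo {
    public static void main(String[] args) {
        double min = 0d;
        double max = Double.POSITIVE_INFINITY;
        System.out.println((max - min) / 3); // Infinity: bin boundaries are undefined
    }
}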

From source file:gdsc.core.clustering.optics.OPTICSResult.java

/**
 * Extract clusters from the reachability distance profile.
 * <p>
 * The min points should be equal to the min points used during OPTICS. The xi parameter can be used to control
 * the steepness of the points a cluster starts with and ends with. Higher xi-values can be used to
 * find only the most significant clusters, lower xi-values to find less significant clusters.
 *
 * @param xi
 *            the clustering parameter (xi).
 * @param options
 *            the options
 */
public void extractClusters(double xi, int options) {
    final boolean topLevel = (options & XI_OPTION_TOP_LEVEL) != 0;
    final boolean noCorrect = (options & XI_OPTION_NO_CORRECT) != 0;
    final double ul = getUpperLimit();
    final double ll = getLowerLimit();
    final boolean useUpperLimit = (options & XI_OPTION_UPPER_LIMIT) != 0 && ul < Double.POSITIVE_INFINITY;
    final boolean useLowerLimit = (options & XI_OPTION_LOWER_LIMIT) != 0 && ll > 0;

    // This code is based on the original OPTICS paper and an R-implementation available here:
    // https://cran.r-project.org/web/packages/dbscan/ 
    // There is also a Java implementation within the ELKI project:
    // https://elki-project.github.io/
    // The ELKI project is used for JUnit testing this implementation.

    TurboList<SteepDownArea> setOfSteepDownAreas = new TurboList<SteepDownArea>();
    TurboList<OPTICSCluster> setOfClusters = new TurboList<OPTICSCluster>();
    int index = 0;
    // The maximum value between a certain point and the current index; Maximum-in-between (mib).
    double mib = 0;
    final int size = size();
    final double ixi = 1 - xi;
    // For simplicity we assume that the profile does not contain NaN values.
    // Positive infinity values are for points with no reachability distance.
    final double[] r = getReachabilityDistanceProfile(false);
    int clusterId = 0;
    resetClusterIds();
    while (valid(index, size)) {
        mib = Math.max(mib, r[index]);
        // The last point cannot be the start of a steep area so end.
        if (!valid(index + 1, size))
            break;
        // Test if this is a steep down area 
        if (steepDown(index, r, ixi)) {
            // The first reachable point must have a reachability at or below the upper limit
            if (useUpperLimit && r[index + 1] > ul) {
                // Not allowed so move on
                index++;
                continue;
            }
            // The first reachable point must have a reachability at or above the lower limit
            if (useLowerLimit && r[index + 1] < ll) {
                // Not allowed so move on
                index++;
                continue;
            }

            // Update mib values with current mib and filter
            updateFilterSDASet(mib, setOfSteepDownAreas, ixi);
            double startValue = r[index];
            mib = 0;
            int startSteep = index;
            int endSteep = index + 1;
            for (index++; valid(index, size); index++) {
                // Continue down the steep area
                if (steepDown(index, r, ixi)) {
                    endSteep = index + 1;
                    continue;
                }
                // Stop looking if not going downward or after minPts of non steep area
                if (!steepDown(index, r, 1) || index - endSteep > minPts) {
                    break;
                }
            }
            SteepDownArea sda = new SteepDownArea(startSteep, endSteep, startValue);
            //System.out.println("New " + sda);
            setOfSteepDownAreas.add(sda);
            continue;
        }
        if (steepUp(index, r, ixi)) {
            // The last reachable point must have a reachability at or below the upper limit
            if (useUpperLimit && r[index] > ul) {
                // Not allowed so move on
                index++;
                continue;
            }
            // The last reachable point must have a reachability at or above the lower limit
            if (useLowerLimit && r[index] < ll) {
                // Not allowed so move on
                index++;
                continue;
            }

            // Update mib values with current mib and filter
            updateFilterSDASet(mib, setOfSteepDownAreas, ixi);
            SteepUpArea sua;
            {
                int startSteep = index;
                int endSteep = index + 1;
                mib = r[index];
                double eSuccessor = getNextReachability(index, size, r);
                if (eSuccessor != Double.POSITIVE_INFINITY) {
                    for (index++; valid(index, size); index++) {
                        if (steepUp(index, r, ixi)) {
                            // The last reachable point must have a reachability at or below the upper limit
                            if (useUpperLimit && r[index] > ul) {
                                // Not allowed so end
                                break;
                            }
                            // The last reachable point must have a reachability at or above the lower limit.
                            // That check is irrelevant here: we are moving upward and are already above the limit.
                            //if (useLowerLimit && r[index] < ll)
                            //{
                            //   // Not allowed so end
                            //   break;
                            //}

                            endSteep = index + 1;
                            mib = r[index];
                            eSuccessor = getNextReachability(index, size, r);
                            if (eSuccessor == Double.POSITIVE_INFINITY) {
                                endSteep--;
                                break;
                            }
                            continue;
                        }
                        // Stop looking if not going upward or after minPts of non steep area
                        if (!steepUp(index, r, 1) || index - endSteep > minPts) {
                            break;
                        }
                    }
                } else {
                    endSteep--;
                    index++;
                }
                sua = new SteepUpArea(startSteep, endSteep, eSuccessor);
                //System.out.println("New " + sua);
            }
            // Note: mib currently holds the value at the end-of-steep-up
            final double threshold = mib * ixi;
            for (int i = setOfSteepDownAreas.size(); i-- > 0;) {
                final SteepDownArea sda = setOfSteepDownAreas.getf(i);

                // Condition 3B:
                // All points within the start-end are below min(r[start],r[end]) * (1-Xi).
                // Since each SDA stores the maximum point between it and the current point (stored in mib)
                // we only check the mib, i.e. maximum-in-between SDA <= end-of-steep-up * (1-Xi)
                //if (sda.mib > mib * ixi)
                if (sda.mib > threshold)
                    continue;

                // Default values 
                int cstart = sda.s;
                int cend = sua.e;

                // Credit to ELKI
                // NOT in original OPTICS article: never include infinity-reachable 
                // points at the end of the cluster.
                if (!noCorrect) {
                    while (cend > cstart && r[cend] == Double.POSITIVE_INFINITY) {
                        cend--;
                    }
                }

                // Condition 4
                {
                    // Case b
                    if (sda.maximum * ixi >= sua.maximum) {
                        while (cstart < cend && r[cstart + 1] > sua.maximum)
                            cstart++;
                    }
                    // Case c
                    else if (sua.maximum * ixi >= sda.maximum) {
                        while (cend > cstart && r[cend - 1] > sda.maximum)
                            cend--;
                    }
                    // Case a is the default
                }

                // This NOT in the original article - credit to ELKI for finding this.
                // See http://elki.dbs.ifi.lmu.de/browser/elki/elki/src/main/java/de/lmu/ifi/dbs/elki/algorithm/clustering/optics/OPTICSXi.java
                // Ensure that the predecessor is in the current cluster. This filter
                // removes common artifacts from the Xi method.
                if (!noCorrect) {
                    simplify: while (cend > cstart) {
                        int predecessor = get(cend).predecessor;
                        for (int c = cstart; c < cend; c++) {
                            if (predecessor == get(c).parent)
                                break simplify;
                        }
                        // Not found.
                        cend--;
                    }
                }

                // This is in the R code, but the reason for it is unclear, so it is left out:
                // ensure the last steep-up point is not included if it is xi-significant.
                //if (steepUp(index - 1, r, ixi))
                //{
                //   cend--;
                //}

                // Condition 3A: obey minpts 
                if (cend - cstart + 1 < minPts)
                    continue;

                // Build the cluster 
                clusterId++;
                OPTICSCluster cluster;
                if (topLevel) {
                    // Do not support nested hierarchy
                    // Search for children and remove them.
                    // Take the lowest cluster Id of the children.
                    int lowestId = clusterId;

                    final boolean[] remove = new boolean[setOfClusters.size()];
                    for (int ii = 0; ii < setOfClusters.size(); ii++) {
                        OPTICSCluster child = setOfClusters.getf(ii);
                        if (cstart <= child.start && child.end <= cend) {
                            if (lowestId > child.clusterId)
                                lowestId = child.clusterId;
                            remove[ii] = true;
                        }
                    }
                    // Assume the removeIf method will go linearly through the array
                    setOfClusters.removeIf(new RemovePredicate(remove));

                    clusterId = lowestId;
                    cluster = new OPTICSCluster(cstart, cend, clusterId);

                    // Assign all points
                    for (int ii = cstart; ii <= cend; ii++) {
                        get(ii).clusterId = clusterId;
                    }
                } else {
                    cluster = new OPTICSCluster(cstart, cend, clusterId);

                    // Assign all points not currently in a cluster (thus respecting the hierarchy)
                    for (int ii = cstart; ii <= cend; ii++) {
                        if (get(ii).clusterId == NOISE)
                            get(ii).clusterId = clusterId;
                    }

                    // Build the hierarchy of clusters
                    final boolean[] remove = new boolean[setOfClusters.size()];
                    for (int ii = 0; ii < setOfClusters.size(); ii++) {
                        OPTICSCluster child = setOfClusters.getf(ii);
                        if (cstart <= child.start && child.end <= cend) {
                            cluster.addChildCluster(child);
                            remove[ii] = true;
                        }
                    }
                    setOfClusters.removeIf(new RemovePredicate(remove));
                }
                setOfClusters.add(cluster);
                //System.out.printf("> %s\n", cluster); // Level not correct
            }
        } else {
            // Not steep so move on
            index++;
        }
    }

    // Finalise
    setClustering(new ArrayList<OPTICSCluster>(setOfClusters));

    //for (OPTICSCluster cluster : getAllClusters())
    //   System.out.printf("> %s\n", cluster);
}
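
The extraction above rests on two predicates from the OPTICS-xi method: a point starts a steep-down region when the next reachability drops by at least a factor of (1 - xi), and symmetrically for steep-up. A minimal standalone sketch of those tests (hypothetical helper names, not this project's code), treating POSITIVE_INFINITY as "no reachability distance" just as the profile above does:

// Illustrative steep-point tests from the OPTICS-xi method (hypothetical helpers).
public class XiSteepness {
    // Point i is xi-steep downward when the next reachability falls to at most r[i] * (1 - xi).
    // An infinite r[i] stays infinite after scaling (for 0 < xi < 1), so any finite successor qualifies.
    static boolean steepDown(double[] r, int i, double ixi) {
        return r[i + 1] <= r[i] * ixi;
    }

    // Point i is xi-steep upward when r[i] is at most (1 - xi) times the next reachability.
    static boolean steepUp(double[] r, int i, double ixi) {
        return r[i] <= r[i + 1] * ixi;
    }

    public static void main(String[] args) {
        double[] r = { Double.POSITIVE_INFINITY, 2.0, 0.5, 0.6, 2.5 };
        double ixi = 1 - 0.1; // xi = 0.1
        System.out.println(steepDown(r, 0, ixi)); // true: infinity -> 2.0
        System.out.println(steepDown(r, 1, ixi)); // true: 2.0 -> 0.5
        System.out.println(steepUp(r, 3, ixi));   // true: 0.6 -> 2.5
    }
}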

From source file:edu.cuny.cat.stat.HistoricalReport.java

public double getLowestUnacceptedAskPrice() {
    if (lowestUnmatchedAsk != null) {
        return lowestUnmatchedAsk.getPrice();
    }

    // Seed with positive infinity so the first unmatched ask always becomes the candidate.
    final Iterator<Shout> i = asks.iterator();
    double lowestUnacceptedAskPrice = Double.POSITIVE_INFINITY;
    while (i.hasNext()) {
        final Shout s = i.next();
        if (!isMatched(s)) {
            if (s.getPrice() < lowestUnacceptedAskPrice) {
                lowestUnacceptedAskPrice = s.getPrice();
                lowestUnmatchedAsk = s;
            }
        }
    }
    return lowestUnacceptedAskPrice;
}

From source file:edu.berkeley.path.bots.core.Coordinate.java

/**
 * Concatenates two lists of coordinates, reversing either list if necessary so
 * that the distance between the joined end points is minimized.
 *
 * @param l1 the first list of coordinates (may be null or empty)
 * @param l2 the second list of coordinates (may be null or empty)
 * @return the concatenated list
 */
public static List<Coordinate> greedyConcatenation(List<Coordinate> l1, List<Coordinate> l2) {
    if (l1 == null || l1.isEmpty())
        return l2;
    if (l2 == null || l2.isEmpty())
        return l1;
    final int n1 = l1.size();
    final int n2 = l2.size();
    Coordinate l1_first = l1.get(0);
    Coordinate l1_last = l1.get(n1 - 1);
    Coordinate l2_first = l2.get(0);
    Coordinate l2_last = l2.get(n2 - 1);

    // Try all four end-to-end orientations and keep the one with the smallest gap;
    // the infinite seed guarantees the first measured distance is accepted.
    double best = Double.POSITIVE_INFINITY;
    boolean r1 = true;
    boolean r2 = true;
    double d = l1_last.distanceDefaultMethodInMeters(l2_first);
    if (d < best) {
        r1 = false;
        r2 = false;
        best = d;
    }

    d = l1_last.distanceDefaultMethodInMeters(l2_last);
    if (d < best) {
        r1 = false;
        r2 = true;
        best = d;
    }

    d = l1_first.distanceDefaultMethodInMeters(l2_first);
    if (d < best) {
        r1 = true;
        r2 = false;
        best = d;
    }

    d = l1_first.distanceDefaultMethodInMeters(l2_last);
    if (d < best) {
        r1 = true;
        r2 = true;
        best = d;
    }

    List<Coordinate> ll = new ArrayList<Coordinate>();
    for (Coordinate c : (r1 ? Lists.reverse(l1) : l1))
        ll.add(c);

    for (Coordinate c : (r2 ? Lists.reverse(l2) : l2))
        ll.add(c);
    return ll;
}

From source file:org.obiba.mica.web.model.DatasetDtos.java

private Mica.DatasetVariableAggregationDto.Builder asDto(Math.ContinuousSummaryDto summary) {
    Mica.DatasetVariableAggregationDto.Builder aggDto = Mica.DatasetVariableAggregationDto.newBuilder();
    Math.DescriptiveStatsDto stats = summary.getSummary();

    aggDto.setN(Long.valueOf(stats.getN()).intValue());

    Mica.StatisticsDto.Builder builder = Mica.StatisticsDto.newBuilder();

    if (stats.hasSum())
        builder.setSum(Double.valueOf(stats.getSum()).floatValue());
    // A min of +infinity or a max of -infinity signals that no values were observed; skip such sentinels.
    if (stats.hasMin() && stats.getMin() != Double.POSITIVE_INFINITY)
        builder.setMin(Double.valueOf(stats.getMin()).floatValue());
    if (stats.hasMax() && stats.getMax() != Double.NEGATIVE_INFINITY)
        builder.setMax(Double.valueOf(stats.getMax()).floatValue());
    if (stats.hasMean() && !Double.isNaN(stats.getMean()))
        builder.setMean(Double.valueOf(stats.getMean()).floatValue());
    if (stats.hasSumsq() && !Double.isNaN(stats.getSumsq()))
        builder.setSumOfSquares(Double.valueOf(stats.getSumsq()).floatValue());
    if (stats.hasVariance() && !Double.isNaN(stats.getVariance()))
        builder.setVariance(Double.valueOf(stats.getVariance()).floatValue());
    if (stats.hasStdDev() && !Double.isNaN(stats.getStdDev()))
        builder.setStdDeviation(Double.valueOf(stats.getStdDev()).floatValue());

    aggDto.setStatistics(builder);

    int total = 0;
    if (summary.getFrequenciesCount() > 0) {
        for (Math.FrequencyDto freq : summary.getFrequenciesList()) {
            aggDto.addFrequencies(asDto(freq));
            total += freq.getFreq();
        }
    }
    aggDto.setTotal(total);

    return aggDto;
}

From source file:com.clust4j.algo.preprocess.PreProcessorTests.java

@Test
public void testWeightTransformer() {
    RealMatrix iris = TestSuite.IRIS_DATASET.getData();

    // first test on 1.0 weights, assert same.
    double[] weights = VecUtils.rep(1.0, 4);
    WeightTransformer wt = new WeightTransformer(weights).fit(iris);
    assertTrue(MatUtils.equalsExactly(iris, wt.transform(iris)));

    // assert on 0.0 all 0.0
    weights = VecUtils.rep(0.0, 4);
    wt = new WeightTransformer(weights).fit(iris);
    assertTrue(MatUtils.equalsExactly(new Array2DRowRealMatrix(new double[150][4], false), wt.transform(iris)));

    // assert that inv transform will create a matrix entirely of Infs...
    assertTrue(MatUtils.equalsExactly(
            new Array2DRowRealMatrix(MatUtils.rep(Double.POSITIVE_INFINITY, 150, 4), false),
            wt.inverseTransform(iris)));

    // assert dim mismatch on the fit, trans and inv trans methods.
    boolean a = false;
    try {
        wt.fit(TestSuite.getRandom(2, 2));
    } catch (DimensionMismatchException e) {
        a = true;
    } finally {
        assertTrue(a);
    }

    a = false;
    try {
        wt.transform(TestSuite.getRandom(2, 2));
    } catch (DimensionMismatchException e) {
        a = true;
    } finally {
        assertTrue(a);
    }

    a = false;
    try {
        wt.inverseTransform(TestSuite.getRandom(2, 2));
    } catch (DimensionMismatchException e) {
        a = true;
    } finally {
        assertTrue(a);
    }
}

From source file:com.AandR.beans.plotting.imagePlotPanel.CanvasPanel.java

private final double[] computeMinMax(double[][] data) {
    double[] minMax = new double[] { Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY };
    for (int j = 0; j < data[0].length; j++) {
        for (int i = 0; i < data.length; i++) {
            minMax[0] = data[i][j] < minMax[0] ? data[i][j] : minMax[0];
            minMax[1] = data[i][j] > minMax[1] ? data[i][j] : minMax[1];
        }
    }
    return minMax;
}
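
The infinity seeds mean any finite entry replaces them on the first comparison, so no special first-element case is needed. A standalone check of the same idiom (my own example):

// Infinity-seeded min/max scan, the same idiom as computeMinMax above.
public class MinMaxDemo {
    public static void main(String[] args) {
        double[][] data = { { 1.5, -2.0 }, { 7.0, 0.25 } };
        double[] minMax = { Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY };
        for (double[] row : data) {
            for (double v : row) {
                minMax[0] = Math.min(minMax[0], v);
                minMax[1] = Math.max(minMax[1], v);
            }
        }
        System.out.println(minMax[0] + " " + minMax[1]); // -2.0 7.0
    }
}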

From source file:ffx.xray.CrystalStats.java

/**
 * Print signal-to-noise ratio statistics.
 */
public void printSNStats() {
    double[][] res = new double[n][2];
    double[] nhkl = new double[n + 1];
    double[][] sn = new double[n + 1][3];

    for (int i = 0; i < n; i++) {
        res[i][0] = Double.NEGATIVE_INFINITY;
        res[i][1] = Double.POSITIVE_INFINITY;
    }

    for (HKL ih : reflectionlist.hkllist) {
        int i = ih.index();
        int b = ih.bin();

        // ignored cases
        if (Double.isNaN(fo[i][0]) || fo[i][1] <= 0.0) {
            continue;
        }

        // determine res limits of each bin
        double rs = Crystal.res(crystal, ih);
        if (rs > res[b][0]) {
            res[b][0] = rs;
        }
        if (rs < res[b][1]) {
            res[b][1] = rs;
        }

        // running mean
        nhkl[b]++;
        nhkl[n]++;
        sn[b][0] += (fo[i][0] - sn[b][0]) / nhkl[b];
        sn[b][1] += (fo[i][1] - sn[b][1]) / nhkl[b];
        sn[b][2] += ((fo[i][0] / fo[i][1]) - sn[b][2]) / nhkl[b];

        // The overall running means must use the overall counter nhkl[n], not the bin counter.
        sn[n][0] += (fo[i][0] - sn[n][0]) / nhkl[n];
        sn[n][1] += (fo[i][1] - sn[n][1]) / nhkl[n];
        sn[n][2] += ((fo[i][0] / fo[i][1]) - sn[n][2]) / nhkl[n];
    }

    StringBuilder sb = new StringBuilder(
            String.format("\n %15s | %7s | %7s | %7s \n", "Res. Range", "Signal", "Sigma", "S/N"));
    for (int i = 0; i < n; i++) {
        sb.append(String.format(" %7.3f %7.3f | ", res[i][0], res[i][1]));
        sb.append(String.format("%7.2f | %7.2f | %7.2f\n", sn[i][0], sn[i][1], sn[i][2]));
    }

    sb.append(String.format(" %7.3f %7.3f | ", res[0][0], res[n - 1][1]));
    sb.append(String.format("%7.2f | %7.2f | %7.2f", sn[n][0], sn[n][1], sn[n][2]));

    if (print) {
        logger.info(sb.toString());
    }
}
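
The "running mean" lines use incremental averaging: after the k-th sample, mean += (x - mean) / k yields exactly the arithmetic mean of the first k samples without storing them. A minimal standalone check (my own example):

// Incremental (running) mean, the same update used in printSNStats above.
public class RunningMeanDemo {
    public static void main(String[] args) {
        double[] xs = { 2.0, 4.0, 9.0 };
        double mean = 0.0;
        int k = 0;
        for (double x : xs) {
            k++;
            mean += (x - mean) / k; // mean of the first k samples
        }
        System.out.println(mean); // 5.0 == (2 + 4 + 9) / 3
    }
}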

From source file:edu.cmu.tetrad.search.FastIca.java

private TetradMatrix icaParallel(TetradMatrix X, int numComponents, double tolerance, int function,
        final double alpha, int maxIterations, boolean verbose, TetradMatrix wInit) {
    int p = X.columns();
    TetradMatrix W = wInit;

    SingularValueDecomposition sW = new SingularValueDecomposition(W.getRealMatrix());
    TetradMatrix D = new TetradMatrix(sW.getS());
    for (int i = 0; i < D.rows(); i++)
        D.set(i, i, 1.0 / D.get(i, i));

    TetradMatrix WTemp = new TetradMatrix(sW.getU()).times(D);
    WTemp = WTemp.times(new TetradMatrix(sW.getU()).transpose());
    WTemp = WTemp.times(W);
    W = WTemp;

    TetradMatrix W1;
    double _tolerance = Double.POSITIVE_INFINITY;
    int it = 0;

    if (function == LOGCOSH) {
        if (verbose) {
            TetradLogger.getInstance().log("info",
                    "Symmetric FastICA using logcosh approx. to neg-entropy function");
        }

        while (_tolerance > tolerance && it < maxIterations) {
            TetradMatrix wx = W.times(X);
            TetradMatrix gwx = new TetradMatrix(numComponents, p);

            for (int i = 0; i < numComponents; i++) {
                for (int j = 0; j < p; j++) {
                    gwx.set(i, j, Math.tanh(alpha * wx.get(i, j)));
                }
            }

            TetradMatrix v1 = gwx.times(X.transpose().copy().scalarMult(1.0 / p));
            TetradMatrix g_wx = gwx.copy();

            for (int i = 0; i < g_wx.rows(); i++) {
                for (int j = 0; j < g_wx.columns(); j++) {
                    double v = g_wx.get(i, j);
                    double w = alpha * (1.0 - v * v);
                    g_wx.set(i, j, w);
                }
            }

            TetradVector V20 = new TetradVector(numComponents);

            for (int k = 0; k < numComponents; k++) {
                V20.set(k, mean(g_wx.getRow(k)));
            }

            TetradMatrix v2 = V20.diag();
            v2 = v2.times(W);
            W1 = v1.minus(v2);

            SingularValueDecomposition sW1 = new SingularValueDecomposition(W1.getRealMatrix());
            TetradMatrix U = new TetradMatrix(sW1.getU());
            TetradMatrix sD = new TetradMatrix(sW1.getS());
            for (int i = 0; i < sD.rows(); i++)
                sD.set(i, i, 1.0 / sD.get(i, i));

            TetradMatrix W1Temp = U.times(sD);
            W1Temp = W1Temp.times(U.transpose());
            W1Temp = W1Temp.times(W1);
            W1 = W1Temp;

            TetradMatrix d1 = W1.times(W.transpose());
            TetradVector d = d1.diag();
            _tolerance = Double.NEGATIVE_INFINITY;

            for (int i = 0; i < d.size(); i++) {
                double m = Math.abs(Math.abs(d.get(i)) - 1);
                if (m > _tolerance)
                    _tolerance = m;
            }

            W = W1;

            if (verbose) {
                TetradLogger.getInstance().log("fastIcaDetails",
                        "Iteration " + (it + 1) + " tol = " + _tolerance);
            }

            it++;
        }
    } else if (function == EXP) {
        if (verbose) {
            TetradLogger.getInstance().log("info",
                    "Symmetric FastICA using exponential approx. to neg-entropy function");
        }

        while (_tolerance > tolerance && it < maxIterations) {
            TetradMatrix wx = W.times(X);
            TetradMatrix gwx = new TetradMatrix(numComponents, p);

            for (int i = 0; i < numComponents; i++) {
                for (int j = 0; j < p; j++) {
                    double v = wx.get(i, j);
                    gwx.set(i, j, v * Math.exp(-(v * v) / 2.0));
                }
            }

            // Likely intended 1.0 / p (an expectation over samples), matching the logcosh branch above.
            TetradMatrix v1 = gwx.times(X.transpose().scalarMult(1.0 / p));
            TetradMatrix g_wx = wx.copy();

            for (int i = 0; i < g_wx.rows(); i++) {
                for (int j = 0; j < g_wx.columns(); j++) {
                    double v = g_wx.get(i, j);
                    double w = (1.0 - v * v) * Math.exp(-(v * v) / 2.0);
                    g_wx.set(i, j, w);
                }
            }

            TetradVector V20 = new TetradVector(numComponents);

            for (int k = 0; k < numComponents; k++) {
                V20.set(k, mean(g_wx.getRow(k)));
            }

            TetradMatrix v2 = V20.diag();
            v2 = v2.times(W);
            W1 = v1.minus(v2);

            SingularValueDecomposition sW1 = new SingularValueDecomposition(W1.getRealMatrix());
            TetradMatrix U = new TetradMatrix(sW1.getU());
            TetradMatrix sD = new TetradMatrix(sW1.getS());
            for (int i = 0; i < sD.rows(); i++)
                sD.set(i, i, 1.0 / sD.get(i, i));

            TetradMatrix W1Temp = U.times(sD);
            W1Temp = W1Temp.times(U.transpose());
            W1Temp = W1Temp.times(W1);
            W1 = W1Temp;

            TetradMatrix d1 = W1.times(W.transpose());
            TetradVector d = d1.diag();
            _tolerance = Double.NEGATIVE_INFINITY;

            for (int i = 0; i < d.size(); i++) {
                double m = Math.abs(Math.abs(d.get(i)) - 1);
                if (m > _tolerance)
                    _tolerance = m;
            }

            W.assign(W1);

            if (verbose) {
                TetradLogger.getInstance().log("fastIcaDetails",
                        "Iteration " + (it + 1) + " tol = " + _tolerance);
            }

            it++;
        }
    }

    return W;
}
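
Seeding _tolerance with POSITIVE_INFINITY guarantees that a while (error > tolerance) loop runs at least once, since infinity exceeds any finite threshold. A tiny standalone illustration of the same pattern (my own example, using Newton's method):

// Infinity-seeded convergence loop, the same pattern as icaParallel above.
public class ConvergenceDemo {
    public static void main(String[] args) {
        double target = 2.0; // find sqrt(2)
        double x = 1.0;      // initial guess
        double error = Double.POSITIVE_INFINITY; // forces at least one iteration
        int it = 0;
        while (error > 1e-12 && it < 100) {
            double next = 0.5 * (x + target / x); // Newton/Babylonian step
            error = Math.abs(next - x);
            x = next;
            it++;
        }
        System.out.println(x); // ~1.4142135623730951
    }
}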