Example usage for java.lang Double POSITIVE_INFINITY

List of usage examples for java.lang Double POSITIVE_INFINITY

Introduction

On this page you can find example usages of java.lang.Double.POSITIVE_INFINITY.

Prototype

double POSITIVE_INFINITY

Document

A constant holding the positive infinity of type double.
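
The snippet below is a minimal, self-contained sketch (the class name Demo is ours, not from any of the sources) showing the behavior the examples on this page rely on: positive infinity compares greater than every finite double, survives ordinary arithmetic, and turns into NaN only in indeterminate forms.

public class Demo {
    public static void main(String[] args) {
        double inf = Double.POSITIVE_INFINITY;
        System.out.println(inf > Double.MAX_VALUE);   // true: greater than any finite double
        System.out.println(1.0 / 0.0 == inf);         // true: division by zero yields infinity, no exception
        System.out.println(inf + 1.0);                // Infinity: arithmetic propagates it
        System.out.println(Double.isNaN(inf - inf));  // true: inf - inf is indeterminate, so NaN
    }
}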

Usage

From source file:com.clust4j.algo.KMeans.java

@Override
protected KMeans fit() {
    synchronized (fitLock) {

        if (null != labels) // already fit
            return this;

        final LogTimer timer = new LogTimer();
        final double[][] X = data.getData();
        final int n = data.getColumnDimension();
        final double nan = Double.NaN;

        // Corner case: K = 1 or all singular values
        if (1 == k) {
            labelFromSingularK(X);
            fitSummary.add(new Object[] { iter, converged, tss, tss, nan, timer.wallTime() });
            sayBye(timer);
            return this;
        }

        // Nearest centroid model to predict labels
        NearestCentroid model = null;
        EntryPair<int[], double[]> label_dist;

        // Keep track of WSS (within-cluster sum of squared distances)
        double last_wss_sum = Double.POSITIVE_INFINITY, wss_sum = 0;
        ArrayList<double[]> new_centroids;

        for (iter = 0; iter < maxIter; iter++) {

            // Get labels for nearest centroids
            try {
                model = new NearestCentroid(CentroidUtils.centroidsToMatrix(centroids, false),
                        VecUtils.arange(k), new NearestCentroidParameters().setSeed(getSeed())
                                .setMetric(getSeparabilityMetric()).setVerbose(false)).fit();
            } catch (NaNException NaN) {
                /*
                 * If the metric used produces lots of infs or -infs, it
                 * makes it hard if not impossible to effectively segment the
                 * input space. Thus, the centroid assignment portion below can
                 * yield a zero count (denominator) for one or more of the centroids
                 * which makes the entire row NaN. We should tell the user to
                 * try a different metric, if that's the case.
                 *
                error(new IllegalClusterStateException(dist_metric.getName()+" produced an entirely " +
                  "infinite distance matrix, making it difficult to segment the input space. Try a different " +
                  "metric."));
                 */
                this.k = 1;
                warn("(dis)similarity metric (" + dist_metric
                        + ") cannot partition space without propagating Infs. Returning one cluster");

                labelFromSingularK(X);
                fitSummary.add(new Object[] { iter, converged, tss, tss, nan, timer.wallTime() });
                sayBye(timer);
                return this;
            }

            label_dist = model.predict(X);

            // unpack the EntryPair
            labels = label_dist.getKey();
            new_centroids = new ArrayList<>(k);

            int label;
            wss = new double[k];
            int[] centroid_counts = new int[k];
            double[] centroid;
            double[][] new_centroid_arrays = new double[k][n];
            for (int i = 0; i < m; i++) {
                label = labels[i];
                centroid = centroids.get(label);

                // increment count for this centroid
                double this_cost = 0;
                centroid_counts[label]++;
                for (int j = 0; j < centroid.length; j++) {
                    double diff = X[i][j] - centroid[j];
                    this_cost += (diff * diff);

                    // Add to the centroid sums
                    new_centroid_arrays[label][j] += X[i][j];
                }

                // add this cost to the WSS
                wss[label] += this_cost;
            }

            // one pass of K for some consolidation
            wss_sum = 0;
            for (int i = 0; i < k; i++) {
                wss_sum += wss[i];

                for (int j = 0; j < n; j++) // meanify
                    new_centroid_arrays[i][j] /= (double) centroid_counts[i];

                new_centroids.add(new_centroid_arrays[i]);
            }

            // update the BSS
            bss = tss - wss_sum;

            // Assign new centroids
            double diff = last_wss_sum - wss_sum;
            last_wss_sum = wss_sum;

            // Check for convergence and add summary:
            converged = FastMath.abs(diff) < tolerance; // first iter will be inf
            fitSummary.add(
                    new Object[] { converged ? iter++ : iter, converged, tss, wss_sum, bss, timer.wallTime() });

            if (converged) {
                break;
            } else {
                // otherwise, reassign centroids
                centroids = new_centroids;
            }

        } // end iterations

        // Reorder the labels, centroids and wss indices
        reorderLabelsAndCentroids();

        if (!converged)
            warn("algorithm did not converge");

        // wrap things up, create summary..
        sayBye(timer);

        return this;
    }

}
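
A pattern worth calling out in this example (it recurs in several snippets below): last_wss_sum is seeded with Double.POSITIVE_INFINITY so the first iteration's convergence test cannot spuriously pass, since the first difference from infinity is itself infinite. The same seeding trick drives best-so-far searches; a minimal sketch of the idiom, where candidates and the cost function score() are hypothetical stand-ins:

double best = Double.POSITIVE_INFINITY;
int bestIndex = -1;
for (int i = 0; i < candidates.length; i++) {
    double cost = score(candidates[i]); // hypothetical per-candidate cost
    if (cost < best) {                  // any finite cost beats the infinity seed
        best = cost;
        bestIndex = i;
    }
}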

From source file:net.myrrix.common.random.RandomUtils.java

/**
 * @param dimensions dimensionality of resulting vector
 * @param farFrom vectors that the chosen vector should be "far from" -- not in the same direction as
 * @param random random number generator to use
 * @return a vector of length 1 over the given number of dimensions, whose direction is chosen uniformly
 *   at random (that is: a point chosen uniformly at random on the unit hypersphere), but preferring
 *   those not in the same direction as a set of existing vectors
 */
public static float[] randomUnitVectorFarFrom(int dimensions, List<float[]> farFrom, RandomGenerator random) {
    int size = farFrom.size();
    int numSamples = FastMath.min(100, size);
    float[] vector = new float[dimensions];
    boolean accepted = false;
    while (!accepted) {
        doRandomUnitVector(vector, random);
        double smallestDistSquared = Double.POSITIVE_INFINITY;
        for (int sample = 0; sample < numSamples; sample++) {
            float[] other = farFrom.get(size == numSamples ? sample : random.nextInt(size));
            // dot is the cosine here since both are unit vectors
            double distSquared = 2.0 - 2.0 * SimpleVectorMath.dot(vector, other);
            if (LangUtils.isFinite(distSquared) && distSquared < smallestDistSquared) {
                smallestDistSquared = distSquared;
            }
        }
        // Second condition covers 1-D case, where there are only 2 distinct unit vectors. If both have
        // been generated, keep accepting either of them.
        if (LangUtils.isFinite(smallestDistSquared) && !(dimensions == 1 && smallestDistSquared == 0.0)) {
            // Choose with probability proportional to squared distance, a la kmeans++ centroid selection
            double acceptProbability = smallestDistSquared / 4.0; // dist squared is in [0,4]
            accepted = random.nextDouble() < acceptProbability;
        } else {
            // kind of a default
            accepted = true;
        }
    }
    return vector;
}

From source file:org.jfree.data.statistics.DefaultStatisticalCategoryDatasetTest.java

/**
 * Some checks for the getRangeBounds() method.
 */
@Test
public void testGetRangeBounds() {
    DefaultStatisticalCategoryDataset d = new DefaultStatisticalCategoryDataset();

    // an empty dataset should return null for bounds
    assertNull(d.getRangeBounds(true));

    // try a dataset with a single value
    d.add(4.5, 1.0, "R1", "C1");
    assertEquals(new Range(4.5, 4.5), d.getRangeBounds(false));
    assertEquals(new Range(3.5, 5.5), d.getRangeBounds(true));

    // try a dataset with two values
    d.add(0.5, 2.0, "R1", "C2");
    assertEquals(new Range(0.5, 4.5), d.getRangeBounds(false));
    assertEquals(new Range(-1.5, 5.5), d.getRangeBounds(true));

    // try a Double.NaN
    d.add(Double.NaN, 0.0, "R1", "C3");
    assertEquals(new Range(0.5, 4.5), d.getRangeBounds(false));
    assertEquals(new Range(-1.5, 5.5), d.getRangeBounds(true));

    // try a Double.NEGATIVE_INFINITY
    d.add(Double.NEGATIVE_INFINITY, 0.0, "R1", "C3");
    assertEquals(new Range(Double.NEGATIVE_INFINITY, 4.5), d.getRangeBounds(false));
    assertEquals(new Range(Double.NEGATIVE_INFINITY, 5.5), d.getRangeBounds(true));

    // try a Double.POSITIVE_INFINITY
    d.add(Double.POSITIVE_INFINITY, 0.0, "R1", "C3");
    assertEquals(new Range(0.5, Double.POSITIVE_INFINITY), d.getRangeBounds(false));
    assertEquals(new Range(-1.5, Double.POSITIVE_INFINITY), d.getRangeBounds(true));
}

From source file:com.cloudera.oryx.app.serving.als.model.LocalitySensitiveHash.java

LocalitySensitiveHash(double sampleRate, int numFeatures, int numCores) {

    // How many hash functions to use? Use as few as possible that still achieve the desired sample
    // rate or less, approximately.
    int numHashes = 0;
    int bitsDiffering = 0;
    for (; numHashes < MAX_HASHES; numHashes++) {

        // For a given number of hashes, consider partitions differing from the target hash in how many bits?
        // Choose enough such that number to test is as large as possible while <= the number of cores
        bitsDiffering = 0;
        // Number of different partitions that are examined when allowing the given number of bits to differ
        long numPartitionsToTry = 1;
        // Make bitsDiffering as large as possible given number of cores
        while (bitsDiffering < numHashes && numPartitionsToTry < numCores) {
            // There are numHashes-choose-bitsDiffering ways for numHashes bits to differ in
            // exactly bitsDiffering bits
            bitsDiffering++;
            numPartitionsToTry += CombinatoricsUtils.binomialCoefficient(numHashes, bitsDiffering);
        }
        // Note that this allows numPartitionsToTry to overshoot numCores by one step

        if (bitsDiffering == numHashes && numPartitionsToTry < numCores) {
            // Can't keep busy enough; keep going
            continue;
        }

        // Consider what fraction of all 2^n partitions is then considered, as a proxy for the
        // sample rate
        // Stop as soon as it's <= target sample rate
        if (numPartitionsToTry <= sampleRate * (1L << numHashes)) {
            break;
        }
    }

    log.info("LSH with {} hashes, querying partitions with up to {} bits differing", numHashes, bitsDiffering);
    this.maxBitsDiffering = bitsDiffering;
    hashVectors = new float[numHashes][];

    RandomGenerator random = RandomManager.getRandom();
    for (int i = 0; i < numHashes; i++) {
        // Pick the most-orthogonal next random vector
        double bestTotalDot = Double.POSITIVE_INFINITY;
        float[] nextBest = null;
        // Try, like, lots of them
        int candidatesSinceBest = 0;
        while (candidatesSinceBest < 1000) {
            float[] candidate = VectorMath.randomVectorF(numFeatures, random);
            // measure by total (absolute) dot product
            double score = totalAbsCos(hashVectors, i, candidate);
            if (score < bestTotalDot) {
                nextBest = candidate;
                // Stop if best possible score
                if (score == 0.0) {
                    break;
                }
                bestTotalDot = score;
                candidatesSinceBest = 0;
            } else {
                candidatesSinceBest++;
            }
        }
        hashVectors[i] = nextBest;
    }
    log.info("Chose {} random hash vectors", hashVectors.length);

    // Contains all 2^numHashes integers from 0. The first element has 0 bits set. The next numHashes elements
    // are all such integers with 1 bit sets. Then 2 bits, and so on. This is used as a "mask" on top of an
    // initial candidate index in order to construct results in getCandidateIndices()
    candidateIndicesPrototype = new int[1 << numHashes];
    int[] offsetPerBitsActive = new int[numHashes + 1];
    for (int i = 1; i <= numHashes; i++) {
        offsetPerBitsActive[i] = offsetPerBitsActive[i - 1]
                + (int) CombinatoricsUtils.binomialCoefficient(numHashes, i - 1);
    }
    for (int i = 0; i < candidateIndicesPrototype.length; i++) {
        candidateIndicesPrototype[offsetPerBitsActive[Integer.bitCount(i)]++] = i;
    }

    // Contains all 2^numHashes integers from 0
    allIndices = new int[1 << numHashes];
    for (int i = 0; i < allIndices.length; i++) {
        allIndices[i] = i;
    }
}
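
For intuition on the prototype array built at the end: with numHashes = 3, offsetPerBitsActive works out to {0, 1, 4, 7}, and the loop scatters the 8 indices by bit count, yielding candidateIndicesPrototype = {0, 1, 2, 4, 3, 5, 6, 7}: first the zero-bit index, then the three one-bit masks, then the three two-bit masks, then 7.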

From source file:ffx.algorithms.Stochastic.java

/**
 * Set the stochastic dynamics time-step.
 *
 * @param dt the time step.
 */
@Override
public void setTimeStep(double dt) {
    this.dt = dt;
    fdt = friction * dt;
    efdt = exp(-fdt);
    if (friction >= 0) {
        inverseFriction = 1.0 / friction;
    } else {
        inverseFriction = Double.POSITIVE_INFINITY;
    }
}
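
One subtlety in this branch: Java floating-point division never throws, so when friction is exactly zero the then-branch already evaluates 1.0 / 0.0 to Double.POSITIVE_INFINITY; the explicit else only matters for negative friction. A two-line check of that IEEE 754 behavior:

System.out.println(1.0 / 0.0);  // Infinity: division by zero does not throw
System.out.println(1.0 / -0.0); // -Infinity: the sign of zero carries through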

From source file:edu.uc.rphash.tests.kmeanspp.KMeansPlusPlus.java

/**
 * Runs the K-means++ clustering algorithm.
 *
 * @param points the points to cluster
 * @param k the number of clusters to split the data into
 * @param numTrials number of trial runs
 * @param maxIterationsPerTrial the maximum number of iterations to run the algorithm
 *     for at each trial run.  If negative, no maximum will be used
 * @return a list of clusters containing the points
 * @throws MathIllegalArgumentException if the data points are null or the number
 *     of clusters is larger than the number of data points
 * @throws ConvergenceException if an empty cluster is encountered and the
 * {@link #emptyStrategy} is set to {@code ERROR}
 */
public List<Cluster<T>> cluster(final Collection<T> points, final int k, int numTrials,
        int maxIterationsPerTrial) throws Exception {

    // at first, we have not found any clusters list yet
    List<Cluster<T>> best = null;
    double bestVarianceSum = Double.POSITIVE_INFINITY;

    // do several clustering trials
    for (int i = 0; i < numTrials; ++i) {

        // compute a clusters list
        List<Cluster<T>> clusters = cluster(points, k, maxIterationsPerTrial);

        // compute the variance of the current list
        double varianceSum = 0.0;
        for (final Cluster<T> cluster : clusters) {
            if (!cluster.getPoints().isEmpty()) {

                // compute the distance variance of the current cluster
                final T center = cluster.getCenter();

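                // Welford's online algorithm: running count n, mean, and sum of squared deviations M2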
                double n = 0;
                double mean = 0;
                double M2 = 0;
                for (final T point : cluster.getPoints()) {
                    double x = point.distanceFrom(center);
                    n++;
                    double delta = x - mean;
                    mean = mean + delta / n;
                    M2 = M2 + delta * (x - mean);
                }
                varianceSum += M2 / (n - 1f);

                //                    final Variance stat = new Variance();
                //                    for (final T point : cluster.getPoints()) {
                //                        stat.increment(point.distanceFrom(center));
                //                    }
                //                    varianceSum += stat.getResult();

            }
        }

        if (varianceSum <= bestVarianceSum) {
            // this one is the best we have found so far, remember it
            best = clusters;
            bestVarianceSum = varianceSum;
        }

    }

    // return the best clusters list found
    return best;

}

From source file:classif.pukmeans.KMeansCachedSymbolicSequence.java

public void cluster() {

    Sequence[] initialCenters = new Sequence[nbClusters];
    affectation = new ArrayList[nbClusters];

    // init
    for (int k = 0; k < affectation.length; k++) {
        affectation[k] = new ArrayList<Integer>();
    }

    // pickup centers
    int[] selected = randGen.nextPermutation(data.size(), nbClusters);
    for (int i = 0; i < selected.length; i++) {
        initialCenters[i] = data.get(selected[i]);
    }
    wcss = 0.0;
    // first affectation
    for (int j = 0; j < data.size(); j++) {

        double minDist = Double.MAX_VALUE;
        int bestK = -1;
        // for each cluster k
        for (int k = 0; k < initialCenters.length; k++) {
            // distance between cluster k and data point j
            double currentDist = initialCenters[k].distance(data.get(j));
            if (currentDist < minDist) {
                bestK = k;
                minDist = currentDist;
            }
        }
        wcss += minDist * minDist;
        // assign data point j to its nearest cluster
        affectation[bestK].add(j);
    }

    // for each iteration i
    for (int i = 0; i < 15; i++) {

        ArrayList<Integer>[] newAffectation = new ArrayList[nbClusters];
        // init
        for (int k = 0; k < newAffectation.length; k++) {
            newAffectation[k] = new ArrayList<Integer>();
        }
        wcss = 0.0;
        // reassign element to cluster
        for (int j = 0; j < data.size(); j++) {
            int bestK = -1;
            double bestDist = Double.POSITIVE_INFINITY;
            // for each cluster k
            for (int k = 0; k < nbClusters; k++) {
                if (affectation[k].size() == 0)
                    continue;
                double distToK = 0.0;
                for (Integer elIndex : affectation[k]) {
                    double tmpDist = distances[j][elIndex];
                    distToK += tmpDist;//TODO squared??
                }
                distToK /= affectation[k].size();

                if (distToK < bestDist) {
                    bestDist = distToK;
                    bestK = k;
                }

            }
            wcss += bestDist * bestDist;

            newAffectation[bestK].add(j);
        }

        affectation = newAffectation;

    }

    //find prototypes for classifier
    centers = new Sequence[nbClusters];
    for (int k = 0; k < nbClusters; k++) {
        if (affectation[k].size() == 0) {
            centers[k] = null;
        } else {
            int medoidIndex = Sequences.medoidIndex(affectation[k], distances);
            Sequence medoid = data.get(medoidIndex);

            Sequence[] sequenceTab = new Sequence[affectation[k].size()];
            for (int i = 0; i < sequenceTab.length; i++) {
                sequenceTab[i] = data.get(affectation[k].get(i));
            }
            centers[k] = Sequences.meanWithMedoid(medoid, sequenceTab);
        }
    }

}
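
One detail worth contrasting in this example: the first assignment pass seeds its minimum with Double.MAX_VALUE, while the reassignment loop uses Double.POSITIVE_INFINITY. Both work when all distances are finite, but the infinity seed is the safer sentinel: a distance exactly equal to Double.MAX_VALUE (or an infinite one) would never beat a MAX_VALUE seed under strict less-than. A small illustration:

double hugeDist = Double.MAX_VALUE;
System.out.println(hugeDist < Double.MAX_VALUE);         // false: the candidate is silently dropped
System.out.println(hugeDist < Double.POSITIVE_INFINITY); // true: any finite distance beats the infinity seed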

From source file:iDynoOptimizer.MOEAFramework26.src.org.moeaframework.util.Timing.java

/**
 * Prints the relative magnitudes of the collected timer data to the
 * specified {@link PrintStream}.
 * 
 * @param out the stream to which data is printed
 */
public static void printMagnitudes(PrintStream out) {
    double min = Double.POSITIVE_INFINITY;

    for (Map.Entry<String, SummaryStatistics> entry : data.entrySet()) {
        min = Math.min(min, entry.getValue().getMean());
    }

    for (Map.Entry<String, SummaryStatistics> entry : data.entrySet()) {
        out.print(entry.getKey());
        out.print(": ");
        out.print(entry.getValue().getMean() / min);
        out.println();
    }
}

From source file:com.analog.lyric.dimple.factorfunctions.MultinomialEnergyParameters.java

@Override
public final double evalEnergy(Value[] arguments) {
    int index = 0;
    if (!_NParameterConstant) {
        _N = arguments[index++].getInt(); // First argument is N parameter
        if (_N < 0)
            return Double.POSITIVE_INFINITY;
        _negativeLogFactorialN = -org.apache.commons.math3.special.Gamma.logGamma(_N + 1);
    }

    for (int i = 0; i < _dimension; i++)
        _alpha[i] = arguments[index++].getDouble(); // Next _dimension arguments are vector of Alpha parameters

    if (arguments.length - index != _dimension)
        throw new DimpleException(
                "Number of count variables must equal the dimension of the parameter vector.");

    int countSum = 0;
    double parameterSum = 0;
    double sum = _negativeLogFactorialN;
    for (int i = 0; i < _dimension; i++) {
        final double alphai = _alpha[i];
        parameterSum += Math.exp(-alphai);

        final int x = arguments[index++].getInt(); // Remaining arguments are discrete count variables
        if (x < 0)
            return Double.POSITIVE_INFINITY;
        countSum += x;

        sum += x * alphai + org.apache.commons.math3.special.Gamma.logGamma(x + 1);
    }
    if (countSum != _N)
        return Double.POSITIVE_INFINITY;

    final double energy = sum + _N * Math.log(parameterSum);
    if (energy != energy) // Faster isNaN
        return Double.POSITIVE_INFINITY;

    return energy;
}
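
The final check, energy != energy, exploits the fact that NaN is the only double value not equal to itself, so the test is true exactly when energy is NaN. It is a hand-inlined spelling of Double.isNaN(energy), which is itself implemented as v != v. A quick confirmation:

double nan = 0.0 * Double.POSITIVE_INFINITY; // 0 times infinity is indeterminate, so NaN
System.out.println(nan != nan);              // true: NaN is never equal to itself
System.out.println(Double.isNaN(nan));       // true: the readable equivalent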

From source file:com.aliyun.odps.ship.common.RecordConverter.java

/**
 * Converts a tunnel record to a byte[][] array.
 */
public byte[][] format(Record r) throws UnsupportedEncodingException {

    int cols = schema.getColumns().size();
    byte[][] line = new byte[cols][];
    byte[] colValue = null;
    for (int i = 0; i < cols; i++) {

        OdpsType t = schema.getColumn(i).getType();
        switch (t) {
        case BIGINT: {
            Long v = r.getBigint(i);
            colValue = v == null ? null : v.toString().getBytes(defaultCharset);
            break;
        }
        case DOUBLE: {
            Double v = r.getDouble(i);
            if (v == null) {
                colValue = null;
            } else if (v.equals(Double.POSITIVE_INFINITY) || v.equals(Double.NEGATIVE_INFINITY)) {
                colValue = v.toString().getBytes(defaultCharset);
            } else {
                colValue = doubleFormat.format(v).replaceAll(",", "").getBytes(defaultCharset);
            }
            break;
        }
        case DATETIME: {
            Date v = r.getDatetime(i);
            if (v == null) {
                colValue = null;
            } else {
                colValue = dateFormatter.format(v).getBytes(defaultCharset);
            }
            break;
        }
        case BOOLEAN: {
            Boolean v = r.getBoolean(i);
            colValue = v == null ? null : v.toString().getBytes(defaultCharset);
            break;
        }
        case STRING: {
            byte[] v = r.getBytes(i);
            if (v == null) {
                colValue = null;
            } else if (Util.isIgnoreCharset(charset)) {
                colValue = v;
            } else {
                // data at ODPS side is always utf-8
                colValue = new String(v, Constants.REMOTE_CHARSET).getBytes(charset);
            }
            break;
        }
        case DECIMAL: {
            BigDecimal v = r.getDecimal(i);
            colValue = v == null ? null : v.toPlainString().getBytes(defaultCharset);
            break;
        }
        default:
            throw new RuntimeException("Unknown column type: " + t);
        }

        if (colValue == null) {
            line[i] = nullBytes;
        } else {
            line[i] = colValue;
        }
    }
    return line;
}
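
Note the comparison style in the DOUBLE case: v is a boxed Double, so v.equals(Double.POSITIVE_INFINITY) autoboxes the constant before comparing. For the infinities this agrees with the primitive == comparison, but the two diverge elsewhere: equals() follows bit-pattern semantics, treating NaN as equal to NaN and distinguishing 0.0 from -0.0, the opposite of IEEE 754 comparison. A short demonstration:

Double boxedInf = Double.POSITIVE_INFINITY;
System.out.println(boxedInf.equals(Double.POSITIVE_INFINITY)); // true: identical bit patterns
System.out.println(boxedInf == Double.POSITIVE_INFINITY);      // true: boxedInf unboxes for the comparison
Double boxedNaN = Double.NaN;
System.out.println(boxedNaN.equals(Double.NaN)); // true: equals() compares bit patterns
System.out.println(Double.NaN == Double.NaN);    // false: IEEE 754 says NaN is unordered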