Example usage for java.lang Double isInfinite

Introduction

This page collects example usages of java.lang.Double.isInfinite from open source projects.

Prototype

public static boolean isInfinite(double v) 

Document

Returns true if the specified number is infinitely large in magnitude, false otherwise.
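
A minimal sketch illustrating the semantics (not taken from any of the projects below): only the two infinite values satisfy the check, while NaN and large-but-finite values do not.

public class IsInfiniteDemo {
    public static void main(String[] args) {
        System.out.println(Double.isInfinite(1.0 / 0.0));                // true  (positive infinity)
        System.out.println(Double.isInfinite(Double.NEGATIVE_INFINITY)); // true
        System.out.println(Double.isInfinite(Double.NaN));               // false (NaN is not infinite)
        System.out.println(Double.isInfinite(Double.MAX_VALUE));         // false (large but finite)
    }
}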

Usage

From source file:org.geotools.data.couchdb.CouchDBFeatureStore.java

private Envelope getBBox(Query query) {
    Envelope envelope = (Envelope) query.getFilter().accept(ExtractBoundsFilterVisitor.BOUNDS_VISITOR, null);
    if (envelope == null || envelope.isNull() || Double.isInfinite(envelope.getArea())) {
        envelope = new Envelope(-180, 180, -90, 90);
    }
    return envelope;
}
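
The guard covers three degenerate cases: a missing envelope, an empty one, and one whose area is infinite (an unbounded filter). All of them fall back to the whole WGS84 world. A minimal sketch of the infinite-area case, assuming the JTS Envelope API:

Envelope unbounded = new Envelope(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, -90, 90);
System.out.println(Double.isInfinite(unbounded.getArea())); // true -> falls back to (-180, 180, -90, 90)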

From source file:com.ml.ira.algos.TrainLogistic.java

static void mainToOutput(String[] args, PrintWriter output) throws Exception {
    if (parseArgs(args)) {
        double logPEstimate = 0;
        int samples = 0;

        System.out.println("fieldNames: " + fieldNames);
        long ts = System.currentTimeMillis();
        CsvRecordFactory csv = lmp.getCsvRecordFactory();
        OnlineLogisticRegression lr = lmp.createRegression();
        for (int pass = 0; pass < passes; pass++) {
            System.out.println("at Round: " + pass);
            BufferedReader in = open(inputFile);
            try {
                // read variable names
                String line;
                if (fieldNames != null && fieldNames.length() > 0) {
                    csv.firstLine(fieldNames);
                } else {
                    csv.firstLine(in.readLine());
                }
                line = in.readLine();
                while (line != null) {
                    // for each new line, get target and predictors
                    Vector input = new RandomAccessSparseVector(lmp.getNumFeatures());
                    int targetValue = csv.processLine(line, input);

                    // check performance while this is still news
                    double logP = lr.logLikelihood(targetValue, input);
                    if (!Double.isInfinite(logP)) {
                        if (samples < 20) {
                            logPEstimate = (samples * logPEstimate + logP) / (samples + 1);
                        } else {
                            logPEstimate = 0.95 * logPEstimate + 0.05 * logP;
                        }
                        samples++;
                    }
                    double p = lr.classifyScalar(input);
                    if (scores) {
                        output.printf(Locale.ENGLISH, "%10d %2d %10.2f %2.4f %10.4f %10.4f%n", samples,
                                targetValue, lr.currentLearningRate(), p, logP, logPEstimate);
                    }

                    // now update model
                    lr.train(targetValue, input);

                    line = in.readLine();
                }
            } finally {
                Closeables.close(in, true);
            }
            output.println("duration: " + (System.currentTimeMillis() - ts));
        }

        if (outputFile.startsWith("hdfs://")) {
            lmp.saveTo(new Path(outputFile));
        } else {
            OutputStream modelOutput = new FileOutputStream(outputFile);
            try {
                lmp.saveTo(modelOutput);
            } finally {
                Closeables.close(modelOutput, false);
            }
        }

        output.println("duration: " + (System.currentTimeMillis() - ts));

        output.println(lmp.getNumFeatures());
        output.println(lmp.getTargetVariable() + " ~ ");
        String sep = "";
        for (String v : csv.getTraceDictionary().keySet()) {
            double weight = predictorWeight(lr, 0, csv, v);
            if (weight != 0) {
                output.printf(Locale.ENGLISH, "%s%.3f*%s", sep, weight, v);
                sep = " + ";
            }
        }
        output.printf("%n");
        model = lr;
        for (int row = 0; row < lr.getBeta().numRows(); row++) {
            for (String key : csv.getTraceDictionary().keySet()) {
                double weight = predictorWeight(lr, row, csv, key);
                if (weight != 0) {
                    output.printf(Locale.ENGLISH, "%20s %.5f%n", key, weight);
                }
            }
            for (int column = 0; column < lr.getBeta().numCols(); column++) {
                output.printf(Locale.ENGLISH, "%15.9f ", lr.getBeta().get(row, column));
            }
            output.println();
        }
    }
}
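
The isInfinite guard above protects the running log-likelihood estimate: the model can assign probability 0 to the observed class, and log(0) is negative infinity, which would destroy both the plain average used for the first 20 samples and the exponential moving average used afterwards. A small illustration:

System.out.println(Math.log(0.0));                    // -Infinity
System.out.println(Double.isInfinite(Math.log(0.0))); // true -> the sample is skipped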

From source file:org.kordamp.json.util.JSONUtils.java

/**
 * Produce a string from a double. The string "null" will be returned if the
 * number is not finite.
 *
 * @param d A double.
 * @return A String.
 */
public static String doubleToString(double d) {
    if (Double.isInfinite(d) || Double.isNaN(d)) {
        return "null";
    }

    // Shave off trailing zeros, if possible, but preserve a single zero after decimal point

    String s = Double.toString(d);
    if (s.indexOf('.') > 0 && s.indexOf('e') < 0 && s.indexOf('E') < 0) {
        while (s.endsWith("0")) {
            s = s.substring(0, s.length() - 1);
        }
        if (s.endsWith(".")) {
            s = s + '0';
        }
    }
    return s;
}
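
A few illustrative calls, assuming the class above is on the classpath:

System.out.println(JSONUtils.doubleToString(1.0 / 0.0));  // "null" (infinite)
System.out.println(JSONUtils.doubleToString(Double.NaN)); // "null" (not a number)
System.out.println(JSONUtils.doubleToString(2.5000));     // "2.5"  (trailing zeros shaved)
System.out.println(JSONUtils.doubleToString(3.0));        // "3.0"  (one zero kept after the point)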

From source file:edu.jhuapl.bsp.detector.OpenMath.java

public static double sum(int[] in) {
    if (in != null) {
        double sum = 0;
        for (int i = 0; i < in.length; i++) {
            sum += in[i];
        }
        if (Double.isNaN(sum) || Double.isInfinite(sum)) {
            return 0;
        } else {
            return sum;
        }
    }
    return 0;
}
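
Note that because the input is an int[], the accumulated double can never actually reach NaN or infinity (the largest possible magnitude is far below Double.MAX_VALUE), so this particular guard is purely defensive.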

From source file:org.nd4j.linalg.api.ops.impl.transforms.SigmoidDerivative.java

private static double sigmoidDeriv(double input) {
    double sigmoid = 1 / (1 + FastMath.exp(-input));
    double out = sigmoid * (1.0 - sigmoid);
    if (Nd4j.ENFORCE_NUMERICAL_STABILITY && (Double.isNaN(out) || Double.isInfinite(out))) {
        out = Nd4j.EPS_THRESHOLD;
    }
    return out;
}
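
Here the check is a numerical-stability safeguard: if the derivative comes out as NaN or infinite (e.g. from a NaN input propagating through FastMath.exp), it is clamped to a small epsilon so downstream gradient updates stay finite.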

From source file:com.opengamma.analytics.math.function.PiecewisePolynomialWithSensitivityFunction1D.java

/** 
 * @param pp {@link PiecewisePolynomialResultsWithSensitivity}
 * @param xKeys the key values at which the node sensitivities are evaluated
 * @return Node sensitivity value at x=xKeys
 */
public DoubleMatrix1D[] nodeSensitivity(final PiecewisePolynomialResultsWithSensitivity pp,
        final double[] xKeys) {
    ArgumentChecker.notNull(pp, "null pp");
    ArgumentChecker.notNull(xKeys, "null xKeys");
    final int nKeys = xKeys.length;
    final DoubleMatrix1D[] res = new DoubleMatrix1D[nKeys];

    for (int i = 0; i < nKeys; ++i) {
        ArgumentChecker.isFalse(Double.isNaN(xKeys[i]), "xKey containing NaN");
        ArgumentChecker.isFalse(Double.isInfinite(xKeys[i]), "xKey containing Infinity");
    }
    if (pp.getDimensions() > 1) {
        throw new NotImplementedException();
    }

    final double[] knots = pp.getKnots().getData();
    final int nKnots = knots.length;

    for (int j = 0; j < nKeys; ++j) {
        final double xKey = xKeys[j];
        int interval = FunctionUtils.getLowerBoundIndex(knots, xKey);
        if (interval == nKnots - 1) {
            interval--; // there is one fewer interval than there are knots
        }

        final double s = xKey - knots[interval];
        final DoubleMatrix2D a = pp.getCoefficientSensitivity(interval);
        final int nCoefs = a.getNumberOfRows();

        res[j] = a.getRowVector(0);
        for (int i = 1; i < nCoefs; i++) {
            res[j] = (DoubleMatrix1D) MA.scale(res[j], s);
            res[j] = (DoubleMatrix1D) MA.add(res[j], a.getRowVector(i));
        }
    }

    return res;
}
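
The NaN/Infinity screening up front rejects malformed keys before any arithmetic; the inner loop then evaluates the coefficient-sensitivity rows by Horner's scheme in s = xKey - knots[interval], folding from the highest-order row down.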

From source file:org.apache.carbondata.core.util.DataTypeUtil.java

/**
 * This method will convert a given value to its specific type.
 *
 * @param msrValue      the measure value as a string
 * @param dataType      the target data type of the measure
 * @param carbonMeasure the measure metadata supplying scale and precision
 * @return the converted value, or null when a parsed double is not finite
 */
public static Object getMeasureValueBasedOnDataType(String msrValue, DataType dataType,
        CarbonMeasure carbonMeasure) {
    switch (dataType) {
    case DECIMAL:
        BigDecimal bigDecimal = new BigDecimal(msrValue).setScale(carbonMeasure.getScale(),
                RoundingMode.HALF_UP);
        return normalizeDecimalValue(bigDecimal, carbonMeasure.getPrecision());
    case INT:
        return Double.valueOf(msrValue).longValue();
    case LONG:
        return Long.valueOf(msrValue);
    default:
        Double parsedValue = Double.valueOf(msrValue);
        if (Double.isInfinite(parsedValue) || Double.isNaN(parsedValue)) {
            return null;
        }
        return parsedValue;
    }
}
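
The finiteness check in the default branch matters because Double.valueOf accepts the literals "Infinity", "-Infinity" and "NaN"; such inputs are mapped to null rather than stored as non-finite measure values. For example:

System.out.println(Double.valueOf("Infinity"));                    // Infinity
System.out.println(Double.isInfinite(Double.valueOf("Infinity"))); // true -> method returns null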

From source file:dr.math.distributions.TruncatedDistribution.java

public double quantile(double y) {

    if (y == 0)
        return lower;

    if (y == 1.0)
        return upper;

    if (Double.isInfinite(lower) && Double.isInfinite(upper)) {
        return source.quantile(y);
    }

    try {
        return super.inverseCumulativeProbability(y);
    } catch (MathException e) {
        // AR - throwing exceptions deep in numerical code causes trouble. Catching runtime
        // exceptions is bad. Better to return NaN and let the calling code deal with it.
        return Double.NaN;
    }
}
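
When both truncation bounds are infinite, the distribution is effectively untruncated, so the quantile is delegated directly to the source distribution; otherwise the numerical inversion is attempted, and failures surface as NaN rather than an exception.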

From source file:haflow.component.mahout.logistic.TrainLogistic.java

static void mainToOutput(String[] args) throws Exception {
    if (parseArgs(args)) {

        double logPEstimate = 0;
        int samples = 0;

        OutputStream o = HdfsUtil.writeHdfs(inforFile);
        PrintWriter output = new PrintWriter(o, true);

        CsvRecordFactory csv = lmp.getCsvRecordFactory();
        OnlineLogisticRegression lr = lmp.createRegression();
        for (int pass = 0; pass < passes; pass++) {
            BufferedReader in = new BufferedReader(new InputStreamReader(HdfsUtil.open(inputFile)));
            try {
                // read variable names
                csv.firstLine(in.readLine());

                String line = in.readLine();

                while (line != null) {
                    // for each new line, get target and predictors
                    Vector input = new RandomAccessSparseVector(lmp.getNumFeatures());
                    int targetValue = csv.processLine(line, input);

                    // check performance while this is still news
                    double logP = lr.logLikelihood(targetValue, input);
                    if (!Double.isInfinite(logP)) {
                        if (samples < 20) {
                            logPEstimate = (samples * logPEstimate + logP) / (samples + 1);
                        } else {
                            logPEstimate = 0.95 * logPEstimate + 0.05 * logP;
                        }
                        samples++;
                    }
                    double p = lr.classifyScalar(input);
                    if (scores) {
                        output.printf(Locale.ENGLISH, "%10d %2d %10.2f %2.4f %10.4f %10.4f%n", samples,
                                targetValue, lr.currentLearningRate(), p, logP, logPEstimate);
                    }

                    // now update model
                    lr.train(targetValue, input);

                    line = in.readLine();
                }
            } finally {
                Closeables.close(in, true);
            }
        }

        //OutputStream modelOutput = new FileOutputStream(outputFile);
        OutputStream modelOutput = HdfsUtil.writeHdfs(outputFile);
        try {
            lmp.saveTo(modelOutput);
        } finally {
            Closeables.close(modelOutput, false);
        }

        output.println(lmp.getNumFeatures());
        output.println(lmp.getTargetVariable() + " ~ ");
        String sep = "";
        for (String v : csv.getTraceDictionary().keySet()) {
            double weight = predictorWeight(lr, 0, csv, v);
            if (weight != 0) {
                output.printf(Locale.ENGLISH, "%s%.3f*%s", sep, weight, v);
                sep = " + ";
            }
        }
        output.printf("%n");
        model = lr;
        for (int row = 0; row < lr.getBeta().numRows(); row++) {
            for (String key : csv.getTraceDictionary().keySet()) {
                double weight = predictorWeight(lr, row, csv, key);
                if (weight != 0) {
                    output.printf(Locale.ENGLISH, "%20s %.5f%n", key, weight);
                }
            }
            for (int column = 0; column < lr.getBeta().numCols(); column++) {
                output.printf(Locale.ENGLISH, "%15.9f ", lr.getBeta().get(row, column));
            }
            output.println();
        }
        output.close();
    }

}
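
This variant mirrors the Mahout-style TrainLogistic example shown earlier, with the same isInfinite guard on the per-sample log-likelihood, but reads its input from and writes both the model and the report to HDFS via HdfsUtil.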

From source file:com.opengamma.analytics.math.interpolation.MonotoneConvexSplineInterpolator.java

/**
 * Determine r(t)t = \int_{xValues_0}^{t} f(s) ds for t >= min{xValues}.
 * Extrapolation is by a linear function in the region t > max{xValues}; to employ this extrapolation, use the interpolate methods of this class.
 * @param xValues Data t_i
 * @param yValues Data r_i*t_i
 * @return PiecewisePolynomialResult for r(t)t
 */
@Override
public PiecewisePolynomialResult interpolate(final double[] xValues, final double[] yValues) {

    ArgumentChecker.notNull(xValues, "xValues");
    ArgumentChecker.notNull(yValues, "yValues");

    ArgumentChecker.isTrue(xValues.length == yValues.length, " xValues length = yValues length");
    ArgumentChecker.isTrue(xValues.length > 1, "Data points should be more than 1");

    final int nDataPts = xValues.length;

    for (int i = 0; i < nDataPts; ++i) {
        ArgumentChecker.isFalse(Double.isNaN(xValues[i]), "xData containing NaN");
        ArgumentChecker.isFalse(Double.isInfinite(xValues[i]), "xData containing Infinity");
        ArgumentChecker.isFalse(Double.isNaN(yValues[i]), "yData containing NaN");
        ArgumentChecker.isFalse(Double.isInfinite(yValues[i]), "yData containing Infinity");
    }

    for (int i = 0; i < nDataPts; ++i) {
        for (int j = i + 1; j < nDataPts; ++j) {
            ArgumentChecker.isFalse(xValues[i] == xValues[j], "xValues should be distinct");
        }
    }

    for (int i = 0; i < nDataPts; ++i) {
        if (xValues[i] == 0.) {
            ArgumentChecker.isTrue(yValues[i] == 0., "r_i * t_i = 0 if t_i =0");
        }
    }

    double[] spotTmp = new double[nDataPts];
    for (int i = 0; i < nDataPts; ++i) {
        spotTmp[i] = xValues[i] == 0. ? 0. : yValues[i] / xValues[i];
    }

    _time = Arrays.copyOf(xValues, nDataPts);
    _spotRates = Arrays.copyOf(spotTmp, nDataPts);
    ParallelArrayBinarySort.parallelBinarySort(_time, _spotRates);

    final DoubleMatrix2D coefMatrix = solve(_time, _spotRates);
    final DoubleMatrix2D coefMatrixIntegrate = integration(_time, coefMatrix.getData());

    for (int i = 0; i < coefMatrixIntegrate.getNumberOfRows(); ++i) {
        for (int j = 0; j < coefMatrixIntegrate.getNumberOfColumns(); ++j) {
            ArgumentChecker.isFalse(Double.isNaN(coefMatrixIntegrate.getData()[i][j]), "Too large input");
            ArgumentChecker.isFalse(Double.isInfinite(coefMatrixIntegrate.getData()[i][j]), "Too large input");
        }
    }

    return new PiecewisePolynomialResult(new DoubleMatrix1D(_time), coefMatrixIntegrate,
            coefMatrixIntegrate.getNumberOfColumns(), 1);
}
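
isInfinite appears twice here: first to validate the raw (x, y) data before sorting and solving, and again to screen the integrated coefficient matrix, where overflow from extreme inputs would otherwise surface as Infinity in the returned polynomial coefficients.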