Example usage for java.lang Math log

Introduction

On this page you can find usage examples for java.lang.Math.log, collected from open-source projects.

Prototype

@HotSpotIntrinsicCandidate
public static double log(double a) 

Document

Returns the natural logarithm (base e) of a double value.
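
A quick sanity check of that behavior, together with the change-of-base identity several of the examples below rely on (a minimal sketch, not taken from any of the projects):

public class MathLogDemo {
    public static void main(String[] args) {
        System.out.println(Math.log(Math.E)); // 1.0: the logarithm is base e
        System.out.println(Math.log(1.0));    // 0.0
        System.out.println(Math.log(0.0));    // -Infinity
        System.out.println(Math.log(-1.0));   // NaN: undefined for negative input
        // Change of base: log_b(x) = ln(x) / ln(b)
        System.out.println(Math.log(8.0) / Math.log(2.0)); // ~3.0 (log base 2)
    }
}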

Usage

From source file:com.ibm.bluej.commonutil.PrecisionRecallThreshold.java

public SummaryScores computeSummaryScores(double limitProb) {
    if (limitProb < 0.5) {
        throw new IllegalArgumentException("Probabilities cannot be limited to below 50%");
    }
    SummaryScores sum = new SummaryScores();
    Collections.shuffle(scoredPlusGold, RANDOMNESS);
    Collections.sort(scoredPlusGold, new FirstPairComparator(null));
    Collections.reverse(scoredPlusGold);

    double tpRelative = 0;
    double fpRelative = 0;

    double cummulativeCorrect = 0;
    double auc = 0;
    double allPositive = 0;
    double averageScore = 0;
    for (Pair<Double, Boolean> p : scoredPlusGold) {
        if (p.second)
            allPositive += 1;
        averageScore += p.first;
    }
    averageScore /= scoredPlusGold.size();
    sum.relativeThreshold = averageScore;

    double maxF = 0;
    double maxFThresh = 0;
    double logLike = 0;
    double maxAcc = 0;
    double maxAccThresh = 0;
    sum.relativePrecision = Double.NaN;
    for (int i = 0; i < scoredPlusGold.size(); ++i) {
        Pair<Double, Boolean> p = scoredPlusGold.get(i);
        if (p.second) {
            ++cummulativeCorrect;
            auc += cummulativeCorrect / ((i + 1) * allPositive);
        }

        double tp = cummulativeCorrect;
        double fp = (i + 1) - cummulativeCorrect;
        double fn = allPositive - cummulativeCorrect;
        double tn = (scoredPlusGold.size() - (i + 1)) - fn;
        double precision = (double) (tp) / (tp + fp);
        double recall = (double) (tp) / (tp + fn);
        double accuracy = (tp + tn) / scoredPlusGold.size();
        double f1 = 2 * precision * recall / (precision + recall);

        if (p.second) {
            if (p.first > sum.relativeThreshold)
                tpRelative++;
        } else {
            if (p.first > sum.relativeThreshold)
                fpRelative++;
        }

        if (f1 > maxF) {
            maxF = f1;
            maxFThresh = p.first;
        }
        if (accuracy > maxAcc) {
            maxAcc = accuracy;
            maxAccThresh = p.first;
        }

        double prob = p.second ? p.first : 1 - p.first;
        if (prob < 0 || prob > 1) {
            logLike = Double.NaN;
        }
        if (prob > limitProb) {
            prob = limitProb;
        }
        if (prob < 1 - limitProb) {
            prob = 1 - limitProb;
        }
        logLike += Math.log(prob);
    }

    sum.maxFScore = maxF;
    sum.maxFScoreThreshold = maxFThresh;
    sum.auc = auc;
    sum.pearsonsR = pearsonsR();
    sum.logLikelihood = logLike;
    sum.maxAccuracy = maxAcc;
    sum.maxAccuracyThreshold = maxAccThresh;

    sum.relativePrecision = tpRelative
            / (tpRelative + (fpRelative * (allPositive / (scoredPlusGold.size() - allPositive))));
    if (Double.isNaN(sum.relativePrecision))
        sum.relativePrecision = 0;
    sum.relativeRecall = tpRelative / allPositive;
    sum.relativeFScore = 2 * sum.relativePrecision * sum.relativeRecall
            / (sum.relativePrecision + sum.relativeRecall);
    if (Double.isNaN(sum.relativeFScore))
        sum.relativeFScore = 0;

    return sum;
}

From source file:com.itemanalysis.psychometrics.rasch.Theta.java

public void prox(double adjust) {
    theta = Math.log(getAdjustedRawScore(adjust) / (maximumPossibleScore - getAdjustedRawScore(adjust)));
}
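
The prox estimate above is the logit (log-odds) of the adjusted proportion of the maximum possible score. The same transform in isolation, as a standalone sketch (the method name is illustrative, not part of the library):

// Logit transform: theta = ln(p / (1 - p)), where p is the proportion of the maximum score.
static double logit(double rawScore, double maxScore) {
    double p = rawScore / maxScore;
    return Math.log(p / (1.0 - p)); // diverges as p approaches 0 or 1
}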

From source file:com.opengamma.analytics.financial.model.volatility.local.DermanKaniImpliedBinomialTreeModel.java

@Override
public ImpliedTreeResult getImpliedTrees(final OptionDefinition definition,
        final StandardOptionDataBundle data) {
    Validate.notNull(definition, "definition");
    Validate.notNull(data, "data");

    final int m1 = RecombiningBinomialTree.NODES.evaluate(_n);
    final int m2 = RecombiningBinomialTree.NODES.evaluate(_n - 1);
    final double[][] impliedTree = new double[_n + 1][m1]; //TODO this wastes space

    final double[] transitionProbabilities = new double[m2];
    double[] arrowDebreu = new double[m1];
    final double[][] localVolatilityTree = new double[_n][m2];
    final double dt = definition.getTimeToExpiry(data.getDate()) / _n;
    double t = 0;
    final double spot = data.getSpot();
    impliedTree[0][0] = spot;
    arrowDebreu[0] = 1;
    int previousNodes = 1;
    final ZonedDateTime date = data.getDate();
    for (int i = 1; i < _n + 1; i++) {
        final int nodes = RecombiningBinomialTree.NODES.evaluate(i);
        final BinomialOptionModel<StandardOptionDataBundle> crrModel = new BinomialOptionModel<>(CRR, i);
        t += dt;
        final double df1 = Math.exp(dt * data.getInterestRate(t));
        final double df2 = Math.exp(dt * data.getCostOfCarry());
        final Expiry expiry = new Expiry(DateUtils.getDateOffsetWithYearFraction(date, t));
        final int mid = i / 2;
        if (i % 2 == 0) {
            impliedTree[i][mid] = spot;
            addUpperNodes(data, impliedTree, arrowDebreu, i, crrModel, df1, df2, expiry, mid + 1);
            addLowerNodes(data, impliedTree, arrowDebreu, i, crrModel, df1, df2, expiry, mid - 1);
        } else {
            final double c = crrModel
                    .getTreeGeneratingFunction(new EuropeanVanillaOptionDefinition(spot, expiry, true))
                    .evaluate(data).getNode(0, 0).second;
            final double sigma = getUpperSigma(impliedTree, arrowDebreu, i - 1, df2, mid + 1);
            impliedTree[i][mid + 1] = spot * (df1 * c + arrowDebreu[mid] * spot - sigma)
                    / (arrowDebreu[mid] * impliedTree[i - 1][mid] * df2 - df1 * c + sigma);
            impliedTree[i][mid] = spot * spot / impliedTree[i][mid + 1];
            addUpperNodes(data, impliedTree, arrowDebreu, i, crrModel, df1, df2, expiry, mid + 2);
            addLowerNodes(data, impliedTree, arrowDebreu, i, crrModel, df1, df2, expiry, mid - 1);
        }
        for (int j = 0; j < previousNodes; j++) {
            final double f = impliedTree[i - 1][j] * df2;
            transitionProbabilities[j] = (f - impliedTree[i][j]) / (impliedTree[i][j + 1] - impliedTree[i][j]);
            //TODO emcleod 31-8-10 Need to check that transition probabilities are positive - use adjustment suggested in "The Volatility Smile and its Implied Tree"
            localVolatilityTree[i - 1][j] = Math
                    .sqrt(transitionProbabilities[j] * (1 - transitionProbabilities[j]))
                    * Math.log(impliedTree[i][j + 1] / impliedTree[i][j]); //TODO need 1/sqrt(dt) here
        }
        final double[] temp = new double[m1];
        temp[0] = (1 - transitionProbabilities[0]) * arrowDebreu[0] / df1;
        temp[nodes - 1] = (transitionProbabilities[previousNodes - 1] * arrowDebreu[previousNodes - 1]) / df1;
        for (int j = 1; j < nodes - 1; j++) {
            temp[j] = (transitionProbabilities[j - 1] * arrowDebreu[j - 1]
                    + (1 - transitionProbabilities[j]) * arrowDebreu[j]) / df1;
        }
        arrowDebreu = temp;
        previousNodes = nodes;
    }
    final Double[][] impliedTreeResult = new Double[_n + 1][m1];
    final Double[][] localVolResult = new Double[_n][m2];
    for (int i = 0; i < impliedTree.length; i++) {
        for (int j = 0; j < impliedTree[i].length; j++) {
            impliedTreeResult[i][j] = impliedTree[i][j];
            if (i < _n && j < m2) {
                localVolResult[i][j] = localVolatilityTree[i][j];
            }
        }
    }
    return new ImpliedTreeResult(new RecombiningBinomialTree<>(impliedTreeResult),
            new RecombiningBinomialTree<>(localVolResult));
}

From source file:com.hmsinc.epicenter.spatial.analysis.SpatialScanGrid.java

/** Analyze the grid to calculate values for C_all and B_all */
public void analyzeGrid() {
    B_all = 0.0;
    C_all = 0.0;
    double sum_q = 0.0;

    // Iterate through the grid, summing all baseline and count values. We also 
    // calculate the sum of C/B ratios, for possible future use (these would be 
    // necessary to use population values as the baseline)
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < columns; j++) {

            B_all += grid[i][j].getBaseline();
            C_all += grid[i][j].getCount();

            // increment sum_q if baseline != 0 (to avoid NaN)
            if (grid[i][j].getBaseline() > 0.0) {
                sum_q += grid[i][j].getCount() / grid[i][j].getBaseline();
            }
            numCells++;
        }
    }
    // Disabled because we use expected value as the baseline...in this case, q0
    // is hard-coded to 1.0
    // q0 = sum_q/numCells;

    double alpha_all = q0 * B_all;
    double beta_all = B_all;

    // calculate log likelihood of the null hypothesis

    /** Note: Individual terms in this equation are too large, and return
     * infinite results. So, we reformulate the equation by taking its 
     * natural log and finding its exponential at the end of calculation.
     * 
     *    Thus the original equation (Neill et al, page 3)
     *    
     *    P(D|H0) ~      (beta_all)^alpha_all * Gamma(alpha_all + C_all)
     *              --------------------------------------------------------
     *              (beta_all + B_all)^(alpha_all + C_all) * Gamma(alpha_all)
     *              
     *    becomes
     *                  
     * log(P(D|H0)) ~     [ ( alpha_all * log(beta_all) ) + logGamma(alpha_all + C_all) ] - 
     *                [ ( (alpha_all + C_all) * log(beta_all + B_all) ) + logGamma(alpha_all) ]
     *              
     */

    logP_D_H0 = ((alpha_all * Math.log(beta_all)) + Gamma.logGamma((alpha_all + C_all)))
            - (((alpha_all + C_all) * (Math.log(beta_all + B_all))) + Gamma.logGamma(alpha_all));
}
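
The same overflow-avoidance trick in isolation, as a minimal sketch (assuming commons-math3 on the classpath, which the code above already uses for logGamma):

import org.apache.commons.math3.special.Gamma;

public class LogSpaceDemo {
    public static void main(String[] args) {
        // Direct evaluation overflows: Gamma(200) = 199! is roughly 10^372,
        // far beyond the double range of about 1.8e308.
        System.out.println(Gamma.gamma(200.0));    // Infinity
        // The log-domain form stays finite, so ratios of huge terms remain computable.
        System.out.println(Gamma.logGamma(200.0)); // ~857.93
    }
}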

From source file:edu.cmu.tetrad.search.EstimateRank.java

public int Estimate(int[] iA, int[] iB, double[][] cov, int N, double alpha) {
    this.alpha = alpha;
    this.iA = iA;
    this.iB = iB;
    this.cov = cov;
    this.N = N;
    double[] Cors = CanCor(iA, iB, cov);
    int rank = 0;
    boolean reject = true;

    while (reject) {
        double sum = 0;
        int i;
        for (i = rank; i < Math.min(iA.length, iB.length); i++) {
            sum += Math.log(1 - Math.pow(Cors[i], 2));
        }
        double stat = -(N - .5 * (iA.length + iB.length + 3)) * sum;
        reject = ProbUtils.chisqCdf(stat, (iA.length - rank) * (iB.length - rank)) > (1 - alpha);
        if (reject && rank < Math.min(iA.length, iB.length)) {
            rank++;
        } else {
            reject = false;
        }
    }

    return rank;
}
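
The sum of Math.log(1 - r^2) terms is Bartlett's statistic for testing how many canonical correlations are nonzero. A self-contained illustration with made-up correlations (all values hypothetical, chosen only to show the scale of the statistic):

public class BartlettDemo {
    public static void main(String[] args) {
        double[] r = { 0.9, 0.3, 0.05 }; // hypothetical canonical correlations
        int N = 100, p = 3, q = 3;       // sample size and variable-set sizes
        double sum = 0;
        for (double ri : r) {
            sum += Math.log(1 - ri * ri); // each term is <= 0
        }
        double stat = -(N - 0.5 * (p + q + 3)) * sum;
        System.out.println(stat); // ~167.9, deep in the chi-square tail: rank 0 rejected
    }
}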

From source file:mt.LengthDistribution.java

public static void GetLengthDistributionArray(ArrayList<File> AllMovies, double[] calibration) {

    ArrayList<Double> maxlist = new ArrayList<Double>();
    for (int i = 0; i < AllMovies.size(); ++i) {

        double maxlength = LengthDistribution.Lengthdistro(AllMovies.get(i));

        if (!Double.isNaN(maxlength) && maxlength > 0)
            maxlist.add(maxlength);

    }
    Collections.sort(maxlist);

    int min = 0;
    int max = (int) Math.round(maxlist.get(maxlist.size() - 1)) + 1;
    XYSeries counterseries = new XYSeries("MT length distribution");
    XYSeries Logcounterseries = new XYSeries("MT Log length distribution");
    final ArrayList<Point> points = new ArrayList<Point>();
    for (int length = 0; length < max; ++length) {

        HashMap<Integer, Integer> frameseed = new HashMap<Integer, Integer>();

        int count = 0;
        for (int i = 0; i < AllMovies.size(); ++i) {

            File file = AllMovies.get(i);

            double currentlength = LengthDistribution.Lengthdistro(file);

            ArrayList<FLSobject> currentobject = Tracking.loadMTStat(file);

            if (currentlength > length) {

                for (int index = 0; index < currentobject.size(); ++index) {
                    ArrayList<Integer> seedlist = new ArrayList<Integer>();
                    if (currentobject.get(index).length >= length) {
                        seedlist.add(currentobject.get(index).seedID);
                        if (frameseed.get(currentobject.get(index).Framenumber) != null) {

                            int currentcount = frameseed.get(currentobject.get(index).Framenumber);
                            frameseed.put(currentobject.get(index).Framenumber, seedlist.size() + currentcount);
                        } else if (currentobject.get(index) != null)
                            frameseed.put(currentobject.get(index).Framenumber, seedlist.size());

                    }

                }

            }

        }

        // Get maxima length, count
        int maxvalue = Integer.MIN_VALUE;

        for (int key : frameseed.keySet()) {

            int Count = frameseed.get(key);

            if (Count >= maxvalue)
                maxvalue = Count;
        }

        if (maxvalue != Integer.MIN_VALUE) {
            counterseries.add(length, maxvalue);

            if (maxvalue > 0) {
                Logcounterseries.add((length), Math.log(maxvalue));
                points.add(new Point(new double[] { length, Math.log(maxvalue) }));
            }

        }
    }

    final XYSeriesCollection dataset = new XYSeriesCollection();
    final XYSeriesCollection nofitdataset = new XYSeriesCollection();
    dataset.addSeries(counterseries);
    nofitdataset.addSeries(counterseries);
    final XYSeriesCollection Logdataset = new XYSeriesCollection();
    Logdataset.addSeries(Logcounterseries);

    final JFreeChart chart = ChartFactory.createScatterPlot("MT length distribution", "Length (micrometer)",
            "Number of MT", dataset);

    final JFreeChart nofitchart = ChartFactory.createScatterPlot("MT length distribution", "Length (micrometer)",
            "Number of MT", nofitdataset);

    // Fitting line to log of the length distribution
    interpolation.Polynomial poly = new interpolation.Polynomial(1);
    try {

        poly.fitFunction(points);

    } catch (NotEnoughDataPointsException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    DisplayPoints.display(nofitchart, new Dimension(800, 500));
    dataset.addSeries(Tracking.drawexpFunction(poly, counterseries.getMinX(), counterseries.getMaxX(), 0.5,
            "Exponential fit"));
    NumberFormat nf = NumberFormat.getInstance(Locale.ENGLISH);
    nf.setMaximumFractionDigits(3);
    TextTitle legendText = new TextTitle("Mean Length" + " : " + nf.format(-1.0 / poly.getCoefficients(1))
            + "  " + "Standard Deviation" + " : " + nf.format(poly.SSE));
    legendText.setPosition(RectangleEdge.RIGHT);

    DisplayPoints.display(chart, new Dimension(800, 500));
    chart.addSubtitle(legendText);

    final JFreeChart logchart = ChartFactory.createScatterPlot("MT Log length distribution", "Length (micrometer)",
            "Number of MT", Logdataset);
    //     DisplayPoints.display(logchart, new Dimension(800, 500));
    for (int i = 1; i >= 0; --i)
        System.out.println("Coefficient of x^" + i + ": " + poly.getCoefficients(i));

    //  Logdataset.addSeries(Tracking.drawFunction(poly, counterseries.getMinX(), counterseries.getMaxX(), 0.5, "Straight line fit"));

    WriteLengthdistroFile(AllMovies, counterseries, 0);

}
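
The Math.log call in this example linearizes an assumed exponential length distribution: if count ≈ A·exp(-length/λ), then log(count) falls on a straight line with slope -1/λ, which is why the code reports the mean length as -1.0 / poly.getCoefficients(1). A minimal sketch of that idea on synthetic data (the decay constant 5.0 is arbitrary):

public class LogLinearizeDemo {
    public static void main(String[] args) {
        // count(l) = 100 * exp(-l / 5), so log(count) is linear with slope -0.2
        for (int l = 0; l <= 10; l++) {
            double count = 100 * Math.exp(-l / 5.0);
            System.out.printf("%d  %.3f%n", l, Math.log(count));
        }
    }
}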

From source file:com.github.feribg.audiogetter.helpers.Utils.java

/**
 * Format a byte count as a human-readable size string
 *
 * @param bytes   the number of bytes to format
 * @param decimal if true, use decimal (1000-based) units; otherwise binary (1024-based) units
 * @return the formatted size, e.g. "1.5 KiB"; "-" for byte counts of 0 or -1
 */
public static String makeSizeHumanReadable(long bytes, boolean decimal) {
    String hr = "-";
    int unit = decimal ? 1000 : 1024;
    if (bytes < unit) {
        hr = bytes + " B";
    } else {
        int exp = (int) (Math.log(bytes) / Math.log(unit));
        String pre = (decimal ? "kMGTPE" : "KMGTPE").charAt(exp - 1) + (decimal ? "" : "i");
        hr = String.format("%.1f %sB", bytes / Math.pow(unit, exp), pre);
    }
    hr = hr.replace("-1 B", "-").replace("0 B", "-");
    return hr;
}
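
Here Math.log(bytes) / Math.log(unit) is the change-of-base identity: it computes the log base unit of the byte count, and the int cast floors it to pick the unit prefix. A quick usage check (expected output shown in comments, assuming an English default locale for the decimal separator):

System.out.println(Utils.makeSizeHumanReadable(512, true));      // "512 B"
System.out.println(Utils.makeSizeHumanReadable(1536, false));    // "1.5 KiB"
System.out.println(Utils.makeSizeHumanReadable(1500000, true));  // "1.5 MB"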

From source file:com.analog.lyric.dimple.factorfunctions.Beta.java

@Override
public final double evalEnergy(Value[] arguments) {
    int index = 0;
    if (!_parametersConstant) {
        _alpha = arguments[index++].getDouble(); // First input is alpha parameter (must be non-negative)
        _beta = arguments[index++].getDouble(); // Second input is beta parameter (must be non-negative)
        _alphaMinusOne = _alpha - 1;
        _betaMinusOne = _beta - 1;
        _logBetaAlphaBeta = org.apache.commons.math3.special.Beta.logBeta(_alpha, _beta);
        if (_alpha < 0)
            return Double.POSITIVE_INFINITY;
        if (_beta < 0)
            return Double.POSITIVE_INFINITY;
    }
    final int length = arguments.length;
    final int N = length - index; // Number of non-parameter variables
    double sum = 0;
    if (_alpha == 1 && _beta == 1) {
        for (; index < length; index++) {
            final double x = arguments[index].getDouble(); // Remaining inputs are Beta variables
            if (x < 0 || x > 1)
                return Double.POSITIVE_INFINITY;
        }
        return 0; // Uniform within 0 <= x <= 1
    } else if (_alpha == 1) {
        for (; index < length; index++) {
            final double x = arguments[index].getDouble(); // Remaining inputs are Beta variables
            if (x < 0 || x > 1)
                return Double.POSITIVE_INFINITY;
            sum += Math.log(1 - x);
        }
        return N * _logBetaAlphaBeta - sum * _betaMinusOne;
    } else if (_beta == 1) {
        for (; index < length; index++) {
            final double x = arguments[index].getDouble(); // Remaining inputs are Beta variables
            if (x < 0 || x > 1)
                return Double.POSITIVE_INFINITY;
            sum += Math.log(x);
        }
        return N * _logBetaAlphaBeta - sum * _alphaMinusOne;
    } else {
        for (; index < length; index++) {
            final double x = arguments[index].getDouble(); // Remaining inputs are Beta variables
            if (x < 0 || x > 1)
                return Double.POSITIVE_INFINITY;
            sum += _alphaMinusOne * Math.log(x) + _betaMinusOne * Math.log(1 - x);
        }
        return N * _logBetaAlphaBeta - sum;
    }
}
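
The value returned above is the negative log-density of a Beta(alpha, beta) distribution summed over the variables: -log p(x) = log B(alpha, beta) - (alpha - 1) * log(x) - (beta - 1) * log(1 - x). A single-point cross-check against commons-math3, which the class already depends on for logBeta:

import org.apache.commons.math3.distribution.BetaDistribution;
import org.apache.commons.math3.special.Beta;

public class BetaEnergyCheck {
    public static void main(String[] args) {
        double alpha = 2.5, beta = 4.0, x = 0.3;
        // Assembled the same way as the general branch of evalEnergy:
        double energy = Beta.logBeta(alpha, beta)
                - (alpha - 1) * Math.log(x)
                - (beta - 1) * Math.log(1 - x);
        // Reference value from the library's log-density:
        double reference = -new BetaDistribution(alpha, beta).logDensity(x);
        System.out.println(energy + " vs " + reference); // the two should agree
    }
}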

From source file:se.crisp.codekvast.warehouse.file_import.ZipFileImporterImpl.java

private String humanReadableByteCount(long bytes) {
    if (bytes < 1000) {
        return bytes + " B";
    }
    int exponent = (int) (Math.log(bytes) / Math.log(1000));
    String unit = " kMGTPE".charAt(exponent) + "B";
    return format("%.1f %s", bytes / Math.pow(1000, exponent), unit);
}

From source file:com.rapidminer.operator.preprocessing.filter.TFIDFFilter.java

@Override
public ExampleSet apply(ExampleSet exampleSet) throws OperatorException {
    if (exampleSet.size() < 1) {
        throw new UserError(this, 110, new Object[] { "1" });
    }
    if (exampleSet.getAttributes().size() == 0) {
        throw new UserError(this, 106, new Object[0]);
    }

    // init
    double[] termFrequencySum = new double[exampleSet.size()];
    List<Attribute> attributes = new LinkedList<Attribute>();
    for (Attribute attribute : exampleSet.getAttributes()) {
        if (attribute.isNumerical()) {
            attributes.add(attribute);
        }
    }
    int[] documentFrequencies = new int[attributes.size()];

    // calculate frequencies
    int index = 0;
    for (Attribute attribute : attributes) {
        int exampleCounter = 0;
        for (Example example : exampleSet) {
            double value = example.getValue(attribute);
            termFrequencySum[exampleCounter] += value;
            if (value > 0) {
                documentFrequencies[index]++;
            }
            exampleCounter++;
        }
        index++;
        checkForStop();
    }

    // calculate IDF values
    double[] inverseDocumentFrequencies = new double[documentFrequencies.length];
    for (int i = 0; i < attributes.size(); i++) {
        inverseDocumentFrequencies[i] = Math.log((double) exampleSet.size() / (double) documentFrequencies[i]);
    }

    // set values
    boolean calculateTermFrequencies = getParameterAsBoolean(PARAMETER_CALCULATE_TERM_FREQUENCIES);
    index = 0;
    for (Attribute attribute : attributes) {
        int exampleCounter = 0;
        for (Example example : exampleSet) {
            double value = example.getValue(attribute);
            if (termFrequencySum[exampleCounter] == 0.0d || Double.isNaN(inverseDocumentFrequencies[index])) {
                example.setValue(attribute, 0.0d);
            } else {
                double tf = value;
                if (calculateTermFrequencies) {
                    tf /= termFrequencySum[exampleCounter];
                }
                double idf = inverseDocumentFrequencies[index];
                example.setValue(attribute, (tf * idf));
            }
            exampleCounter++;
        }
        index++;
        checkForStop();
    }
    return exampleSet;
}
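
The Math.log call here implements the standard inverse document frequency, idf(t) = ln(N / df(t)): rare terms get large weights, and a term that appears in every document gets weight 0. A tiny worked example for N = 100 documents:

public class IdfDemo {
    public static void main(String[] args) {
        System.out.println(Math.log(100.0 / 1));   // ~4.61: term in 1 of 100 documents
        System.out.println(Math.log(100.0 / 10));  // ~2.30: term in 10 of 100 documents
        System.out.println(Math.log(100.0 / 100)); // 0.0: term in every document
    }
}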