Example usage for org.apache.commons.math3.stat.interval.ConfidenceInterval.getLowerBound


Introduction

This page shows example usages of org.apache.commons.math3.stat.interval.ConfidenceInterval.getLowerBound.

Prototype

public double getLowerBound() 
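In commons-math3 a ConfidenceInterval is typically produced by one of the BinomialConfidenceInterval implementations, such as WilsonScoreInterval, and getLowerBound() then returns the lower end of the interval as a proportion. A minimal sketch follows; the class name, the trial/success counts, and the confidence level are illustrative, not taken from the examples below.

import org.apache.commons.math3.stat.interval.ConfidenceInterval;
import org.apache.commons.math3.stat.interval.WilsonScoreInterval;

public class LowerBoundSketch {
    public static void main(String[] args) {
        // Illustrative numbers: 120 successes in 200 trials at 95% confidence.
        WilsonScoreInterval wilson = new WilsonScoreInterval();
        ConfidenceInterval interval = wilson.createInterval(200, 120, 0.95);

        // Both bounds are proportions in [0, 1].
        System.out.println("lower = " + interval.getLowerBound());
        System.out.println("upper = " + interval.getUpperBound());
    }
}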

Usage

From source file: com.examples.abelanav2.datastore.DbClient.java

/**
 * Updates the vote counts of a photo and its popularity bounds.
 * @param photoKey the photo key.
 * @param oldVote the old vote.
 * @param newVote the new vote.
 * @return a boolean indicating success.
 * @throws DatastoreException  if there is a datastore error.
 */
public boolean updateVotesAndBounds(final Key photoKey, final long oldVote, final long newVote)
        throws DatastoreException {
    Entity photoEntity = getPhoto(photoKey);
    Map<String, Value> propsPhoto = getPropertyMap(photoEntity);
    long newVoteCount = propsPhoto.get("numberVotes").getIntegerValue();
    long newPositiveVoteCount = propsPhoto.get("numberPositiveVotes").getIntegerValue();
    if (newVote == 0) {
        newVoteCount--;
    }
    if (oldVote == 0) {
        newVoteCount++;
    }
    if (oldVote == 1 && newVote != 1) {
        newPositiveVoteCount--;
    }
    if (oldVote != 1 && newVote == 1) {
        newPositiveVoteCount++;
    }

    // If there is no vote for a picture, it means that even the author
    // has removed his vote for this picture. Let's send it down the
    // abyss with upperTruePopularity of 0. If there is at least one
    // vote, let's compute the new popularity of the photo.
    double lowerTruePopularity = 0;
    double upperTruePopularity = 0;
    if (newVoteCount > 0) {
        WilsonScoreInterval wilsonScoreInterval = new WilsonScoreInterval();
        ConfidenceInterval confidenceInterval = wilsonScoreInterval.createInterval((int) newVoteCount,
                (int) newPositiveVoteCount, BackendConstants.CONFIDENCE_INTERVAL);
        lowerTruePopularity = confidenceInterval.getLowerBound();
        upperTruePopularity = confidenceInterval.getUpperBound();
    }

    List<Property> properties = ImmutableList.of(makeProperty("numberVotes", makeValue(newVoteCount)).build(),
            makeProperty("numberPositiveVotes", makeValue(newPositiveVoteCount)).build(),
            makeProperty("lowerTruePopularity", makeValue(lowerTruePopularity)).build(),
            makeProperty("upperTruePopularity", makeValue(upperTruePopularity)).build());

    return DbUtils.updateEntity(datastore, photoEntity.getKey(), properties);
}
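Ranking by the Wilson lower bound rather than the raw positive-vote ratio is the standard way to keep items with very few votes from dominating: with little evidence the interval is wide and the lower bound stays low. A hypothetical standalone sketch of that effect (the class name and vote counts are illustrative, not part of DbClient):

import org.apache.commons.math3.stat.interval.ConfidenceInterval;
import org.apache.commons.math3.stat.interval.WilsonScoreInterval;

public class PopularityRankingSketch {
    public static void main(String[] args) {
        WilsonScoreInterval wilson = new WilsonScoreInterval();

        // 2 positive votes out of 2: a perfect ratio, but very little evidence.
        ConfidenceInterval fewVotes = wilson.createInterval(2, 2, 0.95);
        // 90 positive votes out of 100: a lower ratio, but far more evidence.
        ConfidenceInterval manyVotes = wilson.createInterval(100, 90, 0.95);

        // The lower bound penalizes small samples, so manyVotes ranks higher
        // (roughly 0.83 versus 0.34 at 95% confidence).
        System.out.println("2/2    lower bound: " + fewVotes.getLowerBound());
        System.out.println("90/100 lower bound: " + manyVotes.getLowerBound());
    }
}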

From source file: uk.ac.babraham.SeqMonk.Filters.BinomialFilterForRev.java

protected void generateProbeList() {

    boolean aboveOnly = false;
    boolean belowOnly = false;

    if (options.directionBox.getSelectedItem().equals("Above"))
        aboveOnly = true;
    else if (options.directionBox.getSelectedItem().equals("Below"))
        belowOnly = true;

    if (options.stringencyField.getText().length() == 0) {
        stringency = 0.05;
    } else {
        stringency = Double.parseDouble(options.stringencyField.getText());
    }
    if (options.minObservationsField.getText().length() == 0) {
        minObservations = 10;
    } else {
        minObservations = Integer.parseInt(options.minObservationsField.getText());
    }
    if (options.minDifferenceField.getText().length() == 0) {
        minPercentShift = 10;
    } else {
        minPercentShift = Integer.parseInt(options.minDifferenceField.getText());
    }

    applyMultipleTestingCorrection = options.multiTestBox.isSelected();

    ProbeList newList;

    if (applyMultipleTestingCorrection) {
        newList = new ProbeList(startingList, "Filtered Probes", "", "Q-value");
    } else {
        newList = new ProbeList(startingList, "Filtered Probes", "", "P-value");
    }

    Probe[] probes = startingList.getAllProbes();

    // We need to create a set of mean end methylation values for all starting values.
    // We round to the nearest percent, so we'll end up with a set of 101 values (0-100)
    // which are the expected end points.
    double[] expectedEnds = calculateEnds(probes);

    if (expectedEnds == null)
        return; // They cancelled whilst calculating.

    for (int i = 0; i < expectedEnds.length; i++) {
        System.err.println("" + i + "\t" + expectedEnds[i]);
    }

    // This is where we'll store any hits
    Vector<ProbeTTestValue> hits = new Vector<ProbeTTestValue>();
    BinomialTest bt = new BinomialTest();
    AlternativeHypothesis hypothesis = AlternativeHypothesis.TWO_SIDED;

    if (aboveOnly)
        hypothesis = AlternativeHypothesis.GREATER_THAN;
    if (belowOnly)
        hypothesis = AlternativeHypothesis.LESS_THAN;

    for (int p = 0; p < probes.length; p++) {

        if (p % 100 == 0) {
            progressUpdated("Processed " + p + " probes", p, probes.length);
        }

        if (cancel) {
            cancel = false;
            progressCancelled();
            return;
        }

        long[] reads = fromStore.getReadsForProbe(probes[p]);

        int forCount = 0;
        int revCount = 0;

        for (int r = 0; r < reads.length; r++) {
            if (SequenceRead.strand(reads[r]) == Location.FORWARD) {
                ++forCount;
            } else if (SequenceRead.strand(reads[r]) == Location.REVERSE) {
                ++revCount;
            }
        }

        if (forCount + revCount < minObservations)
            continue;

        int fromPercent = Math.round((forCount * 100f) / (forCount + revCount));

        // We need to calculate the confidence range for the from reads and work
        // out the most pessimistic value we could take as a starting value
        WilsonScoreInterval wi = new WilsonScoreInterval();
        ConfidenceInterval ci = wi.createInterval(forCount + revCount, forCount, 1 - stringency);
        //         System.err.println("From percent="+fromPercent+" meth="+forCount+" unmeth="+revCount+" sig="+stringency+" ci="+ci.getLowerBound()*100+" - "+ci.getUpperBound()*100);         

        reads = toStore.getReadsForProbe(probes[p]);

        forCount = 0;
        revCount = 0;

        for (int r = 0; r < reads.length; r++) {
            if (SequenceRead.strand(reads[r]) == Location.FORWARD) {
                ++forCount;
            } else if (SequenceRead.strand(reads[r]) == Location.REVERSE) {
                ++revCount;
            }
        }

        if (forCount + revCount < minObservations)
            continue;

        float toPercent = (forCount * 100f) / (forCount + revCount);

        //         System.err.println("Observed toPercent is "+toPercent+ "from meth="+forCount+" unmeth="+revCount+" and true predicted is "+expectedEnds[Math.round(toPercent)]);

        // Find the most pessimistic fromPercent such that the expected toPercent is as close
        // as possible to the observed value, based on the confidence interval we calculated before.

        double worseCaseExpectedPercent = 0;
        double smallestTheoreticalToActualDiff = 100;

        // Just taking the absolute difference can still leave us with a closest value that is
        // quite far from where we are. We therefore also check whether our confidence interval
        // gives us a potential value range which spans the actual value, and if it does we
        // fail it without even running the test.
        boolean seenLower = false;
        boolean seenHigher = false;

        for (int m = Math.max((int) Math.floor(ci.getLowerBound() * 100), 0); m <= Math
                .min((int) Math.ceil(ci.getUpperBound() * 100), 100); m++) {
            double expectedPercent = expectedEnds[m];
            double diff = expectedPercent - toPercent;
            if (diff <= 0)
                seenLower = true;
            if (diff >= 0)
                seenHigher = true;

            if (Math.abs(diff) < smallestTheoreticalToActualDiff) {
                worseCaseExpectedPercent = expectedPercent;
                smallestTheoreticalToActualDiff = Math.abs(diff);
            }
        }

        //         System.err.println("Worst case percent is "+worseCaseExpectedPercent+" with diff of "+smallestTheoreticalToActualDiff+" to "+toPercent);   

        // Sanity check
        if (smallestTheoreticalToActualDiff > Math.abs((toPercent - expectedEnds[Math.round(fromPercent)]))) {
            throw new IllegalStateException("Can't have a worst case which is better than the actual");
        }

        if (Math.abs(toPercent - worseCaseExpectedPercent) < minPercentShift)
            continue;

        // Check the directionality
        if (aboveOnly && worseCaseExpectedPercent - toPercent > 0)
            continue;
        if (belowOnly && worseCaseExpectedPercent - toPercent < 0)
            continue;

        // Now perform the Binomial test.

        double pValue = bt.binomialTest(forCount + revCount, forCount, worseCaseExpectedPercent / 100d,
                hypothesis);

        if (seenLower && seenHigher)
            pValue = 0.5; // Our confidence range spanned the actual value we had so we can't be significant

        //         System.err.println("P value is "+pValue);

        // Store this as a potential hit (after correcting p-values later)
        hits.add(new ProbeTTestValue(probes[p], pValue));

    }

    // Now we can correct the p-values if we need to

    ProbeTTestValue[] rawHits = hits.toArray(new ProbeTTestValue[0]);

    if (applyMultipleTestingCorrection) {

        //         System.err.println("Correcting for "+rawHits.length+" tests");
        BenjHochFDR.calculateQValues(rawHits);
    }

    for (int h = 0; h < rawHits.length; h++) {
        if (applyMultipleTestingCorrection) {
            if (rawHits[h].q < stringency) {
                newList.addProbe(rawHits[h].probe, (float) rawHits[h].q);
            }
        } else {
            if (rawHits[h].p < stringency) {
                newList.addProbe(rawHits[h].probe, (float) rawHits[h].p);
            }
        }
    }

    filterFinished(newList);

}
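The significance call in this filter goes through BinomialTest from org.apache.commons.math3.stat.inference, which returns the p-value directly. A minimal sketch of that call in isolation; the read counts and expected proportion are illustrative:

import org.apache.commons.math3.stat.inference.AlternativeHypothesis;
import org.apache.commons.math3.stat.inference.BinomialTest;

public class BinomialTestSketch {
    public static void main(String[] args) {
        BinomialTest bt = new BinomialTest();

        // Illustrative counts: 40 forward reads out of 50 total, tested
        // against an expected forward proportion of 0.6.
        double pValue = bt.binomialTest(50, 40, 0.6, AlternativeHypothesis.TWO_SIDED);

        System.out.println("p-value = " + pValue);
    }
}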