Example usage for org.apache.commons.math3.distribution BinomialDistribution cumulativeProbability

List of usage examples for org.apache.commons.math3.distribution BinomialDistribution cumulativeProbability

Introduction

On this page you can find example usage for org.apache.commons.math3.distribution BinomialDistribution cumulativeProbability.

Prototype

public double cumulativeProbability(int x) 
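
The method returns P(X <= x) for the distribution's success count X. Below is a minimal, self-contained sketch; the class name, trial count and success probability are arbitrary illustration values, not taken from the examples that follow.

import org.apache.commons.math3.distribution.BinomialDistribution;

public class BinomialCdfSketch {
    public static void main(String[] args) {
        // 20 independent trials, each succeeding with probability 0.3
        BinomialDistribution dist = new BinomialDistribution(20, 0.3);

        // cumulativeProbability(x) returns P(X <= x)
        double pAtMostFive = dist.cumulativeProbability(5);

        // the upper tail P(X >= 6) follows from the complement of P(X <= 5)
        double pAtLeastSix = 1.0 - pAtMostFive;

        System.out.println("P(X <= 5) = " + pAtMostFive);
        System.out.println("P(X >= 6) = " + pAtLeastSix);
    }
}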

Usage

From source file:com.cloudera.oryx.app.speed.rdf.RDFSpeedIT.java

private static void checkProbability(int majorityCount, int count, BinomialDistribution dist) {
    double expected = 0.9 * count;
    double probAsExtreme = majorityCount <= expected ? dist.cumulativeProbability(majorityCount)
            : (1.0 - dist.cumulativeProbability(majorityCount)) + dist.probability(majorityCount);
    assertTrue(majorityCount + " should be about " + expected + " (~90% of " + count + ")",
            probAsExtreme >= 0.001);
}

From source file:gr.eap.LSHDB.HammingKey.java

public int getLc() {
    double p = 1 - (t * 1.0) / (this.size * 1.0);
    p = Math.pow(p, k);
    double exp = (L * p);
    double std = Math.sqrt(exp * (1 - p));
    int C = (int) Math.round(exp - std);

    double x = (Math.sqrt(Math.log(delta) * Math.log(delta) - 2 * C * Math.log(delta)) - Math.log(delta) + C)
            / p;
    int Lc = (int) Math.ceil(x);
    double b = Lc * p;
    if (C > b)
        System.out.println("does not apply C > np.");
    BinomialDistribution bd1 = new BinomialDistribution(L, p);
    for (int l = L; l < L * 2; l++) {
        bd1 = new BinomialDistribution(l, p);
        double result = bd1.cumulativeProbability(C - 1);
        if (result < delta) {
            Lc = l;
            break;
        }
    }
    System.out.println("Lc reduced to=" + Lc);
    return Lc;
}

From source file:org.aika.network.neuron.lattice.AndNode.java

public void computeWeight() {
    if (Network.numberOfPositions == 0 || frequency < Node.minFrequency) {
        return;
    }

    double nullHyp = 1.0;
    for (InputNode ref : parents.keySet()) {
        Node in = ref.inputNeuron.node;
        double p = (double) in.frequency / (double) Network.numberOfPositions;
        if (p > 1.0)
            p = 1.0;
        nullHyp *= p;
    }

    BinomialDistribution binDist = new BinomialDistribution(null, Network.numberOfPositions, nullHyp);

    weight = binDist.cumulativeProbability(frequency - 1);

    n = Network.numberOfPositions;
    /*
            double minPRel = 1.0;
            for(Map.Entry<InputNode, Node> me: parents.entrySet()) {
    double p = 1.0 - ((double) frequency / (double) me.getValue().frequency);
    if(minPRel > p) minPRel = p;
            
    p = 1.0 - ((double) frequency / (double) me.getKey().inputNeuron.node.frequency);
    if(minPRel > p) minPRel = p;
            }
            
            minPRelevance = minPRel;
    */
    if (level == 2) {
        double minPRel = 1.0;
        for (Map.Entry<InputNode, Node> me : parents.entrySet()) {
            double p = 1.0 - ((double) frequency / (double) me.getValue().frequency);
            if (minPRel > p)
                minPRel = p;
        }

        if (minPRel < 0.1) {
            shadowedInput = true;
        }
    }

    setSignificant(weight > 0.99);
}

From source file:org.cbioportal.mutationhotspots.mutationhotspotsdetection.impl.HotspotImpl.java

private double binomialTest(int numberOfMutationInHotspot, int numberOfAllMutations, int hotspotLength,
        int proteinLength) {
    double p = 1.0 * hotspotLength / proteinLength;
    BinomialDistribution distribution = new BinomialDistribution(numberOfAllMutations, p);
    return 1 - distribution.cumulativeProbability(numberOfMutationInHotspot - 1);
}
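
The expression 1 - cumulativeProbability(k - 1) used here is the standard one-sided binomial test: it gives the right-tail probability P(X >= k). A small sketch with arbitrary illustration values (not taken from the hotspot code) that cross-checks the tail against a direct sum of the mass function:

import org.apache.commons.math3.distribution.BinomialDistribution;

public class UpperTailSketch {
    public static void main(String[] args) {
        int n = 100;      // e.g. all mutations in the protein
        double p = 0.05;  // e.g. hotspotLength / proteinLength
        int k = 12;       // e.g. mutations observed inside the hotspot

        BinomialDistribution dist = new BinomialDistribution(n, p);

        // right tail via the complement of the CDF, as in binomialTest above
        double viaCdf = 1.0 - dist.cumulativeProbability(k - 1);

        // the same tail obtained by summing the probability mass function directly
        double viaPmf = 0.0;
        for (int x = k; x <= n; x++) {
            viaPmf += dist.probability(x);
        }

        System.out.println("P(X >= " + k + ") via CDF: " + viaCdf);
        System.out.println("P(X >= " + k + ") via PMF: " + viaPmf);
    }
}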

From source file:org.gitools.analysis.stats.test.BinomialTest.java

private static CommonResult resultWithExact(int observed, int n, double p, double expectedMean,
        double expectedStdev) {

    double leftPvalue;
    double rightPvalue;
    double twoTailPvalue;

    //FIXME: May be it's better to return null ???
    if (n == 0) {
        leftPvalue = rightPvalue = twoTailPvalue = 1.0;
    } else {

        BinomialDistribution distribution = new BinomialDistribution(n, p);

        leftPvalue = distribution.cumulativeProbability(observed);
        rightPvalue = observed > 0 ? distribution.cumulativeProbability(observed - 1, n) : 1.0;
        twoTailPvalue = leftPvalue + rightPvalue;
        twoTailPvalue = twoTailPvalue > 1.0 ? 1.0 : twoTailPvalue;
    }

    return new BinomialResult(BinomialResult.Distribution.BINOMIAL, n, leftPvalue, rightPvalue, twoTailPvalue,
            observed, expectedMean, expectedStdev, p);
}
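
Note that the two-argument call cumulativeProbability(observed - 1, n) evaluates P(observed - 1 < X <= n), i.e. the right tail P(X >= observed), so leftPvalue and rightPvalue above are the two one-sided tests around the observed count. A short sketch with arbitrary illustration values showing that the two-argument form agrees with the single-argument complement:

import org.apache.commons.math3.distribution.BinomialDistribution;

public class TwoSidedTailsSketch {
    public static void main(String[] args) {
        int n = 50;
        double p = 0.2;
        int observed = 15;

        BinomialDistribution dist = new BinomialDistribution(n, p);

        // left tail P(X <= observed)
        double left = dist.cumulativeProbability(observed);

        // right tail P(X >= observed) via P(observed - 1 < X <= n)
        double right = dist.cumulativeProbability(observed - 1, n);

        // equivalent right tail via the single-argument form
        double rightAlt = 1.0 - dist.cumulativeProbability(observed - 1);

        System.out.println("left  = " + left);
        System.out.println("right = " + right + " (check: " + rightAlt + ")");
    }
}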

From source file:osh.driver.simulation.MieleApplianceSimulationDriver.java

@Override
protected void generateNewDof(boolean useRandomDof, int actionCountPerDay, long applianceActionTimeTick,
        OSHRandomGenerator randomGen) {
    super.generateNewDof(useRandomDof, actionCountPerDay, applianceActionTimeTick, randomGen);

    // generate DOFs with binary distribution
    if (getSimulationEngine().getScreenplayType() == ScreenplayType.DYNAMIC) {
        if (86400 / ((getProgamDuration() + 1) * actionCountPerDay) < 1 && actionCountPerDay > 1) {
            throw new RuntimeException("Program duration to long for multiple runs per day");
        }

        // (there could be a scheduled/running action from the other day)
        // this could cause an endless loop
        int maxDof = calcMaxDof(actionCountPerDay, 86400);

        int newValue = maxDof;
        if (useRandomDof) {
            // in 15 minutes steps only
            int stepSize = 900;
            maxDof = maxDof / stepSize;
            //deviate
            //E(X)=0.5*max=28800s=8h or E(X)=0.5*max=14400s=4h
            BinomialDistribution binDistribution = new BinomialDistribution(maxDof, 0.5);
            double rand = randomGen.getNextDouble();

            for (int i = 0; i < maxDof; i++) {
                if (binDistribution.cumulativeProbability(i) > rand) {
                    newValue = i;
                    break;
                }
            }
            newValue = newValue * stepSize;
        }

        //create now the new action

        SubjectAction dofAction = new SubjectAction();
        dofAction.setTick(applianceActionTimeTick - 1); // Do 1 sec in advance!
        dofAction.setDeviceID(this.getDeviceID().toString());
        dofAction.setActionType(ActionType.USER_ACTION);
        dofAction.setNextState(false);
        PerformAction dofAction2Perform = new PerformAction();
        ActionParameters dofActionParameters = new ActionParameters();
        dofActionParameters.setParametersName("dof");
        ActionParameter dofActionParameter = new ActionParameter();
        dofActionParameter.setName("tdof");
        dofActionParameter.setValue("" + newValue);
        dofActionParameters.getParameter().add(dofActionParameter);
        dofAction2Perform.getActionParameterCollection().add(dofActionParameters);
        dofAction.getPerformAction().add(dofAction2Perform);

        this.setAction(dofAction);
    }
}
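
The loop above performs inverse-transform sampling by scanning the CDF until it exceeds a uniform draw. Where the custom OSHRandomGenerator is not required, a possible shorthand (a sketch with illustration values, not the project's code) is inverseCumulativeProbability, which returns the smallest value whose CDF reaches the drawn probability:

import java.util.Random;

import org.apache.commons.math3.distribution.BinomialDistribution;

public class DofSamplingSketch {
    public static void main(String[] args) {
        int stepSize = 900;    // 15-minute steps, as above
        int maxDofSteps = 32;  // illustration value: 32 quarter-hour steps
        BinomialDistribution dist = new BinomialDistribution(maxDofSteps, 0.5);

        // smallest i with P(X <= i) >= u; equivalent to scanning cumulativeProbability(i)
        double u = new Random().nextDouble();
        int steps = dist.inverseCumulativeProbability(u);

        int newValue = steps * stepSize; // back to seconds
        System.out.println("sampled DOF: " + newValue + " s");
    }
}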

From source file:uk.ac.babraham.FastQC.Modules.KmerContent.java

private synchronized void calculateEnrichment() {

    /*
     * For each Kmer we work out whether there is a statistically
     * significant deviation in its coverage at any given position
     * compared to its average coverage over all positions.
     */

    // We'll be grouping together positions later so make up the groups now
    groups = BaseGroup.makeBaseGroups((longestSequence - MIN_KMER_SIZE) + 1);

    Vector<Kmer> unevenKmers = new Vector<Kmer>();

    Iterator<Kmer> rawKmers = kmers.values().iterator();

    while (rawKmers.hasNext()) {
        Kmer k = rawKmers.next();
        char[] chars = k.sequence().toCharArray();

        long totalKmerCount = 0;

        // This gets us the total number of Kmers of this type in the whole
        // dataset.
        for (int i = 0; i < totalKmerCounts.length; i++) {
            totalKmerCount += totalKmerCounts[i][k.sequence().length() - 1];
        }

        // This is the expected proportion of all Kmers which have this
        // specific Kmer sequence.  We no longer make any attempt to judge
        // overall enrichment or depletion of this sequence since once you
        // get to longer lengths the distribution isn't flat anyway

        float expectedProportion = k.count / (float) totalKmerCount;

        // We now want to go through each of the positions looking for whether
        // this Kmer was seen an unexpected number of times compared to what we
        // expected from the global values

        float[] obsExpPositions = new float[groups.length];
        float[] binomialPValues = new float[groups.length];

        long[] positionCounts = k.getPositions();

        for (int g = 0; g < groups.length; g++) {
            // This is a summation of the number of Kmers of this length which
            // fall into this base group
            long totalGroupCount = 0;

            // This is a summation of the number of hit Kmers which fall within
            // this base group.
            long totalGroupHits = 0;
            for (int p = groups[g].lowerCount() - 1; p < groups[g].upperCount()
                    && p < positionCounts.length; p++) {
                totalGroupCount += totalKmerCounts[p][chars.length - 1];
                totalGroupHits += positionCounts[p];
            }

            float predicted = expectedProportion * totalGroupCount;
            //            obsExpPositions[g] = (float)(Math.log(totalGroupHits/predicted)/Math.log(2));
            obsExpPositions[g] = (float) (totalGroupHits / predicted);

            // Now we can run a binomial test to see if there is a significant
            // deviation from what we expect given the number of observations we've
            // made

            BinomialDistribution bd = new BinomialDistribution((int) totalGroupCount, expectedProportion);
            if (totalGroupHits > predicted) {
                binomialPValues[g] = (float) ((1 - bd.cumulativeProbability((int) totalGroupHits))
                        * Math.pow(4, chars.length));
            } else {
                binomialPValues[g] = 1;
            }

        }

        k.setObsExpPositions(obsExpPositions);

        // To keep this we need a p-value below 0.01 and an obs/exp above 5 (actual values are log2 transformed)
        float lowestPValue = 1;
        for (int i = 0; i < binomialPValues.length; i++) {
            //            if (binomialPValues[i] < 0.01 && obsExpPositions[i] > (Math.log(5)/Math.log(2))) {
            if (binomialPValues[i] < 0.01 && obsExpPositions[i] > 5) {
                if (binomialPValues[i] < lowestPValue) {
                    lowestPValue = binomialPValues[i];
                }
            }
        }

        if (lowestPValue < 0.01) {
            k.setLowestPValue(lowestPValue);
            unevenKmers.add(k);
        }

    }

    Kmer[] finalKMers = unevenKmers.toArray(new Kmer[0]);

    // We sort by the highest degree of enrichment over the average      
    Arrays.sort(finalKMers);

    // So we don't end up with stupidly long lists of Kmers in the
    // report we'll only report the top 20
    if (finalKMers.length > 20) {
        Kmer[] shortenedKmers = new Kmer[20];
        for (int i = 0; i < shortenedKmers.length; i++) {
            shortenedKmers[i] = finalKMers[i];
        }

        finalKMers = shortenedKmers;
    }

    // Now we take the enrichment positions for the top 6 hits and
    // record these so we can plot them on a line graph
    enrichments = new double[Math.min(6, finalKMers.length)][];
    xLabels = new String[enrichments.length];

    xCategories = new String[groups.length];

    for (int i = 0; i < xCategories.length; i++) {
        xCategories[i] = groups[i].toString();
    }

    for (int k = 0; k < enrichments.length; k++) {
        enrichments[k] = new double[groups.length];

        float[] obsExpPos = finalKMers[k].getObsExpPositions();

        for (int g = 0; g < groups.length; g++) {
            enrichments[k][g] = obsExpPos[g];
            if (obsExpPos[g] > maxGraphValue)
                maxGraphValue = obsExpPos[g];
            if (obsExpPos[g] < minGraphValue)
                minGraphValue = obsExpPos[g];
        }

        xLabels[k] = finalKMers[k].sequence();

    }

    minGraphValue = 0;

    //      System.err.println("Max value="+maxGraphValue+" min value="+minGraphValue);

    this.enrichedKmers = finalKMers;

    // Delete the initial data structure so we don't suck up more memory
    // than we have to.
    kmers.clear();

    calculated = true;
}

From source file:uk.ac.babraham.SeqMonk.DataTypes.Interaction.HiCInteractionStrengthCalculator.java

private double getPValue(int interactionCount, int probe1CisCount, int probe1TransCount, int probe2CisCount,
        int probe2TransCount, Probe probe1, Probe probe2) {

    if (probe1.chromosome().equals(probe2.chromosome())) {

        BinomialDistribution bd = new BinomialDistribution(probe1CisCount, probabilityOfCrossover);

        //         System.err.println("Significance is "+(1-bd.cumulativeProbability(interactionCount)));

        // Since the distribution gives us the probability of getting an interaction higher
        // than the one we observed we need to subtract 1 from it to get the probability of
        // this level of interaction or higher.
        return 1 - bd.cumulativeProbability(interactionCount - 1);

    }

    else {
        // We need to work out the chances of a trans hit coming up
        // anywhere

        BinomialDistribution bd = new BinomialDistribution(probe1TransCount, probabilityOfCrossover);

        //         System.err.println("Significance is "+(1-bd.cumulativeProbability(interactionCount)));

        // Since the distribution gives us the probability of getting an interaction higher
        // than the one we observed we need to subtract 1 from it to get the probability of
        // this level of interaction or higher.
        return 1 - bd.cumulativeProbability(interactionCount - 1);
    }

}
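
Both branches compute the same right-tail probability and differ only in which read count sets the number of trials, so the shared expression could be factored into a small helper. A sketch only: upperTailPValue is a hypothetical name, not part of the original class, and it assumes org.apache.commons.math3.distribution.BinomialDistribution is imported.

private static double upperTailPValue(int trials, double successProbability, int observed) {
    // P(X >= observed) for X ~ Binomial(trials, successProbability)
    if (observed <= 0) {
        return 1.0;
    }
    BinomialDistribution bd = new BinomialDistribution(trials, successProbability);
    return 1.0 - bd.cumulativeProbability(observed - 1);
}

getPValue could then return upperTailPValue(probe1CisCount, probabilityOfCrossover, interactionCount) in the cis case and upperTailPValue(probe1TransCount, probabilityOfCrossover, interactionCount) otherwise.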

From source file:uk.ac.babraham.SeqMonk.Pipelines.AntisenseTranscriptionPipeline.java

protected void startPipeline() {

    // We first need to generate probes over all of the features listed in
    // the feature types.  The probes should cover the whole area of the
    // feature regardless of where it splices.

    Vector<Probe> probes = new Vector<Probe>();
    double pValue = optionsPanel.pValue();
    QuantitationStrandType readFilter = optionsPanel.readFilter();

    long[] senseCounts = new long[data.length];
    long[] antisenseCounts = new long[data.length];

    Chromosome[] chrs = collection().genome().getAllChromosomes();

    // First find the overall rate of antisense reads

    for (int c = 0; c < chrs.length; c++) {
        if (cancel) {
            progressCancelled();
            return;
        }

        progressUpdated("Getting total antisense rate for chr" + chrs[c].name(), c, chrs.length * 2);

        Feature[] features = getValidFeatures(chrs[c]);

        for (int f = 0; f < features.length; f++) {
            if (cancel) {
                progressCancelled();
                return;
            }
            Probe p = new Probe(chrs[c], features[f].location().start(), features[f].location().end(),
                    features[f].location().strand(), features[f].name());
            probes.add(p);

            for (int d = 0; d < data.length; d++) {
                long[] reads = data[d].getReadsForProbe(p);
                for (int r = 0; r < reads.length; r++) {
                    if (readFilter.useRead(p, reads[r])) {
                        senseCounts[d] += SequenceRead.length(reads[r]);
                    } else {
                        antisenseCounts[d] += SequenceRead.length(reads[r]);
                    }
                }
            }

        }
    }

    Probe[] allProbes = probes.toArray(new Probe[0]);

    collection().setProbeSet(new ProbeSet("Features over " + optionsPanel.getSelectedFeatureType(), allProbes));

    // Now we can work out the overall antisense rate
    double[] antisenseProbability = new double[data.length];
    for (int d = 0; d < data.length; d++) {

        System.err
                .println("Antisense counts are " + antisenseCounts[d] + " sense counts are " + senseCounts[d]);

        antisenseProbability[d] = antisenseCounts[d] / (double) (antisenseCounts[d] + senseCounts[d]);

        System.err.println("Antisense probability for " + data[d].name() + " is " + antisenseProbability[d]);
    }

    // Now we can quantitate each individual feature and test for whether it is significantly 
    // showing antisense expression

    ArrayList<Vector<ProbeTTestValue>> significantProbes = new ArrayList<Vector<ProbeTTestValue>>();

    for (int d = 0; d < data.length; d++) {
        significantProbes.add(new Vector<ProbeTTestValue>());
    }

    int[] readLengths = new int[data.length];

    for (int d = 0; d < readLengths.length; d++) {
        readLengths[d] = data[d].getMaxReadLength();
        System.err.println("For " + data[d].name() + " max read len is " + readLengths[d]);
    }

    for (int c = 0; c < chrs.length; c++) {
        if (cancel) {
            progressCancelled();
            return;
        }

        progressUpdated("Quantitating features on chr" + chrs[c].name(), chrs.length + c, chrs.length * 2);

        Probe[] thisChrProbes = collection().probeSet().getProbesForChromosome(chrs[c]);

        for (int p = 0; p < thisChrProbes.length; p++) {

            for (int d = 0; d < data.length; d++) {
                if (cancel) {
                    progressCancelled();
                    return;
                }

                long senseCount = 0;
                long antisenseCount = 0;

                long[] reads = data[d].getReadsForProbe(thisChrProbes[p]);
                for (int r = 0; r < reads.length; r++) {

                    if (readFilter.useRead(thisChrProbes[p], reads[r])) {
                        // TODO: Just count overlap?
                        senseCount += SequenceRead.length(reads[r]);
                    } else {
                        antisenseCount += SequenceRead.length(reads[r]);
                    }

                }

                //               if (thisChrProbes[p].name().equals("RP4-798A10.2")) {
                //                  System.err.println("Raw base counts are sense="+senseCount+" anti="+antisenseCount+" from "+reads.length+" reads");
                //               }

                int senseReads = (int) (senseCount / readLengths[d]);
                int antisenseReads = (int) (antisenseCount / readLengths[d]);

                //               if (thisChrProbes[p].name().equals("RP4-798A10.2")) {
                //                  System.err.println("Raw read counts are sense="+senseReads+" anti="+antisenseReads+" from "+reads.length+" reads");
                //               }

                BinomialDistribution bd = new BinomialDistribution(senseReads + antisenseReads,
                        antisenseProbability[d]);

                // Since the binomial distribution gives the probability of getting a value higher than
                // this we need to subtract one so we get the probability of this or higher.
                double thisPValue = 1 - bd.cumulativeProbability(antisenseReads - 1);

                if (antisenseReads == 0)
                    thisPValue = 1;

                // We have to add all results at this stage so we don't mess up the multiple 
                // testing correction later on.
                significantProbes.get(d).add(new ProbeTTestValue(thisChrProbes[p], thisPValue));

                double expected = ((senseReads + antisenseReads) * antisenseProbability[d]);

                //               if (thisChrProbes[p].name().equals("RP4-798A10.2")) {
                //                  System.err.println("Probe="+thisChrProbes[p]+" sense="+senseReads+" anti="+antisenseReads+" anti-prob="+antisenseProbability[d]+" expected="+expected+" raw-p="+thisPValue);
                //               }

                if (expected < 1)
                    expected = 1;

                float obsExp = antisenseReads / (float) expected;

                data[d].setValueForProbe(thisChrProbes[p], obsExp);

            }

        }
    }

    // Now we can go through the set of significant probes, applying a correction and then
    // filtering those which pass our p-value cutoff
    for (int d = 0; d < data.length; d++) {

        ProbeTTestValue[] ttestResults = significantProbes.get(d).toArray(new ProbeTTestValue[0]);

        BenjHochFDR.calculateQValues(ttestResults);

        ProbeList newList = new ProbeList(collection().probeSet(),
                "Antisense < " + pValue + " in " + data[d].name(),
                "Probes showing significant antisense transcription from a basal level of "
                        + antisenseProbability[d] + " with a cutoff of " + pValue,
                "FDR");

        for (int i = 0; i < ttestResults.length; i++) {
            if (ttestResults[i].probe.name().equals("RP4-798A10.2")) {
                System.err.println("Raw p=" + ttestResults[i].p + " q=" + ttestResults[i].q);
            }

            if (ttestResults[i].q < pValue) {
                newList.addProbe(ttestResults[i].probe, (float) ttestResults[i].q);
            }
        }

    }

    StringBuffer quantitationDescription = new StringBuffer();
    quantitationDescription.append("Antisense transcription pipeline quantitation ");
    quantitationDescription.append(". Directionality was ");
    quantitationDescription.append(optionsPanel.libraryTypeBox.getSelectedItem());

    if (optionsPanel.ignoreOverlaps()) {
        quantitationDescription.append(". Ignoring existing overlaps");
    }

    quantitationDescription.append(". P-value cutoff was ");
    quantitationDescription.append(optionsPanel.pValue());

    collection().probeSet().setCurrentQuantitation(quantitationDescription.toString());

    quantitatonComplete();

}

From source file:uk.ac.babraham.SeqMonk.Pipelines.CodonBiasPipeline.java

protected void startPipeline() {

    // We first need to generate probes over all of the features listed in
    // the feature types.  The probes should cover the whole area of the
    // feature regardless of where it splices.

    Vector<Probe> probes = new Vector<Probe>();
    double pValue = optionsPanel.pValue();

    String libraryType = optionsPanel.libraryType();

    Chromosome[] chrs = collection().genome().getAllChromosomes();

    for (int c = 0; c < chrs.length; c++) {
        if (cancel) {
            progressCancelled();
            return;
        }

        progressUpdated("Making probes for chr" + chrs[c].name(), c, chrs.length * 2);

        Feature[] features = collection().genome().annotationCollection().getFeaturesForType(chrs[c],
                optionsPanel.getSelectedFeatureType());

        for (int f = 0; f < features.length; f++) {
            if (cancel) {
                progressCancelled();
                return;
            }
            Probe p = new Probe(chrs[c], features[f].location().start(), features[f].location().end(),
                    features[f].location().strand(), features[f].name());
            probes.add(p);

        }
    }

    allProbes = probes.toArray(new Probe[0]);

    collection().setProbeSet(new ProbeSet("Features over " + optionsPanel.getSelectedFeatureType(), allProbes));

    // Now we can quantitate each individual feature and test for whether it is significantly 
    // showing codon bias      
    ArrayList<Vector<ProbeTTestValue>> significantProbes = new ArrayList<Vector<ProbeTTestValue>>();

    // data contains the data stores that this pipeline is going to use. We need to test each data store.
    for (int d = 0; d < data.length; d++) {
        significantProbes.add(new Vector<ProbeTTestValue>());
    }

    int probeCounter = 0;

    for (int c = 0; c < chrs.length; c++) {
        if (cancel) {
            progressCancelled();
            return;
        }

        progressUpdated("Quantitating features on chr" + chrs[c].name(), chrs.length + c, chrs.length * 2);

        Feature[] features = collection().genome().annotationCollection().getFeaturesForType(chrs[c],
                optionsPanel.getSelectedFeatureType());

        for (int p = 0; p < features.length; p++) {

            //Get the corresponding feature and work out the mapping between genomic position and codon sub position.   
            int[] mappingArray = createGenomeMappingArray(features[p]);

            DATASTORE_LOOP: for (int d = 0; d < data.length; d++) {

                if (cancel) {
                    progressCancelled();
                    return;
                }

                long[] reads = data[d].getReadsForProbe(allProbes[probeCounter]);

                //System.err.println("Number of reads = " + reads.length);               

                //TODO: make this configurable
                if (reads.length < 5) {

                    data[d].setValueForProbe(allProbes[probeCounter], Float.NaN);
                    continue DATASTORE_LOOP;
                }

                int pos1Count = 0;
                int pos2Count = 0;
                int pos3Count = 0;

                //System.out.println("quantitating " + data[d].name());
                //System.err.println("Number of reads = " + reads.length);   

                READ_LOOP: for (int r = 0; r < reads.length; r++) {

                    int genomicReadStart = SequenceRead.start(reads[r]);
                    int genomicReadEnd = SequenceRead.end(reads[r]);
                    int readStrand = SequenceRead.strand(reads[r]);
                    int relativeReadStart = -1;

                    // work out where the start of the read is relative to the feature, 
                    // depending on strandedness of read, probe and library type  

                    // forward reads
                    if (readStrand == 1) {

                        if (libraryType.equals("Same strand specific")) {

                            if (features[p].location().strand() == Location.FORWARD) {

                                // The start of the read needs to be within the feature
                                if (genomicReadStart - features[p].location().start() < 0) {
                                    continue READ_LOOP;
                                }

                                else {
                                    // look up the read start pos in the mapping array
                                    relativeReadStart = mappingArray[genomicReadStart
                                            - features[p].location().start()];

                                }
                            }

                        }

                        else if (libraryType.equals("Opposing strand specific")) {

                            if (features[p].location().strand() == Location.REVERSE) {

                                // The start of the read needs to be within the feature
                                // The "start" of a reverse read/probe is actually the end  
                                if (features[p].location().end() - genomicReadEnd < 0) {
                                    continue READ_LOOP;
                                }

                                else {
                                    relativeReadStart = mappingArray[features[p].location().end()
                                            - genomicReadEnd];
                                }
                            }
                        }
                    }

                    // reverse reads
                    if (readStrand == -1) {

                        if (libraryType.equals("Same strand specific")) {

                            if (features[p].location().strand() == Location.REVERSE) {

                                if (features[p].location().end() - genomicReadEnd < 0) {
                                    continue READ_LOOP;
                                }

                                else {
                                    //System.out.println("features[p].location().end() is " + features[p].location().end() + ", genomicReadEnd is " + genomicReadEnd);
                                    //System.out.println("mapping array[0] is " + mappingArray[0]);
                                    relativeReadStart = mappingArray[features[p].location().end()
                                            - genomicReadEnd];
                                }
                            }
                        }

                        else if (libraryType.equals("Opposing strand specific")) {

                            if (features[p].location().strand() == Location.FORWARD) {

                                // The start of the read needs to be within the feature
                                if (genomicReadStart - features[p].location().start() < 0) {
                                    continue READ_LOOP;
                                }

                                else {
                                    //    look up the read start position in the mapping array
                                    relativeReadStart = mappingArray[genomicReadStart
                                            - features[p].location().start()];
                                }
                            }
                        }

                    }

                    // find out which position the read is in
                    if (relativeReadStart == -1) {
                        continue READ_LOOP;
                    } else if (relativeReadStart % 3 == 0) {
                        pos3Count++;
                        continue READ_LOOP;
                    } else if ((relativeReadStart + 1) % 3 == 0) {
                        pos2Count++;
                        continue READ_LOOP;
                    } else if ((relativeReadStart + 2) % 3 == 0) {
                        pos1Count++;
                    }

                } // closing bracket for read loop

                //System.out.println("pos1Count for "+ features[p].name() + " is " + pos1Count);
                //System.out.println("pos2Count for "+ features[p].name() + " is " + pos2Count);
                //System.out.println("pos3Count for "+ features[p].name() + " is " + pos3Count);

                int interestingCodonCount = 0;
                int otherCodonCount = 0;

                if (optionsPanel.codonSubPosition() == 1) {
                    interestingCodonCount = pos1Count;
                    otherCodonCount = pos2Count + pos3Count;
                }

                else if (optionsPanel.codonSubPosition() == 2) {
                    interestingCodonCount = pos2Count;
                    otherCodonCount = pos1Count + pos3Count;
                }

                else if (optionsPanel.codonSubPosition() == 3) {
                    interestingCodonCount = pos3Count;
                    otherCodonCount = pos1Count + pos2Count;
                }

                int totalCount = interestingCodonCount + otherCodonCount;

                //BinomialDistribution bd = new BinomialDistribution(interestingCodonCount+otherCodonCount, 1/3d);

                BinomialDistribution bd = new BinomialDistribution(totalCount, 1 / 3d);

                // Since the binomial distribution gives the probability of getting a value higher than
                // this we need to subtract one so we get the probability of this or higher.
                double thisPValue = 1 - bd.cumulativeProbability(interestingCodonCount - 1);

                if (interestingCodonCount == 0)
                    thisPValue = 1;

                // We have to add all results at this stage so we don't mess up the multiple 
                // testing correction later on.
                significantProbes.get(d).add(new ProbeTTestValue(allProbes[probeCounter], thisPValue));

                float percentageCount;

                if (totalCount == 0) {
                    percentageCount = 0;
                } else {
                    percentageCount = ((float) interestingCodonCount / (float) totalCount) * 100;
                }

                data[d].setValueForProbe(allProbes[probeCounter], percentageCount);

                //System.out.println("totalCount = " + totalCount);
                //System.out.println("interestingCodonCount " + interestingCodonCount);
                //System.out.println("pValue = " + thisPValue);
                //System.out.println("percentageCount = " + percentageCount);
                //System.out.println("");

            }
            probeCounter++;
        }
    }

    // Now we can go through the set of significant probes, applying a correction and then
    // filtering those which pass our p-value cutoff
    for (int d = 0; d < data.length; d++) {

        ProbeTTestValue[] ttestResults = significantProbes.get(d).toArray(new ProbeTTestValue[0]);

        BenjHochFDR.calculateQValues(ttestResults);

        ProbeList newList = new ProbeList(collection().probeSet(),
                "Codon bias < " + pValue + " in " + data[d].name(),
                "Probes showing significant codon bias for position " + optionsPanel.codonSubPosition()
                        + " with a cutoff of " + pValue,
                "FDR");

        for (int i = 0; i < ttestResults.length; i++) {

            //System.out.println("p value is " + ttestResults[i].p + ", q value is " + ttestResults[i].q);

            if (ttestResults[i].q < pValue) {
                newList.addProbe(ttestResults[i].probe, (float) ttestResults[i].q);
            }
        }
    }

    StringBuffer quantitationDescription = new StringBuffer();
    quantitationDescription.append("Codon bias pipeline using codon position " + optionsPanel.codonSubPosition()
            + " for " + optionsPanel.libraryType() + " library.");

    collection().probeSet().setCurrentQuantitation(quantitationDescription.toString());

    quantitatonComplete();

}