Example usage for org.apache.lucene.util FixedBitSet flip

List of usage examples for org.apache.lucene.util FixedBitSet flip

Introduction

On this page you can find example usage for org.apache.lucene.util FixedBitSet flip.

Prototype

public void flip(int startIndex, int endIndex) 

Source Link

Document

Flips a range of bits

Usage

From source file:net.conquiris.lucene.search.NegatingFilter.java

License:Apache License

@Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
    final int maxDoc = reader.maxDoc();
    final FixedBitSet result = new FixedBitSet(maxDoc);
    final DocIdSet inner = filter.getDocIdSet(reader);
    // Only ask for an iterator when the wrapped filter produced a usable set.
    final DocIdSetIterator matches = (inner == null || inner == DocIdSet.EMPTY_DOCIDSET) ? null
            : inner.iterator();
    if (matches == null) {
        // The wrapped filter matches nothing, so its negation matches everything.
        result.set(0, maxDoc);
    } else {
        // Copy the wrapped filter's matches, then invert them over the whole doc range.
        result.or(matches);
        result.flip(0, maxDoc);
    }
    return result;
}

From source file:org.apache.solr.legacy.TestLegacyNumericUtils.java

License:Apache License

/**
 * Asserts that {@code LegacyNumericUtils.splitIntRange} decomposes the range
 * {@code [lower, upper]} into the expected sub-ranges.
 *
 * <p>When {@code useBitSet} is true, every doc position covered by the emitted
 * sub-ranges is marked in a bit set; overlap triggers a failure and, at the end,
 * flipping the whole range must zero the set (i.e. the sub-ranges exactly tile it).
 * When {@code expectedBounds}/{@code expectedShifts} are non-null, each emitted
 * sub-range's shifted bounds and shift value are compared against them.
 *
 * <p>Note: The neededBounds Iterable must be unsigned (easier understanding
 * what's happening).
 */
private void assertIntRangeSplit(final int lower, final int upper, int precisionStep, final boolean useBitSet,
        final Iterable<Integer> expectedBounds, final Iterable<Integer> expectedShifts) {
    final FixedBitSet bits = useBitSet ? new FixedBitSet(upper - lower + 1) : null;
    final Iterator<Integer> neededBounds = (expectedBounds == null) ? null : expectedBounds.iterator();
    final Iterator<Integer> neededShifts = (expectedShifts == null) ? null : expectedShifts.iterator();

    LegacyNumericUtils.splitIntRange(new LegacyNumericUtils.IntRangeBuilder() {
        @Override
        public void addRange(int min, int max, int shift) {
            assertTrue("min, max should be inside bounds",
                    min >= lower && min <= upper && max >= lower && max <= upper);
            if (useBitSet) {
                for (int i = min; i <= max; i++) {
                    assertFalse("ranges should not overlap", bits.getAndSet(i - lower));
                    // extra exit condition to prevent overflow on MAX_VALUE
                    if (i == max)
                        break;
                }
            }
            if (neededBounds == null)
                return;
            // make unsigned ints for easier display and understanding
            min ^= 0x80000000;
            max ^= 0x80000000;
            //System.out.println("0x"+Integer.toHexString(min>>>shift)+",0x"+Integer.toHexString(max>>>shift)+")/*shift="+shift+"*/,");
            assertEquals("shift", neededShifts.next().intValue(), shift);
            assertEquals("inner min bound", neededBounds.next().intValue(), min >>> shift);
            assertEquals("inner max bound", neededBounds.next().intValue(), max >>> shift);
        }
    }, precisionStep, lower, upper);

    if (useBitSet) {
        // after flipping all bits in the range, the cardinality should be zero
        bits.flip(0, upper - lower + 1);
        assertEquals("The sub-ranges concatenated should match the whole range", 0, bits.cardinality());
    }
}

From source file:org.apache.solr.search.facet.UnInvertedField.java

License:Apache License

/**
 * Accumulates per-term facet counts for the base doc set into {@code counts}.
 *
 * <p>Big terms are counted via direct set intersections; the remaining term
 * instances are decoded from this field's packed term-number tables. When the
 * base set covers more than half the index, the complement set is counted
 * instead and the totals are corrected at the end.
 */
private void getCounts(FacetFieldProcessorByArrayUIF processor, CountSlotAcc counts) throws IOException {
    DocSet docs = processor.fcontext.base;
    int baseSize = docs.size();
    int maxDoc = searcher.maxDoc();

    // what about allBuckets?
    if (baseSize < processor.effectiveMincount) {
        return; // base set is smaller than the minimum count any bucket could reach
    }

    final int[] index = this.index;

    // Counting the complement is cheaper when the base set covers more than
    // half the index; only valid when docs is a BitDocSet we can clone+flip.
    boolean doNegative = baseSize > maxDoc >> 1 && termInstances > 0 && docs instanceof BitDocSet;

    if (doNegative) {
        FixedBitSet bs = ((BitDocSet) docs).getBits().clone();
        bs.flip(0, maxDoc);
        // TODO: when iterator across negative elements is available, use that
        // instead of creating a new bitset and inverting.
        docs = new BitDocSet(bs, maxDoc - baseSize);
        // simply negating will mean that we have deleted docs in the set.
        // that should be OK, as their entries in our table should be empty.
    }

    // For the biggest terms, do straight set intersections
    for (TopTerm tt : bigTerms.values()) {
        // TODO: counts could be deferred if sorting by index order
        counts.incrementCount(tt.termNum, searcher.numDocs(tt.termQuery, docs));
    }

    // TODO: we could short-circuit counting altogether for sorted faceting
    // where we already have enough terms from the bigTerms

    if (termInstances > 0) {
        DocIterator iter = docs.iterator();
        while (iter.hasNext()) {
            int doc = iter.nextDoc();
            int code = index[doc];

            // Low byte == 1 appears to flag an overflow entry: the upper 24 bits
            // are a byte offset into the tnums side array for this doc's terms.
            // NOTE(review): encoding inferred from this decoder — confirm against
            // the UnInvertedField builder.
            if ((code & 0xff) == 1) {
                int pos = code >>> 8;
                int whichArray = (doc >>> 16) & 0xff;
                byte[] arr = tnums[whichArray];
                int tnum = 0;
                for (;;) {
                    // Decode one variable-length delta: 7 payload bits per byte,
                    // high bit set means "more bytes follow".
                    int delta = 0;
                    for (;;) {
                        byte b = arr[pos++];
                        delta = (delta << 7) | (b & 0x7f);
                        if ((b & 0x80) == 0)
                            break;
                    }
                    // A zero delta terminates this doc's term list.
                    if (delta == 0)
                        break;
                    tnum += delta - TNUM_OFFSET;
                    counts.incrementCount(tnum, 1);
                }
            } else {
                // Otherwise the term deltas are packed inline in the int itself,
                // one byte at a time, low byte first.
                int tnum = 0;
                int delta = 0;
                for (;;) {
                    delta = (delta << 7) | (code & 0x7f);
                    if ((code & 0x80) == 0) {
                        // Continuation bit clear: delta is complete (0 terminates).
                        if (delta == 0)
                            break;
                        tnum += delta - TNUM_OFFSET;
                        counts.incrementCount(tnum, 1);
                        delta = 0;
                    }
                    code >>>= 8;
                }
            }
        }
    }

    if (doNegative) {
        // We counted the complement set, so slot i currently holds
        // (maxTermCounts[i] - trueCount). Adding (maxTermCounts[i] - 2*count)
        // yields maxTermCounts[i] - count, the count over the base set.
        for (int i = 0; i < numTermsInField; i++) {
            //       counts[i] = maxTermCounts[i] - counts[i];
            counts.incrementCount(i, maxTermCounts[i] - counts.getCount(i) * 2);
        }
    }

    /*** TODO - future optimization to handle allBuckets
    if (processor.allBucketsSlot >= 0) {
      int all = 0;  // overflow potential
      for (int i=0; i<numTermsInField; i++) {
        all += counts.getCount(i);
      }
      counts.incrementCount(processor.allBucketsSlot, all);
    }
     ***/
}

From source file:org.geotoolkit.lucene.filter.SerialChainFilter.java

License:Open Source License

@Override
public DocIdSet getDocIdSet(final LeafReaderContext ctx, final Bits b)
        throws CorruptIndexException, IOException {

    final int filterCount = chain.size();
    final int operatorCount = actionType.length;
    final int maxDoc = ctx.reader().maxDoc();

    // Seed the accumulator with the first filter's matching documents.
    final FixedBitSet accumulator = (FixedBitSet) ((BitDocIdSet) chain.get(0).getDocIdSet(ctx, b)).bits();

    // A leading NOT is a unary operator applied to the first operand alone.
    int op = 0;
    if (actionType[op] == NOT) {
        accumulator.flip(0, maxDoc);
        op++;
    }

    for (int i = 1; i < filterCount; i++) {

        // Consume the binary operator for this operand; fall back to DEFAULT
        // when the operator list is shorter than the filter chain.
        final LogicalFilterType action = (op < operatorCount) ? actionType[op++] : DEFAULT;

        final FixedBitSet operand = (FixedBitSet) ((BitDocIdSet) chain.get(i).getDocIdSet(ctx, b)).bits();

        // A NOT immediately following the binary operator negates this operand
        // before it is combined with the accumulator.
        if (op < operatorCount && actionType[op] == NOT) {
            operand.flip(0, maxDoc);
            op++;
        }

        if (action == AND) {
            accumulator.and(operand);
        } else if (action == XOR) {
            accumulator.xor(operand);
        } else {
            // OR, DEFAULT, and any unrecognized operator all combine by union.
            accumulator.or(operand);
        }
    }
    // invalidate deleted document
    return invalidateDeletedDocument(accumulator, b);
}