List of usage examples for org.apache.lucene.util FixedBitSet clear
@Override
public void clear(int index)
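FixedBitSet.clear(int) zeroes the bit at the given index; there is also a clear(int, int) range overload whose end index is exclusive. Before the real-world examples below, a minimal self-contained sketch of the semantics (illustrative class name, only lucene-core assumed on the classpath):

import org.apache.lucene.util.FixedBitSet;

public class FixedBitSetClearDemo {
    public static void main(String[] args) {
        FixedBitSet bits = new FixedBitSet(8);   // capacity for bits 0..7, all clear
        bits.set(3);
        bits.set(5);
        System.out.println(bits.get(3));         // true
        bits.clear(3);                           // zero a single bit
        System.out.println(bits.get(3));         // false
        System.out.println(bits.cardinality());  // 1 (only bit 5 remains)
        bits.clear(0, 8);                        // range overload: clears [0, 8)
        System.out.println(bits.cardinality());  // 0
    }
}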
From source file:com.floragunn.searchguard.configuration.DlsFlsFilterLeafReader.java
License:Open Source License
DlsFlsFilterLeafReader(final LeafReader delegate, final Set<String> includes, final Query dlsQuery) {
    super(delegate);
    flsEnabled = includes != null && !includes.isEmpty();
    dlsEnabled = dlsQuery != null;
    if (flsEnabled) {
        this.includes = includes.toArray(new String[0]);
        final FieldInfos infos = delegate.getFieldInfos();
        final List<FieldInfo> fi = new ArrayList<FieldInfo>(infos.size());
        for (final FieldInfo info : infos) {
            final String fname = info.name;
            if ((!WildcardMatcher.containsWildcard(fname) && includes.contains(fname))
                    || WildcardMatcher.matchAny(this.includes, fname)) {
                fi.add(info);
            }
        }
        this.flsFieldInfos = new FieldInfos(fi.toArray(new FieldInfo[0]));
    } else {
        this.includes = null;
        this.flsFieldInfos = null;
    }
    if (dlsEnabled) {
        try {
            // borrowed from Apache Lucene (Copyright Apache Software Foundation (ASF))
            final IndexSearcher searcher = new IndexSearcher(this);
            searcher.setQueryCache(null);
            final boolean needsScores = false;
            final Weight preserveWeight = searcher.createNormalizedWeight(dlsQuery, needsScores);
            final int maxDoc = in.maxDoc();
            final FixedBitSet bits = new FixedBitSet(maxDoc);
            final Scorer preserveScorer = preserveWeight.scorer(this.getContext());
            if (preserveScorer != null) {
                bits.or(preserveScorer.iterator());
            }
            if (in.hasDeletions()) {
                final Bits oldLiveDocs = in.getLiveDocs();
                assert oldLiveDocs != null;
                final DocIdSetIterator it = new BitSetIterator(bits, 0L);
                for (int i = it.nextDoc(); i != DocIdSetIterator.NO_MORE_DOCS; i = it.nextDoc()) {
                    if (!oldLiveDocs.get(i)) {
                        bits.clear(i);
                    }
                }
            }
            this.liveDocs = bits;
            this.numDocs = bits.cardinality();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    } else {
        this.liveDocs = null;
        this.numDocs = -1;
    }
}
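This document-level-security reader builds a bitset of query matches and then intersects it with the segment's existing deletions. That masking step distills to the loop below, a self-contained sketch with illustrative data (the class name and doc values are assumptions, not part of Search Guard):

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BitSetIterator;
import org.apache.lucene.util.FixedBitSet;

public class MaskDeletedDemo {
    public static void main(String[] args) throws Exception {
        FixedBitSet bits = new FixedBitSet(4);     // docs matching the DLS query
        bits.set(0); bits.set(2);
        FixedBitSet oldLive = new FixedBitSet(4);  // segment live docs: doc 2 deleted
        oldLive.set(0); oldLive.set(1); oldLive.set(3);
        Bits oldLiveDocs = oldLive;
        // drop any query-matched doc that was already deleted
        DocIdSetIterator it = new BitSetIterator(bits, 0L);
        for (int i = it.nextDoc(); i != DocIdSetIterator.NO_MORE_DOCS; i = it.nextDoc()) {
            if (!oldLiveDocs.get(i)) {
                bits.clear(i); // clearing at or behind the cursor does not disturb iteration
            }
        }
        System.out.println(bits.cardinality()); // 1: only doc 0 is both matched and live
    }
}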
From source file:org.apache.solr.handler.ExportWriter.java
License:Apache License
protected void writeDoc(SortDoc sortDoc, List<LeafReaderContext> leaves, EntryWriter ew) throws IOException {
    int ord = sortDoc.ord;
    FixedBitSet set = sets[ord];
    set.clear(sortDoc.docId);
    LeafReaderContext context = leaves.get(ord);
    int fieldIndex = 0;
    for (FieldWriter fieldWriter : fieldWriters) {
        if (fieldWriter.write(sortDoc.docId, context.reader(), ew, fieldIndex)) {
            ++fieldIndex;
        }
    }
}
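ExportWriter clears a doc's bit the moment the doc is written, so each doc is exported exactly once across sort batches. A sketch of that drain-as-you-go pattern in isolation, assuming a Lucene version where nextSetBit returns DocIdSetIterator.NO_MORE_DOCS when exhausted (the println stands in for the real write):

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.FixedBitSet;

public class DrainBitSetDemo {
    public static void main(String[] args) {
        FixedBitSet set = new FixedBitSet(16);
        set.set(2); set.set(7); set.set(11);
        int doc = set.nextSetBit(0);
        while (doc != DocIdSetIterator.NO_MORE_DOCS) {
            System.out.println("export doc " + doc);  // stand-in for writing the doc
            set.clear(doc);                           // mark it consumed
            doc = doc + 1 < set.length() ? set.nextSetBit(doc + 1) : DocIdSetIterator.NO_MORE_DOCS;
        }
        // set.cardinality() == 0: every doc was exported exactly once
    }
}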
From source file:org.apache.solr.response.SortingResponseWriter.java
License:Apache License
protected void writeDoc(SortDoc sortDoc, List<AtomicReaderContext> leaves, FieldWriter[] fieldWriters,
        FixedBitSet[] sets, Writer out) throws IOException {
    int ord = sortDoc.ord;
    FixedBitSet set = sets[ord];
    set.clear(sortDoc.docId);
    AtomicReaderContext context = leaves.get(ord);
    boolean needsComma = false;
    for (FieldWriter fieldWriter : fieldWriters) {
        if (needsComma) {
            out.write(',');
        }
        fieldWriter.write(sortDoc.docId, context.reader(), out);
        needsComma = true;
    }
}
From source file:org.elasticsearch.common.lucene.search.XBooleanFilter.java
License:Apache License
/**
 * Returns a DocIdSet representing the Boolean composition
 * of the filters that have been added.
 */
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
    FixedBitSet res = null;
    final AtomicReader reader = context.reader();

    // optimize the single-clause case...
    if (clauses.size() == 1) {
        FilterClause clause = clauses.get(0);
        DocIdSet set = clause.getFilter().getDocIdSet(context, acceptDocs);
        if (clause.getOccur() == Occur.MUST_NOT) {
            if (DocIdSets.isEmpty(set)) {
                return new AllDocIdSet(reader.maxDoc());
            } else {
                return new NotDocIdSet(set, reader.maxDoc());
            }
        }
        // SHOULD or MUST, just return the set...
        if (DocIdSets.isEmpty(set)) {
            return null;
        }
        return set;
    }

    // first, go over and see if we can shortcut the execution
    // and gather Bits if we need to
    List<ResultClause> results = new ArrayList<ResultClause>(clauses.size());
    boolean hasShouldClauses = false;
    boolean hasNonEmptyShouldClause = false;
    boolean hasMustClauses = false;
    boolean hasMustNotClauses = false;
    for (int i = 0; i < clauses.size(); i++) {
        FilterClause clause = clauses.get(i);
        DocIdSet set = clause.getFilter().getDocIdSet(context, acceptDocs);
        if (clause.getOccur() == Occur.MUST) {
            hasMustClauses = true;
            if (DocIdSets.isEmpty(set)) {
                return null;
            }
        } else if (clause.getOccur() == Occur.SHOULD) {
            hasShouldClauses = true;
            if (DocIdSets.isEmpty(set)) {
                continue;
            }
            hasNonEmptyShouldClause = true;
        } else if (clause.getOccur() == Occur.MUST_NOT) {
            hasMustNotClauses = true;
            if (DocIdSets.isEmpty(set)) {
                // mark empty ones as null for MUST_NOT; handled in the next pass...
                results.add(new ResultClause(null, null, clause));
                continue;
            }
        }
        Bits bits = null;
        if (!DocIdSets.isFastIterator(set)) {
            bits = set.bits();
        }
        results.add(new ResultClause(set, bits, clause));
    }

    if (hasShouldClauses && !hasNonEmptyShouldClause) {
        return null;
    }

    // now, go over the clauses and apply the "fast" ones first...
    hasNonEmptyShouldClause = false;
    boolean hasBits = false;
    // But first we need to handle the "fast" SHOULD clauses, otherwise a SHOULD clause
    // could unset docs that don't match a MUST or MUST_NOT clause.
    List<ResultClause> fastOrClauses = new ArrayList<ResultClause>();
    for (int i = 0; i < results.size(); i++) {
        ResultClause clause = results.get(i);
        // bits-based (slow) clauses are applied in the second pass
        if (clause.bits != null) {
            hasBits = true;
            continue;
        }
        if (clause.clause.getOccur() == Occur.SHOULD) {
            if (hasMustClauses || hasMustNotClauses) {
                fastOrClauses.add(clause);
            } else if (res == null) {
                DocIdSetIterator it = clause.docIdSet.iterator();
                if (it != null) {
                    hasNonEmptyShouldClause = true;
                    res = new FixedBitSet(reader.maxDoc());
                    res.or(it);
                }
            } else {
                DocIdSetIterator it = clause.docIdSet.iterator();
                if (it != null) {
                    hasNonEmptyShouldClause = true;
                    res.or(it);
                }
            }
        }
    }

    // Now we can safely handle the "fast" MUST and MUST_NOT clauses.
    for (int i = 0; i < results.size(); i++) {
        ResultClause clause = results.get(i);
        // bits-based (slow) clauses are applied in the second pass
        if (clause.bits != null) {
            hasBits = true;
            continue;
        }
        if (clause.clause.getOccur() == Occur.MUST) {
            DocIdSetIterator it = clause.docIdSet.iterator();
            if (it == null) {
                return null;
            }
            if (res == null) {
                res = new FixedBitSet(reader.maxDoc());
                res.or(it);
            } else {
                res.and(it);
            }
        } else if (clause.clause.getOccur() == Occur.MUST_NOT) {
            if (res == null) {
                res = new FixedBitSet(reader.maxDoc());
                res.set(0, reader.maxDoc()); // NOTE: may set bits on deleted docs
            }
            if (clause.docIdSet != null) {
                DocIdSetIterator it = clause.docIdSet.iterator();
                if (it != null) {
                    res.andNot(it);
                }
            }
        }
    }

    if (!hasBits) {
        if (!fastOrClauses.isEmpty()) {
            DocIdSetIterator it = res.iterator();
            at_least_one_should_clause_iter: for (int setDoc = it.nextDoc(); setDoc != DocIdSetIterator.NO_MORE_DOCS; setDoc = it.nextDoc()) {
                for (ResultClause fastOrClause : fastOrClauses) {
                    DocIdSetIterator clauseIterator = fastOrClause.iterator();
                    if (clauseIterator == null) {
                        continue;
                    }
                    if (iteratorMatch(clauseIterator, setDoc)) {
                        hasNonEmptyShouldClause = true;
                        continue at_least_one_should_clause_iter;
                    }
                }
                res.clear(setDoc);
            }
        }
        if (hasShouldClauses && !hasNonEmptyShouldClause) {
            return null;
        } else {
            return res;
        }
    }

    // we have some clauses with bits, apply them...
    // we let "res" drive the computation, and check Bits against it
    List<ResultClause> slowOrClauses = new ArrayList<ResultClause>();
    for (int i = 0; i < results.size(); i++) {
        ResultClause clause = results.get(i);
        if (clause.bits == null) {
            continue;
        }
        if (clause.clause.getOccur() == Occur.SHOULD) {
            if (hasMustClauses || hasMustNotClauses) {
                slowOrClauses.add(clause);
            } else {
                if (res == null) {
                    DocIdSetIterator it = clause.docIdSet.iterator();
                    if (it == null) {
                        continue;
                    }
                    hasNonEmptyShouldClause = true;
                    res = new FixedBitSet(reader.maxDoc());
                    res.or(it);
                } else {
                    for (int doc = 0; doc < reader.maxDoc(); doc++) {
                        if (!res.get(doc) && clause.bits.get(doc)) {
                            hasNonEmptyShouldClause = true;
                            res.set(doc);
                        }
                    }
                }
            }
        } else if (clause.clause.getOccur() == Occur.MUST) {
            if (res == null) {
                // nothing we can do, just or it...
                res = new FixedBitSet(reader.maxDoc());
                DocIdSetIterator it = clause.docIdSet.iterator();
                if (it == null) {
                    return null;
                }
                res.or(it);
            } else {
                Bits bits = clause.bits;
                // use "res" to drive the iteration
                DocIdSetIterator it = res.iterator();
                for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
                    if (!bits.get(doc)) {
                        res.clear(doc);
                    }
                }
            }
        } else if (clause.clause.getOccur() == Occur.MUST_NOT) {
            if (res == null) {
                res = new FixedBitSet(reader.maxDoc());
                res.set(0, reader.maxDoc()); // NOTE: may set bits on deleted docs
                DocIdSetIterator it = clause.docIdSet.iterator();
                if (it != null) {
                    res.andNot(it);
                }
            } else {
                Bits bits = clause.bits;
                // let "res" drive the iteration
                DocIdSetIterator it = res.iterator();
                for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
                    if (bits.get(doc)) {
                        res.clear(doc);
                    }
                }
            }
        }
    }

    // From a boolean-logic point of view a SHOULD clause has no impact on a bool filter
    // if there is already a MUST or MUST_NOT clause. However, in the current ES bool
    // filter behaviour at least one SHOULD clause must match for a doc to be a match,
    // so here we check whether each matched doc matches any SHOULD filter.
    // TODO: add an option to disable the minimum_should_match=1 behaviour
    if (!slowOrClauses.isEmpty() || !fastOrClauses.isEmpty()) {
        DocIdSetIterator it = res.iterator();
        at_least_one_should_clause_iter: for (int setDoc = it.nextDoc(); setDoc != DocIdSetIterator.NO_MORE_DOCS; setDoc = it.nextDoc()) {
            for (ResultClause fastOrClause : fastOrClauses) {
                DocIdSetIterator clauseIterator = fastOrClause.iterator();
                if (clauseIterator == null) {
                    continue;
                }
                if (iteratorMatch(clauseIterator, setDoc)) {
                    hasNonEmptyShouldClause = true;
                    continue at_least_one_should_clause_iter;
                }
            }
            for (ResultClause slowOrClause : slowOrClauses) {
                if (slowOrClause.bits.get(setDoc)) {
                    hasNonEmptyShouldClause = true;
                    continue at_least_one_should_clause_iter;
                }
            }
            res.clear(setDoc);
        }
    }

    if (hasShouldClauses && !hasNonEmptyShouldClause) {
        return null;
    } else {
        return res;
    }
}
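The final pruning step above is where clear does the real work: the accumulated result set drives the iteration, and any doc that no SHOULD clause matched is dropped. A self-contained distillation of that pattern (illustrative class name and data; it uses the Lucene 5+ BitSetIterator, whereas the Lucene 4-era filter above iterates FixedBitSet through its own DocIdSet view):

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BitSetIterator;
import org.apache.lucene.util.FixedBitSet;

public class ShouldPruneDemo {
    public static void main(String[] args) throws Exception {
        int maxDoc = 8;
        FixedBitSet res = new FixedBitSet(maxDoc);
        res.set(0, maxDoc); // pretend every doc survived the MUST/MUST_NOT clauses
        FixedBitSet should0 = new FixedBitSet(maxDoc);
        should0.set(2);
        FixedBitSet should1 = new FixedBitSet(maxDoc);
        should1.set(5);
        Bits[] shouldBits = { should0, should1 };
        // minimum_should_match=1: keep only docs matched by at least one SHOULD clause
        DocIdSetIterator it = new BitSetIterator(res, res.cardinality());
        outer:
        for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
            for (Bits bits : shouldBits) {
                if (bits.get(doc)) {
                    continue outer; // at least one SHOULD clause matched
                }
            }
            res.clear(doc); // no SHOULD clause matched: drop the doc
        }
        System.out.println(res.cardinality()); // 2 (only docs 2 and 5 survive)
    }
}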
From source file:org.elasticsearch.common.lucene.uid.VersionLookupTests.java
License:Apache License
/** test version lookup with two documents matching the ID */
public void testTwoDocuments() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "6", UidFieldMapper.Defaults.FIELD_TYPE));
    doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 87));
    writer.addDocument(doc);
    writer.addDocument(doc);
    DirectoryReader reader = DirectoryReader.open(writer, false);
    LeafReaderContext segment = reader.leaves().get(0);
    PerThreadIDAndVersionLookup lookup = new PerThreadIDAndVersionLookup(segment.reader());
    // return the last doc when there are duplicates
    DocIdAndVersion result = lookup.lookup(new BytesRef("6"), null, segment);
    assertNotNull(result);
    assertEquals(87, result.version);
    assertEquals(1, result.docId);
    // delete the first doc only
    FixedBitSet live = new FixedBitSet(2);
    live.set(1);
    result = lookup.lookup(new BytesRef("6"), live, segment);
    assertNotNull(result);
    assertEquals(87, result.version);
    assertEquals(1, result.docId);
    // delete the second doc only
    live.clear(1);
    live.set(0);
    result = lookup.lookup(new BytesRef("6"), live, segment);
    assertNotNull(result);
    assertEquals(87, result.version);
    assertEquals(0, result.docId);
    // delete both docs
    assertNull(lookup.lookup(new BytesRef("6"), new Bits.MatchNoBits(2), segment));
    reader.close();
    writer.close();
    dir.close();
}
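The test drives the lookup with a hand-rolled live-docs mask: since FixedBitSet implements Bits, set and clear flip which of the two duplicate docs appears deleted. The trick in miniature (illustrative class name):

import org.apache.lucene.util.FixedBitSet;

public class LiveDocsMaskDemo {
    public static void main(String[] args) {
        FixedBitSet live = new FixedBitSet(2);
        live.set(1);    // doc 0 "deleted", doc 1 live
        System.out.println(live.get(0) + " " + live.get(1)); // false true
        live.clear(1);  // flip the mask...
        live.set(0);    // ...now doc 1 "deleted", doc 0 live
        System.out.println(live.get(0) + " " + live.get(1)); // true false
        // Any Bits-accepting API (like the lookup above) now skips doc 1.
    }
}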
From source file:org.elasticsearch.search.aggregations.metrics.RedBlackTreeTests.java
License:Apache License
public void testRemove() {
    final int numValues = scaledRandomIntBetween(200, 1000);
    final FixedBitSet values = new FixedBitSet(numValues);
    values.set(0, numValues);
    IntRedBlackTree tree = new IntRedBlackTree();
    for (int i = 0; i < numValues; ++i) {
        tree.add(i);
    }
    final int iters = scaledRandomIntBetween(300, 1000);
    for (int i = 0; i < iters; ++i) {
        final int value = randomInt(numValues - 1);
        final boolean removed = tree.remove(value);
        assertEquals(removed, values.get(value));
        values.clear(value);
        assertEquals(values.cardinality(), tree.size());
        tree.assertConsistent();
    }
    int size = 0;
    int previousValue = Integer.MIN_VALUE;
    for (IntCursor cursor : tree) {
        ++size;
        final int value = tree.values[cursor.value];
        assertTrue(previousValue < value);
        assertTrue(values.get(value));
        previousValue = value;
    }
    assertEquals(values.cardinality(), size);
}
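Here the FixedBitSet is a trivially correct reference model: clear mirrors every removal, and cardinality() must track the tree's size after each step. The same oracle pattern in a self-contained sketch, with java.util.TreeSet standing in for IntRedBlackTree (illustrative class name):

import java.util.TreeSet;
import org.apache.lucene.util.FixedBitSet;

public class BitSetOracleDemo {
    public static void main(String[] args) {
        int n = 100;
        FixedBitSet model = new FixedBitSet(n);
        TreeSet<Integer> set = new TreeSet<>();
        for (int i = 0; i < n; i++) {
            model.set(i);
            set.add(i);
        }
        // removals: the bitset oracle must agree with the structure under test
        for (int v : new int[] { 42, 42, 7 }) {
            boolean removed = set.remove(v);
            if (removed != model.get(v)) throw new AssertionError("disagree at " + v);
            model.clear(v); // clear is idempotent: clearing twice is a no-op
            if (model.cardinality() != set.size()) throw new AssertionError("size drift");
        }
    }
}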
From source file:org.opengrok.suggest.query.customized.CustomSloppyPhraseScorer.java
License:Apache License
/** pp was just advanced. If that caused a repeater collision, resolve by advancing the lesser
 * of the two colliding pps. Note that there can only be one collision, as by the initialization
 * there were no collisions before pp was advanced. */
private boolean advanceRpts(PhrasePositions pp) throws IOException {
    if (pp.rptGroup < 0) {
        return true; // not a repeater
    }
    PhrasePositions[] rg = rptGroups[pp.rptGroup];
    FixedBitSet bits = new FixedBitSet(rg.length); // for re-queuing after collisions are resolved
    int k0 = pp.rptInd;
    int k;
    while ((k = collide(pp)) >= 0) {
        pp = lesser(pp, rg[k]); // always advance the lesser of the (only) two colliding pps
        if (!advancePP(pp)) {
            return false; // exhausted
        }
        if (k != k0) { // careful: mark only those currently in the queue
            bits = FixedBitSet.ensureCapacity(bits, k);
            bits.set(k); // mark that pp2 needs to be re-queued
        }
    }
    // collisions resolved, now re-queue:
    // empty (partially) the queue until seeing all pps advanced for resolving collisions
    int n = 0;
    // TODO: would be good if we could avoid calling cardinality() in each iteration!
    int numBits = bits.length(); // upper bound on the bits we may have set
    while (bits.cardinality() > 0) {
        PhrasePositions pp2 = pq.pop();
        rptStack[n++] = pp2;
        if (pp2.rptGroup >= 0 && pp2.rptInd < numBits // this bit may not have been set
                && bits.get(pp2.rptInd)) {
            bits.clear(pp2.rptInd);
        }
    }
    // add back to queue
    for (int i = n - 1; i >= 0; i--) {
        pq.add(rptStack[i]);
    }
    return true;
}
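One detail worth calling out in this example: FixedBitSet.ensureCapacity may return a new, larger instance rather than growing in place, which is why the scorer reassigns bits. Forgetting the reassignment silently drops the growth. A minimal sketch (illustrative class name):

import org.apache.lucene.util.FixedBitSet;

public class EnsureCapacityDemo {
    public static void main(String[] args) {
        FixedBitSet bits = new FixedBitSet(8);        // holds bits 0..7
        bits = FixedBitSet.ensureCapacity(bits, 100); // reassign: may be a new object
        bits.set(100);                                // index 100 is now in range
        bits.clear(100);                              // and can be cleared again
        System.out.println(bits.cardinality());       // 0
    }
}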
From source file:tech.beshu.ror.es.security.DocumentFilterReader.java
License:Open Source License
private DocumentFilterReader(LeafReader reader, Query query) throws IOException {
    super(reader);
    final IndexSearcher searcher = new IndexSearcher(this);
    searcher.setQueryCache(null);
    final boolean needsScores = false;
    final Weight preserveWeight = searcher.createWeight(query, needsScores, 0);
    final int maxDoc = this.in.maxDoc();
    final FixedBitSet bits = new FixedBitSet(maxDoc);
    final Scorer preserveScorer = preserveWeight.scorer(this.getContext());
    if (preserveScorer != null) {
        bits.or(preserveScorer.iterator());
    }
    if (in.hasDeletions()) {
        final Bits oldLiveDocs = in.getLiveDocs();
        assert oldLiveDocs != null;
        final DocIdSetIterator it = new BitSetIterator(bits, 0L);
        for (int i = it.nextDoc(); i != DocIdSetIterator.NO_MORE_DOCS; i = it.nextDoc()) {
            if (!oldLiveDocs.get(i)) {
                bits.clear(i);
            }
        }
    }
    this.liveDocs = bits;
    this.numDocs = bits.cardinality();
}