Example usage for org.apache.lucene.index TermsEnum seekExact

Introduction

This page collects example usages of the org.apache.lucene.index.TermsEnum method seekExact from open-source projects.

Prototype

public abstract void seekExact(long ord) throws IOException;

public boolean seekExact(BytesRef text) throws IOException;

Document

Seeks to the specified term by ordinal (position), as previously returned by #ord(). The BytesRef overload seeks exactly to the given term and returns true if it is present; most of the examples below use this overload.
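
Before the project examples, here is a minimal, self-contained sketch of both overloads. This is a hypothetical illustration rather than code from any project below: the index path and the field name "body" are placeholders, and it assumes the Lucene 5.x-style API (a LeafReader and the no-argument Terms.iterator()), while several examples below still use the older 4.x API.

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.BytesRef;

public class SeekExactSketch {
    public static void main(String[] args) throws IOException {
        // "/path/to/index" and the field name "body" are placeholders
        try (DirectoryReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/path/to/index")))) {
            if (reader.leaves().isEmpty()) {
                return; // empty index
            }
            LeafReader leaf = reader.leaves().get(0).reader();
            Terms terms = leaf.terms("body");
            if (terms == null) {
                return; // field not indexed
            }
            TermsEnum te = terms.iterator();

            // Overload 1: seek by term bytes; returns false if the term is absent.
            if (te.seekExact(new BytesRef("lucene"))) {
                System.out.println("docFreq=" + te.docFreq());
            }

            // Overload 2: seek by ordinal; only codecs that support ord() allow this.
            // The default terms dictionary throws UnsupportedOperationException.
            try {
                te.seekExact(0L); // position on the first term
                System.out.println("first term: " + te.term().utf8ToString());
            } catch (UnsupportedOperationException e) {
                // ords are not supported by this codec; fall back to BytesRef seeks
            }
        }
    }
}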

Usage

From source file: com.github.flaxsearch.util.ReaderManager.java

License: Apache License

default TermsEnum findTermPostings(Integer segment, String field, String term) throws IOException {

    Fields fields = getFields(segment);
    Terms terms = fields.terms(field);

    if (terms == null) {
        String msg = String.format("No field %s", field);
        throw new WebApplicationException(msg, Response.Status.NOT_FOUND);
    }

    TermsEnum te = terms.iterator();

    assert (term != null);
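    // seekExact(BytesRef) returns false when the term is absent, so we can fail fast with a 404 instead of scanning the dictionary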
    if (!te.seekExact(new BytesRef(term))) {
        String msg = String.format("No term %s on field %s", term, field);
        throw new WebApplicationException(msg, Response.Status.NOT_FOUND);
    }

    return te;
}

From source file: com.joliciel.jochre.search.highlight.LuceneQueryHighlighter.java

License: Open Source License

public Map<Integer, Set<HighlightTerm>> highlight(Set<Integer> docIds, Set<String> fields) {
    try {
        Map<Integer, Set<HighlightTerm>> termMap = new HashMap<Integer, Set<HighlightTerm>>();
        Map<Integer, Document> idToDocMap = new HashMap<Integer, Document>();
        Map<Integer, CoordinateStorage> idToCoordinateStorageMap = new HashMap<Integer, CoordinateStorage>();

        Map<Integer, Set<Integer>> myLeaves = new HashMap<Integer, Set<Integer>>();
        for (int docId : docIds) {
            Document luceneDoc = indexSearcher.doc(docId);
            idToDocMap.put(docId, luceneDoc);
            JochreIndexDocument jochreDoc = searchService.getJochreIndexDocument(indexSearcher, docId);
            idToCoordinateStorageMap.put(docId, jochreDoc.getCoordinateStorage());
            termMap.put(docId, new TreeSet<HighlightTerm>());
            int leaf = ReaderUtil.subIndex(docId, leaves);
            Set<Integer> docsPerLeaf = myLeaves.get(leaf);
            if (docsPerLeaf == null) {
                docsPerLeaf = new HashSet<Integer>();
                myLeaves.put(leaf, docsPerLeaf);
            }
            docsPerLeaf.add(docId);
        }

        for (int leaf : myLeaves.keySet()) {
            if (LOG.isTraceEnabled())
                LOG.trace("Searching leaf " + leaf);
            Set<Integer> docsPerLeaf = myLeaves.get(leaf);
            AtomicReaderContext subContext = leaves.get(leaf);
            AtomicReader atomicReader = subContext.reader();

            int fieldCounter = 0;
            for (String field : fields) {
                fieldCounter++;
                if (LOG.isTraceEnabled())
                    LOG.trace("Field " + fieldCounter + ": " + field);

                Terms atomicReaderTerms = atomicReader.terms(field);
                if (atomicReaderTerms == null) {
                    continue; // nothing to do
                }
                TermsEnum termsEnum = atomicReaderTerms.iterator(TermsEnum.EMPTY);

                int termCounter = 0;
                for (BytesRef term : terms) {
                    termCounter++;
                    if (LOG.isTraceEnabled())
                        LOG.trace("Searching for term " + termCounter + ": " + term.utf8ToString()
                                + " in field " + field);

                    if (!termsEnum.seekExact(term)) {
                        continue; // term not found
                    }

                    DocsAndPositionsEnum docPosEnum = termsEnum.docsAndPositions(null, null,
                            DocsAndPositionsEnum.FLAG_OFFSETS);
                    int relativeDocId = docPosEnum.nextDoc();
                    while (relativeDocId != DocsAndPositionsEnum.NO_MORE_DOCS) {
                        int docId = subContext.docBase + relativeDocId;
                        if (docsPerLeaf.contains(docId)) {
                            Document doc = idToDocMap.get(docId);
                            Set<HighlightTerm> highlightTerms = termMap.get(docId);
                            //Retrieve the term frequency in the current document
                            int freq = docPosEnum.freq();
                            if (LOG.isTraceEnabled()) {
                                String extId = doc.get("id");
                                String path = doc.get("path");
                                LOG.trace("Found " + freq + " matches for doc " + docId + ", extId: " + extId
                                        + ", path: " + path);
                            }

                            for (int i = 0; i < freq; i++) {
                                int position = docPosEnum.nextPosition();
                                int start = docPosEnum.startOffset();
                                int end = docPosEnum.endOffset();

                                if (LOG.isTraceEnabled())
                                    LOG.trace("Found match " + position + " at docId " + docId + ", field "
                                            + field + " start=" + start + ", end=" + end);

                                CoordinateStorage coordinateStorage = idToCoordinateStorageMap.get(docId);
                                int imageIndex = coordinateStorage.getImageIndex(start);
                                int pageIndex = coordinateStorage.getPageIndex(start);

                                HighlightTerm highlightTerm = new HighlightTerm(docId, field, start, end,
                                        imageIndex, pageIndex);
                                highlightTerm.setWeight(this.weigh(term));
                                if (highlightTerm.getWeight() > 0)
                                    highlightTerms.add(highlightTerm);
                            }
                        }
                        relativeDocId = docPosEnum.nextDoc();
                    }
                } // next term
            } // next field
        } // next index leaf to search

        return termMap;
    } catch (IOException e) {
        LogUtils.logError(LOG, e);
        throw new RuntimeException(e);
    }
}

From source file: com.meizu.nlp.classification.BooleanPerceptronClassifier.java

License: Apache License

private void updateWeights(LeafReader leafReader, int docId, Boolean assignedClass,
        SortedMap<String, Double> weights, double modifier, boolean updateFST) throws IOException {
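    // cte enumerates the field-level terms dictionary (textTerms); termsEnum below walks only this document's term vector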
    TermsEnum cte = textTerms.iterator();

    // get the doc term vectors
    Terms terms = leafReader.getTermVector(docId, textFieldName);

    if (terms == null) {
        throw new IOException("term vectors must be stored for field " + textFieldName);
    }

    TermsEnum termsEnum = terms.iterator();

    BytesRef term;

    while ((term = termsEnum.next()) != null) {
        cte.seekExact(term);
        if (assignedClass != null) {
            long termFreqLocal = termsEnum.totalTermFreq();
            // update weights
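            // Util.get returns null for terms absent from the FST, so the weights are assumed to be pre-seeded for every term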
            Long previousValue = Util.get(fst, term);
            String termString = term.utf8ToString();
            weights.put(termString, previousValue + modifier * termFreqLocal);
        }
    }
    if (updateFST) {
        updateFST(weights);
    }
}

From source file: com.rocana.lucene.codec.v1.RocanaBasePostingsFormatTestCase.java

License: Apache License

@Override
public void testInvertedWrite() throws Exception {
    Directory dir = newDirectory();
    MockAnalyzer analyzer = new MockAnalyzer(random());
    analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
    IndexWriterConfig iwc = newIndexWriterConfig(analyzer);

    // Must be concurrent because thread(s) can be merging
    // while up to one thread flushes, and each of those
    // threads iterates over the map while the flushing
    // thread might be adding to it:
    final Map<String, TermFreqs> termFreqs = new ConcurrentHashMap<>();

    final AtomicLong sumDocFreq = new AtomicLong();
    final AtomicLong sumTotalTermFreq = new AtomicLong();

    // TODO: would be better to use / delegate to the current
    // Codec returned by getCodec()

    iwc.setCodec(new AssertingCodec() {
        @Override
        public PostingsFormat getPostingsFormatForField(String field) {

            PostingsFormat p = getCodec().postingsFormat();
            if (p instanceof PerFieldPostingsFormat) {
                p = ((PerFieldPostingsFormat) p).getPostingsFormatForField(field);
            }
            if (p instanceof RocanaPerFieldPostingsFormat) {
                p = ((RocanaPerFieldPostingsFormat) p).getPostingsFormatForField(field);
            }
            final PostingsFormat defaultPostingsFormat = p;

            final Thread mainThread = Thread.currentThread();

            if (field.equals("body")) {

                // A PF that counts up some stats and then in
                // the end we verify the stats match what the
                // final IndexReader says, just to exercise the
                // new freedom of iterating the postings more
                // than once at flush/merge:

                return new PostingsFormat(defaultPostingsFormat.getName()) {

                    @Override
                    public FieldsConsumer fieldsConsumer(final SegmentWriteState state) throws IOException {

                        final FieldsConsumer fieldsConsumer = defaultPostingsFormat.fieldsConsumer(state);

                        return new FieldsConsumer() {
                            @Override
                            public void write(Fields fields) throws IOException {
                                fieldsConsumer.write(fields);

                                boolean isMerge = state.context.context == IOContext.Context.MERGE;

                                // We only use one thread for flushing
                                // in this test:
                                assert isMerge || Thread.currentThread() == mainThread;

                                // We iterate the provided TermsEnum
                                // twice, so we exercise this new freedom
                                // with the inverted API; if
                                // addOnSecondPass is true, we add up
                                // term stats on the 2nd iteration:
                                boolean addOnSecondPass = random().nextBoolean();

                                //System.out.println("write isMerge=" + isMerge + " 2ndPass=" + addOnSecondPass);

                                // Gather our own stats:
                                Terms terms = fields.terms("body");
                                assert terms != null;

                                TermsEnum termsEnum = terms.iterator();
                                PostingsEnum docs = null;
                                while (termsEnum.next() != null) {
                                    BytesRef term = termsEnum.term();
                                    // TODO: also sometimes ask for payloads/offsets?
                                    boolean noPositions = random().nextBoolean();
                                    if (noPositions) {
                                        docs = termsEnum.postings(docs, PostingsEnum.FREQS);
                                    } else {
                                        docs = termsEnum.postings(null, PostingsEnum.POSITIONS);
                                    }
                                    int docFreq = 0;
                                    long totalTermFreq = 0;
                                    while (docs.nextDoc() != PostingsEnum.NO_MORE_DOCS) {
                                        docFreq++;
                                        totalTermFreq += docs.freq();
                                        int limit = TestUtil.nextInt(random(), 1, docs.freq());
                                        if (!noPositions) {
                                            for (int i = 0; i < limit; i++) {
                                                docs.nextPosition();
                                            }
                                        }
                                    }

                                    String termString = term.utf8ToString();

                                    // During merge we should only see terms
                                    // we had already seen during a
                                    // previous flush:
                                    assertTrue(isMerge == false || termFreqs.containsKey(termString));

                                    if (isMerge == false) {
                                        if (addOnSecondPass == false) {
                                            TermFreqs tf = termFreqs.get(termString);
                                            if (tf == null) {
                                                tf = new TermFreqs();
                                                termFreqs.put(termString, tf);
                                            }
                                            tf.docFreq += docFreq;
                                            tf.totalTermFreq += totalTermFreq;
                                            sumDocFreq.addAndGet(docFreq);
                                            sumTotalTermFreq.addAndGet(totalTermFreq);
                                        } else if (termFreqs.containsKey(termString) == false) {
                                            // Add placeholder (2nd pass will
                                            // set its counts):
                                            termFreqs.put(termString, new TermFreqs());
                                        }
                                    }
                                }

                                // Also test seeking the TermsEnum:
                                for (String term : termFreqs.keySet()) {
                                    if (termsEnum.seekExact(new BytesRef(term))) {
                                        // TODO: also sometimes ask for payloads/offsets?
                                        boolean noPositions = random().nextBoolean();
                                        if (noPositions) {
                                            docs = termsEnum.postings(docs, PostingsEnum.FREQS);
                                        } else {
                                            docs = termsEnum.postings(null, PostingsEnum.POSITIONS);
                                        }

                                        int docFreq = 0;
                                        long totalTermFreq = 0;
                                        while (docs.nextDoc() != PostingsEnum.NO_MORE_DOCS) {
                                            docFreq++;
                                            totalTermFreq += docs.freq();
                                            int limit = TestUtil.nextInt(random(), 1, docs.freq());
                                            if (!noPositions) {
                                                for (int i = 0; i < limit; i++) {
                                                    docs.nextPosition();
                                                }
                                            }
                                        }

                                        if (isMerge == false && addOnSecondPass) {
                                            TermFreqs tf = termFreqs.get(term);
                                            assert tf != null;
                                            tf.docFreq += docFreq;
                                            tf.totalTermFreq += totalTermFreq;
                                            sumDocFreq.addAndGet(docFreq);
                                            sumTotalTermFreq.addAndGet(totalTermFreq);
                                        }

                                        //System.out.println("  term=" + term + " docFreq=" + docFreq + " ttDF=" + termToDocFreq.get(term));
                                        assertTrue(docFreq <= termFreqs.get(term).docFreq);
                                        assertTrue(totalTermFreq <= termFreqs.get(term).totalTermFreq);
                                    }
                                }

                                // Also test seekCeil
                                for (int iter = 0; iter < 10; iter++) {
                                    BytesRef term = new BytesRef(
                                            TestUtil.randomRealisticUnicodeString(random()));
                                    SeekStatus status = termsEnum.seekCeil(term);
                                    if (status == SeekStatus.NOT_FOUND) {
                                        assertTrue(term.compareTo(termsEnum.term()) < 0);
                                    }
                                }
                            }

                            @Override
                            public void close() throws IOException {
                                fieldsConsumer.close();
                            }
                        };
                    }

                    @Override
                    public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
                        return defaultPostingsFormat.fieldsProducer(state);
                    }
                };
            } else {
                return defaultPostingsFormat;
            }
        }
    });

    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);

    LineFileDocs docs = new LineFileDocs(random());
    int bytesToIndex = atLeast(100) * 1024;
    int bytesIndexed = 0;
    while (bytesIndexed < bytesToIndex) {
        Document doc = docs.nextDoc();
        w.addDocument(doc);
        bytesIndexed += RamUsageTester.sizeOf(doc);
    }

    IndexReader r = w.getReader();
    w.close();

    Terms terms = MultiFields.getTerms(r, "body");
    assertEquals(sumDocFreq.get(), terms.getSumDocFreq());
    assertEquals(sumTotalTermFreq.get(), terms.getSumTotalTermFreq());

    TermsEnum termsEnum = terms.iterator();
    long termCount = 0;
    boolean supportsOrds = true;
    while (termsEnum.next() != null) {
        BytesRef term = termsEnum.term();
        assertEquals(termFreqs.get(term.utf8ToString()).docFreq, termsEnum.docFreq());
        assertEquals(termFreqs.get(term.utf8ToString()).totalTermFreq, termsEnum.totalTermFreq());
        if (supportsOrds) {
            long ord;
            try {
                ord = termsEnum.ord();
            } catch (UnsupportedOperationException uoe) {
                supportsOrds = false;
                ord = -1;
            }
            if (ord != -1) {
                assertEquals(termCount, ord);
            }
        }
        termCount++;
    }
    assertEquals(termFreqs.size(), termCount);

    r.close();
    dir.close();
}

From source file: com.rocana.lucene.codec.v1.TestBlockPostingsFormat3.java

License: Apache License

private void assertTermsSeeking(Terms leftTerms, Terms rightTerms) throws Exception {
    TermsEnum leftEnum = null;
    TermsEnum rightEnum = null;

    // just an upper bound
    int numTests = atLeast(20);
    Random random = random();

    // collect this number of terms from the left side
    HashSet<BytesRef> tests = new HashSet<>();
    int numPasses = 0;
    while (numPasses < 10 && tests.size() < numTests) {
        leftEnum = leftTerms.iterator();
        BytesRef term = null;
        while ((term = leftEnum.next()) != null) {
            int code = random.nextInt(10);
            if (code == 0) {
                // the term
                tests.add(BytesRef.deepCopyOf(term));
            } else if (code == 1) {
                // truncated subsequence of term
                term = BytesRef.deepCopyOf(term);
                if (term.length > 0) {
                    // truncate it
                    term.length = random.nextInt(term.length);
                }
                tests.add(term);
            } else if (code == 2) {
                // term, but ensure a non-zero offset
                byte newbytes[] = new byte[term.length + 5];
                System.arraycopy(term.bytes, term.offset, newbytes, 5, term.length);
                tests.add(new BytesRef(newbytes, 5, term.length));
            }
        }
        numPasses++;
    }

    ArrayList<BytesRef> shuffledTests = new ArrayList<>(tests);
    Collections.shuffle(shuffledTests, random);

    for (BytesRef b : shuffledTests) {
        leftEnum = leftTerms.iterator();
        rightEnum = rightTerms.iterator();

        // seek the same term twice: the second call exercises seekExact when the enum is already positioned on it
        assertEquals(leftEnum.seekExact(b), rightEnum.seekExact(b));
        assertEquals(leftEnum.seekExact(b), rightEnum.seekExact(b));

        SeekStatus leftStatus;
        SeekStatus rightStatus;

        leftStatus = leftEnum.seekCeil(b);
        rightStatus = rightEnum.seekCeil(b);
        assertEquals(leftStatus, rightStatus);
        if (leftStatus != SeekStatus.END) {
            assertEquals(leftEnum.term(), rightEnum.term());
        }

        leftStatus = leftEnum.seekCeil(b);
        rightStatus = rightEnum.seekCeil(b);
        assertEquals(leftStatus, rightStatus);
        if (leftStatus != SeekStatus.END) {
            assertEquals(leftEnum.term(), rightEnum.term());
        }
    }
}

From source file: com.tcdi.zombodb.query.VisibilityQueryHelper.java

License: Apache License

private static boolean isCommitted(TermsEnum termsEnum, long xid, BytesRefBuilder builder) throws IOException {
    if (KNOWN_COMMITTED_XIDS.contains(xid))
        return true;

    if (termsEnum == null)
        return false;

    // encode the xid exactly as the full-precision (shift=0) numeric term was indexed, then probe the terms dictionary
    NumericUtils.longToPrefixCoded(xid, 0, builder);
    boolean isCommitted = termsEnum.seekExact(builder.toBytesRef());

    if (isCommitted)
        KNOWN_COMMITTED_XIDS.add(xid);

    builder.clear();
    return isCommitted;
}

From source file: de.unihildesheim.iw.lucene.index.FDRIndexDataProvider.java

License: Open Source License

@SuppressFBWarnings("EXS_EXCEPTION_SOFTENING_NO_CONSTRAINTS")
@Override
public long getTermFrequency(@NotNull final BytesRef term) {
    // try get a cached value first
    @Nullable
    Long tf = this.cache_tf.get(term);
    if (tf == null) {
        tf = 0L;
        for (final LeafReaderContext lrc : this.index.reader.leaves()) {
            final LeafReader r = lrc.reader();
            long fieldTf = 0L;
            if (r.numDocs() > 0) {
                try {
                    for (final String s : r.fields()) {
                        @Nullable
                        final Terms terms = r.terms(s);
                        if (terms != null) {
                            final TermsEnum termsEnum = terms.iterator(null);
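                            // a successful seek positions the enum on the term, so totalTermFreq() then refers to it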
                            if (termsEnum.seekExact(term)) {
                                fieldTf += termsEnum.totalTermFreq();
                            }
                        }
                    }
                } catch (final IOException e) {
                    throw new UncheckedIOException(e);
                }
            }
            tf += fieldTf;
        }
        this.cache_tf.put(BytesRef.deepCopyOf(term), tf);
    }

    return tf;
}

From source file: de.unihildesheim.iw.lucene.index.FDRIndexDataProvider.java

License: Open Source License

@SuppressFBWarnings("EXS_EXCEPTION_SOFTENING_NO_CONSTRAINTS")
@Override
public int getDocumentFrequency(@NotNull final BytesRef term) {
    Integer df = this.cache_df.get(term);
    if (df == null) {
        df = this.index.reader.leaves().stream().map(LeafReaderContext::reader).filter(r -> r.numDocs() > 0)
                .mapToInt(r -> {
                    try {
                        return StreamSupport.stream(r.fields().spliterator(), false).mapToInt(f -> {
                            try {
                                @Nullable
                                final Terms terms = r.terms(f);
                                if (terms == null) {
                                    return 0;
                                }
                                final TermsEnum termsEnum = terms.iterator(null);
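                                // a successful seek positions the enum on the term; docFreq() then reports its document frequency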
                                return termsEnum.seekExact(term) ? termsEnum.docFreq() : 0;
                            } catch (final IOException e) {
                                throw new UncheckedIOException(e);
                            }
                        }).max().orElse(0);
                    } catch (final IOException e) {
                        throw new UncheckedIOException(e);
                    }
                }).sum();
        this.cache_df.put(BytesRef.deepCopyOf(term), df);
    }
    return df;
}

From source file: de.unihildesheim.iw.lucene.index.FilteredDirectoryReaderTest.java

License: Open Source License

/**
 * Test basic {@link TermFilter} usage.
 *
 * @throws Exception
 */
@SuppressWarnings({ "AnonymousInnerClassMayBeStatic", "ImplicitNumericConversion" })
@Test
public void testBuilder_termFilter() throws Exception {
    try (TestMemIndex idx = new TestMemIndex(Index.PLAIN)) {
        final String skipTerm = "first";
        final DirectoryReader reader = DirectoryReader.open(idx.dir);
        final FilteredDirectoryReader fReader = new Builder(reader).termFilter(new TermFilter() {
            @Override
            public boolean isAccepted(@Nullable final TermsEnum termsEnum, @NotNull final BytesRef term) {
                return !skipTerm.equals(term.utf8ToString());
            }
        }).build();

        new LeafReaderInstanceTest() {

            @Override
            void testHasDeletions() throws Exception {
                Assert.assertFalse("Reader has deletions.", fReader.hasDeletions());
            }

            @Override
            void testFieldCount() throws Exception {
                Assert.assertEquals("Field count mismatch.", idx.flds.size(), fReader.getFields().size());
            }

            @Override
            void testFieldNames() throws Exception {
                Assert.assertTrue("Visible field not found.", fReader.getFields().containsAll(idx.flds));
            }

            @Override
            void testTotalTermFreq() throws Exception {
                Assert.assertEquals("TotalTermFreq mismatch for visible term.", idx.docs,
                        fReader.totalTermFreq(new Term("f1", "field")));
                Assert.assertEquals("TotalTermFreq mismatch for missing term.", 0L,
                        fReader.totalTermFreq(new Term("f1", "foo")));
                Assert.assertEquals("TotalTermFreq mismatch for hidden term.", 0L,
                        fReader.totalTermFreq(new Term("f1", "first")));
            }

            @Override
            void testSumTotalTermFreq() throws Exception {
                Assert.assertEquals("SumTotalTermFreq mismatch for visible term.", 14L,
                        fReader.getSumTotalTermFreq("f1"));
            }

            @Override
            void testDocCount() throws Exception {
                Assert.assertEquals("Doc count mismatch.", idx.docs, fReader.getDocCount("f1"));
            }

            @SuppressWarnings("ObjectAllocationInLoop")
            @Override
            void testDocFreq() throws Exception {
                for (final String f : idx.flds) {
                    Assert.assertEquals("Missing term from all documents.", idx.docs,
                            fReader.docFreq(new Term(f, "value")));
                    Assert.assertEquals("Found hidden term.", 0L, fReader.docFreq(new Term(f, "first")));
                }
            }

            @Override
            void testSumDocFreq() throws Exception {
                Assert.assertEquals("SumDocFreq mismatch for visible term.", 14L, fReader.getSumDocFreq("f1"));
            }

            @Override
            void testTermVectors() throws Exception {
                final BytesRef term = new BytesRef("first");
                for (int i = 0; i < idx.docs - 1; i++) {
                    final Fields f = fReader.getTermVectors(i);
                    Assert.assertEquals("Too much fields retrieved from TermVector.", 1L, f.size());
                    final TermsEnum te = f.terms("f1").iterator(null);
                    Assert.assertFalse("Hidden term found.", te.seekExact(term));
                }
            }

            @Override
            void testNumDocs() throws Exception {
                Assert.assertEquals("NumDocs mismatch.", idx.docs, fReader.numDocs());
            }

            @Override
            void testMaxDoc() throws Exception {
                Assert.assertEquals("MaxDoc mismatch.", idx.docs, fReader.maxDoc());
            }
        };
    }
}

From source file: de.unihildesheim.iw.lucene.index.FilteredDirectoryReaderTest.java

License: Open Source License

/**
 * Test {@link Filter} usage in combination with {@link TermFilter}
 * restriction.
 *
 * @throws Exception
 */
@SuppressWarnings({ "AnonymousInnerClassMayBeStatic", "ImplicitNumericConversion" })
@Test
public void testBuilder_filter_and_termFilter() throws Exception {
    try (TestMemIndex idx = new TestMemIndex(Index.ALL_FIELDS)) {
        final String skipTerm = "document2field3";
        final Query q = new TermQuery(new Term("f1", "document2field1"));
        final Filter f = new QueryWrapperFilter(q);
        final DirectoryReader reader = DirectoryReader.open(idx.dir);
        final FilteredDirectoryReader fReader = new Builder(reader).queryFilter(f).termFilter(new TermFilter() {
            @Override
            public boolean isAccepted(@Nullable final TermsEnum termsEnum, @NotNull final BytesRef term) {
                return !skipTerm.equals(term.utf8ToString());
            }
        }).build();

        new LeafReaderInstanceTest() {

            @Override
            void testHasDeletions() throws Exception {
                Assert.assertFalse("Reader has deletions.", fReader.hasDeletions());
            }

            @Override
            void testFieldCount() throws Exception {
                Assert.assertEquals("Field count mismatch.", 3L, fReader.getFields().size());
            }

            @Override
            void testFieldNames() throws Exception {
                for (final String fld : idx.flds) {
                    Assert.assertTrue("Visible field not found.", fReader.getFields().contains(fld));
                }
            }

            @Override
            void testTotalTermFreq() throws Exception {
                Assert.assertEquals("TotalTermFreq mismatch for visible term.", 1L,
                        fReader.totalTermFreq(new Term("f1", "field1")));
                Assert.assertEquals("TotalTermFreq mismatch for visible term.", 1L,
                        fReader.totalTermFreq(new Term("f2", "field2")));
                Assert.assertEquals("TotalTermFreq mismatch for visible term.", 1L,
                        fReader.totalTermFreq(new Term("f3", "field3")));
                Assert.assertEquals("TotalTermFreq mismatch for hidden term.", 0L,
                        fReader.totalTermFreq(new Term("f3", "document2field3")));
            }

            @Override
            void testSumTotalTermFreq() throws Exception {
                Assert.assertEquals("SumTotalTermFreq mismatch for visible terms.", 6L,
                        fReader.getSumTotalTermFreq("f2"));
            }

            @Override
            void testDocCount() throws Exception {
                for (final String fld : idx.flds) {
                    Assert.assertEquals("Doc count mismatch.", 1L, fReader.getDocCount(fld));
                }
            }

            @SuppressWarnings("ObjectAllocationInLoop")
            @Override
            void testDocFreq() throws Exception {
                Assert.assertEquals("Missing term from visible document.", 1L,
                        fReader.docFreq(new Term("f2", "value")));
                Assert.assertEquals("Hidden term found.", 0L,
                        fReader.docFreq(new Term("f1", "document1field1")));
                Assert.assertEquals("Hidden term found.", 0L,
                        fReader.docFreq(new Term("f3", "document2field3")));
            }

            @Override
            void testSumDocFreq() throws Exception {
                Assert.assertEquals("SumDocFreq mismatch for visible term.", 6L, fReader.getSumDocFreq("f2"));
                Assert.assertEquals("SumDocFreq mismatch for visible term.", 5L, fReader.getSumDocFreq("f3"));
            }

            @Override
            void testTermVectors() throws Exception {
                boolean match = false;
                final BytesRef term = new BytesRef(skipTerm);
                for (int i = 0; i < fReader.maxDoc(); i++) {
                    final Fields fld = fReader.getTermVectors(i);
                    if (fld != null) {
                        match = true;
                        Assert.assertEquals("Number of fields retrieved from TermVector do not match.", 3L,
                                fld.size());
                        final Terms t = fld.terms("f3");
                        if (t != null) {
                            final TermsEnum te = t.iterator(null);
                            Assert.assertFalse("Hidden term found.", te.seekExact(term));
                        }
                    }
                }
                Assert.assertTrue("Fields not found.", match);
            }

            @Override
            void testNumDocs() throws Exception {
                Assert.assertEquals("NumDocs mismatch.", 1L, fReader.numDocs());
            }

            @Override
            void testMaxDoc() throws Exception {
                Assert.assertEquals("MaxDoc mismatch.", 2L, fReader.maxDoc());
            }
        };
    }
}