Example usage for org.apache.lucene.index IndexReader leaves

List of usage examples for org.apache.lucene.index IndexReader leaves

Introduction

On this page you can find example usages of org.apache.lucene.index.IndexReader.leaves().

Prototype

public final List<LeafReaderContext> leaves() 

Document

Returns the reader's leaves, or itself if this reader is atomic.
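
Before the project examples below, here is a minimal, self-contained sketch of the typical pattern: iterate the per-segment LeafReaderContext instances returned by leaves() and work with each segment's LeafReader. The index path and the document-counting logic are illustrative assumptions, not taken from any of the examples on this page.

import java.nio.file.Paths;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class LeavesExample {
    public static void main(String[] args) throws Exception {
        // Open an existing index; the path is a placeholder.
        try (Directory dir = FSDirectory.open(Paths.get("/path/to/index"));
                IndexReader reader = DirectoryReader.open(dir)) {
            int totalLive = 0;
            for (LeafReaderContext context : reader.leaves()) {
                LeafReader leaf = context.reader(); // per-segment reader
                // context.docBase converts segment-local doc IDs to top-level doc IDs.
                totalLive += leaf.numDocs(); // live (non-deleted) docs in this segment
            }
            System.out.println(reader.leaves().size() + " leaves, " + totalLive + " live docs");
        }
    }
}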

Usage

From source file:org.apache.blur.lucene.security.IndexSearcherTest.java

License:Apache License

private void validate(int expected, int leafCount, Collection<String> readAuthorizations,
        Collection<String> discoverAuthorizations, Collection<String> discoverableFields, Directory dir,
        IndexReader reader) throws IOException {
    List<AtomicReaderContext> leaves = reader.leaves();
    assertEquals(leafCount, leaves.size());
    SecureIndexSearcher searcher = new SecureIndexSearcher(reader, getAccessControlFactory(),
            readAuthorizations, discoverAuthorizations, toSet(discoverableFields));
    TopDocs topDocs;
    Query query = new MatchAllDocsQuery();
    {
        topDocs = searcher.search(query, 10);
        assertEquals(expected, topDocs.totalHits);
    }
    DocumentAuthorizations readDocumentAuthorizations = new DocumentAuthorizations(readAuthorizations);
    DocumentAuthorizations discoverDocumentAuthorizations = new DocumentAuthorizations(discoverAuthorizations);
    DocumentVisibilityEvaluator readVisibilityEvaluator = new DocumentVisibilityEvaluator(
            readDocumentAuthorizations);
    DocumentVisibilityEvaluator discoverVisibilityEvaluator = new DocumentVisibilityEvaluator(
            discoverDocumentAuthorizations);
    for (int i = 0; i < topDocs.totalHits && i < topDocs.scoreDocs.length; i++) {
        Document doc = searcher.doc(topDocs.scoreDocs[i].doc);
        String read = doc.get("_read_");
        String discover = doc.get("_discover_");
        if (read != null && discover != null) {
            DocumentVisibility readVisibility = new DocumentVisibility(read);
            DocumentVisibility discoverVisibility = new DocumentVisibility(discover);
            assertTrue(readVisibilityEvaluator.evaluate(readVisibility)
                    || discoverVisibilityEvaluator.evaluate(discoverVisibility));
        } else if (read != null) {
            DocumentVisibility readVisibility = new DocumentVisibility(read);
            assertTrue(readVisibilityEvaluator.evaluate(readVisibility));
        } else if (discover != null) {
            DocumentVisibility discoverVisibility = new DocumentVisibility(discover);
            assertTrue(discoverVisibilityEvaluator.evaluate(discoverVisibility));
            // Since this document is only discoverable validate fields that are
            // being returned.
            validateDiscoverFields(doc, discoverableFields);
        } else {
            fail("Should not fetch empty document.");
        }
    }
    searcher.search(query, new Collector() {

        @Override
        public void setScorer(Scorer scorer) throws IOException {
        }

        @Override
        public void setNextReader(AtomicReaderContext context) throws IOException {
            assertTrue(context.reader() instanceof SecureAtomicReader);
        }

        @Override
        public void collect(int doc) throws IOException {

        }

        @Override
        public boolean acceptsDocsOutOfOrder() {
            return false;
        }
    });
}

From source file:org.apache.solr.core.TestMergePolicyConfig.java

License:Apache License

/**
 * Given an IndexReader, asserts that there is at least one AtomicReader leaf,
 * and that all AtomicReader leaves are SegmentReaders whose compound
 * file status matches the expected input.
 */
private static void assertCompoundSegments(IndexReader reader, boolean compound) {

    assertNotNull("Null leaves", reader.leaves());
    assertTrue("no leaves", 0 < reader.leaves().size());

    for (AtomicReaderContext atomic : reader.leaves()) {
        assertTrue("not a segment reader: " + atomic.reader().toString(),
                atomic.reader() instanceof SegmentReader);

        assertEquals("Compound status incorrect for: " + atomic.reader().toString(), compound,
                ((SegmentReader) atomic.reader()).getSegmentInfo().info.getUseCompoundFile());
    }
}

From source file:org.apache.solr.response.transform.ValueSourceAugmenter.java

License:Apache License

@Override
public void setContext(TransformContext context) {
    try {
        IndexReader reader = qparser.getReq().getSearcher().getIndexReader();
        readerContexts = reader.leaves();
        docValuesArr = new FunctionValues[readerContexts.size()];

        searcher = qparser.getReq().getSearcher();
        fcontext = ValueSource.newContext(searcher);
        this.valueSource.createWeight(fcontext, searcher);
    } catch (IOException e) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
    }
}

From source file:org.apache.solr.schema.TestPointFields.java

License:Apache License

private void doTestInternals(String field, String[] values) throws IOException {
    assertTrue(h.getCore().getLatestSchema().getField(field).getType() instanceof PointField);
    for (int i = 0; i < 10; i++) {
        assertU(adoc("id", String.valueOf(i), field, values[i]));
    }
    assertU(commit());
    IndexReader ir;
    RefCounted<SolrIndexSearcher> ref = null;
    SchemaField sf = h.getCore().getLatestSchema().getField(field);
    boolean ignoredField = !(sf.indexed() || sf.stored() || sf.hasDocValues());
    try {
        ref = h.getCore().getSearcher();
        SolrIndexSearcher searcher = ref.get();
        ir = searcher.getIndexReader();
        // our own SlowCompositeReader to check DocValues on disk w/o the UninvertingReader added by SolrIndexSearcher
        final LeafReader leafReaderForCheckingDVs = SlowCompositeReaderWrapper.wrap(searcher.getRawReader());

        if (sf.indexed()) {
            assertEquals("Field " + field + " should have point values", 10, PointValues.size(ir, field));
        } else {
            assertEquals("Field " + field + " should have no point values", 0, PointValues.size(ir, field));
        }
        if (ignoredField) {
            assertTrue("Field " + field + " should not have docValues",
                    DocValues.getSortedNumeric(leafReaderForCheckingDVs, field)
                            .nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
            assertTrue("Field " + field + " should not have docValues", DocValues
                    .getNumeric(leafReaderForCheckingDVs, field).nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
            assertTrue("Field " + field + " should not have docValues", DocValues
                    .getSorted(leafReaderForCheckingDVs, field).nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
            assertTrue("Field " + field + " should not have docValues", DocValues
                    .getBinary(leafReaderForCheckingDVs, field).nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
        } else {
            if (sf.hasDocValues()) {
                if (sf.multiValued()) {
                    assertFalse("Field " + field + " should have docValues",
                            DocValues.getSortedNumeric(leafReaderForCheckingDVs, field)
                                    .nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
                } else {
                    assertFalse("Field " + field + " should have docValues",
                            DocValues.getNumeric(leafReaderForCheckingDVs, field)
                                    .nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
                }
            } else {
                expectThrows(IllegalStateException.class,
                        () -> DocValues.getSortedNumeric(leafReaderForCheckingDVs, field));
                expectThrows(IllegalStateException.class,
                        () -> DocValues.getNumeric(leafReaderForCheckingDVs, field));
            }
            expectThrows(IllegalStateException.class,
                    () -> DocValues.getSorted(leafReaderForCheckingDVs, field));
            expectThrows(IllegalStateException.class,
                    () -> DocValues.getBinary(leafReaderForCheckingDVs, field));
        }
        for (LeafReaderContext leave : ir.leaves()) {
            LeafReader reader = leave.reader();
            for (int i = 0; i < reader.numDocs(); i++) {
                Document doc = reader.document(i);
                if (sf.stored()) {
                    assertNotNull("Field " + field + " not found. Doc: " + doc, doc.get(field));
                } else {
                    assertNull(doc.get(field));
                }
            }
        }
    } finally {
        ref.decref();
    }
    clearIndex();
    assertU(commit());
}

From source file:org.apache.solr.search.DocSetUtil.java

License:Apache License

public static void collectSortedDocSet(DocSet docs, IndexReader reader, Collector collector)
        throws IOException {
    // TODO add SortedDocSet sub-interface and take that.
    // TODO collectUnsortedDocSet: iterate segment, then all docSet per segment.

    final List<LeafReaderContext> leaves = reader.leaves();
    final Iterator<LeafReaderContext> ctxIt = leaves.iterator();
    int segBase = 0;
    int segMax;
    int adjustedMax = 0;
    LeafReaderContext ctx = null;
    LeafCollector leafCollector = null;
    for (DocIterator docsIt = docs.iterator(); docsIt.hasNext();) {
        final int doc = docsIt.nextDoc();
        if (doc >= adjustedMax) {
            do {
                ctx = ctxIt.next();
                segBase = ctx.docBase;
                segMax = ctx.reader().maxDoc();
                adjustedMax = segBase + segMax;
            } while (doc >= adjustedMax);
            leafCollector = collector.getLeafCollector(ctx);
        }
        if (doc < segBase) {
            throw new IllegalStateException("algorithm expects sorted DocSet but wasn't: " + docs.getClass());
        }
        leafCollector.collect(doc - segBase); // per-seg collectors
    }
}

From source file:org.apache.solr.search.TestQueryWrapperFilter.java

License:Apache License

public void testQueryWrapperFilterPropagatesApproximations() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new StringField("foo", "bar", Store.NO));
    writer.addDocument(doc);
    writer.commit();
    final IndexReader reader = writer.getReader();
    writer.close();
    final IndexSearcher searcher = new IndexSearcher(reader);
    searcher.setQueryCache(null); // to still have approximations
    final Query query = new QueryWrapperFilter(
            new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random()));
    final Weight weight = searcher.createNormalizedWeight(query, random().nextBoolean());
    final Scorer scorer = weight.scorer(reader.leaves().get(0));
    assertNotNull(scorer.twoPhaseIterator());
    reader.close();
    dir.close();
}

From source file:org.apache.solr.uninverting.TestFieldCache.java

License:Apache License

@BeforeClass
public static void beforeClass() throws Exception {
    NUM_DOCS = atLeast(500);
    NUM_ORDS = atLeast(2);
    directory = newDirectory();
    IndexWriter writer = new IndexWriter(directory,
            new IndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(new LogDocMergePolicy()));
    long theLong = Long.MAX_VALUE;
    double theDouble = Double.MAX_VALUE;
    int theInt = Integer.MAX_VALUE;
    float theFloat = Float.MAX_VALUE;
    unicodeStrings = new String[NUM_DOCS];
    multiValued = new BytesRef[NUM_DOCS][NUM_ORDS];
    if (VERBOSE) {
        System.out.println("TEST: setUp");
    }
    for (int i = 0; i < NUM_DOCS; i++) {
        Document doc = new Document();
        doc.add(new LongPoint("theLong", theLong--));
        doc.add(new DoublePoint("theDouble", theDouble--));
        doc.add(new IntPoint("theInt", theInt--));
        doc.add(new FloatPoint("theFloat", theFloat--));
        if (i % 2 == 0) {
            doc.add(new IntPoint("sparse", i));
        }

        if (i % 2 == 0) {
            doc.add(new IntPoint("numInt", i));
        }

        // sometimes skip the field:
        if (random().nextInt(40) != 17) {
            unicodeStrings[i] = generateString(i);
            doc.add(newStringField("theRandomUnicodeString", unicodeStrings[i], Field.Store.YES));
        }

        // sometimes skip the field:
        if (random().nextInt(10) != 8) {
            for (int j = 0; j < NUM_ORDS; j++) {
                String newValue = generateString(i);
                multiValued[i][j] = new BytesRef(newValue);
                doc.add(newStringField("theRandomUnicodeMultiValuedField", newValue, Field.Store.YES));
            }
            Arrays.sort(multiValued[i]);
        }
        writer.addDocument(doc);
    }
    writer.forceMerge(1); // this test relies on one segment and docid order
    IndexReader r = DirectoryReader.open(writer);
    assertEquals(1, r.leaves().size());
    reader = r.leaves().get(0).reader();
    TestUtil.checkReader(reader);
    writer.close();
}

From source file:org.apache.solr.uninverting.TestFieldCacheWithThreads.java

License:Apache License

public void test() throws Exception {
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir,
            newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));

    final List<Long> numbers = new ArrayList<>();
    final List<BytesRef> binary = new ArrayList<>();
    final List<BytesRef> sorted = new ArrayList<>();
    final int numDocs = atLeast(100);
    for (int i = 0; i < numDocs; i++) {
        Document d = new Document();
        long number = random().nextLong();
        d.add(new NumericDocValuesField("number", number));
        BytesRef bytes = new BytesRef(TestUtil.randomRealisticUnicodeString(random()));
        d.add(new BinaryDocValuesField("bytes", bytes));
        binary.add(bytes);
        bytes = new BytesRef(TestUtil.randomRealisticUnicodeString(random()));
        d.add(new SortedDocValuesField("sorted", bytes));
        sorted.add(bytes);
        w.addDocument(d);
        numbers.add(number);
    }

    w.forceMerge(1);
    final IndexReader r = DirectoryReader.open(w);
    w.close();

    assertEquals(1, r.leaves().size());
    final LeafReader ar = r.leaves().get(0).reader();

    int numThreads = TestUtil.nextInt(random(), 2, 5);
    List<Thread> threads = new ArrayList<>();
    final CountDownLatch startingGun = new CountDownLatch(1);
    for (int t = 0; t < numThreads; t++) {
        final Random threadRandom = new Random(random().nextLong());
        Thread thread = new Thread() {
            @Override
            public void run() {
                try {
                    startingGun.await();
                    int iters = atLeast(1000);
                    for (int iter = 0; iter < iters; iter++) {
                        int docID = threadRandom.nextInt(numDocs);
                        switch (threadRandom.nextInt(4)) {
                        case 0: {
                            NumericDocValues values = FieldCache.DEFAULT.getNumerics(ar, "number",
                                    FieldCache.INT_POINT_PARSER);
                            assertEquals(docID, values.advance(docID));
                            assertEquals(numbers.get(docID).longValue(), values.longValue());
                        }
                            break;
                        case 1: {
                            NumericDocValues values = FieldCache.DEFAULT.getNumerics(ar, "number",
                                    FieldCache.LONG_POINT_PARSER);
                            assertEquals(docID, values.advance(docID));
                            assertEquals(numbers.get(docID).longValue(), values.longValue());
                        }
                            break;
                        case 2: {
                            NumericDocValues values = FieldCache.DEFAULT.getNumerics(ar, "number",
                                    FieldCache.FLOAT_POINT_PARSER);
                            assertEquals(docID, values.advance(docID));
                            assertEquals(numbers.get(docID).longValue(), values.longValue());
                        }
                            break;
                        case 3: {
                            NumericDocValues values = FieldCache.DEFAULT.getNumerics(ar, "number",
                                    FieldCache.DOUBLE_POINT_PARSER);
                            assertEquals(docID, values.advance(docID));
                            assertEquals(numbers.get(docID).longValue(), values.longValue());
                        }
                            break;
                        }
                        BinaryDocValues bdv = FieldCache.DEFAULT.getTerms(ar, "bytes");
                        assertEquals(docID, bdv.advance(docID));
                        assertEquals(binary.get(docID), bdv.binaryValue());
                        SortedDocValues sdv = FieldCache.DEFAULT.getTermsIndex(ar, "sorted");
                        assertEquals(docID, sdv.advance(docID));
                        assertEquals(sorted.get(docID), sdv.binaryValue());
                    }
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
        };
        thread.start();
        threads.add(thread);
    }

    startingGun.countDown();

    for (Thread thread : threads) {
        thread.join();
    }

    r.close();
    dir.close();
}

From source file:org.codelibs.elasticsearch.common.lucene.all.AllTermQuery.java

License:Apache License

@Override
public Query rewrite(IndexReader reader) throws IOException {
    Query rewritten = super.rewrite(reader);
    if (rewritten != this) {
        return rewritten;
    }
    boolean fieldExists = false;
    boolean hasPayloads = false;
    for (LeafReaderContext context : reader.leaves()) {
        final Terms terms = context.reader().terms(term.field());
        if (terms != null) {
            fieldExists = true;
            if (terms.hasPayloads()) {
                hasPayloads = true;
                break;
            }
        }
    }
    if (fieldExists == false) {
        return new MatchNoDocsQuery();
    }
    if (hasPayloads == false) {
        return new TermQuery(term);
    }
    return this;
}

From source file:org.codelibs.elasticsearch.common.lucene.index.FilterableTermsEnum.java

License:Apache License

public FilterableTermsEnum(IndexReader reader, String field, int docsEnumFlag, @Nullable Query filter)
        throws IOException {
    if ((docsEnumFlag != PostingsEnum.FREQS) && (docsEnumFlag != PostingsEnum.NONE)) {
        throw new IllegalArgumentException("invalid docsEnumFlag of " + docsEnumFlag);
    }
    this.docsEnumFlag = docsEnumFlag;
    List<LeafReaderContext> leaves = reader.leaves();
    List<Holder> enums = new ArrayList<>(leaves.size());
    final Weight weight;
    if (filter == null) {
        weight = null;
    } else {
        final IndexSearcher searcher = new IndexSearcher(reader);
        searcher.setQueryCache(null);
        weight = searcher.createNormalizedWeight(filter, false);
    }
    for (LeafReaderContext context : leaves) {
        Terms terms = context.reader().terms(field);
        if (terms == null) {
            continue;
        }
        TermsEnum termsEnum = terms.iterator();
        if (termsEnum == null) {
            continue;
        }
        BitSet bits = null;
        if (weight != null) {
            Scorer scorer = weight.scorer(context);
            if (scorer == null) {
                // fully filtered, none matching, no need to iterate on this
                continue;
            }
            DocIdSetIterator docs = scorer.iterator();

            // we want to force apply deleted docs
            final Bits liveDocs = context.reader().getLiveDocs();
            if (liveDocs != null) {
                docs = new FilteredDocIdSetIterator(docs) {
                    @Override
                    protected boolean match(int doc) {
                        return liveDocs.get(doc);
                    }
                };
            }

            bits = BitSet.of(docs, context.reader().maxDoc());
        }
        enums.add(new Holder(termsEnum, bits));
    }
    this.enums = enums.toArray(new Holder[enums.size()]);
}