Example usage for org.apache.lucene.index IndexReader getContext

List of usage examples for org.apache.lucene.index IndexReader getContext

Introduction

On this page you can find example usages for org.apache.lucene.index IndexReader getContext.

Prototype

public abstract IndexReaderContext getContext();

Document

Expert: Returns the root IndexReaderContext for this IndexReader's sub-reader tree.
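
A minimal, self-contained sketch of the call itself (assuming Lucene 5.x or later, where leaves() returns LeafReaderContext; the index path and class name are placeholders, and several of the examples below use the older 4.x AtomicReaderContext API):

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.FSDirectory;

public class GetContextExample {
    public static void main(String[] args) throws Exception {
        // "/path/to/index" is a placeholder for an existing Lucene index directory
        try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/path/to/index")))) {
            // getContext() returns the root of the reader's sub-reader tree
            IndexReaderContext root = reader.getContext();
            // leaves() flattens that tree into the per-segment (leaf) contexts
            for (LeafReaderContext leaf : root.leaves()) {
                System.out.println("ord=" + leaf.ord + " docBase=" + leaf.docBase
                        + " maxDoc=" + leaf.reader().maxDoc());
            }
        }
    }
}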

Usage

From source file: org.elasticsearch.search.highlight.vectorhighlight.SourceSimpleFragmentsBuilder.java

License: Apache License

@Override
protected Field[] getFields(IndexReader reader, int docId, String fieldName) throws IOException {
    // we know it's a low-level reader and a matching docId, since that's how we call the highlighter
    SearchLookup lookup = searchContext.lookup();
    lookup.setNextReader((AtomicReaderContext) reader.getContext());
    lookup.setNextDocId(docId);

    List<Object> values = lookup.source().extractRawValues(mapper.names().sourcePath());
    if (values.isEmpty()) {
        return EMPTY_FIELDS;
    }
    Field[] fields = new Field[values.size()];
    for (int i = 0; i < values.size(); i++) {
        fields[i] = new Field(mapper.names().indexName(), values.get(i).toString(), TextField.TYPE_NOT_STORED);
    }
    return fields;
}

From source file: org.sindice.siren.util.SirenTestCase.java

License: Apache License

/**
 * Create a new searcher over the reader. This searcher might randomly use
 * threads.
 * <p>
 * Override the original {@link LuceneTestCase#newSearcher(IndexReader)}
 * implementation in order to avoid getting {@link AssertingIndexSearcher}
 * which is incompatible with SIREn.
 */
public static IndexSearcher newSearcher(final IndexReader r) throws IOException {
    final Random random = random();
    if (usually()) {
        // compared to the original implementation, we do not wrap to avoid
        // wrapping into an AssertingAtomicReader
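        // IndexSearcher can be built either from the reader itself or from the root context returned by getContext()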
        return random.nextBoolean() ? new IndexSearcher(r) : new IndexSearcher(r.getContext());
    } else {
        int threads = 0;
        final ThreadPoolExecutor ex;
        if (random.nextBoolean()) {
            ex = null;
        } else {
            threads = _TestUtil.nextInt(random, 1, 8);
            ex = new ThreadPoolExecutor(threads, threads, 0L, TimeUnit.MILLISECONDS,
                    new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("LuceneTestCase"));
        }
        if (ex != null) {
            if (VERBOSE) {
                System.out.println("NOTE: newSearcher using ExecutorService with " + threads + " threads");
            }
            r.addReaderClosedListener(new ReaderClosedListener() {
                @Override
                public void onClose(final IndexReader reader) {
                    _TestUtil.shutdownExecutorService(ex);
                }
            });
        }
        final IndexSearcher ret = random.nextBoolean() ? new IndexSearcher(r, ex)
                : new IndexSearcher(r.getContext(), ex);
        return ret;
    }
}

From source file: pretraga.IsolationSimilarity.java

public void test(String vec) {
    List<String> vector = processInput(vec);
    HashMap<String, Long> map = new HashMap<>();
    try {
        Directory dir = FSDirectory.open(new File(indexDirectoryPath).toPath());

        IndexReader reader = DirectoryReader.open(dir);
        IndexSearcher searcher = new IndexSearcher(reader);

        List<Integer> docId = getDocumentsFromVector(vector, reader, searcher);

        for (int i = 0; i < docId.size(); i++) {
            Fields ff = reader.getTermVectors(docId.get(i));
            Terms terms = ff.terms(CONTENT);

            TermsEnum te = terms.iterator();
            Object tmp = te.next();
            while (tmp != null) {
                BytesRef by = (BytesRef) tmp;
                String term = by.utf8ToString();

                // the scoring below needs a TF/IDF similarity; guard against other similarities
                ClassicSimilarity sim = null;
                if (searcher.getSimilarity(true) instanceof ClassicSimilarity) {
                    sim = (ClassicSimilarity) searcher.getSimilarity(true);
                }
                TermStatistics ts = new TermStatistics(by, te.docFreq(), te.totalTermFreq());
                CollectionStatistics s = new CollectionStatistics(CONTENT, reader.maxDoc(), terms.getDocCount(),
                        terms.getSumTotalTermFreq(), terms.getSumDocFreq());
                Document d = reader.document(docId.get(i));
                if (sim != null && vector.contains(term)) {
                    float idf = sim.idf(te.docFreq(), reader.maxDoc());
                    float tf = sim.tf(te.totalTermFreq());
                    //System.out.println("idf = " + idf + ", tf = " + tf + ", docF: " + te.totalTermFreq());
                    // leaves().get(0) assumes the matching document lives in the first (or only) segment
                    float ttt = sim.simScorer(sim.computeWeight(s, ts), reader.getContext().leaves().get(0))
                            .score(docId.get(i), te.totalTermFreq());
                    System.out.println(ttt + ", " + d.get(TITLE) + ", term: " + term);
                }
                tmp = te.next();
            }

            /*Iterator<String> ss = ff.iterator();
            while (ss.hasNext()) {
            String fieldString = ss.next();
            System.out.println(fieldString);
            }*/
        }
    } catch (Exception e) {
        // report instead of silently swallowing the exception
        e.printStackTrace();
    }
}

From source file: project.lucene.RelativeTermWeightQuery.java

License: Apache License

public void collectTermContext(IndexReader reader, List<AtomicReaderContext> leaves, TermContext[] contextArray,
        Term[] queryTerms) throws IOException {
    TermsEnum termsEnum = null;
    for (AtomicReaderContext context : leaves) {
        final Fields fields = context.reader().fields();
        if (fields == null) {
            // reader has no fields
            continue;
        }
        for (int i = 0; i < queryTerms.length; i++) {
            Term term = queryTerms[i];
            TermContext termContext = contextArray[i];
            final Terms terms = fields.terms(term.field());
            if (terms == null) {
                // field does not exist
                continue;
            }
            termsEnum = terms.iterator(termsEnum);
            assert termsEnum != null;

            if (termsEnum == TermsEnum.EMPTY)
                continue;
            if (termsEnum.seekExact(term.bytes())) {
                if (termContext == null) {
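                    // TermContext is built from the top-level context (reader.getContext()) plus this leaf's term state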
                    contextArray[i] = new TermContext(reader.getContext(), termsEnum.termState(), context.ord,
                            termsEnum.docFreq(), termsEnum.totalTermFreq());
                } else {
                    termContext.register(termsEnum.termState(), context.ord, termsEnum.docFreq(),
                            termsEnum.totalTermFreq());
                }
            }
        }
    }
}