List of usage examples for the `org.apache.lucene.index.IndexReader#getContext()` method
public abstract IndexReaderContext getContext();
From source file: lucene.security.search.SecureIndexSearcher.java
License: Apache License
/**
 * Builds a secure searcher spanning the entire reader.
 *
 * <p>Delegates to the context-based constructor, handing it the reader's
 * top-level {@code IndexReaderContext} obtained via {@code r.getContext()}.
 *
 * @param r the index reader to search over
 * @param executor executor used for parallel search, may be {@code null}
 * @param accessControlFactory factory producing access-control checks
 * @param readAuthorizations authorizations permitting document reads
 * @param discoverAuthorizations authorizations permitting discovery
 * @param discoverableFields fields visible under discovery-only access
 * @throws IOException if the delegate constructor fails to read the index
 */
public SecureIndexSearcher(IndexReader r, ExecutorService executor,
        AccessControlFactory accessControlFactory,
        Collection<String> readAuthorizations,
        Collection<String> discoverAuthorizations,
        Set<String> discoverableFields) throws IOException {
    this(r.getContext(), executor, accessControlFactory, readAuthorizations,
            discoverAuthorizations, discoverableFields);
}
From source file: nl.inl.blacklab.search.HitsImpl.java
License: Apache License
/** * Construct a Hits object from a SpanQuery. * * @param searcher/*from www . j a va2 s . co m*/ * the searcher object * @param concordanceFieldPropName * field to use by default when finding concordances * @param sourceQuery * the query to execute to get the hits * @throws TooManyClauses if the query is overly broad (expands to too many terms) */ HitsImpl(Searcher searcher, SpanQuery sourceQuery) throws TooManyClauses { this(searcher, (List<Hit>) null); try { IndexReader reader = searcher.getIndexReader(); spanQuery = (SpanQuery) sourceQuery.rewrite(reader); termContexts = new HashMap<>(); Set<Term> terms = new HashSet<>(); extractTermsFromSpanQuery(terms); etiquette = new ThreadPriority(); for (Term term : terms) { try { etiquette.behave(); } catch (InterruptedException e) { // Taking too long, break it off. // Not a very graceful way to do it... but at least it won't // be stuck forever. Thread.currentThread().interrupt(); // client can check this throw new RuntimeException("Query matches too many terms; aborted."); } termContexts.put(term, TermContext.build(reader.getContext(), term)); } currentSourceSpans = null; atomicReaderContexts = reader == null ? null : reader.leaves(); atomicReaderContextIndex = -1; //sourceSpans = BLSpansWrapper.optWrap(spanQuery.getSpans(srw != null ? srw.getContext() : null, srw != null ? srw.getLiveDocs() : null, termContexts)); } catch (IOException e) { throw new RuntimeException(e); } sourceSpansFullyRead = false; }
From source file: org.apache.blur.lucene.warmup.IndexWarmup.java
License: Apache License
/**
 * Locates the {@code Directory} backing the named segment inside the reader.
 *
 * <p>Atomic readers are delegated to the atomic-reader overload; composite
 * readers are walked leaf by leaf until a {@code SegmentReader} with a
 * matching segment name is found.
 *
 * @param reader the (possibly composite) reader to search
 * @param segmentName the segment whose directory is wanted
 * @param context label used in log messages
 * @return the segment's directory, or {@code null} when the index was closed
 *         mid-scan or no matching segment exists
 */
private Directory getDirectory(IndexReader reader, String segmentName, String context) {
    if (reader instanceof AtomicReader) {
        return getDirectory((AtomicReader) reader, segmentName, context);
    }
    for (IndexReaderContext leafCtx : reader.getContext().leaves()) {
        // Bail out early if the index was closed while we were scanning.
        if (_isClosed.get()) {
            LOG.info("Context [{0}] index closed", context);
            return null;
        }
        AtomicReader leafReader = ((AtomicReaderContext) leafCtx).reader();
        if (leafReader instanceof SegmentReader) {
            SegmentReader segReader = (SegmentReader) leafReader;
            if (segReader.getSegmentName().equals(segmentName)) {
                return segReader.directory();
            }
        }
    }
    return null;
}
From source file: org.apache.blur.lucene.warmup.IndexWarmup.java
License: Apache License
public Map<String, List<IndexTracerResult>> sampleIndex(IndexReader reader, String context) throws IOException { Map<String, List<IndexTracerResult>> results = new HashMap<String, List<IndexTracerResult>>(); for (IndexReaderContext ctext : reader.getContext().leaves()) { if (_isClosed.get()) { LOG.info("Context [{0}] index closed", context); return null; }//from ww w.j a v a 2 s . co m AtomicReaderContext atomicReaderContext = (AtomicReaderContext) ctext; AtomicReader atomicReader = atomicReaderContext.reader(); results.putAll(sampleIndex(atomicReader, context)); } return results; }
From source file: org.apache.blur.manager.writer.BlurIndex.java
License: Apache License
public long getSegmentCount() throws IOException { IndexSearcherCloseable indexSearcherClosable = getIndexSearcher(); try {//from w w w . j a va 2s. c o m IndexReader indexReader = indexSearcherClosable.getIndexReader(); IndexReaderContext context = indexReader.getContext(); return context.leaves().size(); } finally { indexSearcherClosable.close(); } }
From source file: org.apache.blur.manager.writer.BlurIndexSimpleWriter.java
License: Apache License
@Override public long getSegmentCount() throws IOException { IndexSearcherCloseable indexSearcherClosable = getIndexSearcher(false); try {//from ww w . j a v a 2s. c om IndexReader indexReader = indexSearcherClosable.getIndexReader(); IndexReaderContext context = indexReader.getContext(); return context.leaves().size(); } finally { indexSearcherClosable.close(); } }
From source file: org.apache.solr.search.TestDocSet.java
License: Apache License
public void doFilterTest(IndexReader reader) throws IOException { IndexReaderContext topLevelContext = reader.getContext(); OpenBitSet bs = getRandomSet(reader.maxDoc(), rand.nextInt(reader.maxDoc() + 1)); DocSet a = new BitDocSet(bs); DocSet b = getIntDocSet(bs);/*w ww . jav a 2 s . c om*/ Filter fa = a.getTopFilter(); Filter fb = b.getTopFilter(); /*** top level filters are no longer supported // test top-level DocIdSet da = fa.getDocIdSet(topLevelContext); DocIdSet db = fb.getDocIdSet(topLevelContext); doTestIteratorEqual(da, db); ***/ DocIdSet da; DocIdSet db; List<AtomicReaderContext> leaves = topLevelContext.leaves(); // first test in-sequence sub readers for (AtomicReaderContext readerContext : leaves) { da = fa.getDocIdSet(readerContext, null); db = fb.getDocIdSet(readerContext, null); doTestIteratorEqual(da, db); } int nReaders = leaves.size(); // now test out-of-sequence sub readers for (int i = 0; i < nReaders; i++) { AtomicReaderContext readerContext = leaves.get(rand.nextInt(nReaders)); da = fa.getDocIdSet(readerContext, null); db = fb.getDocIdSet(readerContext, null); doTestIteratorEqual(da, db); } }
From source file: org.elasticsearch.search.fetch.subphase.highlight.SourceScoreOrderFragmentsBuilder.java
License: Apache License
@Override protected Field[] getFields(IndexReader reader, int docId, String fieldName) throws IOException { // we know its low level reader, and matching docId, since that's how we call the highlighter with SourceLookup sourceLookup = searchContext.lookup().source(); sourceLookup.setSegmentAndDocument((LeafReaderContext) reader.getContext(), docId); List<Object> values = sourceLookup.extractRawValues(mapper.fieldType().name()); Field[] fields = new Field[values.size()]; for (int i = 0; i < values.size(); i++) { fields[i] = new Field(mapper.fieldType().name(), values.get(i).toString(), TextField.TYPE_NOT_STORED); }/*from w ww .j a va 2s. c o m*/ return fields; }
From source file: org.elasticsearch.search.fetch.subphase.highlight.SourceSimpleFragmentsBuilder.java
License: Apache License
@Override protected Field[] getFields(IndexReader reader, int docId, String fieldName) throws IOException { // we know its low level reader, and matching docId, since that's how we call the highlighter with SourceLookup sourceLookup = searchContext.lookup().source(); sourceLookup.setSegmentAndDocument((LeafReaderContext) reader.getContext(), docId); List<Object> values = sourceLookup.extractRawValues(mapper.fieldType().name()); if (values.isEmpty()) { return EMPTY_FIELDS; }/*w ww .jav a2s.c o m*/ Field[] fields = new Field[values.size()]; for (int i = 0; i < values.size(); i++) { fields[i] = new Field(mapper.fieldType().name(), values.get(i).toString(), TextField.TYPE_NOT_STORED); } return fields; }
From source file: org.elasticsearch.search.highlight.vectorhighlight.SourceScoreOrderFragmentsBuilder.java
License: Apache License
@Override protected Field[] getFields(IndexReader reader, int docId, String fieldName) throws IOException { // we know its low level reader, and matching docId, since that's how we call the highlighter with SearchLookup lookup = searchContext.lookup(); lookup.setNextReader((AtomicReaderContext) reader.getContext()); lookup.setNextDocId(docId);/*from ww w . j a v a2s .co m*/ List<Object> values = lookup.source().extractRawValues(mapper.names().sourcePath()); Field[] fields = new Field[values.size()]; for (int i = 0; i < values.size(); i++) { fields[i] = new Field(mapper.names().indexName(), values.get(i).toString(), TextField.TYPE_NOT_STORED); } return fields; }