Example usage for org.apache.lucene.index ReaderUtil getTopLevelContext

List of usage examples for org.apache.lucene.index ReaderUtil getTopLevelContext

Introduction

On this page you can find example usages of org.apache.lucene.index.ReaderUtil#getTopLevelContext.

Prototype

public static IndexReaderContext getTopLevelContext(IndexReaderContext context) 

Source Link

Document

Walks up the reader tree and returns the given context's top-level reader context — in other words, the root context of the reader tree.

Usage

From source file:org.apache.solr.schema.RandomSortField.java

License:Apache License

/**
 * Computes a per-field random hash seed for the given segment context.
 * The seed mixes the field name's hash, the segment's docBase offset, and
 * the top-level reader's version, so using dynamic fields lets the random
 * order change between index versions.
 */
private static int getSeed(String fieldName, AtomicReaderContext context) {
    // getVersion() on a segment-level reader currently throws an NPE, so
    // walk up to the top-level DirectoryReader before asking for it.
    final IndexReaderContext root = ReaderUtil.getTopLevelContext(context);
    final DirectoryReader topReader = (DirectoryReader) root.reader();
    final long version = topReader.getVersion();
    return fieldName.hashCode() + context.docBase + (int) version;
}

From source file:org.apache.solr.search.function.FileFloatSource.java

License:Apache License

@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
    // The float cache is keyed by the top-level reader, while doc ids seen
    // here are segment-relative; docBase translates between the two spaces.
    IndexReaderContext root = ReaderUtil.getTopLevelContext(readerContext);
    final float[] values = getCachedFloats(root.reader());
    final int docBase = readerContext.docBase;

    return new FloatDocValues(this) {
        @Override
        public float floatVal(int doc) {
            return values[docBase + doc];
        }

        @Override
        public Object objectVal(int doc) {
            return floatVal(doc); // TODO: keep track of missing values
        }
    };
}

From source file:org.codelibs.elasticsearch.search.aggregations.bucket.nested.NestedAggregator.java

License:Apache License

@Override
public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx, final LeafBucketCollector sub)
        throws IOException {
    // Build an uncached searcher over the whole index so the child filter's
    // weight can hand out a scorer for this particular segment.
    final IndexReaderContext top = ReaderUtil.getTopLevelContext(ctx);
    final IndexSearcher searcher = new IndexSearcher(top);
    searcher.setQueryCache(null);
    final Weight childWeight = searcher.createNormalizedWeight(childFilter, false);
    final Scorer childScorer = childWeight.scorer(ctx);

    final DocIdSetIterator childDocs = childScorer == null ? null : childScorer.iterator();
    final BitSet parentDocs = parentFilter.getBitSet(ctx);
    return new LeafBucketCollectorBase(sub, null) {
        @Override
        public void collect(int parentDoc, long bucket) throws IOException {
            // Child docs are always indexed before their parent, so a parent
            // at doc 0 can have no children; also bail out when either filter
            // matched nothing in this segment.
            if (parentDoc == 0 || parentDocs == null || childDocs == null) {
                return;
            }

            // Children of this parent occupy (prevParentDoc, parentDoc).
            final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
            int childDoc = childDocs.docID();
            if (childDoc <= prevParentDoc) {
                childDoc = childDocs.advance(prevParentDoc + 1);
            }

            while (childDoc < parentDoc) {
                collectBucket(sub, childDoc, bucket);
                childDoc = childDocs.nextDoc();
            }
        }
    };
}

From source file:org.elasticsearch.index.cache.bitset.BitsetFilterCache.java

License:Apache License

private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context)
        throws IOException, ExecutionException {
    final Object coreCacheReader = context.reader().getCoreCacheKey();
    final ShardId shardId = ShardUtils.extractShardId(context.reader());
    if (shardId != null // can't require it because of the percolator
            && index.getName().equals(shardId.getIndex()) == false) {
        // insanity
        throw new IllegalStateException("Trying to load bit set for index [" + shardId.getIndex()
                + "] with cache of index [" + index.getName() + "]");
    }// w  w  w .  j  a  va 2  s .c o m
    Cache<Query, Value> filterToFbs = loadedFilters.get(coreCacheReader, new Callable<Cache<Query, Value>>() {
        @Override
        public Cache<Query, Value> call() throws Exception {
            context.reader().addCoreClosedListener(BitsetFilterCache.this);
            return CacheBuilder.newBuilder().build();
        }
    });
    return filterToFbs.get(query, new Callable<Value>() {
        @Override
        public Value call() throws Exception {
            final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
            final IndexSearcher searcher = new IndexSearcher(topLevelContext);
            searcher.setQueryCache(null);
            final Weight weight = searcher.createNormalizedWeight(query, false);
            final Scorer s = weight.scorer(context);
            final BitSet bitSet;
            if (s == null) {
                bitSet = null;
            } else {
                bitSet = BitSet.of(s.iterator(), context.reader().maxDoc());
            }

            Value value = new Value(bitSet, shardId);
            listener.onCache(shardId, value.bitset);
            return value;
        }
    }).bitset;
}

From source file:org.elasticsearch.index.shard.ShardSplittingQuery.java

License:Apache License

/**
 * Returns a producer that, per segment, computes the bit set of all
 * non-nested (parent) documents, or {@code null} when the segment has none.
 */
private static BitSetProducer newParentDocBitSetProducer(Version indexVersionCreated) {
    return context -> {
        final Query nonNested = Queries.newNonNestedFilter(indexVersionCreated);
        final IndexSearcher searcher = new IndexSearcher(ReaderUtil.getTopLevelContext(context));
        searcher.setQueryCache(null); // avoid polluting/consulting the query cache
        final Weight weight = searcher.createNormalizedWeight(nonNested, false);
        final Scorer scorer = weight.scorer(context);
        if (scorer == null) {
            return null;
        }
        return BitSet.of(scorer.iterator(), context.reader().maxDoc());
    };
}

From source file:org.elasticsearch.search.lookup.LeafIndexLookup.java

License:Apache License

public LeafIndexLookup(LeafReaderContext ctx) {
    reader = ctx.reader();/*from   ww  w .  j  av  a2 s . co  m*/
    parentReader = ReaderUtil.getTopLevelContext(ctx).reader();
    indexSearcher = new IndexSearcher(parentReader);
    indexSearcher.setQueryCache(null);
}

From source file:org.meresco.lucene.search.DeDupFilterSuperCollector.java

License:Open Source License

/**
 * Resolves the de-dup key for a top-level docId, or {@code null} when the
 * owning segment has no doc values for the key field or the stored key is 0.
 */
public DeDupFilterSuperCollector.Key keyForDocId(int docId) throws IOException {
    // Lazily resolve the root reader context from the first sub-collector.
    if (this.topLevelReaderContext == null) {
        this.topLevelReaderContext = ReaderUtil.getTopLevelContext(super.subs.get(0).context);
    }

    // Locate the segment owning this global docId, then read its key value.
    List<AtomicReaderContext> leaves = this.topLevelReaderContext.leaves();
    AtomicReaderContext leaf = leaves.get(ReaderUtil.subIndex(docId, leaves));
    NumericDocValues values = leaf.reader().getNumericDocValues(this.keyName);
    if (values == null) {
        return null;
    }
    long key = values.get(docId - leaf.docBase);
    return key == 0 ? null : this.keys.get(key).get();
}

From source file:org.meresco.lucene.search.GroupSuperCollector.java

License:Open Source License

/**
 * Looks up the grouping key for a top-level docId, or {@code null} when the
 * owning segment has no doc values for the key field.
 */
private Long getKeyForDocId(int docId) throws IOException {
    // Lazily resolve the root reader context from the first sub-collector.
    if (this.topLevelReaderContext == null) {
        this.topLevelReaderContext = ReaderUtil.getTopLevelContext(super.subs.get(0).context);
    }

    // Locate the segment owning this global docId, then read its key value.
    List<AtomicReaderContext> leaves = this.topLevelReaderContext.leaves();
    AtomicReaderContext leaf = leaves.get(ReaderUtil.subIndex(docId, leaves));
    NumericDocValues values = leaf.reader().getNumericDocValues(this.keyName);
    if (values == null) {
        return null;
    }
    return values.get(docId - leaf.docBase);
}

From source file:org.meresco.lucene.search.GroupSuperCollector.java

License:Open Source License

@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
    // Lazily size the key->doc maps from the top-level maxDoc, spreading the
    // capacity across all sub-collectors. The (1 + (1 - loadFactor)) factor
    // slightly oversizes the maps to reduce rehashing.
    if (this.keyToDocIds == null) {
        float loadFactor = 0.75f;
        int maxDoc = (int) (ReaderUtil.getTopLevelContext(context).reader().maxDoc() * (1 + (1 - loadFactor)));
        this.keyToDocIds = new TLongObjectHashMap<int[]>(maxDoc / this.groupSuperCollector.subs.size() / 10,
                loadFactor);
        this.keyToDocId = new TLongIntHashMap(maxDoc / this.groupSuperCollector.subs.size(), loadFactor,
                NO_ENTRY_KEY, NO_ENTRY_VALUE);
    }
    this.context = context;
    // Forward the segment change to the delegate exactly once. The original
    // code invoked this both before and after fetching doc values, which is
    // redundant and could reset any per-segment state the delegate keeps.
    this.delegate.setNextReader(context);
    NumericDocValues kv = context.reader().getNumericDocValues(this.keyName);
    if (kv == null) {
        // Segment lacks doc values for the key field; substitute an all-zero view.
        kv = DocValues.emptyNumeric();
    }
    this.keyValues = kv;
}

From source file:org.meresco.lucene.search.JoinFieldComparator.java

License:Open Source License

@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
    // Cache the root reader context once; it is identical for every segment.
    if (this.topLevelReaderContext == null) {
        this.topLevelReaderContext = ReaderUtil.getTopLevelContext(context);
    }
    // Refresh the per-segment key cache and the doc id offset.
    keys = KeyValuesCache.get(context, this.otherKeyName);
    docBase = context.docBase;
}