Example usage for org.apache.lucene.search IndexSearcher getIndexReader

Introduction

On this page you can find example usage for org.apache.lucene.search.IndexSearcher.getIndexReader().

Prototype

public IndexReader getIndexReader() 

Documentation

Return the IndexReader this searches.

Usage
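
Before the per-project examples, here is a minimal, self-contained sketch of the call itself. The index path "/tmp/example-index" and the field/term "title"/"lucene" are assumptions used only for illustration.

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class GetIndexReaderExample {
    public static void main(String[] args) throws Exception {
        Directory dir = FSDirectory.open(Paths.get("/tmp/example-index")); // assumed index location
        try (IndexReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            // getIndexReader() returns the reader this searcher was built over;
            // it is the natural place to ask for index-wide and per-term statistics.
            IndexReader same = searcher.getIndexReader();
            System.out.println("numDocs = " + same.numDocs());
            System.out.println("docFreq = " + same.docFreq(new Term("title", "lucene")));
        }
    }
}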

From source file: org.tallison.lucene.search.concordance.util.SimpleTargetCounter.java

License: Apache License

/**
 * Simple utility class to perform basic term frequency/document frequency
 * counts on the individual terms within a query.  This relies on
 * IndexReader and does not perform any concordance search/retrieval;
 * it is, therefore, very fast.
 * <p>
 * If you want to visit more than basic terms (e.g. SpanNear),
 * see {@link TargetVisitor}
 *
 * @param query query
 * @param searcher searcher
 * @return target term results
 * @throws java.io.IOException if there is an IOException from the searcher
 */
public SimpleTargetTermResults searchSingleTerms(Query query, IndexSearcher searcher) throws IOException {
    Query tmpQ = query.rewrite(searcher.getIndexReader());
    Set<Term> terms = new HashSet<>();
    Weight weight = tmpQ.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, 1.0f);
    weight.extractTerms(terms);

    Map<String, Integer> dfs = new HashMap<>();
    Map<String, Integer> tfs = new HashMap<>();

    for (Term t : terms) {
        String targ = t.text();
        int docFreq = searcher.getIndexReader().docFreq(t);
        if (docFreq == 0) {
            continue;
        }
        dfs.put(targ, docFreq);

        long tf = searcher.getIndexReader().totalTermFreq(t);
        tfs.put(targ, (int) tf);
    }

    SimpleTargetTermResults results = new SimpleTargetTermResults(dfs, tfs);

    return results;
}
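
Note that Weight.extractTerms was removed in recent Lucene releases; on Lucene 8.2 and later the same term collection is normally done with Query.visit and QueryVisitor.termCollector. A rough sketch of that variant (the helper name collectTerms is an assumption):

// Also requires org.apache.lucene.search.QueryVisitor (Lucene 8.2+).
static Set<Term> collectTerms(Query query, IndexSearcher searcher) throws IOException {
    Query rewritten = query.rewrite(searcher.getIndexReader());
    Set<Term> terms = new HashSet<>();
    // termCollector gathers every term the rewritten query needs in order to match.
    rewritten.visit(QueryVisitor.termCollector(terms));
    return terms;
}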

From source file: org.uberfire.ext.metadata.backend.lucene.index.BaseLuceneIndex.java

License: Apache License

protected int[] lookupDocIdByPK(final IndexSearcher searcher, final String... ids) throws IOException {
    final List<LeafReaderContext> subReaders = searcher.getIndexReader().leaves();
    final TermsEnum[] termsEnums = new TermsEnum[subReaders.size()];
    final PostingsEnum[] docsEnums = new PostingsEnum[subReaders.size()];
    for (int subIDX = 0; subIDX < subReaders.size(); subIDX++) {
        termsEnums[subIDX] = subReaders.get(subIDX).reader().fields().terms("id").iterator();
    }

    int[] results = new int[ids.length];

    for (int i = 0; i < results.length; i++) {
        results[i] = -1;
    }

    // for each id given
    for (int idx = 0; idx < ids.length; idx++) {
        int base = 0;
        final BytesRef id = new BytesRef(ids[idx]);
        // for each leaf reader..
        for (int subIDX = 0; subIDX < subReaders.size(); subIDX++) {
            final LeafReader subReader = subReaders.get(subIDX).reader();
            final TermsEnum termsEnum = termsEnums[subIDX];
            // does the enumeration of ("id") terms from our reader contain the "id" field we're looking for?
            if (termsEnum.seekExact(id)) {
                final PostingsEnum docs = docsEnums[subIDX] = termsEnum.postings(docsEnums[subIDX], 0);
                // okay, the reader contains it; get the postings for the term and null-check them
                if (docs != null) {
                    final int docID = docs.nextDoc();
                    Bits liveDocs = subReader.getLiveDocs();
                    // But wait, maybe some of the docs have been deleted! Check that too..
                    if ((liveDocs == null || liveDocs.get(docID)) && docID != DocIdSetIterator.NO_MORE_DOCS) {
                        results[idx] = base + docID;
                        break;
                    }
                }
            }
            base += subReader.maxDoc();
        }
    }

    return results;
}
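
LeafReader.fields() was removed in later Lucene releases; from Lucene 7 onward the per-field Terms are fetched directly from the leaf reader, so the enum setup at the top of this method would look roughly as follows:

for (int subIDX = 0; subIDX < subReaders.size(); subIDX++) {
    // LeafReader.terms(field) may return null if the segment has no terms for that field.
    Terms idTerms = subReaders.get(subIDX).reader().terms("id");
    termsEnums[subIDX] = idTerms == null ? null : idTerms.iterator();
}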

From source file: org.uberfire.metadata.backend.lucene.setups.BaseLuceneSetup.java

License: Apache License

@Override
public void rename(final String sourceId, final String targetId) {
    final IndexSearcher searcher = nrtSearcher();
    try {
        int docId = lookupDocIdByPK(searcher, sourceId)[0];
        if (docId != -1) {
            final Document source = searcher.getIndexReader().document(docId);

            source.removeField("id");
            source.add(new StringField("id", targetId, Field.Store.YES));

            indexDocument(sourceId, source);
        }
    } catch (IOException ex) {
        // ignored: if the source document cannot be read, the rename is simply skipped
    } finally {
        nrtRelease(searcher);
    }
}
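
The rename above reloads the stored document, swaps its "id" field and hands it back to the backend's own indexDocument. When working directly against an IndexWriter, the usual replace-by-key step is updateDocument; a minimal sketch, where writer is an assumed IndexWriter:

// Atomically deletes any document whose "id" term equals sourceId and indexes the modified copy.
writer.updateDocument(new Term("id", sourceId), source);
writer.commit();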

From source file: org.weborganic.flint.SearcherManager.java

License: artistic-license-2.0

/**
 * Perform a swap between current searcher and given searcher.
 *
 * @param newSearcher the searcher to swap in
 * @throws IOException
 */
private synchronized void swapSearcher(IndexSearcher newSearcher) throws IOException {
    LOGGER.debug("Swapping reader from {} to {}", this.currentSearcher.getIndexReader().hashCode(),
            newSearcher.getIndexReader().hashCode());
    release(this.currentSearcher);
    this.currentSearcher = newSearcher;
}

From source file: org.weborganic.flint.SearcherManager.java

License: artistic-license-2.0

/**
 * Release the given searcher.
 *
 * @param searcher
 * @throws IOException
 */
protected synchronized void release(IndexSearcher searcher) throws IOException {
    LOGGER.debug("Releasing reader {}", searcher.getIndexReader().hashCode());
    searcher.getIndexReader().decRef();
    // check if we should close an old one
    closeIfDirty(searcher.getIndexReader());
}
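
Lucene also ships a reference-counted manager (org.apache.lucene.search.SearcherManager) that implements the same acquire/release/refresh bookkeeping these two methods hand-roll. A minimal sketch of the built-in variant, assuming an existing IndexWriter named writer (all calls may throw IOException):

SearcherManager manager = new SearcherManager(writer, null); // null = default SearcherFactory
IndexSearcher searcher = manager.acquire();
try {
    // run searches; the underlying reader stays pinned until release
} finally {
    manager.release(searcher); // decrements the reader's reference count
}
manager.maybeRefresh(); // swaps in a fresh searcher if the index has changed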

From source file: org.zenoss.zep.index.impl.lucene.LuceneEventIndexBackend.java

License: Open Source License

public void init() {
    //Do Zenoss' default query to warm the cache on a thread, as not to delay startup
    class CacheWarmer implements Runnable {
        @Override
        public void run() {
            logger.info("Warming cache for {}", name);
            IndexSearcher searcher = null;
            try {
                searcher = getSearcher();
                EventFilter filter = EventFilter.newBuilder()
                        .addAllStatus(Lists.newArrayList(EventStatus.values()))
                        .addAllSeverity(Lists.newArrayList(EventSeverity.values())).build();
                Query query = buildQuery(searcher.getIndexReader(), filter, null);
                List<EventSort> sortList = new ArrayList<EventSort>(2);
                sortList.add(EventSort.newBuilder().setField(EventSort.Field.SEVERITY)
                        .setDirection(Direction.DESCENDING).build());
                sortList.add(EventSort.newBuilder().setField(EventSort.Field.LAST_SEEN)
                        .setDirection(Direction.DESCENDING).build());
                Sort sort = buildSort(sortList);
                logger.info("Warming cache for {}", name);
                searchToEventSummaryResult(searcher, query, sort, Sets.newHashSet(FIELD_PROTOBUF), 0, 1000);
                logger.info("Done warming cache for {}!", name);
                ready = true;
            } catch (Exception e) {
                logger.error("Failed to warm cache for {}", name);
                e.printStackTrace();
            } finally {
                try {
                    returnSearcher(searcher);
                } catch (ZepException e) {
                    logger.error("Failed to return searcher");
                    e.printStackTrace();
                }
            }
        }
    }
    Thread warmer = new Thread(new CacheWarmer());
    warmer.setName(name + " Cache Warmer Thread");
    warmer.start();
}
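
The warmer simply runs one broad, fully sorted query on a background thread so that the first real request does not pay for loading caches. Against a bare IndexSearcher the same idea can be sketched with a match-all query (the sort field name "last_seen" is an assumption):

// One broad sorted search is usually enough to populate field and filter caches.
Sort warmSort = new Sort(new SortField("last_seen", SortField.Type.LONG, true));
searcher.search(new MatchAllDocsQuery(), 1000, warmSort);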

From source file: org.zenoss.zep.index.impl.lucene.LuceneEventIndexBackend.java

License: Open Source License

private EventSummaryResult listInternal(EventSummaryRequest request, Set<String> fieldsToLoad)
        throws ZepException {
    IndexSearcher searcher = null;
    long now = System.currentTimeMillis();
    Query query = null;
    try {
        searcher = getSearcher();
        query = buildQuery(searcher.getIndexReader(), request.getEventFilter(), request.getExclusionFilter());
        Sort sort = buildSort(request.getSortList());
        return searchToEventSummaryResult(searcher, query, sort, fieldsToLoad, request.getOffset(),
                request.getLimit());
    } catch (IOException e) {
        throw new ZepException(e.getLocalizedMessage(), e);
    } catch (OutOfMemoryError e) {
        closeSearcherManager();
        throw e;
    } finally {
        returnSearcher(searcher);
        if (query != null) {
            logger.debug("Query {} finished in {} milliseconds", query.toString(),
                    System.currentTimeMillis() - now);
        }
    }
}
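
listInternal pages with an offset and a limit. For deep pages, Lucene's cursor-style searchAfter avoids re-collecting the skipped hits; a rough sketch against the same searcher, query and sort (pageSize is an assumed variable):

TopDocs firstPage = searcher.search(query, pageSize, sort);
ScoreDoc last = firstPage.scoreDocs.length == 0 ? null
        : firstPage.scoreDocs[firstPage.scoreDocs.length - 1];
if (last != null) {
    // Resumes collection after the last hit of the previous page instead of skipping `offset` docs.
    TopDocs nextPage = searcher.searchAfter(last, query, pageSize, sort);
}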

From source file: org.zenoss.zep.index.impl.lucene.LuceneEventIndexBackend.java

License: Open Source License

@Override
public void purge(Date threshold) throws ZepException {
    IndexSearcher searcher = null;
    try {
        searcher = getSearcher();
        LuceneQueryBuilder query = new LuceneQueryBuilder(filterCacheManager, searcher.getIndexReader(),
                this.indexedDetailsConfiguration);
        query.addRange(FIELD_LAST_SEEN_TIME, null, threshold.getTime());
        this.trackingIndexWriter.deleteDocuments(query.build());
        flush();
    } catch (IOException e) {
        throw new ZepException(e);
    } catch (OutOfMemoryError e) {
        closeSearcherManager();
        throw e;
    } finally {
        returnSearcher(searcher);
    }
}
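
purge builds the range query over FIELD_LAST_SEEN_TIME through the backend's own LuceneQueryBuilder and passes it to the tracking writer. Against a plain IndexWriter with a numeric point field, the same delete-older-than step could be sketched like this (the field name "last_seen_time" and the writer variable are assumptions):

// Deletes every document whose last-seen timestamp is at or before the threshold (bounds are inclusive).
Query olderThan = LongPoint.newRangeQuery("last_seen_time", Long.MIN_VALUE, threshold.getTime());
writer.deleteDocuments(olderThan);
writer.commit();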

From source file: org.zenoss.zep.index.impl.lucene.LuceneEventIndexBackend.java

License: Open Source License

protected void searchEventTagSeverities(EventFilter filter, EventTagSeverityCounter counter)
        throws ZepException {
    final boolean hasTagsFilter = filter.getTagFilterCount() > 0;
    IndexSearcher searcher = null;
    try {
        searcher = getSearcher();
        final Query query = buildQueryFromFilter(searcher.getIndexReader(), filter);
        final OpenBitSet docs = new OpenBitSet(searcher.getIndexReader().maxDoc());
        searcher.search(query, new Collector() {
            private int docBase;

            @Override
            public void setScorer(Scorer scorer) throws IOException {
            }

            @Override
            public void collect(int doc) throws IOException {
                docs.set(docBase + doc);
            }

            @Override
            public void setNextReader(AtomicReaderContext atomicReaderContext) throws IOException {
                this.docBase = atomicReaderContext.docBase;
            }

            @Override
            public boolean acceptsDocsOutOfOrder() {
                return true;
            }
        });
        int docId;
        final DocIdSetIterator it = docs.iterator();
        while ((docId = it.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
            final EventSummary summary;
            if (this.archive) {
                // TODO: This isn't very cheap - would be better to batch by UUID in separate calls
                // This doesn't get called on the event archive right now, so leave it until need to optimize.
                Document doc = searcher.doc(docId, UUID_FIELDS);
                summary = this.eventSummaryBaseDao.findByUuid(doc.get(FIELD_UUID));
            } else {
                Document doc = searcher.doc(docId);
                // This is an optimization for getting the non-archived tag severities from an organizer
                // (see ZEN-7239). For that ticket the index was updated to store the fields needed to
                // generate tag severities. To avoid a migration that deletes the index entirely, this
                // method stays backwards compatible by falling back to decompressing the protobuf.
                if (doc.get(FIELD_SEVERITY) != null) {
                    int count = Integer.parseInt(doc.get(FIELD_COUNT));
                    boolean acknowledged = EventStatus.STATUS_ACKNOWLEDGED
                            .equals(EventStatus.valueOf(Integer.parseInt(doc.get(FIELD_STATUS))));
                    EventSeverity severity = EventSeverity.valueOf(Integer.parseInt(doc.get(FIELD_SEVERITY)));

                    // get the map for each filter and update the count
                    for (String tag : doc.getValues(FIELD_TAGS))
                        counter.update(tag, severity, count, acknowledged);
                    continue;
                } else {
                    summary = LuceneEventIndexMapper.toEventSummary(doc);
                }
            }
            boolean acknowledged = EventStatus.STATUS_ACKNOWLEDGED == summary.getStatus();
            Event occurrence = summary.getOccurrence(0);
            EventSeverity severity = occurrence.getSeverity();
            int count = occurrence.getCount();
            EventActor actor = occurrence.getActor();

            // Build tags from element_uuids - no tags specified in filter
            if (!hasTagsFilter) {
                if (actor.hasElementUuid())
                    counter.update(actor.getElementUuid(), severity, count, acknowledged);
            }
            // Build tag severities from passed in filter
            else {
                for (String uuid : Arrays.asList(actor.getElementUuid(), actor.getElementSubUuid()))
                    counter.update(uuid, severity, count, acknowledged);
                for (EventTag tag : occurrence.getTagsList())
                    for (String tagUuid : tag.getUuidList())
                        counter.update(tagUuid, severity, count, acknowledged);
            }
        }
    } catch (IOException e) {
        throw new ZepException(e);
    } catch (OutOfMemoryError e) {
        closeSearcherManager();
        throw e;
    } finally {
        returnSearcher(searcher);
    }
}
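
The anonymous Collector above uses the Lucene 4.x callbacks (setNextReader with AtomicReaderContext, acceptsDocsOutOfOrder). From Lucene 5 onward the same "mark every matching doc" collector is usually a SimpleCollector over a FixedBitSet, sketched below; on Lucene 8+ needsScores() is replaced by scoreMode().

final FixedBitSet docs = new FixedBitSet(searcher.getIndexReader().maxDoc());
searcher.search(query, new SimpleCollector() {
    private int docBase;

    @Override
    protected void doSetNextReader(LeafReaderContext context) {
        this.docBase = context.docBase; // remember this segment's offset into the global doc id space
    }

    @Override
    public void collect(int doc) {
        docs.set(docBase + doc); // record the global doc id of every hit
    }

    @Override
    public boolean needsScores() {
        return false; // only doc ids are needed, so scoring can be skipped
    }
});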

From source file: perf.AutoPrefixPerf.java

License: Apache License

private static void printQueryTerms(final MultiTermQuery mtq, final IndexSearcher searcher) throws IOException {
    final AtomicInteger termCount = new AtomicInteger();
    final AtomicInteger docCount = new AtomicInteger();
    // TODO: is there an easier way to see terms an MTQ matches?  this is awkward
    MultiTermQuery.RewriteMethod rewriter = mtq.getRewriteMethod();
    if (mtq instanceof TermRangeQuery) {
        TermRangeQuery trq = (TermRangeQuery) mtq;
        BytesRef lowerTerm = trq.getLowerTerm();
        BytesRef upperTerm = trq.getUpperTerm();
        System.out.println("query: " + bytesToLong(lowerTerm) + " " + lowerTerm + " - " + bytesToLong(upperTerm)
                + " " + upperTerm);
    } else {
        System.out.println("query: " + mtq);
    }
    System.out.println("  query matches " + searcher.search(mtq, 1).totalHits + " docs");
    mtq.setRewriteMethod(new MultiTermQuery.RewriteMethod() {
        @Override
        public Query rewrite(IndexReader reader, MultiTermQuery query) throws IOException {
            for (AtomicReaderContext ctx : searcher.getIndexReader().leaves()) {
                TermsEnum termsEnum = getTermsEnum(mtq, ctx.reader().fields().terms(mtq.getField()), null);
                System.out.println("  reader=" + ctx.reader());
                BytesRef term;
                while ((term = termsEnum.next()) != null) {
                    System.out.println(
                            "    term: len=" + term.length + " " + term + " dF=" + termsEnum.docFreq());
                    termCount.incrementAndGet();
                    docCount.addAndGet(termsEnum.docFreq());
                }
            }

            return null;
        }
    });
    mtq.rewrite(searcher.getIndexReader());
    System.out.println("  total terms: " + termCount);
    System.out.println("  total docs: " + docCount);
    mtq.setRewriteMethod(rewriter);
}
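
The totalHits line above runs a top-1 search purely to count matches; since Lucene 5.1, IndexSearcher.count(Query) does the same thing without collecting any hits:

// Counts matching documents directly, without building a top-docs collector.
System.out.println("  query matches " + searcher.count(mtq) + " docs");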