Example usage for org.apache.lucene.search.IndexSearcher.getIndexReader

List of usage examples for org.apache.lucene.search.IndexSearcher.getIndexReader

Introduction

On this page you can find example usage for org.apache.lucene.search.IndexSearcher.getIndexReader().

Prototype

public IndexReader getIndexReader() 

Document

Return the IndexReader this searches.
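
A minimal sketch of the pattern most of the examples below follow, assuming an already-built SearcherManager (the class and field names here are hypothetical): acquire an IndexSearcher, read index statistics through getIndexReader(), and always release the searcher in a finally block.

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.SearcherManager;

public class ReaderStats {

    private final SearcherManager searcherManager;

    public ReaderStats(SearcherManager searcherManager) {
        this.searcherManager = searcherManager;
    }

    /** Returns the number of live (non-deleted) documents behind the current searcher. */
    public int numDocs() throws IOException {
        IndexSearcher searcher = searcherManager.acquire();
        try {
            // The reader belongs to the acquired searcher; do not close it here.
            IndexReader reader = searcher.getIndexReader();
            return reader.numDocs();
        } finally {
            searcherManager.release(searcher);
        }
    }
}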

Usage

From source file:com.stratio.cassandra.lucene.index.FSIndex.java

License:Apache License

/**
 * Returns the total number of deleted {@link Document}s in this index.
 *
 * @return the number of deleted {@link Document}s
 */
@Override
public long getNumDeletedDocs() {
    logger.debug("Getting %s num deleted docs", name);
    try {
        IndexSearcher searcher = searcherManager.acquire();
        try {
            return searcher.getIndexReader().numDeletedDocs();
        } finally {
            searcherManager.release(searcher);
        }
    } catch (Exception e) {
        throw new IndexException(logger, e, "Error getting %s num docs", name);
    }
}

From source file:com.stratio.cassandra.lucene.index.RAMIndex.java

License:Apache License

/**
 * Finds the top {@code count} hits for {@code query}, sorting the hits by {@code sort}.
 *
 * @param query the {@link Query} to search for
 * @param sort the {@link Sort} to be applied
 * @param count the max number of results to be collected
 * @param fields the names of the fields to be loaded
 * @return the found documents
 */
public List<Document> search(Query query, Sort sort, Integer count, Set<String> fields) {
    try {
        indexWriter.commit();
        IndexReader reader = DirectoryReader.open(directory);
        IndexSearcher searcher = new IndexSearcher(reader);
        sort = sort.rewrite(searcher);
        TopDocs topDocs = searcher.search(query, count, sort);
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        List<Document> documents = new LinkedList<>();
        for (ScoreDoc scoreDoc : scoreDocs) {
            Document document = searcher.doc(scoreDoc.doc, fields);
            documents.add(document);
        }
        searcher.getIndexReader().close();
        return documents;
    } catch (IOException e) {
        throw new IndexException(logger, e, "Error while searching");
    }
}
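
Because the DirectoryReader above is opened inside the method and released only through searcher.getIndexReader().close(), the reader can leak if the search throws before that line. A hedged variant (a sketch only, reusing the surrounding class's indexWriter, directory, logger and IndexException) would close the reader with try-with-resources:

public List<Document> search(Query query, Sort sort, Integer count, Set<String> fields) {
    try {
        indexWriter.commit();
        // try-with-resources closes the reader even if the search throws
        try (IndexReader reader = DirectoryReader.open(directory)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            Sort rewritten = sort.rewrite(searcher);
            TopDocs topDocs = searcher.search(query, count, rewritten);
            List<Document> documents = new LinkedList<>();
            for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
                documents.add(searcher.doc(scoreDoc.doc, fields));
            }
            return documents;
        }
    } catch (IOException e) {
        throw new IndexException(logger, e, "Error while searching");
    }
}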

From source file:com.stratio.cassandra.lucene.service.LuceneIndex.java

License:Apache License

/**
 * Returns the total number of {@link Document}s in this index.
 *
 * @return The total number of {@link Document}s in this index.
 * @throws IOException If Lucene throws IO errors.
 */
@Override
public long getNumDocs() throws IOException {
    Log.debug("%s get num docs", logName);
    IndexSearcher searcher = searcherManager.acquire();
    try {
        return searcher.getIndexReader().numDocs();
    } finally {
        searcherManager.release(searcher);
    }
}

From source file:com.stratio.cassandra.lucene.service.LuceneIndex.java

License:Apache License

/**
 * Returns the total number of deleted {@link Document}s in this index.
 *
 * @return The total number of deleted {@link Document}s in this index.
 * @throws IOException If Lucene throws IO errors.
 */
@Override
public long getNumDeletedDocs() throws IOException {
    Log.debug("%s get num deleted docs", logName);
    IndexSearcher searcher = searcherManager.acquire();
    try {
        return searcher.getIndexReader().numDeletedDocs();
    } finally {
        searcherManager.release(searcher);
    }
}

From source file:com.sxc.lucene.analysis.codec.MetaphoneAnalyzerTest.java

License:Apache License

public void testKoolKat() throws Exception {
    RAMDirectory directory = new RAMDirectory();
    Analyzer analyzer = new MetaphoneReplacementAnalyzer();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(Version.LUCENE_47, analyzer);
    IndexWriter writer = new IndexWriter(directory, indexWriterConfig);
    Document doc = new Document();
    doc.add(new TextField("contents", "cool cat", Field.Store.YES));
    writer.addDocument(doc);
    writer.close();
    IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(directory));
    Query query = new QueryParser(Version.LUCENE_47, "contents", analyzer).parse("kool kat");
    TopDocs hits = searcher.search(query, 1);
    assertEquals(1, hits.totalHits);
    int docID = hits.scoreDocs[0].doc;
    doc = searcher.doc(docID);
    assertEquals("cool cat", doc.get("contents"));
    searcher.getIndexReader().close();
}
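
The assertion succeeds because MetaphoneReplacementAnalyzer indexes phonetic encodings rather than the literal tokens, so "kool kat" and "cool cat" reduce to the same terms at both index and query time. The stored field value is not affected by analysis, which is why doc.get("contents") still returns the original "cool cat".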

From source file:com.tcdi.zombodb.query.VisibilityQueryHelper.java

License:Apache License

static Map<Integer, FixedBitSet> determineVisibility(final Query query, final String field, final long myXid,
        final long xmin, final long xmax, final Set<Long> activeXids, IndexSearcher searcher,
        List<BytesRef> updatedCtids) throws IOException {
    final Map<Integer, FixedBitSet> visibilityBitSets = new HashMap<>();

    if (updatedCtids.size() == 0)
        return visibilityBitSets;

    //
    // build a map of {@link VisibilityInfo} objects by each _prev_ctid
    //
    // We use XConstantScoreQuery here so that we exclude deleted docs
    //

    final Map<BytesRef, List<VisibilityInfo>> map = new HashMap<>();
    searcher.search(
            new XConstantScoreQuery(
                    SearchContext.current().filterCache().cache(new TermsFilter(field, updatedCtids))),
            new ZomboDBTermsCollector(field) {
                private SortedDocValues prevCtids;
                private SortedNumericDocValues xids;
                private SortedNumericDocValues sequence;
                private int ord;
                private int maxdoc;

                @Override
                public void collect(int doc) throws IOException {
                    xids.setDocument(doc);
                    sequence.setDocument(doc);

                    long xid = xids.valueAt(0);
                    long seq = sequence.valueAt(0);
                    BytesRef prevCtid = prevCtids.get(doc);

                    List<VisibilityInfo> matchingDocs = map.get(prevCtid);

                    if (matchingDocs == null)
                        map.put(BytesRef.deepCopyOf(prevCtid), matchingDocs = new ArrayList<>());
                    matchingDocs.add(new VisibilityInfo(ord, maxdoc, doc, xid, seq));
                }

                @Override
                public void setNextReader(AtomicReaderContext context) throws IOException {
                    prevCtids = FieldCache.DEFAULT.getTermsIndex(context.reader(), field);
                    xids = context.reader().getSortedNumericDocValues("_xid");
                    sequence = context.reader().getSortedNumericDocValues("_zdb_seq");
                    ord = context.ord;
                    maxdoc = context.reader().maxDoc();
                }
            });

    if (map.isEmpty())
        return visibilityBitSets;

    //
    // pick out the first VisibilityInfo for each document that is visible & committed
    // and build a FixedBitSet for each reader 'ord' that contains visible
    // documents.  A map of these (key'd on reader ord) is what we return.
    //

    BytesRefBuilder bytesRefBuilder = new BytesRefBuilder() {
        /* overridden to avoid making a copy of the byte array */
        @Override
        public BytesRef toBytesRef() {
            return new BytesRef(this.bytes(), 0, this.length());
        }
    };

    Terms committedXidsTerms = MultiFields.getFields(searcher.getIndexReader()).terms("_zdb_committed_xid");
    TermsEnum committedXidsEnum = committedXidsTerms == null ? null : committedXidsTerms.iterator(null);
    for (List<VisibilityInfo> visibility : map.values()) {
        CollectionUtil.introSort(visibility, new Comparator<VisibilityInfo>() {
            @Override
            public int compare(VisibilityInfo o1, VisibilityInfo o2) {
                int cmp = Long.compare(o2.xid, o1.xid);
                return cmp == 0 ? Long.compare(o2.sequence, o1.sequence) : cmp;
            }
        });

        boolean foundVisible = false;
        for (VisibilityInfo mapping : visibility) {

            if (foundVisible || mapping.xid > xmax || activeXids.contains(mapping.xid) || (mapping.xid != myXid
                    && !isCommitted(committedXidsEnum, mapping.xid, bytesRefBuilder))) {
                // document is not visible to us
                FixedBitSet visibilityBitset = visibilityBitSets.get(mapping.readerOrd);
                if (visibilityBitset == null)
                    visibilityBitSets.put(mapping.readerOrd,
                            visibilityBitset = new FixedBitSet(mapping.maxdoc));
                visibilityBitset.set(mapping.docid);
            } else {
                foundVisible = true;
            }
        }
    }

    return visibilityBitSets;
}

From source file:com.tripod.lucene.service.AbstractLuceneService.java

License:Apache License

/**
 * Performs highlighting for a given query and a given document.
 *
 * @param indexSearcher the IndexSearcher performing the query
 * @param query the Tripod LuceneQuery
 * @param scoreDoc the Lucene ScoreDoc
 * @param doc the Lucene Document
 * @param highlighter the Highlighter to use
 * @param result the QueryResult to add the highlights to
 * @throws IOException if an error occurs performing the highlighting
 * @throws InvalidTokenOffsetsException if an error occurs performing the highlighting
 */
protected void performHighlighting(final IndexSearcher indexSearcher, final Q query, final ScoreDoc scoreDoc,
        final Document doc, final Highlighter highlighter, final QR result)
        throws IOException, InvalidTokenOffsetsException {

    if (query.getHighlightFields() == null || query.getHighlightFields().isEmpty()) {
        return;
    }

    final List<Highlight> highlights = new ArrayList<>();
    final List<String> hlFieldNames = getHighlightFieldNames(query, doc);

    // process each field to highlight on
    for (String hlField : hlFieldNames) {
        final String text = doc.get(hlField);
        if (StringUtils.isEmpty(text)) {
            continue;
        }

        final List<String> snippets = new ArrayList<>();
        final Fields tvFields = indexSearcher.getIndexReader().getTermVectors(scoreDoc.doc);
        final int maxStartOffset = highlighter.getMaxDocCharsToAnalyze() - 1;

        // get the snippets for the given field
        final TokenStream tokenStream = TokenSources.getTokenStream(hlField, tvFields, text, analyzer,
                maxStartOffset);
        final TextFragment[] textFragments = highlighter.getBestTextFragments(tokenStream, text, false, 10);
        for (TextFragment textFragment : textFragments) {
            if (textFragment != null && textFragment.getScore() > 0) {
                snippets.add(textFragment.toString());
            }
        }

        // if we have snippets then add a highlight result to the QueryResult
        if (snippets.size() > 0) {
            highlights.add(new Highlight(hlField, snippets));
        }
    }

    result.setHighlights(highlights);
}
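
TokenSources.getTokenStream above prefers the term vectors obtained from indexSearcher.getIndexReader().getTermVectors(scoreDoc.doc) and falls back to re-analyzing the stored text with the analyzer when none are present. A hedged index-time sketch (the helper class and the "body" field name are hypothetical) of storing term vectors so the faster path can be taken:

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;

public class HighlightableFields {

    /** Builds a document whose "body" field stores term vectors for highlighting. */
    public static Document newDoc(String text) {
        // Stored text plus term vectors with positions and offsets, so the
        // highlighter can reuse the vectors instead of re-analyzing the text.
        FieldType highlightable = new FieldType(TextField.TYPE_STORED);
        highlightable.setStoreTermVectors(true);
        highlightable.setStoreTermVectorPositions(true);
        highlightable.setStoreTermVectorOffsets(true);
        highlightable.freeze();

        Document doc = new Document();
        doc.add(new Field("body", text, highlightable));
        return doc;
    }
}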

From source file:com.tripod.lucene.service.AbstractLuceneService.java

License:Apache License

/**
 * Processes the faceting results and adds them to the QueryResults builder.
 *
 * @param indexSearcher the IndexSearcher performing the query
 * @param facetsCollector the FacetsCollector that was used for the search
 * @param facetFields the fields to Facet on
 * @param resultBuilder the QueryResults.Builder
 * @throws IOException if an error occurs performing faceting
 */
protected void processFacetResults(final IndexSearcher indexSearcher, final FacetsCollector facetsCollector,
        final Set<String> facetFields, final LuceneQueryResults.Builder<QR> resultBuilder) throws IOException {
    if (facetFields == null) {
        return;
    }

    for (String facetField : facetFields) {
        final List<FacetCount> facetResultCounts = new ArrayList<>();
        final SortedSetDocValuesReaderState state = new DefaultSortedSetDocValuesReaderState(
                indexSearcher.getIndexReader(), facetField);
        final Facets facets = new SortedSetDocValuesFacetCounts(state, facetsCollector);

        org.apache.lucene.facet.FacetResult result = facets.getTopChildren(10, facetField);
        for (int i = 0; i < result.childCount; i++) {
            LabelAndValue lv = result.labelValues[i];
            facetResultCounts.add(new FacetCount(lv.label, lv.value.longValue()));
        }

        resultBuilder.addFacetResult(new FacetResult(facetField, facetResultCounts));
    }
}
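
For SortedSetDocValuesFacetCounts to return counts, the documents must have been indexed with SortedSetDocValuesFacetField values whose dimension maps to the same index field name that DefaultSortedSetDocValuesReaderState is opened with here. A hedged index-time sketch (the helper class, writer parameter, and dimension name are hypothetical):

import java.io.IOException;

import org.apache.lucene.document.Document;
import org.apache.lucene.facet.FacetsConfig;
import org.apache.lucene.facet.sortedset.SortedSetDocValuesFacetField;
import org.apache.lucene.index.IndexWriter;

public class FacetIndexing {

    /** Adds a document carrying a sorted-set facet value for the given dimension. */
    public static void addFaceted(IndexWriter writer, String dimension, String value) throws IOException {
        FacetsConfig config = new FacetsConfig();
        // Store the dimension in an index field of the same name, matching the
        // per-field DefaultSortedSetDocValuesReaderState used at query time above.
        config.setIndexFieldName(dimension, dimension);

        Document doc = new Document();
        doc.add(new SortedSetDocValuesFacetField(dimension, value));
        writer.addDocument(config.build(doc));
    }
}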

From source file:com.tuplejump.stargate.cassandra.SearchSupport.java

License:Apache License

protected List<Row> getRows(final ExtendedFilter filter, final Search search) {
    final SearchSupport searchSupport = this;
    AbstractBounds<RowPosition> keyRange = filter.dataRange.keyRange();
    final Range<Token> filterRange = new Range<>(keyRange.left.getToken(), keyRange.right.getToken());
    final boolean isSingleToken = filterRange.left.equals(filterRange.right);
    final boolean isFullRange = isSingleToken && baseCfs.partitioner.getMinimumToken().equals(filterRange.left);

    SearcherCallback<List<Row>> sc = new SearcherCallback<List<Row>>() {
        @Override
        public List<Row> doWithSearcher(org.apache.lucene.search.IndexSearcher searcher) throws Exception {
            Utils.SimpleTimer timer = Utils.getStartedTimer(logger);
            List<Row> results;
            if (search == null) {
                results = new ArrayList<>();
            } else {
                Utils.SimpleTimer timer2 = Utils.getStartedTimer(SearchSupport.logger);
                Function function = search.function(options);
                Query query = search.query(options);
                int resultsLimit = searcher.getIndexReader().maxDoc();
                if (function.shouldLimit()) {
                    if (resultsLimit == 0) {
                        resultsLimit = 1;
                    }
                    resultsLimit = Math.min(filter.currentLimit() + 1, resultsLimit);
                }
                function.init(options);
                IndexEntryCollector collector = new IndexEntryCollector(function, search, options,
                        resultsLimit);
                searcher.search(query, collector);
                timer2.endLogTime("TopDocs search for [" + collector.getTotalHits() + "] results ");
                if (SearchSupport.logger.isDebugEnabled()) {
                    SearchSupport.logger.debug(String.format("Search results [%s]", collector.getTotalHits()));
                }
                RowScanner iter = new RowScanner(searchSupport, baseCfs, filter, collector,
                        function instanceof AggregateFunction ? false : search.isShowScore());
                Utils.SimpleTimer timer3 = Utils.getStartedTimer(SearchSupport.logger);
                results = function.process(iter, customColumnFactory, baseCfs, currentIndex);
                timer3.endLogTime("Aggregation [" + collector.getTotalHits() + "] results");
            }
            timer.endLogTime("Search with results [" + results.size() + "] ");
            return results;

        }

        @Override
        public Range<Token> filterRange() {
            return filterRange;
        }

        @Override
        public boolean isSingleToken() {
            return isSingleToken;
        }

        @Override
        public boolean isFullRange() {
            return isFullRange;
        }
    };

    return currentIndex.search(sc);
}
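
Note that searcher.getIndexReader().maxDoc() is used as the collector's upper bound: maxDoc() also counts deleted documents, so it is always at least as large as the number of hits the query can return.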

From source file:com.tuplejump.stargate.IndexContainer.java

License:Apache License

public <T> T search(SearcherCallback<T> searcherCallback) {
    List<IndexReader> indexReaders = new ArrayList<>();
    Map<Indexer, IndexSearcher> indexSearchers = new HashMap<>();
    for (Map.Entry<Range<Token>, Indexer> entry : indexers.entrySet()) {
        Range<Token> range = entry.getKey();
        boolean intersects = intersects(searcherCallback.filterRange(), searcherCallback.isSingleToken(),
                searcherCallback.isFullRange(), range);
        if (intersects) {
            Indexer indexer = entry.getValue();
            IndexSearcher searcher = indexer.acquire();
            indexSearchers.put(indexer, searcher);
            indexReaders.add(searcher.getIndexReader());
        }
    }
    IndexReader[] indexReadersArr = new IndexReader[indexReaders.size()];
    indexReaders.toArray(indexReadersArr);
    MultiReader multiReader = new MultiReader(indexReadersArr, false);
    IndexSearcher allSearcher = new IndexSearcher(multiReader, executorService);
    try {
        return searcherCallback.doWithSearcher(allSearcher);
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        try {
            multiReader.close();
        } catch (IOException e) {
            logger.error("Could not close reader", e);
        }
        for (Map.Entry<Indexer, IndexSearcher> entry : indexSearchers.entrySet()) {
            entry.getKey().release(entry.getValue());
        }
    }
}
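
The MultiReader is constructed with closeSubReaders set to false, so closing it in the finally block does not close the per-range readers obtained via getIndexReader(); each underlying searcher is instead handed back to its Indexer through release().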