Example usage for org.apache.lucene.search IndexSearcher getIndexReader

Introduction

On this page you can find example usages of org.apache.lucene.search.IndexSearcher.getIndexReader().

Prototype

public IndexReader getIndexReader() 

Document

Return the IndexReader this searches.
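Before the project examples below, here is a minimal, self-contained sketch of the call itself: the reader returned by getIndexReader() is the one the searcher was built over, so it is handy for index-level statistics next to search calls. This is only an orientation sketch; the index path is illustrative and it assumes a recent Lucene API (Path-based FSDirectory.open).

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class GetIndexReaderExample {
    public static void main(String[] args) throws Exception {
        // Open an existing index (path is illustrative) and wrap it in a searcher.
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/example-index"));
                IndexReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            // getIndexReader() returns the same reader this searcher searches.
            System.out.println("numDocs = " + searcher.getIndexReader().numDocs());
            System.out.println("maxDoc  = " + searcher.getIndexReader().maxDoc());
        }
    }
}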

Usage

From source file:org.pageseeder.flint.lucene.facet.FlexibleRangeFacet.java

License:Apache License

/**
 * Computes each facet option as a flexible facet.
 * All filters except those on the same field as this facet are applied to the base query
 * before computing the counts.
 *
 * @param searcher the index searcher to use.
 * @param base     the base query.
 * @param filters  the filters applied to the base query
 * @param size     the maximum number of field values to compute.
 *
 * @throws IOException if thrown by the searcher.
 */
public void compute(IndexSearcher searcher, Query base, List<Filter> filters, int size) throws IOException {
    // If the base is null, simply calculate for each query
    if (base == null) {
        compute(searcher, size);
    } else {
        if (size < 0)
            throw new IllegalArgumentException("size < 0");
        this.totalRanges = 0;
        // find all terms
        List<Term> terms = Terms.terms(searcher.getIndexReader(), this._name);
        // Otherwise, re-compute the query without the corresponding filter 
        Query filtered = base;
        if (filters != null) {
            this.flexible = true;
            for (Filter filter : filters) {
                if (!this._name.equals(filter.name()))
                    filtered = filter.filterQuery(filtered);
            }
        }
        DocumentCounter counter = new DocumentCounter();
        Map<Range, Integer> ranges = new HashMap<>();
        for (Term t : terms) {
            // find range
            Range r = findRange(t);
            if (r == null)
                r = OTHER;
            // find count
            BooleanQuery query = new BooleanQuery();
            query.add(filtered, Occur.MUST);
            query.add(termToQuery(t), Occur.MUST);
            searcher.search(query, counter);
            int count = counter.getCount();
            if (count > 0) {
                // add to map
                Integer ec = ranges.get(r);
                ranges.put(r, Integer.valueOf(count + (ec == null ? 0 : ec.intValue())));
            }
            counter.reset();
        }
        this.totalRanges = ranges.size();
        // add to bucket
        Bucket<Range> b = new Bucket<>(size);
        for (Range r : ranges.keySet()) {
            b.add(r, ranges.get(r));
        }
        this.bucket = b;
    }
}

From source file:org.pageseeder.flint.lucene.facet.FlexibleRangeFacet.java

License:Apache License

/**
 * Computes each facet option without a base query.
 *
 * @param searcher the index searcher to use.
 * @param size     the number of facet values to calculate.
 *
 * @throws IOException if thrown by the searcher.
 */
protected void compute(IndexSearcher searcher, int size) throws IOException {
    // find all terms
    List<Term> terms = Terms.terms(searcher.getIndexReader(), this._name);
    DocumentCounter counter = new DocumentCounter();
    Map<Range, Integer> ranges = new HashMap<>();
    for (Term t : terms) {
        // find the range
        Range r = findRange(t);
        if (r == null)
            r = OTHER;
        // find number
        searcher.search(termToQuery(t), counter);
        int count = counter.getCount();
        if (count > 0) {
            // add to map
            Integer ec = ranges.get(r);
            ranges.put(r, Integer.valueOf(count + (ec == null ? 0 : ec.intValue())));
        }
        counter.reset();
    }
    // set totals
    this.totalRanges = ranges.size();
    // add to bucket
    Bucket<Range> b = new Bucket<>(size);
    for (Range r : ranges.keySet()) {
        b.add(r, ranges.get(r));
    }
    this.bucket = b;
}
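Both compute(...) methods above follow the same pattern: enumerate the terms of a field through searcher.getIndexReader(), run one counting search per term, and aggregate the counts. The Terms and DocumentCounter helpers are Flint-specific; the following is a rough, self-contained sketch of the same pattern using only standard Lucene classes (Lucene 8+ style API; the field name is illustrative).

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TotalHitCountCollector;
import org.apache.lucene.util.BytesRef;

public final class PerTermCounts {

    /** Counts matching documents for every term of a field, via the searcher's reader. */
    public static Map<String, Integer> count(IndexSearcher searcher, String field) throws IOException {
        Map<String, Integer> counts = new HashMap<>();
        Terms terms = MultiTerms.getTerms(searcher.getIndexReader(), field);
        if (terms == null) return counts; // field not indexed
        TermsEnum te = terms.iterator();
        for (BytesRef bytes = te.next(); bytes != null; bytes = te.next()) {
            // one counting search per term
            TotalHitCountCollector collector = new TotalHitCountCollector();
            searcher.search(new TermQuery(new Term(field, BytesRef.deepCopyOf(bytes))), collector);
            if (collector.getTotalHits() > 0) {
                counts.put(bytes.utf8ToString(), collector.getTotalHits());
            }
        }
        return counts;
    }
}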

From source file:org.polymap.rhei.fulltext.store.lucene.LuceneFulltextIndex.java

License:Open Source License

@Override
public Iterable<String> propose(String term, int maxResults, String field) throws Exception {
    // no proposals for empty term
    if (term.length() == 0) {
        return Collections.EMPTY_LIST;
    }
    IndexSearcher searcher = store.getIndexSearcher();
    TermEnum terms = searcher.getIndexReader().terms(new Term(field != null ? field : FIELD_ANALYZED, term));
    try {
        // sort descending; accept equal keys
        TreeMap<Integer, String> result = new TreeMap(new Comparator<Integer>() {
            public int compare(Integer o1, Integer o2) {
                return o1.equals(o2) ? -1 : -o1.compareTo(o2);
            }
        });
        // sort
        for (int i = 0; i < maxResults * 3; i++) {
            String proposalTerm = terms.term().text();
            int docFreq = terms.docFreq();
            if (!proposalTerm.startsWith(term)) {
                break;
            }
            log.debug("Proposal: term: " + proposalTerm + ", docFreq: " + docFreq);
            result.put(docFreq, proposalTerm);
            if (!terms.next()) {
                break;
            }
        }
        // take first maxResults
        return limit(result.values(), maxResults);
    } catch (Exception e) {
        log.warn("", e);
        return Collections.EMPTY_LIST;
    } finally {
        terms.close();
    }
}
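The TermEnum used above is the Lucene 3.x API. As a hedged sketch only, the same prefix walk against a newer reader (obtained from searcher.getIndexReader()) can be done with the Lucene 8+ TermsEnum and seekCeil; the field name and result limit here are illustrative.

import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

public final class PrefixProposals {

    /** Walks the terms of a field that start with the typed prefix and records their docFreq. */
    public static Map<String, Integer> propose(IndexReader reader, String field, String prefix, int max)
            throws IOException {
        Map<String, Integer> proposals = new LinkedHashMap<>();
        Terms terms = MultiTerms.getTerms(reader, field);
        if (terms == null || prefix.isEmpty()) return proposals;
        TermsEnum te = terms.iterator();
        // Position the enumeration at the first term >= prefix, then walk while the prefix holds.
        if (te.seekCeil(new BytesRef(prefix)) == TermsEnum.SeekStatus.END) return proposals;
        do {
            String text = te.term().utf8ToString();
            if (!text.startsWith(prefix) || proposals.size() >= max) break;
            proposals.put(text, te.docFreq());
        } while (te.next() != null);
        return proposals;
    }
}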

From source file:org.punksearch.web.statistics.FileTypeStatistics.java

License:Open Source License

private static Hits extractDocsForType(String type) {
    Filter filter = TypeFilters.get(type);
    try {
        IndexSearcher indexSearcher = Core.getIndexReaderHolder().getCurrentSearcher();
        IndexReader indexReader = indexSearcher.getIndexReader();
        final TopDocs topDocs = indexSearcher.search(new MatchAllDocsQuery(), filter, indexReader.numDocs());
        return new Hits(indexSearcher, topDocs);
    } catch (Exception e) {
        log.error("error extractDocsForType", e);
        return null;
    }
}

From source file:org.punksearch.web.statistics.FileTypeStatistics.java

License:Open Source License

public static synchronized Long totalSize() {
    if (totalSizeCache == null || indexChangedAfter(totalSizeCacheTimestamp)) {
        long size = 0;
        try {
            // Rough approximation to the root directories.
            // Obviously, non-latin1 directory names slip through the filter, we'll catch them later
            // Maybe we should use some ranges with UTF8-16 characters... TODO
            String approxQuery = "*:* -Path:{a TO Z*} -Path:{0 TO 9*}";
            QueryParser parser = new QueryParser(LuceneVersion.VERSION, "Host",
                    new SimpleAnalyzer(LuceneVersion.VERSION));
            Query query = parser.parse(approxQuery);
            IndexSearcher indexSearcher = Core.getIndexReaderHolder().getCurrentSearcher();
            IndexReader indexReader = indexSearcher.getIndexReader();
            final TopDocs topDocs = indexSearcher.search(query, indexReader.numDocs());
            Hits hits = new Hits(indexSearcher, topDocs);
            for (int i = 0; i < hits.length(); i++) {
                Document doc = hits.doc(i);
                String path = doc.get(IndexFields.PATH);
                if (!path.equals("/")) {
                    continue;
                }
                size += Long.parseLong(doc.get(IndexFields.SIZE));
            }
        } catch (Exception e) {
            log.error("", e);
        }
        totalSizeCache = size;
        totalSizeCacheTimestamp = System.currentTimeMillis();
    }
    return totalSizeCache;
}

From source file:org.rssowl.core.internal.persist.search.ModelSearchImpl.java

License:Open Source License

/**
 * Can be called multiple times safely because:
 * - close is safe to be called many times in IndexReader and IndexSearcher
 * - No IndexSearcher is ever added again into the fSearchers map, so calling
 *   remove two or more times is harmless.
 */
private void dispose(IndexSearcher searcher) throws IOException {
    fSearchers.remove(searcher);
    searcher.close();
    searcher.getIndexReader().close();
}
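dispose() above closes the searcher and its reader by hand. For comparison, here is a small sketch (assuming an already-open Directory) of the reference-counted alternative Lucene provides: a SearcherManager owns the reader behind each searcher it hands out and closes it once the last release() arrives. This is not the RSSOwl code, just an illustration of the lifecycle idea.

import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.SearcherFactory;
import org.apache.lucene.search.SearcherManager;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;

public final class SearcherLifecycle {

    public static void search(Directory dir) throws Exception {
        SearcherManager manager = new SearcherManager(dir, new SearcherFactory());
        IndexSearcher searcher = manager.acquire();
        try {
            TopDocs hits = searcher.search(new MatchAllDocsQuery(), 10);
            System.out.println("hits: " + hits.totalHits);
        } finally {
            // Decrements the underlying reader's reference count; never close the reader directly.
            manager.release(searcher);
        }
        manager.close(); // once the index is no longer needed
    }
}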

From source file:org.sakaiproject.search.index.impl.ClusterFSIndexStorage.java

License:Educational Community License

public void closeIndexSearcher(IndexSearcher indexSearcher) {
    IndexReader indexReader = indexSearcher.getIndexReader();
    boolean closedAlready = false;
    try {
        if (indexReader != null) {
            indexReader.close();
            closedAlready = true;
        }
    } catch (Exception ex) {
        log.error("Failed to close Index Reader " + ex.getMessage());
    }
    try {
        indexSearcher.close();
    } catch (Exception ex) {
        if (closedAlready) {
            log.debug("Failed to close Index Searcher " + ex.getMessage());
        } else {
            log.error("Failed to close Index Searcher " + ex.getMessage());
        }

    }
}

From source file:org.sakaiproject.search.index.impl.FSIndexStorage.java

License:Educational Community License

protected IndexSearcher getIndexSearcher() throws IOException {
    IndexSearcher indexSearcher = null;
    try {
        long reloadStart = System.currentTimeMillis();
        File indexDirectoryFile = new File(searchIndexDirectory);
        if (!indexDirectoryFile.exists()) {
            if (!indexDirectoryFile.mkdirs()) {
                log.warn("getIdexSearch couldn't create directory " + indexDirectoryFile.getPath());
            }
        }

        File f = new File(searchIndexDirectory);
        indexSearcher = new IndexSearcher(FSDirectory.open(f), false);
        if (indexSearcher == null) {
            log.warn("No search Index exists at this time");

        }
        long reloadEnd = System.currentTimeMillis();
        if (diagnostics) {
            log.info("Reload Complete " + indexSearcher.getIndexReader().numDocs() + " in "
                    + (reloadEnd - reloadStart));
        }

    } catch (FileNotFoundException e) {
        log.error("There has been a major poblem with the" + " Search Index which has become corrupted ", e);
        if (doIndexRecovery()) {
            File f = new File(searchIndexDirectory);
            indexSearcher = new IndexSearcher(FSDirectory.open(f), false);
        }
    } catch (IOException e) {
        log.error("There has been a major poblem with the " + "Search Index which has become corrupted", e);
        if (doIndexRecovery()) {
            Directory dir = FSDirectory.open(new File(searchIndexDirectory));
            indexSearcher = new IndexSearcher(dir, false);
        }
    }
    return indexSearcher;
}

From source file:org.sakaiproject.search.index.soaktest.SearchIndexerNode.java

License:Educational Community License

/**
 * @throws IOException
 */
public void testSearch() {
    try {
        long start1 = System.currentTimeMillis();
        IndexSearcher is = journaledFSIndexStorage.getIndexSearcher();
        TermQuery tq = new TermQuery(new Term(SearchService.FIELD_CONTENTS, "node"));

        long start = System.currentTimeMillis();
        TopDocs h = is.search(tq, 1000);
        long end = System.currentTimeMillis();
        log.debug("Got " + h.totalHits + " hits from " + is.getIndexReader().numDocs() + " for node "
                + instanceName + " in " + (end - start) + ":" + (start - start1) + " ms");
    } catch (Exception ex) {
        log.error("Search Failed with, perhapse due to a file being removed " + ex.getMessage());
    }
}

From source file:org.sakaiproject.search.index.soaktest.SearchIndexerNode.java

License:Educational Community License

public void testSlowSearch() throws Exception {
    long start1 = System.currentTimeMillis();

    log.debug("Getting index searcher");
    IndexSearcher is = journaledFSIndexStorage.getIndexSearcher();
    TermQuery tq = new TermQuery(new Term(SearchService.FIELD_CONTENTS, "node"));

    long start = System.currentTimeMillis();
    log.info("Searching with " + is + " and reader " + is.getIndexReader());
    TopDocs topdocs = is.search(tq, 100000);
    log.info("Performing Search and Sleeping 500ms with " + is);
    Thread.sleep(500);
    log.info("Performing Search and Sleeping 500ms with " + is);
    long end = System.currentTimeMillis();
    log.info("Got " + topdocs.totalHits + " hits from " + is.getIndexReader().numDocs() + " for node "
            + instanceName + " in " + (end - start) + ":" + (start - start1) + " ms");
    // iterate over the actual hits rather than raw document ids
    for (ScoreDoc sd : topdocs.scoreDocs) {
        Document d = is.doc(sd.doc);
        List<Field> e = d.getFields();
        for (int q = 0; q < e.size(); q++) {
            e.get(q);
        }
    }
}