Example usage for org.apache.lucene.search IndexSearcher getIndexReader

Introduction

On this page you can find usage examples for org.apache.lucene.search.IndexSearcher.getIndexReader().

Prototype

public IndexReader getIndexReader() 

Document

Return the IndexReader this searches.
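
Before the project examples, here is a minimal, self-contained sketch of the call itself. It is not taken from any of the sources below and is written against a recent Lucene API (5+); the index path is an illustrative assumption.

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class GetIndexReaderSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical index location; point this at an existing index directory.
        Directory directory = FSDirectory.open(Paths.get("/tmp/example-index"));
        IndexReader reader = DirectoryReader.open(directory);
        IndexSearcher searcher = new IndexSearcher(reader);

        // getIndexReader() returns the reader this searcher searches over,
        // which is useful for inspecting the index (document counts, leaves,
        // stored fields, and so on).
        IndexReader sameReader = searcher.getIndexReader();
        System.out.println("docs in index: " + sameReader.numDocs());

        reader.close();
        directory.close();
    }
}

Note that several of the examples below were written against older Lucene 3.x APIs (for instance IndexReader.FieldOption and IndexSearcher.close(), which were later removed), but the getIndexReader() call itself is used the same way across versions.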

Usage

From source file:org.oclc.os.SRW.Lucene.BasicLuceneQueryTranslator.java

License:Apache License

public void init(Properties properties, IndexSearcher searcher) throws InstantiationException {
    SRWLuceneDatabase.makeIndexInfo(properties, searcher, indexMappings);

    // to make a QueryParser, we need to figure out what the default search
    // field is and what analyzers to apply.
    Analyzer defaultAnalyzer;
    String defaultField = (String) indexMappings.get("cql.serverChoice");
    String defaultAnalyzerName = (String) properties.get("analyzer.default");
    if (defaultAnalyzerName == null || defaultAnalyzerName.length() == 0)
        defaultAnalyzer = new WhitespaceAnalyzer();
    else
        defaultAnalyzer = getAnalyzer(defaultAnalyzerName);
    PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(defaultAnalyzer);
    // any other analyzers?
    Collection c = searcher.getIndexReader().getFieldNames(IndexReader.FieldOption.INDEXED);
    Iterator iter = c.iterator();
    String analyzerName, field;
    while (iter.hasNext()) {
        field = (String) iter.next();
        analyzerName = (String) properties.get("analyzer." + field);
        if (analyzerName != null && analyzerName.length() > 0)
            analyzer.addAnalyzer(field, getAnalyzer(analyzerName));
    }

    qp = new QueryParser(defaultField, analyzer);
}

From source file:org.oclc.os.SRW.Lucene.SRWLuceneDatabase.java

License:Apache License

public static String makeIndexInfo(Properties props, IndexSearcher searcher,
        Hashtable<String, String> indexMappings) {
    Collection c = searcher.getIndexReader().getFieldNames(IndexReader.FieldOption.INDEXED);
    Hashtable<String, String> sets = new Hashtable<String, String>();
    int indexNum = 0;
    String index, indexSet, luceneIndex, prop;
    StringBuffer sb = new StringBuffer("        <indexInfo>\n");
    StringTokenizer st;

    Iterator iter = c.iterator();
    while (iter.hasNext()) {
        index = (String) iter.next();
        props.put("qualifier.local." + index, index);
    }
    makeUnqualifiedIndexes(props);

    Enumeration enumer = props.propertyNames();
    while (enumer.hasMoreElements()) {
        prop = (String) enumer.nextElement();
        if (prop.startsWith("qualifier.")) {
            st = new StringTokenizer(prop.substring(10));
            index = st.nextToken();
            st = new StringTokenizer(index, ".");
            if (st.countTokens() == 1) {
                indexSet = "local";
                index = prop.substring(10);
            } else {
                indexSet = st.nextToken();
                index = prop.substring(10 + indexSet.length() + 1);
            }

            if (log.isDebugEnabled())
                log.debug("indexSet=" + indexSet + ", index=" + index);
            if (sets.get(indexSet) == null) { // new set
                sb.append("          <set identifier=\"").append(props.getProperty("indexSet." + indexSet))
                        .append("\" name=\"").append(indexSet).append("\"/>\n");
                sets.put(indexSet, indexSet);
            }
            sb.append("          <index>\n").append("            <title>").append(indexSet).append('.')
                    .append(index).append("</title>\n").append("            <map>\n")
                    .append("              <name set=\"").append(indexSet).append("\">").append(index)
                    .append("</name>\n").append("              </map>\n").append("            </index>\n");

            if (indexMappings != null) {
                // now for a bit of trickery for the CQL parser
                // the line we just read isn't in the format the parser
                // expects.  we just read:
                // qualifier.<indexSet>.indexName=luceneIndexName
                // the parser is expecting:
                // qualifier.<indexSet>.indexName=1=<z39.50UseAttribute>
                // it doesn't really care what Use attribute we provide,
                // so we'll make up Use attribute numbers to correspond
                // with the lucene indexes.
                luceneIndex = props.getProperty(prop);
                indexMappings.put(indexSet + "." + index, luceneIndex);
                if (log.isDebugEnabled())
                    log.debug("mapping " + indexSet + "." + index + " to " + luceneIndex);
                props.put(prop, "1=" + (++indexNum));
            }
        } else if (prop.startsWith("hiddenQualifier.")) {
            st = new StringTokenizer(prop.substring(16));
            index = st.nextToken();
            if (indexMappings != null) {
                // now for a bit of trickery for the CQL parser
                // the line we just read isn't in the format the parser
                // expects.  we just read:
                // qualifier.<indexSet>.indexName=luceneIndexName
                // the parser is expecting:
                // qualifier.<indexSet>.indexName=1=<z39.50UseAttribute>
                // it doesn't really care what Use attribute we provide,
                // so we'll make up Use attribute numbers to correspond
                // with the lucene indexes.
                luceneIndex = props.getProperty(prop);
                indexMappings.put(index, luceneIndex);
                if (log.isDebugEnabled())
                    log.debug("mapping " + index + " to " + luceneIndex);
                props.put(prop, "1=" + (++indexNum));
            }
        }
    }
    sb.append("          </indexInfo>\n");
    return sb.toString();
}

From source file:org.opencms.search.CmsSearchIndex.java

License:Open Source License

/**
 * Closes the given Lucene index searcher.<p>
 *
 * @param searcher the searcher to close
 */
protected synchronized void indexSearcherClose(IndexSearcher searcher) {

    // in case there is an index searcher available close it
    if ((searcher != null) && (searcher.getIndexReader() != null)) {
        try {
            searcher.getIndexReader().close();
            searcher.close();
        } catch (Exception e) {
            LOG.error(Messages.get().getBundle().key(Messages.ERR_INDEX_SEARCHER_CLOSE_1, getName()), e);
        }
    }
}

From source file:org.opencms.search.CmsSearchIndex.java

License:Open Source License

/**
 * Reopens the Lucene index search reader for this index, required after the index has been changed.<p>
 *
 * @see #indexSearcherOpen(String)
 */
protected synchronized void indexSearcherUpdate() {

    IndexSearcher oldSearcher = m_indexSearcher;
    if ((oldSearcher != null) && (oldSearcher.getIndexReader() != null)) {
        // in case there is an index searcher available close it
        try {
            IndexReader newReader = IndexReader.openIfChanged(oldSearcher.getIndexReader(), true);
            if (newReader != null) {
                m_indexSearcher = new IndexSearcher(newReader);
                indexSearcherClose(oldSearcher);
            }
        } catch (Exception e) {
            LOG.error(Messages.get().getBundle().key(Messages.ERR_INDEX_SEARCHER_REOPEN_1, getName()), e);
        }
    } else {
        // make sure we end up with an open index searcher / reader           
        indexSearcherOpen(m_path);
    }
}

From source file:org.opensextant.solrtexttagger.TaggerRequestHandler.java

License:Open Source License

public ValueSourceAccessor(IndexSearcher searcher, ValueSource valueSource) {
    readerContexts = searcher.getIndexReader().leaves();
    this.valueSource = valueSource;
    docValuesArr = new FunctionValues[readerContexts.size()];
    fContext = ValueSource.newContext(searcher);
}

From source file:org.opentravel.schemacompiler.index.QueryTask.java

License:Apache License

protected void displayIndex() {
    IndexSearcher searcher = null;
    try {
        searcher = searchManager.acquire();
        IndexReader reader = searcher.getIndexReader();

        for (int i = 0; i < reader.maxDoc(); i++) {
            Document doc = reader.document(i);

            System.out.println("DOCUMENT: " + doc.get(IDENTITY_FIELD));
            System.out.println("  " + BASE_NAMESPACE_FIELD + " : " + doc.get(BASE_NAMESPACE_FIELD));
            System.out.println("  " + FILENAME_FIELD + " : " + doc.get(FILENAME_FIELD));
            System.out.println("  " + STATUS_FIELD + " : " + doc.get(STATUS_FIELD));
            System.out.println("  " + VERSION_FIELD + " : " + doc.get(VERSION_FIELD));
            System.out.println("  " + VERSION_TYPE_FIELD + " : " + doc.get(VERSION_TYPE_FIELD));
        }
    } catch (Throwable t) {
        t.printStackTrace(System.out);

    } finally {
        try {
            if (searcher != null)
                searchManager.release(searcher);

        } catch (IOException e) {
            // Ignore error and continue
        }
    }
}

From source file:org.pageseeder.flint.lucene.facet.FlexibleFieldFacet.java

License:Apache License

/**
 * Computes each facet option as a flexible facet.
 * All filters except those using the same field as this facet are applied to the base query before computing the counts.
 *
 * @param searcher the index searcher to use.
 * @param base     the base query.
 * @param filters  the filters applied to the base query (ignored if the base query is null)
 * @param size     the maximum number of field values to compute.
 *
 * @throws IOException if thrown by the searcher.
 */
public void compute(IndexSearcher searcher, Query base, List<Filter> filters, int size) throws IOException {
    // If the base is null, simply calculate for each query
    if (base == null) {
        compute(searcher, size);
    } else {
        if (size < 0)
            throw new IllegalArgumentException("size < 0");
        // reset
        this.totalTerms = size == 0 ? -1 : 0;
        this.hasResults = false;
        this.bucket = null;
        // re-compute the query without the corresponding filter (for flexible facets)
        Query filtered = base;
        if (filters != null) {
            this.flexible = true;
            for (Filter filter : filters) {
                if (!this._name.equals(filter.name()))
                    filtered = filter.filterQuery(filtered);
            }
        }
        // try wildcard query as it's faster, but if it fails go through all terms
        if (size == 0)
            try {
                BooleanQuery query = new BooleanQuery();
                query.add(filtered, Occur.MUST);
                query.add(new WildcardQuery(new Term(this._name, "*")), Occur.MUST);
                TopDocs td = searcher.search(query, 1);
                this.hasResults = td.totalHits > 0;
                return;
            } catch (Exception ex) {
                // oh well go through terms then
            }
        // find all terms
        List<Term> terms = Terms.terms(searcher.getIndexReader(), name());
        if (this._maxTerms > 0 && terms.size() > this._maxTerms)
            return;
        // loop through terms
        DocumentCounter counter = new DocumentCounter();
        Bucket<String> bucket = new Bucket<>(size);
        for (Term t : terms) {
            BooleanQuery query = new BooleanQuery();
            query.add(filtered, Occur.MUST);
            query.add(termToQuery(t), Occur.MUST);
            if (size == 0) {
                // we just want to know if there are results,
                // so load only one and stop when we get one
                TopDocs td = searcher.search(query, 1);
                if (td.totalHits > 0) {
                    this.hasResults = true;
                    return;
                }
            } else {
                // count results
                searcher.search(query, counter);
                int count = counter.getCount();
                bucket.add(t.text(), count);
                counter.reset();
                if (count > 0) {
                    this.totalTerms++;
                    this.hasResults = true;
                }
            }
        }
        if (size != 0)
            this.bucket = bucket;
    }
}

From source file:org.pageseeder.flint.lucene.facet.FlexibleFieldFacet.java

License:Apache License

/**
 * Computes each facet option without a base query.
 *
 * @param searcher the index searcher to use.
 * @param size     the number of facet values to calculate.
 *
 * @throws IOException if thrown by the searcher.
 */
private void compute(IndexSearcher searcher, int size) throws IOException {
    if (size == 0) {
        // reset
        this.totalTerms = -1;
        this.bucket = null;
        // check if there are terms
        this.hasResults = !Terms.terms(searcher.getIndexReader(), this._name).isEmpty();
    } else {
        // reset
        this.totalTerms = 0;
        this.hasResults = false;
        this.bucket = null;
        // find all terms
        List<Term> terms = Terms.terms(searcher.getIndexReader(), this._name);
        if (this._maxTerms > 0 && terms.size() > this._maxTerms)
            return;
        Bucket<String> bucket = new Bucket<>(size);
        DocumentCounter counter = new DocumentCounter();
        for (Term t : terms) {
            searcher.search(termToQuery(t), counter);
            bucket.add(t.text(), counter.getCount());
            counter.reset();
            this.totalTerms++;
            this.hasResults = true;
        }
        // set bucket
        this.bucket = bucket;
    }
}

From source file:org.pageseeder.flint.lucene.facet.FlexibleIntervalFacet.java

License:Apache License

/**
 * Computes each facet option as a flexible facet.
 * All filters except those using the same field as this facet are applied to the base query before computing the counts.
 *
 * @param searcher the index searcher to use.
 * @param base     the base query.
 * @param filters  the filters applied to the base query (ignored if the base query is null)
 * @param size     the maximum number of field values to compute.
 *
 * @throws IOException if thrown by the searcher.
 */
public void compute(IndexSearcher searcher, Query base, List<Filter> filters, int size) throws IOException {
    // If the base is null, simply calculate for each query
    if (base == null) {
        compute(searcher, size);
    } else {
        if (size < 0)
            throw new IllegalArgumentException("size < 0");
        // reset total terms
        this.totalIntervals = 0;
        // find all terms
        List<Term> terms = Terms.terms(searcher.getIndexReader(), this._name);
        // Otherwise, re-compute the query without the corresponding filter 
        Query filtered = base;
        if (filters != null) {
            this.flexible = true;
            for (Filter filter : filters) {
                if (!this._name.equals(filter.name()))
                    filtered = filter.filterQuery(filtered);
            }
        }
        Map<Interval, Integer> intervals = new HashMap<>();
        DocumentCounter counter = new DocumentCounter();
        for (Term t : terms) {
            // find range
            Interval r = findInterval(t);
            if (r == null)
                continue;
            // find count
            BooleanQuery query = new BooleanQuery();
            query.add(filtered, Occur.MUST);
            query.add(termToQuery(t), Occur.MUST);
            searcher.search(query, counter);
            int count = counter.getCount();
            if (count > 0) {
                // add to map
                Integer ec = intervals.get(r);
                intervals.put(r, Integer.valueOf(count + (ec == null ? 0 : ec.intValue())));
                // check size to stop computing if too big
                if (this._maxIntervals > 0 && intervals.size() > this._maxIntervals)
                    return;
            }
            counter.reset();
        }
        this.totalIntervals = intervals.size();
        // add to bucket
        Bucket<Interval> b = new Bucket<Interval>(size);
        for (Interval interval : intervals.keySet()) {
            b.add(interval, intervals.get(interval));
        }
        this.bucket = b;
    }
}

From source file:org.pageseeder.flint.lucene.facet.FlexibleIntervalFacet.java

License:Apache License

/**
 * Computes each facet option without a base query.
 *
 * @param searcher the index searcher to use.
 * @param size     the number of facet values to calculate.
 *
 * @throws IOException if thrown by the searcher.
 */
private void compute(IndexSearcher searcher, int size) throws IOException {
    // find all terms
    List<Term> terms = Terms.terms(searcher.getIndexReader(), this._name);
    DocumentCounter counter = new DocumentCounter();
    Map<Interval, Integer> intervals = new HashMap<>();
    for (Term t : terms) {
        // find the range
        Interval interval = findInterval(t);
        if (interval == null)
            continue;
        // find number
        searcher.search(termToQuery(t), counter);
        int count = counter.getCount();
        if (count > 0) {
            // add to map
            Integer ec = intervals.get(interval);
            intervals.put(interval, Integer.valueOf(count + (ec == null ? 0 : ec.intValue())));
            // check size to stop computing if too big
            if (this._maxIntervals > 0 && intervals.size() > this._maxIntervals)
                return;
        }
        counter.reset();
    }
    // set totals
    this.totalIntervals = intervals.size();
    // add to bucket
    Bucket<Interval> b = new Bucket<>(size);
    for (Interval interval : intervals.keySet()) {
        b.add(interval, intervals.get(interval));
    }
    this.bucket = b;
}