Example usage for org.apache.lucene.search IndexSearcher getIndexReader

List of usage examples for org.apache.lucene.search IndexSearcher getIndexReader

Introduction

On this page you can find example usage for org.apache.lucene.search IndexSearcher getIndexReader.

Prototype

public IndexReader getIndexReader() 

Source Link

Document

Return the IndexReader this searches.

Usage

From source file:com.gentics.cr.lucene.indexaccessor.DefaultMultiIndexAccessor.java

License:Apache License

/**
 * Releases a searcher by handing its underlying reader back to the
 * reader-based release routine.
 */
public synchronized void release(IndexSearcher multiSearcher) {
    // The searcher itself holds no resources beyond its reader.
    final IndexReader underlying = multiSearcher.getIndexReader();
    release(underlying, false);
}

From source file:com.gitblit.LuceneExecutor.java

License:Apache License

/**
 * Close the writer/searcher objects for a repository.
 * /* w  ww. ja v  a2  s .  c  om*/
 * @param repositoryName
 */
public synchronized void close(String repositoryName) {
    try {
        IndexSearcher searcher = searchers.remove(repositoryName);
        if (searcher != null) {
            searcher.getIndexReader().close();
        }
    } catch (Exception e) {
        logger.error("Failed to close index searcher for " + repositoryName, e);
    }

    try {
        IndexWriter writer = writers.remove(repositoryName);
        if (writer != null) {
            writer.close();
        }
    } catch (Exception e) {
        logger.error("Failed to close index writer for " + repositoryName, e);
    }
}

From source file:com.gitblit.LuceneExecutor.java

License:Apache License

/**
 * Evicts the cached searcher for a repository so the next lookup builds a
 * fresh one against the current index state.
 */
private synchronized void resetIndexSearcher(String repository) throws IOException {
    final IndexSearcher stale = searchers.remove(repository);
    if (stale == null) {
        return;
    }
    stale.getIndexReader().close();
}

From source file:com.gitblit.LuceneExecutor.java

License:Apache License

/**
 * Searches the specified repositories for the given text or query.
 *
 * @param text
 *            if the text is null or empty, null is returned
 * @param page
 *            the page number to retrieve. page is 1-indexed.
 * @param pageSize
 *            the number of elements to return for this page
 * @param repositories
 *            a list of repositories to search. if no repositories are
 *            specified null is returned.
 * @return a list of SearchResults in order from highest to the lowest score
 * 
 */
public List<SearchResult> search(String text, int page, int pageSize, String... repositories) {
    if (StringUtils.isEmpty(text)) {
        return null;
    }
    if (ArrayUtils.isEmpty(repositories)) {
        return null;
    }
    // LinkedHashSet preserves hit ordering while de-duplicating results.
    Set<SearchResult> results = new LinkedHashSet<SearchResult>();
    StandardAnalyzer analyzer = new StandardAnalyzer(LUCENE_VERSION);
    try {
        // default search checks summary and content; either clause may match (SHOULD)
        BooleanQuery query = new BooleanQuery();
        QueryParser qp;
        qp = new QueryParser(LUCENE_VERSION, FIELD_SUMMARY, analyzer);
        qp.setAllowLeadingWildcard(true);
        query.add(qp.parse(text), Occur.SHOULD);

        qp = new QueryParser(LUCENE_VERSION, FIELD_CONTENT, analyzer);
        qp.setAllowLeadingWildcard(true);
        query.add(qp.parse(text), Occur.SHOULD);

        IndexSearcher searcher;
        if (repositories.length == 1) {
            // single repository search
            searcher = getIndexSearcher(repositories[0]);
        } else {
            // multiple repository search: combine each repository's reader into
            // one MultiSourceReader so a single search spans all of them
            List<IndexReader> readers = new ArrayList<IndexReader>();
            for (String repository : repositories) {
                IndexSearcher repositoryIndex = getIndexSearcher(repository);
                readers.add(repositoryIndex.getIndexReader());
            }
            IndexReader[] rdrs = readers.toArray(new IndexReader[readers.size()]);
            MultiSourceReader reader = new MultiSourceReader(rdrs);
            searcher = new IndexSearcher(reader);
        }

        Query rewrittenQuery = searcher.rewrite(query);
        logger.debug(rewrittenQuery.toString());

        TopScoreDocCollector collector = TopScoreDocCollector.create(5000, true);
        searcher.search(rewrittenQuery, collector);
        // page is 1-indexed; clamp offset to 0 for defensive paging
        int offset = Math.max(0, (page - 1) * pageSize);
        ScoreDoc[] hits = collector.topDocs(offset, pageSize).scoreDocs;
        int totalHits = collector.getTotalHits();
        for (int i = 0; i < hits.length; i++) {
            int docId = hits[i].doc;
            Document doc = searcher.doc(docId);
            SearchResult result = createSearchResult(doc, hits[i].score, offset + i + 1, totalHits);
            if (repositories.length == 1) {
                // single repository search
                result.repository = repositories[0];
            } else {
                // multi-repository search: map the global docId back to the
                // source reader index to recover which repository it came from
                MultiSourceReader reader = (MultiSourceReader) searcher.getIndexReader();
                int index = reader.getSourceIndex(docId);
                result.repository = repositories[index];
            }
            String content = doc.get(FIELD_CONTENT);
            result.fragment = getHighlightedFragment(analyzer, query, content, result);
            results.add(result);
        }
    } catch (Exception e) {
        // best-effort: any failure is logged and whatever was collected is returned
        logger.error(MessageFormat.format("Exception while searching for {0}", text), e);
    }
    return new ArrayList<SearchResult>(results);
}

From source file:com.gitblit.service.LuceneService.java

License:Apache License

/**
 * Searches the specified repositories for the given text or query.
 *
 * @param text
 *            if the text is null or empty, null is returned
 * @param page
 *            the page number to retrieve. page is 1-indexed.
 * @param pageSize
 *            the number of elements to return for this page
 * @param repositories
 *            a list of repositories to search. if no repositories are
 *            specified null is returned.
 * @return a list of SearchResults in order from highest to the lowest score
 *
 */
public List<SearchResult> search(String text, int page, int pageSize, String... repositories) {
    if (StringUtils.isEmpty(text)) {
        return null;
    }
    if (ArrayUtils.isEmpty(repositories)) {
        return null;
    }
    // LinkedHashSet preserves hit ordering while de-duplicating results.
    Set<SearchResult> results = new LinkedHashSet<SearchResult>();
    StandardAnalyzer analyzer = new StandardAnalyzer();
    try {
        // default search checks summary and content; either clause may match (SHOULD)
        BooleanQuery.Builder bldr = new BooleanQuery.Builder();
        QueryParser qp;
        qp = new QueryParser(FIELD_SUMMARY, analyzer);
        qp.setAllowLeadingWildcard(true);
        bldr.add(qp.parse(text), Occur.SHOULD);

        qp = new QueryParser(FIELD_CONTENT, analyzer);
        qp.setAllowLeadingWildcard(true);
        bldr.add(qp.parse(text), Occur.SHOULD);

        IndexSearcher searcher;
        if (repositories.length == 1) {
            // single repository search
            searcher = getIndexSearcher(repositories[0]);
        } else {
            // multiple repository search: combine each repository's reader into
            // one MultiSourceReader so a single search spans all of them
            List<IndexReader> readers = new ArrayList<IndexReader>();
            for (String repository : repositories) {
                IndexSearcher repositoryIndex = getIndexSearcher(repository);
                readers.add(repositoryIndex.getIndexReader());
            }
            IndexReader[] rdrs = readers.toArray(new IndexReader[readers.size()]);
            MultiSourceReader reader = new MultiSourceReader(rdrs);
            searcher = new IndexSearcher(reader);
        }

        BooleanQuery query = bldr.build();
        Query rewrittenQuery = searcher.rewrite(query);
        logger.debug(rewrittenQuery.toString());

        TopScoreDocCollector collector = TopScoreDocCollector.create(5000);
        searcher.search(rewrittenQuery, collector);
        // page is 1-indexed; clamp offset to 0 for defensive paging
        int offset = Math.max(0, (page - 1) * pageSize);
        ScoreDoc[] hits = collector.topDocs(offset, pageSize).scoreDocs;
        int totalHits = collector.getTotalHits();
        for (int i = 0; i < hits.length; i++) {
            int docId = hits[i].doc;
            Document doc = searcher.doc(docId);
            SearchResult result = createSearchResult(doc, hits[i].score, offset + i + 1, totalHits);
            if (repositories.length == 1) {
                // single repository search
                result.repository = repositories[0];
            } else {
                // multi-repository search: map the global docId back to the
                // source reader index to recover which repository it came from
                MultiSourceReader reader = (MultiSourceReader) searcher.getIndexReader();
                int index = reader.getSourceIndex(docId);
                result.repository = repositories[index];
            }
            String content = doc.get(FIELD_CONTENT);
            result.fragment = getHighlightedFragment(analyzer, query, content, result);
            results.add(result);
        }
    } catch (Exception e) {
        // best-effort: any failure is logged and whatever was collected is returned
        logger.error(MessageFormat.format("Exception while searching for {0}", text), e);
    }
    return new ArrayList<SearchResult>(results);
}

From source file:com.github.buzztaiki.lucene.lastuni.CJKSingleCharQueryTest.java

License:Apache License

/**
 * Parses the query with the given analyzer and runs it against the
 * directory, returning the top 10 hits. The reader opened for the search is
 * always closed before returning.
 */
private TopDocs search(Directory dir, Analyzer analyzer, String query) throws IOException, ParseException {
    final IndexSearcher searcher = newSearcher(DirectoryReader.open(dir));
    try {
        final Query parsed = newQueryParser(analyzer).parse(query);
        return searcher.search(parsed, 10);
    } finally {
        // release the reader opened above, even on parse/search failure
        searcher.getIndexReader().close();
    }
}

From source file:com.github.rnewson.couchdb.lucene.DatabaseIndexer.java

License:Apache License

/**
 * Runs every query string from the request against the index and writes the
 * results as JSON (or JSONP when a "callback" parameter is supplied).
 * Supports a "rewrite" debug mode that reports the rewritten query and term
 * frequencies instead of executing the search.
 */
public void search(final HttpServletRequest req, final HttpServletResponse resp)
        throws IOException, JSONException {
    final IndexState state = getState(req, resp);
    if (state == null)
        return;
    // Borrow a searcher from the index state; it MUST be returned in the
    // finally block below.
    final IndexSearcher searcher = state.borrowSearcher(isStaleOk(req));
    final String etag = state.getEtag();
    final FastVectorHighlighter fvh = new FastVectorHighlighter(true, true);
    final JSONArray result = new JSONArray();
    try {
        if (state.notModified(req)) {
            resp.setStatus(304);
            return;
        }
        for (final String queryString : getQueryStrings(req)) {
            final Analyzer analyzer = state.analyzer(req.getParameter("analyzer"));
            final Operator operator = "and".equalsIgnoreCase(req.getParameter("default_operator"))
                    ? Operator.AND
                    : Operator.OR;
            final Query q = state.parse(queryString, operator, analyzer);

            final JSONObject queryRow = new JSONObject();
            queryRow.put("q", q.toString());
            if (getBooleanParameter(req, "debug")) {
                queryRow.put("plan", QueryPlan.toPlan(q));
                queryRow.put("analyzer", analyzer.getClass());
            }
            queryRow.put("etag", etag);
            if (getBooleanParameter(req, "rewrite")) {
                // "rewrite" mode: report the rewritten query and per-term
                // document frequencies instead of running the search.
                final Query rewritten_q = q.rewrite(searcher.getIndexReader());
                queryRow.put("rewritten_q", rewritten_q.toString());

                final JSONObject freqs = new JSONObject();

                final Set<Term> terms = new HashSet<Term>();
                rewritten_q.extractTerms(terms);
                for (final Object term : terms) {
                    final int freq = searcher.getIndexReader().docFreq((Term) term);
                    freqs.put(term.toString(), freq);
                }
                queryRow.put("freqs", freqs);
            } else {
                // Perform the search.
                final TopDocs td;
                final StopWatch stopWatch = new StopWatch();

                final boolean include_docs = getBooleanParameter(req, "include_docs");
                final int highlights = getIntParameter(req, "highlights", 0);
                final int highlight_length = max(getIntParameter(req, "highlight_length", 18), 18); // min for fast term vector highlighter is 18
                final boolean include_termvectors = getBooleanParameter(req, "include_termvectors");
                final int limit = getIntParameter(req, "limit", ini.getInt("lucene.limit", 25));
                final Sort sort = CustomQueryParser.toSort(req.getParameter("sort"));
                final int skip = getIntParameter(req, "skip", 0);

                // Optional whitelist of stored fields to load per hit.
                final Set<String> fieldsToLoad;
                if (req.getParameter("include_fields") == null) {
                    fieldsToLoad = null;
                } else {
                    final String[] fields = Utils.splitOnCommas(req.getParameter("include_fields"));
                    final List<String> list = Arrays.asList(fields);
                    fieldsToLoad = new HashSet<String>(list);
                }

                // skip+limit docs are collected so paging can slice locally.
                if (sort == null) {
                    td = searcher.search(q, null, skip + limit);
                } else {
                    td = searcher.search(q, null, skip + limit, sort);
                }
                stopWatch.lap("search");

                // Fetch matches (if any).
                final int max = Math.max(0, Math.min(td.totalHits - skip, limit));
                final JSONArray rows = new JSONArray();
                final String[] fetch_ids = new String[max];
                for (int i = skip; i < skip + max; i++) {
                    final Document doc;
                    if (fieldsToLoad == null) {
                        doc = searcher.doc(td.scoreDocs[i].doc);
                    } else {
                        doc = searcher.doc(td.scoreDocs[i].doc, fieldsToLoad);
                    }

                    final JSONObject row = new JSONObject();
                    final JSONObject fields = new JSONObject();
                    final JSONObject highlight_rows = new JSONObject();

                    // Include stored fields.
                    for (final IndexableField f : doc.getFields()) {
                        if (!f.fieldType().stored()) {
                            continue;
                        }
                        final String name = f.name();
                        final Object value;
                        if (f.numericValue() != null) {
                            value = f.numericValue();
                        } else {
                            value = f.stringValue();
                        }
                        if (value != null) {
                            if ("_id".equals(name)) {
                                row.put("id", value);
                            } else {
                                if (!fields.has(name)) {
                                    fields.put(name, value);
                                } else {
                                    // Repeated field: promote the single value
                                    // to an array, then append.
                                    final Object obj = fields.get(name);
                                    if (obj instanceof String || obj instanceof Number) {
                                        final JSONArray arr = new JSONArray();
                                        arr.put(obj);
                                        arr.put(value);
                                        fields.put(name, arr);
                                    } else {
                                        assert obj instanceof JSONArray;
                                        ((JSONArray) obj).put(value);
                                    }
                                }

                                if (highlights > 0) {
                                    String[] frags = fvh.getBestFragments(fvh.getFieldQuery(q),
                                            searcher.getIndexReader(), td.scoreDocs[i].doc, name,
                                            highlight_length, highlights);
                                    highlight_rows.put(name, frags);
                                }
                            }
                        }
                    }

                    if (!Float.isNaN(td.scoreDocs[i].score)) {
                        row.put("score", td.scoreDocs[i].score);
                    } // Include sort order (if any).
                    if (td instanceof TopFieldDocs) {
                        final FieldDoc fd = (FieldDoc) ((TopFieldDocs) td).scoreDocs[i];
                        row.put("sort_order", fd.fields);
                    }
                    // Fetch document (if requested).
                    if (include_docs) {
                        fetch_ids[i - skip] = doc.get("_id");
                    }
                    if (fields.length() > 0) {
                        row.put("fields", fields);
                    }
                    if (highlight_rows.length() > 0) {
                        row.put("highlights", highlight_rows);
                    }

                    rows.put(row);
                }
                // Fetch documents (if requested).
                if (include_docs && fetch_ids.length > 0) {
                    final List<CouchDocument> fetched_docs = database.getDocuments(fetch_ids);
                    for (int j = 0; j < max; j++) {
                        final CouchDocument doc = fetched_docs.get(j);
                        final JSONObject row = doc == null ? new JSONObject("{\"error\":\"not_found\"}")
                                : doc.asJson();
                        rows.getJSONObject(j).put("doc", row);
                    }
                }
                stopWatch.lap("fetch");

                queryRow.put("skip", skip);
                queryRow.put("limit", limit);
                queryRow.put("total_rows", td.totalHits);
                queryRow.put("search_duration", stopWatch.getElapsed("search"));
                queryRow.put("fetch_duration", stopWatch.getElapsed("fetch"));
                // Include sort info (if requested).
                if (td instanceof TopFieldDocs) {
                    queryRow.put("sort_order", CustomQueryParser.toJSON(((TopFieldDocs) td).fields));
                }
                queryRow.put("rows", rows);
            }
            result.put(queryRow);
        }
    } catch (final ParseException e) {
        ServletUtils.sendJsonError(req, resp, 400, "Bad query syntax: " + e.getMessage());
        return;
    } finally {
        state.returnSearcher(searcher);
    }

    resp.setHeader("ETag", etag);
    resp.setHeader("Cache-Control", "must-revalidate");
    ServletUtils.setResponseContentTypeAndEncoding(req, resp);

    // A single query yields a bare object, multiple yield an array.
    // NOTE(review): getJSONObject(0) assumes at least one query string was
    // present — confirm getQueryStrings never returns an empty collection.
    final Object json = result.length() > 1 ? result : result.getJSONObject(0);
    final String callback = req.getParameter("callback");
    final String body;
    if (callback != null) {
        // JSONP wrapping when a callback parameter is supplied.
        body = String.format("%s(%s)", callback, json);
    } else {
        if (json instanceof JSONObject) {
            final JSONObject obj = (JSONObject) json;
            body = getBooleanParameter(req, "debug") ? obj.toString(2) : obj.toString();
        } else {
            final JSONArray arr = (JSONArray) json;
            body = getBooleanParameter(req, "debug") ? arr.toString(2) : arr.toString();
        }
    }

    final Writer writer = resp.getWriter();
    try {
        writer.write(body);
    } finally {
        writer.close();
    }
}

From source file:com.github.rnewson.couchdb.lucene.SearchRequest.java

License:Apache License

/**
 * Executes this search request against the given searcher and returns the
 * response (status code, headers, and body) serialized as a JSON string.
 * Supports a rewrite-only debug mode that reports the rewritten query and
 * per-term document frequencies instead of running the search.
 */
public String execute(final IndexSearcher searcher) throws IOException {
    // Decline requests over MAX_LIMIT.
    if (limit > Config.MAX_LIMIT) {
        return "{\"code\":400,\"body\":\"max limit was exceeded.\"}";
    }
    // Return "304 - Not Modified" if etag matches.
    final String etag = getETag(searcher);
    if (!debug && etag.equals(this.ifNoneMatch)) {
        return "{\"code\":304}";
    }

    final JSONObject json = new JSONObject();
    json.put("q", q.toString());
    json.put("etag", etag);

    if (rewrite_query) {
        // Rewrite-only mode: expand the query and report term frequencies.
        final Query rewritten_q = q.rewrite(searcher.getIndexReader());
        json.put("rewritten_q", rewritten_q.toString());
        final JSONObject freqs = new JSONObject();

        final Set terms = new HashSet();
        rewritten_q.extractTerms(terms);
        for (final Object term : terms) {
            final int freq = searcher.docFreq((Term) term);
            freqs.put(term, freq);
        }
        json.put("freqs", freqs);
    } else {
        // Perform search.
        final TopDocs td;
        final StopWatch stopWatch = new StopWatch();
        // skip+limit docs are collected so paging can slice locally.
        if (sort == null) {
            td = searcher.search(q, filter, skip + limit);
        } else {
            td = searcher.search(q, filter, skip + limit, sort);
        }
        stopWatch.lap("search");
        // Fetch matches (if any).
        final int max = max(0, min(td.totalHits - skip, limit));

        final JSONArray rows = new JSONArray();
        final String[] fetch_ids = new String[max];
        for (int i = skip; i < skip + max; i++) {
            final Document doc = searcher.doc(td.scoreDocs[i].doc);
            final JSONObject row = new JSONObject();
            final JSONObject fields = new JSONObject();

            // Include stored fields.
            for (Object f : doc.getFields()) {
                Field fld = (Field) f;

                if (!fld.isStored())
                    continue;
                String name = fld.name();
                String value = fld.stringValue();
                if (value != null) {
                    if (Config.ID.equals(name)) {
                        row.put("id", value);
                    } else {
                        if (!fields.has(name)) {
                            fields.put(name, value);
                        } else {
                            // Repeated field: promote the single value to an
                            // array, then append.
                            final Object obj = fields.get(name);
                            if (obj instanceof String) {
                                final JSONArray arr = new JSONArray();
                                arr.add((String) obj);
                                arr.add(value);
                                fields.put(name, arr);
                            } else {
                                assert obj instanceof JSONArray;
                                ((JSONArray) obj).add(value);
                            }
                        }
                    }
                }
            }

            row.put("score", td.scoreDocs[i].score);
            // Include sort order (if any).
            if (td instanceof TopFieldDocs) {
                final FieldDoc fd = (FieldDoc) ((TopFieldDocs) td).scoreDocs[i];
                row.put("sort_order", fd.fields);
            }
            // Fetch document (if requested).
            if (include_docs) {
                fetch_ids[i - skip] = doc.get(Config.ID);
            }
            if (fields.size() > 0) {
                row.put("fields", fields);
            }
            rows.add(row);
        }
        // Fetch documents (if requested).
        if (include_docs) {
            final JSONArray fetched_docs = DB.getDocs(dbname, fetch_ids).getJSONArray("rows");
            for (int i = 0; i < max; i++) {
                rows.getJSONObject(i).put("doc", fetched_docs.getJSONObject(i).getJSONObject("doc"));
            }
        }
        stopWatch.lap("fetch");

        json.put("skip", skip);
        json.put("limit", limit);
        json.put("total_rows", td.totalHits);
        json.put("search_duration", stopWatch.getElapsed("search"));
        json.put("fetch_duration", stopWatch.getElapsed("fetch"));
        // Include sort info (if requested).
        if (td instanceof TopFieldDocs) {
            json.put("sort_order", toString(((TopFieldDocs) td).fields));
        }
        json.put("rows", rows);
    }

    final JSONObject result = new JSONObject();
    result.put("code", 200);

    final JSONObject headers = new JSONObject();
    headers.put("Cache-Control", "max-age=" + Config.COMMIT_MAX / 1000);
    // Results can't change unless the IndexReader does.
    headers.put("ETag", etag);
    result.put("headers", headers);

    if (debug) {
        result.put("body", String.format("<pre>%s</pre>", StringEscapeUtils.escapeHtml(json.toString(2))));
    } else {
        result.put("json", json);
    }

    return result.toString();
}

From source file:com.github.rnewson.couchdb.lucene.SearchRequest.java

License:Apache License

/**
 * Derives an ETag from the index version; the version changes on every
 * commit, so it doubles as a cache validator.
 */
private String getETag(final IndexSearcher searcher) {
    final long indexVersion = searcher.getIndexReader().getVersion();
    return Long.toHexString(indexVersion);
}

From source file:com.github.tteofili.apacheconeu14.oak.search.nls.NLSQueryIndex.java

License:Apache License

/**
 * Trains the classifier on the "jcr:title" field, labelled by
 * "jcr:primaryType", over an atomic (slow-wrapped composite) view of the
 * searcher's index.
 */
private void initializeClassifier(IndexSearcher searcher) {
    try {
        classifier.train(SlowCompositeReaderWrapper.wrap(searcher.getIndexReader()), "jcr:title",
                "jcr:primaryType", new StandardAnalyzer());
    } catch (IOException e) {
        // error in training — swallowed deliberately; NOTE(review): consider
        // logging so training failures are visible to operators.
    }
}