List of usage examples for the method org.apache.lucene.search.IndexSearcher#search
public <C extends Collector, T> T search(Query query, CollectorManager<C, T> collectorManager) throws IOException
From source file:aos.lucene.tools.RegexQueryTest.java
License:Apache License
public void testRegexQuery() throws Exception { Directory directory = TestUtil.getBookIndexDirectory(); IndexSearcher searcher = new IndexSearcher(directory); RegexQuery q = new RegexQuery(new Term("title", ".*st.*")); TopDocs hits = searcher.search(q, 10); assertEquals(2, hits.totalHits);//from ww w .j ava2 s. co m assertTrue(TestUtil.hitsIncludeTitle(searcher, hits, "Tapestry in Action")); assertTrue( TestUtil.hitsIncludeTitle(searcher, hits, "Mindstorms: Children, Computers, And Powerful Ideas")); searcher.close(); directory.close(); }
From source file:app.finder.topicsource.service.SearchFiles.java
License:Apache License
/** * This demonstrates a typical paging search scenario, where the search * engine presents pages of size n to the user. The user can then go to the * next page if interested in the next hits. * /*from w w w.j av a 2s . c o m*/ * When the query is executed for the first time, then only enough results * are collected to fill 5 result pages. If the user wants to page beyond * this limit, then the query is executed another time and all hits are * collected. * */ public static List<String> doSearch(BufferedReader in, IndexSearcher searcher, Query query, int hitsPerPage, boolean raw, boolean interactive) throws IOException { List<String> list = new ArrayList<String>(); // Collect enough docs to show 5 pages TopDocs results = searcher.search(query, 5 * hitsPerPage); ScoreDoc[] hits = results.scoreDocs; int numTotalHits = results.totalHits; //System.out.println(numTotalHits + " total matching documents"); int start = 0; int end = Math.min(numTotalHits, hitsPerPage); for (int i = start; i < end; i++) { Document doc = searcher.doc(hits[i].doc); String path = doc.get("path"); if (path != null) { //System.out.println((i + 1) + ". " + path); list.add(path); String title = doc.get("title"); // if (title != null) { // System.out.println(" Title: " + doc.get("title")); // } } else { System.out.println((i + 1) + ". " + "No path for this document."); } } return list; }
From source file:app.SearchFiles.java
License:Apache License
/** 139 * This demonstrates a typical paging search scenario, where the search engine presents 140 * pages of size n to the user. The user can then go to the next page if interested in 141 * the next hits.//from ww w. j a v a 2s. com 142 * 143 * When the query is executed for the first time, then only enough results are collected 144 * to fill 5 result pages. If the user wants to page beyond this limit, then the query 145 * is executed another time and all hits are collected. 146 * 147 */ public static void doPagingSearch(BufferedReader in, IndexSearcher searcher, Query query, int hitsPerPage, boolean raw, boolean interactive) throws IOException { // Collect enough docs to show 5 pages TopDocs results = searcher.search(query, 5 * hitsPerPage); ScoreDoc[] hits = results.scoreDocs; int numTotalHits = results.totalHits; System.out.println(numTotalHits + " total matching documents"); int start = 0; int end = Math.min(numTotalHits, hitsPerPage); while (true) { if (end > hits.length) { System.out.println("Only results 1 - " + hits.length + " of " + numTotalHits + " total matching documents collected."); System.out.println("Collect more (y/n) ?"); String line = in.readLine(); if (line.length() == 0 || line.charAt(0) == 'n') { break; } hits = searcher.search(query, numTotalHits).scoreDocs; } end = Math.min(hits.length, start + hitsPerPage); for (int i = start; i < end; i++) { if (raw) { // output raw format System.out.println("doc=" + hits[i].doc + " score=" + hits[i].score); continue; } Document doc = searcher.doc(hits[i].doc); String path = doc.get("path"); if (path != null) { System.out.println((i + 1) + ". " + path); String title = doc.get("title"); if (title != null) { System.out.println(" Title: " + doc.get("title")); } } else { System.out.println((i + 1) + ". 
" + "No path for this document"); } } if (!interactive || end == 0) { break; } if (numTotalHits >= end) { boolean quit = false; while (true) { System.out.print("Press "); if (start - hitsPerPage >= 0) { System.out.print("(p)revious page, "); } if (start + hitsPerPage < numTotalHits) { System.out.print("(n)ext page, "); } System.out.println("(q)uit or enter number to jump to a page."); String line = in.readLine(); if (line.length() == 0 || line.charAt(0) == 'q') { quit = true; break; } if (line.charAt(0) == 'p') { start = Math.max(0, start - hitsPerPage); break; } else if (line.charAt(0) == 'n') { if (start + hitsPerPage < numTotalHits) { start += hitsPerPage; } break; } else { int page = Integer.parseInt(line); if ((page - 1) * hitsPerPage < numTotalHits) { start = (page - 1) * hitsPerPage; break; } else { System.out.println("No such page"); } } } if (quit) break; end = Math.min(numTotalHits, start + hitsPerPage); } } }
From source file:Application.mediaIndexer.java
public static void SearchFiles(String index, String queryString, String selected, TextArea results) throws IOException, ParseException { IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(index))); IndexSearcher searcher = new IndexSearcher(reader); Analyzer analyzer = new StandardAnalyzer(); BufferedReader in = new BufferedReader(new InputStreamReader(System.in, StandardCharsets.UTF_8)); QueryParser parser = new QueryParser(selected, analyzer); String line = queryString != null ? queryString : in.readLine(); line = line.trim();/* w ww . j a v a 2 s .co m*/ Query query = parser.parse(line); int maxHits = 100; TopDocs docsResults = searcher.search(query, maxHits); ScoreDoc[] hits = docsResults.scoreDocs; for (int i = 0; i < hits.length; i++) { Document doc = searcher.doc(hits[i].doc); results.appendText("Title: " + doc.get("title") + "\n"); results.appendText("Artists: " + doc.get("xmpDM:artist") + "\n"); results.appendText("Genre: " + doc.get("xmpDM:genre") + "\n"); results.appendText("Year: " + doc.get("xmpDM:releaseDate") + "\n"); } // Playlist. playlist.clear(); for (int i = 0; i < hits.length; i++) { Document doc = searcher.doc(hits[i].doc); String path = doc.get("path"); if (path != null) playlist.add(new File(path)); } reader.close(); }
From source file:at.ac.univie.mminf.luceneSKOS.util.TestUtil.java
License:Apache License
/**
 * Returns the total number of documents matching {@code query}.
 * Only a single hit is actually collected; {@code TopDocs.totalHits} still
 * reports the full match count.
 */
public static int hitCount(IndexSearcher searcher, Query query) throws IOException {
    return searcher.search(query, 1).totalHits;
}
From source file:at.ac.univie.mminf.luceneSKOS.util.TestUtil.java
License:Apache License
/**
 * Prints the scoring explanation for each of the top 10 documents matching
 * {@code query}, separated by rule lines.
 */
public static void explainQuery(IndexSearcher searcher, Query query) throws IOException {
    TopDocs topDocs = searcher.search(query, 10);
    for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
        Explanation explanation = searcher.explain(query, scoreDoc.doc);
        System.out.println("---------------");
        System.out.println(explanation.toString());
    }
}
From source file:at.fh_kufstein.InformationRetrievalUebung2.Main.java
public static void main(String[] args) { try {//from w w w. ja v a2s . co m Directory dir = getBookIndexDirectory(); IndexSearcher searcher = new IndexSearcher(dir); QueryParser parser = new QueryParser(Version.LUCENE_30, SEARCH_FIELD, new SimpleAnalyzer()); Query query = parser.parse(getUserQuery()); TopDocs docs = searcher.search(query, MAX_HITS); printResults(docs); searcher.close(); dir.close(); } catch (IOException | ParseException ex) { Logger.getLogger(Main.class.getName()).log(Level.SEVERE, null, ex); } }
From source file:au.edu.unimelb.csse.join.JoinFunctionalTest.java
License:Apache License
public void testFilterjoin() throws Exception { String sent = "(NP" + "(NP" + "(DT The)" + "(NN year))" + "(NP" + "(NP(CD 1956))" + "(PP" + "(IN in)" + "(NP(JJ rugby)(NN union))" + ")" + ")" + "(. .)" + ")"; Analyzer analyser = new FastStringAnalyser(); RAMDirectory dir = new RAMDirectory(); IndexWriter writer = new IndexWriter(dir, analyser, true, IndexWriter.MaxFieldLength.UNLIMITED); Document d = new Document(); d.add(new Field("sent", sent, Field.Store.NO, Field.Index.ANALYZED_NO_NORMS, Field.TermVector.WITH_POSITIONS)); writer.addDocument(d);// w w w. ja va 2s . c om writer.close(); IndexSearcher searcher = new IndexSearcher(dir); boolean[] lookaheadOptions = new boolean[] { false, true }; for (TermJoinType type : TermJoinType.values()) { for (boolean lookahead : lookaheadOptions) { QueryBuilder builder = new QueryBuilder("//PP[/IN AND /NP]"); TreebankQuery query = builder.parse(type, lookahead); SimpleHitCollector hitCollector = new SimpleHitCollector(10); searcher.search(query, hitCollector); assertEquals(1, hitCollector.totalHits); } } QueryBuilder builder = new QueryBuilder("//PP[/IN AND /NP/JJ/rugby]"); TreebankQuery query = builder.parse(TermJoinType.SIMPLE, true); SimpleHitCollector hitCollector = new SimpleHitCollector(10); searcher.search(query, hitCollector); assertEquals(1, hitCollector.totalHits); }
From source file:au.edu.unimelb.csse.join.JoinFunctionalTest.java
License:Apache License
private void assertNumberOfComparisons(IndexSearcher searcher, final String queryString, final TermJoinType joinType, final boolean useLookahead, final int numberOfComparisons) throws ParseException, IOException { QueryBuilder builder = new QueryBuilder(queryString); TreebankQuery query = builder.parse(joinType, useLookahead); int beforeTest = DoNotUseJoinLogic.getNumberOfComparisons(); SimpleHitCollector hitCollector = new SimpleHitCollector(10); searcher.search(query, hitCollector); assertEquals(1, hitCollector.totalHits); int afterTest = DoNotUseJoinLogic.getNumberOfComparisons(); // assertEquals(numberOfComparisons, afterTest - beforeTest); }
From source file:au.edu.unimelb.csse.QueryExpTest.java
License:Apache License
private void run() throws CorruptIndexException, IOException, ParseException { IndexSearcher searcher = new IndexSearcher(indexPath); TreeTerm starterTerm = new TreeTerm(0, TreeAxis.DESCENDANT, new Term("sent", "the")); TreeExpr starterExpr = new TreeExpr(); starterExpr.addTerm(starterTerm);/* w w w.j a v a2s.com*/ SimpleHitCollector collector = new SimpleHitCollector(1); searcher.search(new TreebankQuery(starterExpr), collector); collector.reset(); try { Thread.sleep(3000); } catch (InterruptedException e) { // TODO Auto-generated catch block e.printStackTrace(); } searcher.search(new TreebankQuery(starterExpr), collector); for (int i = 0; i < times; i++) { collector.reset(); QueryBuilder builder = new QueryBuilder(query); final TreebankQuery q = builder.parse(joinType, useLookahead); long start = System.nanoTime(); searcher.search(q, collector); long end = System.nanoTime(); System.out.println((end - start) + "\t" + collector.totalHits); } }