List of usage examples for the org.apache.lucene.search.IndexSearcher constructor.
Declared signature shown: public IndexSearcher(IndexReaderContext context) — note that the examples below all invoke the IndexSearcher(IndexReader) overload.
From source file:com.github.mosuka.apache.lucene.example.cmd.SearchCommand.java
License:Apache License
/**
 * Runs a search against the Lucene index located at {@code attrs.get("index")}
 * using the query string in {@code attrs.get("query")}, and prints the result
 * as a JSON document on stdout.
 *
 * <p>Response map: {@code status} (0 = OK, 1 = error), {@code message}, and on
 * success {@code totalHits}, {@code maxScore}, and {@code result} (one map per
 * hit: stored field name → value, plus {@code score}).
 *
 * @param attrs command attributes; must contain "index" (path) and "query"
 */
@Override
public void execute(Map<String, Object> attrs) {
    Map<String, Object> responseMap = new LinkedHashMap<String, Object>();
    String responseJSON = null;

    String index = (String) attrs.get("index");
    String queryStr = (String) attrs.get("query");

    // try-with-resources replaces the original manual finally/close dance and
    // guarantees both the directory and the reader are released on any path.
    try (Directory indexDir = FSDirectory.open(new File(index).toPath());
            IndexReader reader = DirectoryReader.open(indexDir)) {

        // "text" is the default field; analyzer must match the one used at index time.
        QueryParser queryParser = new QueryParser("text", new JapaneseAnalyzer());
        Query query = queryParser.parse(queryStr);

        IndexSearcher searcher = new IndexSearcher(reader);
        TopDocs topDocs = searcher.search(query, 10);

        List<Map<String, Object>> documentList = new LinkedList<Map<String, Object>>();
        for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
            Document document = searcher.doc(scoreDoc.doc);
            Map<String, Object> documentMap = new LinkedHashMap<String, Object>();
            for (IndexableField f : document.getFields()) {
                documentMap.put(f.name(), f.stringValue());
            }
            documentMap.put("score", scoreDoc.score);
            documentList.add(documentMap);
        }

        responseMap.put("status", 0);
        responseMap.put("message", "OK");
        responseMap.put("totalHits", topDocs.totalHits);
        responseMap.put("maxScore", topDocs.getMaxScore());
        responseMap.put("result", documentList);
    } catch (IOException | ParseException e) {
        // The two original catch blocks were byte-identical — collapsed to multi-catch.
        responseMap.put("status", 1);
        responseMap.put("message", e.getMessage());
    }

    try {
        ObjectMapper mapper = new ObjectMapper();
        responseJSON = mapper.writeValueAsString(responseMap);
    } catch (IOException e) {
        // Fall back to a hand-built JSON error if serialization itself fails.
        responseJSON = String.format("{\"status\":1, \"message\":\"%s\"}", e.getMessage());
    }
    System.out.println(responseJSON);
}
From source file:com.github.msarhan.lucene.ArabicRootExtractorAnalyzerTests.java
License:Open Source License
@Test public void testArabicRootIndex() throws IOException, ParseException, URISyntaxException { Directory index = new RAMDirectory(); ArabicRootExtractorAnalyzer analyzer = new ArabicRootExtractorAnalyzer(); IndexWriterConfig config = new IndexWriterConfig(analyzer); final AtomicInteger id = new AtomicInteger(0); IndexWriter w = new IndexWriter(index, config); URL url = ArabicRootExtractorStemmer.class.getClassLoader() .getResource("com/github/msarhan/lucene/fateha.txt"); if (url == null) { fail("Not able to load data file!"); }// w w w . j a va 2s . co m Files.lines(new File(url.toURI()).toPath()) .forEach(line -> addDoc(w, line, String.valueOf(id.incrementAndGet()))); w.close(); String querystr = ""; Query q = new QueryParser("title", analyzer).parse(querystr); int hitsPerPage = 10; IndexReader reader = DirectoryReader.open(index); IndexSearcher searcher = new IndexSearcher(reader); TopDocs docs = searcher.search(q, hitsPerPage); //print(searcher, docs); assertEquals(2, docs.scoreDocs.length); }
From source file:com.github.msarhan.lucene.ArabicRootExtractorAnalyzerTests.java
License:Open Source License
@Test public void testInlineStemmer() throws IOException, ParseException { //Initialize the index Directory index = new RAMDirectory(); Analyzer analyzer = new ArabicRootExtractorAnalyzer(); IndexWriterConfig config = new IndexWriterConfig(analyzer); IndexWriter writer = new IndexWriter(index, config); Document doc = new Document(); doc.add(new StringField("number", "1", Field.Store.YES)); doc.add(new TextField("title", "?? ? ? ??", Field.Store.YES));//from www . j a va2s .c o m writer.addDocument(doc); doc = new Document(); doc.add(new StringField("number", "2", Field.Store.YES)); doc.add(new TextField("title", "? ?? ? ?", Field.Store.YES)); writer.addDocument(doc); doc = new Document(); doc.add(new StringField("number", "3", Field.Store.YES)); doc.add(new TextField("title", "? ??", Field.Store.YES)); writer.addDocument(doc); writer.close(); //~ //Query the index String queryStr = ""; Query query = new QueryParser("title", analyzer).parse(queryStr); int hitsPerPage = 5; IndexReader reader = DirectoryReader.open(index); IndexSearcher searcher = new IndexSearcher(reader); TopDocs docs = searcher.search(query, hitsPerPage, Sort.INDEXORDER); ScoreDoc[] hits = docs.scoreDocs; //~ //Print results /* System.out.println("Found " + hits.length + " hits:"); for (ScoreDoc hit : hits) { int docId = hit.doc; Document d = searcher.doc(docId); System.out.printf("\t(%s): %s\n", d.get("number"), d.get("title")); } */ //~ }
From source file:com.github.rnewson.couchdb.lucene.Search.java
License:Apache License
public static void main(final String[] args) { Utils.LOG.info("searcher started."); try {//from ww w . java2 s .c om IndexReader reader = null; IndexSearcher searcher = null; final Scanner scanner = new Scanner(System.in); while (scanner.hasNextLine()) { if (reader == null) { // Open a reader and searcher if index exists. if (IndexReader.indexExists(Config.INDEX_DIR)) { reader = IndexReader.open(NIOFSDirectory.getDirectory(Config.INDEX_DIR), true); searcher = new IndexSearcher(reader); } } final String line = scanner.nextLine(); // Process search request if index exists. if (searcher == null) { System.out.println(Utils.error(503, "couchdb-lucene not available.")); continue; } final JSONObject obj; try { obj = JSONObject.fromObject(line); } catch (final JSONException e) { System.out.println(Utils.error(400, "invalid JSON.")); continue; } if (!obj.has("query")) { System.out.println(Utils.error(400, "No query found in request.")); continue; } final JSONObject query = obj.getJSONObject("query"); final boolean reopen = !"ok".equals(query.optString("stale", "not-ok")); // Refresh reader and searcher if necessary. if (reader != null && reopen) { final IndexReader newReader = reader.reopen(); if (reader != newReader) { Utils.LOG.info("Lucene index was updated, reopening searcher."); final IndexReader oldReader = reader; reader = newReader; searcher = new IndexSearcher(reader); oldReader.close(); } } try { // A query. if (query.has("q")) { final JSONArray path = obj.getJSONArray("path"); if (path.size() < 3) { System.out.println(Utils.error(400, "No design document in path.")); continue; } if (path.size() < 4) { System.out.println(Utils.error(400, "No view name in path.")); } if (path.size() > 4) { System.out.println(Utils.error(400, "Extra path info in request.")); } assert path.size() == 4; final SearchRequest request = new SearchRequest(obj); final String result = request.execute(searcher); System.out.println(result); continue; } // info. 
if (query.keySet().isEmpty()) { final JSONObject json = new JSONObject(); json.put("current", reader.isCurrent()); json.put("disk_size", size(reader.directory())); json.put("doc_count", reader.numDocs()); json.put("doc_del_count", reader.numDeletedDocs()); final JSONArray fields = new JSONArray(); for (final Object field : reader.getFieldNames(FieldOption.INDEXED)) { if (((String) field).startsWith("_")) continue; fields.add(field); } json.put("fields", fields); json.put("last_modified", IndexReader.lastModified(Config.INDEX_DIR)); json.put("optimized", reader.isOptimized()); final JSONObject info = new JSONObject(); info.put("code", 200); info.put("json", json); final JSONObject headers = new JSONObject(); headers.put("Content-Type", "text/plain"); info.put("headers", headers); System.out.println(info); } } catch (final Exception e) { System.out.println(Utils.error(400, e)); } System.out.println(Utils.error(400, "Bad request.")); } if (reader != null) { reader.close(); } } catch (final Exception e) { System.out.println(Utils.error(500, e.getMessage())); } Utils.LOG.info("searcher stopped."); }
From source file:com.github.s4ke.moar.lucene.query.test.BaseLuceneTest.java
License:Open Source License
/**
 * Searches the shared directory {@code d} with {@code query} and asserts that
 * the total hit count equals {@code hitCount}.
 *
 * @param query    the query to execute
 * @param hitCount the expected number of matching documents
 * @throws IOException if the index cannot be opened or searched
 */
public void assertHits(Query query, int hitCount) throws IOException {
    try (IndexReader reader = DirectoryReader.open(d)) {
        TopDocs result = new IndexSearcher(reader).search(query, 10);
        assertEquals("hitCount didn't match expected hit count", hitCount, result.totalHits);
    }
}
From source file:com.github.s4ke.moar.lucene.query.test.MoarQueryPerfTest.java
License:Open Source License
/**
 * Micro-benchmark: runs 1000 randomly generated prefix-style regex patterns
 * against the shared index {@code d}, timing a {@code MoarQuery} versus
 * Lucene's {@code RegexpQuery} for each pattern. Prints timings and hit
 * counts; makes no assertions.
 *
 * NOTE(review): {@code nextInt(wordOfChoice.length() - 1)} throws
 * IllegalArgumentException if a chosen word has length 1 — confirm WORDS only
 * contains words of length >= 2.
 */
@Test
public void testComparison() throws IOException {
    this.setupComparisonData();
    try (IndexReader ir = DirectoryReader.open(d)) {
        IndexSearcher is = new IndexSearcher(ir);
        Perf perf = new Perf(true);
        for (int i = 0; i < 1000; ++i) {
            // Pick a random word, truncate it at a random point, and turn it
            // into a prefix pattern ("abc" -> "ab.*").
            String wordOfChoice = WORDS.get(this.random.nextInt(WORDS.size()));
            wordOfChoice = wordOfChoice.substring(0, this.random.nextInt(wordOfChoice.length() - 1) + 1);
            wordOfChoice += ".*";
            System.out.println(wordOfChoice);
            {
                // Time the MoarQuery path (pattern compile included in the measurement).
                perf.pre();
                MoaPattern pattern = MoaPattern.compile(wordOfChoice);
                MoarQuery tq = new MoarQuery("tag", pattern);
                TopDocs td = is.search(tq, 10);
                System.out.println(td.totalHits + " moar query hits");
                perf.after();
                perf.report("searching with moar");
            }
            {
                // Time the stock RegexpQuery path on the same pattern
                // (query construction deliberately outside the timed window).
                RegexpQuery regexpQuery = new RegexpQuery(new Term("tag", wordOfChoice));
                perf.pre();
                TopDocs td = is.search(regexpQuery, 10);
                System.out.println(td.totalHits + " regexp query hits");
                perf.after();
                perf.report("searching with regexp");
            }
        }
    }
}
From source file:com.github.tenorviol.gitsearch.SearchFiles.java
License:Apache License
/** Simple command-line based search demo. */ public static void search(CommandLine cl) throws Exception { String index = "index"; String field = "contents"; String queries = null;// w w w .j a va2 s .co m int repeat = 0; boolean raw = false; String queryString = ""; int hitsPerPage = 25; Iterator it = cl.commandArgs.iterator(); while (it.hasNext()) { queryString += " " + it.next(); } IndexReader reader = IndexReader.open(FSDirectory.open(new File(cl.indexPath))); IndexSearcher searcher = new IndexSearcher(reader); Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_31); QueryParser parser = new QueryParser(Version.LUCENE_31, field, analyzer); String line = queryString; line = line.trim(); Query query = parser.parse(line); System.out.println("Searching for: " + query.toString(field)); if (repeat > 0) { // repeat & time as benchmark Date start = new Date(); for (int i = 0; i < repeat; i++) { searcher.search(query, null, 100); } Date end = new Date(); System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms"); } doPagingSearch(searcher, query, hitsPerPage); searcher.close(); reader.close(); }
From source file:com.github.tteofili.apacheconeu14.oak.search.nls.IndexUtils.java
License:Apache License
public static IndexSearcher getSearcher() { if (directory == null) { directory = openDir();//w w w .j ava 2s.c o m } try { return new IndexSearcher(DirectoryReader.open(directory)); } catch (Exception e) { log.error("could not create index searcher", e); } return null; }
From source file:com.github.tteofili.looseen.MinHashClassifier.java
License:Apache License
@Override public ClassificationResult<BytesRef> assignClass(String text) throws IOException { DirectoryReader reader = DirectoryReader.open(directory); IndexSearcher searcher = new IndexSearcher(reader); try {/*from www .j a v a2 s .co m*/ int k = 3; TopDocs topDocs = searcher.search(buildQuery(TEXT_FIELD, text, min, hashCount, hashSize), k); if (topDocs.totalHits > 0) { return buildListFromTopDocs(searcher, CLASS_FIELD, topDocs, k).get(0); // Document document = reader.document(topDocs.scoreDocs[0].doc); // String category = document.getField(CLASS_FIELD).stringValue(); // return new ClassificationResult<>(new BytesRef(category), topDocs.getMaxScore()); } else { return null; } } finally { reader.close(); } }
From source file:com.github.wxiaoqi.search.lucene.LuceneDao.java
License:Open Source License
public TableResultResponse<IndexObject> page(Integer pageNumber, Integer pageSize, String keyword) { IndexReader indexReader = null;//ww w . j a va 2s . co m TableResultResponse<IndexObject> pageQuery = null; List<IndexObject> searchResults = new ArrayList<>(); try { indexReader = DirectoryReader.open(this.getDirectory()); IndexSearcher indexSearcher = new IndexSearcher(indexReader); Query query = QueryUtil.query(keyword, this.getAnalyzer(), "title", "descripton"); ScoreDoc lastScoreDoc = this.getLastScoreDoc(pageNumber, pageSize, query, indexSearcher); /*?documentsearchAfter */ TopDocs topDocs = indexSearcher.searchAfter(lastScoreDoc, query, pageSize); Highlighter highlighter = this.addStringHighlighter(query); log.info("??{}", keyword); log.info("{}", topDocs.totalHits); for (ScoreDoc scoreDoc : topDocs.scoreDocs) { int docID = scoreDoc.doc; float score = scoreDoc.score; Document document = indexSearcher.doc(docID); IndexObject indexObject = DocumentUtil.document2IndexObject(this.getAnalyzer(), highlighter, document, score); searchResults.add(indexObject); log.info("" + score); } Collections.sort(searchResults); pageQuery = new TableResultResponse<>(topDocs.totalHits, searchResults); } catch (Exception e) { e.printStackTrace(); } finally { try { indexReader.close(); } catch (IOException e) { e.printStackTrace(); } } return pageQuery; }