List of usage examples for org.apache.lucene.index DirectoryReader open
public static DirectoryReader open(final Directory directory) throws IOException
public static DirectoryReader open(final IndexCommit commit) throws IOException
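Before the project examples below, here is a minimal, self-contained sketch of the pattern they share: open a Directory, wrap it in a DirectoryReader via DirectoryReader.open, search through an IndexSearcher, and close both when done. The index path "/tmp/example-index", the field name "text", and the query string "lucene" are placeholders, not taken from any listed project, and the sketch assumes a Lucene version (as in the examples below) where IndexSearcher.doc(int) is still available.

import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class DirectoryReaderOpenExample {
    public static void main(String[] args) throws Exception {
        // Placeholder index location; the directory must already contain a Lucene index.
        try (Directory directory = FSDirectory.open(Paths.get("/tmp/example-index"));
             DirectoryReader reader = DirectoryReader.open(directory)) { // point-in-time view of the index
            IndexSearcher searcher = new IndexSearcher(reader);
            // Placeholder field name and query string.
            Query query = new QueryParser("text", new StandardAnalyzer()).parse("lucene");
            TopDocs topDocs = searcher.search(query, 10);
            for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
                Document doc = searcher.doc(scoreDoc.doc);
                System.out.println(scoreDoc.score + " " + doc.get("text"));
            }
        }
    }
}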
From source file:com.github.flaxsearch.resources.IndexResourceTestBase.java
License:Apache License
@BeforeClass
public static void setup() throws IOException {
    directory = Fixtures.openDirectory();
    reader = DirectoryReader.open(directory);
}
From source file:com.github.flaxsearch.util.FSReaderManager.java
License:Apache License
public FSReaderManager(String indexPath) throws IOException {
    this.directory = FSDirectory.open(Paths.get(indexPath));
    this.reader = DirectoryReader.open(directory);
}
From source file:com.github.jiloc.USTweetsAnalyzer.Analyzer_Index.java
public Analyzer_Index(Directory dir) throws IOException {
    ir = DirectoryReader.open(dir);
    searcher = new IndexSearcher(ir);
    // universe = new HashMap<String, HashSet<String>>();
}
From source file:com.github.lucene.store.database.DatabaseDirectoryITest.java
License:Apache License
@Test(expected = IndexNotFoundException.class)
public void whenIndexIsEmptyAndOpenIndexReader_shouldThrowIndexNotFoundException() throws IOException {
    DirectoryReader.open(directory);
}
From source file:com.github.mosuka.apache.lucene.example.cmd.SearchCommand.java
License:Apache License
@Override
public void execute(Map<String, Object> attrs) {
    Map<String, Object> responseMap = new LinkedHashMap<String, Object>();
    String responseJSON = null;

    Directory indexDir = null;
    IndexReader reader = null;
    try {
        String index = (String) attrs.get("index");
        String queryStr = (String) attrs.get("query");

        indexDir = FSDirectory.open(new File(index).toPath());

        QueryParser queryParser = new QueryParser("text", new JapaneseAnalyzer());
        Query query = queryParser.parse(queryStr);

        reader = DirectoryReader.open(indexDir);
        IndexSearcher searcher = new IndexSearcher(reader);

        TopDocs topDocs = searcher.search(query, 10);

        List<Map<String, Object>> documentList = new LinkedList<Map<String, Object>>();
        for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
            Document document = searcher.doc(scoreDoc.doc);
            Map<String, Object> documentMap = new LinkedHashMap<String, Object>();
            for (IndexableField f : document.getFields()) {
                documentMap.put(f.name(), f.stringValue());
            }
            documentMap.put("score", scoreDoc.score);
            documentList.add(documentMap);
        }

        responseMap.put("status", 0);
        responseMap.put("message", "OK");
        responseMap.put("totalHits", topDocs.totalHits);
        responseMap.put("maxScore", topDocs.getMaxScore());
        responseMap.put("result", documentList);
    } catch (IOException e) {
        responseMap.put("status", 1);
        responseMap.put("message", e.getMessage());
    } catch (ParseException e) {
        responseMap.put("status", 1);
        responseMap.put("message", e.getMessage());
    } finally {
        try {
            if (reader != null) {
                reader.close();
            }
        } catch (IOException e) {
            responseMap.put("status", 1);
            responseMap.put("message", e.getMessage());
        }
        try {
            if (indexDir != null) {
                indexDir.close();
            }
        } catch (IOException e) {
            responseMap.put("status", 1);
            responseMap.put("message", e.getMessage());
        }
    }

    try {
        ObjectMapper mapper = new ObjectMapper();
        responseJSON = mapper.writeValueAsString(responseMap);
    } catch (IOException e) {
        responseJSON = String.format("{\"status\":1, \"message\":\"%s\"}", e.getMessage());
    }

    System.out.println(responseJSON);
}
From source file:com.github.msarhan.lucene.ArabicRootExtractorAnalyzerTests.java
License:Open Source License
@Test
public void testArabicRootIndex() throws IOException, ParseException, URISyntaxException {
    Directory index = new RAMDirectory();
    ArabicRootExtractorAnalyzer analyzer = new ArabicRootExtractorAnalyzer();
    IndexWriterConfig config = new IndexWriterConfig(analyzer);

    final AtomicInteger id = new AtomicInteger(0);
    IndexWriter w = new IndexWriter(index, config);
    URL url = ArabicRootExtractorStemmer.class.getClassLoader()
            .getResource("com/github/msarhan/lucene/fateha.txt");
    if (url == null) {
        fail("Not able to load data file!");
    }
    Files.lines(new File(url.toURI()).toPath())
            .forEach(line -> addDoc(w, line, String.valueOf(id.incrementAndGet())));
    w.close();

    // NOTE: the original Arabic query string was lost to mis-encoding on this page
    String querystr = "";
    Query q = new QueryParser("title", analyzer).parse(querystr);

    int hitsPerPage = 10;
    IndexReader reader = DirectoryReader.open(index);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs docs = searcher.search(q, hitsPerPage);

    //print(searcher, docs);
    assertEquals(2, docs.scoreDocs.length);
}
From source file:com.github.msarhan.lucene.ArabicRootExtractorAnalyzerTests.java
License:Open Source License
@Test
public void testInlineStemmer() throws IOException, ParseException {
    // Initialize the index
    Directory index = new RAMDirectory();
    Analyzer analyzer = new ArabicRootExtractorAnalyzer();
    IndexWriterConfig config = new IndexWriterConfig(analyzer);
    IndexWriter writer = new IndexWriter(index, config);

    // NOTE: the Arabic title strings and the query string below were reduced to "?"
    // characters by mis-encoding on this page; the placeholders are kept as-is.
    Document doc = new Document();
    doc.add(new StringField("number", "1", Field.Store.YES));
    doc.add(new TextField("title", "?? ? ? ??", Field.Store.YES));
    writer.addDocument(doc);

    doc = new Document();
    doc.add(new StringField("number", "2", Field.Store.YES));
    doc.add(new TextField("title", "? ?? ? ?", Field.Store.YES));
    writer.addDocument(doc);

    doc = new Document();
    doc.add(new StringField("number", "3", Field.Store.YES));
    doc.add(new TextField("title", "? ??", Field.Store.YES));
    writer.addDocument(doc);

    writer.close();
    //~

    // Query the index
    String queryStr = "";
    Query query = new QueryParser("title", analyzer).parse(queryStr);

    int hitsPerPage = 5;
    IndexReader reader = DirectoryReader.open(index);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs docs = searcher.search(query, hitsPerPage, Sort.INDEXORDER);
    ScoreDoc[] hits = docs.scoreDocs;
    //~

    // Print results
    /*
    System.out.println("Found " + hits.length + " hits:");
    for (ScoreDoc hit : hits) {
        int docId = hit.doc;
        Document d = searcher.doc(docId);
        System.out.printf("\t(%s): %s\n", d.get("number"), d.get("title"));
    }
    */
    //~
}
From source file:com.github.s4ke.moar.lucene.query.test.BaseLuceneTest.java
License:Open Source License
public void assertHits(Query query, int hitCount) throws IOException {
    try (IndexReader ir = DirectoryReader.open(d)) {
        IndexSearcher searcher = new IndexSearcher(ir);
        TopDocs td = searcher.search(query, 10);
        assertEquals("hitCount didn't match expected hit count", hitCount, td.totalHits);
    }
}
From source file:com.github.s4ke.moar.lucene.query.test.MoarQueryPerfTest.java
License:Open Source License
@Test
public void testComparison() throws IOException {
    this.setupComparisonData();
    try (IndexReader ir = DirectoryReader.open(d)) {
        IndexSearcher is = new IndexSearcher(ir);
        Perf perf = new Perf(true);
        for (int i = 0; i < 1000; ++i) {
            String wordOfChoice = WORDS.get(this.random.nextInt(WORDS.size()));
            wordOfChoice = wordOfChoice.substring(0, this.random.nextInt(wordOfChoice.length() - 1) + 1);
            wordOfChoice += ".*";
            System.out.println(wordOfChoice);
            {
                perf.pre();
                MoaPattern pattern = MoaPattern.compile(wordOfChoice);
                MoarQuery tq = new MoarQuery("tag", pattern);
                TopDocs td = is.search(tq, 10);
                System.out.println(td.totalHits + " moar query hits");
                perf.after();
                perf.report("searching with moar");
            }
            {
                RegexpQuery regexpQuery = new RegexpQuery(new Term("tag", wordOfChoice));
                perf.pre();
                TopDocs td = is.search(regexpQuery, 10);
                System.out.println(td.totalHits + " regexp query hits");
                perf.after();
                perf.report("searching with regexp");
            }
        }
    }
}
From source file:com.github.tteofili.apacheconeu14.oak.search.nls.IndexUtils.java
License:Apache License
public static IndexSearcher getSearcher() {
    if (directory == null) {
        directory = openDir();
    }
    try {
        return new IndexSearcher(DirectoryReader.open(directory));
    } catch (Exception e) {
        log.error("could not create index searcher", e);
    }
    return null;
}