List of usage examples for org.apache.lucene.index IndexReader close
@Override public final synchronized void close() throws IOException
From source file:aos.lucene.search.msc.BasicSearchingTest.java
License:Apache License
public void testTerm() throws Exception { Directory dir = FSDirectory.open(new File("target/index")); IndexReader reader = DirectoryReader.open(dir); IndexSearcher searcher = new IndexSearcher(reader); Term t = new Term("subject", "ant"); Query query = new TermQuery(t); TopDocs docs = searcher.search(query, 10); assertEquals("Ant in Action", 1, docs.totalHits); t = new Term("subject", "junit"); docs = searcher.search(new TermQuery(t), 10); assertEquals("Ant in Action, " + "JUnit in Action, Second Edition", 2, docs.totalHits); reader.close(); dir.close();// w w w . j a v a 2 s . c o m }
From source file:aos.lucene.search.msc.Fragments.java
License:Apache License
/**
 * Fragment illustrating the reader-reopen pattern: reopen() returns a new
 * reader instance only when the index has changed; the stale reader is then
 * closed and the searcher rebuilt around the fresh one.
 *
 * NOTE(review): as written this always throws NullPointerException —
 * {@code reader} is initialized to null and immediately dereferenced via
 * {@code reader.reopen()}. Presumably the fragment assumes a previously
 * opened reader; confirm before using verbatim.
 */
public void nrtReader() throws Exception {
    IndexReader reader = null;
    IndexSearcher searcher; // assigned below but never used outside this fragment
    IndexReader newReader = reader.reopen(); // same instance returned when nothing changed
    if (reader != newReader) {
        reader.close(); // release the stale reader
        reader = newReader;
        searcher = new IndexSearcher(reader);
    }
}
From source file:aos.lucene.search.msc.NearRealTimeTest.java
License:Apache License
public void testNearRealTime() throws Exception { Directory dir = new RAMDirectory(); IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(Version.LUCENE_46), IndexWriter.MaxFieldLength.UNLIMITED); for (int i = 0; i < 10; i++) { Document doc = new Document(); doc.add(new Field("id", "" + i, Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS)); doc.add(new Field("text", "aaa", Field.Store.NO, Field.Index.ANALYZED)); writer.addDocument(doc);// www . j a v a2s. c om } IndexReader reader = writer.getReader(); IndexSearcher searcher = new IndexSearcher(reader); Query query = new TermQuery(new Term("text", "aaa")); TopDocs docs = searcher.search(query, 1); assertEquals(10, docs.totalHits); writer.deleteDocuments(new Term("id", "7")); Document doc = new Document(); doc.add(new Field("id", "11", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS)); doc.add(new Field("text", "bbb", Field.Store.NO, Field.Index.ANALYZED)); writer.addDocument(doc); IndexReader newReader = reader.reopen(); assertFalse(reader == newReader); reader.close(); searcher = new IndexSearcher(newReader); TopDocs hits = searcher.search(query, 10); assertEquals(9, hits.totalHits); query = new TermQuery(new Term("text", "bbb")); hits = searcher.search(query, 1); assertEquals(1, hits.totalHits); newReader.close(); writer.close(); }
From source file:aos.lucene.tools.BooksMoreLikeThis.java
License:Apache License
public static void main(String[] args) throws Throwable { String indexDir = System.getProperty("index.dir"); FSDirectory directory = FSDirectory.open(new File(indexDir)); IndexReader reader = DirectoryReader.open(directory); IndexSearcher searcher = new IndexSearcher(reader); int numDocs = reader.maxDoc(); MoreLikeThis mlt = new MoreLikeThis(reader); mlt.setFieldNames(new String[] { "title", "author" }); mlt.setMinTermFreq(1);//from w w w .j ava2s. co m mlt.setMinDocFreq(1); for (int docID = 0; docID < numDocs; docID++) { LOGGER.info(); Document doc = reader.document(docID); LOGGER.info(doc.get("title")); Query query = mlt.like(docID); LOGGER.info(" query=" + query); TopDocs similarDocs = searcher.search(query, 10); if (similarDocs.totalHits == 0) LOGGER.info(" None like this"); for (int i = 0; i < similarDocs.scoreDocs.length; i++) { if (similarDocs.scoreDocs[i].doc != docID) { doc = reader.document(similarDocs.scoreDocs[i].doc); LOGGER.info(" -> " + doc.getField("title").stringValue()); } } } reader.close(); directory.close(); }
From source file:aos.lucene.tools.CreateSpellCheckerIndex.java
License:Apache License
public static void main(String[] args) throws IOException { if (args.length != 3) { LOGGER.info("Usage: java lia.tools.SpellCheckerTest SpellCheckerIndexDir IndexDir IndexField"); System.exit(1);/*from ww w . ja va2 s . c om*/ } String spellCheckDir = args[0]; String indexDir = args[1]; String indexField = args[2]; LOGGER.info("Now build SpellChecker index..."); Directory dir = FSDirectory.open(new File(spellCheckDir)); SpellChecker spell = new SpellChecker(dir); //#A long startTime = System.currentTimeMillis(); Directory dir2 = FSDirectory.open(new File(indexDir)); IndexReader r = DirectoryReader.open(dir2); //#B try { spell.indexDictionary(new LuceneDictionary(r, indexField)); //#C } finally { r.close(); } dir.close(); dir2.close(); long endTime = System.currentTimeMillis(); LOGGER.info(" took " + (endTime - startTime) + " milliseconds"); }
From source file:aplicacion.sistema.indexer.test.DeleteFiles.java
License:Apache License
/** Deletes documents from an index that do not contain a term. */ public static void main(String[] args) { String usage = "java org.apache.lucene.demo.DeleteFiles <unique_term>"; if (args.length == 0) { System.err.println("Usage: " + usage); System.exit(1);//ww w. ja va 2 s . c om } try { Directory directory = FSDirectory.open(new File("index")); IndexReader reader = IndexReader.open(directory, false); // we don't want read-only because we are about to delete Term term = new Term("path", args[0]); int deleted = reader.deleteDocuments(term); System.out.println("deleted " + deleted + " documents containing " + term); // one can also delete documents by their internal id: /* for (int i = 0; i < reader.maxDoc(); i++) { System.out.println("Deleting document with id " + i); reader.delete(i); }*/ reader.close(); directory.close(); } catch (Exception e) { System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage()); } }
From source file:aplicacion.sistema.indexer.test.SearchFiles.java
License:Apache License
/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-raw] [-norms field] [-paging hitsPerPage]";
    usage += "\n\tSpecify 'false' for hitsPerPage to use streaming instead of paging search.";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }

    // Defaults, overridden by the flags parsed below.
    String index = "e:/index"; // NOTE(review): Windows-specific default path
    String field = "contents";
    String queries = null;     // file of queries; null means interactive stdin
    int repeat = 0;            // benchmark repetitions
    boolean raw = false;
    String normsField = null;
    boolean paging = true;
    int hitsPerPage = 10;

    // Flag parsing: each value-taking option consumes args[i + 1].
    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            index = args[i + 1];
            i++;
        } else if ("-field".equals(args[i])) {
            field = args[i + 1];
            i++;
        } else if ("-queries".equals(args[i])) {
            queries = args[i + 1];
            i++;
        } else if ("-repeat".equals(args[i])) {
            repeat = Integer.parseInt(args[i + 1]);
            i++;
        } else if ("-raw".equals(args[i])) {
            raw = true;
        } else if ("-norms".equals(args[i])) {
            normsField = args[i + 1];
            i++;
        } else if ("-paging".equals(args[i])) {
            // "-paging false" selects streaming; any positive number sets the
            // page size; 0 also falls back to streaming.
            if (args[i + 1].equals("false")) {
                paging = false;
            } else {
                hitsPerPage = Integer.parseInt(args[i + 1]);
                if (hitsPerPage == 0) {
                    paging = false;
                }
            }
            i++;
        }
    }

    IndexReader reader = IndexReader.open(FSDirectory.open(new File(index)), true); // only searching, so read-only=true
    if (normsField != null)
        reader = new OneNormsReader(reader, normsField);

    Searcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
    BufferedReader in = null;
    if (queries != null) {
        // NOTE(review): FileReader uses the platform default charset here,
        // unlike the UTF-8 stdin branch below — confirm that is intended.
        in = new BufferedReader(new FileReader(queries));
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
    }
    QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, field, analyzer);
    while (true) {
        if (queries == null) // prompt the user
            System.out.println("Enter query: ");
        String line = in.readLine();
        // NOTE(review): line.length() == -1 can never be true (String.length()
        // is non-negative); only the null check actually ends input here.
        if (line == null || line.length() == -1)
            break;
        line = line.trim();
        if (line.length() == 0)
            break; // a blank line also terminates the loop
        Query query = parser.parse(line);
        System.out.println("Searching for: " + query.toString(field));
        if (repeat > 0) { // repeat & time as benchmark
            Date start = new Date();
            for (int i = 0; i < repeat; i++) {
                searcher.search(query, null, 100);
            }
            Date end = new Date();
            System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
        }
        if (paging) {
            doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null);
        } else {
            doStreamingSearch(searcher, query);
        }
    }
    reader.close();
}
From source file:app.finder.topicsource.service.SearchFiles.java
License:Apache License
public List<TopicSource> getTopicSources(String queryString) throws IOException, ParseException { String field = "contents"; String queries = null;/*from w ww . jav a 2 s . c o m*/ int repeat = 0; boolean raw = false; int hitsPerPage = SEARCH_MAX_SIZE; // 100; IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(indexDir))); IndexSearcher searcher = new IndexSearcher(reader); Analyzer analyzer = new StandardAnalyzer(); BufferedReader in = null; QueryParser parser = new QueryParser(field, analyzer); Query query = parser.parse(queryString); //System.out.println("Searching for: " + query.toString(field)); searcher.search(query, null, SEARCH_MAX_SIZE); List<String> list = doSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null); reader.close(); List<TopicSource> topicSourceList = new ArrayList<TopicSource>(); TopicSource topicSource = null; int counter = 0; for (String fileName : list) { topicSource = new TopicSource(); File file = new File(fileName); topicSource.setFileName("" + (++counter) + ". " + file.getName()); topicSource.setPath(file.getCanonicalPath()); topicSource.setText(readFile(file)); topicSourceList.add(topicSource); } return topicSourceList; }
From source file:app.SearchFiles.java
License:Apache License
/** Simple command-line based search demo. */ public static void main(String[] args) throws Exception { String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/core/4_1_0/demo/ for details."; if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) { System.out.println(usage); System.exit(0);/*from ww w .j a v a 2s . co m*/ } String index = "index"; String field = "contents"; String queries = null; int repeat = 0; boolean raw = true; String queryString = null; int hitsPerPage = 10; for (int i = 0; i < args.length; i++) { if ("-index".equals(args[i])) { index = args[i + 1]; i++; } else if ("-field".equals(args[i])) { field = args[i + 1]; i++; } else if ("-queries".equals(args[i])) { queries = args[i + 1]; i++; } else if ("-query".equals(args[i])) { queryString = args[i + 1]; i++; } else if ("-repeat".equals(args[i])) { repeat = Integer.parseInt(args[i + 1]); i++; } else if ("-raw".equals(args[i])) { raw = true; } else if ("-paging".equals(args[i])) { hitsPerPage = Integer.parseInt(args[i + 1]); if (hitsPerPage <= 0) { System.err.println("There must be at least 1 hit per page."); System.exit(1); } i++; } } IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(index))); IndexSearcher searcher = new IndexSearcher(reader); Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40); BufferedReader in = null; if (queries != null) { in = new BufferedReader(new InputStreamReader(new FileInputStream(queries), "UTF-8")); } else { in = new BufferedReader(new InputStreamReader(System.in, "UTF-8")); } QueryParser parser = new QueryParser(Version.LUCENE_40, field, analyzer); while (true) { if (queries == null && queryString == null) { // prompt the user System.out.println("Enter query: "); } String line = queryString != null ? 
queryString : in.readLine(); if (line == null || line.length() == -1) { break; } line = line.trim(); if (line.length() == 0) { break; } Query query = parser.parse(line); System.out.println("Searching for: " + query.toString(field)); if (repeat > 0) { // repeat & time as benchmark Date start = new Date(); for (int i = 0; i < repeat; i++) { searcher.search(query, null, 100); } Date end = new Date(); System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms"); } doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null); if (queryString != null) { break; } } reader.close(); }
From source file:Application.mediaIndexer.java
public static void SearchFiles(String index, String queryString, String selected, TextArea results) throws IOException, ParseException { IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(index))); IndexSearcher searcher = new IndexSearcher(reader); Analyzer analyzer = new StandardAnalyzer(); BufferedReader in = new BufferedReader(new InputStreamReader(System.in, StandardCharsets.UTF_8)); QueryParser parser = new QueryParser(selected, analyzer); String line = queryString != null ? queryString : in.readLine(); line = line.trim();//from ww w .j a v a2 s . c om Query query = parser.parse(line); int maxHits = 100; TopDocs docsResults = searcher.search(query, maxHits); ScoreDoc[] hits = docsResults.scoreDocs; for (int i = 0; i < hits.length; i++) { Document doc = searcher.doc(hits[i].doc); results.appendText("Title: " + doc.get("title") + "\n"); results.appendText("Artists: " + doc.get("xmpDM:artist") + "\n"); results.appendText("Genre: " + doc.get("xmpDM:genre") + "\n"); results.appendText("Year: " + doc.get("xmpDM:releaseDate") + "\n"); } // Playlist. playlist.clear(); for (int i = 0; i < hits.length; i++) { Document doc = searcher.doc(hits[i].doc); String path = doc.get("path"); if (path != null) playlist.add(new File(path)); } reader.close(); }