List of usage examples for org.apache.lucene.index.IndexReader#close()
@Override public final synchronized void close() throws IOException
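close() releases the files and other resources held by the reader; once it has been called the reader can no longer be used for searching or document retrieval. Because IndexReader implements Closeable, try-with-resources is a convenient way to guarantee the call even on error paths. A minimal sketch, assuming a Lucene 5+ style API where FSDirectory.open takes a Path; the "index" directory name is hypothetical:

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.store.FSDirectory;

public class CloseReaderSketch {
    public static void main(String[] args) throws Exception {
        // "index" is a hypothetical on-disk index directory
        try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("index")))) {
            System.out.println("maxDoc=" + reader.maxDoc() + ", numDocs=" + reader.numDocs());
        } // close() runs automatically here, even if an exception was thrown
    }
}

The examples below, by contrast, all call close() explicitly, which is the common pattern when the reader's lifetime spans more than one block.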
From source file:ReadFiles.java
License:Apache License
public static Result doRandFetch(String path, DIRTYPE type, IndexReader ir, int randfetchnr) throws IOException {
    IndexReader reader;
    Result r = new Result();
    long beginTs, endTs;

    if (ir != null)
        reader = ir;
    else {
        beginTs = System.currentTimeMillis();
        switch (type) {
        default:
        case MMAP:
            reader = DirectoryReader.open(MMapDirectory.open(new File(path)));
            break;
        case NIO:
            reader = DirectoryReader.open(NIOFSDirectory.open(new File(path)));
            break;
        case SIMPLE:
            reader = DirectoryReader.open(SimpleFSDirectory.open(new File(path)));
            break;
        }
        endTs = System.currentTimeMillis();
        r.initTs += endTs - beginTs;
        r.initTsNr += 1;
    }

    System.out.println("-----RandFt it------");
    try {
        Thread.sleep(10000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    // Randomized the fetch
    Random rand = new Random();
    beginTs = System.currentTimeMillis();
    int maxDoc = reader.maxDoc();
    if (randfetchnr > 0)
        maxDoc = randfetchnr;
    for (int i = 0; i < maxDoc; i++) {
        Document doc = reader.document(rand.nextInt(maxDoc));
        doc.get("foo");
        doc.get("bar");
        //System.out.println("Key: " + doc.get("foo") + ", Value: " + doc.get("bar"));
    }
    endTs = System.currentTimeMillis();
    r.fetchTs += endTs - beginTs;
    r.fetchTsNr += maxDoc;

    if (ir == null) {
        beginTs = System.currentTimeMillis();
        reader.close();
        endTs = System.currentTimeMillis();
        r.closeTs += endTs - beginTs;
        r.closeTsNr += 1;
    }
    return r;
}
From source file:ReadFiles.java
License:Apache License
public static Result doRepOpenClose(String path, DIRTYPE type, long nr) throws IOException {
    IndexReader reader;
    Result r = new Result();
    long beginTs, endTs;

    System.out.println("-----Open/Close it------");
    for (int i = 0; i < nr; i++) {
        beginTs = System.currentTimeMillis();
        switch (type) {
        default:
        case MMAP:
            reader = DirectoryReader.open(MMapDirectory.open(new File(path)));
            break;
        case NIO:
            reader = DirectoryReader.open(NIOFSDirectory.open(new File(path)));
            break;
        case SIMPLE:
            reader = DirectoryReader.open(SimpleFSDirectory.open(new File(path)));
            break;
        }
        endTs = System.currentTimeMillis();
        r.initTs += endTs - beginTs;
        r.initTsNr += 1;

        beginTs = System.currentTimeMillis();
        reader.close();
        endTs = System.currentTimeMillis();
        r.closeTs += endTs - beginTs;
        r.closeTsNr += 1;
    }
    return r;
}
From source file:SearchFilesTest.java
License:Apache License
/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
    String index = args[0];
    String field = "contents";
    String queries = "resources/query.txt";
    String queryString = null;
    int hitsPerPage = 10;

    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(index)));
    IndexSearcher searcher = new IndexSearcher(reader);

    File stopWordsFile = new File("resources/stop.txt");
    CharArraySet stopWordsCharArraySet = WordlistLoader.getWordSet(new FileReader(stopWordsFile),
            Version.LUCENE_47);
    Analyzer analyzer = new RomanianAnalyzerUsingAnotherConstructorForStopwordAnalyzer(Version.LUCENE_47,
            stopWordsCharArraySet);

    BufferedReader in = null;
    in = new BufferedReader(new InputStreamReader(new FileInputStream(queries), codification));
    QueryParser parser = new QueryParser(Version.LUCENE_47, field, analyzer);
    while (true) {
        String line = in.readLine();
        if (line == null || line.length() == -1) {
            break;
        }
        line = line.trim();
        if (line.length() == 0) {
            break;
        }

        Query query = parser.parse(line);
        System.out.println("Looking for: " + query.toString(field));

        doPagingSearch(in, searcher, query);

        if (queryString != null) {
            break;
        }
    }
    reader.close();
}
From source file:LuceneSearchFiles.java
License:Apache License
/** Simple command-line based search demo. */
public static void search(String phrase, String field, int hitsPerPage) {
    try {
        IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(m_index)));
        IndexSearcher searcher = new IndexSearcher(reader);
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);

        QueryParser parser = new QueryParser(Version.LUCENE_40, field, analyzer);
        Query query = parser.parse(phrase);
        System.out.println("Searching for: " + query.toString(field));

        searcher.search(query, null, hitsPerPage);
        TopDocs results = searcher.search(query, hitsPerPage);
        ScoreDoc[] hits = results.scoreDocs;
        for (ScoreDoc hit : hits) {
            Document doc = searcher.doc(hit.doc);
            //String path = doc.get("path");
            String title = doc.get("title");
            System.out.println(hit.score + " -" + title);
        }
        reader.close();
    } catch (IOException e) {
        // exceptions are silently swallowed in this demo
    } catch (ParseException e) {
        // exceptions are silently swallowed in this demo
    }
}
From source file:TfIdfViewer.java
License:Apache License
/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava QueryConvert [-index dir]";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }

    String index = "index";
    String field = "contents";
    String queries = null;
    String queryString = null;

    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            index = args[i + 1];
            i++;
        } else if ("-field".equals(args[i])) {
            field = args[i + 1];
            i++;
        }
    }

    // create a reader and a searcher for the index
    IndexReader reader = IndexReader.open(FSDirectory.open(new File(index)));
    IndexSearcher searcher = new IndexSearcher(reader);

    // create the reader from where we'll read filenames
    BufferedReader in = null;
    if (queries != null) {
        in = new BufferedReader(new InputStreamReader(new FileInputStream(queries), "UTF-8"));
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
    }

    while (true) {
        // get two filenames
        System.out.println("Enter filename 1 (or hit <RETURN>): ");
        String f1 = in.readLine();
        if (f1 == null || f1.length() == -1)
            break;
        f1 = f1.trim();
        if (f1.length() == 0)
            break;
        System.out.println("Enter filename 2: ");
        String f2 = in.readLine();

        // get the docId's of the two filenames in the index
        int id1 = findDocId(searcher, f1);
        if (id1 < 0) {
            System.out.println("No file " + f1 + " found in index!");
            break;
        }
        int id2 = findDocId(searcher, f2);
        if (id2 < 0) {
            System.out.println("No file " + f2 + " found in index!");
            break;
        }

        // convert them to tf-idf format
        TermWeight[] v1 = toTfIdf(reader, id1);
        TermWeight[] v2 = toTfIdf(reader, id2);

        // print them out,
        System.out.println("-----------------------");
        printTermWeightVector(v1);
        System.out.println("-----------------------");
        printTermWeightVector(v2);
        System.out.println("-----------------------");

        // and print their cosine similarity
        System.out.println("The cosine similarity of the two files is: " + cosineSimilarity(v1, v2));
    }
    searcher.close();
    reader.close();
}
From source file:action.indexing.IndexingTest.java
License:Apache License
public void testIndexReader() throws IOException {
    IndexReader reader = IndexReader.open(directory);
    assertEquals(ids.length, reader.maxDoc());  // 8
    assertEquals(ids.length, reader.numDocs()); // 8
    reader.close();
}
From source file:antnlp.opie.indexsearch.SearchFiles.java
License:Apache License
/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/core/4_1_0/demo/ for details.";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }

    String index = "index";
    String field = "contents";
    String queries = null;
    int repeat = 0;
    boolean raw = false;
    String queryString = null;
    int hitsPerPage = 10;

    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            index = args[i + 1];
            i++;
        } else if ("-field".equals(args[i])) {
            field = args[i + 1];
            i++;
        } else if ("-queries".equals(args[i])) {
            queries = args[i + 1];
            i++;
        } else if ("-query".equals(args[i])) {
            queryString = args[i + 1];
            i++;
        } else if ("-repeat".equals(args[i])) {
            repeat = Integer.parseInt(args[i + 1]);
            i++;
        } else if ("-raw".equals(args[i])) {
            raw = true;
        } else if ("-paging".equals(args[i])) {
            hitsPerPage = Integer.parseInt(args[i + 1]);
            if (hitsPerPage <= 0) {
                System.err.println("There must be at least 1 hit per page.");
                System.exit(1);
            }
            i++;
        }
    }

    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    //Analyzer analyzer = new StandardAnalyzer();
    //Analyzer analyzer = new StandardAnalyzer(CharArraySet.EMPTY_SET);
    Analyzer analyzer = new WhitespaceAnalyzer();

    BufferedReader in = null;
    if (queries != null) {
        in = Files.newBufferedReader(Paths.get(queries), StandardCharsets.UTF_8);
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, StandardCharsets.UTF_8));
    }
    //QueryParser parser = new QueryParser(field, analyzer);
    QueryBuilder builder = new QueryBuilder(analyzer);
    while (true) {
        if (queries == null && queryString == null) {
            // prompt the user
            System.out.println("Enter query: ");
        }

        String line = queryString != null ? queryString : in.readLine();
        if (line == null || line.length() == -1) {
            break;
        }
        line = line.trim();
        if (line.length() == 0) {
            break;
        }

        //Query query = parser.parse(line);
        Query query = builder.createPhraseQuery(field, line);
        System.out.println("Searching for: " + query.toString(field));

        if (repeat > 0) {
            // repeat & time as benchmark
            Date start = new Date();
            for (int i = 0; i < repeat; i++) {
                searcher.search(query, 100);
            }
            Date end = new Date();
            System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
        }

        doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);

        if (queryString != null) {
            break;
        }
    }
    reader.close();
}
From source file:aos.lucene.search.advanced.BooksLikeThis.java
License:Apache License
public static void main(String[] args) throws IOException {
    Directory dir = TestUtil.getBookIndexDirectory();

    IndexReader reader = DirectoryReader.open(dir);
    int numDocs = reader.maxDoc();

    BooksLikeThis blt = new BooksLikeThis(reader);
    for (int i = 0; i < numDocs; i++) {
        // LOGGER.info();
        Document doc = reader.document(i);
        LOGGER.info(doc.get("title"));

        Document[] docs = blt.docsLike(i, 10);
        if (docs.length == 0) {
            LOGGER.info("  None like this");
        }
        for (Document likeThisDoc : docs) {
            LOGGER.info("  -> " + likeThisDoc.get("title"));
        }
    }
    reader.close();
    dir.close();
}
From source file:aos.lucene.search.advanced.FunctionQueryTest.java
License:Apache License
public void testRecency() throws Throwable {
    Directory dir = TestUtil.getBookIndexDirectory();
    IndexReader r = DirectoryReader.open(dir);
    IndexSearcher s = new IndexSearcher(r);
    s.setDefaultFieldSortScoring(true, true);

    QueryParser parser = new QueryParser(Version.LUCENE_46, "contents",
            new StandardAnalyzer(Version.LUCENE_46));
    Query q = parser.parse("java in action");              // #A
    Query q2 = new RecencyBoostingQuery(q,                 // #B
            2.0, 2 * 365, "pubmonthAsDay");
    Sort sort = new Sort(new SortField[] { SortField.FIELD_SCORE,
            new SortField("title2", SortField.STRING) });
    TopDocs hits = s.search(q2, null, 5, sort);

    for (int i = 0; i < hits.scoreDocs.length; i++) {
        Document doc = r.document(hits.scoreDocs[i].doc);
        LOGGER.info((1 + i) + ": " + doc.get("title") + ": pubmonth=" + doc.get("pubmonth") + " score="
                + hits.scoreDocs[i].score);
    }

    s.close();
    r.close();
    dir.close();
}
From source file:aos.lucene.search.ext.payloads.PayloadsTest.java
License:Apache License
public void testPayloadTermQuery() throws Throwable {
    addDoc("Hurricane warning",
            "Bulletin: A hurricane warning was issued at " + "6 AM for the outer great banks");
    addDoc("Warning label maker",
            "The warning label maker is a delightful toy for " + "your precocious seven year old's warning needs");
    addDoc("Tornado warning",
            "Bulletin: There is a tornado warning for " + "Worcester county until 6 PM today");

    IndexReader r = writer.getReader();
    writer.close();

    IndexSearcher searcher = new IndexSearcher(r);
    searcher.setSimilarity(new BoostingSimilarity());

    Term warning = new Term("contents", "warning");

    Query query1 = new TermQuery(warning);
    LOGGER.info("\nTermQuery results:");
    TopDocs hits = searcher.search(query1, 10);
    TestUtil.dumpHits(searcher, hits);

    assertEquals("Warning label maker",                        // #B
            searcher.doc(hits.scoreDocs[0].doc).get("title")); // #B

    Query query2 = new PayloadTermQuery(warning, new AveragePayloadFunction());
    LOGGER.info("\nPayloadTermQuery results:");
    hits = searcher.search(query2, 10);
    TestUtil.dumpHits(searcher, hits);

    assertEquals("Warning label maker",                        // #C
            searcher.doc(hits.scoreDocs[2].doc).get("title")); // #C

    r.close();
    searcher.close();
}