List of usage examples for org.apache.lucene.index.IndexReader#close()
@Override public final synchronized void close() throws IOException
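Before the per-project examples below, here is a minimal sketch (not taken from any of the listed sources) of the pattern most of them hand-roll in finally blocks: in Lucene 5+ both Directory and IndexReader are Closeable, so try-with-resources guarantees close() runs even when searching throws. The index path and the use of numDocs() are illustrative assumptions only.

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.FSDirectory;

public class CloseWithTryWithResources {
    public static void main(String[] args) throws Exception {
        // Both resources are closed automatically, in reverse order, on normal exit or exception.
        try (FSDirectory dir = FSDirectory.open(Paths.get("index"));
             IndexReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            System.out.println("Documents in index: " + reader.numDocs());
            // ... run searches with searcher here ...
        } // reader.close() and dir.close() happen here
    }
}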
From source file:com.xx.platform.web.listener.WebApplicationContextListener.java
private void doFlushDelDoc() {
    // TODO Auto-generated method stub
    LOG.info("......");
    IndexReader reader = null;
    try {
        List<String> delList = BerkeleyDB.getDelDocQuery();
        File file = new File(SearchContext.search_dir + File.separator + "index");
        if (!file.exists()) {
            LOG.info("");
            SearchContext.isInit = true;
            return;
        }
        Directory directory = FSDirectory.getDirectory(file, false);
        reader = IndexReader.open(directory);
        if (reader.isLocked(SearchContext.search_dir + File.separator + "index")) {
            LOG.info("JVM");
            reader.unlock(FSDirectory.getDirectory(SearchContext.search_dir + File.separator + "index"));
            if (!reader.isLocked(SearchContext.search_dir + File.separator + "index")) {
                LOG.info("");
            } else {
                LOG.info(",");
                NutchCommand.setCrawl(false);
                throw new IOException(",");
            }
        }
        if (delList != null) {
            for (String ls : delList) {
                reader.deleteDocument(Integer.valueOf(ls));
            }
            LOG.info("");
        }
    } catch (Exception e) {
        LOG.info("" + e.getMessage());
        // TODO Auto-generated catch block
        e.printStackTrace();
    } finally {
        try {
            if (reader != null) {
                boolean isOp = !reader.isOptimized();
                reader.close();
                if (isOp) {
                    optimize();
                }
            }
        } catch (Exception e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }
    SearchContext.isInit = true;
}
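The example above deletes documents and clears locks through the reader (deleteDocument, isLocked, unlock), an API that only exists in very old Lucene releases. As a hedged sketch of how a comparable delete-then-close flow might look on a current Lucene version (an assumption, not part of the original source), deletes go through IndexWriter and the explicit lock handling disappears; the index path, the "id" field name and the helper signature below are invented for illustration.

import java.nio.file.Paths;
import java.util.List;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.FSDirectory;

public class DeleteByIdSketch {
    public static void deleteByIds(String indexDir, List<String> ids) throws Exception {
        IndexWriterConfig cfg = new IndexWriterConfig(new StandardAnalyzer());
        try (FSDirectory dir = FSDirectory.open(Paths.get(indexDir));
             IndexWriter writer = new IndexWriter(dir, cfg)) {
            for (String id : ids) {
                // delete every document whose "id" field matches this value
                writer.deleteDocuments(new Term("id", id));
            }
            writer.commit(); // make the deletes visible to readers opened afterwards
        } // writer.close() and dir.close() run here
    }
}

If the old optimize() step is really wanted, writer.forceMerge(1) is the closest modern equivalent, though it is rarely necessary.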
From source file:com.yangxu.searchengine.search.SearchFiles.java
License:Apache License
public void makeSearch(String field, String queries, int hitsPerPage) throws Exception {
    int repeat = 10;
    boolean raw = false;
    String queryString = null;
    if (hitsPerPage <= 0) {
        System.err.println("There must be at least 1 hit per page.");
        System.exit(1);
    }
    IndexReader reader = IndexReader.open(FSDirectory.open(new File(indexPath)));
    IndexSearcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new SmartChineseAnalyzer(Version.LUCENE_31);
    BufferedReader in = null;
    if (queries != null) {
        in = new BufferedReader(new InputStreamReader(new FileInputStream(queries), "UTF-8"));
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
    }
    QueryParser parser = new QueryParser(Version.LUCENE_31, field, analyzer);
    while (true) {
        if (queries == null && queryString == null) {
            // prompt the user
            System.out.println("Enter query: ");
        }
        String line = queryString != null ? queryString : in.readLine();
        if (line == null || line.length() == -1) {
            break;
        }
        line = line.trim();
        if (line.length() == 0) {
            break;
        }
        Query query = parser.parse(line);
        System.out.println("Searching for: " + query.toString(field));
        if (repeat > 0) {
            // repeat & time as benchmark
            Date start = new Date();
            for (int i = 0; i < repeat; i++) {
                searcher.search(query, null, 100);
            }
            Date end = new Date();
            System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
        }
        doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);
        if (queryString != null) {
            break;
        }
    }
    searcher.close();
    reader.close();
}
From source file:com.yangxu.searchengine.service.impl.SearchFiles.java
License:Apache License
/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/java/4_0/demo.html for details.";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }
    String index = "index";
    String field = "contents";
    String queries = null;
    int repeat = 0;
    boolean raw = false;
    String queryString = null;
    int hitsPerPage = 10;
    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            index = args[i + 1];
            i++;
        } else if ("-field".equals(args[i])) {
            field = args[i + 1];
            i++;
        } else if ("-queries".equals(args[i])) {
            queries = args[i + 1];
            i++;
        } else if ("-query".equals(args[i])) {
            queryString = args[i + 1];
            i++;
        } else if ("-repeat".equals(args[i])) {
            repeat = Integer.parseInt(args[i + 1]);
            i++;
        } else if ("-raw".equals(args[i])) {
            raw = true;
        } else if ("-paging".equals(args[i])) {
            hitsPerPage = Integer.parseInt(args[i + 1]);
            if (hitsPerPage <= 0) {
                System.err.println("There must be at least 1 hit per page.");
                System.exit(1);
            }
            i++;
        }
    }
    IndexReader reader = IndexReader.open(FSDirectory.open(new File(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_31);
    BufferedReader in = null;
    if (queries != null) {
        in = new BufferedReader(new InputStreamReader(new FileInputStream(queries), "UTF-8"));
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
    }
    QueryParser parser = new QueryParser(Version.LUCENE_31, field, analyzer);
    while (true) {
        if (queries == null && queryString == null) {
            // prompt the user
            System.out.println("Enter query: ");
        }
        String line = queryString != null ? queryString : in.readLine();
        if (line == null || line.length() == -1) {
            break;
        }
        line = line.trim();
        if (line.length() == 0) {
            break;
        }
        Query query = parser.parse(line);
        System.out.println("Searching for: " + query.toString(field));
        if (repeat > 0) {
            // repeat & time as benchmark
            Date start = new Date();
            for (int i = 0; i < repeat; i++) {
                searcher.search(query, null, 100);
            }
            Date end = new Date();
            System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
        }
        doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);
        if (queryString != null) {
            break;
        }
    }
    searcher.close();
    reader.close();
}
From source file:com.Yasna.forum.database.DbSearchIndexer.java
License:Open Source License
/**
 * Deletes a message from the index.
 */
protected final void deleteMessagesFromIndex(int[] messages) throws IOException {
    if (messages == null) {
        return;
    }
    IndexReader reader = getReader();
    if (reader == null) {
        // Reader will be null if the search index doesn't exist.
        return;
    }
    Term messageIDTerm;
    for (int i = 0; i < messages.length; i++) {
        messageIDTerm = new Term("messageID", Integer.toString(messages[i]));
        try {
            reader.delete(messageIDTerm);
        } catch (Exception e) {
        }
    }
    try {
        reader.close();
    } catch (Exception e) {
    }
}
From source file:com.zimbra.cs.index.RawIndexEditor.java
License:Open Source License
void dumpAll() throws IOException {
    IndexReader reader = IndexReader.open(luceneDirectory);
    try {
        int maxDoc = reader.maxDoc();
        System.out.println("There are " + maxDoc + " documents in this index.");
        for (int i = 0; i < maxDoc; i++) {
            dumpDocument(reader.document(i), reader.isDeleted(i));
        }
    } finally {
        reader.close();
    }
}
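For comparison, a small sketch of the same dump loop against a newer Lucene API (Lucene 8+ assumed; this is not part of the Zimbra source): isDeleted(int) no longer exists, and deletions are exposed instead as a Bits set of live documents.

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiBits;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Bits;

public class DumpAllSketch {
    public static void main(String[] args) throws Exception {
        try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("index")))) {
            // liveDocs is null when the index contains no deletions at all
            Bits liveDocs = MultiBits.getLiveDocs(reader);
            System.out.println("There are " + reader.maxDoc() + " documents in this index.");
            for (int i = 0; i < reader.maxDoc(); i++) {
                boolean deleted = liveDocs != null && !liveDocs.get(i);
                System.out.println("doc " + i + (deleted ? " (deleted)" : ": " + reader.document(i)));
            }
        } // reader.close() runs here
    }
}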
From source file:com.zimbra.cs.rmgmt.RemoteMailQueue.java
License:Open Source License
public SearchResult search(Query query, int offset, int limit) throws ServiceException {
    SearchResult result = new SearchResult();
    IndexReader indexReader = null;
    try {
        if (!mIndexPath.exists()) {
            return result;
        }
        indexReader = IndexReader.open(LuceneDirectory.open(mIndexPath));
        summarize(result, indexReader);
        if (query == null) {
            list0(result, indexReader, offset, limit);
        } else {
            search0(result, indexReader, query, offset, limit);
        }
    } catch (Exception e) {
        throw ServiceException.FAILURE("exception occurred searching mail queue", e);
    } finally {
        if (indexReader != null) {
            try {
                indexReader.close();
            } catch (IOException ioe) {
                ZimbraLog.rmgmt.warn("exception occurred closing index reader from search", ioe);
            }
        }
    }
    return result;
}
From source file:com.zimbra.cs.rmgmt.RemoteMailQueue.java
License:Open Source License
public void action(Server server, QueueAction action, String[] ids) throws ServiceException {
    if (ZimbraLog.rmgmt.isDebugEnabled())
        ZimbraLog.rmgmt.debug("action=" + action + " ids=" + Arrays.deepToString(ids) + " " + this);
    RemoteManager rm = RemoteManager.getRemoteManager(server);
    IndexReader indexReader = null;
    try {
        boolean all = false;
        if (ids.length == 1 && ids[0].equals("ALL")) {
            // Special case ALL that postsuper supports
            clearIndex();
            all = true;
        } else {
            indexReader = IndexReader.open(LuceneDirectory.open(mIndexPath), false);
        }
        int done = 0;
        int total = ids.length;
        while (done < total) {
            int last = Math.min(total, done + MAX_REMOTE_EXECUTION_QUEUEIDS);
            StringBuilder sb = new StringBuilder(128 + (last * MAX_LENGTH_OF_QUEUEIDS));
            sb.append("zmqaction " + action.toString() + " " + mQueueName + " ");
            int i;
            boolean first = true;
            for (i = done; i < last; i++) {
                if (first) {
                    first = false;
                } else {
                    sb.append(",");
                }
                if (!all) {
                    Term toDelete = new Term(QueueAttr.id.toString(), ids[i].toLowerCase());
                    int numDeleted = indexReader.deleteDocuments(toDelete);
                    mNumMessages.getAndAdd(-numDeleted);
                    if (ZimbraLog.rmgmt.isDebugEnabled())
                        ZimbraLog.rmgmt.debug("deleting term:" + toDelete + ", docs deleted=" + numDeleted);
                }
                sb.append(ids[i].toUpperCase());
            }
            done = last;
            rm.execute(sb.toString());
        }
    } catch (IOException ioe) {
        throw ServiceException.FAILURE("exception occurred performing queue action", ioe);
    } finally {
        if (indexReader != null) {
            try {
                indexReader.close();
            } catch (IOException ioe) {
                ZimbraLog.rmgmt.warn("exception occurred closing index reader during action", ioe);
            }
        }
    }
}
From source file:com.zsq.lucene.chapter1.SearchFiles.java
License:Apache License
/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/core/4_1_0/demo/ for details.";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }
    String index = "index";
    String field = "contents";
    String queries = null;
    int repeat = 0;
    boolean raw = false;
    String queryString = null;
    int hitsPerPage = 10;
    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            index = args[i + 1];
            i++;
        } else if ("-field".equals(args[i])) {
            field = args[i + 1];
            i++;
        } else if ("-queries".equals(args[i])) {
            queries = args[i + 1];
            i++;
        } else if ("-query".equals(args[i])) {
            queryString = args[i + 1];
            i++;
        } else if ("-repeat".equals(args[i])) {
            repeat = Integer.parseInt(args[i + 1]);
            i++;
        } else if ("-raw".equals(args[i])) {
            raw = true;
        } else if ("-paging".equals(args[i])) {
            hitsPerPage = Integer.parseInt(args[i + 1]);
            if (hitsPerPage <= 0) {
                System.err.println("There must be at least 1 hit per page.");
                System.exit(1);
            }
            i++;
        }
    }
    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_43);
    BufferedReader in = null;
    if (queries != null) {
        in = Files.newBufferedReader(Paths.get(queries), StandardCharsets.UTF_8);
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, StandardCharsets.UTF_8));
    }
    QueryParser parser = new QueryParser(Version.LUCENE_43, field, analyzer);
    while (true) {
        if (queries == null && queryString == null) {
            // prompt the user
            System.out.println("Enter query: ");
        }
        String line = queryString != null ? queryString : in.readLine();
        if (line == null || line.length() == -1) {
            break;
        }
        line = line.trim();
        if (line.length() == 0) {
            break;
        }
        Query query = parser.parse(line);
        System.out.println("Searching for: " + query.toString(field));
        if (repeat > 0) {
            // repeat & time as benchmark
            Date start = new Date();
            for (int i = 0; i < repeat; i++) {
                searcher.search(query, 100);
            }
            Date end = new Date();
            System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
        }
        doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);
        if (queryString != null) {
            break;
        }
    }
    reader.close();
}
From source file:concurrency.SearchFiles.java
License:Apache License
/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/core/4_1_0/demo/ for details.";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }
    String index = "/Users/rene/learn/topic-index"; // "/Users/rene/learn/learn5/lucene/finder/index";
    String field = "contents";
    String queries = null;
    int repeat = 0;
    boolean raw = false;
    // regular search
    // String queryString = "computer";
    String queryString = "viewresolver spring"; // "fitness"; "ExecutorService"; "EventListener"; "Country"; "Preconditions";
    // wildcard query
    // String queryString = "te*t";
    // fuzzy query
    // String queryString = "roam~2";
    // phrase query
    // String queryString = "\"apache lucene\"~5";
    // boolean search
    // String queryString = "\"networks\" AND \"protocol\"";
    // boosted search
    // String queryString = "computer^10 crime";
    int hitsPerPage = 100;
    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new StandardAnalyzer();
    BufferedReader in = null;
    QueryParser parser = new QueryParser(field, analyzer);
    Query query = parser.parse(queryString);
    System.out.println("Searching for: " + query.toString(field));
    searcher.search(query, null, 1000); // hitsPerPage); // 100);
    doSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);
    reader.close();
}
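The commented-out query strings in the example above (wildcard, fuzzy, phrase, boolean, boosted) all go through the same parser. As a small illustrative sketch, assuming the classic QueryParser of Lucene 5+ and a "contents" field (both assumptions, not taken from the source), this is how each syntax parses:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.Query;

public class QuerySyntaxSketch {
    public static void main(String[] args) throws Exception {
        QueryParser parser = new QueryParser("contents", new StandardAnalyzer());
        String[] samples = {
            "te*t",                          // wildcard
            "roam~2",                        // fuzzy, max edit distance 2
            "\"apache lucene\"~5",           // phrase with slop
            "\"networks\" AND \"protocol\"", // boolean
            "computer^10 crime"              // boosted term
        };
        for (String s : samples) {
            Query q = parser.parse(s);
            System.out.println(s + "  ->  " + q.getClass().getSimpleName() + ": " + q);
        }
    }
}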
From source file:control.Search.java
/**
 * Search for previously indexed feeds through 'title' and 'description' fields, according to a query.
 *
 * @param query terms to be considered in the search
 * @return a JSON representation of the retrieved feeds
 * @throws ParseException query parsing failure
 * @throws IOException I/O issue when creating index
 */
public String queryIndexedFeeds(String query) throws ParseException, IOException {
    // creates IndexReader with analyzers
    IndexReader reader = DirectoryReader.open(index);
    IndexSearcher searcher = new IndexSearcher(reader);
    StandardAnalyzer analyzer = new StandardAnalyzer();
    MultiFieldQueryParser queryParser = new MultiFieldQueryParser(new String[] { "title", "description" }, analyzer);
    // search for documents
    TopDocs docs = searcher.search(queryParser.parse(query), 25);
    ScoreDoc[] hits = docs.scoreDocs;
    // iterate over results and put on JSON format
    JSONArray jsonArray = new JSONArray();
    for (int i = 0; i < hits.length; i++) {
        int docId = hits[i].doc;
        Document d = searcher.doc(docId);
        // create new json object
        JSONObject json = new JSONObject();
        json.put("id", d.get("id"));
        json.put("link", d.get("link"));
        json.put("title", d.get("title"));
        json.put("description", d.get("description"));
        jsonArray.put(json);
    }
    reader.close();
    String ret = jsonArray.toString();
    return ret;
}