Usage examples for org.apache.lucene.index.IndexReader.close()
@Override public final synchronized void close() throws IOException
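IndexReader implements java.io.Closeable, so the simplest way to guarantee close() runs is a try-with-resources block rather than a manual call at the end of a method, as several of the examples below do. The following is a minimal sketch, assuming Lucene 5.x or later (Path-based FSDirectory.open, QueryParser from the queryparser module); the index path, field name, and query string are placeholders.

import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class TryWithResourcesSearch {
    public static void main(String[] args) throws Exception {
        // "index" and "contents" are placeholder values for this sketch.
        try (Directory dir = FSDirectory.open(Paths.get("index"));
                IndexReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            Query query = new QueryParser("contents", new StandardAnalyzer()).parse("lucene");
            TopDocs hits = searcher.search(query, 10);
            System.out.println("hits: " + hits.scoreDocs.length);
        } // reader.close() (and dir.close()) run here, even if parse or search throws
    }
}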
From source file: main.java.run.SearchFiles.java
License: Apache License

/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/core/4_1_0/demo/ for details.";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }

    String index = "index";
    String field = "contents";
    String queries = null;
    int repeat = 0;
    boolean raw = false;
    String queryString = null;
    int hitsPerPage = 10;

    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            index = args[i + 1];
            i++;
        } else if ("-field".equals(args[i])) {
            field = args[i + 1];
            i++;
        } else if ("-queries".equals(args[i])) {
            queries = args[i + 1];
            i++;
        } else if ("-query".equals(args[i])) {
            queryString = args[i + 1];
            i++;
        } else if ("-repeat".equals(args[i])) {
            repeat = Integer.parseInt(args[i + 1]);
            i++;
        } else if ("-raw".equals(args[i])) {
            raw = true;
        } else if ("-paging".equals(args[i])) {
            hitsPerPage = Integer.parseInt(args[i + 1]);
            if (hitsPerPage <= 0) {
                System.err.println("There must be at least 1 hit per page.");
                System.exit(1);
            }
            i++;
        }
    }

    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    searcher.setSimilarity(new BM25Similarity());
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);

    BufferedReader in = null;
    if (queries != null) {
        in = new BufferedReader(new InputStreamReader(new FileInputStream(queries), "UTF-8"));
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
    }
    QueryParser parser = new QueryParser(Version.LUCENE_40, field, analyzer);
    while (true) {
        if (queries == null && queryString == null) {
            // prompt the user
            System.out.println("Enter query: ");
        }

        String line = queryString != null ? queryString : in.readLine();
        if (line == null || line.length() == -1) {
            break;
        }

        line = line.trim();
        if (line.length() == 0) {
            break;
        }

        Query query = parser.parse(line);
        System.out.println("Searching for: " + query.toString(field));

        if (repeat > 0) {
            // repeat & time as benchmark
            Date start = new Date();
            for (int i = 0; i < repeat; i++) {
                searcher.search(query, null, 100);
            }
            Date end = new Date();
            System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
        }

        doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);

        if (queryString != null) {
            break;
        }
    }
    reader.close();
}
From source file: mm.SearchFiles.java
License: Apache License

public static int searchQuery(String q) throws Exception {
    List<String> result = null;
    int hits = 0;
    String queryString = q;
    int hitsPerPage = 100;
    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new StandardAnalyzer();
    BufferedReader in = null;
    QueryParser parser = new QueryParser(field, analyzer);
    Query query = parser.parse(queryString);
    System.out.println("Searching for: " + query.toString(field));
    searcher.search(query, 100);
    hits = doSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);
    reader.close();
    return hits;
}
From source file: model.searchData.java

public static int[] format(String args) throws ParseException, CorruptIndexException, IOException {
    int id[] = new int[100];
    File indexDir = new File(INDEX_PATH);
    String query = args;
    int maxHits = 100;
    IndexReader reader = IndexReader.open(FSDirectory.open(new File(INDEX_PATH)));
    IndexSearcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_35);
    QueryParser parser = new QueryParser(Version.LUCENE_35, FIELD_CONTENTS, analyzer);
    Query q = parser.parse(query);
    TopDocs hits = searcher.search(q, maxHits);
    ScoreDoc[] scoreDocs = hits.scoreDocs;
    System.out.println("hits=" + scoreDocs.length);
    System.out.println("Hits (rank,score,docId)");
    for (int n = 0; n < scoreDocs.length; ++n) {
        ScoreDoc sd = scoreDocs[n];
        float score = sd.score;
        int docId = sd.doc;
        id[n] = docId;
        System.out.println("");
        System.out.printf("%3d %4.2f %d\n", n, score, docId);
    }
    reader.close();
    return id;
}
From source file: moteurrecherche.Controleur.java

public void Search(ActionEvent event) {
    if (entier == 1) {
        if (motCle.getText().length() != 0) {
            Conteneur.getChildren().clear();
            SearchFiles sr = new SearchFiles();
            System.out.println(motCle.getText().length());
            try {
                Path path = Paths.get("index");
                Directory dir = FSDirectory.open(path);
                IndexReader ireader = DirectoryReader.open(dir);
                IndexSearcher s = new IndexSearcher(ireader);
                StandardAnalyzer analyzer = new StandardAnalyzer();
                QueryParser parser = new QueryParser("text", analyzer);
                Query wrappedQuery = new CustomizedScoreQuery(parser.parse(motCle.getText()), ireader);
                TopDocs topdocs = s.search(wrappedQuery, 20);

                ArrayList<Resultat> Res = new ArrayList<>();
                for (ScoreDoc sd : topdocs.scoreDocs) {
                    Res.add(new Resultat(s.doc(sd.doc).get("text"), s.doc(sd.doc).get("date"),
                            s.doc(sd.doc).get("identifiant"), s.doc(sd.doc).get("auteur"), sd.score,
                            s.doc(sd.doc).get("Social")));
                }
                ObservableList<Resultat> obs = FXCollections.observableArrayList(Res);

                TableColumn<Resultat, String> Texte = new TableColumn<>("Texte");
                Texte.setCellValueFactory(data -> new SimpleStringProperty(data.getValue().getText()));
                TableColumn<Resultat, String> auteur = new TableColumn<>("Auteur");
                auteur.setCellValueFactory(data -> new SimpleStringProperty(data.getValue().getAuteur()));
                TableColumn<Resultat, String> Date = new TableColumn<>("Date");
                Date.setCellValueFactory(data -> new SimpleStringProperty(data.getValue().getDate()));
                TableColumn<Resultat, String> Identifiant = new TableColumn<>("Identifiant");
                Identifiant.setCellValueFactory(data -> new SimpleStringProperty(data.getValue().getIdentifiant()));
                TableColumn<Resultat, String> Social = new TableColumn<>("Score social");
                Social.setCellValueFactory(data -> new SimpleStringProperty(data.getValue().getScoreSocial()));
                TableColumn<Resultat, String> Score = new TableColumn<>("Score Final");
                Score.setCellValueFactory(data -> new SimpleFloatProperty(data.getValue().getScoreFinal()).asString());

                table.getColumns().addAll(auteur, Date, Social, Score, Texte);
                table.setItems(obs);
                table.autosize();
                ireader.close();
                Conteneur.getChildren().add(table);
            } catch (Exception e) {
                e.printStackTrace();
            }
        } else {
            System.out.println("J adore les chatons");
        }
    }
}
From source file: narock.HighFreqTerms.java
License: Apache License

public static void main(String[] args) throws Exception {
    IndexReader reader = null;
    FSDirectory dir = null;
    String field = null;
    boolean IncludeTermFreqs = false;

    if (args.length == 0 || args.length > 4) {
        usage();
        System.exit(1);
    }

    if (args.length > 0) {
        dir = FSDirectory.open(new File(args[0]));
    }

    for (int i = 1; i < args.length; i++) {
        if (args[i].equals("-t")) {
            IncludeTermFreqs = true;
        } else {
            try {
                numTerms = Integer.parseInt(args[i]);
            } catch (NumberFormatException e) {
                field = args[i];
            }
        }
    }

    String[] fields = field != null ? new String[] { field } : null;

    reader = DirectoryReader.open(dir);
    TermStats[] terms = getHighFreqTerms(reader, numTerms, fields);
    if (!IncludeTermFreqs) {
        // default HighFreqTerms behavior
        for (int i = 0; i < terms.length; i++) {
            System.out.printf("%s:%s %,d \n", terms[i].field, terms[i].termtext.utf8ToString(),
                    terms[i].docFreq);
        }
    } else {
        TermStats[] termsWithTF = sortByTotalTermFreq(reader, terms);
        for (int i = 0; i < termsWithTF.length; i++) {
            System.out.printf("%s:%s \t totalTF = %,d \t doc freq = %,d \n", termsWithTF[i].field,
                    termsWithTF[i].termtext.utf8ToString(), termsWithTF[i].totalTermFreq,
                    termsWithTF[i].docFreq);
        }
    }
    reader.close();
}
From source file: net.conquiris.search.DefaultManagedReaderSupplier.java
License: Apache License

private void close(Reader reader) {
    if (reader == null) {
        return;
    }
    IndexReader ir = reader.get();
    if (ir == null) {
        return;
    }
    try {
        ir.close();
    } catch (Exception e) {
        // TODO: log
    }
}
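When a failure on close should be suppressed rather than propagated, as in the helper above, Lucene also ships org.apache.lucene.util.IOUtils for this pattern. Below is a minimal sketch, assuming a Lucene version where IOUtils.closeWhileHandlingException(Closeable...) is available (4.x and later); the closeQuietly helper name is only illustrative.

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.IOUtils;

final class ReaderClosing {
    // Illustrative helper, roughly equivalent to the close(Reader) method above:
    // IOUtils.closeWhileHandlingException skips null arguments and suppresses
    // any exception thrown by close() instead of rethrowing it.
    static void closeQuietly(IndexReader reader) {
        IOUtils.closeWhileHandlingException(reader);
    }

    private ReaderClosing() {
    }
}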
From source file: net.conquiris.search.EmptyIndexTest.java
License: Apache License

/** Missing index. */
@Test
public void missingRAMnrt() throws Exception {
    final Directory d = new RAMDirectory();
    final IndexWriter w = new IndexWriter(d,
            new IndexWriterConfig(Version.LUCENE_34, new StandardAnalyzer(Version.LUCENE_34)));
    try {
        final IndexReader r = IndexReader.open(w, true);
        try {
            IndexSearcher s = new IndexSearcher(r);
            s.search(new MatchAllDocsQuery(), 5);
            s.close();
        } finally {
            r.close();
        }
    } finally {
    }
}
From source file: net.hillsdon.reviki.search.impl.LuceneSearcher.java
License: Apache License

/**
 * Reusable template that cleans up properly.
 *
 * @param <T> Result type.
 * @param operation Operation to perform.
 * @param allIndices If true, search all indices (other wikis) not just our own.
 * @return Result from operation.
 * @throws IOException On index read error.
 * @throws QuerySyntaxException If we can't parse a query.
 */
private <T> T doReadOperation(final ReadOperation<T> operation, final boolean allIndices)
        throws IOException, QuerySyntaxException {
    createIndexIfNecessary();
    List<Searcher> searchers = new ArrayList<Searcher>();
    List<IndexReader> readers = new ArrayList<IndexReader>();
    /* First add our reader/searcher. If this fails, it's an error but clean up. */
    IndexReader reader = IndexReader.open(_dir);
    Searcher searcher = null;
    try {
        searcher = new IndexSearcher(reader);
        searchers.add(searcher);
        readers.add(reader);
    } finally {
        if (searcher == null) {
            reader.close();
        }
    }
    if (allIndices) {
        for (File dir : _otherDirs) {
            searcher = null;
            reader = null;
            try {
                reader = IndexReader.open(dir);
                searcher = new IndexSearcher(reader);
                searchers.add(searcher);
                readers.add(reader);
            } catch (Exception e) {
                // The index may not exist, but other wikis' indices aren't that important anyway, so
                // just don't search them.
                if (searcher != null) {
                    searcher.close();
                }
                if (reader != null) {
                    reader.close();
                }
            }
        }
    }
    try {
        /* Don't bother using a multi searcher if we only have one */
        if (searchers.size() > 1) {
            searcher = new MultiSearcher(searchers.toArray(new Searcher[] {}));
            /* Add to list of searchers so it gets closed */
            searchers.add(searcher);
        } else {
            searcher = searchers.get(0);
        }
        try {
            Analyzer analyzer = createAnalyzer();
            return operation.execute(readers.get(0), searcher, analyzer);
        } catch (ParseException ex) {
            throw new QuerySyntaxException(ex.getMessage(), ex);
        }
    } finally {
        for (Searcher s : searchers) {
            try {
                s.close();
            } catch (Exception e) {
            }
        }
        for (IndexReader r : readers) {
            try {
                r.close();
            } catch (Exception e) {
            }
        }
    }
}
From source file: net.homeip.donaldm.doxmentor4j.AjaxSearchHandler.java
License: Open Source License

private String search(String searchText, int maxHits, int pageNo)
//---------------------------------------------------------------
{
    DoxMentor4J app = DoxMentor4J.getApp();
    java.io.File archiveFile = app.getArchiveFile();
    String indexDirName = app.getIndexDir();
    String archiveIndexDirName = app.getArchiveIndexDir();
    if (((indexDirName == null) || (indexDirName.trim().length() == 0))
            && ((archiveIndexDirName == null) || (archiveIndexDirName.trim().length() == 0)))
        return errorMessage("Search index not defined in configuration file");
    if (indexDirName != null)
        indexDirName = indexDirName.trim();
    if (archiveIndexDirName != null)
        archiveIndexDirName = archiveIndexDirName.trim();
    Directory directory = null;
    IndexReader indexReader = null;
    Searcher searcher = null;
    StringBuilder html = new StringBuilder();
    try {
        try {
            if (archiveFile != null)
                IndexFactory.create(archiveFile, archiveIndexDirName, indexDirName, false, true);
            else
                IndexFactory.create(indexDirName, false, true);
            directory = IndexFactory.getDirectory();
            if (directory == null)
                return errorMessage("Could not open search index directory");
            indexReader = IndexReader.open(directory);
        } catch (Exception e) {
            logger.error("Could not open search index directory", e);
            return errorMessage("Could not open search index directory " + "<br>" + e.getMessage());
        }
        searcher = new IndexSearcher(indexReader);
        Analyzer analyzer = new StandardAnalyzer(DoxMentor4J.LUCENE_VERSION);
        QueryParser parser = new QueryParser(DoxMentor4J.LUCENE_VERSION, "contents", analyzer);
        Query query = null;
        try {
            query = parser.parse(searchText);
        } catch (ParseException e) {
            logger.error("Error parsing search text (" + searchText + ")", e);
            return errorMessage("Error parsing search text (" + searchText + ")<br>" + e.getMessage());
        }
        TopDocs hits = searcher.search(query, maxHits);
        final int count = hits.totalHits;
        if (count == 0)
            html.append("<div style=\"vertical-align: middle; \"><p>Query " + "returned no hits</p></div>");
        else {
            int pages = count / HITS_PER_PAGE;
            if ((count % HITS_PER_PAGE) != 0)
                pages++;
            int startHit = pageNo * HITS_PER_PAGE;
            int endHit = Math.min(startHit + HITS_PER_PAGE, count);
            html.append("<div style=\"vertical-align: top; \">");
            html.append("<p width=\"100%\" align=\"right\" class=\"search\">");
            html.append(String.format("Page %d/%d (Hits %d-%d of %d)", pageNo + 1, pages, startHit, endHit, count));
            html.append("</div>");
            html.append("<div style=\"vertical-align: middle; \">");
            for (int i = startHit; i < endHit; i++) {
                Document doc = searcher.doc(hits.scoreDocs[i].doc);
                String title = doc.get("title");
                String pg = doc.get("page");
                if (pg == null)
                    pg = "-1";
                int page;
                try {
                    page = Integer.parseInt(pg);
                } catch (Exception _e) {
                    page = -1;
                }
                if (title == null)
                    title = "<i>Title undefined</i>";
                String href = doc.get("path");
                if (href != null) {
                    html.append(String.format("<a href=\"%s\" class=\"search\">" + "%s (%s)</a>", href, title, href));
                    String fragment = getFragment(href, searchText, page);
                    if (fragment.length() > 0)
                        html.append(fragment);
                    html.append("<hr>");
                }
            }
            html.append("</div>");
            html.append("<div>");
            html.append("<p width=\"100%\" align=\"center\" class=\"search\">");
            // The javascript search function page no. is 1 based while pageNo is 0 based.
            if (pageNo > 0) {
                html.append("<font size=\"+1\"><a class=\"search\" " + "href=\"javascript:search('")
                        .append(searchText).append("',").append(pageNo)
                        .append(")\">Previous</a></font> ");
            }
            int startPage = Math.max(0, pageNo - DISPLAY_WINDOW_SIZE / 2);
            int endPage = Math.min(pages, pageNo + DISPLAY_WINDOW_SIZE / 2);
            for (int p = startPage; p < endPage; p++) {
                if (p != pageNo)
                    html.append("<a href=\"javascript:search('").append(searchText).append("',").append(p + 1)
                            .append(")\" " + "class=\"search\">").append(p + 1).append("</a> ");
                else
                    html.append(p + 1).append(" ");
            }
            if ((pageNo + 1) < pages)
                html.append("<font size=\"+1\"><a class=\"search\" " + "href=\"javascript:search('")
                        .append(searchText).append("',").append(pageNo + 2).append(")\"> Next</a></font>");
            html.append("</div>");
        }
    } catch (Exception e) {
        logger.error("Error creating search query (" + searchText + ")", e);
        return errorMessage("Error creating search query (" + searchText + ")<br>" + e.getMessage());
    } finally {
        if (searcher != null)
            try {
                searcher.close();
            } catch (Exception e) {
            }
        if (indexReader != null)
            try {
                indexReader.close();
            } catch (Exception e) {
            }
        IndexFactory.closeDirectory();
    }
    return html.toString();
}
From source file: net.java.ao.RelatedEntityImpl.java
License: Apache License

public RelatedEntity<?>[] getRelated() throws IOException {
    Class<? extends RawEntity<Object>> type = entity.getEntityType();
    String table = entity.getEntityManager().getTableNameConverter().getName(type);
    List<String> indexFields = Common.getSearchableFields(entity.getEntityManager(), type);
    String[] searchFields = new String[indexFields.size()];
    for (int i = 0; i < searchFields.length; i++) {
        searchFields[i] = table + '.' + indexFields.get(i);
    }

    Directory indexDir = ((SearchableEntityManager) entity.getEntityManager()).getIndexDir();
    IndexReader reader = null;
    try {
        reader = IndexReader.open(indexDir);
        IndexSearcher searcher = new IndexSearcher(indexDir);
        MoreLikeThis more = new MoreLikeThis(reader);
        more.setFieldNames(searchFields);
        more.setAnalyzer(((SearchableEntityManager) entity.getEntityManager()).getAnalyzer());

        int docID = -1;
        String primaryKeyField = Common.getPrimaryKeyField(entity.getEntityType(),
                entity.getEntityManager().getFieldNameConverter());
        Object primaryKeyValue = Common.getPrimaryKeyValue(entity);
        TermDocs docs = reader.termDocs(new Term(table + "." + primaryKeyField,
                Common.getPrimaryKeyType(type).valueToString(primaryKeyValue)));
        if (docs.next()) {
            docID = docs.doc();
        }
        if (docID < 0) {
            return (RelatedEntity<?>[]) Array.newInstance(type, 0);
        }

        org.apache.lucene.search.Query query = more.like(docID);
        Hits hits = searcher.search(query);
        List<RelatedEntity<?>> back = new ArrayList<RelatedEntity<?>>();
        for (int i = 0; i < hits.length(); i++) {
            String entityKey = hits.doc(i).get(table + "." + primaryKeyField);
            if (entityKey.equals(primaryKeyValue.toString())) {
                continue;
            }
            back.add((RelatedEntity<?>) entity.getEntityManager().peer(type,
                    Common.getPrimaryKeyType(type).defaultParseValue(entityKey)));
        }
        return back.toArray((RelatedEntity<?>[]) Array.newInstance(type, back.size()));
    } finally {
        if (reader != null) {
            try {
                reader.close();
            } catch (IOException e) {
            }
        }
    }
}