Usage examples for org.apache.lucene.search.vectorhighlight.FastVectorHighlighter#getBestFragments
public final String[] getBestFragments(final FieldQuery fieldQuery, IndexReader reader, int docId, String fieldName, int fragCharSize, int maxNumFragments) throws IOException
From source file:com.github.rnewson.couchdb.lucene.DatabaseIndexer.java
License:Apache License
/**
 * HTTP search endpoint: parses one or more query strings from the request,
 * executes each against the borrowed index searcher and writes the results as
 * JSON (or JSONP when a "callback" parameter is present).
 *
 * Supported request parameters visible in this method: analyzer,
 * default_operator, debug, rewrite, include_docs, highlights,
 * highlight_length, include_termvectors, include_fields, limit, sort, skip,
 * callback.
 *
 * Responds 304 when the client's cached copy (by ETag) is still valid and 400
 * on a query parse error. The borrowed searcher is always returned in the
 * finally block.
 *
 * NOTE(review): the final JSON selection calls result.getJSONObject(0), which
 * assumes getQueryStrings(req) yields at least one query — confirm that the
 * helper guarantees this (e.g. by erroring out earlier on a missing q).
 */
public void search(final HttpServletRequest req, final HttpServletResponse resp) throws IOException, JSONException {
    final IndexState state = getState(req, resp);
    if (state == null)
        return;
    final IndexSearcher searcher = state.borrowSearcher(isStaleOk(req));
    final String etag = state.getEtag();
    // phrase highlighting on, field match on
    final FastVectorHighlighter fvh = new FastVectorHighlighter(true, true);
    final JSONArray result = new JSONArray();
    try {
        // Short-circuit with 304 if the client already has this etag.
        if (state.notModified(req)) {
            resp.setStatus(304);
            return;
        }
        // One result row per query string in the request.
        for (final String queryString : getQueryStrings(req)) {
            final Analyzer analyzer = state.analyzer(req.getParameter("analyzer"));
            final Operator operator = "and".equalsIgnoreCase(req.getParameter("default_operator")) ? Operator.AND
                    : Operator.OR;
            final Query q = state.parse(queryString, operator, analyzer);
            final JSONObject queryRow = new JSONObject();
            queryRow.put("q", q.toString());
            if (getBooleanParameter(req, "debug")) {
                queryRow.put("plan", QueryPlan.toPlan(q));
                queryRow.put("analyzer", analyzer.getClass());
            }
            queryRow.put("etag", etag);
            if (getBooleanParameter(req, "rewrite")) {
                // "rewrite" mode: don't search, just report the rewritten query
                // and per-term document frequencies.
                final Query rewritten_q = q.rewrite(searcher.getIndexReader());
                queryRow.put("rewritten_q", rewritten_q.toString());
                final JSONObject freqs = new JSONObject();
                final Set<Term> terms = new HashSet<Term>();
                rewritten_q.extractTerms(terms);
                for (final Object term : terms) {
                    final int freq = searcher.getIndexReader().docFreq((Term) term);
                    freqs.put(term.toString(), freq);
                }
                queryRow.put("freqs", freqs);
            } else {
                // Perform the search.
                final TopDocs td;
                final StopWatch stopWatch = new StopWatch();
                final boolean include_docs = getBooleanParameter(req, "include_docs");
                final int highlights = getIntParameter(req, "highlights", 0);
                final int highlight_length = max(getIntParameter(req, "highlight_length", 18), 18); // min for fast term vector highlighter is 18
                // NOTE(review): include_termvectors is read but not referenced
                // below in this method — confirm whether it is still needed.
                final boolean include_termvectors = getBooleanParameter(req, "include_termvectors");
                final int limit = getIntParameter(req, "limit", ini.getInt("lucene.limit", 25));
                final Sort sort = CustomQueryParser.toSort(req.getParameter("sort"));
                final int skip = getIntParameter(req, "skip", 0);
                // Optional projection: only load the requested stored fields.
                final Set<String> fieldsToLoad;
                if (req.getParameter("include_fields") == null) {
                    fieldsToLoad = null;
                } else {
                    final String[] fields = Utils.splitOnCommas(req.getParameter("include_fields"));
                    final List<String> list = Arrays.asList(fields);
                    fieldsToLoad = new HashSet<String>(list);
                }
                // Fetch skip+limit hits so we can slice off the skipped prefix.
                if (sort == null) {
                    td = searcher.search(q, null, skip + limit);
                } else {
                    td = searcher.search(q, null, skip + limit, sort);
                }
                stopWatch.lap("search");
                // Fetch matches (if any).
                final int max = Math.max(0, Math.min(td.totalHits - skip, limit));
                final JSONArray rows = new JSONArray();
                final String[] fetch_ids = new String[max];
                for (int i = skip; i < skip + max; i++) {
                    final Document doc;
                    if (fieldsToLoad == null) {
                        doc = searcher.doc(td.scoreDocs[i].doc);
                    } else {
                        doc = searcher.doc(td.scoreDocs[i].doc, fieldsToLoad);
                    }
                    final JSONObject row = new JSONObject();
                    final JSONObject fields = new JSONObject();
                    final JSONObject highlight_rows = new JSONObject();
                    // Include stored fields.
                    for (final IndexableField f : doc.getFields()) {
                        if (!f.fieldType().stored()) {
                            continue;
                        }
                        final String name = f.name();
                        final Object value;
                        if (f.numericValue() != null) {
                            value = f.numericValue();
                        } else {
                            value = f.stringValue();
                        }
                        if (value != null) {
                            if ("_id".equals(name)) {
                                row.put("id", value);
                            } else {
                                // Repeated field names collapse into a JSON array.
                                if (!fields.has(name)) {
                                    fields.put(name, value);
                                } else {
                                    final Object obj = fields.get(name);
                                    if (obj instanceof String || obj instanceof Number) {
                                        final JSONArray arr = new JSONArray();
                                        arr.put(obj);
                                        arr.put(value);
                                        fields.put(name, arr);
                                    } else {
                                        assert obj instanceof JSONArray;
                                        ((JSONArray) obj).put(value);
                                    }
                                }
                                // Highlighted fragments for this field, if requested.
                                if (highlights > 0) {
                                    String[] frags = fvh.getBestFragments(fvh.getFieldQuery(q),
                                            searcher.getIndexReader(), td.scoreDocs[i].doc, name, highlight_length,
                                            highlights);
                                    highlight_rows.put(name, frags);
                                }
                            }
                        }
                    }
                    // Score is NaN for sorted (non-relevance) results; skip it then.
                    if (!Float.isNaN(td.scoreDocs[i].score)) {
                        row.put("score", td.scoreDocs[i].score);
                    }
                    // Include sort order (if any).
                    if (td instanceof TopFieldDocs) {
                        final FieldDoc fd = (FieldDoc) ((TopFieldDocs) td).scoreDocs[i];
                        row.put("sort_order", fd.fields);
                    }
                    // Fetch document (if requested).
                    if (include_docs) {
                        fetch_ids[i - skip] = doc.get("_id");
                    }
                    if (fields.length() > 0) {
                        row.put("fields", fields);
                    }
                    if (highlight_rows.length() > 0) {
                        row.put("highlights", highlight_rows);
                    }
                    rows.put(row);
                }
                // Fetch documents (if requested) in one bulk CouchDB call.
                if (include_docs && fetch_ids.length > 0) {
                    final List<CouchDocument> fetched_docs = database.getDocuments(fetch_ids);
                    for (int j = 0; j < max; j++) {
                        final CouchDocument doc = fetched_docs.get(j);
                        final JSONObject row = doc == null ? new JSONObject("{\"error\":\"not_found\"}")
                                : doc.asJson();
                        rows.getJSONObject(j).put("doc", row);
                    }
                }
                stopWatch.lap("fetch");
                queryRow.put("skip", skip);
                queryRow.put("limit", limit);
                queryRow.put("total_rows", td.totalHits);
                queryRow.put("search_duration", stopWatch.getElapsed("search"));
                queryRow.put("fetch_duration", stopWatch.getElapsed("fetch"));
                // Include sort info (if requested).
                if (td instanceof TopFieldDocs) {
                    queryRow.put("sort_order", CustomQueryParser.toJSON(((TopFieldDocs) td).fields));
                }
                queryRow.put("rows", rows);
            }
            result.put(queryRow);
        }
    } catch (final ParseException e) {
        ServletUtils.sendJsonError(req, resp, 400, "Bad query syntax: " + e.getMessage());
        return;
    } finally {
        // Always return the borrowed searcher to the pool.
        state.returnSearcher(searcher);
    }
    resp.setHeader("ETag", etag);
    resp.setHeader("Cache-Control", "must-revalidate");
    ServletUtils.setResponseContentTypeAndEncoding(req, resp);
    // Single query -> bare object; multiple queries -> array of objects.
    final Object json = result.length() > 1 ? result : result.getJSONObject(0);
    final String callback = req.getParameter("callback");
    final String body;
    if (callback != null) {
        // JSONP wrapping.
        body = String.format("%s(%s)", callback, json);
    } else {
        if (json instanceof JSONObject) {
            final JSONObject obj = (JSONObject) json;
            body = getBooleanParameter(req, "debug") ? obj.toString(2) : obj.toString();
        } else {
            final JSONArray arr = (JSONArray) json;
            body = getBooleanParameter(req, "debug") ? arr.toString(2) : arr.toString();
        }
    }
    final Writer writer = resp.getWriter();
    try {
        writer.write(body);
    } finally {
        writer.close();
    }
}
From source file:invertedindex.SearchIndex.java
public ArrayList<SearchResults> search(String keyword) throws IOException { String indexLocation = this.getIndexLocation(); // System.out.println("Inside search method"); // indexLocation = ""; // BufferedReader br = new BufferedReader(new InputStreamReader(System.in)); // while (true) { try {//from w ww. j ava2 s . co m IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(indexLocation))); IndexSearcher searcher = new IndexSearcher(reader); TopScoreDocCollector collector = TopScoreDocCollector.create(topDocs, true); String query = keyword; query = "\"" + query + "\""; Query q = new QueryParser(Version.LUCENE_47, "contents", analyzer).parse(query); SimpleFragListBuilder fragListBuilder = new SimpleFragListBuilder(); ScoreOrderFragmentsBuilder fragBuilder = new ScoreOrderFragmentsBuilder(); FastVectorHighlighter fvh = new FastVectorHighlighter(FastVectorHighlighter.DEFAULT_PHRASE_HIGHLIGHT, FastVectorHighlighter.DEFAULT_FIELD_MATCH, fragListBuilder, fragBuilder); fvh = new FastVectorHighlighter(FastVectorHighlighter.DEFAULT_PHRASE_HIGHLIGHT, FastVectorHighlighter.DEFAULT_FIELD_MATCH, fragListBuilder, fragBuilder); // System.out.println(q); // searcher.search(q, collector); // searcher.search(q, null,topDocs); ScoreDoc[] hits = collector.topDocs().scoreDocs; // 4. 
display results System.out.println("Found " + hits.length + " hits."); totalHits = hits.length; searchResulsAL = new ArrayList<>(); for (int i = 0; i < hits.length; ++i) { int docId = hits[i].doc; FieldQuery fq = fvh.getFieldQuery(q); // System.out.println("fq "+fq); String[] fragments = fvh.getBestFragments(fq, searcher.getIndexReader(), docId, "contents", 50, 10); //String[] lineFragments = fvh.getBestFragments(fq, searcher.getIndexReader(), docId, "contents", 18,10); Document d = searcher.doc(docId); String filePath = d.get("path"); for (int j = 0; j < fragments.length; j++) { // System.out.println("FRAGMENT iS "+fragments[j]); // int k=0; // for(k=0;k<lineFragments.length;k++){ // fragments[j].getSc String temp = Jsoup.parse(fragments[j]).text(); // LineNumberSearcher lns = new LineNumberSearcher(); //lineNumbersList = new ArrayList<>(); lineNumberArrayList = new ArrayList<>(); lineNumber = "null"; boolean g = Pattern.compile("\\n").matcher(fragments[j]).find(); if (!g) { // System.out.println("NO G"); lineNumbersList = lns.search(temp, filePath); // for(String s : lineNumbersList){ // System.out.println("s is "+s); // } // if(lineNumbersList.get(0).isEmpty()){ // lineNumber = "Not Found"; // }else { if (!lineNumbersList.isEmpty()) { // System.out.println("in line number"); lineNumber = lineNumbersList.get(0); } // } } //here is the tried code for enter space /* else{ System.out.println("YES G"); String lines[] = fragments[j].split("\\r?\\n"); // ArrayList<String> newLines = new ArrayList<>(); ArrayList<String> newLines = new ArrayList<>(Arrays.asList(lines)); System.out.println("Here 3"); int special = 0; for(String line : newLines){ if(Pattern.compile("^$").matcher(line).find()){ newLines.remove(line); special++; } } System.out.println("Here 4"); // List<String> list = Arrays.asList(lines); // if(list.contains(temp)){ // // } // for(String line: newLines){ // System.out.println("LINE IS "+line); // } if(newLines.size()==1){ // System.out.println("Yes G but 
NOT G"); lineNumbersList = lns.search(temp,filePath); if(!lineNumberArrayList.isEmpty()){ lineNumber = lineNumbersList.get(0); } System.out.println("Here 1"); }else{ System.out.println("Here 2"); ArrayList<String> a0 = lns.search(Jsoup.parse(newLines.get(0)).text(),filePath); ArrayList<String> a1 = lns.search(Jsoup.parse(newLines.get(1)).text(),filePath); int k,l; outerloop: for(k=0;k<a0.size();k++){ for(l=0;l<a1.size();l++){ int secondline = Integer.parseInt(a1.get(l)); // System.out.println("second line is"+ secondline); int firstline = Integer.parseInt(a0.get(k)); // System.out.println("first line is"+firstline); int diff = secondline - firstline; // System.out.println("DIFFERENCE IS "+diff); // System.out.println("Special IS "+special); if(diff == special+1){ insideLoopFlag = true; // System.out.println("K IS "+k); // System.out.println("IN BREAK "); break outerloop; } } // System.out.println("K IS "+k); } // System.out.println("OUT OF FOR LOOP"); // System.out.println("K IS "+k); if(insideLoopFlag==true){ lineNumber = String.valueOf(a0.get(k)); } // System.out.println("LINE NUMBER IS "+lineNumber); } } */ // } fragments[j] = fragments[j].replaceAll("\\n", " "); // System.out.println("\t\t" + fragments[j] + "..."); fragments[j] = fragments[j] + "...."; if (!(lineNumber.equals("null"))) { // System.out.println("in line number"); fragments[j] = fragments[j] + " at Line " + lineNumber; } } //Setting Results SearchResults sr = new SearchResults(); sr.setFilename(d.get("filename")); sr.setScore(hits[i].score); sr.setFragments(fragments); sr.setPath(filePath); sr.setContentType(d.get("contentType")); // sr.setLineNumber(lineNumber); searchResulsAL.add(sr); // } // writer.close(); reader.close(); } catch (Exception e) { System.out.println("Error searching in search index " + e + " : " + e.getMessage()); // break; } // } return searchResulsAL; }
From source file:invertedindex.SearchIndex.java
public ArrayList<SearchResults> multipleSearch(String keyword1, String keyword2, String radio) throws IOException { String indexLocation = this.getIndexLocation(); try {//from w ww. j av a2 s . c o m IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(indexLocation))); IndexSearcher searcher = new IndexSearcher(reader); TopScoreDocCollector collector = TopScoreDocCollector.create(topDocs, true); String query1 = keyword1; String query2 = keyword2; query1 = "\"" + query1 + "\""; query2 = "\"" + query2 + "\""; Query q1 = new QueryParser(Version.LUCENE_47, "contents", analyzer).parse(query1); Query q2 = new QueryParser(Version.LUCENE_47, "contents", analyzer).parse(query2); BooleanQuery apiQuery = new BooleanQuery(); if (radio.equalsIgnoreCase("and")) { apiQuery.add(q1, BooleanClause.Occur.MUST); apiQuery.add(q2, BooleanClause.Occur.MUST); } else if (radio.equalsIgnoreCase("or")) { apiQuery.add(q1, BooleanClause.Occur.SHOULD); apiQuery.add(q2, BooleanClause.Occur.SHOULD); } else if (radio.equalsIgnoreCase("not")) { apiQuery.add(q1, BooleanClause.Occur.MUST); apiQuery.add(q2, BooleanClause.Occur.MUST_NOT); } SimpleFragListBuilder fragListBuilder = new SimpleFragListBuilder(); ScoreOrderFragmentsBuilder fragBuilder = new ScoreOrderFragmentsBuilder(); FastVectorHighlighter fvh = new FastVectorHighlighter(FastVectorHighlighter.DEFAULT_PHRASE_HIGHLIGHT, FastVectorHighlighter.DEFAULT_FIELD_MATCH, fragListBuilder, fragBuilder); fvh = new FastVectorHighlighter(FastVectorHighlighter.DEFAULT_PHRASE_HIGHLIGHT, FastVectorHighlighter.DEFAULT_FIELD_MATCH, fragListBuilder, fragBuilder); searcher.search(apiQuery, collector); ScoreDoc[] hits = collector.topDocs().scoreDocs; System.out.println("Found " + hits.length + " hits."); totalHits = hits.length; searchResulsAL = new ArrayList<>(); for (int i = 0; i < hits.length; ++i) { int docId = hits[i].doc; FieldQuery fq = fvh.getFieldQuery(apiQuery); // String[] fragments = fvh.getBestFragments(fq, searcher.getIndexReader(), 
docId, "contents", 50, 10); Document d = searcher.doc(docId); // String filePath = d.get("path"); for (int j = 0; j < fragments.length; j++) { String temp = Jsoup.parse(fragments[j]).text(); // LineNumberSearcher lns = new LineNumberSearcher(); //lineNumbersList = new ArrayList<>(); lineNumber = "null"; lineNumberArrayList = new ArrayList<>(); boolean g = Pattern.compile("\\n").matcher(fragments[j]).find(); if (!g) { // System.out.println("NO G g"); lineNumbersList = lns.search(temp, filePath); // for(String s : lineNumbersList){ // System.out.println("s is "+s); // } // if (!lineNumbersList.isEmpty()) { // System.out.println("in line number"); lineNumber = lineNumbersList.get(0); } } fragments[j] = fragments[j].replaceAll("\\n", " "); // System.out.println("\t\t" + fragments[j] + "..."); fragments[j] = fragments[j] + " ...."; if (!(lineNumber.equals("null"))) { // System.out.println("in line number"); fragments[j] = fragments[j] + " at Line " + lineNumber; } } SearchResults sr = new SearchResults(); sr.setFilename(d.get("filename")); sr.setScore(hits[i].score); sr.setFragments(fragments); sr.setPath(filePath); sr.setContentType(d.get("contentType")); searchResulsAL.add(sr); } reader.close(); } catch (Exception e) { System.out.println("Error searching in search index " + e + " : " + e.getMessage()); } return searchResulsAL; }
From source file:org.segrada.search.lucene.LuceneSearchEngine.java
License:Apache License
@Override public PaginationInfo<SearchHit> search(String searchTerm, Map<String, String> filters) { // to avoid NPEs if (filters == null) filters = new HashMap<>(); // set defaults int page = 1; int entriesPerPage = 20; try {/*ww w. j a va 2s .c om*/ DirectoryReader iReader = DirectoryReader.open(directory); String[] containFields; // do we have a filter to contain to certain fields? if (filters.containsKey("fields")) { String fields = filters.get("fields"); if (fields.isEmpty()) containFields = new String[] { "title", "subTitles", "content" }; else if (fields.equalsIgnoreCase("title")) containFields = new String[] { "title" }; else if (fields.equalsIgnoreCase("subTitles")) containFields = new String[] { "subTitles" }; else if (fields.equalsIgnoreCase("content")) containFields = new String[] { "content" }; else if (fields.equalsIgnoreCase("allTitles")) containFields = new String[] { "title", "subTitles" }; else throw new RuntimeException("fields-Filter " + fields + " is not known."); } else containFields = new String[] { "title", "subTitles", "content" }; // Parse a simple query that searches for "text": MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_47, containFields, analyzer); // which operator do we use? parser.setDefaultOperator(QueryParser.Operator.AND); if (filters.containsKey("operator")) { String operator = filters.get("operator"); if (operator.equalsIgnoreCase("or")) parser.setDefaultOperator(QueryParser.Operator.OR); else if (!operator.isEmpty() && !operator.equalsIgnoreCase("and")) throw new RuntimeException("operator-Filter " + operator + " is not and/or."); } // filters for query List<Filter> searchFilters = new ArrayList<>(); // class filter if (filters.containsKey("class") && !filters.get("class").isEmpty()) { // multiple classes? 
String[] classes = filters.get("class").split(","); // single class if (classes.length <= 1) { TermQuery categoryQuery = new TermQuery(new Term("className", filters.get("class"))); searchFilters.add(new QueryWrapperFilter(categoryQuery)); } else { // multiple classes Filter[] categories = new Filter[classes.length]; for (int i = 0; i < classes.length; i++) { categories[i] = new QueryWrapperFilter( new TermQuery(new Term("className", classes[i].trim()))); } // add chained filter searchFilters.add(new ChainedFilter(categories, ChainedFilter.OR)); } } // tag filter if (filters.containsKey("tags") && !filters.get("tags").isEmpty()) { // split tags into array String[] tags = filters.get("tags").split(","); BooleanQuery booleanQuery = new BooleanQuery(); for (String tag : tags) { booleanQuery.add(new TermQuery(new Term("tag", tag.trim())), BooleanClause.Occur.SHOULD); } searchFilters.add(new QueryWrapperFilter(booleanQuery)); } // create filter - if multiple filters applied, add chained filter Filter filter = null; if (searchFilters.size() == 1) filter = searchFilters.get(0); else if (searchFilters.size() > 1) { Filter[] filterArray = new Filter[searchFilters.size()]; searchFilters.toArray(filterArray); filter = new ChainedFilter(filterArray, ChainedFilter.AND); } // define query Query query = null; if (searchTerm != null) query = parser.parse(searchTerm); if (query == null) query = new MatchAllDocsQuery(); // fallback to match all documents // get hits per page if (filters.containsKey("limit")) { try { entriesPerPage = Integer.valueOf(filters.get("limit")); if (entriesPerPage <= 0 || entriesPerPage > 1000) entriesPerPage = 20; } catch (NumberFormatException e) { logger.warn("Could not parse limit " + filters.get("limit") + " to integer", e); } } // get page number if (filters.containsKey("page")) { try { page = Integer.valueOf(filters.get("page")); } catch (NumberFormatException e) { logger.warn("Could not parse page " + filters.get("page") + " to integer", e); } } // 
calculate start/stop indexes int startIndex = (page - 1) * entriesPerPage; int endIndex = page * entriesPerPage; IndexSearcher iSearcher = new IndexSearcher(iReader); // do search TopDocs topDocs = iSearcher.search(query, filter, 1000); // update end index if (topDocs.scoreDocs.length < endIndex) endIndex = topDocs.scoreDocs.length; // how many pages do we have? int pages = topDocs.scoreDocs.length / entriesPerPage + 1; // reset page to sane limit, if needed if (page <= 0 || page > pages) page = 1; // highlighter FastVectorHighlighter highlighter = new FastVectorHighlighter(); FieldQuery fieldQuery = null; // field query for highlighted terms if (searchTerm != null) fieldQuery = highlighter.getFieldQuery( new QueryParser(Version.LUCENE_47, "content", analyzer).parse(searchTerm), iReader); // cycle trough hits List<SearchHit> hits = new ArrayList<>(); for (int i = startIndex; i < endIndex; i++) { ScoreDoc scoreDoc = topDocs.scoreDocs[i]; Document hitDoc = iSearcher.doc(scoreDoc.doc); SearchHit searchHit = new SearchHit(); searchHit.setId(hitDoc.get("id")); searchHit.setClassName(hitDoc.get("className")); searchHit.setTitle(hitDoc.get("title")); searchHit.setSubTitles(hitDoc.get("subTitles")); searchHit.setTagIds(hitDoc.getValues("tag")); String color = hitDoc.get("color"); searchHit.setColor(color != null ? 
new Integer(color) : null); searchHit.setIconFileIdentifier(hitDoc.get("iconFileIdentifier")); searchHit.setRelevance(scoreDoc.score); // get highlighted components if (searchTerm != null) { String[] bestFragments = highlighter.getBestFragments(fieldQuery, iReader, scoreDoc.doc, "content", 18, 10); searchHit.setHighlightText(bestFragments); } // add hit hits.add(searchHit); } iReader.close(); // return pagination info return new PaginationInfo<>(page, pages, topDocs.totalHits, entriesPerPage, hits); } catch (Throwable e) { logger.error("Error in search.", e); } // return empty list result in order to avoid NPEs return new PaginationInfo<>(page, 1, 0, entriesPerPage, new ArrayList<>()); }
From source file:org.segrada.search.lucene.LuceneSearchEngine.java
License:Apache License
@Override public String[] searchInDocument(String searchTerm, String id) { // sanity check if (searchTerm == null || id == null || searchTerm.isEmpty() || id.isEmpty()) return new String[] {}; try {/*from w w w . j a v a 2 s .c om*/ DirectoryReader iReader = DirectoryReader.open(directory); IndexSearcher iSearcher = new IndexSearcher(iReader); // only search content MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_47, new String[] { "content" }, analyzer); // set operator and contain by id parser.setDefaultOperator(QueryParser.Operator.AND); Query query = parser.parse(searchTerm); Filter filter = new QueryWrapperFilter(new TermQuery(new Term("id", id))); // do search, maximum of 1 document TopDocs topDocs = iSearcher.search(query, filter, 1); if (topDocs.scoreDocs.length > 0) { ScoreDoc scoreDoc = topDocs.scoreDocs[0]; // get highlighted text FastVectorHighlighter highlighter = new FastVectorHighlighter(); FieldQuery fieldQuery = highlighter.getFieldQuery( new QueryParser(Version.LUCENE_47, "content", analyzer).parse(searchTerm), iReader); // return max of 100 highlighted elements return highlighter.getBestFragments(fieldQuery, iReader, scoreDoc.doc, "content", 100, 100); } } catch (Throwable e) { logger.error("Error in search.", e); } return new String[] {}; }