List of usage examples for the constructor of org.apache.lucene.queryparser.flexible.standard.StandardQueryParser
public StandardQueryParser()
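The examples below all start from this no-argument constructor and then call parse(query, defaultField), which throws the checked QueryNodeException. A minimal, self-contained sketch of that shared pattern (the query string and the field name "contents" are placeholders, not taken from any of the examples):

import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser;
import org.apache.lucene.search.Query;

public class StandardQueryParserSketch {
    public static void main(String[] args) throws QueryNodeException {
        // Create a parser with the default configuration
        StandardQueryParser parser = new StandardQueryParser();
        // Parse classic Lucene query syntax; unqualified terms go to the default field ("contents" here)
        Query query = parser.parse("title:lucene AND (search OR index)", "contents");
        System.out.println(query);
    }
}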
From source file:SearchFiles11.java
License:Apache License
/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/core/4_1_0/demo/ for details.";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }

    String index = "index";
    String field = "contents";
    String queries = null;
    int repeat = 0;
    boolean raw = false;
    String queryString = null;
    int hitsPerPage = 10;

    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            index = args[i + 1];
            i++;
        } else if ("-field".equals(args[i])) {
            field = args[i + 1];
            i++;
        } else if ("-queries".equals(args[i])) {
            queries = args[i + 1];
            i++;
        } else if ("-query".equals(args[i])) {
            queryString = args[i + 1];
            i++;
        } else if ("-repeat".equals(args[i])) {
            repeat = Integer.parseInt(args[i + 1]);
            i++;
        } else if ("-raw".equals(args[i])) {
            raw = true;
        } else if ("-paging".equals(args[i])) {
            hitsPerPage = Integer.parseInt(args[i + 1]);
            if (hitsPerPage <= 0) {
                System.err.println("There must be at least 1 hit per page.");
                System.exit(1);
            }
            i++;
        }
    }

    // Open the index and set up the searcher
    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new StandardAnalyzer();

    // Parse the fixed query string against the default field with StandardQueryParser
    StandardQueryParser queryParserHelper = new StandardQueryParser();
    Query query = queryParserHelper.parse(
            "Physical OR tests OR for OR shoulder OR impingements OR and OR local OR lesions OR of OR bursa, OR tendon OR labrum OR that OR may OR accompany OR impingement",
            field);

    TopDocs results = searcher.search(query, 100);
    Date end = new Date();
    ScoreDoc[] hits = results.scoreDocs;
    int numTotalHits = results.totalHits;

    // Write the top hits as a TREC-style run file
    String FILENAME = "/home/devil/research/CLEF/ehealth/task2/dataset/pubmed11.res";
    int i = 1;
    try (BufferedWriter bw = new BufferedWriter(new FileWriter(FILENAME))) {
        String content = "";
        for (ScoreDoc h : hits) {
            Document doc = searcher.doc(h.doc);
            String path = doc.get("path");
            String[] path_words = path.split("/");
            System.out.println(path_words[path_words.length - 1] + " score=" + h.score);
            content = "CD007427 " + "NF " + path_words[path_words.length - 1] + " " + i++ + " " + h.score
                    + " pubmed\n";
            bw.write(content);
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
    // doPagingSearch(in, searcher, bQuery.build(), hitsPerPage, raw, queries == null && queryString == null);
    reader.close();
}
From source file:edu.utsa.sifter.IndexResource.java
License:Apache License
Query parseQuery(final String queryString, final String defaultField) throws QueryNodeException {
    if (queryString != null && !queryString.isEmpty()) {
        StandardQueryParser qp = new StandardQueryParser();
        return qp.parse(queryString, defaultField);
    } else {
        return new MatchAllDocsQuery();
    }
}
From source file:eu.europeana.corelib.search.utils.SearchUtils.java
License:Creative Commons License
public static boolean isTermQuery(String queryTerm) {
    StandardQueryParser queryParserHelper = new StandardQueryParser();
    org.apache.lucene.search.Query query = null;
    try {
        query = queryParserHelper.parse(queryTerm, "text");
    } catch (QueryNodeException e) {
        // e.printStackTrace();
    }
    return (query != null && query instanceof TermQuery);
}
From source file:intelligentWebAlgorithms.examples.search.MySearcher.java
License:Apache License
public SearchResult[] search(String query, int numberOfMatches) {
    SearchResult[] docResults = null;
    IndexSearcher is = null;
    Directory dir = null;
    try {
        dir = FSDirectory.open(indexFile);
    } catch (IOException e) {
        e.printStackTrace();
    }

    DirectoryReader dirReader = null;
    try {
        dirReader = DirectoryReader.open(dir);
        is = new IndexSearcher(dirReader);
    } catch (IOException ioX) {
        System.out.println("ERROR: " + ioX.getMessage());
    }

    StandardQueryParser queryParserHelper = new StandardQueryParser();
    Query q = null;
    try {
        q = queryParserHelper.parse(query, LuceneIndexBuilder.INDEX_FIELD_CONTENT);
    } catch (QueryNodeException e) {
        e.printStackTrace();
    }

    TopDocs hits = null;
    try {
        hits = is.search(q, numberOfMatches);
        docResults = new SearchResult[hits.scoreDocs.length];
        for (int i = 0; i < hits.scoreDocs.length; i++) {
            Document hitDoc = is.doc(hits.scoreDocs[i].doc);
            docResults[i] = new SearchResult(hitDoc.get("docid"), hitDoc.get("doctype"), hitDoc.get("title"),
                    hitDoc.get("url"), hits.scoreDocs[i].score);
        }
        dirReader.close();
        dir.close();
    } catch (IOException ioX) {
        System.out.println("ERROR: " + ioX.getMessage());
    } catch (Exception e) {
        e.printStackTrace();
    }

    String header = "Search results using Lucene index scores:";
    boolean showTitle = true;
    printResults(header, "Query: " + query, docResults, showTitle);
    return docResults;
}
From source file:io.anserini.rerank.lib.AxiomReranker.java
License:Apache License
@Override
public ScoredDocuments rerank(ScoredDocuments docs, RerankerContext<T> context) {
    assert (docs.documents.length == docs.scores.length);

    try {
        // First, search against the external index if one is configured
        docs = processExternalContext(docs, context);
        // Select R*M docs from the original ranking list as the reranking pool
        Set<Integer> usedDocs = selectDocs(docs, context);
        // Extract an inverted list from the reranking pool
        Map<String, Set<Integer>> termInvertedList = extractTerms(usedDocs, context, null);
        // Score all the terms in the reranking pool and pick the top K of them
        Map<String, Double> expandedTermScores = computeTermScore(termInvertedList, context);

        StringBuilder builder = new StringBuilder();
        for (Map.Entry<String, Double> termScore : expandedTermScores.entrySet()) {
            String term = termScore.getKey();
            double score = termScore.getValue();
            builder.append(term).append("^").append(score).append(" ");
        }
        String queryText = builder.toString().trim();
        if (queryText.isEmpty()) {
            LOG.info("[Empty Expanded Query]: " + context.getQueryTokens());
            queryText = context.getQueryText();
        }

        StandardQueryParser p = new StandardQueryParser();
        Query nq = p.parse(queryText, this.field);

        if (this.outputQuery) {
            LOG.info("QID: " + context.getQueryId());
            LOG.info("Original Query: " + context.getQuery().toString(this.field));
            LOG.info("Running new query: " + nq.toString(this.field));
        }

        return searchTopDocs(nq, context);
    } catch (Exception e) {
        e.printStackTrace();
        return docs;
    }
}
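The reranker above joins each expanded term with its weight as "term^score" and lets StandardQueryParser translate the classic boost syntax into weighted clauses. A minimal sketch of just that parsing step, with made-up terms, weights, and a placeholder field name instead of AxiomReranker's this.field:

import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser;
import org.apache.lucene.search.Query;

public class BoostedExpansionSketch {
    public static void main(String[] args) throws QueryNodeException {
        // Expanded terms joined in the same "term^score" form built by the reranker above
        String expanded = "shoulder^0.42 impingement^0.31 bursa^0.12";
        StandardQueryParser parser = new StandardQueryParser();
        // "contents" stands in for the reranker's configured field
        Query query = parser.parse(expanded, "contents");
        // The result is a BooleanQuery whose term clauses carry the given boosts
        System.out.println(query.toString("contents"));
    }
}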
From source file:net.oneandone.pommes.model.Database.java
License:Apache License
public List<Pom> query(String queryString, Variables variables) throws IOException, QueryNodeException {
    BooleanQuery query;
    List<String> terms;
    Query term;
    char marker;
    String string;

    queryString = variables(queryString, variables);
    if (queryString.startsWith("%")) {
        // CAUTION: don't merge this into the '+'-separated terms below, because a Lucene query may itself contain '+'
        return query(new StandardQueryParser().parse(queryString.substring(1), Database.GAV_NAME));
    } else {
        query = new BooleanQuery();
        terms = PLUS.split(queryString);
        if (terms.isEmpty()) {
            terms.add("");
        }
        for (String termString : terms) {
            marker = termString.isEmpty() ? ' ' : termString.charAt(0);
            switch (marker) {
            case ':':
                if (termString.length() > 1 && termString.charAt(1) == '-') {
                    string = variables(termString.substring(2), variables);
                    term = or(substring(Database.PAR_GAV, string), substring(Database.DEP_GAV, string));
                } else {
                    string = variables(termString.substring(1), variables);
                    term = substring(Database.GAV_NAME, string);
                }
                break;
            case '@':
                string = variables(termString.substring(1), variables);
                term = substring(Database.ORIGIN, string);
                break;
            default:
                string = variables(termString, variables);
                term = or(substring(Database.GAV_NAME, string), substring(Database.ORIGIN, string));
                break;
            }
            query.add(term, BooleanClause.Occur.MUST);
        }
        return query(query);
    }
}
From source file:org.scilab.modules.xcos.palette.PaletteSearcher.java
/**
 * @param str the query string
 * @return the documents describing the found blocks
 */
public List<Document> search(String str) {
    List<Document> found = new ArrayList<>();
    try (IndexReader reader = DirectoryReader.open(mgr.getDirectory())) {
        IndexSearcher searcher = new IndexSearcher(reader);

        StandardQueryParser queryParserHelper = new StandardQueryParser();
        queryParserHelper.setAllowLeadingWildcard(true);
        queryParserHelper.setLowercaseExpandedTerms(true);
        queryParserHelper.setAnalyzer(mgr.getAnalyzer());
        queryParserHelper.setMultiFields(new String[] { "refname", "refpurpose", "content" });

        Query query = queryParserHelper.parse(str, null);
        TopDocs results = searcher.search(query, XcosConstants.MAX_HITS);
        ScoreDoc[] hits = results.scoreDocs;
        if (hits.length == 0) {
            // no exact hit: retry the query wrapped in wildcards
            query = queryParserHelper.parse("*" + str + "*", null);
            results = searcher.search(query, XcosConstants.MAX_HITS);
            hits = results.scoreDocs;
        }
        for (int i = 0; i < hits.length; i++) {
            Document doc = searcher.doc(hits[i].doc);
            found.add(doc);
        }
    } catch (IOException | QueryNodeException e) {
        Logger.getLogger(PaletteSearcher.class.getName()).log(Level.SEVERE, null, e);
    }
    return found;
}
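In the example above the default field passed to parse is null, so StandardQueryParser expands unqualified terms across the fields given to setMultiFields. A minimal sketch of that configuration, reusing the field names from the example; the StandardAnalyzer stands in for mgr.getAnalyzer(), and setLowercaseExpandedTerms is omitted because it only exists in older Lucene releases:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser;
import org.apache.lucene.search.Query;

public class MultiFieldConfigSketch {
    public static void main(String[] args) throws QueryNodeException {
        StandardQueryParser parser = new StandardQueryParser();
        // Allow patterns such as "*integrator*" that begin with a wildcard
        parser.setAllowLeadingWildcard(true);
        // Analyze query text; StandardAnalyzer is an assumption, the real code uses the index's analyzer
        parser.setAnalyzer(new StandardAnalyzer());
        // With a null default field, unqualified terms are expanded over these fields
        parser.setMultiFields(new String[] { "refname", "refpurpose", "content" });

        Query query = parser.parse("*integrator*", null);
        System.out.println(query);
    }
}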