Usage examples for org.apache.lucene.queryparser.flexible.standard.StandardQueryParser#parse
@Override public Query parse(String query, String defaultField) throws QueryNodeException
From source file:org.apache.geode.cache.lucene.internal.StringQueryProvider.java
License:Apache License
@Override public synchronized Query getQuery(LuceneIndex index) throws LuceneQueryException { if (luceneQuery == null) { String[] fields = index.getFieldNames(); LuceneIndexImpl indexImpl = (LuceneIndexImpl) index; StandardQueryParser parser = new StandardQueryParser(indexImpl.getAnalyzer()); try {/* w ww. j a v a 2 s .co m*/ luceneQuery = parser.parse(query, defaultField); if (logger.isDebugEnabled()) { logger.debug("User query " + query + " is parsed to be: " + luceneQuery); } } catch (QueryNodeException e) { logger.debug("Query node exception:" + query, e); throw new LuceneQueryException("Malformed lucene query: " + query, e); } } return luceneQuery; }
From source file:org.apache.jackrabbit.oak.plugins.index.lucene.LucenePropertyIndex.java
License:Apache License
static Query tokenToQuery(String text, String fieldName, Analyzer analyzer) { if (analyzer == null) { return null; }/* w w w.j av a2s .c om*/ StandardQueryParser parserHelper = new StandardQueryParser(analyzer); parserHelper.setAllowLeadingWildcard(true); parserHelper.setDefaultOperator(StandardQueryConfigHandler.Operator.AND); text = rewriteQueryText(text); try { return parserHelper.parse(text, fieldName); } catch (QueryNodeException e) { throw new RuntimeException(e); } }
From source file:org.efaps.admin.index.Searcher.java
License:Apache License
/** * Search./*from w w w.j ava 2 s.c o m*/ * * @param _search the search * @return the search result * @throws EFapsException on error */ protected SearchResult executeSearch(final ISearch _search) throws EFapsException { final SearchResult ret = new SearchResult(); try { LOG.debug("Starting search with: {}", _search.getQuery()); final StandardQueryParser queryParser = new StandardQueryParser(Index.getAnalyzer()); queryParser.setAllowLeadingWildcard(true); if (EFapsSystemConfiguration.get().containsAttributeValue(KernelSettings.INDEXDEFAULTOP)) { queryParser.setDefaultOperator(EnumUtils.getEnum(StandardQueryConfigHandler.Operator.class, EFapsSystemConfiguration.get().getAttributeValue(KernelSettings.INDEXDEFAULTOP))); } else { queryParser.setDefaultOperator(StandardQueryConfigHandler.Operator.AND); } final Query query = queryParser.parse(_search.getQuery(), "ALL"); final IndexReader reader = DirectoryReader.open(Index.getDirectory()); Sort sort = _search.getSort(); if (sort == null) { sort = new Sort(new SortField(Key.CREATED.name(), SortField.Type.LONG, true)); } final FacetsConfig facetConfig = Index.getFacetsConfig(); final DirectoryTaxonomyReader taxoReader = new DirectoryTaxonomyReader(Index.getTaxonomyDirectory()); final IndexSearcher searcher = new IndexSearcher(reader); final FacetsCollector fc = new FacetsCollector(); final TopFieldDocs topFieldDocs = FacetsCollector.search(searcher, query, _search.getNumHits(), sort, fc); if (_search.getConfigs().contains(SearchConfig.ACTIVATE_DIMENSION)) { final Facets facets = new FastTaxonomyFacetCounts(taxoReader, facetConfig, fc); for (final FacetResult result : facets.getAllDims(1000)) { LOG.debug("FacetResult {}.", result); final DimConfig dimConfig = facetConfig.getDimConfig(result.dim); final Dimension retDim = new Dimension().setKey(result.dim); ret.getDimensions().add(retDim); for (final LabelAndValue labelValue : result.labelValues) { final DimValue dimValue = new DimValue().setLabel(labelValue.label) 
.setValue(labelValue.value.intValue()); dimValue.setPath(new String[] { retDim.getKey() }); retDim.getValues().add(dimValue); if (dimConfig.hierarchical) { addSubDimension(facets, dimValue, result.dim, labelValue.label); } } } } ret.setHitCount(topFieldDocs.totalHits); if (ret.getHitCount() > 0) { final ScoreDoc[] hits = topFieldDocs.scoreDocs; LOG.debug("Found {} hits.", hits.length); for (int i = 0; i < hits.length; ++i) { final Document doc = searcher.doc(hits[i].doc); final String oid = doc.get(Key.OID.name()); final String text = doc.get(Key.MSGPHRASE.name()); LOG.debug("{}. {}\t {}", i + 1, oid, text); final Instance instance = Instance.get(oid); final List<Instance> list; if (this.typeMapping.containsKey(instance.getType())) { list = this.typeMapping.get(instance.getType()); } else { list = new ArrayList<Instance>(); this.typeMapping.put(instance.getType(), list); } list.add(instance); final Element element = new Element().setOid(oid).setText(text); for (final Entry<String, Collection<String>> entry : _search.getResultFields().entrySet()) { for (final String name : entry.getValue()) { final String value = doc.get(name); if (value != null) { element.addField(name, value); } } } this.elements.put(instance, element); } } reader.close(); checkAccess(); ret.getElements().addAll(this.elements.values()); } catch (final IOException | QueryNodeException e) { LOG.error("Catched Exception", e); } return ret; }
From source file:org.gridkit.coherence.search.lucene.xml.JAXBSchemaTest.java
License:Apache License
public Query parseStd(String query) { try {//from w ww . ja v a 2s .c om StandardQueryParser parser = new StandardQueryParser(new StandardAnalyzer(Version.LUCENE_42)); return parser.parse(query, "text"); } catch (QueryNodeException e) { throw new RuntimeException(e); } }
From source file:org.scilab.modules.xcos.palette.PaletteSearcher.java
/** * @param str Query/* w w w . j a va 2 s . c o m*/ * @return paths to the found blocks */ public List<Document> search(String str) { List<Document> found = new ArrayList<>(); try (IndexReader reader = DirectoryReader.open(mgr.getDirectory())) { IndexSearcher searcher = new IndexSearcher(reader); StandardQueryParser queryParserHelper = new StandardQueryParser(); queryParserHelper.setAllowLeadingWildcard(true); queryParserHelper.setLowercaseExpandedTerms(true); queryParserHelper.setAnalyzer(mgr.getAnalyzer()); queryParserHelper.setMultiFields(new String[] { "refname", "refpurpose", "content" }); Query query = queryParserHelper.parse(str, null); TopDocs results = searcher.search(query, XcosConstants.MAX_HITS); ScoreDoc[] hits = results.scoreDocs; if (hits.length == 0) { query = queryParserHelper.parse("*" + str + "*", null); results = searcher.search(query, XcosConstants.MAX_HITS); hits = results.scoreDocs; } for (int i = 0; i < hits.length; i++) { Document doc = searcher.doc(hits[i].doc); found.add(doc); } } catch (IOException | QueryNodeException e) { Logger.getLogger(PaletteSearcher.class.getName()).log(Level.SEVERE, null, e); } return found; }
From source file:org.xbib.elasticsearch.test.SKOSLabelFilterTest.java
License:Apache License
@Test public void testTermQuery() throws CorruptIndexException, IOException, QueryNodeException { Document doc = new Document(); doc.add(new Field("content", "I work for the united nations", TextField.TYPE_STORED)); writer.addDocument(doc);/*from www . j av a2s . c om*/ searcher = new IndexSearcher(DirectoryReader.open(writer, false)); StandardQueryParser parser = new StandardQueryParser( new SimpleAnalyzer(SKOSAnalysisPlugin.getLuceneVersion())); Query query = parser.parse("united nations", "content"); Assert.assertEquals(1, TestUtil.hitCount(searcher, query)); }
From source file:stroom.search.server.SearchExpressionQueryBuilder.java
License:Apache License
/**
 * Builds a Lucene sub-query for a single field/value pair, recording the value's
 * individual words in {@code terms} for later hit highlighting.
 *
 * Analysed fields (or {@code in == true}) go through the {@link StandardQueryParser};
 * keyword fields become a plain {@link TermQuery} or {@link WildcardQuery}.
 * NOTE(review): {@code matchVersion} is not referenced in this body — confirm whether
 * it is still needed.
 *
 * @param matchVersion Lucene version (unused here)
 * @param field        the index field definition (analyzer type, case sensitivity, positions)
 * @param value        the raw search value
 * @param terms        out-parameter collecting words for hit highlighting
 * @param in           force the analysed-parsing path even for keyword fields
 * @return the built query, or {@code null} when the value reduces to nothing
 */
private Query getSubQuery(final Version matchVersion, final IndexField field, final String value,
        final Set<String> terms, final boolean in) {
    Query query = null;
    // Store terms for hit highlighting.
    String highlight = value;
    highlight = NON_WORD.matcher(highlight).replaceAll(" ");
    highlight = highlight.trim();
    highlight = MULTIPLE_SPACE.matcher(highlight).replaceAll(" ");
    final String[] highlights = highlight.split(" ");
    Collections.addAll(terms, highlights);
    // If we have omitted term frequencies and positions for this field then
    // we can't expect to do a sentence match. In this case we need to
    // modify the query so that each word becomes a new term in a boolean
    // query.
    String val = value.trim();
    if (in || !AnalyzerType.KEYWORD.equals(field.getAnalyzerType())) {
        // If the field has been analysed then we need to analyse the search
        // query to create matching terms.
        final Analyzer analyzer = AnalyzerFactory.create(field.getAnalyzerType(), field.isCaseSensitive());
        if (!field.isTermPositions()) {
            // Without positions, collapse separators/wildcards so each word
            // becomes its own term (the '+' forms feed the wildcard syntax).
            val = NON_WORD_OR_WILDCARD.matcher(val).replaceAll(" ");
            val = val.trim();
            val = MULTIPLE_SPACE.matcher(val).replaceAll(" +");
            val = MULTIPLE_WILDCARD.matcher(val).replaceAll("+");
        }
        if (val.length() > 0) {
            final StandardQueryParser queryParser = new StandardQueryParser(analyzer);
            queryParser.setAllowLeadingWildcard(true);
            queryParser.setLowercaseExpandedTerms(!field.isCaseSensitive());
            try {
                query = queryParser.parse(val, field.getFieldName());
            } catch (final QueryNodeException e) {
                throw new SearchException("Unable to parse query term '" + val + "'", e);
            }
        }
    } else {
        if (val.length() > 0) {
            // As this is just indexed as a keyword we only want to search
            // for the term.
            // NOTE(review): this lower-cases the untrimmed 'value' rather than the
            // trimmed 'val', silently discarding the earlier trim — confirm intent.
            if (!field.isCaseSensitive()) {
                val = value.toLowerCase();
            }
            final Term term = new Term(field.getFieldName(), val);
            final boolean termContainsWildcard = (val.indexOf('*') != -1) || (val.indexOf('?') != -1);
            if (termContainsWildcard) {
                query = new WildcardQuery(new Term(field.getFieldName(), val));
            } else {
                query = new TermQuery(term);
            }
        }
    }
    return query;
}