Usage examples for the org.apache.lucene.search.ConstantScoreQuery constructor
public ConstantScoreQuery(Query query)
From source file:cn.hbu.cs.esearch.index.BaseSearchIndex.java
License:Apache License
/**
 * Removes from the index every document whose UID appears in the given set.
 *
 * A no-op when the set is null or empty, or when no index reader can be
 * opened. The deletion is expressed as a constant-score query over a
 * {@link UIDFilter} built from the UID array and the current multi-reader.
 *
 * @param delDocs set of document UIDs to delete; may be null
 * @throws IOException if opening the reader/writer or the delete/commit fails
 */
private void deleteDocs(LongSet delDocs) throws IOException {
    // Nothing to delete.
    if (delDocs == null || delDocs.size() == 0) {
        return;
    }
    EsearchMultiReader<R> multiReader = openIndexReader();
    if (multiReader == null) {
        return;
    }
    // Filter matching exactly the documents carrying the given UIDs.
    // NOTE(review): the reader is not closed here — presumably openIndexReader()
    // returns a shared reader managed elsewhere; confirm against the class.
    UIDFilter matchByUid = new UIDFilter(delDocs.toLongArray(), multiReader);
    IndexWriter indexWriter = null;
    try {
        indexWriter = openIndexWriter(null, null);
        indexWriter.deleteDocuments(new ConstantScoreQuery(matchByUid));
        indexWriter.commit();
    } finally {
        // Releases the writer opened above regardless of success.
        closeIndexWriter();
    }
}
From source file:cn.hbu.cs.esearch.index.LuceneIndexDataLoader.java
License:Apache License
/**
 * Deletes from the search index every document matched by the configured
 * purge filter, committing the deletions. A no-op when no purge filter is set.
 *
 * Any failure is logged and swallowed (purging is best-effort); the writer is
 * always released via {@code closeIndexWriter()}.
 */
private final void purgeDocuments() {
    if (_purgeFilter == null) {
        return;
    }
    BaseSearchIndex<R> idx = getSearchIndex();
    IndexWriter writer = null;
    LOGGER.info("purging docs started...");
    long start = System.currentTimeMillis();
    try {
        writer = idx.openIndexWriter(null, null);
        // Constant-score wrapper lets the filter be used as a delete query.
        writer.deleteDocuments(new ConstantScoreQuery(_purgeFilter));
        writer.commit();
    } catch (Throwable th) {
        // BUGFIX: the old message claimed the purge *filter creation* failed,
        // but the failure can equally come from the delete or the commit.
        LOGGER.error("problem purging docs: " + th.getMessage(), th);
    } finally {
        idx.closeIndexWriter();
    }
    long end = System.currentTimeMillis();
    LOGGER.info("purging docs completed in " + (end - start) + "ms");
    // BUGFIX: removed the "total docs purged: <count>" log line — the counter
    // was never incremented and always reported 0, which was misleading.
    // deleteDocuments(Query) does not report a per-call deletion count.
}
From source file:cn.hbu.cs.esearch.store.LuceneStore.java
License:Apache License
@Override protected void persistDelete(long uid) throws IOException { final int docid = mapDocId(uid); if (docid < 0) { return;//www. j a va 2s.c o m } Query deleteQ = new ConstantScoreQuery(new Filter() { @Override public DocIdSet getDocIdSet(AtomicReaderContext readerCtx, Bits acceptedDocs) throws IOException { return new DocIdSet() { @Override public DocIdSetIterator iterator() throws IOException { return new DocIdSetIterator() { int currId = -1; @Override public int nextDoc() throws IOException { if (currId == -1) { currId = docid; } else { currId = DocIdSetIterator.NO_MORE_DOCS; } return currId; } @Override public int docID() { return currId; } @Override public int advance(int target) throws IOException { if (currId != DocIdSetIterator.NO_MORE_DOCS) { if (target < docid) { currId = docid; } else { currId = DocIdSetIterator.NO_MORE_DOCS; } } return currId; } @Override public long cost() { // TODO Auto-generated method stub return 0; } }; } }; } }); indexWriter.deleteDocuments(deleteQ); if (currentReaderData != null) { currentReaderData.uidMap.remove(uid); } }
From source file:com.billiger.solr.handler.component.QLTBComponent.java
License:Apache License
/** * Load the QLTB map from a Config.//from w ww . j a v a 2 s. c o m * * Read and process the "boosts/query" XPath nodes from the given * Config, and build them into a QLTB map. The XML format is described * in the class documentation. * * The result of this function is a map of (analyzed) query strings * with their respective lists of boosted query terms. These are * ConstantScoreQuery instances for each term with the corresponding * boost factor. (Invalid - i.e. non-numerical - boost factors are * logged as warnings). * * The SOLR core that is passed into this function is necessary for * determinating the FieldType of the boosted fields. Only with the * correct field type is it possible to boost non-string fields, as * these non-string values need to be ft.readableToIndexed(). * * @param cfg * Config object to read the XML QLTB from * @param core * SOLR Core the query is performed on * @return QLTB map * * @throws IOException * If the query could not be analysed */ private Map<String, List<Query>> loadQLTBMap(final Config cfg, final SolrCore core) throws IOException { Map<String, List<Query>> map = new HashMap<String, List<Query>>(); NodeList nodes = (NodeList) cfg.evaluate("boosts/query", XPathConstants.NODESET); for (int i = 0; i < nodes.getLength(); i++) { Node node = nodes.item(i); String qstr = DOMUtil.getAttr(node, "text", "missing query 'text'"); qstr = getAnalyzedQuery(qstr); NodeList children = node.getChildNodes(); List<Query> termBoosts = new ArrayList<Query>(); for (int j = 0; j < children.getLength(); j++) { Node child = children.item(j); if (!child.getNodeName().equals("term")) { continue; } String field = DOMUtil.getAttr(child, "field", "missing 'field'"); String value = DOMUtil.getAttr(child, "value", "missing 'value'"); String boost = DOMUtil.getAttr(child, "boost", "missing 'boost'"); float termBoost = 1; try { termBoost = Float.parseFloat(boost); } catch (NumberFormatException e) { log.warn("invalid boost " + boost + " for query \"" + qstr + 
"\", term: \"" + field + ":" + value + "\": " + e.getMessage()); continue; } // without readableToIndexed QLTB boosting would only work // for string field types FieldType ft = core.getLatestSchema().getField(field).getType(); value = ft.readableToIndexed(value); Term t = new Term(field, value); TermQuery tq = new TermQuery(t); ConstantScoreQuery csq = new ConstantScoreQuery(tq); csq.setBoost(termBoost); termBoosts.add(csq); } map.put(qstr, termBoosts); } return map; }
From source file:com.esri.gpt.catalog.lucene.QueryProvider.java
License:Apache License
/** * Creates new prefix query. Depending on {@link getUseConstantScoreQuery()} * it's either {@link org.apache.lucene.search.ConstantScoreQuery} with * {@link org.apache.lucene.search.PrefixFilter} or just * {@link org.apache.lucene.search.PrefixQuery}. * @param term term/*w w w.j a v a 2 s. c om*/ * @return prefix query */ private Query newPrefixQuery(String fieldName, String term) { return this.getUseConstantScoreQuery() ? new ConstantScoreQuery(new PrefixFilter(new Term(fieldName, term))) : new PrefixQuery(new Term(fieldName, term)); }
From source file:com.github.tteofili.looseen.MinHashClassifier.java
License:Apache License
/**
 * Builds a boolean OR query out of the min-hash tokens of the given text.
 *
 * The text is analyzed with a min-hash analyzer chain; each resulting token
 * becomes a constant-scoring SHOULD clause so that matches count uniformly.
 *
 * @param field       field name used when tokenizing the text
 * @param query       the raw text to hash and search for
 * @param min         min-shingle size for the analyzer chain
 * @param hashCount   number of hash functions
 * @param hashSetSize size of each hash set
 * @return a BooleanQuery with one SHOULD clause per min-hash token
 * @throws IOException if tokenization fails
 */
private Query buildQuery(String field, String query, int min, int hashCount, int hashSetSize) throws IOException {
    ArrayList<String> tokens;
    // BUGFIX: close the analyzer via try-with-resources — the old code called
    // close() unconditionally after getTokens(), leaking it if tokenization threw.
    try (Analyzer chain = createMinHashAnalyzer(min, hashCount, hashSetSize)) {
        tokens = getTokens(chain, field, query);
    }
    BooleanQuery.Builder builder = new BooleanQuery.Builder();
    for (String token : tokens) {
        // NOTE(review): terms are built against the hard-coded "text" field while
        // the 'field' parameter is only used for analysis — confirm intentional.
        builder.add(new ConstantScoreQuery(new TermQuery(new Term("text", token))), BooleanClause.Occur.SHOULD);
    }
    return builder.build();
}
From source file:com.greplin.lucene.query.PredicateBonusQueryTest.java
License:Apache License
@Test public void testBasics() throws Exception { IndexWriter writer = new IndexWriter(this.directory, new IndexWriterConfig(Version.LUCENE_35, new WhitespaceAnalyzer(Version.LUCENE_35))); writer.addDocument(new DocumentBuilder().add("value", "5").build()); writer.close();//from w w w . j av a2 s . c om IndexReader reader = IndexReader.open(this.directory); IndexSearcher searcher = new IndexSearcher(reader); Query query = new ConstantScoreQuery(new TermQuery(new Term("value", "5"))); Assert.assertEquals(1.0, searcher.search(query, 1).getMaxScore(), 0.00001); Query noBonus = new PredicateBonusQuery(query, Predicates.NONE, 10.0f); Assert.assertEquals(1.0, searcher.search(noBonus, 1).getMaxScore(), 0.00001); Query bonus = new PredicateBonusQuery(query, Predicates.ALL, 100.0f); Assert.assertEquals(101.0, searcher.search(bonus, 1).getMaxScore(), 0.00001); Query noMatch = new TermQuery(new Term("value", "not5")); Assert.assertEquals(Double.NaN, searcher.search(noMatch, 1).getMaxScore(), 0.00001); Query noMatchNoBonus = new PredicateBonusQuery(noMatch, Predicates.NONE, 10.0f); Assert.assertEquals(Double.NaN, searcher.search(noMatchNoBonus, 1).getMaxScore(), 0.00001); Query noMatchIgnoresBonus = new PredicateBonusQuery(noMatch, Predicates.ALL, 100.0f); Assert.assertEquals(Double.NaN, searcher.search(noMatchIgnoresBonus, 1).getMaxScore(), 0.00001); }
From source file:com.mhs.qsol.DefaultDateParser.java
License:Apache License
public Query buildDateQuery(String field, String date, Locale locale) { DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT, locale); df.setLenient(true);//from www . j a va2 s . c o m Matcher m; if ((m = BEFORE_DATE.matcher(date)).find()) { Date date1 = null; try { date1 = df.parse(m.group(1).trim()); } catch (ParseException e) { throw new QsolParseException(e); } final Filter filter = TermRangeFilter.Less(field, DateTools.dateToString(date1, Resolution.DAY)); return new ConstantScoreQuery(filter); } else if ((m = AFTER_DATE.matcher(date)).find()) { Date date1 = null; try { date1 = df.parse(m.group(1).trim()); } catch (ParseException e) { throw new QsolParseException("Could not parse date", e); } final Filter filter = TermRangeFilter.More(field, DateTools.dateToString(date1, Resolution.DAY)); return new ConstantScoreQuery(filter); } else if ((m = DATE_TO_DATE.matcher(date)).find()) { Date date1 = null; Date date2 = null; try { date1 = df.parse(m.group(1).trim()); date2 = df.parse(m.group(2).trim()); } catch (ParseException e) { throw new QsolParseException(e); } if ((date1 != null) && (date2 != null)) { } return new ConstantScoreQuery(new TermRangeFilter(field, DateTools.dateToString(date1, Resolution.DAY), DateTools.dateToString(date2, Resolution.DAY), true, true)); } else { Date date1 = null; try { date1 = df.parse(date.toString()); } catch (ParseException e) { throw new QsolParseException(e); } return new TermQuery(new Term(field, DateTools.dateToString(date1, Resolution.DAY))); } }
From source file:com.pjaol.search.geo.utils.DistanceQuery.java
License:Apache License
/**
 * Returns the distance filter wrapped in a constant-scoring query so every
 * matching document receives the same score.
 *
 * @return a ConstantScoreQuery over {@code getFilter()}
 */
public Query getQuery() {
    Filter distanceFilter = getFilter();
    return new ConstantScoreQuery(distanceFilter);
}
From source file:com.stratio.cassandra.index.query.Search.java
License:Apache License
/**
 * Returns the Lucene {@link Query} representation of this search, combining
 * the querying and filtering {@link Condition}s (and an optional extra range
 * query) as MUST clauses. When none of them is set a {@link MatchAllDocsQuery}
 * is returned, so this method never returns {@code null}.
 *
 * @param schema     The {@link Schema} to be used.
 * @param rangeQuery An additional range {@link Query} to be used; may be null.
 * @return The Lucene {@link Query} representation of this search.
 */
public Query query(Schema schema, Query rangeQuery) {
    boolean nothingToCombine = queryCondition == null && filterCondition == null && rangeQuery == null;
    if (nothingToCombine) {
        return new MatchAllDocsQuery();
    }
    BooleanQuery combined = new BooleanQuery();
    if (queryCondition != null) {
        combined.add(queryCondition.query(schema), BooleanClause.Occur.MUST);
    }
    if (filterCondition != null) {
        // The filter part must not influence scoring, hence the constant-score wrapper.
        combined.add(new ConstantScoreQuery(filterCondition.query(schema)), BooleanClause.Occur.MUST);
    }
    if (rangeQuery != null) {
        combined.add(rangeQuery, BooleanClause.Occur.MUST);
    }
    return combined;
}