List of usage examples for org.apache.lucene.search Sort needsScores
public boolean needsScores()
From source file:com.qwazr.search.index.QueryUtils.java
License:Apache License
/**
 * Executes a Lucene query described by the {@link QueryContext} and builds the
 * result through the supplied document-builder factory.
 *
 * Scores are collected whenever no explicit sort is given (score ordering is the
 * default) or when the configured sort itself consumes scores.
 *
 * @param queryContext           query definition, analyzer and searcher to run against
 * @param documentBuilderFactory factory that materializes the final result documents
 * @return the built {@link ResultDefinition}
 * @throws IOException                  on index access failure
 * @throws ParseException               if the query cannot be parsed
 * @throws ReflectiveOperationException if result construction fails reflectively
 * @throws QueryNodeException           on query-node parsing failure
 */
final static ResultDefinition search(final QueryContext queryContext,
        final ResultDocumentBuilder.BuilderFactory documentBuilderFactory)
        throws IOException, ParseException, ReflectiveOperationException, QueryNodeException {

    final QueryDefinition queryDef = queryContext.queryDefinition;
    Query query = getLuceneQuery(queryContext);

    final TimeTracker tracker = new TimeTracker();
    final AnalyzerContext analyzerCtx = queryContext.analyzer.getContext();

    // A sort is optional; without one Lucene ranks by relevance score.
    final Sort sort = queryDef.sorts == null ? null
            : SortUtils.buildSort(analyzerCtx.fieldTypes, queryDef.sorts);
    final int numHits = queryDef.getEnd();
    final boolean needScore = sort == null || sort.needsScores();

    final QueryCollectors collectors = new QueryCollectors(needScore, sort, numHits,
            queryDef.facets, queryDef.functions, analyzerCtx.fieldTypes);
    queryContext.indexSearcher.search(query, collectors.finalCollector);

    final TopDocs topDocs = collectors.getTopDocs();
    final Integer totalHits = collectors.getTotalHits();
    tracker.next("search_query");

    // Facets are only computed when a facets collector was actually installed.
    final FacetsBuilder facetsBuilder = collectors.facetsCollector == null ? null
            : new FacetsBuilder(queryContext, queryDef.facets, query,
                    collectors.facetsCollector, tracker);

    // Build one highlighter per configured definition, preserving insertion order.
    final Map<String, HighlighterImpl> highlighters;
    if (queryDef.highlighters != null && topDocs != null) {
        highlighters = new LinkedHashMap<>();
        queryDef.highlighters.forEach((name, highlighterDefinition) -> highlighters
                .put(name, new HighlighterImpl(highlighterDefinition, queryContext.analyzer)));
    } else {
        highlighters = null;
    }

    final ResultDefinitionBuilder resultBuilder = new ResultDefinitionBuilder(queryDef,
            topDocs, queryContext.indexSearcher, query, highlighters,
            collectors.functionsCollectors, analyzerCtx.fieldTypes, tracker,
            documentBuilderFactory, facetsBuilder, totalHits);
    return documentBuilderFactory.build(resultBuilder);
}
From source file:org.apache.solr.handler.ExportWriter.java
License:Apache License
public void write(OutputStream os) throws IOException { QueryResponseWriter rw = req.getCore().getResponseWriters().get(wt); if (rw instanceof BinaryResponseWriter) { //todo add support for other writers after testing writer = new JavaBinCodec(os, null); } else {// ww w. j av a2 s .co m respWriter = new OutputStreamWriter(os, StandardCharsets.UTF_8); writer = JSONResponseWriter.getPushWriter(respWriter, req, res); } Exception exception = res.getException(); if (exception != null) { if (!(exception instanceof IgnoreException)) { writeException(exception, writer, false); } return; } SolrRequestInfo info = SolrRequestInfo.getRequestInfo(); SortSpec sortSpec = info.getResponseBuilder().getSortSpec(); if (sortSpec == null) { writeException((new IOException(new SyntaxError("No sort criteria was provided."))), writer, true); return; } SolrIndexSearcher searcher = req.getSearcher(); Sort sort = searcher.weightSort(sortSpec.getSort()); if (sort == null) { writeException((new IOException(new SyntaxError("No sort criteria was provided."))), writer, true); return; } if (sort != null && sort.needsScores()) { writeException((new IOException(new SyntaxError("Scoring is not currently supported with xsort."))), writer, true); return; } // There is a bailout in SolrIndexSearcher.getDocListNC when there are _no_ docs in the index at all. // if (lastDocRequested <= 0) { // That causes the totalHits and export entries in the context to _not_ get set. // The only time that really matters is when we search against an _empty_ set. That's too obscure // a condition to handle as part of this patch, if someone wants to pursue it it can be reproduced with: // ant test -Dtestcase=StreamingTest -Dtests.method=testAllValidExportTypes -Dtests.seed=10F13879D0D1D6AD -Dtests.slow=true -Dtests.locale=es-PA -Dtests.timezone=America/Bahia_Banderas -Dtests.asserts=true -Dtests.file.encoding=ISO-8859-1 // You'll have to uncomment the if below to hit the null pointer exception. 
// This is such an unusual case (i.e. an empty index) that catching this concdition here is probably OK. // This came to light in the very artifical case of indexing a single doc to Cloud. if (req.getContext().get("totalHits") != null) { totalHits = ((Integer) req.getContext().get("totalHits")).intValue(); sets = (FixedBitSet[]) req.getContext().get("export"); if (sets == null) { writeException( (new IOException(new SyntaxError("xport RankQuery is required for xsort: rq={!xport}"))), writer, true); return; } } SolrParams params = req.getParams(); String fl = params.get("fl"); String[] fields = null; if (fl == null) { writeException((new IOException(new SyntaxError("export field list (fl) must be specified."))), writer, true); return; } else { fields = fl.split(","); for (int i = 0; i < fields.length; i++) { fields[i] = fields[i].trim(); if (fields[i].equals("score")) { writeException( (new IOException(new SyntaxError("Scoring is not currently supported with xsort."))), writer, true); return; } } } try { fieldWriters = getFieldWriters(fields, req.getSearcher()); } catch (Exception e) { writeException(e, writer, true); return; } writer.writeMap(m -> { m.put("responseHeader", singletonMap("status", 0)); m.put("response", (MapWriter) mw -> { mw.put("numFound", totalHits); mw.put("docs", (IteratorWriter) iw -> writeDocs(req, iw, sort)); }); }); }
From source file:org.apache.solr.response.SortingResponseWriter.java
License:Apache License
public void write(Writer writer, SolrQueryRequest req, SolrQueryResponse res) throws IOException { Exception e1 = res.getException(); if (e1 != null) { e1.printStackTrace(new PrintWriter(writer)); return;/* w w w. ja v a2s.c o m*/ } SolrRequestInfo info = SolrRequestInfo.getRequestInfo(); SortSpec sortSpec = info.getResponseBuilder().getSortSpec(); if (sortSpec == null) { throw new IOException(new SyntaxError("No sort criteria was provided.")); } SolrIndexSearcher searcher = req.getSearcher(); Sort sort = searcher.weightSort(sortSpec.getSort()); if (sort == null) { throw new IOException(new SyntaxError("No sort criteria was provided.")); } if (sort.needsScores()) { throw new IOException(new SyntaxError("Scoring is not currently supported with xsort.")); } FixedBitSet[] sets = (FixedBitSet[]) req.getContext().get("export"); Integer th = (Integer) req.getContext().get("totalHits"); if (sets == null) { throw new IOException(new SyntaxError("xport RankQuery is required for xsort: rq={!xport}")); } int totalHits = th.intValue(); SolrParams params = req.getParams(); String fl = params.get("fl"); if (fl == null) { throw new IOException(new SyntaxError("export field list (fl) must be specified.")); } String[] fields = fl.split(","); for (int i = 0; i < fields.length; i++) { if (fl.trim().equals("score")) { throw new IOException(new SyntaxError("Scoring is not currently supported with xsort.")); } } FieldWriter[] fieldWriters = getFieldWriters(fields, req.getSearcher()); writer.write( "{\"responseHeader\": {\"status\": 0}, \"response\":{\"numFound\":" + totalHits + ", \"docs\":["); //Write the data. 
List<AtomicReaderContext> leaves = req.getSearcher().getTopReaderContext().leaves(); SortDoc sortDoc = getSortDoc(req.getSearcher(), sort.getSort()); int count = 0; int queueSize = 30000; SortQueue queue = new SortQueue(queueSize, sortDoc); SortDoc[] outDocs = new SortDoc[queueSize]; boolean commaNeeded = false; while (count < totalHits) { //long begin = System.nanoTime(); queue.reset(); SortDoc top = queue.top(); for (int i = 0; i < leaves.size(); i++) { sortDoc.setNextReader(leaves.get(i)); DocIdSetIterator it = sets[i].iterator(); int docId = -1; while ((docId = it.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { sortDoc.setValues(docId); if (top.lessThan(sortDoc)) { top.setValues(sortDoc); top = queue.updateTop(); } } } int outDocsIndex = -1; for (int i = 0; i < queueSize; i++) { SortDoc s = queue.pop(); if (s.docId > -1) { outDocs[++outDocsIndex] = s; } } //long end = System.nanoTime(); count += (outDocsIndex + 1); try { for (int i = outDocsIndex; i >= 0; --i) { SortDoc s = outDocs[i]; if (commaNeeded) { writer.write(','); } writer.write('{'); writeDoc(s, leaves, fieldWriters, sets, writer); writer.write('}'); commaNeeded = true; s.reset(); } } catch (Throwable e) { Throwable ex = e; while (ex != null) { String m = ex.getMessage(); if (m != null && m.contains("Broken pipe")) { logger.info("Early client disconnect during export"); return; } ex = ex.getCause(); } if (e instanceof IOException) { throw ((IOException) e); } else { throw new IOException(e); } } } //System.out.println("Sort Time 2:"+Long.toString(total/1000000)); writer.write("]}}"); writer.flush(); }
From source file:org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregator.java
License:Apache License
@Override public boolean needsScores() { Sort sort = subSearchContext.sort(); if (sort != null) { return sort.needsScores() || subSearchContext.trackScores(); } else {/*from ww w .ja v a 2s .c om*/ // sort by score return true; } }