Example usage for org.apache.lucene.search TopDocsCollector topDocs

List of usage examples for org.apache.lucene.search TopDocsCollector topDocs

Introduction

On this page you can find example usage of org.apache.lucene.search TopDocsCollector topDocs.

Prototype

public TopDocs topDocs() 

Source Link

Document

Returns the top docs that were collected by this collector.

Usage

From source file:arena.lucene.LuceneIndexSearcherImpl.java

License:Open Source License

/**
 * Executes a lucene search, choosing the cheapest execution strategy based on
 * which of the query / filter / sort arguments are present.
 * @param searcher the searcher to run against
 * @param query full text query; may be null
 * @param filter filter restricting the candidate documents; may be null
 * @param sort result ordering; may be null (natural / score order)
 * @param collectorLimit maximum number of hits to collect
 * @return the collected top docs (scores are fixed at 1f when no query is run)
 * @throws IOException on index access errors
 */
protected TopDocs executeSearch(IndexSearcher searcher, Query query, Filter filter, Sort sort,
        int collectorLimit) throws IOException {
    // Decide on how to search based on which elements of the lucene query model are available
    if (query != null) {
        // Full scoring search
        TopDocsCollector<? extends ScoreDoc> collector = null;
        if (sort == null) {
            collector = TopScoreDocCollector.create(collectorLimit, true);
        } else {
            SortField sortFields[] = sort.getSort();
            // a sort whose primary key is SCORE and not reversed is equivalent to
            // plain relevance ordering, so the cheaper score collector can be used
            if (sortFields != null && sortFields.length > 0 && sortFields[0].getType() == SortField.SCORE
                    && !sortFields[0].getReverse()) {
                collector = TopScoreDocCollector.create(collectorLimit, true);
            } else {
                collector = TopFieldCollector.create(sort, collectorLimit, false, true, true, true);
            }
        }
        searcher.search(query, filter, collector);
        return collector.topDocs();

    } else if (filter != null) {
        // No query = no need for scoring, just dump the results into a hit collector that runs 
        // off the results in the order we want 
        // NOTE(review): some lucene versions return null from getDocIdSet for an
        // empty set -- confirm a null check is not needed here
        DocIdSetIterator filterMatchesIterator = filter.getDocIdSet(searcher.getIndexReader()).iterator();
        if (sort == null) {
            // no sort available, so the natural iteration order is fine
            // if we have an iterator that means sorting is already handled, so just pull off the first n rows into the output
            ScoreDoc[] scoreDocs = new ScoreDoc[collectorLimit];
            int found = 0;
            int docId;
            while (found < collectorLimit
                    && (docId = filterMatchesIterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                // constant score of 1 since no scoring query was executed
                scoreDocs[found++] = new ScoreDoc(docId, 1f);
            }
            // trim the array when fewer than collectorLimit docs matched
            return new TopDocs(found, found < collectorLimit ? Arrays.copyOf(scoreDocs, found) : scoreDocs, 1f);
        } else {
            TopDocsCollector<? extends ScoreDoc> collector = TopFieldCollector.create(sort, collectorLimit,
                    false, true, true, true);
            int docId;
            // NOTE(review): collect() is invoked without setNextReader()/setScorer();
            // this relies on the collector tolerating that -- confirm against the
            // lucene version in use
            while ((docId = filterMatchesIterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                collector.collect(docId);
            }
            return collector.topDocs();

        }
    } else if (sort != null) {
        // no query and no filter so no score but add every doc in the index for non-score sorting            
        TopDocsCollector<? extends ScoreDoc> collector = TopFieldCollector.create(sort, collectorLimit, false,
                true, true, true);
        // NOTE(review): numDocs() excludes deleted docs while doc IDs run up to
        // maxDoc(); if the index can contain deletions this loop visits the wrong
        // IDs -- confirm deletions cannot occur here
        int numDocs = searcher.getIndexReader().numDocs();
        for (int n = 0; n < numDocs; n++) {
            collector.collect(n);
        }
        return collector.topDocs();
    } else {
        // no query filter or sort: return the top n docs
        // NOTE(review): same numDocs()/maxDoc() caveat as the branch above
        ScoreDoc[] scoreDocs = new ScoreDoc[Math.min(collectorLimit, searcher.getIndexReader().numDocs())];

        for (int n = 0; n < scoreDocs.length; n++) {
            scoreDocs[n] = new ScoreDoc(n, 1f);
        }
        return new TopDocs(scoreDocs.length, scoreDocs, 1f);
    }
}

From source file:com.aurel.track.lucene.search.associatedFields.AbstractAssociatedFieldSearcher.java

License:Open Source License

/**
 * Gets the OR separated workitem IDs which match the specified field's user entered string.
 * @param analyzer the analyzer used to build the lookup query
 * @param fieldName the lucene field name (unused here; the query is built from fieldValue)
 * @param fieldValue the user entered search string
 * @param fieldID the field identifier
 * @param locale the user's locale
 * @return the OR divided workitem IDs, or the original fieldValue when nothing could be resolved
 */
@Override
protected String searchExplicitField(Analyzer analyzer, String fieldName, String fieldValue, Integer fieldID,
        Locale locale) {
    IndexSearcher indexSearcher = null;
    try {
        Query query = getAssociatedFieldQuery(analyzer, fieldValue);
        if (query == null) {
            // no query could be built: fall back to the raw user input
            return fieldValue;
        }
        indexSearcher = LuceneSearcher.getIndexSearcher(getIndexSearcherID());
        if (indexSearcher == null) {
            return fieldValue;
        }
        ScoreDoc[] scoreDocs;
        try {
            TopDocsCollector<ScoreDoc> collector = TopScoreDocCollector.create(LuceneSearcher.MAXIMAL_HITS);
            indexSearcher.search(query, collector);
            scoreDocs = collector.topDocs().scoreDocs;
        } catch (IOException e) {
            LOGGER.warn("Searching the " + getLuceneFieldName() + " failed with " + e.getMessage());
            LOGGER.debug(ExceptionUtils.getStackTrace(e));
            return fieldValue;
        }
        if (scoreDocs == null || scoreDocs.length == 0) {
            return fieldValue;
        }
        if (scoreDocs.length > LuceneSearcher.MAX_BOOLEAN_CLAUSES) {
            LOGGER.warn("Maximum number of boolean clauses was exceeded");
        }

        Set<Integer> workItemIDs = new HashSet<Integer>();
        for (int i = 0; i < scoreDocs.length; i++) {
            int docID = scoreDocs[i].doc;
            Document doc;
            try {
                doc = indexSearcher.doc(docID);
            } catch (IOException e) {
                LOGGER.error("Getting the documents from index searcher for " + getLuceneFieldName()
                        + "  failed with " + e.getMessage());
                LOGGER.debug(ExceptionUtils.getStackTrace(e));
                return fieldValue;
            }

            String workItemFieldName = getWorkItemFieldName();
            String workItemIDStr = doc.get(workItemFieldName);
            Integer workItemID = null;
            if (workItemIDStr != null) {
                try {
                    workItemID = Integer.valueOf(workItemIDStr);
                    workItemIDs.add(workItemID);
                } catch (Exception e) {
                    LOGGER.debug(e);
                }
            }
            //by links there are two workitems for bidirectional links
            String additionalWorkItemFieldName = getAdditionalWorkItemFieldName(doc);
            if (additionalWorkItemFieldName != null) {
                workItemIDStr = doc.get(additionalWorkItemFieldName);
                workItemID = null;
                if (workItemIDStr != null) {
                    try {
                        workItemID = Integer.valueOf(workItemIDStr);
                        workItemIDs.add(workItemID);
                    } catch (Exception e) {
                        // was silently swallowed: log it like the identical catch above
                        LOGGER.debug(e);
                    }
                }
            }

        }
        return LuceneSearcher.createORDividedIDs(workItemIDs);
    } catch (Exception e) {
        LOGGER.warn("Getting the " + getLuceneFieldName() + " field " + fieldValue + " failed with "
                + e.getMessage());
        LOGGER.debug(ExceptionUtils.getStackTrace(e));
        return fieldValue;
    } finally {
        // the searcher (and its reader) must always be released
        LuceneSearcher.closeIndexSearcherAndUnderlyingIndexReader(indexSearcher, getLuceneFieldName());
    }
}

From source file:com.aurel.track.lucene.search.listFields.AbstractListFieldSearcher.java

License:Open Source License

/**
 * Finds the list options which match the user entered string in link descriptions.
 * @param analyzer the analyzer used to build the lookup query
 * @param fieldName the lucene field name to search
 * @param label the user entered search string
 * @param fieldID the field identifier
 * @param locale the user's locale
 * @return the OR divided list option IDs, or the original label when nothing could be resolved
 */
@Override
protected String searchExplicitField(Analyzer analyzer, String fieldName, String label, Integer fieldID,
        Locale locale) {
    IndexSearcher indexSearcher = null;
    try {
        Query query = getExplicitFieldQuery(analyzer, fieldName, label, fieldID, locale);
        if (query == null) {
            // no query could be built: fall back to the raw user input
            return label;
        }
        indexSearcher = LuceneSearcher.getIndexSearcher(getIndexSearcherID());
        if (indexSearcher == null) {
            return label;
        }
        ScoreDoc[] scoreDocs;
        try {
            TopDocsCollector<ScoreDoc> collector = TopScoreDocCollector.create(LuceneSearcher.MAXIMAL_HITS);
            indexSearcher.search(query, collector);
            scoreDocs = collector.topDocs().scoreDocs;
        } catch (IOException e) {
            LOGGER.warn("Searching by fieldName " + fieldName + " and fieldValue " + label + " failed with "
                    + e.getMessage());
            LOGGER.debug(ExceptionUtils.getStackTrace(e));
            return label;
        }
        if (scoreDocs == null || scoreDocs.length == 0) {
            return label;
        }
        if (scoreDocs.length > LuceneSearcher.MAX_BOOLEAN_CLAUSES) {
            LOGGER.warn("Maximum number of boolean clauses was exceeded");
        }
        Set<Integer> listOptionIDs = new HashSet<Integer>();
        for (int i = 0; i < scoreDocs.length; i++) {
            int docID = scoreDocs[i].doc;
            Document doc;
            try {
                doc = indexSearcher.doc(docID);
            } catch (IOException e) {
                LOGGER.error("Getting the documents from index searcher for fieldName " + fieldName
                        + " and fieldValue " + label + "  failed with " + e.getMessage());
                LOGGER.debug(ExceptionUtils.getStackTrace(e));
                return label;
            }
            String listOptionIDStr = doc.get(getValueFieldName());
            Integer listOptionID = null;
            if (listOptionIDStr != null) {
                try {
                    listOptionID = Integer.valueOf(listOptionIDStr);
                    listOptionIDs.add(listOptionID);
                } catch (Exception e) {
                    // was silently swallowed: log the malformed ID for diagnosis
                    LOGGER.debug(e);
                }
            }
        }
        return LuceneSearcher.createORDividedIDs(listOptionIDs);
    } catch (Exception e) {
        LOGGER.error("Getting the fieldName " + fieldName + " and fieldValue " + label + " failed with "
                + e.getMessage());
        LOGGER.debug(ExceptionUtils.getStackTrace(e));
        return label;
    } finally {
        // the searcher (and its reader) must always be released
        LuceneSearcher.closeIndexSearcherAndUnderlyingIndexReader(indexSearcher, fieldName);
    }
}

From source file:com.aurel.track.lucene.search.listFields.AbstractListFieldSearcher.java

License:Open Source License

/**
 * Get the workItemIDs which match the user entered string
 * @param analyzer/*from   w  ww .j  ava2  s  . co m*/
 * @param fieldValue
 * @param locale
 * @return
 */
@Override
protected String searchNoExplicitField(Analyzer analyzer, String toBeProcessedString, Locale locale) {
    IndexSearcher indexSearcher = null;
    Map<Integer, Set<Integer>> result = new HashMap<Integer, Set<Integer>>();
    try {
        Query lookupQuery = getNoExlplicitFieldQuery(analyzer, toBeProcessedString, locale);
        if (lookupQuery == null) {
            return "";
        }
        indexSearcher = LuceneSearcher.getIndexSearcher(getIndexSearcherID());
        if (indexSearcher == null) {
            return "";
        }
        ScoreDoc[] scoreDocs;
        try {
            TopDocsCollector<ScoreDoc> collector = TopScoreDocCollector.create(LuceneSearcher.MAXIMAL_HITS);
            indexSearcher.search(lookupQuery, collector);
            scoreDocs = collector.topDocs().scoreDocs;
        } catch (IOException e) {
            return "";
        }
        if (scoreDocs == null || scoreDocs.length == 0) {
            return "";
        }
        if (scoreDocs.length > LuceneSearcher.MAX_BOOLEAN_CLAUSES) {
            LOGGER.warn("Maximum number of boolean clauses was exceeded by not localized lookup");
        }
        Document doc;
        for (int i = 0; i < scoreDocs.length; i++) {
            int docID = scoreDocs[i].doc;
            try {
                doc = indexSearcher.doc(docID);
            } catch (IOException e) {
                LOGGER.error("Getting the documents from index searcher for fieldValue " + toBeProcessedString
                        + " failed with " + e.getMessage());
                LOGGER.debug(ExceptionUtils.getStackTrace(e));
                return "";
            }
            String typeStr = doc.get(getTypeFieldName());
            String idStr = doc.get(getValueFieldName());
            Integer type = null;
            Integer id = null;
            try {
                type = Integer.valueOf(typeStr);
                id = Integer.valueOf(idStr);
                Set<Integer> ids = result.get(type);
                if (ids == null) {
                    ids = new HashSet<Integer>();
                    result.put(type, ids);
                }
                ids.add(id);
            } catch (NumberFormatException ex) {
                continue;
            }
        }
    } catch (Exception ex) {
    } finally {
        LuceneSearcher.closeIndexSearcherAndUnderlyingIndexReader(indexSearcher, "no field");
    }
    Set<Integer> types = result.keySet();
    StringBuffer directQuery = new StringBuffer();
    for (Integer type : types) {
        Set<Integer> ids = result.get(type);
        String orDividedIDs = LuceneSearcher.createORDividedIDs(ids);
        String[] workItemFieldNames = getWorkItemFieldNames(type);
        for (int i = 0; i < workItemFieldNames.length; i++) {
            if (i > 0) {
                directQuery.append(" OR ");
            }
            if (ids.size() > 1) {
                directQuery.append(workItemFieldNames[i] + LuceneSearcher.FIELD_NAME_VALUE_SEPARATOR + "("
                        + orDividedIDs + ")");
            } else {
                directQuery.append(
                        workItemFieldNames[i] + LuceneSearcher.FIELD_NAME_VALUE_SEPARATOR + orDividedIDs);
            }
        }
    }
    return directQuery.toString();
}

From source file:com.aurel.track.lucene.search.LuceneSearcher.java

License:Open Source License

/**
 * Runs the workitem query and returns the matching issue numbers, optionally
 * filling highlightedTextMap with the best highlighted fragment per item.
 * @param query the parsed lucene query to execute
 * @param userQueryString original user query string, used only for debug logging
 * @param preprocessedQueryString preprocessed query string, used only for debug logging
 * @param highlightedTextMap if not null, receives itemID -&gt; highlighted fragment
 * @return the matched issue numbers (empty array when the search fails)
 */
private static int[] getQueryResults(Query query, String userQueryString, String preprocessedQueryString,
        Map<Integer, String> highlightedTextMap) {
    int[] hitIDs = new int[0];
    IndexSearcher indexSearcher = null;
    try {
        long start = 0;
        if (LOGGER.isDebugEnabled()) {
            start = new Date().getTime();
        }
        indexSearcher = getIndexSearcher(LuceneUtil.INDEXES.WORKITEM_INDEX);
        if (indexSearcher == null) {
            return hitIDs;
        }
        ScoreDoc[] scoreDocs;
        try {
            TopDocsCollector<ScoreDoc> collector = TopScoreDocCollector.create(MAXIMAL_HITS);
            indexSearcher.search(query, collector);
            scoreDocs = collector.topDocs().scoreDocs;
        } catch (IOException e) {
            LOGGER.warn("Getting the workitem search results failed with failed with " + e.getMessage());
            LOGGER.debug(ExceptionUtils.getStackTrace(e));
            return hitIDs;
        }
        if (LOGGER.isDebugEnabled()) {
            long end = new Date().getTime();
            LOGGER.debug("Found " + scoreDocs.length + " document(s) (in " + (end - start)
                    + " milliseconds) that matched the user query '" + userQueryString
                    + "' the preprocessed query '" + preprocessedQueryString + "' and the query.toString() '"
                    + query.toString() + "'");
        }
        // highlighter setup: score fragments against the executed query
        QueryScorer queryScorer = new QueryScorer(query/*, LuceneUtil.HIGHLIGHTER_FIELD*/);
        Fragmenter fragmenter = new SimpleSpanFragmenter(queryScorer);
        Highlighter highlighter = new Highlighter(queryScorer); // Set the best scorer fragments
        highlighter.setTextFragmenter(fragmenter); // Set fragment to highlight
        hitIDs = new int[scoreDocs.length];
        for (int i = 0; i < scoreDocs.length; i++) {
            int docID = scoreDocs[i].doc;
            Document doc = null;
            try {
                doc = indexSearcher.doc(docID);
            } catch (IOException e) {
                LOGGER.error("Getting the workitem documents failed with " + e.getMessage());
                LOGGER.debug(ExceptionUtils.getStackTrace(e));
            }
            if (doc != null) {
                // NOTE(review): if the ISSUENO field is missing, valueOf(null) throws
                // and control falls through to the outer catch (returning the partial
                // empty array); the null check below is effectively always true since
                // Integer.valueOf throws rather than returning null
                Integer itemID = Integer.valueOf(doc.get(LuceneUtil.getFieldName(SystemFields.ISSUENO)));
                if (itemID != null) {
                    hitIDs[i] = itemID.intValue();
                    if (highlightedTextMap != null) {
                        String highligherFieldValue = doc.get(LuceneUtil.HIGHLIGHTER_FIELD);
                        TokenStream tokenStream = null;
                        try {
                            tokenStream = TokenSources.getTokenStream(LuceneUtil.HIGHLIGHTER_FIELD, null,
                                    highligherFieldValue, LuceneUtil.getAnalyzer(), -1);
                        } catch (Exception ex) {
                            // highlighting is best-effort: a failure here must not fail the search
                            LOGGER.debug(ex.getMessage());
                        }
                        if (tokenStream != null) {
                            String fragment = highlighter.getBestFragment(tokenStream, highligherFieldValue);
                            if (fragment != null) {
                                highlightedTextMap.put(itemID, fragment);
                            }
                        }
                    }
                }
            }
        }
        return hitIDs;
    } catch (BooleanQuery.TooManyClauses e) {
        // deliberately rethrown so the caller can report the over-broad query to the user
        LOGGER.error("Searching the query resulted in too many clauses. Try to narrow the query results. "
                + e.getMessage());
        LOGGER.debug(ExceptionUtils.getStackTrace(e));
        throw e;
    } catch (Exception e) {
        LOGGER.error("Searching the workitems failed with " + e.getMessage());
        LOGGER.debug(ExceptionUtils.getStackTrace(e));
        return hitIDs;
    } finally {
        closeIndexSearcherAndUnderlyingIndexReader(indexSearcher, "workItem");
    }
}

From source file:com.wrmsr.search.dsl.SearcherImpl.java

License:Apache License

/**
 * Executes {@code query} wrapped in a score-computing query and returns up to
 * {@code maxHits} hits ordered by descending score.
 * @param query the base query to match documents with
 * @param scoreSupplier supplies the computed score for each matching document
 * @param maxHits maximum number of hits to return
 * @return the top score docs, best first
 * @throws IOException on index access errors
 */
@Override
public ScoreDoc[] search(Query query, Supplier<Float> scoreSupplier, int maxHits) throws IOException {
    Query scoredQuery = new ComputedScoreQuery(new DocSpecific.Composite(docSpecificSet), scoreSupplier, query);
    // parameterized collector type instead of the raw TopDocsCollector
    TopDocsCollector<ScoreDoc> topDocsCollector = TopScoreDocCollector.create(maxHits, true);
    indexSearcher.search(scoredQuery, topDocsCollector);
    return topDocsCollector.topDocs().scoreDocs;
}

From source file:io.datalayer.lucene.search.LuceneQueryTest.java

License:Apache License

/**
 * Opens the index at {@code indexDir}, runs {@code q} and logs the hit count.
 * @param indexDir filesystem path of the lucene index directory
 * @param q the query to execute
 * @throws IOException on index access errors
 * @throws ParseException kept for signature compatibility (not thrown here)
 */
private static void query(String indexDir, Query q) throws IOException, ParseException {

    int hitsPerPage = 10;
    // try-with-resources: the original leaked the reader (it was never closed)
    try (IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(indexDir)))) {
        IndexSearcher indexSearcher = new IndexSearcher(reader);
        // parameterized collector type instead of the raw TopDocsCollector
        TopDocsCollector<ScoreDoc> collector = TopScoreDocCollector.create(hitsPerPage, false);
        indexSearcher.search(q, collector);
        ScoreDoc[] hits = collector.topDocs().scoreDocs;

        LOGGER.info("Found " + hits.length + " hits.");
        for (int i = 0; i < hits.length; ++i) {
            int docId = hits[i].doc;
            Document d = indexSearcher.doc(docId);
            // LOGGER.info((i + 1) + ". " + d.get("title"));
        }
    }
    // the searcher needs no separate close; closing the reader releases the index
}

From source file:io.datalayer.lucene.search.LuceneSearchTest.java

License:Apache License

/**
 * Runs {@code query} against {@code directory}, logging each hit's
 * {@code fieldname} value and the elapsed time.
 * @param query the query to execute
 * @param fieldname stored field to print for each hit
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException on index access errors
 */
private void queryIndex(Query query, String fieldname) throws CorruptIndexException, IOException {

    LOGGER.info("-------------------------------------");

    // System.currentTimeMillis() instead of allocating a Calendar just for a timestamp
    long start = System.currentTimeMillis();

    int hitsPerPage = 100;

    // try-with-resources: the original leaked the reader (it was never closed)
    try (IndexReader reader = DirectoryReader.open(directory)) {
        IndexSearcher indexSearcher = new IndexSearcher(reader);
        // parameterized collector type instead of the raw TopDocsCollector
        TopDocsCollector<ScoreDoc> collector = TopScoreDocCollector.create(hitsPerPage, false);

        indexSearcher.search(query, collector);

        ScoreDoc[] hits = collector.topDocs().scoreDocs;

        long end = System.currentTimeMillis();

        LOGGER.info("Found " + hits.length + " hits in " + (end - start) + " milliseconds");
        for (int i = 0; i < hits.length; ++i) {
            int docId = hits[i].doc;
            Document document = indexSearcher.doc(docId);
            LOGGER.info((i + 1) + ". " + document.get(fieldname));
        }
    }
}

From source file:org.apache.solr.handler.component.ExpandAllComponent.java

License:Apache License

/**
 * Expands collapsed search results: for every group ordinal present in the
 * current (collapsed) doc list, collects up to EXPAND_ROWS further group
 * members and adds them to the response under the "expanded" key, keyed by
 * the readable group value.
 * @param rb the response builder for the current request
 * @throws IOException if no expand field can be resolved or searching fails
 */
@Override
public void process(ResponseBuilder rb) throws IOException {

    if (!doExpandAll(rb)) {
        return;
    }

    SolrQueryRequest req = rb.req;
    SolrParams params = req.getParams();

    boolean isShard = params.getBool(ShardParams.IS_SHARD, false);
    String ids = params.get(ShardParams.IDS);

    if (ids == null && isShard) {
        return;
    }

    // the expand field either comes from the request or from a collapsing post filter
    String field = params.get(ExpandParams.EXPAND_FIELD);
    if (field == null) {
        List<Query> filters = rb.getFilters();
        if (filters != null) {
            for (Query q : filters) {
                if (q instanceof CollapsingQParserPlugin.CollapsingPostFilter) {
                    CollapsingQParserPlugin.CollapsingPostFilter cp = (CollapsingQParserPlugin.CollapsingPostFilter) q;
                    field = cp.getField();
                }
            }
        }
    }

    if (field == null) {
        throw new IOException("Expand field is null.");
    }

    String sortParam = params.get(ExpandParams.EXPAND_SORT);
    String[] fqs = params.getParams(ExpandParams.EXPAND_FQ);
    String qs = params.get(ExpandParams.EXPAND_Q);
    int limit = params.getInt(ExpandParams.EXPAND_ROWS, 5);

    Sort sort = null;

    if (sortParam != null) {
        sort = QueryParsing.parseSortSpec(sortParam, rb.req).getSort();
    }

    // the expand query defaults to the main query unless EXPAND_Q is given
    Query query = null;
    if (qs == null) {
        query = rb.getQuery();
    } else {
        try {
            QParser parser = QParser.getParser(qs, null, req);
            query = parser.getQuery();
        } catch (Exception e) {
            throw new IOException(e);
        }
    }

    // filters: the main filters minus the collapsing one, or the EXPAND_FQ set
    List<Query> newFilters = new ArrayList<Query>();

    if (fqs == null) {
        List<Query> filters = rb.getFilters();
        if (filters != null) {
            for (Query q : filters) {
                if (!(q instanceof CollapsingQParserPlugin.CollapsingPostFilter)) {
                    newFilters.add(q);
                }
            }
        }
    } else {
        try {
            for (String fq : fqs) {
                if (fq != null && fq.trim().length() != 0 && !fq.equals("*:*")) {
                    QParser fqp = QParser.getParser(fq, null, req);
                    newFilters.add(fqp.getQuery());
                }
            }
        } catch (Exception e) {
            throw new IOException(e);
        }
    }

    SolrIndexSearcher searcher = req.getSearcher();
    AtomicReader reader = searcher.getAtomicReader();
    SortedDocValues values = FieldCache.DEFAULT.getTermsIndex(reader, field);
    // mark the group ordinals that occur on the collapsed result page
    FixedBitSet groupBits = new FixedBitSet(values.getValueCount());
    DocList docList = rb.getResults().docList;
    IntOpenHashSet collapsedSet = new IntOpenHashSet(docList.size() * 2);

    DocIterator idit = docList.iterator();

    while (idit.hasNext()) {
        int doc = idit.nextDoc();
        int ord = values.getOrd(doc);
        if (ord > -1) {
            groupBits.set(ord);
            collapsedSet.add(doc);
        }
    }

    Collector collector = null;
    GroupExpandCollector groupExpandCollector = new GroupExpandCollector(values, groupBits, collapsedSet, limit,
            sort);
    SolrIndexSearcher.ProcessedFilter pfilter = searcher.getProcessedFilter(null, newFilters);
    if (pfilter.postFilter != null) {
        // chain through the post filter so its restriction still applies
        pfilter.postFilter.setLastDelegate(groupExpandCollector);
        collector = pfilter.postFilter;
    } else {
        collector = groupExpandCollector;
    }

    searcher.search(query, pfilter.filter, collector);
    IntObjectOpenHashMap groups = groupExpandCollector.getGroups();
    Iterator<IntObjectCursor> it = groups.iterator();
    // convert each group's TopDocs into a DocSlice keyed by the readable group value
    // (parameterized instead of the raw "new HashMap()")
    Map<String, DocSlice> outMap = new HashMap<String, DocSlice>();
    BytesRef bytesRef = new BytesRef();
    CharsRef charsRef = new CharsRef();
    FieldType fieldType = searcher.getSchema().getField(field).getType();

    while (it.hasNext()) {
        IntObjectCursor cursor = it.next();
        int ord = cursor.key;
        TopDocsCollector topDocsCollector = (TopDocsCollector) cursor.value;
        TopDocs topDocs = topDocsCollector.topDocs();
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        if (scoreDocs.length > 0) {
            int[] docs = new int[scoreDocs.length];
            float[] scores = new float[scoreDocs.length];
            for (int i = 0; i < docs.length; i++) {
                ScoreDoc scoreDoc = scoreDocs[i];
                docs[i] = scoreDoc.doc;
                scores[i] = scoreDoc.score;
            }
            DocSlice slice = new DocSlice(0, docs.length, docs, scores, topDocs.totalHits,
                    topDocs.getMaxScore());
            values.lookupOrd(ord, bytesRef);
            fieldType.indexedToReadable(bytesRef, charsRef);
            String group = charsRef.toString();
            outMap.put(group, slice);
        }
    }

    rb.rsp.add("expanded", outMap);
}

From source file:org.apache.solr.handler.component.ExpandComponent.java

License:Apache License

/**
 * Expands collapsed search results: for every group ordinal present in the
 * current (collapsed) doc list, collects up to EXPAND_ROWS further group
 * members and adds them to the response under the "expanded" key, keyed by
 * the readable group value.
 * @param rb the response builder for the current request
 * @throws IOException if no expand field can be resolved or searching fails
 */
@SuppressWarnings("unchecked")
@Override
public void process(ResponseBuilder rb) throws IOException {

    if (!rb.doExpand) {
        return;
    }

    SolrQueryRequest req = rb.req;
    SolrParams params = req.getParams();

    boolean isShard = params.getBool(ShardParams.IS_SHARD, false);
    String ids = params.get(ShardParams.IDS);

    if (ids == null && isShard) {
        return;
    }

    // the expand field either comes from the request or from a collapsing post filter
    String field = params.get(ExpandParams.EXPAND_FIELD);
    if (field == null) {
        List<Query> filters = rb.getFilters();
        if (filters != null) {
            for (Query q : filters) {
                if (q instanceof CollapsingQParserPlugin.CollapsingPostFilter) {
                    CollapsingQParserPlugin.CollapsingPostFilter cp = (CollapsingQParserPlugin.CollapsingPostFilter) q;
                    field = cp.getField();
                }
            }
        }
    }

    if (field == null) {
        throw new IOException("Expand field is null.");
    }

    String sortParam = params.get(ExpandParams.EXPAND_SORT);
    String[] fqs = params.getParams(ExpandParams.EXPAND_FQ);
    String qs = params.get(ExpandParams.EXPAND_Q);
    int limit = params.getInt(ExpandParams.EXPAND_ROWS, 5);

    Sort sort = null;

    if (sortParam != null) {
        sort = QueryParsing.parseSortSpec(sortParam, rb.req).getSort();
    }

    // the expand query defaults to the main query unless EXPAND_Q is given
    Query query;
    if (qs == null) {
        query = rb.getQuery();
    } else {
        try {
            QParser parser = QParser.getParser(qs, null, req);
            query = parser.getQuery();
        } catch (Exception e) {
            throw new IOException(e);
        }
    }

    // filters: the main filters minus the collapsing one, or the EXPAND_FQ set
    List<Query> newFilters = new ArrayList<>();

    if (fqs == null) {
        List<Query> filters = rb.getFilters();
        if (filters != null) {
            for (Query q : filters) {
                if (!(q instanceof CollapsingQParserPlugin.CollapsingPostFilter)) {
                    newFilters.add(q);
                }
            }
        }
    } else {
        try {
            for (String fq : fqs) {
                if (fq != null && fq.trim().length() != 0 && !fq.equals("*:*")) {
                    QParser fqp = QParser.getParser(fq, null, req);
                    newFilters.add(fqp.getQuery());
                }
            }
        } catch (Exception e) {
            throw new IOException(e);
        }
    }

    SolrIndexSearcher searcher = req.getSearcher();
    AtomicReader reader = searcher.getAtomicReader();
    SortedDocValues values = FieldCache.DEFAULT.getTermsIndex(reader, field);
    // mark the group ordinals that occur on the collapsed result page
    FixedBitSet groupBits = new FixedBitSet(values.getValueCount());
    DocList docList = rb.getResults().docList;
    IntOpenHashSet collapsedSet = new IntOpenHashSet(docList.size() * 2);

    DocIterator idit = docList.iterator();

    while (idit.hasNext()) {
        int doc = idit.nextDoc();
        int ord = values.getOrd(doc);
        if (ord > -1) {
            groupBits.set(ord);
            collapsedSet.add(doc);
        }
    }

    Collector collector;
    if (sort != null)
        sort = sort.rewrite(searcher);
    GroupExpandCollector groupExpandCollector = new GroupExpandCollector(values, groupBits, collapsedSet, limit,
            sort);
    SolrIndexSearcher.ProcessedFilter pfilter = searcher.getProcessedFilter(null, newFilters);
    if (pfilter.postFilter != null) {
        // chain through the post filter so its restriction still applies
        pfilter.postFilter.setLastDelegate(groupExpandCollector);
        collector = pfilter.postFilter;
    } else {
        collector = groupExpandCollector;
    }

    searcher.search(query, pfilter.filter, collector);
    IntObjectMap groups = groupExpandCollector.getGroups();
    // convert each group's TopDocs into a DocSlice keyed by the readable group value
    // (diamond instead of the raw "new HashMap()", consistent with newFilters above)
    Map<String, DocSlice> outMap = new HashMap<>();
    CharsRef charsRef = new CharsRef();
    FieldType fieldType = searcher.getSchema().getField(field).getType();
    for (IntObjectCursor cursor : (Iterable<IntObjectCursor>) groups) {
        int ord = cursor.key;
        TopDocsCollector topDocsCollector = (TopDocsCollector) cursor.value;
        TopDocs topDocs = topDocsCollector.topDocs();
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        if (scoreDocs.length > 0) {
            int[] docs = new int[scoreDocs.length];
            float[] scores = new float[scoreDocs.length];
            for (int i = 0; i < docs.length; i++) {
                ScoreDoc scoreDoc = scoreDocs[i];
                docs[i] = scoreDoc.doc;
                scores[i] = scoreDoc.score;
            }
            DocSlice slice = new DocSlice(0, docs.length, docs, scores, topDocs.totalHits,
                    topDocs.getMaxScore());
            final BytesRef bytesRef = values.lookupOrd(ord);
            fieldType.indexedToReadable(bytesRef, charsRef);
            String group = charsRef.toString();
            outMap.put(group, slice);
        }
    }

    rb.rsp.add("expanded", outMap);
}