Example usage for org.apache.solr.handler.component ResponseBuilder getResults

List of usage examples for org.apache.solr.handler.component ResponseBuilder getResults

Introduction

In this page you can find the example usage for org.apache.solr.handler.component ResponseBuilder getResults.

Prototype

public DocListAndSet getResults() 

Source Link

Usage

From source file:com.frank.search.common.PagerComponent.java

License:GNU General Public License

/**
 * Builds a pager structure (page label -> start offset) over the current
 * result set and attaches it to the response under the "pager" key.
 * Does nothing unless both the pager page count and the row count are set.
 */
@Override
@SuppressWarnings("unchecked")
public void process(ResponseBuilder rb) throws IOException {
    /* get request params */
    SolrParams par = rb.req.getParams();
    int rows = par.getInt(CommonParams.ROWS, 0);
    int start = par.getInt(CommonParams.START, 0);
    int pages = par.getInt(PARAM_PAGER, 0); // total number of page links to emit
    int pages_pre = par.getInt(PARAM_PAGER_PRE, 2); // links shown before the current page

    /* need to work? pager disabled or page size zero -> nothing to do */
    if (pages == 0 || rows == 0)
        return;

    /* pager list: "pages" sub-list nested inside the outer pager entry */
    NamedList lst = new SimpleOrderedMap<Object>();
    NamedList lst2 = new SimpleOrderedMap<Object>();

    /* paging pages */
    // NOTE(review): integer division — when doc_count is an exact multiple of
    // rows this yields one page index past the last non-empty page; confirm
    // whether page_count is meant as "last page index" or "number of pages".
    int doc_count = rb.getResults().docSet.size();
    int page_count = doc_count / rows;
    int page_actual = start / rows;
    int page_pre = pages_pre;
    int page_post = pages - page_pre - 1;

    /* page range: shift the window when the current page is near either end */
    if (page_actual - page_pre < 0) {
        page_post += -(page_actual - page_pre);
        page_pre -= -(page_actual - page_pre);
    } else if (page_actual + page_post > page_count) {
        page_post = pages - page_pre;
        page_pre = page_actual + pages - page_count;
    }

    /* sanity: never allow a negative window on either side */
    if (page_pre < 0)
        page_pre = 0;
    if (page_post < 0)
        page_post = 0;

    /* next pages list: 1-based page label -> start offset for that page */
    int i = (page_actual - page_pre);
    for (i = (i <= 0 ? 0 : i); i <= page_count && i <= (page_actual + page_post); i++)
        lst2.add(Integer.toString(i + 1), i * rows);
    lst.add("pages", lst2);

    /* navi: prev/first/next/last offsets, emitted only when applicable */
    if (page_actual > 0)
        lst.add("prev", (page_actual - 1) * rows);
    if (page_actual - page_pre > 0)
        lst.add("first", 0);
    if (page_actual < page_count)
        lst.add("next", (page_actual + 1) * rows);
    if (page_actual + page_post < page_count)
        lst.add("last", page_count * rows);
    lst.add("actual", page_actual + 1);
    lst.add("count", page_count);

    /* finish */
    rb.rsp.add("pager", lst);
}

From source file:com.indoqa.solr.spatial.clustering.SpatialClusteringComponent.java

License:Apache License

/**
 * Collects the latitude/longitude of every document in the current result
 * set into a 2-dimensional cluster structure.
 *
 * Documents missing either coordinate field, or whose stored values are not
 * numeric, are silently skipped.
 *
 * @param rb       response builder whose docSet supplies the matched documents
 * @param maxCount maximum number of clusters to build
 * @param fields   stored fields to load for each document
 * @return clusters of documents keyed by (latitude, longitude)
 * @throws IOException if a document cannot be read from the index
 */
private DblClusters<Document> createDocumentClusters(ResponseBuilder rb, int maxCount, Set<String> fields)
        throws IOException {
    DblClusters<Document> clusters = new DblClusters<>(2, maxCount);

    DocSet docSet = rb.getResults().docSet;
    DocIterator iterator = docSet.iterator();

    while (iterator.hasNext()) {
        // nextDoc() returns the primitive doc id — avoids the boxing that
        // Integer next() incurs on every iteration.
        int docId = iterator.nextDoc();
        Document doc = rb.req.getSearcher().doc(docId, fields);

        IndexableField latitudeField = doc.getField(this.fieldNameLat);
        IndexableField longitudeField = doc.getField(this.fieldNameLon);

        if (latitudeField == null || longitudeField == null) {
            continue;
        }

        String latitudeString = latitudeField.stringValue();
        String longitudeString = longitudeField.stringValue();

        if (!isNumeric(latitudeString) || !isNumeric(longitudeString)) {
            continue;
        }

        // parseDouble yields primitives directly; Double.valueOf would box
        // and immediately unbox into the double[].
        clusters.add(1, new double[] { Double.parseDouble(latitudeString), Double.parseDouble(longitudeString) },
                doc);
    }
    return clusters;
}

From source file:com.search.MySearchHandler.java

License:Apache License

/**
 * Main entry point: rewrites the incoming natural-language question into a
 * structured Solr query and executes it through the registered search
 * components.
 *
 * Flow: parse the question (parseQues), derive up to five candidate relation
 * fields (getRelation), run one "forward" query per relation, and — if no
 * forward document is found — fall back to a single "reverse" query built
 * from getRelationReverse. Results and diagnostics are accumulated directly
 * on {@code rsp}.
 *
 * NOTE(review): finalreq.close()/reversereq.close() below are not in finally
 * blocks, so an exception thrown while processing leaks the local request.
 * NOTE(review): rsp.add("Final Query", ...) and rsp.add("type"/"final results")
 * are called inside the relation loop, so they can appear multiple times in
 * the response — confirm this duplication is intended.
 */
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    // int sleep = req.getParams().getInt("sleep",0);
    // if (sleep > 0) {log.error("SLEEPING for " + sleep);
    // Thread.sleep(sleep);}

    /*** Pre-processing of the Query by REGEX starts here -------------- ***/
    SolrParams originalParams = req.getOriginalParams();
    SolrParams psuedoParams = req.getParams(); // rewritten repeatedly as the query is rebuilt below
    if (originalParams.get(CommonParams.Q) != null) {
        String finalQuery;
        String originalQuery = originalParams.get(CommonParams.Q);
        rsp.add("Original query", originalQuery);
        SchemaField keyField = null;
        keyField = req.getCore().getLatestSchema().getUniqueKeyField();
        if (keyField != null) {
            fieldSet.add(keyField.getName());
        }
        /***
         * START CODE TO PARSE QUERY
         *
         * The question is rewritten in stages:
         *
         * Example : Original Query:
         * "Which musical object did russ conway play" Temporary Query :
         * "relations:instrument AND entity:enty" // Generate the relation
         * Final Query : "name:"russ conway" AND occupation:musician"
         */
        ParsedQuestion parsedq = new ParsedQuestion();
        parsedq = parseQues(originalQuery);
        if (parsedq != null) {
            System.out.println(parsedq);
            Map<Integer, String> relationstr = getRelation(parsedq.getRelationKeyWord(), parsedq.getWhtype(),
                    req);

            /**
             * END CODE TO PARSE QUERY
             */

            /*** Final Phase: forward query on the parsed entity name ***/
            finalQuery = "title:\"" + parsedq.getSearchName() + "\"";
            NamedList finalparamsList = req.getParams().toNamedList();
            finalparamsList.setVal(finalparamsList.indexOf(CommonParams.Q, 0), finalQuery);
            psuedoParams = SolrParams.toSolrParams(finalparamsList);
            if (psuedoParams.get(CommonParams.SORT) == null) {
                finalparamsList.add(CommonParams.SORT, "score desc");
            } else {
                finalparamsList.setVal(finalparamsList.indexOf(CommonParams.SORT, 0), "score desc");
            }
            int documentsRetrieved = 0;
            if (relationstr != null) {
                rsp.add("total relations retrieved", relationstr.size());
                rsp.add("relations", relationstr);
                NamedList finaldocresults = new NamedList();
                NamedList forwarddocresults = new NamedList();
                // ids already emitted, so the same document is never returned twice
                Set<String> checkDocuments = new HashSet<String>();

                // one sub-query per candidate relation field, capped at 10 docs total
                for (int i = 0; i < relationstr.size() && (documentsRetrieved < 10); i++) {
                    NamedList relationdocresults = new NamedList();
                    String desiredField = relationstr.get(i);
                    Set<String> tempFieldSet = new HashSet<String>();
                    int docsRetrievedforThisRelation = 0;
                    tempFieldSet.add(desiredField);
                    psuedoParams = SolrParams.toSolrParams(finalparamsList);
                    if (psuedoParams.get(CommonParams.FL) == null) {
                        finalparamsList.add(CommonParams.FL, desiredField);
                    } else {
                        finalparamsList.setVal(finalparamsList.indexOf(CommonParams.FL, 0), desiredField);
                    }
                    SolrQueryRequest finalreq = new LocalSolrQueryRequest(req.getCore(), finalparamsList);
                    rsp.add("Final Query", finalreq.getParams().get(CommonParams.Q));

                    SolrQueryResponse finalrsp = new SolrQueryResponse();

                    ResponseBuilder finalrb = new ResponseBuilder(finalreq, finalrsp, components);
                    for (SearchComponent c : components) {
                        c.prepare(finalrb);
                        c.process(finalrb);
                    }

                    DocList finaldocs = finalrb.getResults().docList;
                    if (finaldocs == null || finaldocs.size() == 0) {
                        log.debug("No results");
                        // support for reverse query
                    } else {
                        DocIterator finaliterator = finaldocs.iterator();
                        Document finaldoc;
                        for (int j = 0; j < finaldocs.size(); j++) {
                            try {
                                if (finaliterator.hasNext()) {
                                    int finaldocid = finaliterator.nextDoc();
                                    finaldoc = finalrb.req.getSearcher().doc(finaldocid, tempFieldSet);
                                    // only count docs that actually carry the desired field
                                    if (!checkDocuments.contains(finaldoc.get("id"))) {
                                        if (finaldoc.get(desiredField) != null) {
                                            checkDocuments.add(finaldoc.get("id"));
                                            docsRetrievedforThisRelation++;
                                            documentsRetrieved++;
                                            relationdocresults.add(finaldoc.get("title"), finaldoc);
                                            if (documentsRetrieved >= 10) {
                                                break;
                                            }
                                        }
                                    }
                                }
                            } catch (IOException ex) {
                                java.util.logging.Logger.getLogger(MySearchHandler.class.getName())
                                        .log(Level.SEVERE, null, ex);
                            }
                        }
                        if (docsRetrievedforThisRelation > 0) {
                            rsp.add("docs retrieved for : " + desiredField, docsRetrievedforThisRelation);
                            forwarddocresults.add(desiredField, relationdocresults);
                        }
                    }
                    finalreq.close();
                    if (documentsRetrieved > 0) {
                        rsp.add("type", "forward");
                        rsp.add("final results", forwarddocresults);
                    }
                }
                // forward search found nothing: fall back to a reverse query
                if (documentsRetrieved == 0) {
                    NamedList reversedocresults = new NamedList();
                    relationstr = getRelationReverse(parsedq.getRelationKeyWord(), req);
                    System.out.println(relationstr);
                    StandardAnalyzer analyzer = new StandardAnalyzer(Version.LUCENE_46);
                    String reversequery = "";
                    for (int i = 0; i < relationstr.size(); i++) {
                        QueryParser relationsparser = new QueryParser(Version.LUCENE_46, relationstr.get(i),
                                analyzer);
                        try {
                            reversequery += relationsparser.parse(parsedq.getSearchName()).toString() + " ";
                        } catch (ParseException e) {
                            // NOTE(review): parse failure is only printed; the
                            // partial reversequery is still used below
                            e.printStackTrace();
                        }
                    }
                    QueryParser relationsparser = new QueryParser(Version.LUCENE_46, "infotype", analyzer);
                    reversequery += relationsparser.parse(parsedq.getWhtype().firstKey().toLowerCase());

                    NamedList reverseList = req.getParams().toNamedList();
                    psuedoParams = SolrParams.toSolrParams(reverseList);
                    reverseList.setVal(reverseList.indexOf(CommonParams.Q, 0), reversequery);
                    SolrQueryRequest reversereq = new LocalSolrQueryRequest(req.getCore(), reverseList);
                    SolrQueryResponse reversersp = new SolrQueryResponse();
                    ResponseBuilder reverserb = new ResponseBuilder(reversereq, reversersp, components);
                    for (SearchComponent c : components) {
                        try {
                            c.prepare(reverserb);
                            c.process(reverserb);
                        } catch (IOException ex) {

                            java.util.logging.Logger.getLogger(MySearchHandler.class.getName())
                                    .log(Level.SEVERE, null, ex);
                        }
                    }
                    DocList reversedocs = reverserb.getResults().docList;
                    if (reversedocs == null || reversedocs.size() == 0) {
                        log.debug("No results");
                        // GET SECOND entry from WHTYPE .. search with that ..
                    } else {
                        //   NamedList docresults = new NamedList();
                        DocIterator reverseiterator = reversedocs.iterator();
                        Document reversedoc;
                        int docScore = 0;
                        for (int m = 0; m < reversedocs.size(); m++) {
                            try {
                                int reversedocid = reverseiterator.nextDoc();
                                reversedoc = reverserb.req.getSearcher().doc(reversedocid, fieldSet);
                                if (reversedoc.get("title") != null) {
                                    documentsRetrieved++;
                                    reversedocresults.add(reversedoc.get("title"), reversedoc);
                                    if (documentsRetrieved >= 10) {
                                        break;
                                    }
                                }
                            } catch (IOException ex) {
                                java.util.logging.Logger.getLogger(MySearchHandler.class.getName())
                                        .log(Level.SEVERE, null, ex);
                            }
                        }
                    }
                    if (documentsRetrieved == 0) {
                        rsp.add("message", "No Results found. Try another query!");
                    } else {
                        rsp.add("type", "reverse");
                        rsp.add("final results", reversedocresults);
                    }
                    reversereq.close();
                }
            } else {
                if (documentsRetrieved == 0) {
                    rsp.add("message", "No Results found. Please rephrase the query!");
                }
            }
        } else {
            rsp.add("message", "This is not a valid query!");
        }
    } else {
        rsp.add("message", "User should provide at least one word as a query!");
    }
}

From source file:com.search.MySearchHandler.java

License:Apache License

private Map<Integer, String> getRelationReverse(String value, SolrQueryRequest req) {
    /*** Galla's modified code starts here ---- >
     * /* www  .  jav a 2 s  .  c om*/
     */
    StandardAnalyzer analyzer = new StandardAnalyzer(Version.LUCENE_46);
    QueryParser relationsparser = new QueryParser(Version.LUCENE_46, "relations", analyzer);
    QueryParser entityparser = new QueryParser(Version.LUCENE_46, "entity", analyzer);
    QueryParser fieldidparser = new QueryParser(Version.LUCENE_46, "fieldid", analyzer);

    Map<Integer, String> desiredFieldList = null;
    int desiredFieldsCount = 0;
    String desiredRelation = null;

    Set<String> checkRelation = null;
    if (desiredFieldsCount < 5) {
        desiredFieldList = new HashMap<Integer, String>();
        checkRelation = new HashSet<String>();
        NamedList tempparamsList = req.getParams().toNamedList();
        SolrParams psuedoParams = SolrParams.toSolrParams(tempparamsList);
        if (psuedoParams.get(CommonParams.SORT) == null) {
            tempparamsList.add(CommonParams.SORT, "score desc");
        } else {
            tempparamsList.setVal(tempparamsList.indexOf(CommonParams.SORT, 0), "score desc");
        }
        SolrQueryRequest firstreq = null;
        SolrQueryResponse firstrsp = null;
        ResponseBuilder firstrb = null;
        DocList docs = null;

        String relString = "";
        String fieldString = "";
        try {
            relString = relationsparser.parse(value).toString();

            fieldString = fieldidparser.parse(value).toString();
        } catch (ParseException e) {

            e.printStackTrace();
        }
        //      (+relations:"children" and +entity:"num") or (relations:"children" and fieldid:"children") or (fieldid:"children" and entity:"num")
        String tempQuery = "(" + relString + ")" + " OR (" + fieldString + ")";

        System.out.println(tempQuery);
        tempparamsList.setVal(tempparamsList.indexOf(CommonParams.Q, 0), tempQuery);

        firstreq = new LocalSolrQueryRequest(req.getCore(), tempparamsList);
        firstrsp = new SolrQueryResponse();
        firstrb = new ResponseBuilder(firstreq, firstrsp, components);

        for (SearchComponent c : components) {
            try {
                c.prepare(firstrb);
                c.process(firstrb);
            } catch (IOException ex) {

                java.util.logging.Logger.getLogger(MySearchHandler.class.getName()).log(Level.SEVERE, null, ex);
            }
        }

        docs = firstrb.getResults().docList;

        if (docs == null || docs.size() == 0) {
            log.debug("No results");
            // GET SECOND entry from WHTYPE .. search with that ..

        } else {
            //   NamedList docresults = new NamedList();
            DocIterator iterator = docs.iterator();
            Document doc;
            int docScore = 0;
            for (int i = 0; i < docs.size(); i++) {
                try {
                    int docid = iterator.nextDoc();
                    doc = firstrb.req.getSearcher().doc(docid, fieldSet);
                    desiredRelation = doc.get("fieldid");
                    if (!checkRelation.contains(desiredRelation)) {
                        checkRelation.add(desiredRelation);
                        desiredFieldList.put(desiredFieldsCount++, desiredRelation);
                        System.out.println("vgalla's relation : " + desiredRelation);
                        if (desiredFieldsCount >= 5) {
                            return desiredFieldList;
                        }
                    }
                } catch (IOException ex) {
                    java.util.logging.Logger.getLogger(MySearchHandler.class.getName()).log(Level.SEVERE, null,
                            ex);
                }
            }
        }
        firstreq.close();

        /*** Galla's code ends here ----- >
         * 
         */
    }

    return desiredFieldList;
}

From source file:com.search.MySearchHandler.java

License:Apache License

/**
 * Forward relation lookup: for each wh-type candidate, queries the relations
 * index with combinations of the relation keyword, the entity type and the
 * field id, then supplements the results from the static keyword maps
 * (exrelmap/mappingmap). Collects up to five distinct field ids.
 *
 * @param value  relation keyword extracted from the question
 * @param whtype candidate wh-types (e.g. "enty", "num") ranked by score
 * @param req    the original request, used for core access and base params
 * @return map of position (0..4) to field id; null when {@code whtype} is
 *         null or empty
 */
private static Map<Integer, String> getRelation(String value, TreeMap<String, Double> whtype,
        SolrQueryRequest req) {
    if (whtype == null) {
        return null;
    }

    StandardAnalyzer analyzer = new StandardAnalyzer(Version.LUCENE_46);
    QueryParser relationsparser = new QueryParser(Version.LUCENE_46, "relations", analyzer);
    QueryParser entityparser = new QueryParser(Version.LUCENE_46, "entity", analyzer);
    QueryParser fieldidparser = new QueryParser(Version.LUCENE_46, "fieldid", analyzer);

    Set<String> whtypeSet = whtype.keySet();
    if (whtypeSet.isEmpty()) {
        // original behavior: no wh-type candidates -> null result
        return null;
    }

    Map<Integer, String> desiredFieldList = new HashMap<Integer, String>();
    Set<String> checkRelation = new HashSet<String>(); // field ids already collected
    int desiredFieldsCount = 0;

    // force sorting by score so the best-matching relations come first
    NamedList tempparamsList = req.getParams().toNamedList();
    SolrParams psuedoParams = SolrParams.toSolrParams(tempparamsList);
    if (psuedoParams.get(CommonParams.SORT) == null) {
        tempparamsList.add(CommonParams.SORT, "score desc");
    } else {
        tempparamsList.setVal(tempparamsList.indexOf(CommonParams.SORT, 0), "score desc");
    }

    for (String tempStr : whtypeSet) {
        String tempType = tempStr.toLowerCase().trim();
        String relString = "";
        String entyString = "";
        String fieldString = "";
        try {
            relString = relationsparser.parse(value).toString();
            entyString = entityparser.parse(tempType).toString();
            fieldString = fieldidparser.parse(value).toString();
        } catch (ParseException e) {
            // best-effort: fall through with whatever parsed successfully
            e.printStackTrace();
        }
        // (relations AND entity) OR (relations AND fieldid) OR (fieldid AND entity)
        String tempQuery = "(" + relString + " AND " + entyString + ")" + " OR " + "(" + relString
                + " AND " + fieldString + ")" + " OR " + "(" + fieldString + " AND " + entyString + ")";

        System.out.println(tempQuery);
        tempparamsList.setVal(tempparamsList.indexOf(CommonParams.Q, 0), tempQuery);

        SolrQueryRequest firstreq = new LocalSolrQueryRequest(req.getCore(), tempparamsList);
        // try/finally guarantees the local request is closed even on the
        // early return below (the original leaked it when five fields were
        // found mid-loop)
        try {
            SolrQueryResponse firstrsp = new SolrQueryResponse();
            ResponseBuilder firstrb = new ResponseBuilder(firstreq, firstrsp, components);

            for (SearchComponent c : components) {
                try {
                    c.prepare(firstrb);
                    c.process(firstrb);
                } catch (IOException ex) {
                    java.util.logging.Logger.getLogger(MySearchHandler.class.getName()).log(Level.SEVERE,
                            null, ex);
                }
            }

            DocList docs = firstrb.getResults().docList;

            if (docs == null || docs.size() == 0) {
                log.debug("No results");
                // GET SECOND entry from WHTYPE .. search with that ..
            } else {
                DocIterator iterator = docs.iterator();
                for (int i = 0; i < docs.size(); i++) {
                    try {
                        int docid = iterator.nextDoc();
                        Document doc = firstrb.req.getSearcher().doc(docid, fieldSet);
                        String desiredRelation = doc.get("fieldid");
                        if (!checkRelation.contains(desiredRelation)) {
                            checkRelation.add(desiredRelation);
                            desiredFieldList.put(desiredFieldsCount++, desiredRelation);
                            System.out.println("vgalla's relation : " + desiredRelation);
                            if (desiredFieldsCount >= 5) {
                                return desiredFieldList;
                            }
                        }
                    } catch (IOException ex) {
                        java.util.logging.Logger.getLogger(MySearchHandler.class.getName())
                                .log(Level.SEVERE, null, ex);
                    }
                }
            }
        } finally {
            firstreq.close();
        }

        // supplement from the static keyword -> relation maps
        String exrelstring = "";
        String[] strarray = value.split(" ");
        for (int i = 0; i < strarray.length; i++) {
            if (exrelmap.containsKey(strarray[i].toLowerCase().trim())) {
                exrelstring += exrelmap.get(strarray[i].toLowerCase().trim());
            }
        }

        if (!exrelstring.equals("")) {
            String[] temp = exrelstring.split("~~");
            for (int i = 0; i < temp.length; i++) {
                if (!temp[i].trim().equals("")) {
                    String mapdetect = mappingmap.get(temp[i].trim());
                    // null check: mappingmap may have no entry for this
                    // relation (the original NPE'd on toLowerCase() here)
                    if (mapdetect != null && mapdetect.toLowerCase().trim().equals(tempType)) {
                        String desiredRelation = temp[i].trim();
                        if (!checkRelation.contains(desiredRelation)) {
                            checkRelation.add(desiredRelation);
                            desiredFieldList.put(desiredFieldsCount++, desiredRelation);
                            System.out.println("arafath's relation : " + desiredRelation);
                            if (desiredFieldsCount >= 5) {
                                return desiredFieldList;
                            }
                        }
                    }
                }
            }
        }
    }
    return desiredFieldList;
}

From source file:com.search.MySearchHandlerTest.java

License:Apache License

/**
 * Experimental/test version of the handler: runs only the hard-coded
 * "temporary" relation query and returns its raw doc list. The parsing and
 * final-phase stages are preserved below as commented-out code.
 *
 * NOTE(review): firstreq.close() is not in a finally block, so an exception
 * from prepare()/process() leaks the local request.
 */
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    // int sleep = req.getParams().getInt("sleep",0);
    // if (sleep > 0) {log.error("SLEEPING for " + sleep);  Thread.sleep(sleep);}

    /*** Pre-processing of the Query by REGEX starts here --------------***/
    SolrParams originalParams = req.getOriginalParams();
    SolrParams psuedoParams = req.getParams(); // These psuedoParams keep changing
    if (originalParams.get(CommonParams.Q) != null) {
        String finalQuery;

        String originalQuery = originalParams.get(CommonParams.Q);

        rsp.add("Original query", originalQuery);

        /*** Arafath's code to prepare query starts here
         * The query should be in the following format ->
         * Example : 
         *       Original Query: "Which musical object did russ conway play"
         *      Temporary Query : "relations:instrument AND entity:enty" // Generate the relation
         *       Final Query : "name:"russ conway" AND occupation:musician"
         */

        // hard-coded stand-in for the relation query that parseQues would build
        String tempQuery = "relations:instrumental AND entity:enty";
        rsp.add("Temporary query", tempQuery);
        String desiredField = null;
        Set<String> fieldSet = null;
        SchemaField keyField = null;

        NamedList tempparamsList = req.getParams().toNamedList();
        tempparamsList.setVal(tempparamsList.indexOf(CommonParams.Q, 0), tempQuery);
        psuedoParams = SolrParams.toSolrParams(tempparamsList);
        //      if (psuedoParams.get(CommonParams.SORT) == null) {
        //         tempparamsList.add(CommonParams.SORT, "score desc");
        //      } else {
        //         tempparamsList.setVal(tempparamsList.indexOf(CommonParams.SORT, 0), "score desc");
        //      }

        SolrQueryRequest firstreq = new LocalSolrQueryRequest(req.getCore(), tempparamsList);
        SolrQueryResponse firstrsp = new SolrQueryResponse();
        firstrsp.setAllValues(rsp.getValues());
        ResponseBuilder firstrb = new ResponseBuilder(firstreq, firstrsp, components);

        for (SearchComponent c : components) {
            c.prepare(firstrb);
            c.process(firstrb);
        }
        rsp.add("response", firstrb.getResults().docList);
        /***
                 DocList docs = firstrb.getResults().docList;
                 if (docs == null || docs.size() == 0) {
                    log.debug("No results");
                 } else {
                    fieldSet = new HashSet <String> ();
                    keyField = firstrb.req.getCore().getLatestSchema().getUniqueKeyField();
                    if (keyField != null) {
                       fieldSet.add(keyField.getName());
                    }
                    fieldSet.add("fieldid");
                    fieldSet.add("relations");
                    fieldSet.add("entity");
                    fieldSet.add("count");
                    NamedList docresults = new NamedList();
                    DocIterator iterator = docs.iterator();
                    Document doc;
                    int docScore = 0;
                    rsp.add("doc retrieved ", docs.size());
                    for (int i=0; i<docs.size(); i++) {
                       try {
          int docid = iterator.nextDoc();
          doc = firstrb.req.getSearcher().doc(docid, fieldSet);
          if (Integer.parseInt(doc.get("count")) > docScore) {
             docScore = Integer.parseInt(doc.get("count"));
             desiredField = doc.get("fieldid");
          }
          docresults.add(String.valueOf(docid), doc);
                       } catch (IOException ex) {
          java.util.logging.Logger.getLogger(CustomQueryComponent.class.getName()).log(Level.SEVERE, null,ex);
                       }
                    }
                    fieldSet.clear();
                    rsp.add("Intermediate results", docresults);
                    if (desiredField != null) {
                       rsp.add("Required Field", desiredField);
                    }
                 } ***/

        firstreq.close();

        /*** Final Phase starts here ***/
        /***   finalQuery = "name:\"russ conway\" AND occupation:musician";
              NamedList finalparamsList = req.getParams().toNamedList();
              finalparamsList.setVal(finalparamsList.indexOf(CommonParams.Q, 0), finalQuery);
              psuedoParams = SolrParams.toSolrParams(finalparamsList);
              if (psuedoParams.get(CommonParams.SORT) == null) {
                 finalparamsList.add(CommonParams.SORT, "score desc");
              } else {
                 finalparamsList.setVal(finalparamsList.indexOf(CommonParams.SORT, 0), "score desc");
              }   
           //   if (desiredField != null) {
           //      if (psuedoParams.get(CommonParams.FL) != null) {
           //         finalparamsList.setVal(finalparamsList.indexOf(CommonParams.FL, 0), desiredField);
           //      } else {
           //         finalparamsList.add(CommonParams.FL, desiredField);
           //      }
           //   }
                
              SolrQueryRequest finalreq = new LocalSolrQueryRequest(req.getCore(), finalparamsList);
              rsp.add("Final Query", finalreq.getParams().get(CommonParams.Q));
              ResponseBuilder rb = new ResponseBuilder(finalreq,rsp,components);
              for (SearchComponent c : components) {
                 c.prepare(rb);
                 c.process(rb);
              } ***/
        /*** testing
                 DocList finaldocs = rb.getResults().docList;
                 if (finaldocs == null || finaldocs.size() == 0) {
                    log.debug("No results");
                 } else {
                    keyField = rb.req.getCore().getLatestSchema().getUniqueKeyField();
                    if (keyField != null) {
                       fieldSet.add(keyField.getName());
                    }
                    if (desiredField != null) {
                       fieldSet.add(desiredField);
                    }
                    fieldSet.add("name");
                    NamedList finaldocresults = new NamedList();
                    DocIterator finaliterator = finaldocs.iterator();
                    Document finaldoc;
                    rsp.add("finaldocs retrieved ", finaldocs.size());
                    for (int i=0; i<docs.size(); i++) {
                       try {
          if (finaliterator.hasNext()) {
             int finaldocid = finaliterator.nextDoc();
             finaldoc = rb.req.getSearcher().doc(finaldocid, fieldSet);
             finaldocresults.add(String.valueOf(finaldocid), finaldoc);
          }
                       } catch (IOException ex) {
          java.util.logging.Logger.getLogger(MySearchHandler.class.getName()).log(Level.SEVERE, null,ex);
                       }
                    }
                    rsp.add("final results", finaldocresults);
                 } ***/
        //   finalreq.close(); 
    } else {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                "Need to give at least one word as query!");
    }
}

From source file:com.searchbox.TaggerComponent.java

License:Apache License

/**
 * Tags every document in the current result list and returns a NamedList keyed
 * by the document's unique-key value, where each entry maps a tag suggestion to
 * its score.
 *
 * @param rb       response builder holding the current {@code DocList}
 * @param params   request parameters (may override the globally configured fields)
 * @param searcher searcher used to load the stored field values
 * @param lcount   maximum number of tag suggestions requested per document
 * @return per-document tagging results, or {@code null} when no tagging fields
 *         are configured
 */
private NamedList doDocuments(ResponseBuilder rb, SolrParams params, SolrIndexSearcher searcher, int lcount) {
    // Per-request field list overrides the globally configured one.
    String[] localfields = params.getParams(TaggerComponentParams.QUERY_FIELDS);
    String[] fields = null;

    if (gfields != null) {
        fields = gfields;
    }
    if (localfields != null) {
        fields = localfields;
    }

    if (fields == null) {
        LOGGER.error("Fields aren't defined, not performing tagging.");
        return null;
    }

    DocList docs = rb.getResults().docList;
    // FIX: the original only logged here and then fell through, throwing a
    // NullPointerException on docs.size() whenever docList was null. Bail out
    // with an empty response instead (identical output for the size()==0 case).
    if (docs == null || docs.size() == 0) {
        LOGGER.debug("No results");
        return new SimpleOrderedMap();
    }
    LOGGER.debug("Doing This many docs:\t" + docs.size());

    // Stored fields to fetch: the unique key plus every tagging field.
    Set<String> fset = new HashSet<String>();
    // Use getLatestSchema() for consistency with the rest of the codebase;
    // getSchema() is deprecated in recent Solr versions.
    SchemaField keyField = rb.req.getCore().getLatestSchema().getUniqueKeyField();
    if (null != keyField) {
        fset.add(keyField.getName());
    }
    for (String field : fields) {
        fset.add(field);
    }

    NamedList response = new SimpleOrderedMap();

    DocIterator iterator = docs.iterator();
    for (int i = 0; i < docs.size(); i++) {
        try {
            int docId = iterator.nextDoc();
            Document doc = searcher.doc(docId, fset);

            // Concatenate all (possibly multi-valued) field values into the
            // text that gets tagged; ". " keeps sentence boundaries sane.
            StringBuilder sb = new StringBuilder();
            for (String field : fields) {
                IndexableField[] multifield = doc.getFields(field);
                for (IndexableField singlefield : multifield) {
                    sb.append(singlefield.stringValue()).append(". ");
                }
            }

            // FIX: guard against a schema without a unique key (or a document
            // missing it); fall back to the internal Lucene doc id instead of
            // throwing an NPE on keyField.getName().
            String id;
            if (keyField != null && doc.getField(keyField.getName()) != null) {
                id = doc.getField(keyField.getName()).stringValue();
            } else {
                id = String.valueOf(docId);
            }

            TaggerResultSet trs = dfb.tagText(sb.toString(), lcount);
            NamedList docresponse = new SimpleOrderedMap();
            for (TaggerResult tr : trs.suggestions) {
                docresponse.add(tr.suggestion, tr.score);
            }
            response.add(id, docresponse);
        } catch (IOException ex) {
            // Use the component's own slf4j logger instead of java.util.logging
            // so failures surface in the normal Solr log (was inconsistent).
            LOGGER.error("Error tagging document", ex);
        }
    }
    return response;
}

From source file:com.tamingtext.qa.PassageRankingComponent.java

License:Apache License

@Override
public void process(ResponseBuilder rb) throws IOException {
    // Extracts and ranks candidate answer passages for the QA handler.
    // Requires the incoming query to be a SpanNearQuery: it walks the query's
    // spans over the whole index, keeps only spans whose document is in the
    // current result DocList, builds a term-vector window around each span,
    // scores the window, and finally emits the top QA_ROWS passages under the
    // "qaResponse" key of the Solr response.
    SolrParams params = rb.req.getParams();
    if (!params.getBool(COMPONENT_NAME, false)) {
        return; // component not enabled for this request
    }
    Query origQuery = rb.getQuery();
    //TODO: longer term, we don't have to be a span query, we could re-analyze the document
    if (origQuery != null) {
        if (origQuery instanceof SpanNearQuery == false) {
            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                    "Illegal query type.  The incoming query must be a Lucene SpanNearQuery and it was a "
                            + origQuery.getClass().getName());
        }
        SpanNearQuery sQuery = (SpanNearQuery) origQuery;
        SolrIndexSearcher searcher = rb.req.getSearcher();
        IndexReader reader = searcher.getIndexReader();
        // NOTE(review): getSpans(reader)/spans.next() is the pre-4.x Lucene
        // spans API — this code is tied to that Lucene generation.
        Spans spans = sQuery.getSpans(reader);
        //Assumes the query is a SpanQuery
        //Build up the query term weight map and the bi-gram
        Map<String, Float> termWeights = new HashMap<String, Float>();
        Map<String, Float> bigramWeights = new HashMap<String, Float>();
        createWeights(params.get(CommonParams.Q), sQuery, termWeights, bigramWeights, reader);
        // Scoring weights for terms adjacent / second-adjacent to the span,
        // and for bigram matches, all overridable per request.
        float adjWeight = params.getFloat(ADJACENT_WEIGHT, DEFAULT_ADJACENT_WEIGHT);
        float secondAdjWeight = params.getFloat(SECOND_ADJ_WEIGHT, DEFAULT_SECOND_ADJACENT_WEIGHT);
        float bigramWeight = params.getFloat(BIGRAM_WEIGHT, DEFAULT_BIGRAM_WEIGHT);
        //get the passages
        int primaryWindowSize = params.getInt(QAParams.PRIMARY_WINDOW_SIZE, DEFAULT_PRIMARY_WINDOW_SIZE);
        int adjacentWindowSize = params.getInt(QAParams.ADJACENT_WINDOW_SIZE, DEFAULT_ADJACENT_WINDOW_SIZE);
        int secondaryWindowSize = params.getInt(QAParams.SECONDARY_WINDOW_SIZE, DEFAULT_SECONDARY_WINDOW_SIZE);
        // Term-vector mapper that accumulates the window around the current
        // span; its mutable state (spanStart/spanEnd/passage) is reused and
        // cleared on every iteration below.
        WindowBuildingTVM tvm = new WindowBuildingTVM(primaryWindowSize, adjacentWindowSize,
                secondaryWindowSize);
        PassagePriorityQueue rankedPassages = new PassagePriorityQueue();
        //intersect w/ doclist
        DocList docList = rb.getResults().docList;
        while (spans.next() == true) {
            //build up the window
            if (docList.exists(spans.doc())) {
                tvm.spanStart = spans.start();
                tvm.spanEnd = spans.end();
                reader.getTermFreqVector(spans.doc(), sQuery.getField(), tvm);
                //The entries map contains the window, do some ranking of it
                if (tvm.passage.terms.isEmpty() == false) {
                    log.debug("Candidate: Doc: {} Start: {} End: {} ",
                            new Object[] { spans.doc(), spans.start(), spans.end() });
                }
                tvm.passage.lDocId = spans.doc();
                tvm.passage.field = sQuery.getField();
                //score this window
                try {
                    addPassage(tvm.passage, rankedPassages, termWeights, bigramWeights, adjWeight,
                            secondAdjWeight, bigramWeight);
                } catch (CloneNotSupportedException e) {
                    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                            "Internal error cloning Passage", e);
                }
                //clear out the entries for the next round
                tvm.passage.clear();
            }
        }
        NamedList qaResp = new NamedList();
        rb.rsp.add("qaResponse", qaResp);
        int rows = params.getInt(QA_ROWS, 5);

        SchemaField uniqField = rb.req.getSchema().getUniqueKeyField();
        if (rankedPassages.size() > 0) {
            int size = Math.min(rows, rankedPassages.size());
            Set<String> fields = new HashSet<String>();
            // pop() returns passages lowest-score-first, so iterate backwards;
            // each popped passage is added as a separate "answer" entry.
            for (int i = size - 1; i >= 0; i--) {
                Passage passage = rankedPassages.pop();
                if (passage != null) {
                    NamedList passNL = new NamedList();
                    qaResp.add(("answer"), passNL);
                    String idName;
                    String idValue;
                    if (uniqField != null) {
                        idName = uniqField.getName();
                        fields.add(idName);
                        fields.add(passage.field);//prefetch this now, so that it is cached
                        idValue = searcher.doc(passage.lDocId, fields).get(idName);
                    } else {
                        // No unique key in the schema: expose the internal
                        // Lucene doc id instead.
                        idName = "luceneDocId";
                        idValue = String.valueOf(passage.lDocId);
                    }
                    passNL.add(idName, idValue);
                    passNL.add("field", passage.field);
                    //get the window
                    String fldValue = searcher.doc(passage.lDocId, fields).get(passage.field);
                    if (fldValue != null) {
                        //get the window of words to display, we don't use the passage window, as that is based on the term vector
                        int start = passage.terms.first().start;//use the offsets
                        int end = passage.terms.last().end;
                        if (start >= 0 && start < fldValue.length() && end >= 0 && end < fldValue.length()) {
                            passNL.add("window",
                                    fldValue.substring(start, end + passage.terms.last().term.length()));
                        } else {
                            log.debug("Passage does not have correct offset information");
                            passNL.add("window", fldValue);//we don't have offsets, or they are incorrect, return the whole field value
                        }
                    }
                } else {
                    break;
                }
            }
        }
    }

}

From source file:com.zemanta.solrcassandrabridge.CassandraBridgeComponent.java

License:Apache License

/**
 * Enriches every hit with fields stored in Cassandra rather than in the Lucene
 * index, then replaces the {@code DocList} already placed in the response with
 * an equivalent {@link SolrDocumentList} carrying the merged values.
 *
 * @param rb the response builder for the current request
 * @throws IOException if reading stored documents from the searcher fails
 */
@Override
public void process(ResponseBuilder rb) throws IOException {

    // Only the unique-key field is needed from the index — it is the join key
    // against Cassandra.
    Set<String> fields = new HashSet<String>();
    fields.add(key_field_name);
    SolrDocumentList docs = SolrPluginUtils.docListToSolrDocumentList(rb.getResults().docList,
            rb.req.getSearcher(), fields, null);

    // Collect ids to fetch and prepare one (initially empty) map per id:
    // {id: {field_name: value, ...}, ...}
    List<BigInteger> docid_list = new ArrayList<BigInteger>();
    HashMap<BigInteger, HashMap<String, String>> output_map = new HashMap<BigInteger, HashMap<String, String>>();

    for (SolrDocument doc : docs) {
        int docid = (Integer) doc.getFieldValue(key_field_name);
        docid_list.add(BigInteger.valueOf(docid));
        output_map.put(BigInteger.valueOf(docid), new HashMap<String, String>());
    }

    // Ask Cassandra only for the intersection of requested (fl) fields and the
    // configured bridged fields.
    ReturnFields returnFields = new SolrReturnFields(rb.req.getParams().getParams(CommonParams.FL), rb.req);
    Set<String> cassandra_fields;
    Set<String> requested = returnFields.getLuceneFieldNames();
    // FIX: getLuceneFieldNames() may return null, and the set it returns is
    // owned by the ReturnFields instance — the original mutated it in place
    // with retainAll(). Null-guard and take a defensive copy instead.
    if (returnFields.wantsAllFields() || requested == null) {
        cassandra_fields = bridged_fields;
    } else {
        cassandra_fields = new HashSet<String>(requested);
        cassandra_fields.retainAll(bridged_fields);
    }
    log.warn("Fields." + String.valueOf(cassandra_fields));

    // Fill output_map with the requested field values from Cassandra.
    cassandraConnector.getFieldsFromCassandra(docid_list, output_map, new ArrayList<String>(cassandra_fields));

    // Second pass: copy the values Cassandra returned onto the documents.
    // The intermediate map keeps Cassandra-facing code separate from the code
    // manipulating Solr structures.
    for (SolrDocument doc : docs) {
        int docid = (Integer) doc.getFieldValue(key_field_name);
        for (Map.Entry<String, String> entry : output_map.get(BigInteger.valueOf(docid)).entrySet()) {
            doc.setField(entry.getKey(), entry.getValue());
        }
    }

    // Replace the "response" entry (normally always present at this stage)
    // with the enriched document list, or add it if missing.
    @SuppressWarnings("unchecked")
    NamedList<SolrDocumentList> vals = rb.rsp.getValues();
    int idx = vals.indexOf("response", 0);
    if (idx >= 0) {
        log.debug("Replacing DocList with SolrDocumentList " + docs.size());
        vals.setVal(idx, docs);
    } else {
        log.debug("Adding SolrDocumentList response" + docs.size());
        vals.add("response", docs);
    }

}

From source file:de.uni_tuebingen.ub.ixTheo.handler.component.FacetPrefixSortComponent.java

License:Apache License

/**
 * Runs faceting for the request and, when {@code facet.sort=prefix} is given,
 * re-orders the values of the configured {@code facet.field} by their
 * keyword-chain similarity to the query terms, applying
 * {@code facet.offset}/{@code facet.limit} only after the full list has been
 * scored and sorted.
 */
@Override
public void process(ResponseBuilder rb) throws IOException {
    if (rb.doFacets) {
        final ModifiableSolrParams params = new ModifiableSolrParams();
        final SolrParams origParams = rb.req.getParams();
        final Iterator<String> iter = origParams.getParameterNamesIterator();
        setCollator(origParams.get("lang"));
        while (iter.hasNext()) {
            final String paramName = iter.next();
            // Deduplicate the list with LinkedHashSet, but _only_ for facet
            // params.
            if (!paramName.startsWith(FacetParams.FACET)) {
                params.add(paramName, origParams.getParams(paramName));
                continue;
            }
            final HashSet<String> deDupe = new LinkedHashSet<>(Arrays.asList(origParams.getParams(paramName)));
            params.add(paramName, deDupe.toArray(new String[deDupe.size()]));
        }

        // Compute the plain facet counts first; prefix sorting (below) then
        // reorders them in place.
        final SimplePrefixSortFacets facets = new SimplePrefixSortFacets(rb.req, rb.getResults().docSet, params,
                rb);
        final NamedList<Object> counts = org.apache.solr.handler.component.FacetComponent
                .getFacetCounts(facets);

        final String[] pivots = params.getParams(FacetParams.FACET_PIVOT);
        if (pivots != null && pivots.length > 0) {
            PivotFacetProcessor pivotProcessor = new PivotFacetProcessor(rb.req, rb.getResults().docSet, params,
                    rb);
            SimpleOrderedMap<List<NamedList<Object>>> v = pivotProcessor.process(pivots);
            if (v != null) {
                counts.add(PIVOT_KEY, v);
            }
        }

        // Check whether we have to reorder out results
        // according to prefix

        final String sort = params.get(FacetParams.FACET_SORT);
        if (FacetPrefixSortParams.FACET_SORT_PREFIX.equals(sort)) {

            // Determine a score relative to the original query

            // Determine the query and make it compatible with our metric
            // class
            // by splitting the single terms
            String[] queryTerms = params.getParams(CommonParams.Q);
            final Collection<String> queryTermsCollection = new ArrayList<>();
            for (String s : queryTerms) {
                // Split at whitespace except we have a quoted term
                Matcher matcher = WHITE_SPACES_WITH_QUOTES_SPLITTING_PATTERN.matcher(s);
                while (matcher.find()) {
                    queryTermsCollection.add(matcher.group().replaceAll("^\"|\"$", ""));
                }
            }

            // In some contexts, i.e. in KWC that are derived from ordinary
            // keywords or if
            // wildcards occur, also add all the query terms as a single
            // phrase term
            // with stripped wildcards
            StringBuilder sb = new StringBuilder();
            for (String s : queryTermsCollection) {
                s = s.replace("*", "");
                sb.append(s);
                sb.append(" ");
            }

            queryTermsCollection.add(sb.toString().trim());

            final ArrayList<String> queryList = new ArrayList<>(queryTermsCollection);
            final String facetfield = params.get(FacetParams.FACET_FIELD);

            // Get the current facet entry and make it compatible with our
            // metric class
            // "facet_fields" itself contains a NamedList with the
            // facet.field as key

            final NamedList<Object> facetFieldsNamedList = (NamedList<Object>) counts.get("facet_fields");
            final NamedList<Object> facetFields = (NamedList<Object>) facetFieldsNamedList.get(facetfield);

            final List<Entry<Entry<String, Object>, Double>> facetPrefixListScored = new ArrayList<>();
            for (final Entry<String, Object> entry : facetFields) {
                final String facetTerms = entry.getKey();

                // Split up each KWC and calculate the scoring.
                // KWC components are separated by unescaped "/" (a "\/" stays
                // part of a component).

                ArrayList<String> facetList = new ArrayList<>(
                        Arrays.asList(facetTerms.split("(?<!" + Pattern.quote("\\") + ")/")));

                // For usability reasons sort the result facets according to
                // the order of the search
                facetList = KeywordSort.sortToReferenceChain(queryList, facetList);

                final double score = KeywordChainMetric.calculateSimilarityScore(queryList, facetList);

                // Collect the result in a sorted list and throw away
                // garbage (score == 0 means no relation to the query)
                if (score > 0) {
                    String facetTermsSorted = StringUtils.join(facetList, "/");
                    Map.Entry<String, Object> sortedEntry = new AbstractMap.SimpleEntry<>(facetTermsSorted,
                            entry.getValue());
                    facetPrefixListScored.add(new AbstractMap.SimpleEntry<>(sortedEntry, score));
                }
            }

            Collections.sort(facetPrefixListScored, ENTRY_COMPARATOR);

            // Extract all the values wrap it back to NamedList again and
            // replace in the original structure

            facetFieldsNamedList.clear();
            NamedList<Object> facetNamedListSorted = new NamedList<>();

            // We had to disable all limits and offsets sort according
            // Handle this accordingly now

            int offset = (params.getInt(FacetParams.FACET_OFFSET) != null)
                    ? params.getInt(FacetParams.FACET_OFFSET)
                    : 0;
            int limit = (params.getInt(FacetParams.FACET_LIMIT) != null)
                    ? params.getInt(FacetParams.FACET_LIMIT)
                    : 100;

            // Strip uneeded elements: clamp the [offset, offset+limit) window
            // to the size of the scored list
            int s = facetPrefixListScored.size();
            int off = (offset < s) ? offset : 0;
            limit = (limit < 0) ? s : limit; // Handle a negative limit
            // param, i.e. unlimited results
            int lim = (offset + limit <= s) ? (offset + limit) : s;

            final List<Entry<Entry<String, Object>, Double>> facetPrefixListScoredTruncated = facetPrefixListScored
                    .subList(off, lim);

            for (Entry<Entry<String, Object>, Double> e : facetPrefixListScoredTruncated) {
                facetNamedListSorted.add(e.getKey().getKey(), e.getKey().getValue());
            }

            // Re-insert the sorted values plus a "<field>-count" entry holding
            // the total number of scored values (before offset/limit).
            facetFieldsNamedList.add(facetfield, facetNamedListSorted);
            NamedList<Object> countList = new NamedList<>();
            countList.add("count", facetPrefixListScored.size());
            facetFieldsNamedList.add(facetfield + "-count", countList);

            counts.remove("facet_fields");
            counts.add("facet_fields", facetFieldsNamedList);
        }

        rb.rsp.add("facet_counts", counts);
    }
}