Example usage for org.apache.solr.handler.component ResponseBuilder getQuery

List of usage examples for org.apache.solr.handler.component ResponseBuilder getQuery

Introduction

On this page you can find example usages of org.apache.solr.handler.component.ResponseBuilder#getQuery.

Prototype

public Query getQuery() 

Source Link

Usage

From source file:com.billiger.solr.handler.component.QLTBComponent.java

License:Apache License

/**
 * Injects boost terms into the request's query when its analyzed query
 * string exactly matches one of the known query strings from the XML file.
 *
 * The original query object is retained as a MUST clause of a freshly
 * created BooleanQuery; each associated term (a ConstantScoreQuery) is
 * attached as Occur.SHOULD when its boost is positive, or Occur.MUST_NOT
 * when its boost is zero or negative.
 *
 * Note: prepare() may trigger a reload of the XML file if it resides in
 * the data/ directory and the index reader is new.
 */
@Override
public final void prepare(final ResponseBuilder rb) {
    if (disabled(rb)) {
        return;
    }
    final Query userQuery = rb.getQuery();
    final String rawQueryString = rb.getQueryString();
    if (userQuery == null || rawQueryString == null) {
        // Nothing to rewrite until both the parsed query and its string exist.
        return;
    }
    final IndexReader reader = rb.req.getSearcher().getIndexReader();
    List<Query> boostTerms = null;
    try {
        final String analyzed = getAnalyzedQuery(rawQueryString);
        boostTerms = getQLTBMap(reader, rb.req.getCore()).get(analyzed);
        if (boostTerms == null || boostTerms.isEmpty()) {
            // No known query matched: leave the request untouched.
            return;
        }
        log.debug("QLTBComponent.prepare() query: \"" + analyzed + "\" with " + boostTerms.size()
                + " boost terms");
    } catch (Exception ex) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "error loading QLTB", ex);
    }
    final BooleanQuery rewritten = new BooleanQuery(true);
    rewritten.add(userQuery, BooleanClause.Occur.MUST);
    for (Query boostTerm : boostTerms) {
        // Positive boost attracts (SHOULD); zero/negative boost excludes (MUST_NOT).
        final BooleanClause.Occur occur = boostTerm.getBoost() > 0.0 ? BooleanClause.Occur.SHOULD
                : BooleanClause.Occur.MUST_NOT;
        rewritten.add(new BooleanClause(boostTerm, occur));
    }
    rb.setQuery(rewritten);
}

From source file:com.doculibre.constellio.solr.handler.component.ConstellioAuthorizationComponent.java

License:Open Source License

/**
 * Rewrites the request's main query so that only records the requesting
 * user is authorized to see can match.
 *
 * Anonymous requests (no resolvable user id) are restricted to public
 * records (PUBLIC_RECORD_FIELD == "T") minus federated collections the
 * user lacks search permission for. Authenticated requests keep the
 * original query but exclude private records that neither the ACLs nor
 * the connector manager authorize for the user.
 *
 * @param rb the response builder carrying the request, parsed query and params
 * @throws IOException if searching the index fails
 */
@SuppressWarnings("unchecked")
@Override
public void prepare(ResponseBuilder rb) throws IOException {
    SolrQueryRequest req = rb.req;
    SolrIndexSearcher searcher = req.getSearcher();
    SolrParams params = req.getParams();

    // A runtime param can skip authorization entirely (enabled by default).
    if (!params.getBool(ENABLE, true)) {
        return;
    }

    // Nothing to secure until a query has actually been parsed.
    Query query = rb.getQuery();
    String qstr = rb.getQueryString();
    if (query == null || qstr == null) {
        return;
    }

    // Resolve the requesting user; a missing or malformed id means anonymous.
    // NOTE(review): new Long(String) is deprecated — Long.valueOf would be preferred.
    ConstellioUser user;
    String userIdStr = params.get(ConstellioSolrQueryParams.USER_ID);
    if (userIdStr != null) {
        UserServices userServices = ConstellioSpringUtils.getUserServices();
        try {
            user = userServices.get(new Long(userIdStr));
        } catch (NumberFormatException e) {
            user = null;
        }
    } else {
        user = null;
    }

    String collectionName = params.get(ConstellioSolrQueryParams.COLLECTION_NAME);
    RecordCollectionServices collectionServices = ConstellioSpringUtils.getRecordCollectionServices();
    FederationServices federationServices = ConstellioSpringUtils.getFederationServices();
    // NOTE(review): collection is dereferenced below without a null check;
    // confirm collectionServices.get() cannot return null for this request.
    RecordCollection collection = collectionServices.get(collectionName);

    // Federated collections that require a search permission this user lacks;
    // they are excluded from the final query with MUST_NOT clauses.
    List<TermQuery> restrictedCollectionQueries = new ArrayList<TermQuery>();
    if (collection.isFederationOwner()) {
        List<RecordCollection> includedCollections = federationServices.listIncludedCollections(collection);
        for (RecordCollection includedCollection : includedCollections) {
            if (includedCollection.hasSearchPermission()
                    && (user == null || !user.hasSearchPermission(includedCollection))) {
                restrictedCollectionQueries.add(new TermQuery(
                        new Term(IndexField.COLLECTION_ID_FIELD, "" + includedCollection.getId())));
            }
        }
    }

    // User must be logged in to see private records
    if (user != null) {
        String luceneQueryStr = params.get(ConstellioSolrQueryParams.LUCENE_QUERY);
        if (StringUtils.isBlank(luceneQueryStr)) {
            return;
        }

        IndexSchema schema = req.getSchema();
        SolrQueryParser queryParser = new SolrQueryParser(rb.getQparser(), IndexField.DEFAULT_SEARCH_FIELD);
        Query luceneQuery;
        try {
            luceneQuery = queryParser.parse(luceneQueryStr);
        } catch (SyntaxError e) {
            // Unparseable query: log it and leave the original query untouched.
            log.error("Error parsing lucene query " + luceneQueryStr, e);
            return;
        }
        // Create a new query which will only include private records:
        // the user's query AND non-public records, minus restricted collections.
        BooleanQuery privateRecordQuery = new BooleanQuery(true);
        privateRecordQuery.add(luceneQuery, BooleanClause.Occur.MUST);
        for (TermQuery restrictionCollectionQuery : restrictedCollectionQueries) {
            privateRecordQuery.add(restrictionCollectionQuery, BooleanClause.Occur.MUST_NOT);
        }

        TermQuery privateRecordTQ = new TermQuery(new Term(IndexField.PUBLIC_RECORD_FIELD, "F"));
        privateRecordQuery.add(privateRecordTQ, BooleanClause.Occur.MUST);

        DocSet privateRecordIdDocSet = searcher.getDocSet(privateRecordQuery);

        if (privateRecordIdDocSet.size() > 0) {
            RecordServices recordServices = ConstellioSpringUtils.getRecordServices();
            ACLServices aclServices = ConstellioSpringUtils.getACLServices();
            ConnectorManagerServices connectorManagerServices = ConstellioSpringUtils
                    .getConnectorManagerServices();

            // Materialize each matching private record from its indexed record id.
            List<Record> privateRecords = new ArrayList<Record>();
            DocIterator docIt = privateRecordIdDocSet.iterator();
            while (docIt.hasNext()) {
                int docId = docIt.nextDoc();
                Document luceneDoc = searcher.doc(docId);
                Long recordId = new Long(luceneDoc.get(IndexField.RECORD_ID_FIELD));
                Record record = recordServices.get(recordId, collection);
                privateRecords.add(record);
            }
            // First pass: remove records already authorized by the ACLs.
            List<Record> unevaluatedPrivateRecords = aclServices.removeAuthorizedRecords(privateRecords, user);
            if (!unevaluatedPrivateRecords.isEmpty()) {
                Set<UserCredentials> userCredentials = user.getUserCredentials();
                // Second pass: ask the connector manager about the remainder.
                ConnectorManager connectorManager = connectorManagerServices.getDefaultConnectorManager();
                List<Record> authorizedRecords = connectorManagerServices
                        .authorizeByConnector(unevaluatedPrivateRecords, userCredentials, connectorManager);
                List<Record> unauthorizedRecords = ListUtils.removeAll(unevaluatedPrivateRecords,
                        authorizedRecords);

                if (!unauthorizedRecords.isEmpty()) {
                    // Create a new query which will exclude unauthorized records.
                    BooleanQuery authorizedRecordQuery = new BooleanQuery(true);
                    authorizedRecordQuery.add(query, BooleanClause.Occur.MUST);
                    for (Record unauthorizedRecord : unauthorizedRecords) {
                        TermQuery unauthorizedRecordTQ = new TermQuery(
                                new Term(IndexField.RECORD_ID_FIELD, "" + unauthorizedRecord.getId()));
                        authorizedRecordQuery.add(unauthorizedRecordTQ, BooleanClause.Occur.MUST_NOT);
                    }
                    rb.setQuery(authorizedRecordQuery);
                }
            }
        }
    } else {
        // Anonymous: restrict the original query to public records and drop
        // restricted federated collections.
        BooleanQuery publicRecordQuery = new BooleanQuery(true);
        publicRecordQuery.add(query, BooleanClause.Occur.MUST);
        TermQuery publicRecordTQ = new TermQuery(new Term(IndexField.PUBLIC_RECORD_FIELD, "T"));
        publicRecordQuery.add(publicRecordTQ, BooleanClause.Occur.MUST);
        for (TermQuery restrictionCollectionQuery : restrictedCollectionQueries) {
            publicRecordQuery.add(restrictionCollectionQuery, BooleanClause.Occur.MUST_NOT);
        }
        rb.setQuery(publicRecordQuery);
    }
}

From source file:com.plugtree.solradvert.AdvertComponent.java

License:Apache License

/**
 * Gathers the working-memory facts for the rules engine from the current
 * request: the main query, every filter query, plus the helper objects
 * (AdvertQuery, SchemaTool) and the response builder itself.
 *
 * @param rb the response builder for the request being processed
 * @return the collection of facts to insert into the rules session
 */
private Collection<Object> getFacts(ResponseBuilder rb) {
    QueryFactsCollector factsCollector = new QueryFactsCollector();
    Collection<Object> facts = new ArrayList<Object>();

    // Main query, if one has already been parsed.
    Query mainQuery = rb.getQuery();
    if (mainQuery != null) {
        factsCollector.collect(mainQuery, facts);
    }

    // Every filter query, if any are present.
    List<Query> filters = rb.getFilters();
    if (filters != null) {
        for (Query filterQuery : filters) {
            factsCollector.collect(filterQuery, facts);
        }
    }

    // AdvertQuery: kept only for backwards-compatibility, so old tests don't fail.
    facts.add(new AdvertQueryImpl(rb));

    // SchemaTool helper for the rules.
    facts.add(new SchemaTool(rb));

    // The response builder itself.
    facts.add(rb);

    logger.debug("Collected facts: " + facts);

    return facts;
}

From source file:com.plugtree.solradvert.core.AdvertQueryImpl.java

License:Apache License

/**
 * Captures the query state of the given response builder: the main parsed
 * query, the filter queries, and the builder itself.
 *
 * @param rb the response builder whose query state is exposed
 */
public AdvertQueryImpl(ResponseBuilder rb) {
    this.q = rb.getQuery();
    this.fqs = rb.getFilters();
    this.rb = rb;
}

From source file:com.sn.solr.plugin.rank.RankEngine.java

License:Apache License

/**
 * Provides implementation for Dense ranking ["1223"] as identified by
 * {@link RankStrategy#LEGACY_DENSE}. The difference is that this
 * implementation is computed without using facet results, so it is
 * noticeably slower than computing rank based on facets —
 * use {@link RankStrategy#DENSE}. This implementation may also cause
 * many cache evictions, putting stress on memory.
 *
 * @see #computeDenseRank(List)
 *
 * @param rb        response builder holding the current query, filters and sort
 * @param idField   name of the unique-id field used as the rank map key
 * @param rankField name of the integer field whose value determines the rank
 * @return map from document id to its dense rank within the requested page
 * @throws IOException if the index cannot be read
 */
@Deprecated
public static Map<String, Number> computeLegacyDenseRank(ResponseBuilder rb, String idField, String rankField)
        throws IOException {
    SolrIndexSearcher searcher = rb.req.getSearcher();
    SolrParams params = rb.req.getParams();

    String startParam = params.get(CommonParams.START);
    String rowsParam = params.get(CommonParams.ROWS);
    int start = 0;
    int rows = 10;

    // BUG FIX: use short-circuit && so isInteger() is never reached with a null
    // argument, and parse with Integer.parseInt instead of the deprecated
    // Integer(String) constructor.
    if (startParam != null && AppHelper.isInteger(startParam))
        start = Integer.parseInt(startParam);
    if (rowsParam != null && AppHelper.isInteger(rowsParam))
        rows = Integer.parseInt(rowsParam);

    // BUG FIX: log the strategy actually being computed (was RankStrategy.ORDINAL).
    LOG.info("Computing rank using strategy: {}", RankStrategy.LEGACY_DENSE.getDescription());
    FieldSelector fs = new MapFieldSelector(new String[] { idField, rankField });
    Map<String, Number> rankMap = new HashMap<String, Number>();
    // Fetch the first (start + rows) docs in sorted order: dense rank must be
    // accumulated from the top of the result set even when paging.
    DocList docs = searcher.getDocList(rb.getQuery(), rb.getFilters(), rb.getSortSpec().getSort(), 0,
            start + rows, 0);
    int denseRank = 1;
    int currScore = 0;
    int prevScore = 0;
    int i = 0;
    for (DocIterator it = docs.iterator(); it.hasNext();) {
        Document doc = searcher.doc(it.nextDoc(), fs);
        currScore = Integer.parseInt(doc.get(rankField));
        if (i == 0) {
            prevScore = currScore;
        }
        // Dense ("1223") rank: increment only when the score value changes.
        if (prevScore != currScore) {
            prevScore = currScore;
            denseRank++;
        }
        // Only report ranks for documents inside the requested page window.
        if (i >= start) {
            rankMap.put(doc.get(idField), denseRank);
        }
        i++;
    }

    return rankMap;
}

From source file:com.tamingtext.qa.PassageRankingComponent.java

License:Apache License

/**
 * Ranks candidate answer passages (windows of text around query-term span
 * matches) for a question-answering request and adds the top ones to the
 * response under "qaResponse".
 *
 * Requires the parsed query to be a Lucene SpanNearQuery; positions from
 * its spans drive window construction.
 *
 * @param rb the response builder holding the parsed query and result doc list
 * @throws IOException if reading the index fails
 */
@Override
public void process(ResponseBuilder rb) throws IOException {
    SolrParams params = rb.req.getParams();
    // Only run when this component is explicitly enabled for the request.
    if (!params.getBool(COMPONENT_NAME, false)) {
        return;
    }
    Query origQuery = rb.getQuery();
    //TODO: longer term, we don't have to be a span query, we could re-analyze the document
    if (origQuery != null) {
        if (origQuery instanceof SpanNearQuery == false) {
            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                    "Illegal query type.  The incoming query must be a Lucene SpanNearQuery and it was a "
                            + origQuery.getClass().getName());
        }
        SpanNearQuery sQuery = (SpanNearQuery) origQuery;
        SolrIndexSearcher searcher = rb.req.getSearcher();
        IndexReader reader = searcher.getIndexReader();
        Spans spans = sQuery.getSpans(reader);
        //Assumes the query is a SpanQuery
        //Build up the query term weight map and the bi-gram weight map
        Map<String, Float> termWeights = new HashMap<String, Float>();
        Map<String, Float> bigramWeights = new HashMap<String, Float>();
        createWeights(params.get(CommonParams.Q), sQuery, termWeights, bigramWeights, reader);
        float adjWeight = params.getFloat(ADJACENT_WEIGHT, DEFAULT_ADJACENT_WEIGHT);
        float secondAdjWeight = params.getFloat(SECOND_ADJ_WEIGHT, DEFAULT_SECOND_ADJACENT_WEIGHT);
        float bigramWeight = params.getFloat(BIGRAM_WEIGHT, DEFAULT_BIGRAM_WEIGHT);
        //get the window sizes used to build the candidate passages
        int primaryWindowSize = params.getInt(QAParams.PRIMARY_WINDOW_SIZE, DEFAULT_PRIMARY_WINDOW_SIZE);
        int adjacentWindowSize = params.getInt(QAParams.ADJACENT_WINDOW_SIZE, DEFAULT_ADJACENT_WINDOW_SIZE);
        int secondaryWindowSize = params.getInt(QAParams.SECONDARY_WINDOW_SIZE, DEFAULT_SECONDARY_WINDOW_SIZE);
        WindowBuildingTVM tvm = new WindowBuildingTVM(primaryWindowSize, adjacentWindowSize,
                secondaryWindowSize);
        PassagePriorityQueue rankedPassages = new PassagePriorityQueue();
        //intersect w/ doclist: only score spans in documents that matched the query
        DocList docList = rb.getResults().docList;
        while (spans.next() == true) {
            //build up the window around this span occurrence
            if (docList.exists(spans.doc())) {
                tvm.spanStart = spans.start();
                tvm.spanEnd = spans.end();
                // Term-vector mapper callback fills tvm.passage for this doc/field.
                reader.getTermFreqVector(spans.doc(), sQuery.getField(), tvm);
                //The entries map contains the window, do some ranking of it
                if (tvm.passage.terms.isEmpty() == false) {
                    log.debug("Candidate: Doc: {} Start: {} End: {} ",
                            new Object[] { spans.doc(), spans.start(), spans.end() });
                }
                tvm.passage.lDocId = spans.doc();
                tvm.passage.field = sQuery.getField();
                //score this window
                try {
                    addPassage(tvm.passage, rankedPassages, termWeights, bigramWeights, adjWeight,
                            secondAdjWeight, bigramWeight);
                } catch (CloneNotSupportedException e) {
                    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                            "Internal error cloning Passage", e);
                }
                //clear out the entries for the next round
                tvm.passage.clear();
            }
        }
        // Emit up to `rows` ranked passages into the response.
        NamedList qaResp = new NamedList();
        rb.rsp.add("qaResponse", qaResp);
        int rows = params.getInt(QA_ROWS, 5);

        SchemaField uniqField = rb.req.getSchema().getUniqueKeyField();
        if (rankedPassages.size() > 0) {
            int size = Math.min(rows, rankedPassages.size());
            Set<String> fields = new HashSet<String>();
            // NOTE(review): the loop counts down — presumably pop() returns the
            // lowest-ranked passage first; confirm against PassagePriorityQueue.
            for (int i = size - 1; i >= 0; i--) {
                Passage passage = rankedPassages.pop();
                if (passage != null) {
                    NamedList passNL = new NamedList();
                    qaResp.add(("answer"), passNL);
                    String idName;
                    String idValue;
                    if (uniqField != null) {
                        idName = uniqField.getName();
                        fields.add(idName);
                        fields.add(passage.field);//prefetch this now, so that it is cached
                        idValue = searcher.doc(passage.lDocId, fields).get(idName);
                    } else {
                        // No unique key field in the schema: fall back to the Lucene doc id.
                        idName = "luceneDocId";
                        idValue = String.valueOf(passage.lDocId);
                    }
                    passNL.add(idName, idValue);
                    passNL.add("field", passage.field);
                    //get the window
                    String fldValue = searcher.doc(passage.lDocId, fields).get(passage.field);
                    if (fldValue != null) {
                        //get the window of words to display; we don't use the passage window, as that is based on the term vector
                        int start = passage.terms.first().start;//use the offsets
                        int end = passage.terms.last().end;
                        if (start >= 0 && start < fldValue.length() && end >= 0 && end < fldValue.length()) {
                            passNL.add("window",
                                    fldValue.substring(start, end + passage.terms.last().term.length()));
                        } else {
                            log.debug("Passage does not have correct offset information");
                            passNL.add("window", fldValue);//we don't have offsets, or they are incorrect, return the whole field value
                        }
                    }
                } else {
                    break;
                }
            }
        }
    }

}

From source file:org.alfresco.solr.component.QueryLoggingComponent.java

License:Open Source License

/**
 * Writes one document describing the current query into the companion
 * "&lt;core&gt;_qlog" logging core. Shard sub-requests are skipped so a
 * distributed query is logged only once, at the aggregating node.
 *
 * @param rb the response builder for the completed request
 * @throws IOException if the logging core's request cannot be closed cleanly
 */
private void log(ResponseBuilder rb) throws IOException {
    boolean isShard = rb.req.getParams().getBool(ShardParams.IS_SHARD, false);
    if (!isShard) {
        CoreContainer container = rb.req.getCore().getCoreContainer();
        // NOTE(review): getCore() increments the core's reference count;
        // confirm whether logCore should be close()d after use to release it.
        SolrCore logCore = container.getCore(rb.req.getCore().getName() + "_qlog");
        if (logCore != null) {
            JSONObject json = (JSONObject) rb.req.getContext().get(AbstractQParser.ALFRESCO_JSON);

            SolrQueryRequest request = null;
            UpdateRequestProcessor processor = null;
            try {
                request = new LocalSolrQueryRequest(logCore, new NamedList<>());
                processor = logCore.getUpdateProcessingChain(null).createProcessor(request,
                        new SolrQueryResponse());

                AddUpdateCommand cmd = new AddUpdateCommand(request);
                cmd.overwrite = true;
                SolrInputDocument input = new SolrInputDocument();
                input.addField("id", GUID.generate());
                input.addField("_version_", "1");

                input.addField("timestamp", DateTimeFormatter.ISO_INSTANT.format(Instant.now()));

                // Record the first USER-type authority from the request JSON as
                // the requesting user; anything else is logged as <UNKNOWN>.
                if (json != null) {
                    try {
                        ArrayList<String> authorityList = new ArrayList<String>(1);
                        JSONArray authorities = json.getJSONArray("authorities");
                        for (int i = 0; i < authorities.length(); i++) {
                            String authorityString = authorities.getString(i);
                            authorityList.add(authorityString);
                        }

                        for (String authority : authorityList) {
                            if (AuthorityType.getAuthorityType(authority) == AuthorityType.USER) {
                                input.addField("user", authority);
                                break;
                            }
                        }
                    } catch (JSONException e) {
                        input.addField("user", "<UNKNOWN>");
                    }
                } else {
                    input.addField("user", "<UNKNOWN>");
                }

                // User-query resolution order: spellcheck.q, then the JSON
                // body's "query", then the plain q parameter.
                String userQuery = rb.req.getParams().get(SpellingParams.SPELLCHECK_Q);
                if (userQuery == null) {
                    if (json != null) {
                        try {
                            userQuery = json.getString("query");
                        } catch (JSONException e) {
                            // best effort: fall through to the q parameter below
                        }
                    }
                }
                if (userQuery == null) {
                    userQuery = rb.req.getParams().get(CommonParams.Q);
                }

                if (userQuery != null) {
                    input.addField("user_query", userQuery);
                }

                // BUG FIX: guard against a null parsed query / null results;
                // previously query.toString() threw a NullPointerException when
                // the query had not been parsed for this request.
                Query query = rb.getQuery();
                if (query != null) {
                    input.addField("query", query.toString());
                }

                if (rb.getResults() != null && rb.getResults().docList != null) {
                    input.addField("found", rb.getResults().docList.matches());
                }
                input.addField("time", rb.req.getRequestTimer().getTime());

                cmd.solrDoc = input;
                processor.processAdd(cmd);
            }

            finally {
                if (processor != null) {
                    processor.finish();
                }
                if (request != null) {
                    request.close();
                }
            }
        }
    }
}

From source file:org.solbase.SolbaseComponent.java

License:Apache License

/**
 * Bulk-loads the documents selected by the current query from the Solbase
 * (HBase-backed) store, priming the reader cache for the response phase,
 * and manages the firstPhase thread-local used by sharded requests.
 *
 * @param rb the response builder holding the result doc list
 * @throws IOException if reading documents fails
 */
public void process(ResponseBuilder rb) throws IOException {

    DocList list = rb.getResults().docList;

    DocIterator it = list.iterator();

    List<Integer> docIds = new ArrayList<Integer>(list.size());

    while (it.hasNext())
        docIds.add(it.next());

    IndexReader reader = (IndexReader) ((SolrIndexReader) rb.req.getSearcher().getIndexReader())
            .getWrappedReader();

    SolrQueryRequest req = rb.req;

    SolrParams params = req.getParams();
    String ids = params.get(ShardParams.IDS);

    // only set firstPhase threadLocal in case of sharding.
    // this is to optimize our querying time by not fetching the actual doc obj
    // in the first phase of sharding; that phase only needs doc ids and scores,
    // which are already in the term vectors.
    if (SolbaseShardUtil.getNumShard() != 0) {
        if (ids != null) {
            IndexReader.firstPhase.set(false);
        } else {
            IndexReader.firstPhase.set(true);
        }
    } else {
        // always false for a stand-alone (unsharded) deployment
        IndexReader.firstPhase.set(false);
    }

    logger.debug(reader.getIndexName() + " : Fetching " + docIds.size() + " Docs");

    if (docIds.size() > 0) {

        List<byte[]> fieldFilter = null;
        Set<String> returnFields = rb.rsp.getReturnFields();
        if (returnFields != null) {

            // copy return fields list
            fieldFilter = new ArrayList<byte[]>(returnFields.size());
            for (String field : returnFields) {
                fieldFilter.add(Bytes.toBytes(field));
            }

            // add highlight fields
            SolrHighlighter highligher = rb.req.getCore().getHighlighter();
            if (highligher.isHighlightingEnabled(rb.req.getParams())) {
                for (String field : highligher.getHighlightFields(rb.getQuery(), rb.req, null))
                    if (!returnFields.contains(field))
                        fieldFilter.add(Bytes.toBytes(field));
            }
            // fetch unique key if one exists.
            // BUG FIX: returnFields is a Set<String>, so contains(SchemaField)
            // was always false and the key field was appended even when already
            // requested; compare against the field NAME instead.
            SchemaField keyField = rb.req.getSearcher().getSchema().getUniqueKeyField();
            if (null != keyField)
                if (!returnFields.contains(keyField.getName()))
                    fieldFilter.add(Bytes.toBytes(keyField.getName()));
        }

        FieldSelector selector = new SolbaseFieldSelector(docIds, fieldFilter);

        // This will bulk load these docs
        rb.req.getSearcher().getReader().document(docIds.get(0), selector);

    }

    ReaderCache.flushThreadLocalCaches(reader.getIndexName());
}

From source file:org.tallison.solr.search.concordance.KeywordCooccurComponent.java

License:Apache License

/**
 * in this component, this is called 3 different ways:
 * 1.  unsharded config/*from   w w w. j  ava2  s. com*/
 * 2.  user sends in lq=true param
 * 3.  it is distributed, in which case it uses the rb's query as a filter on the local core, and shards out requests to the remaining cores
 */
@Override
public void process(ResponseBuilder rb) throws IOException {
    NamedList results = null;
    try {
        results = KeywordCooccurRankHandler.doLocalSearch(rb.getQuery(), rb.req);
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

    rb.rsp.add(KeywordCooccurRankHandler.NODE, results);

    /**/
}

From source file:org.tallison.solr.search.concordance.KWICComponent.java

License:Apache License

/**
 * in this component, this is called 3 different ways:
 * 1.  unsharded config/*from  w  w w  .  j  a v a  2  s.com*/
 * 2.  user sends in lq=true param
 * 3.  it is distributed, in which case it uses the rb's query as a filter on the local core, and shards out requests to the remaining cores
 */
@Override
public void process(ResponseBuilder rb) throws IOException {
    NamedList results = null;
    try {
        results = KWICRequestHandler.doLocalSearch(rb.getQuery(), rb.req);
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

    rb.rsp.add(KWICRequestHandler.NODE, results);
}