Example usage for org.apache.solr.common.params.CommonParams.Q

List of usage examples for org.apache.solr.common.params.CommonParams.Q

Introduction

This page collects example usages of the org.apache.solr.common.params.CommonParams.Q constant.

Prototype

String Q = "q";

Document

query string
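
Quick example

Before the full examples below, here is a minimal, self-contained sketch of the typical pattern (the class name is illustrative): CommonParams.Q is the string key "q", written by clients and read back by request handlers.

import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;

public class CommonParamsQExample {
    public static void main(String[] args) {
        // A client sets the query string under the "q" key;
        // SolrJ's SolrQuery.setQuery(...) writes to this same key.
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set(CommonParams.Q, "title:solr");

        // A request handler reads it back; the two-argument form supplies a
        // default (e.g. match-all) for requests that carry no q at all.
        String q = params.get(CommonParams.Q, "*:*"); // "title:solr" here
        System.out.println(q);
    }
}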

Usage

From source file:org.dfdeshom.solr.mlt.MoreLikeThisHandler.java

License:Apache License
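
In this MoreLikeThis handler, CommonParams.Q drives the control flow: when q is present it is parsed into the base query used to find the match document; when it is absent or blank, the handler falls back to reading a single posted ContentStream instead.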

@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    SolrParams params = req.getParams();

    // Set field flags
    ReturnFields returnFields = new SolrReturnFields(req);
    rsp.setReturnFields(returnFields);
    int flags = 0;
    if (returnFields.wantsScore()) {
        flags |= SolrIndexSearcher.GET_SCORES;
    }

    String defType = params.get(QueryParsing.DEFTYPE, QParserPlugin.DEFAULT_QTYPE);
    String q = params.get(CommonParams.Q);
    Query query = null;
    SortSpec sortSpec = null;
    List<Query> filters = null;
    QParser parser = null;

    try {
        if (q != null) {
            parser = QParser.getParser(q, defType, req);
            query = parser.getQuery();
            sortSpec = parser.getSort(true);
        }

        String[] fqs = req.getParams().getParams(CommonParams.FQ);
        if (fqs != null && fqs.length != 0) {
            filters = new ArrayList<Query>();
            for (String fq : fqs) {
                if (fq != null && fq.trim().length() != 0) {
                    QParser fqp = QParser.getParser(fq, null, req);
                    filters.add(fqp.getQuery());
                }
            }
        }
    } catch (SyntaxError e) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
    }

    SolrIndexSearcher searcher = req.getSearcher();

    MoreLikeThisHelper mlt = new MoreLikeThisHelper(params, searcher);

    // Hold on to the interesting terms if relevant
    TermStyle termStyle = TermStyle.get(params.get(MoreLikeThisParams.INTERESTING_TERMS));
    List<InterestingTerm> interesting = (termStyle == TermStyle.NONE) ? null
            : new ArrayList<InterestingTerm>(mlt.mlt.getMaxQueryTerms());

    DocListAndSet mltDocs = null;

    // Parse Required Params
    // This will either have a single Reader or valid query
    Reader reader = null;
    try {
        if (q == null || q.trim().length() < 1) {
            Iterable<ContentStream> streams = req.getContentStreams();
            if (streams != null) {
                Iterator<ContentStream> iter = streams.iterator();
                if (iter.hasNext()) {
                    reader = iter.next().getReader();
                }
                if (iter.hasNext()) {
                    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                            "MoreLikeThis does not support multiple ContentStreams");
                }
            }
        }

        int start = params.getInt(CommonParams.START, 0);
        int rows = params.getInt(CommonParams.ROWS, 10);

        // Find documents MoreLikeThis - either with a reader or a query
        // --------------------------------------------------------------------------------
        if (reader != null) {
            // note: sortSpec is null on this path (it is only set when q is present),
            // so guard the sort lookup to avoid a NullPointerException
            Sort sort = (sortSpec == null) ? null : sortSpec.getSort();
            mltDocs = mlt.getMoreLikeThis(reader, sort, start, rows, filters, interesting, flags);
        } else if (q != null) {
            // Matching options
            boolean includeMatch = params.getBool(MoreLikeThisParams.MATCH_INCLUDE, true);
            int matchOffset = params.getInt(MoreLikeThisParams.MATCH_OFFSET, 0);

            // Find the base match
            DocList match = searcher.getDocList(query, null, null, matchOffset, 1, flags); // only get the first one...
            if (includeMatch) {
                rsp.add("match", match);
            }

            // This is an iterator, but we only handle the first match
            DocIterator iterator = match.iterator();
            if (iterator.hasNext()) {
                // do a MoreLikeThis query for each document in results
                int id = iterator.nextDoc();
                mltDocs = mlt.getMoreLikeThis(parser, id, sortSpec.getSort(), start, rows, filters, interesting,
                        flags);
            }
        } else {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                    "MoreLikeThis requires either a query (?q=) or text to find similar documents.");
        }

    } finally {
        if (reader != null) {
            reader.close();
        }
    }

    if (mltDocs == null) {
        mltDocs = new DocListAndSet(); // avoid NPE
    }
    rsp.add("response", mltDocs.docList);

    if (interesting != null) {
        if (termStyle == TermStyle.DETAILS) {
            NamedList<Float> it = new NamedList<Float>();
            for (InterestingTerm t : interesting) {
                it.add(t.term.toString(), t.boost);
            }
            rsp.add("interestingTerms", it);
        } else {
            List<String> it = new ArrayList<String>(interesting.size());
            for (InterestingTerm t : interesting) {
                it.add(t.term.text());
            }
            rsp.add("interestingTerms", it);
        }
    }

    // maybe facet the results
    if (params.getBool(FacetParams.FACET, false)) {
        if (mltDocs.docSet == null) {
            rsp.add("facet_counts", null);
        } else {
            SimpleFacets f = new SimpleFacets(req, mltDocs.docSet, params);
            rsp.add("facet_counts", f.getFacetCounts());
        }
    }
    boolean dbg = req.getParams().getBool(CommonParams.DEBUG_QUERY, false);

    boolean dbgQuery = false, dbgResults = false;
    if (!dbg) { //if it's true, we are doing everything anyway.
        String[] dbgParams = req.getParams().getParams(CommonParams.DEBUG);
        if (dbgParams != null) {
            for (int i = 0; i < dbgParams.length; i++) {
                if (dbgParams[i].equals(CommonParams.QUERY)) {
                    dbgQuery = true;
                } else if (dbgParams[i].equals(CommonParams.RESULTS)) {
                    dbgResults = true;
                }
            }
        }
    } else {
        dbgQuery = true;
        dbgResults = true;
    }
    // Copied from StandardRequestHandler... perhaps it should be added to doStandardDebug?
    if (dbg) {
        try {
            NamedList<Object> dbgInfo = SolrPluginUtils.doStandardDebug(req, q, mlt.getRawMLTQuery(),
                    mltDocs.docList, dbgQuery, dbgResults);
            if (null != dbgInfo) {
                if (null != filters) {
                    dbgInfo.add("filter_queries", req.getParams().getParams(CommonParams.FQ));
                    List<String> fqs = new ArrayList<String>(filters.size());
                    for (Query fq : filters) {
                        fqs.add(QueryParsing.toString(fq, req.getSchema()));
                    }
                    dbgInfo.add("parsed_filter_queries", fqs);
                }
                rsp.add("debug", dbgInfo);
            }
        } catch (Exception e) {
            SolrException.log(SolrCore.log, "Exception during debug", e);
            rsp.add("exception_during_debug", SolrException.toStr(e));
        }
    }
}

From source file:org.dice.solrenhancements.morelikethis.DiceMoreLikeThisHandler.java

License:Apache License
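
This Dice variant of the MoreLikeThis handler reads CommonParams.Q the same way, but when q is absent it still builds a QParser from a null query string so that a sort specification is always available.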

@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    // set and override parameters
    SolrIndexSearcher searcher = req.getSearcher();
    SchemaField uniqueKeyField = searcher.getSchema().getUniqueKeyField();
    ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
    configureSolrParameters(req, params, uniqueKeyField.getName());

    // Set field flags
    ReturnFields returnFields = new SolrReturnFields(req);
    rsp.setReturnFields(returnFields);
    int flags = 0;
    if (returnFields.wantsScore()) {
        flags |= SolrIndexSearcher.GET_SCORES;
    }
    // note: set in configureSolrParameters
    String defType = params.get(QueryParsing.DEFTYPE, EDISMAX);
    String q = params.get(CommonParams.Q);
    Query query = null;
    SortSpec sortSpec = null;
    QParser parser = null;

    List<Query> targetFqFilters = null;
    List<Query> mltFqFilters = null;

    try {
        if (q != null) {
            parser = QParser.getParser(q, defType, req);
            query = parser.getQuery();
            sortSpec = parser.getSort(true);
        } else {
            parser = QParser.getParser(null, defType, req);
            sortSpec = parser.getSort(true);
        }

        targetFqFilters = getFilters(req, CommonParams.FQ);
        mltFqFilters = getFilters(req, MoreLikeThisParams.FQ);
    } catch (SyntaxError e) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
    }

    MoreLikeThisHelper mlt = new MoreLikeThisHelper(params, searcher, uniqueKeyField, parser);

    // Hold on to the interesting terms if relevant
    MoreLikeThisParams.TermStyle termStyle = MoreLikeThisParams.TermStyle
            .get(params.get(MoreLikeThisParams.INTERESTING_TERMS));

    MLTResult mltResult = null;
    DocListAndSet mltDocs = null;

    // Parse Required Params
    // This will either have a single Reader or valid query
    Reader reader = null;
    try {
        int start = params.getInt(CommonParams.START, 0);
        int rows = params.getInt(CommonParams.ROWS, 10);

        // for use when passed a content stream
        if (q == null || q.trim().length() < 1) {
            reader = getContentStreamReader(req, reader);
        }
        // Find documents MoreLikeThis - either with a reader or a query
        // --------------------------------------------------------------------------------
        if (reader != null) {
            // this will only be initialized if used with a content stream (see above)
            mltResult = mlt.getMoreLikeThisFromContentSteam(reader, start, rows, mltFqFilters, flags,
                    sortSpec.getSort());
        } else if (q != null) {
            // Matching options
            mltResult = getMoreLikeTheseFromQuery(rsp, params, flags, q, query, sortSpec, targetFqFilters,
                    mltFqFilters, searcher, mlt, start, rows);
        } else {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                    "MoreLikeThis requires either a query (?q=) or text to find similar documents.");
        }
        if (mltResult != null) {
            mltDocs = mltResult.getDoclist();
        }

    } finally {
        if (reader != null) {
            reader.close();
        }
    }

    if (mltDocs == null) {
        mltDocs = new DocListAndSet(); // avoid NPE
    }
    rsp.add("response", mltDocs.docList);

    if (mltResult != null && termStyle != MoreLikeThisParams.TermStyle.NONE) {
        addInterestingTerms(rsp, termStyle, mltResult);
    }

    // maybe facet the results
    if (params.getBool(FacetParams.FACET, false)) {
        addFacet(req, rsp, params, mltDocs);
    }

    addDebugInfo(req, rsp, q, mltFqFilters, mlt, mltResult);
}

From source file:org.dice.solrenhancements.spellchecker.DiceSpellCheckComponent.java

License:Apache License
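
This spellcheck component deliberately requires spellcheck.q instead of CommonParams.Q for the terms to check, and before collating it removes CommonParams.Q from the parameters, since having both q and spellcheck.q set breaks collations for some queries.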

@Override
@SuppressWarnings("unchecked")
public void process(ResponseBuilder rb) throws IOException {
    SolrParams params = rb.req.getParams();
    if (!params.getBool(COMPONENT_NAME, false) || spellCheckers.isEmpty()) {
        return;
    }
    boolean shardRequest = "true".equals(params.get(ShardParams.IS_SHARD));
    String q = params.get(SPELLCHECK_Q);
    SolrSpellChecker spellChecker = getSpellChecker(params);
    Collection<Token> tokens = null;

    if (q == null) {
        // enforce usage of the spellcheck.q parameter - i.e. a query we can tokenize with a regular tokenizer, not
        // a Solr query - for the spell checking. Usage of the SolrQueryConverter is buggy and breaks frequently
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The spellcheck.q parameter is required.");
    } else {
        //we have a spell check param, tokenize it with the query analyzer applicable for this spellchecker
        tokens = getTokens(q, spellChecker.getQueryAnalyzer());
    }
    if (tokens != null && tokens.isEmpty() == false) {
        if (spellChecker != null) {
            int count = params.getInt(SPELLCHECK_COUNT, 1);
            boolean onlyMorePopular = params.getBool(SPELLCHECK_ONLY_MORE_POPULAR, DEFAULT_ONLY_MORE_POPULAR);
            boolean extendedResults = params.getBool(SPELLCHECK_EXTENDED_RESULTS, false);
            boolean collate = params.getBool(SPELLCHECK_COLLATE, false);
            float accuracy = params.getFloat(SPELLCHECK_ACCURACY, Float.MIN_VALUE);
            Integer alternativeTermCount = params.getInt(SpellingParams.SPELLCHECK_ALTERNATIVE_TERM_COUNT);
            Integer maxResultsForSuggest = params.getInt(SpellingParams.SPELLCHECK_MAX_RESULTS_FOR_SUGGEST);
            ModifiableSolrParams customParams = new ModifiableSolrParams();
            for (String checkerName : getDictionaryNames(params)) {
                customParams.add(getCustomParams(checkerName, params));
            }

            Integer hitsInteger = (Integer) rb.rsp.getToLog().get("hits");
            long hits = 0;
            if (hitsInteger == null) {
                hits = rb.getNumberDocumentsFound();
            } else {
                hits = hitsInteger.longValue();
            }
            SpellingResult spellingResult = null;
            if (maxResultsForSuggest == null || hits <= maxResultsForSuggest) {
                SuggestMode suggestMode = SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX;
                if (onlyMorePopular) {
                    suggestMode = SuggestMode.SUGGEST_MORE_POPULAR;
                } else if (alternativeTermCount != null) {
                    suggestMode = SuggestMode.SUGGEST_ALWAYS;
                }

                IndexReader reader = rb.req.getSearcher().getIndexReader();
                SpellingOptions options = new SpellingOptions(tokens, reader, count, alternativeTermCount,
                        suggestMode, extendedResults, accuracy, customParams);
                spellingResult = spellChecker.getSuggestions(options);
            } else {
                spellingResult = new SpellingResult();
            }
            boolean isCorrectlySpelled = hits > (maxResultsForSuggest == null ? 0 : maxResultsForSuggest);
            NamedList suggestions = toNamedList(shardRequest, spellingResult, q, extendedResults, collate,
                    isCorrectlySpelled);
            if (collate) {
                ModifiableSolrParams modParams = new ModifiableSolrParams(params);
                // SH: having both spellcheck.q and q set screws up collations for some queries, such as "java develope"
                modParams.remove(CommonParams.Q);

                //SH: Note that the collator runs a query against the DF specified field. Ideally it should
                //run the query against the spellchecker field but that's inaccessible here
                addCollationsToResponse(modParams, spellingResult, rb, q, suggestions,
                        spellChecker.isSuggestionsMayOverlap());
            }
            NamedList response = new SimpleOrderedMap();
            response.add("suggestions", suggestions);
            rb.rsp.add("spellcheck", response);

        } else {
            throw new SolrException(SolrException.ErrorCode.NOT_FOUND, "Specified dictionaries do not exist: "
                    + getDictionaryNameAsSingleString(getDictionaryNames(params)));
        }
    }
}

From source file:org.dice.solrenhancements.spellchecker.DiceSpellCheckComponent.java

License:Apache License
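
In the distributed finishStage, CommonParams.Q is the last fallback for recovering the original query when neither spellcheck.q nor the ResponseBuilder's query string is available.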

@Override
@SuppressWarnings({ "unchecked", "deprecation" })
public void finishStage(ResponseBuilder rb) {
    SolrParams params = rb.req.getParams();
    if (!params.getBool(COMPONENT_NAME, false) || rb.stage != ResponseBuilder.STAGE_GET_FIELDS)
        return;

    boolean extendedResults = params.getBool(SPELLCHECK_EXTENDED_RESULTS, false);
    boolean collate = params.getBool(SPELLCHECK_COLLATE, false);
    boolean collationExtendedResults = params.getBool(SPELLCHECK_COLLATE_EXTENDED_RESULTS, false);
    int maxCollationTries = params.getInt(SPELLCHECK_MAX_COLLATION_TRIES, 0);
    int maxCollations = params.getInt(SPELLCHECK_MAX_COLLATIONS, 1);
    Integer maxResultsForSuggest = params.getInt(SpellingParams.SPELLCHECK_MAX_RESULTS_FOR_SUGGEST);
    int count = rb.req.getParams().getInt(SPELLCHECK_COUNT, 1);
    int numSug = Math.max(count, AbstractLuceneSpellChecker.DEFAULT_SUGGESTION_COUNT);

    String origQuery = params.get(SPELLCHECK_Q);
    if (origQuery == null) {
        origQuery = rb.getQueryString();
        if (origQuery == null) {
            origQuery = params.get(CommonParams.Q);
        }
    }

    long hits = rb.grouping() ? rb.totalHitCount : rb.getNumberDocumentsFound();
    boolean isCorrectlySpelled = hits > (maxResultsForSuggest == null ? 0 : maxResultsForSuggest);

    SpellCheckMergeData mergeData = new SpellCheckMergeData();
    if (maxResultsForSuggest == null || !isCorrectlySpelled) {
        for (ShardRequest sreq : rb.finished) {
            for (ShardResponse srsp : sreq.responses) {
                NamedList nl = (NamedList) srsp.getSolrResponse().getResponse().get("spellcheck");
                LOG.info(srsp.getShard() + " " + nl);
                if (nl != null) {
                    mergeData.totalNumberShardResponses++;
                    collectShardSuggestions(nl, mergeData);
                    collectShardCollations(mergeData, nl, maxCollationTries);
                }
            }
        }
    }

    // all shard responses have been collected
    // create token and get top suggestions
    SolrSpellChecker checker = getSpellChecker(rb.req.getParams());
    SpellingResult result = checker.mergeSuggestions(mergeData, numSug, count, extendedResults);

    NamedList response = new SimpleOrderedMap();
    NamedList suggestions = toNamedList(false, result, origQuery, extendedResults, collate, isCorrectlySpelled);
    if (collate) {
        SpellCheckCollation[] sortedCollations = mergeData.collations.values()
                .toArray(new SpellCheckCollation[mergeData.collations.size()]);
        Arrays.sort(sortedCollations);
        int i = 0;
        while (i < maxCollations && i < sortedCollations.length) {
            SpellCheckCollation collation = sortedCollations[i];
            i++;
            if (collationExtendedResults) {
                NamedList extendedResult = new NamedList();
                extendedResult.add("collationQuery", collation.getCollationQuery());
                extendedResult.add("hits", collation.getHits());
                extendedResult.add("misspellingsAndCorrections", collation.getMisspellingsAndCorrections());
                suggestions.add("collation", extendedResult);
            } else {
                suggestions.add("collation", collation.getCollationQuery());
            }
        }
    }

    response.add("suggestions", suggestions);
    rb.rsp.add("spellcheck", response);
}

From source file:org.dice.solrenhancements.unsupervisedfeedback.DiceUnsupervisedFeedbackHandler.java

License:Apache License
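
This unsupervised-feedback handler treats CommonParams.Q as mandatory: the q value is parsed into the query that is expanded and re-executed, and a request without it is rejected with a 400 error.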

@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    SolrIndexSearcher searcher = req.getSearcher();
    SchemaField uniqueKeyField = searcher.getSchema().getUniqueKeyField();
    ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
    configureSolrParameters(req, params, uniqueKeyField.getName());

    // Set field flags
    ReturnFields returnFields = new SolrReturnFields(req);
    rsp.setReturnFields(returnFields);
    int flags = 0;
    if (returnFields.wantsScore()) {
        flags |= SolrIndexSearcher.GET_SCORES;
    }

    String defType = params.get(QueryParsing.DEFTYPE, QParserPlugin.DEFAULT_QTYPE);
    int maxDocumentsToMatch = params.getInt(UnsupervisedFeedbackParams.MAX_DOCUMENTS_TO_PROCESS,
            UnsupervisedFeedback.DEFAULT_MAX_NUM_DOCUMENTS_TO_PROCESS);
    String q = params.get(CommonParams.Q);
    Query query = null;
    SortSpec sortSpec = null;
    QParser parser = null;

    List<Query> targetFqFilters = null;
    List<Query> mltFqFilters = null;

    try {

        parser = QParser.getParser(q, defType, req);
        query = parser.getQuery();
        sortSpec = parser.getSort(true);

        targetFqFilters = getFilters(req, CommonParams.FQ);
        mltFqFilters = getFilters(req, UnsupervisedFeedbackParams.FQ);
    } catch (SyntaxError e) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
    }

    UnsupervisedFeedbackHelper mlt = new UnsupervisedFeedbackHelper(params, searcher, uniqueKeyField, parser);

    // Hold on to the interesting terms if relevant
    UnsupervisedFeedbackParams.TermStyle termStyle = UnsupervisedFeedbackParams.TermStyle
            .get(params.get(UnsupervisedFeedbackParams.INTERESTING_TERMS));
    List<InterestingTerm> interesting = (termStyle == UnsupervisedFeedbackParams.TermStyle.NONE) ? null
            : new ArrayList<InterestingTerm>(mlt.uf.getMaxQueryTermsPerField());

    DocListAndSet uffDocs = null;

    // Parse Required Params
    // This will either have a single Reader or valid query
    Reader reader = null;
    try {
        int start = params.getInt(CommonParams.START, 0);
        int rows = params.getInt(CommonParams.ROWS, 10);

        // Find documents MoreLikeThis - either with a reader or a query
        // --------------------------------------------------------------------------------
        if (q == null) {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                    "Dice unsupervised feedback handler requires either a query (?q=) to find similar documents.");

        } else {

            uffDocs = expandQueryAndReExecute(rsp, params, maxDocumentsToMatch, flags, q, query, sortSpec,
                    targetFqFilters, mltFqFilters, searcher, mlt, interesting, uffDocs, start, rows);
        }

    } finally {
        if (reader != null) {
            reader.close();
        }
    }

    if (uffDocs == null) {
        uffDocs = new DocListAndSet(); // avoid NPE
    }
    rsp.add("response", uffDocs.docList);

    if (interesting != null) {
        addInterestingTerms(rsp, termStyle, interesting);
    }

    // maybe facet the results
    if (params.getBool(FacetParams.FACET, false)) {
        addFacet(req, rsp, params, uffDocs);
    }

    addDebugInfo(req, rsp, q, mltFqFilters, mlt, uffDocs);
}

From source file:org.dspace.app.xmlui.aspect.discovery.json.JSONSolrSearcher.java

License:BSD License
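
Here CommonParams.Q is used as a plain HTTP parameter name: the Cocoon reader pulls q from the incoming request and substitutes the match-all query *:* when it is missing.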

@Override
public void setup(SourceResolver resolver, Map objectModel, String src, Parameters par)
        throws ProcessingException, SAXException, IOException {
    //Retrieve all the given parameters
    Request request = ObjectModelHelper.getRequest(objectModel);
    this.response = ObjectModelHelper.getResponse(objectModel);

    query = request.getParameter(CommonParams.Q);
    if (query == null) {
        query = "*:*";
    }

    //Retrieve all our filter queries
    filterQueries = request.getParameterValues(CommonParams.FQ);

    //Retrieve our facet fields
    facetFields = request.getParameterValues(FacetParams.FACET_FIELD);

    //Retrieve our facet limit (if any)
    if (request.getParameter(FacetParams.FACET_LIMIT) != null) {
        try {
            facetLimit = Integer.parseInt(request.getParameter(FacetParams.FACET_LIMIT));
        } catch (Exception e) {
            //Should an invalid value be supplied use -1
            facetLimit = -1;
        }
    } else {
        facetLimit = -1;
    }

    //Retrieve our sorting value
    facetSort = request.getParameter(FacetParams.FACET_SORT);
    //Make sure we have a valid sorting value
    if (!FacetParams.FACET_SORT_INDEX.equals(facetSort) && !FacetParams.FACET_SORT_COUNT.equals(facetSort)) {
        facetSort = null;
    }

    //Retrieve our facet min count
    facetMinCount = 1;
    try {
        facetMinCount = Integer.parseInt(request.getParameter(FacetParams.FACET_MINCOUNT));
    } catch (Exception e) {
        facetMinCount = 1;
    }
    jsonWrf = request.getParameter("json.wrf");

    //Retrieve our discovery solr path
    ExtendedProperties props = null;
    //Method that will retrieve all the possible configs we have

    props = ExtendedProperties.convertProperties(ConfigurationManager.getProperties());

    InputStream is = null;
    try {
        File config = new File(props.getProperty("dspace.dir") + "/config/dspace-solr-search.cfg");
        if (config.exists()) {
            props.combine(new ExtendedProperties(config.getAbsolutePath()));
        } else {
            is = SolrServiceImpl.class.getResourceAsStream("dspace-solr-search.cfg");
            ExtendedProperties defaults = new ExtendedProperties();
            defaults.load(is);
            props.combine(defaults);
        }
    } catch (Exception e) {
        log.error("Error while retrieving solr url", e);
        e.printStackTrace();
    } finally {
        if (is != null) {
            is.close();
        }
    }

    if (props.getProperty("solr.search.server") != null) {
        this.solrServerUrl = props.getProperty("solr.search.server").toString();
    }

}

From source file:org.dspace.app.xmlui.aspect.discovery.json.JSONSolrSearcher.java

License:BSD License
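
The same class then uses CommonParams.Q as a key when assembling the outgoing Solr request URL, storing the URL-encoded query value alongside the other parameters.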

public void generate() throws IOException, SAXException, ProcessingException {
    if (solrServerUrl == null) {
        return;//from  w w  w.  j a  v a 2s. c  om
    }

    Map<String, String> params = new HashMap<String, String>();

    String solrRequestUrl = solrServerUrl + "/select";

    //Add our default parameters
    params.put(CommonParams.ROWS, "0");
    params.put(CommonParams.WT, "json");
    //We use JSON as our output type
    params.put("json.nl", "map");
    params.put("json.wrf", jsonWrf);
    params.put(FacetParams.FACET, Boolean.TRUE.toString());

    //Generate our json out of the given params
    try {
        params.put(CommonParams.Q, URLEncoder.encode(query, Constants.DEFAULT_ENCODING));
    } catch (UnsupportedEncodingException uee) {
        //Should never occur
        return;
    }

    params.put(FacetParams.FACET_LIMIT, String.valueOf(facetLimit));
    if (facetSort != null) {
        params.put(FacetParams.FACET_SORT, facetSort);
    }
    params.put(FacetParams.FACET_MINCOUNT, String.valueOf(facetMinCount));

    solrRequestUrl = AbstractDSpaceTransformer.generateURL(solrRequestUrl, params);
    if (facetFields != null || filterQueries != null) {
        StringBuilder urlBuilder = new StringBuilder(solrRequestUrl);
        if (facetFields != null) {

            //Add our facet fields
            for (String facetField : facetFields) {
                urlBuilder.append("&").append(FacetParams.FACET_FIELD).append("=");

                //This class can only be used for autocomplete facet fields
                if (!facetField.endsWith(".year") && !facetField.endsWith("_ac")) {
                    urlBuilder.append(URLEncoder.encode(facetField + "_ac", Constants.DEFAULT_ENCODING));
                } else {
                    urlBuilder.append(URLEncoder.encode(facetField, Constants.DEFAULT_ENCODING));
                }
            }

        }
        if (filterQueries != null) {
            for (String filterQuery : filterQueries) {
                urlBuilder.append("&").append(CommonParams.FQ).append("=")
                        .append(URLEncoder.encode(filterQuery, Constants.DEFAULT_ENCODING));
            }
        }

        solrRequestUrl = urlBuilder.toString();
    }

    try {
        GetMethod get = new GetMethod(solrRequestUrl);
        new HttpClient().executeMethod(get);
        String result = get.getResponseBodyAsString();
        if (result != null) {
            byte[] resultBytes = result.getBytes("UTF-8");
            ByteArrayInputStream inputStream = new ByteArrayInputStream(resultBytes);

            byte[] buffer = new byte[8192];

            //Content-Length must be the byte count of the UTF-8 body, not the character count
            response.setHeader("Content-Length", String.valueOf(resultBytes.length));
            int length;
            while ((length = inputStream.read(buffer)) > -1) {
                out.write(buffer, 0, length);
            }
            out.flush();
        }
    } catch (Exception e) {
        log.error("Error while getting json solr result for discovery search recommendation", e);
        e.printStackTrace();
    }

}

From source file:org.dspace.statistics.SolrLogger.java

License:BSD License
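
When sharding the statistics index into per-year cores, CommonParams.Q is pinned to the match-all query *:* and the per-year restriction is expressed through CommonParams.FQ.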

public static void shardSolrIndex() throws IOException, SolrServerException {
    /*
    Start by faceting by year so we can include each year in a separate core !
     */
    SolrQuery yearRangeQuery = new SolrQuery();
    yearRangeQuery.setQuery("*:*");
    yearRangeQuery.setRows(0);
    yearRangeQuery.setFacet(true);
    yearRangeQuery.add(FacetParams.FACET_RANGE, "time");
    //We go back to the year 2000; a bit of overkill, but this way we ensure we have everything
    //The alternative would be to sort, but that isn't recommended since it would be a very costly query
    yearRangeQuery.add(FacetParams.FACET_RANGE_START,
            "NOW/YEAR-" + (Calendar.getInstance().get(Calendar.YEAR) - 2000) + "YEARS");
    //Add the +0year to ensure that we DO NOT include the current year
    yearRangeQuery.add(FacetParams.FACET_RANGE_END, "NOW/YEAR+0YEARS");
    yearRangeQuery.add(FacetParams.FACET_RANGE_GAP, "+1YEAR");
    yearRangeQuery.add(FacetParams.FACET_MINCOUNT, String.valueOf(1));

    //Create a temp directory to store our files in !
    File tempDirectory = new File(
            ConfigurationManager.getProperty("dspace.dir") + File.separator + "temp" + File.separator);
    tempDirectory.mkdirs();

    QueryResponse queryResponse = solr.query(yearRangeQuery);
    //We only have one range query !
    List<RangeFacet.Count> yearResults = queryResponse.getFacetRanges().get(0).getCounts();
    for (RangeFacet.Count count : yearResults) {
        long totalRecords = count.getCount();

        //Create a range query from this !
        //We start with our current year
        DCDate dcStart = new DCDate(count.getValue());
        Calendar endDate = Calendar.getInstance();
        //Advance one year for the start of the next one !
        endDate.setTime(dcStart.toDate());
        endDate.add(Calendar.YEAR, 1);
        DCDate dcEndDate = new DCDate(endDate.getTime());

        StringBuilder filterQuery = new StringBuilder();
        filterQuery.append("time:([");
        filterQuery.append(ClientUtils.escapeQueryChars(dcStart.toString()));
        filterQuery.append(" TO ");
        filterQuery.append(ClientUtils.escapeQueryChars(dcEndDate.toString()));
        filterQuery.append("]");
        //The next part of the filter query excludes the content from midnight of the next year !
        filterQuery.append(" NOT ").append(ClientUtils.escapeQueryChars(dcEndDate.toString()));
        filterQuery.append(")");

        Map<String, String> yearQueryParams = new HashMap<String, String>();
        yearQueryParams.put(CommonParams.Q, "*:*");
        yearQueryParams.put(CommonParams.ROWS, String.valueOf(10000));
        yearQueryParams.put(CommonParams.FQ, filterQuery.toString());
        yearQueryParams.put(CommonParams.WT, "csv");

        //Start by creating a new core
        String coreName = "statistics-" + dcStart.getYear();
        HttpSolrServer statisticsYearServer = createCore(solr, coreName);

        System.out.println("Moving: " + totalRecords + " into core " + coreName);
        log.info("Moving: " + totalRecords + " records into core " + coreName);

        List<File> filesToUpload = new ArrayList<File>();
        for (int i = 0; i < totalRecords; i += 10000) {
            String solrRequestUrl = solr.getBaseURL() + "/select";
            solrRequestUrl = generateURL(solrRequestUrl, yearQueryParams);

            HttpGet get = new HttpGet(solrRequestUrl);
            HttpResponse response = new DefaultHttpClient().execute(get);
            InputStream csvInputstream = response.getEntity().getContent();
            //Write the CSV output to a file
            File csvFile = new File(tempDirectory.getPath() + File.separatorChar + "temp." + dcStart.getYear()
                    + "." + i + ".csv");
            FileUtils.copyInputStreamToFile(csvInputstream, csvFile);
            filesToUpload.add(csvFile);

            //Add 10000 & start over again
            yearQueryParams.put(CommonParams.START, String.valueOf((i + 10000)));
        }

        for (File tempCsv : filesToUpload) {
            //Upload the data in the csv files to our new solr core
            ContentStreamUpdateRequest contentStreamUpdateRequest = new ContentStreamUpdateRequest(
                    "/update/csv");
            contentStreamUpdateRequest.setParam("stream.contentType", "text/plain;charset=utf-8");
            contentStreamUpdateRequest.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
            contentStreamUpdateRequest.addFile(tempCsv, "text/plain;charset=utf-8");

            statisticsYearServer.request(contentStreamUpdateRequest);
        }
        statisticsYearServer.commit(true, true);

        //Delete contents of this year from our year query !
        solr.deleteByQuery(filterQuery.toString());
        solr.commit(true, true);

        log.info("Moved " + totalRecords + " records into core: " + coreName);
    }

    FileUtils.deleteDirectory(tempDirectory);
}

From source file:org.dspace.statistics.SolrLogger.java

License:BSD License
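
This reindexing job likewise sets CommonParams.Q to *:* and pages through CSV exports of bitstream records that lack a bundle name, filling the bundle name in before re-uploading them.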

public static void reindexBitstreamHits(boolean removeDeletedBitstreams) throws Exception {
    Context context = new Context();

    try {
        //First of all retrieve the total number of records to be updated
        SolrQuery query = new SolrQuery();
        query.setQuery("*:*");
        query.addFilterQuery("type:" + Constants.BITSTREAM);
        //Only retrieve records which do not have a bundle name
        query.addFilterQuery("-bundleName:[* TO *]");
        query.setRows(0);
        addAdditionalSolrYearCores(query);
        long totalRecords = solr.query(query).getResults().getNumFound();

        File tempDirectory = new File(
                ConfigurationManager.getProperty("dspace.dir") + File.separator + "temp" + File.separator);
        tempDirectory.mkdirs();
        List<File> tempCsvFiles = new ArrayList<File>();
        for (int i = 0; i < totalRecords; i += 10000) {
            Map<String, String> params = new HashMap<String, String>();
            params.put(CommonParams.Q, "*:*");
            params.put(CommonParams.FQ, "-bundleName:[* TO *] AND type:" + Constants.BITSTREAM);
            params.put(CommonParams.WT, "csv");
            params.put(CommonParams.ROWS, String.valueOf(10000));
            params.put(CommonParams.START, String.valueOf(i));

            String solrRequestUrl = solr.getBaseURL() + "/select";
            solrRequestUrl = generateURL(solrRequestUrl, params);

            HttpGet get = new HttpGet(solrRequestUrl);
            HttpResponse response = new DefaultHttpClient().execute(get);

            InputStream csvOutput = response.getEntity().getContent();
            Reader csvReader = new InputStreamReader(csvOutput);
            List<String[]> rows = new CSVReader(csvReader).readAll();
            String[][] csvParsed = rows.toArray(new String[rows.size()][]);
            String[] header = csvParsed[0];
            //Attempt to find the bitstream id index !
            int idIndex = 0;
            for (int j = 0; j < header.length; j++) {
                if (header[j].equals("id")) {
                    idIndex = j;
                }
            }

            File tempCsv = new File(tempDirectory.getPath() + File.separatorChar + "temp." + i + ".csv");
            tempCsvFiles.add(tempCsv);
            CSVWriter csvp = new CSVWriter(new FileWriter(tempCsv));
            //csvp.setAlwaysQuote(false);

            //Write the header !
            csvp.writeNext((String[]) ArrayUtils.add(header, "bundleName"));
            Map<Integer, String> bitBundleCache = new HashMap<Integer, String>();
            //Loop over each line (skip the headers though)!
            for (int j = 1; j < csvParsed.length; j++) {
                String[] csvLine = csvParsed[j];
                //Parse the bitstream id from this line
                int bitstreamId = Integer.parseInt(csvLine[idIndex]);
                //Attempt to retrieve our bundle name from the cache !
                String bundleName = bitBundleCache.get(bitstreamId);
                if (bundleName == null) {
                    //Nothing found in the cache; retrieve the bitstream
                    Bitstream bitstream = Bitstream.find(context, bitstreamId);
                    //If the bitstream still exists, look up its bundle name
                    if (bitstream != null) {
                        Bundle[] bundles = bitstream.getBundles();
                        if (bundles != null && 0 < bundles.length) {
                            Bundle bundle = bundles[0];
                            bundleName = bundle.getName();
                            context.removeCached(bundle, bundle.getID());
                        } else {
                            //No bundle found, we are either a collection or a community logo, check for it !
                            DSpaceObject parentObject = bitstream.getParentObject();
                            if (parentObject instanceof Collection) {
                                bundleName = "LOGO-COLLECTION";
                            } else if (parentObject instanceof Community) {
                                bundleName = "LOGO-COMMUNITY";
                            }
                            if (parentObject != null) {
                                context.removeCached(parentObject, parentObject.getID());
                            }

                        }
                        //Cache the bundle name
                        bitBundleCache.put(bitstream.getID(), bundleName);
                        //Remove the bitstream from cache
                        context.removeCached(bitstream, bitstreamId);
                    }
                    //If we still have no bundle name and deleted bitstreams are being kept,
                    //mark the record with a BITSTREAM_DELETED bundle name
                    if (bundleName == null && !removeDeletedBitstreams) {
                        bundleName = "BITSTREAM_DELETED";
                    }
                }
                csvp.writeNext((String[]) ArrayUtils.add(csvLine, bundleName));
            }

            //Flush and close the CSV file for this batch
            csvp.flush();
            csvp.close();
        }

        //Add all the separate csv files
        for (File tempCsv : tempCsvFiles) {
            ContentStreamUpdateRequest contentStreamUpdateRequest = new ContentStreamUpdateRequest(
                    "/update/csv");
            contentStreamUpdateRequest.setParam("stream.contentType", "text/plain;charset=utf-8");
            contentStreamUpdateRequest.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
            contentStreamUpdateRequest.addFile(tempCsv, "text/plain;charset=utf-8");

            solr.request(contentStreamUpdateRequest);
        }

        //Now that all our new bitstream stats are in place, delete all the old ones !
        solr.deleteByQuery("-bundleName:[* TO *] AND type:" + Constants.BITSTREAM);
        //Commit everything to wrap up
        solr.commit(true, true);
        //Clean up our directory !
        FileUtils.deleteDirectory(tempDirectory);
    } catch (Exception e) {
        log.error("Error while updating the bitstream statistics", e);
        throw e;
    } finally {
        context.abort();
    }
}

From source file:org.dspace.statistics.SolrLogger.java

License:BSD License
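
The export job sets CommonParams.Q to a query that selects view events (including legacy records with no statistics_type field) and pages through the results 10,000 rows at a time.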

/**
 * Export all SOLR usage statistics for viewing/downloading content to a flat text file.
 * The output goes to a series of CSV files in the DSpace temp directory, one file per block of 10,000 records.
 *
 * @throws Exception
 */
public static void exportHits() throws Exception {
    Context context = new Context();

    File tempDirectory = new File(
            ConfigurationManager.getProperty("dspace.dir") + File.separator + "temp" + File.separator);
    tempDirectory.mkdirs();

    try {
        //First of all retrieve the total number of records to be updated
        SolrQuery query = new SolrQuery();
        query.setQuery("*:*");

        ModifiableSolrParams solrParams = new ModifiableSolrParams();
        solrParams.set(CommonParams.Q, "statistics_type:view OR (*:* AND -statistics_type:*)");
        solrParams.set(CommonParams.WT, "javabin");
        solrParams.set(CommonParams.ROWS, String.valueOf(10000));

        addAdditionalSolrYearCores(query);
        long totalRecords = solr.query(query).getResults().getNumFound();
        System.out.println("There are " + totalRecords + " usage events in SOLR for download/view.");

        for (int i = 0; i < totalRecords; i += 10000) {
            solrParams.set(CommonParams.START, String.valueOf(i));
            QueryResponse queryResponse = solr.query(solrParams);
            SolrDocumentList docs = queryResponse.getResults();

            File exportOutput = new File(
                    tempDirectory.getPath() + File.separatorChar + "usagestats_" + i + ".csv");
            exportOutput.delete();

            //export docs
            addDocumentsToFile(context, docs, exportOutput);
            System.out.println("Export hits [" + i + " - " + String.valueOf(i + 9999) + "] to "
                    + exportOutput.getCanonicalPath());
        }
    } catch (Exception e) {
        log.error("Error while exporting SOLR data", e);
        throw e;
    } finally {
        context.abort();
    }
}