Example usage for org.apache.solr.common.params CommonParams START

Introduction

This page shows example usage of org.apache.solr.common.params CommonParams START, drawn from open-source projects.

Prototype

String START

To view the source code for org.apache.solr.common.params CommonParams START, click the Source link.

Document

Zero-based offset of the first matching document to retrieve.
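
A minimal sketch of typical usage: "solr" below stands for an already-configured SolrJ client and is an assumption, not part of the examples on this page.

import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;

//Page through matching documents ten rows at a time;
//START is the zero-based offset of the first document to return.
ModifiableSolrParams params = new ModifiableSolrParams();
params.set(CommonParams.Q, "*:*");
params.set(CommonParams.ROWS, 10);
params.set(CommonParams.START, 20); //skip the first 20 matches, i.e. request the third page
//QueryResponse response = solr.query(params); //'solr' is an assumed, pre-configured client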

Usage

From source file:org.dspace.statistics.SolrLogger.java

License:BSD License

public static void reindexBitstreamHits(boolean removeDeletedBitstreams) throws Exception {
    Context context = new Context();

    try {
        //First of all retrieve the total number of records to be updated
        SolrQuery query = new SolrQuery();
        query.setQuery("*:*");
        query.addFilterQuery("type:" + Constants.BITSTREAM);
        //Only retrieve records which do not have a bundle name
        query.addFilterQuery("-bundleName:[* TO *]");
        query.setRows(0);
        addAdditionalSolrYearCores(query);
        long totalRecords = solr.query(query).getResults().getNumFound();

        File tempDirectory = new File(
                ConfigurationManager.getProperty("dspace.dir") + File.separator + "temp" + File.separator);
        tempDirectory.mkdirs();
        List<File> tempCsvFiles = new ArrayList<File>();
        for (int i = 0; i < totalRecords; i += 10000) {
            Map<String, String> params = new HashMap<String, String>();
            params.put(CommonParams.Q, "*:*");
            params.put(CommonParams.FQ, "-bundleName:[* TO *] AND type:" + Constants.BITSTREAM);
            params.put(CommonParams.WT, "csv");
            params.put(CommonParams.ROWS, String.valueOf(10000));
            params.put(CommonParams.START, String.valueOf(i));

            String solrRequestUrl = solr.getBaseURL() + "/select";
            solrRequestUrl = generateURL(solrRequestUrl, params);

            HttpGet get = new HttpGet(solrRequestUrl);
            HttpResponse response = new DefaultHttpClient().execute(get);

            InputStream csvOutput = response.getEntity().getContent();
            Reader csvReader = new InputStreamReader(csvOutput);
            List<String[]> rows = new CSVReader(csvReader).readAll();
            String[][] csvParsed = rows.toArray(new String[rows.size()][]);
            String[] header = csvParsed[0];
            //Attempt to find the bitstream id index !
            int idIndex = 0;
            for (int j = 0; j < header.length; j++) {
                if (header[j].equals("id")) {
                    idIndex = j;
                }
            }

            File tempCsv = new File(tempDirectory.getPath() + File.separatorChar + "temp." + i + ".csv");
            tempCsvFiles.add(tempCsv);
            CSVWriter csvp = new CSVWriter(new FileWriter(tempCsv));
            //csvp.setAlwaysQuote(false);

            //Write the header !
            csvp.writeNext((String[]) ArrayUtils.add(header, "bundleName"));
            Map<Integer, String> bitBundleCache = new HashMap<Integer, String>();
            //Loop over each line (skip the headers though)!
            for (int j = 1; j < csvParsed.length; j++) {
                String[] csvLine = csvParsed[j];
                //Write the default line !
                int bitstreamId = Integer.parseInt(csvLine[idIndex]);
                //Attempt to retrieve our bundle name from the cache !
                String bundleName = bitBundleCache.get(bitstreamId);
                if (bundleName == null) {
                    //Nothing cached; retrieve the bitstream
                    Bitstream bitstream = Bitstream.find(context, bitstreamId);
                    //Attempt to retrieve our bitstream !
                    if (bitstream != null) {
                        Bundle[] bundles = bitstream.getBundles();
                        if (bundles != null && 0 < bundles.length) {
                            Bundle bundle = bundles[0];
                            bundleName = bundle.getName();
                            context.removeCached(bundle, bundle.getID());
                        } else {
                            //No bundle found, we are either a collection or a community logo, check for it !
                            DSpaceObject parentObject = bitstream.getParentObject();
                            if (parentObject instanceof Collection) {
                                bundleName = "LOGO-COLLECTION";
                            } else if (parentObject instanceof Community) {
                                bundleName = "LOGO-COMMUNITY";
                            }
                            if (parentObject != null) {
                                context.removeCached(parentObject, parentObject.getID());
                            }

                        }
                        //Cache the bundle name
                        bitBundleCache.put(bitstream.getID(), bundleName);
                        //Remove the bitstream from cache
                        context.removeCached(bitstream, bitstreamId);
                    }
                    //Check whether we still lack a bundle name
                    //If so & we are not removing deleted bitstreams, tag the record with a BITSTREAM_DELETED bundle name
                    if (bundleName == null && !removeDeletedBitstreams) {
                        bundleName = "BITSTREAM_DELETED";
                    }
                }
                csvp.writeNext((String[]) ArrayUtils.add(csvLine, bundleName));
            }

            //Flush & close the csv writer for this batch
            csvp.flush();
            csvp.close();
        }

        //Add all the separate csv files
        for (File tempCsv : tempCsvFiles) {
            ContentStreamUpdateRequest contentStreamUpdateRequest = new ContentStreamUpdateRequest(
                    "/update/csv");
            contentStreamUpdateRequest.setParam("stream.contentType", "text/plain;charset=utf-8");
            contentStreamUpdateRequest.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
            contentStreamUpdateRequest.addFile(tempCsv, "text/plain;charset=utf-8");

            solr.request(contentStreamUpdateRequest);
        }

        //Now that all our new bitstream stats are in place, delete all the old ones !
        solr.deleteByQuery("-bundleName:[* TO *] AND type:" + Constants.BITSTREAM);
        //Commit everything to wrap up
        solr.commit(true, true);
        //Clean up our directory !
        FileUtils.deleteDirectory(tempDirectory);
    } catch (Exception e) {
        log.error("Error while updating the bitstream statistics", e);
        throw e;
    } finally {
        context.abort();
    }
}

From source file:org.dspace.statistics.SolrLogger.java

License:BSD License

/**
 * Export all SOLR usage statistics for viewing/downloading content to a flat text file.
 * The output goes to a series of CSV files in the DSpace temp directory.
 *
 * @throws Exception
 */
public static void exportHits() throws Exception {
    Context context = new Context();

    File tempDirectory = new File(
            ConfigurationManager.getProperty("dspace.dir") + File.separator + "temp" + File.separator);
    tempDirectory.mkdirs();

    try {
        //First of all retrieve the total number of records to be updated
        SolrQuery query = new SolrQuery();
        query.setQuery("*:*");

        ModifiableSolrParams solrParams = new ModifiableSolrParams();
        solrParams.set(CommonParams.Q, "statistics_type:view OR (*:* AND -statistics_type:*)");
        solrParams.set(CommonParams.WT, "javabin");
        solrParams.set(CommonParams.ROWS, String.valueOf(10000));

        addAdditionalSolrYearCores(query);
        long totalRecords = solr.query(query).getResults().getNumFound();
        System.out.println("There are " + totalRecords + " usage events in SOLR for download/view.");

        for (int i = 0; i < totalRecords; i += 10000) {
            solrParams.set(CommonParams.START, String.valueOf(i));
            QueryResponse queryResponse = solr.query(solrParams);
            SolrDocumentList docs = queryResponse.getResults();

            File exportOutput = new File(
                    tempDirectory.getPath() + File.separatorChar + "usagestats_" + i + ".csv");
            exportOutput.delete();

            //export docs
            addDocumentsToFile(context, docs, exportOutput);
            System.out.println("Export hits [" + i + " - " + String.valueOf(i + 9999) + "] to "
                    + exportOutput.getCanonicalPath());
        }
    } catch (Exception e) {
        log.error("Error while exporting SOLR data", e);
        throw e;
    } finally {
        context.abort();
    }
}

From source file:org.dspace.statistics.SolrLoggerServiceImpl.java

License:BSD License

@Override
public void shardSolrIndex() throws IOException, SolrServerException {
    /*
    Start by faceting by year so we can include each year in a separate core !
     */
    SolrQuery yearRangeQuery = new SolrQuery();
    yearRangeQuery.setQuery("*:*");
    yearRangeQuery.setRows(0);
    yearRangeQuery.setFacet(true);
    yearRangeQuery.add(FacetParams.FACET_RANGE, "time");
    //We go back to the year 2000; this is a bit overkill, but this way we ensure we have everything
    //The alternative would be to sort, but that isn't recommended since it would be a very costly query !
    yearRangeQuery.add(FacetParams.FACET_RANGE_START,
            "NOW/YEAR-" + (Calendar.getInstance().get(Calendar.YEAR) - 2000) + "YEARS");
    //Add the +0year to ensure that we DO NOT include the current year
    yearRangeQuery.add(FacetParams.FACET_RANGE_END, "NOW/YEAR+0YEARS");
    yearRangeQuery.add(FacetParams.FACET_RANGE_GAP, "+1YEAR");
    yearRangeQuery.add(FacetParams.FACET_MINCOUNT, String.valueOf(1));

    //Create a temp directory to store our files in !
    File tempDirectory = new File(
            configurationService.getProperty("dspace.dir") + File.separator + "temp" + File.separator);
    tempDirectory.mkdirs();

    QueryResponse queryResponse = solr.query(yearRangeQuery);
    //We only have one range query !
    List<RangeFacet.Count> yearResults = queryResponse.getFacetRanges().get(0).getCounts();
    for (RangeFacet.Count count : yearResults) {
        long totalRecords = count.getCount();

        //Create a range query from this !
        //We start with our current year
        DCDate dcStart = new DCDate(count.getValue());
        Calendar endDate = Calendar.getInstance();
        //Advance one year for the start of the next one !
        endDate.setTime(dcStart.toDate());
        endDate.add(Calendar.YEAR, 1);
        DCDate dcEndDate = new DCDate(endDate.getTime());

        StringBuilder filterQuery = new StringBuilder();
        filterQuery.append("time:([");
        filterQuery.append(ClientUtils.escapeQueryChars(dcStart.toString()));
        filterQuery.append(" TO ");
        filterQuery.append(ClientUtils.escapeQueryChars(dcEndDate.toString()));
        filterQuery.append("]");
        //The next part of the filter query excludes the content from midnight of the next year !
        filterQuery.append(" NOT ").append(ClientUtils.escapeQueryChars(dcEndDate.toString()));
        filterQuery.append(")");

        Map<String, String> yearQueryParams = new HashMap<String, String>();
        yearQueryParams.put(CommonParams.Q, "*:*");
        yearQueryParams.put(CommonParams.ROWS, String.valueOf(10000));
        yearQueryParams.put(CommonParams.FQ, filterQuery.toString());
        yearQueryParams.put(CommonParams.WT, "csv");

        //Start by creating a new core
        String coreName = "statistics-" + dcStart.getYear();
        HttpSolrServer statisticsYearServer = createCore(solr, coreName);

        System.out.println("Moving: " + totalRecords + " into core " + coreName);
        log.info("Moving: " + totalRecords + " records into core " + coreName);

        List<File> filesToUpload = new ArrayList<File>();
        for (int i = 0; i < totalRecords; i += 10000) {
            String solrRequestUrl = solr.getBaseURL() + "/select";
            solrRequestUrl = generateURL(solrRequestUrl, yearQueryParams);

            HttpGet get = new HttpGet(solrRequestUrl);
            HttpResponse response = new DefaultHttpClient().execute(get);
            InputStream csvInputstream = response.getEntity().getContent();
            //Write the csv output to a file !
            File csvFile = new File(tempDirectory.getPath() + File.separatorChar + "temp." + dcStart.getYear()
                    + "." + i + ".csv");
            FileUtils.copyInputStreamToFile(csvInputstream, csvFile);
            filesToUpload.add(csvFile);

            //Advance the start offset by 10000 for the next batch
            yearQueryParams.put(CommonParams.START, String.valueOf((i + 10000)));
        }

        for (File tempCsv : filesToUpload) {
            //Upload the data in the csv files to our new solr core
            ContentStreamUpdateRequest contentStreamUpdateRequest = new ContentStreamUpdateRequest(
                    "/update/csv");
            contentStreamUpdateRequest.setParam("stream.contentType", "text/plain;charset=utf-8");
            contentStreamUpdateRequest.setParam("skip", "_version_");
            contentStreamUpdateRequest.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
            contentStreamUpdateRequest.addFile(tempCsv, "text/plain;charset=utf-8");

            statisticsYearServer.request(contentStreamUpdateRequest);
        }
        statisticsYearServer.commit(true, true);

        //Delete contents of this year from our year query !
        solr.deleteByQuery(filterQuery.toString());
        solr.commit(true, true);

        log.info("Moved " + totalRecords + " records into core: " + coreName);
    }

    FileUtils.deleteDirectory(tempDirectory);
}

From source file:org.dspace.statistics.SolrLoggerServiceImpl.java

License:BSD License

@Override
public void reindexBitstreamHits(boolean removeDeletedBitstreams) throws Exception {
    Context context = new Context();

    try {
        //First of all retrieve the total number of records to be updated
        SolrQuery query = new SolrQuery();
        query.setQuery("*:*");
        query.addFilterQuery("type:" + Constants.BITSTREAM);
        //Only retrieve records which do not have a bundle name
        query.addFilterQuery("-bundleName:[* TO *]");
        query.setRows(0);
        addAdditionalSolrYearCores(query);
        long totalRecords = solr.query(query).getResults().getNumFound();

        File tempDirectory = new File(
                configurationService.getProperty("dspace.dir") + File.separator + "temp" + File.separator);
        tempDirectory.mkdirs();
        List<File> tempCsvFiles = new ArrayList<File>();
        for (int i = 0; i < totalRecords; i += 10000) {
            Map<String, String> params = new HashMap<String, String>();
            params.put(CommonParams.Q, "*:*");
            params.put(CommonParams.FQ, "-bundleName:[* TO *] AND type:" + Constants.BITSTREAM);
            params.put(CommonParams.WT, "csv");
            params.put(CommonParams.ROWS, String.valueOf(10000));
            params.put(CommonParams.START, String.valueOf(i));

            String solrRequestUrl = solr.getBaseURL() + "/select";
            solrRequestUrl = generateURL(solrRequestUrl, params);

            HttpGet get = new HttpGet(solrRequestUrl);
            HttpResponse response = new DefaultHttpClient().execute(get);

            InputStream csvOutput = response.getEntity().getContent();
            Reader csvReader = new InputStreamReader(csvOutput);
            List<String[]> rows = new CSVReader(csvReader).readAll();
            String[][] csvParsed = rows.toArray(new String[rows.size()][]);
            String[] header = csvParsed[0];
            //Attempt to find the bitstream id index !
            int idIndex = 0;
            for (int j = 0; j < header.length; j++) {
                if (header[j].equals("id")) {
                    idIndex = j;
                }
            }

            File tempCsv = new File(tempDirectory.getPath() + File.separatorChar + "temp." + i + ".csv");
            tempCsvFiles.add(tempCsv);
            CSVWriter csvp = new CSVWriter(new FileWriter(tempCsv));
            //csvp.setAlwaysQuote(false);

            //Write the header !
            csvp.writeNext((String[]) ArrayUtils.add(header, "bundleName"));
            Map<String, String> bitBundleCache = new HashMap<>();
            //Loop over each line (skip the headers though)!
            for (int j = 1; j < csvParsed.length; j++) {
                String[] csvLine = csvParsed[j];
                //Write the default line !
                String bitstreamId = csvLine[idIndex];
                //Attempt to retrieve our bundle name from the cache !
                String bundleName = bitBundleCache.get(bitstreamId);
                if (bundleName == null) {
                    //Nothing cached; retrieve the bitstream
                    Bitstream bitstream = bitstreamService.findByIdOrLegacyId(context, bitstreamId);
                    //Attempt to retrieve our bitstream !
                    if (bitstream != null) {
                        List<Bundle> bundles = bitstream.getBundles();
                        if (bundles != null && 0 < bundles.size()) {
                            Bundle bundle = bundles.get(0);
                            bundleName = bundle.getName();
                        } else {
                            //No bundle found, we are either a collection or a community logo, check for it !
                            DSpaceObject parentObject = bitstreamService.getParentObject(context, bitstream);
                            if (parentObject instanceof Collection) {
                                bundleName = "LOGO-COLLECTION";
                            } else if (parentObject instanceof Community) {
                                bundleName = "LOGO-COMMUNITY";
                            }

                        }
                        //Cache the bundle name
                        bitBundleCache.put(bitstream.getID().toString(), bundleName);
                    }
                    //Check whether we still lack a bundle name
                    //If so & we are not removing deleted bitstreams, tag the record with a BITSTREAM_DELETED bundle name
                    if (bundleName == null && !removeDeletedBitstreams) {
                        bundleName = "BITSTREAM_DELETED";
                    }
                }
                csvp.writeNext((String[]) ArrayUtils.add(csvLine, bundleName));
            }

            //Flush & close the csv writer for this batch
            csvp.flush();
            csvp.close();
        }

        //Add all the separate csv files
        for (File tempCsv : tempCsvFiles) {
            ContentStreamUpdateRequest contentStreamUpdateRequest = new ContentStreamUpdateRequest(
                    "/update/csv");
            contentStreamUpdateRequest.setParam("stream.contentType", "text/plain;charset=utf-8");
            contentStreamUpdateRequest.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
            contentStreamUpdateRequest.addFile(tempCsv, "text/plain;charset=utf-8");

            solr.request(contentStreamUpdateRequest);
        }

        //Now that all our new bitstream stats are in place, delete all the old ones !
        solr.deleteByQuery("-bundleName:[* TO *] AND type:" + Constants.BITSTREAM);
        //Commit everything to wrap up
        solr.commit(true, true);
        //Clean up our directory !
        FileUtils.deleteDirectory(tempDirectory);
    } catch (Exception e) {
        log.error("Error while updating the bitstream statistics", e);
        throw e;
    } finally {
        context.abort();
    }
}

From source file:org.dspace.statistics.SolrLoggerServiceImpl.java

License:BSD License

@Override
public void exportHits() throws Exception {
    Context context = new Context();

    File tempDirectory = new File(
            configurationService.getProperty("dspace.dir") + File.separator + "temp" + File.separator);
    tempDirectory.mkdirs();

    try {
        //First of all retrieve the total number of records to be updated
        SolrQuery query = new SolrQuery();
        query.setQuery("*:*");

        ModifiableSolrParams solrParams = new ModifiableSolrParams();
        solrParams.set(CommonParams.Q, "statistics_type:view OR (*:* AND -statistics_type:*)");
        solrParams.set(CommonParams.WT, "javabin");
        solrParams.set(CommonParams.ROWS, String.valueOf(10000));

        addAdditionalSolrYearCores(query);
        long totalRecords = solr.query(query).getResults().getNumFound();
        System.out.println("There are " + totalRecords + " usage events in SOLR for download/view.");

        for (int i = 0; i < totalRecords; i += 10000) {
            solrParams.set(CommonParams.START, String.valueOf(i));
            QueryResponse queryResponse = solr.query(solrParams);
            SolrDocumentList docs = queryResponse.getResults();

            File exportOutput = new File(
                    tempDirectory.getPath() + File.separatorChar + "usagestats_" + i + ".csv");
            exportOutput.delete();

            //export docs
            addDocumentsToFile(context, docs, exportOutput);
            System.out.println("Export hits [" + i + " - " + String.valueOf(i + 9999) + "] to "
                    + exportOutput.getCanonicalPath());
        }
    } catch (Exception e) {
        log.error("Error while exporting SOLR data", e);
        throw e;
    } finally {
        context.abort();
    }
}

From source file:org.eclipse.orion.internal.server.search.IndexPurgeJob.java

License:Open Source License

@Override
protected IStatus run(IProgressMonitor monitor) {
    Logger logger = LoggerFactory.getLogger(Indexer.class);
    if (logger.isDebugEnabled())
        logger.debug("Purging indexes"); //$NON-NLS-1$
    long start = System.currentTimeMillis();
    SolrQuery query = findAllQuery();
    try {
        QueryResponse solrResponse = this.server.query(query);
        SolrDocumentList result = solrResponse.getResults();
        long numFound = result.getNumFound();
        long processed = 0;
        List<String> listIds = new ArrayList<String>();
        if (numFound > processed) {
            while (true) {
                checkCanceled(monitor);
                markStaleIndexes(result, listIds);
                processed += PAGE_SIZE;
                if (processed >= numFound)
                    break;
                query.setParam(CommonParams.START, Long.toString(processed));
                solrResponse = this.server.query(query);
                result = solrResponse.getResults();
                // New indexes may have been added, perhaps
                numFound = result.getNumFound();
            }
        }

        checkCanceled(monitor);
        if (listIds.size() > 0) {
            this.server.deleteById(listIds);
            this.server.commit();
        }
        if (logger.isDebugEnabled())
            logger.debug("\tPurged: " + listIds.size()); //$NON-NLS-1$

    } catch (Exception e) {
        handleIndexingFailure(e);
    }
    long duration = System.currentTimeMillis() - start;
    if (logger.isDebugEnabled())
        logger.debug("Purge job took " + duration + "ms"); //$NON-NLS-1$ //$NON-NLS-2$

    //throttle scheduling frequency so the job never runs more than 5% of the time
    long delay = Math.max(DEFAULT_DELAY, duration * 20);
    schedule(delay);
    return Status.OK_STATUS;
}

From source file:org.nlp.solr.handler.component.PagerComponent.java

License:GNU General Public License

@Override
public void process(ResponseBuilder rb) throws IOException {
    /* get request params */
    SolrParams par = rb.req.getParams();
    int rows = par.getInt(CommonParams.ROWS, 0);
    int start = par.getInt(CommonParams.START, 0);
    int pages = par.getInt(PARAM_PAGER, 0);
    int pages_pre = par.getInt(PARAM_PAGER_PRE, 2);

    /* anything to do? (rb is already dereferenced above, so no null check needed) */
    if (pages == 0 || rows == 0 || rb.getResults() == null)
        return;

    /* select result list */
    int doc_count = 0;

    if (rb.getResults().docSet != null)
        doc_count = rb.getResults().docSet.size();
    else
        return;

    /* pager list */
    NamedList<Object> lst = new SimpleOrderedMap<Object>();
    NamedList<Object> lst2 = new SimpleOrderedMap<Object>();

    /* paging pages */
    int page_count = doc_count / rows;
    int page_actual = start / rows;
    int page_pre = pages_pre;
    int page_post = pages - page_pre - 1;

    /* last page */
    if (doc_count % rows != 0)
        page_count++;

    /* page range */
    if (page_actual - page_pre < 0) {
        page_post += -(page_actual - page_pre);
        page_pre -= -(page_actual - page_pre);
    } else if (page_actual + page_post > page_count) {
        page_post = pages - page_pre;
        page_pre = page_actual + pages - page_count;
    }

    /* sanity */
    if (page_pre < 0)
        page_pre = 0;
    if (page_post < 0)
        page_post = 0;

    /* next pages list */
    int i = (page_actual - page_pre);
    for (i = (i <= 0 ? 0 : i); i < page_count && i <= (page_actual + page_post); i++)
        lst2.add(Integer.toString(i + 1), i * rows);
    lst.add("pages", lst2);

    /* navi */
    if (page_actual > 0)
        lst.add("prev", (page_actual - 1) * rows);
    if (page_actual - page_pre > 0)
        lst.add("first", 0);
    if (page_actual < (page_count - 1))
        lst.add("next", (page_actual + 1) * rows);
    if (page_actual + page_post < (page_count - 1))
        lst.add("last", (page_count - 1) * rows);
    lst.add("actual", page_actual + 1);
    lst.add("count", page_count);

    /* finish */
    rb.rsp.add("pager", lst);
}

From source file:org.opencms.search.solr.CmsSolrIndex.java

License:Open Source License

/**
 * Performs the actual search.<p>
 *
 * @param cms the current OpenCms context
 * @param ignoreMaxRows <code>true</code> to return all requested rows, <code>false</code> to use max rows
 * @param query the OpenCms Solr query
 * @param response the servlet response to write the query result to, may also be <code>null</code>
 * @param ignoreSearchExclude if set to false, only contents with search_exclude unset or "false" will be found - typical for the non-gallery case
 * @param filter the resource filter to use
 *
 * @return the found documents
 *
 * @throws CmsSearchException if something goes wrong
 *
 * @see #search(CmsObject, CmsSolrQuery, boolean)
 */
@SuppressWarnings("unchecked")
public CmsSolrResultList search(CmsObject cms, final CmsSolrQuery query, boolean ignoreMaxRows,
        ServletResponse response, boolean ignoreSearchExclude, CmsResourceFilter filter)
        throws CmsSearchException {

    // check if the user is allowed to access this index
    checkOfflineAccess(cms);
    if (!ignoreSearchExclude) {
        query.addFilterQuery(CmsSearchField.FIELD_SEARCH_EXCLUDE + ":\"false\"");
    }

    int previousPriority = Thread.currentThread().getPriority();
    long startTime = System.currentTimeMillis();

    // remember the initial query
    SolrQuery initQuery = query.clone();

    query.setHighlight(false);
    LocalSolrQueryRequest solrQueryRequest = null;
    try {

        // initialize the search context
        CmsObject searchCms = OpenCms.initCmsObject(cms);

        // change thread priority in order to reduce search impact on overall system performance
        if (getPriority() > 0) {
            Thread.currentThread().setPriority(getPriority());
        }

        // the lists storing the found documents that will be returned
        List<CmsSearchResource> resourceDocumentList = new ArrayList<CmsSearchResource>();
        SolrDocumentList solrDocumentList = new SolrDocumentList();

        // Initialize rows, offset, end and the current page.
        int rows = query.getRows() != null ? query.getRows().intValue() : CmsSolrQuery.DEFAULT_ROWS.intValue();
        if (!ignoreMaxRows && (rows > ROWS_MAX)) {
            rows = ROWS_MAX;
        }
        int start = query.getStart() != null ? query.getStart().intValue() : 0;
        int end = start + rows;
        int page = 0;
        if (rows > 0) {
            page = Math.round(start / rows) + 1;
        }

        // set the start to '0' and expand the rows before performing the query
        query.setStart(new Integer(0));
        query.setRows(new Integer((5 * rows * page) + start));

        // perform the Solr query and remember the original Solr response
        QueryResponse queryResponse = m_solr.query(query);
        long solrTime = System.currentTimeMillis() - startTime;

        // initialize the counts
        long hitCount = queryResponse.getResults().getNumFound();
        start = -1;
        end = -1;
        if ((rows > 0) && (page > 0) && (hitCount > 0)) {
            // calculate the final size of the search result
            start = rows * (page - 1);
            end = start + rows;
            // ensure that both start and end are inside the range of foundDocuments.size()
            start = new Long((start > hitCount) ? hitCount : start).intValue();
            end = new Long((end > hitCount) ? hitCount : end).intValue();
        } else {
            // return all found documents in the search result
            start = 0;
            end = new Long(hitCount).intValue();
        }
        long visibleHitCount = hitCount;
        float maxScore = 0;

        // If we're using a postprocessor, (re-)initialize it before using it
        if (m_postProcessor != null) {
            m_postProcessor.init();
        }

        // process found documents
        List<CmsSearchResource> allDocs = new ArrayList<CmsSearchResource>();
        int cnt = 0;
        for (int i = 0; (i < queryResponse.getResults().size()) && (cnt < end); i++) {
            try {
                SolrDocument doc = queryResponse.getResults().get(i);
                CmsSolrDocument searchDoc = new CmsSolrDocument(doc);
                if (needsPermissionCheck(searchDoc)) {
                    // only if the document is an OpenCms internal resource perform the permission check
                    CmsResource resource = filter == null ? getResource(searchCms, searchDoc)
                            : getResource(searchCms, searchDoc, filter);
                    if (resource != null) {
                        // permission check performed successfully: the user has read permissions!
                        if (cnt >= start) {
                            if (m_postProcessor != null) {
                                doc = m_postProcessor.process(searchCms, resource,
                                        (SolrInputDocument) searchDoc.getDocument());
                            }
                            resourceDocumentList.add(new CmsSearchResource(resource, searchDoc));
                            if (null != doc) {
                                solrDocumentList.add(doc);
                            }
                            maxScore = maxScore < searchDoc.getScore() ? searchDoc.getScore() : maxScore;
                        }
                        allDocs.add(new CmsSearchResource(resource, searchDoc));
                        cnt++;
                    } else {
                        visibleHitCount--;
                    }
                } else {
                    // if permission check is not required for this index,
                    // add a pseudo resource together with document to the results
                    resourceDocumentList.add(new CmsSearchResource(PSEUDO_RES, searchDoc));
                    solrDocumentList.add(doc);
                    maxScore = maxScore < searchDoc.getScore() ? searchDoc.getScore() : maxScore;
                    cnt++;
                }
            } catch (Exception e) {
                // should not happen, but if it does we want to go on with the next result nevertheless
                LOG.warn(Messages.get().getBundle().key(Messages.LOG_SOLR_ERR_RESULT_ITERATION_FAILED_0), e);
            }
        }
        // the last documents were all secret so let's take the last found docs
        if (resourceDocumentList.isEmpty() && (allDocs.size() > 0)) {
            page = Math.round(allDocs.size() / rows) + 1;
            int showCount = allDocs.size() % rows;
            showCount = showCount == 0 ? rows : showCount;
            start = allDocs.size() - new Long(showCount).intValue();
            end = allDocs.size();
            if (allDocs.size() > start) {
                resourceDocumentList = allDocs.subList(start, end);
                for (CmsSearchResource r : resourceDocumentList) {
                    maxScore = maxScore < r.getDocument().getScore() ? r.getDocument().getScore() : maxScore;
                    solrDocumentList.add(((CmsSolrDocument) r.getDocument()).getSolrDocument());
                }
            }
        }
        long processTime = System.currentTimeMillis() - startTime - solrTime;

        // create and return the result
        solrDocumentList.setStart(start);
        solrDocumentList.setMaxScore(new Float(maxScore));
        solrDocumentList.setNumFound(visibleHitCount);

        queryResponse.getResponse().setVal(queryResponse.getResponse().indexOf(QUERY_RESPONSE_NAME, 0),
                solrDocumentList);

        queryResponse.getResponseHeader().setVal(queryResponse.getResponseHeader().indexOf(QUERY_TIME_NAME, 0),
                new Integer(new Long(System.currentTimeMillis() - startTime).intValue()));
        long highlightEndTime = System.currentTimeMillis();
        SolrCore core = m_solr instanceof EmbeddedSolrServer
                ? ((EmbeddedSolrServer) m_solr).getCoreContainer().getCore(getCoreName())
                : null;
        CmsSolrResultList result = null;
        try {
            SearchComponent highlightComponenet = null;
            if (core != null) {
                highlightComponenet = core.getSearchComponent("highlight");
                solrQueryRequest = new LocalSolrQueryRequest(core, queryResponse.getResponseHeader());
            }
            SolrQueryResponse solrQueryResponse = null;
            if (solrQueryRequest != null) {
                // create and initialize the solr response
                solrQueryResponse = new SolrQueryResponse();
                solrQueryResponse.setAllValues(queryResponse.getResponse());
                int paramsIndex = queryResponse.getResponseHeader().indexOf(HEADER_PARAMS_NAME, 0);
                NamedList<Object> header = null;
                Object o = queryResponse.getResponseHeader().getVal(paramsIndex);
                if (o instanceof NamedList) {
                    header = (NamedList<Object>) o;
                    header.setVal(header.indexOf(CommonParams.ROWS, 0), new Integer(rows));
                    header.setVal(header.indexOf(CommonParams.START, 0), new Long(start));
                }

                // set the OpenCms Solr query as parameters to the request
                solrQueryRequest.setParams(initQuery);

                // perform the highlighting
                if ((header != null) && (initQuery.getHighlight()) && (highlightComponenet != null)) {
                    header.add(HighlightParams.HIGHLIGHT, "on");
                    if ((initQuery.getHighlightFields() != null)
                            && (initQuery.getHighlightFields().length > 0)) {
                        header.add(HighlightParams.FIELDS,
                                CmsStringUtil.arrayAsString(initQuery.getHighlightFields(), ","));
                    }
                    String formatter = initQuery.getParams(HighlightParams.FORMATTER) != null
                            ? initQuery.getParams(HighlightParams.FORMATTER)[0]
                            : null;
                    if (formatter != null) {
                        header.add(HighlightParams.FORMATTER, formatter);
                    }
                    if (initQuery.getHighlightFragsize() != 100) {
                        header.add(HighlightParams.FRAGSIZE, new Integer(initQuery.getHighlightFragsize()));
                    }
                    if (initQuery.getHighlightRequireFieldMatch()) {
                        header.add(HighlightParams.FIELD_MATCH,
                                new Boolean(initQuery.getHighlightRequireFieldMatch()));
                    }
                    if (CmsStringUtil.isNotEmptyOrWhitespaceOnly(initQuery.getHighlightSimplePost())) {
                        header.add(HighlightParams.SIMPLE_POST, initQuery.getHighlightSimplePost());
                    }
                    if (CmsStringUtil.isNotEmptyOrWhitespaceOnly(initQuery.getHighlightSimplePre())) {
                        header.add(HighlightParams.SIMPLE_PRE, initQuery.getHighlightSimplePre());
                    }
                    if (initQuery.getHighlightSnippets() != 1) {
                        header.add(HighlightParams.SNIPPETS, new Integer(initQuery.getHighlightSnippets()));
                    }
                    ResponseBuilder rb = new ResponseBuilder(solrQueryRequest, solrQueryResponse,
                            Collections.singletonList(highlightComponenet));
                    try {
                        rb.doHighlights = true;
                        DocListAndSet res = new DocListAndSet();
                        SchemaField idField = OpenCms.getSearchManager().getSolrServerConfiguration()
                                .getSolrSchema().getUniqueKeyField();

                        int[] luceneIds = new int[rows];
                        int docs = 0;
                        for (SolrDocument doc : solrDocumentList) {
                            String idString = (String) doc.getFirstValue(CmsSearchField.FIELD_ID);
                            int id = solrQueryRequest.getSearcher().getFirstMatch(
                                    new Term(idField.getName(), idField.getType().toInternal(idString)));
                            luceneIds[docs++] = id;
                        }
                        res.docList = new DocSlice(0, docs, luceneIds, null, docs, 0);
                        rb.setResults(res);
                        rb.setQuery(QParser.getParser(initQuery.getQuery(), null, solrQueryRequest).getQuery());
                        rb.setQueryString(initQuery.getQuery());
                        highlightComponenet.prepare(rb);
                        highlightComponenet.process(rb);
                        highlightComponenet.finishStage(rb);
                    } catch (Exception e) {
                        LOG.error(e.getMessage() + " in query: " + initQuery, new Exception(e));
                    }

                    // Make highlighting also available via the CmsSolrResultList
                    queryResponse.setResponse(solrQueryResponse.getValues());

                    highlightEndTime = System.currentTimeMillis();
                }
            }

            result = new CmsSolrResultList(initQuery, queryResponse, solrDocumentList, resourceDocumentList,
                    start, new Integer(rows), end, page, visibleHitCount, new Float(maxScore), startTime,
                    highlightEndTime);
            if (LOG.isDebugEnabled()) {
                Object[] logParams = new Object[] { new Long(System.currentTimeMillis() - startTime),
                        new Long(result.getNumFound()), new Long(solrTime), new Long(processTime),
                        new Long(result.getHighlightEndTime() != 0 ? result.getHighlightEndTime() - startTime
                                : 0) };
                LOG.debug(query.toString() + "\n"
                        + Messages.get().getBundle().key(Messages.LOG_SOLR_SEARCH_EXECUTED_5, logParams));
            }
            if (response != null) {
                writeResp(response, solrQueryRequest, solrQueryResponse);
            }
        } finally {
            if (solrQueryRequest != null) {
                solrQueryRequest.close();
            }
            if (core != null) {
                core.close();
            }
        }
        return result;
    } catch (Exception e) {
        throw new CmsSearchException(Messages.get().container(Messages.LOG_SOLR_ERR_SEARCH_EXECUTION_FAILD_1,
                CmsEncoder.decode(query.toString()), e), e);
    } finally {
        if (solrQueryRequest != null) {
            solrQueryRequest.close();
        }
        // re-set thread to previous priority
        Thread.currentThread().setPriority(previousPriority);
    }

}

From source file:org.phenotips.diagnosis.differentialPhenotypes.PhenotypeSuggestService.java

License:Open Source License

/**
 * Prepare the map of parameters that can be passed to a Solr query, in order to get a list of diseases matching the
 * selected positive and negative phenotypes.
 *
 * @param phenotypes the list of already selected phenotypes
 * @param nphenotypes phenotypes that are not observed in the patient
 * @return the computed Solr query parameters
 */
private SolrQuery prepareParams(Collection<String> phenotypes, Collection<String> nphenotypes) {
    SolrQuery result = new SolrQuery();
    String q = "symptom:" + StringUtils.join(phenotypes, " symptom:");
    if (!nphenotypes.isEmpty()) {
        q += "  not_symptom:" + StringUtils.join(nphenotypes, " not_symptom:");
    }
    q += " -nameSort:\\** -nameSort:\\+* -nameSort:\\^*";
    result.set(CommonParams.Q, q.replaceAll("HP:", "HP\\\\:"));
    result.set(CommonParams.ROWS, "100");
    result.set(CommonParams.START, "0");
    result.set(CommonParams.DEBUG_QUERY, Boolean.toString(true));
    result.set(CommonParams.EXPLAIN_STRUCT, Boolean.toString(true));

    return result;
}

From source file:org.phenotips.ontology.internal.GeneNomenclature.java

License:Open Source License

@Override
public Set<OntologyTerm> search(Map<String, ?> fieldValues, Map<String, String> queryOptions) {
    try {
        HttpGet method = new HttpGet(
                SEARCH_SERVICE_URL + URLEncoder.encode(generateQuery(fieldValues), Consts.UTF_8.name()));
        method.setHeader(HttpHeaders.ACCEPT, ContentType.APPLICATION_JSON.getMimeType());
        try (CloseableHttpResponse httpResponse = this.client.execute(method)) {
            String response = IOUtils.toString(httpResponse.getEntity().getContent(), Consts.UTF_8);
            JSONObject responseJSON = (JSONObject) JSONSerializer.toJSON(response);
            JSONArray docs = responseJSON.getJSONObject(RESPONSE_KEY).getJSONArray(DATA_KEY);
            if (docs.size() >= 1) {
                Set<OntologyTerm> result = new LinkedHashSet<>();
                // The remote service doesn't offer any query control, so manually select the right range
                int start = 0;
                if (queryOptions.containsKey(CommonParams.START)
                        && StringUtils.isNumeric(queryOptions.get(CommonParams.START))) {
                    start = Math.max(0, Integer.parseInt(queryOptions.get(CommonParams.START)));
                }
                int end = docs.size();
                if (queryOptions.containsKey(CommonParams.ROWS)
                        && StringUtils.isNumeric(queryOptions.get(CommonParams.ROWS))) {
                    end = Math.min(end, start + Integer.parseInt(queryOptions.get(CommonParams.ROWS)));
                }

                for (int i = start; i < end; ++i) {
                    result.add(new JSONOntologyTerm(docs.getJSONObject(i), this));
                }
                return result;
                // This is too slow, for the moment only return summaries
                // return getTerms(ids);
            }
        } catch (IOException ex) {
            this.logger.warn("Failed to search gene names: {}", ex.getMessage());
        }
    } catch (UnsupportedEncodingException ex) {
        // This will not happen, UTF-8 is always available
    }
    return Collections.emptySet();
}