List of usage examples for org.apache.solr.common.params.CommonParams.Q
Declaration: String Q
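Before the source-file examples, here is a minimal self-contained sketch of what CommonParams.Q is: the constant for Solr's standard q (main query) parameter. The class name and the query text below are made up for illustration; only the SolrJ calls are real, and SolrQuery.setQuery(...) sets the same parameter under the hood.

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;

public class CommonParamsQExample {
    public static void main(String[] args) {
        // CommonParams.Q is the constant "q", Solr's main query parameter.
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set(CommonParams.Q, "title:solr"); // same as ?q=title:solr
        params.set(CommonParams.ROWS, "10");

        // SolrQuery.setQuery(...) writes the same "q" parameter.
        SolrQuery query = new SolrQuery();
        query.setQuery("title:solr");
        System.out.println(params.get(CommonParams.Q).equals(query.getQuery())); // true
    }
}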
From source file:org.dspace.statistics.SolrLoggerServiceImpl.java
License:BSD License
@Override
public void shardSolrIndex() throws IOException, SolrServerException {
    /*
     * Start by faceting by year so we can include each year in a separate core.
     */
    SolrQuery yearRangeQuery = new SolrQuery();
    yearRangeQuery.setQuery("*:*");
    yearRangeQuery.setRows(0);
    yearRangeQuery.setFacet(true);
    yearRangeQuery.add(FacetParams.FACET_RANGE, "time");
    // We go back to the year 2000; this is a bit of overkill, but this way we ensure we have everything.
    // The alternative would be to sort, but that isn't recommended since it would be a very costly query.
    yearRangeQuery.add(FacetParams.FACET_RANGE_START,
            "NOW/YEAR-" + (Calendar.getInstance().get(Calendar.YEAR) - 2000) + "YEARS");
    // Add the +0YEARS to ensure that we do NOT include the current year
    yearRangeQuery.add(FacetParams.FACET_RANGE_END, "NOW/YEAR+0YEARS");
    yearRangeQuery.add(FacetParams.FACET_RANGE_GAP, "+1YEAR");
    yearRangeQuery.add(FacetParams.FACET_MINCOUNT, String.valueOf(1));

    // Create a temp directory to store our files in
    File tempDirectory = new File(
            configurationService.getProperty("dspace.dir") + File.separator + "temp" + File.separator);
    tempDirectory.mkdirs();

    QueryResponse queryResponse = solr.query(yearRangeQuery);
    // We only have one range query
    List<RangeFacet.Count> yearResults = queryResponse.getFacetRanges().get(0).getCounts();
    for (RangeFacet.Count count : yearResults) {
        long totalRecords = count.getCount();

        // Create a range query from this; we start with our current year
        DCDate dcStart = new DCDate(count.getValue());
        Calendar endDate = Calendar.getInstance();
        // Advance one year for the start of the next one
        endDate.setTime(dcStart.toDate());
        endDate.add(Calendar.YEAR, 1);
        DCDate dcEndDate = new DCDate(endDate.getTime());

        StringBuilder filterQuery = new StringBuilder();
        filterQuery.append("time:([");
        filterQuery.append(ClientUtils.escapeQueryChars(dcStart.toString()));
        filterQuery.append(" TO ");
        filterQuery.append(ClientUtils.escapeQueryChars(dcEndDate.toString()));
        filterQuery.append("]");
        // The next part of the filter query excludes the content from midnight of the next year
        filterQuery.append(" NOT ").append(ClientUtils.escapeQueryChars(dcEndDate.toString()));
        filterQuery.append(")");

        Map<String, String> yearQueryParams = new HashMap<String, String>();
        yearQueryParams.put(CommonParams.Q, "*:*");
        yearQueryParams.put(CommonParams.ROWS, String.valueOf(10000));
        yearQueryParams.put(CommonParams.FQ, filterQuery.toString());
        yearQueryParams.put(CommonParams.WT, "csv");

        // Start by creating a new core
        String coreName = "statistics-" + dcStart.getYear();
        HttpSolrServer statisticsYearServer = createCore(solr, coreName);

        System.out.println("Moving: " + totalRecords + " into core " + coreName);
        log.info("Moving: " + totalRecords + " records into core " + coreName);

        List<File> filesToUpload = new ArrayList<File>();
        for (int i = 0; i < totalRecords; i += 10000) {
            String solrRequestUrl = solr.getBaseURL() + "/select";
            solrRequestUrl = generateURL(solrRequestUrl, yearQueryParams);

            HttpGet get = new HttpGet(solrRequestUrl);
            HttpResponse response = new DefaultHttpClient().execute(get);
            InputStream csvInputstream = response.getEntity().getContent();
            // Write the csv output to a file
            File csvFile = new File(tempDirectory.getPath() + File.separatorChar + "temp."
                    + dcStart.getYear() + "." + i + ".csv");
            FileUtils.copyInputStreamToFile(csvInputstream, csvFile);
            filesToUpload.add(csvFile);

            // Add 10000 & start over again
            yearQueryParams.put(CommonParams.START, String.valueOf((i + 10000)));
        }

        for (File tempCsv : filesToUpload) {
            // Upload the data in the csv files to our new solr core
            ContentStreamUpdateRequest contentStreamUpdateRequest = new ContentStreamUpdateRequest(
                    "/update/csv");
            contentStreamUpdateRequest.setParam("stream.contentType", "text/plain;charset=utf-8");
            contentStreamUpdateRequest.setParam("skip", "_version_");
            contentStreamUpdateRequest.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
            contentStreamUpdateRequest.addFile(tempCsv, "text/plain;charset=utf-8");

            statisticsYearServer.request(contentStreamUpdateRequest);
        }
        statisticsYearServer.commit(true, true);

        // Delete contents of this year from our year query
        solr.deleteByQuery(filterQuery.toString());
        solr.commit(true, true);

        log.info("Moved " + totalRecords + " records into core: " + coreName);
    }
    FileUtils.deleteDirectory(tempDirectory);
}
From source file:org.dspace.statistics.SolrLoggerServiceImpl.java
License:BSD License
@Override
public void reindexBitstreamHits(boolean removeDeletedBitstreams) throws Exception {
    Context context = new Context();
    try {
        // First of all retrieve the total number of records to be updated
        SolrQuery query = new SolrQuery();
        query.setQuery("*:*");
        query.addFilterQuery("type:" + Constants.BITSTREAM);
        // Only retrieve records which do not have a bundle name
        query.addFilterQuery("-bundleName:[* TO *]");
        query.setRows(0);
        addAdditionalSolrYearCores(query);
        long totalRecords = solr.query(query).getResults().getNumFound();

        File tempDirectory = new File(
                configurationService.getProperty("dspace.dir") + File.separator + "temp" + File.separator);
        tempDirectory.mkdirs();
        List<File> tempCsvFiles = new ArrayList<File>();
        for (int i = 0; i < totalRecords; i += 10000) {
            Map<String, String> params = new HashMap<String, String>();
            params.put(CommonParams.Q, "*:*");
            params.put(CommonParams.FQ, "-bundleName:[* TO *] AND type:" + Constants.BITSTREAM);
            params.put(CommonParams.WT, "csv");
            params.put(CommonParams.ROWS, String.valueOf(10000));
            params.put(CommonParams.START, String.valueOf(i));

            String solrRequestUrl = solr.getBaseURL() + "/select";
            solrRequestUrl = generateURL(solrRequestUrl, params);

            HttpGet get = new HttpGet(solrRequestUrl);
            HttpResponse response = new DefaultHttpClient().execute(get);

            InputStream csvOutput = response.getEntity().getContent();
            Reader csvReader = new InputStreamReader(csvOutput);
            List<String[]> rows = new CSVReader(csvReader).readAll();
            String[][] csvParsed = rows.toArray(new String[rows.size()][]);
            String[] header = csvParsed[0];
            // Attempt to find the bitstream id index
            int idIndex = 0;
            for (int j = 0; j < header.length; j++) {
                if (header[j].equals("id")) {
                    idIndex = j;
                }
            }

            File tempCsv = new File(tempDirectory.getPath() + File.separatorChar + "temp." + i + ".csv");
            tempCsvFiles.add(tempCsv);
            CSVWriter csvp = new CSVWriter(new FileWriter(tempCsv));
            //csvp.setAlwaysQuote(false);
            // Write the header
            csvp.writeNext((String[]) ArrayUtils.add(header, "bundleName"));

            Map<String, String> bitBundleCache = new HashMap<>();
            // Loop over each line (skip the header though)
            for (int j = 1; j < csvParsed.length; j++) {
                String[] csvLine = csvParsed[j];
                String bitstreamId = csvLine[idIndex];
                // Attempt to retrieve our bundle name from the cache
                String bundleName = bitBundleCache.get(bitstreamId);
                if (bundleName == null) {
                    // Nothing found, retrieve the bitstream
                    Bitstream bitstream = bitstreamService.findByIdOrLegacyId(context, bitstreamId);
                    if (bitstream != null) {
                        List<Bundle> bundles = bitstream.getBundles();
                        if (bundles != null && 0 < bundles.size()) {
                            Bundle bundle = bundles.get(0);
                            bundleName = bundle.getName();
                        } else {
                            // No bundle found; we are either a collection or a community logo, check for it
                            DSpaceObject parentObject = bitstreamService.getParentObject(context, bitstream);
                            if (parentObject instanceof Collection) {
                                bundleName = "LOGO-COLLECTION";
                            } else if (parentObject instanceof Community) {
                                bundleName = "LOGO-COMMUNITY";
                            }
                        }
                        // Cache the bundle name
                        bitBundleCache.put(bitstream.getID().toString(), bundleName);
                    }
                    // If we have no bundle name & we are not removing deleted bitstreams,
                    // ensure that a BITSTREAM_DELETED bundle name is given
                    if (bundleName == null && !removeDeletedBitstreams) {
                        bundleName = "BITSTREAM_DELETED";
                    }
                }
                csvp.writeNext((String[]) ArrayUtils.add(csvLine, bundleName));
            } // end loop over parsed csv rows
            csvp.flush();
            csvp.close();
        }

        // Add all the separate csv files
        for (File tempCsv : tempCsvFiles) {
            // Upload the data in the csv files to our solr core
            ContentStreamUpdateRequest contentStreamUpdateRequest = new ContentStreamUpdateRequest(
                    "/update/csv");
            contentStreamUpdateRequest.setParam("stream.contentType", "text/plain;charset=utf-8");
            contentStreamUpdateRequest.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
            contentStreamUpdateRequest.addFile(tempCsv, "text/plain;charset=utf-8");

            solr.request(contentStreamUpdateRequest);
        }

        // Now that all our new bitstream stats are in place, delete all the old ones
        solr.deleteByQuery("-bundleName:[* TO *] AND type:" + Constants.BITSTREAM);
        // Commit everything to wrap up
        solr.commit(true, true);
        // Clean up our directory
        FileUtils.deleteDirectory(tempDirectory);
    } catch (Exception e) {
        log.error("Error while updating the bitstream statistics", e);
        throw e;
    } finally {
        context.abort();
    }
}
From source file:org.dspace.statistics.SolrLoggerServiceImpl.java
License:BSD License
@Override
public void exportHits() throws Exception {
    Context context = new Context();

    File tempDirectory = new File(
            configurationService.getProperty("dspace.dir") + File.separator + "temp" + File.separator);
    tempDirectory.mkdirs();

    try {
        // First of all retrieve the total number of records to be updated
        SolrQuery query = new SolrQuery();
        query.setQuery("*:*");

        ModifiableSolrParams solrParams = new ModifiableSolrParams();
        solrParams.set(CommonParams.Q, "statistics_type:view OR (*:* AND -statistics_type:*)");
        solrParams.set(CommonParams.WT, "javabin");
        solrParams.set(CommonParams.ROWS, String.valueOf(10000));

        addAdditionalSolrYearCores(query);
        long totalRecords = solr.query(query).getResults().getNumFound();
        System.out.println("There are " + totalRecords + " usage events in SOLR for download/view.");

        for (int i = 0; i < totalRecords; i += 10000) {
            solrParams.set(CommonParams.START, String.valueOf(i));
            QueryResponse queryResponse = solr.query(solrParams);
            SolrDocumentList docs = queryResponse.getResults();

            File exportOutput = new File(
                    tempDirectory.getPath() + File.separatorChar + "usagestats_" + i + ".csv");
            exportOutput.delete();

            // export docs
            addDocumentsToFile(context, docs, exportOutput);
            System.out.println("Export hits [" + i + " - " + String.valueOf(i + 9999) + "] to "
                    + exportOutput.getCanonicalPath());
        }
    } catch (Exception e) {
        log.error("Error while exporting SOLR data", e);
        throw e;
    } finally {
        context.abort();
    }
}
From source file:org.eclipse.orion.internal.server.search.grep.GrepServlet.java
License:Open Source License
private SearchOptions buildSearchOptions(HttpServletRequest req, HttpServletResponse resp) {
    SearchOptions options = new SearchOptions();

    String queryString = getEncodedParameter(req, CommonParams.Q);
    if (queryString == null)
        return null;
    if (queryString.length() > 0) {
        // divide into search terms delimited by space or plus ('+') character
        List<String> terms = new ArrayList<String>(Arrays.asList(queryString.split("[\\s\\+]+"))); //$NON-NLS-1$
        while (!terms.isEmpty()) {
            String term = terms.remove(0);
            if (term.length() == 0)
                continue;
            if (isSearchField(term)) {
                if (term.startsWith("NameLower:")) { //$NON-NLS-1$
                    // decode the search term; we do not want to decode the location
                    try {
                        term = URLDecoder.decode(term, "UTF-8"); //$NON-NLS-1$
                    } catch (UnsupportedEncodingException e) {
                        // try with encoded term
                    }
                    options.setIsCaseSensitive(false);
                    options.setFilenamePattern(term.substring(10));
                } else if (term.startsWith("Location:")) { //$NON-NLS-1$
                    String scope = term.substring(9 + req.getContextPath().length());
                    try {
                        scope = URLDecoder.decode(scope, "UTF-8"); //$NON-NLS-1$
                    } catch (UnsupportedEncodingException e) {
                        // try with encoded term
                    }
                    options.setScope(scope);
                    continue;
                } else if (term.startsWith("Name:")) { //$NON-NLS-1$
                    try {
                        term = URLDecoder.decode(term, "UTF-8"); //$NON-NLS-1$
                    } catch (UnsupportedEncodingException e) {
                        // try with encoded term
                    }
                    options.setIsCaseSensitive(true);
                    options.setFilenamePattern(term.substring(5));
                }
            } else {
                // decode the term string now
                try {
                    term = URLDecoder.decode(term, "UTF-8"); //$NON-NLS-1$
                } catch (UnsupportedEncodingException e) {
                    // try with encoded term
                }
                options.setSearchTerm(term);
                options.setFileSearch(true);
            }
        }
    }
    String login = req.getRemoteUser();
    options.setUsername(login);
    return options;
}
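To illustrate the term grammar this method accepts (a hypothetical request, not one taken from the Orion sources): a q value of Name:*.java+readme is split into two terms. Name:*.java yields a case-sensitive filename pattern of *.java, while the bare term readme becomes the content search term and switches the options into file-search mode.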
From source file:org.eclipse.orion.internal.server.search.SearchServlet.java
License:Open Source License
private SolrQuery buildSolrQuery(HttpServletRequest req) {
    SolrQuery query = new SolrQuery();
    query.setParam(CommonParams.WT, "json");
    query.setParam(CommonParams.FL, "Id,Name,Length,Directory,LastModified,Location");
    query.setParam("hl", "true");
    String queryString = req.getParameter(CommonParams.Q);
    queryString += " AND " + ProtocolConstants.KEY_USER_NAME + ':' + req.getRemoteUser();
    query.setQuery(queryString);
    return query;
}
From source file:org.eclipse.orion.internal.server.search.SearchServlet.java
License:Open Source License
/**
 * Writes the response to the search query to the HTTP response's output stream.
 */
private void writeResponse(SolrQuery query, HttpServletRequest httpRequest, HttpServletResponse httpResponse,
        QueryResponse queryResponse) throws IOException {
    SolrCore core = SearchActivator.getInstance().getSolrCore();
    // this seems to be the only way to obtain the JSON response representation
    SolrQueryRequest solrRequest = new LocalSolrQueryRequest(core, query.toNamedList());
    SolrQueryResponse solrResponse = new SolrQueryResponse();
    // bash the query in the response to remove user info
    NamedList<Object> params = (NamedList<Object>) queryResponse.getHeader().get("params"); //$NON-NLS-1$
    params.remove(CommonParams.Q);
    params.add(CommonParams.Q, httpRequest.getParameter(CommonParams.Q));
    solrResponse.setAllValues(queryResponse.getResponse());
    QueryResponseWriter writer = core.getQueryResponseWriter("json"); //$NON-NLS-1$
    writer.write(httpResponse.getWriter(), solrRequest, solrResponse);
}
From source file:org.mitre.opensextant.extraction.SolrGazetteer.java
License:Apache License
private void initialize() throws IOException {
    java.io.InputStream io = SolrGazetteer.class.getResourceAsStream("/country-names-2013.csv");
    java.io.Reader countryIO = new InputStreamReader(io);
    CsvMapReader countryMap = new CsvMapReader(countryIO, CsvPreference.EXCEL_PREFERENCE);
    String[] columns = countryMap.getHeader(true);
    Map<String, String> country_names = null;
    while ((country_names = countryMap.read(columns)) != null) {
        String n = country_names.get("country_name");
        String cc = country_names.get("ISO2_cc");
        String fips = country_names.get("FIPS_cc");

        iso2fips.put(cc, fips);
        fips2iso.put(fips, cc);

        if (n == null || cc == null) {
            continue;
        }
        // FIPS could be *, but as long as we use ISO2, we're fine.
        if ("*".equals(cc)) {
            cc = fips.toUpperCase();
        }
        // Normalize: "US" => "united states of america"
        _default_country_names.put(cc.toUpperCase(), n.toLowerCase());
    }
    if (_default_country_names.isEmpty()) {
        throw new IOException("No data found in country name map");
    }

    String config_solr_home = System.getProperty("solr.solr.home");
    solr = new SolrProxy(config_solr_home, "gazetteer");

    params.set(CommonParams.Q, "*:*");
    params.set(CommonParams.FL,
            "id,name,cc,adm1,adm2,feat_class,feat_code,lat,lon,place_id,name_bias,id_bias,name_type");
    try {
        loadCountries();
    } catch (SolrServerException loadErr) {
        throw new IOException(loadErr);
    }
}
From source file:org.opencms.main.OpenCmsSolrHandler.java
License:Open Source License
/**
 * @see org.opencms.main.I_CmsRequestHandler#handle(javax.servlet.http.HttpServletRequest, javax.servlet.http.HttpServletResponse, java.lang.String)
 */
public void handle(HttpServletRequest req, HttpServletResponse res, String name) throws IOException {
    final HANDLER_NAMES handlerName = HANDLER_NAMES.valueOf(name);
    if (handlerName != null) {
        try {
            Context context = initializeRequest(req, res);
            if ((context.m_params.get(CommonParams.Q) != null)
                    || (context.m_params.get(CommonParams.FQ) != null)) {
                switch (handlerName) {
                    case SolrSelect:
                        context.m_index.select(res, context.m_cms, context.m_query, true);
                        break;
                    case SolrSpell:
                        context.m_index.spellCheck(res, context.m_cms, context.m_query);
                        break;
                    default:
                        break;
                }
            }
        } catch (Exception e) {
            res.setStatus(HttpServletResponse.SC_EXPECTATION_FAILED);
            String message = Messages.get().getBundle().key(Messages.GUI_SOLR_UNEXPECTED_ERROR_0);
            String formattedException = CmsException.getStackTraceAsString(e).replace("\n", "<br/>");
            res.getWriter().println(
                    Messages.get().getBundle().key(Messages.GUI_SOLR_ERROR_HTML_1, message + formattedException));
        }
    }
}
From source file:org.opensextant.extractors.geo.SolrGazetteer.java
License:Apache License
/**
 * For larger areas choose a higher number of rows to return. If you choose
 * to use Solr spatial score-by-distance for sorting or anything else, Solr
 * appears to want to load the entire index into memory, so that sort
 * mechanism is off by default.
 *
 * @param rows
 *            rows to include in spatial lookups
 * @return solr params
 */
protected static ModifiableSolrParams createGeodeticLookupParams(int rows) {
    /*
     * Basic parameters for geospatial lookup. These are reused, and only pt
     * and d are set for each lookup.
     */
    ModifiableSolrParams p = new ModifiableSolrParams();
    p.set(CommonParams.FL, "id,name,cc,adm1,adm2,feat_class,feat_code,"
            + "geo,place_id,name_bias,id_bias,name_type");
    p.set(CommonParams.ROWS, rows);
    p.set(CommonParams.Q, "{!geofilt sfield=geo}");
    // p.set(CommonParams.SORT, "score desc");
    p.set("spatial", "true");
    return p;
}
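The comment inside the method notes that only pt and d vary per lookup. A minimal sketch of how a caller might complete those params, following the conventions of Solr's {!geofilt} parser: the row count, coordinates, and gazetteerServer variable are made up for illustration, and createGeodeticLookupParams is the method above.

// Hypothetical usage sketch: reuse the shared params, setting only the
// query point (pt) and search radius in kilometers (d) for this lookup.
static QueryResponse lookupNear(SolrServer gazetteerServer, double lat, double lon, double radiusKm)
        throws SolrServerException {
    ModifiableSolrParams p = createGeodeticLookupParams(25); // 25 rows: example value
    p.set("pt", lat + "," + lon);         // geofilt query point, "lat,lon"
    p.set("d", String.valueOf(radiusKm)); // geofilt distance in km
    return gazetteerServer.query(p);
}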
From source file:org.opensextant.extractors.geo.SolrGazetteer.java
License:Apache License
/**
 * Initialize. Cascading env variables: first use value from constructor,
 * then opensextant.solr, then solr.solr.home
 *
 * @throws ConfigException
 *             Signals that a configuration exception has occurred.
 */
private void initialize(String solrHome) throws ConfigException {
    solr = solrHome != null ? new SolrProxy(solrHome, "gazetteer") : new SolrProxy("gazetteer");

    params.set(CommonParams.Q, "*:*");
    params.set(CommonParams.FL,
            "id,name,cc,adm1,adm2,feat_class,feat_code,geo,place_id,name_bias,id_bias,name_type");
    try {
        this.countryCodes = loadCountries(solr.getInternalSolrServer());
    } catch (SolrServerException loadErr) {
        throw new ConfigException("SolrGazetteer is unable to load countries due to Solr error", loadErr);
    } catch (IOException ioErr) {
        throw new ConfigException("SolrGazetteer is unable to load countries due to IO/file error", ioErr);
    }
}