List of usage examples for org.apache.solr.client.solrj SolrQuery getQuery
public String getQuery()
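getQuery() returns the main query string (the q parameter) previously stored with setQuery(String) or the SolrQuery(String) constructor, and null when no query has been set. A minimal sketch of the round trip; the field and value are illustrative:

import org.apache.solr.client.solrj.SolrQuery;

public class GetQueryRoundTrip {
    public static void main(String[] args) {
        SolrQuery solrQuery = new SolrQuery();
        // getQuery() is null until a query string is stored
        System.out.println(solrQuery.getQuery()); // null

        solrQuery.setQuery("field_1:value_1");
        // getQuery() echoes back exactly the string passed to setQuery()
        System.out.println(solrQuery.getQuery()); // field_1:value_1
    }
}

The examples below all rely on this contract: the query string is stored verbatim and can be read back for assertions, logging, or reuse in other requests.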
From source file:at.pagu.soldockr.core.QueryParserTest.java
License:Apache License
@Test
public void testWithSimpleStringCriteria() {
    SimpleStringCriteria criteria = new SimpleStringCriteria("field_1:value_1");
    Query query = new SimpleQuery(criteria);
    SolrQuery solrQuery = queryParser.constructSolrQuery(query);
    Assert.assertNotNull(solrQuery);
    assertQueryStringPresent(solrQuery);
    assertPaginationNotPresent(solrQuery);
    assertProjectionNotPresent(solrQuery);
    assertGroupingNotPresent(solrQuery);
    assertFactingNotPresent(solrQuery);
    Assert.assertEquals(criteria.getQueryString(), solrQuery.getQuery());
}
From source file:au.org.ala.biocache.dao.SearchDAOImpl.java
License:Open Source License
/**
 * Writes the index fields to the supplied output stream in CSV format.
 * <p>
 * DM: refactored to split the query by month to improve performance.
 * Further enhancements possible:
 * 1) Multi threaded
 * 2) More filtering, by year or decade.
 *
 * @param downloadParams
 * @param out
 * @param includeSensitive
 * @param dd The details of the download
 * @param checkLimit
 * @param nextExecutor The ExecutorService to use to process results on different threads
 * @throws Exception
 */
@Override
public ConcurrentMap<String, AtomicInteger> writeResultsFromIndexToStream(
        final DownloadRequestParams downloadParams, final OutputStream out, final boolean includeSensitive,
        final DownloadDetailsDTO dd, boolean checkLimit, final ExecutorService nextExecutor) throws Exception {
    expandRequestedFields(downloadParams, true);

    if (dd != null) {
        dd.resetCounts();
    }

    long start = System.currentTimeMillis();
    final ConcurrentMap<String, AtomicInteger> uidStats = new ConcurrentHashMap<>();
    getServer();

    try {
        SolrQuery solrQuery = new SolrQuery();
        queryFormatUtils.formatSearchQuery(downloadParams);

        String dFields = downloadParams.getFields();

        if (includeSensitive) {
            //include raw latitudes and longitudes
            if (dFields.contains("decimalLatitude.p")) {
                dFields = dFields.replaceFirst("decimalLatitude.p",
                        "sensitive_latitude,sensitive_longitude,decimalLatitude.p");
            } else if (dFields.contains("decimalLatitude")) {
                dFields = dFields.replaceFirst("decimalLatitude",
                        "sensitive_latitude,sensitive_longitude,decimalLatitude");
            }
            if (dFields.contains(",locality,")) {
                dFields = dFields.replaceFirst(",locality,", ",locality,sensitive_locality,");
            }
            if (dFields.contains(",locality.p,")) {
                dFields = dFields.replaceFirst(",locality.p,", ",locality.p,sensitive_locality,");
            }
        }

        StringBuilder sb = new StringBuilder(dFields);
        if (!downloadParams.getExtra().isEmpty()) {
            sb.append(",").append(downloadParams.getExtra());
        }

        String[] requestedFields = sb.toString().split(",");
        List<String>[] indexedFields;
        if (downloadFields == null) {
            //default to include everything
            java.util.List<String> mappedNames = new java.util.LinkedList<String>();
            for (int i = 0; i < requestedFields.length; i++)
                mappedNames.add(requestedFields[i]);

            indexedFields = new List[] { mappedNames, new java.util.LinkedList<String>(), mappedNames,
                    mappedNames, new ArrayList(), new ArrayList() };
        } else {
            indexedFields = downloadFields.getIndexFields(requestedFields, downloadParams.getDwcHeaders(),
                    downloadParams.getLayersServiceUrl());
        }

        //apply custom header
        String[] customHeader = dd.getRequestParams().getCustomHeader().split(",");
        for (int i = 0; i + 1 < customHeader.length; i += 2) {
            for (int j = 0; j < indexedFields[0].size(); j++) {
                if (customHeader[i].equals(indexedFields[0].get(j))) {
                    indexedFields[2].set(j, customHeader[i + 1]);
                }
            }
            for (int j = 0; j < indexedFields[4].size(); j++) {
                if (customHeader[i].equals(indexedFields[5].get(j))) {
                    indexedFields[4].set(j, customHeader[i + 1]);
                }
            }
        }

        if (logger.isDebugEnabled()) {
            logger.debug("Fields included in download: " + indexedFields[0]);
            logger.debug("Fields excluded from download: " + indexedFields[1]);
            logger.debug("The headers in downloads: " + indexedFields[2]);
            logger.debug("Analysis headers: " + indexedFields[4]);
            logger.debug("Analysis fields: " + indexedFields[5]);
        }

        //set the fields to the ones that are available in the index
        String[] fields = indexedFields[0].toArray(new String[] {});
        solrQuery.setFields(fields);
        StringBuilder qasb = new StringBuilder();
        if (!"none".equals(downloadParams.getQa())) {
            solrQuery.addField("assertions");
            if (!"all".equals(downloadParams.getQa()) && !"includeall".equals(downloadParams.getQa())) {
                //add all the qa fields
                qasb.append(downloadParams.getQa());
            }
        }
        solrQuery.addField("institution_uid").addField("collection_uid").addField("data_resource_uid")
                .addField("data_provider_uid");

        solrQuery.setQuery(downloadParams.getFormattedQuery());
        solrQuery.setFacetMinCount(1);
        solrQuery.setFacetLimit(-1);

        //get the assertion facets to add them to the download fields
        boolean getAssertionsFromFacets = "all".equals(downloadParams.getQa())
                || "includeall".equals(downloadParams.getQa());
        SolrQuery monthAssertionsQuery = getAssertionsFromFacets
                ? solrQuery.getCopy().addFacetField("month", "assertions")
                : solrQuery.getCopy().addFacetField("month");
        if (getAssertionsFromFacets) {
            //set the order for the facet to be based on the index - this will force the assertions to be
            //returned in the same order each time, based on alphabetical sort. The number of QA's may
            //change between searches so we can't guarantee that the order won't change
            monthAssertionsQuery.add("f.assertions.facet.sort", "index");
        }
        QueryResponse facetQuery = runSolrQuery(monthAssertionsQuery, downloadParams.getFormattedFq(), 0, 0,
                "score", "asc");

        //set the total records for the download details
        dd.setTotalRecords(facetQuery.getResults().getNumFound());

        //use a separately configured and smaller limit when output will be unzipped
        final long maxDownloadSize;
        if (MAX_DOWNLOAD_SIZE > unzippedLimit && out instanceof OptionalZipOutputStream
                && ((OptionalZipOutputStream) out).getType() == OptionalZipOutputStream.Type.unzipped) {
            maxDownloadSize = unzippedLimit;
        } else {
            maxDownloadSize = MAX_DOWNLOAD_SIZE;
        }

        if (checkLimit && dd.getTotalRecords() < maxDownloadSize) {
            checkLimit = false;
        }

        //get the month facets to add them to the download fields, and get the assertion facets
        List<Count> splitByFacet = null;

        for (FacetField facet : facetQuery.getFacetFields()) {
            if (facet.getName().equals("assertions") && facet.getValueCount() > 0) {
                qasb.append(getQAFromFacet(facet));
            }
            if (facet.getName().equals("month") && facet.getValueCount() > 0) {
                splitByFacet = facet.getValues();
            }
        }

        if ("includeall".equals(downloadParams.getQa())) {
            qasb = getAllQAFields();
        }

        String qas = qasb.toString();

        //include sensitive fields in the header when the output will be partially sensitive
        final String[] sensitiveFields;
        final String[] notSensitiveFields;
        if (dd.getSensitiveFq() != null) {
            List<String>[] sensitiveHdr = downloadFields.getIndexFields(sensitiveSOLRHdr,
                    downloadParams.getDwcHeaders(), downloadParams.getLayersServiceUrl());

            //header for the output file
            indexedFields[2].addAll(sensitiveHdr[2]);

            //lookup for fields from sensitive queries
            sensitiveFields = org.apache.commons.lang3.ArrayUtils.addAll(
                    indexedFields[0].toArray(new String[] {}), sensitiveHdr[0].toArray(new String[] {}));

            //use general fields when sensitive data is not permitted
            notSensitiveFields = org.apache.commons.lang3.ArrayUtils
                    .addAll(indexedFields[0].toArray(new String[] {}), notSensitiveSOLRHdr);
        } else {
            sensitiveFields = new String[0];
            notSensitiveFields = fields;
        }

        //add analysis headers
        indexedFields[2].addAll(indexedFields[4]);
        final String[] analysisFields = indexedFields[5].toArray(new String[0]);

        final String[] qaFields = qas.equals("") ? new String[] {} : qas.split(",");
        String[] qaTitles = downloadFields.getHeader(qaFields, false, false);

        String[] header = org.apache.commons.lang3.ArrayUtils
                .addAll(indexedFields[2].toArray(new String[] {}), qaTitles);

        //retain output header fields and field names for inclusion of header info in the download
        StringBuilder infoFields = new StringBuilder("infoFields");
        for (String h : indexedFields[3])
            infoFields.append(",").append(h);
        for (String h : qaFields)
            infoFields.append(",").append(h);

        StringBuilder infoHeader = new StringBuilder("infoHeaders");
        for (String h : header)
            infoHeader.append(",").append(h);

        String info = infoFields.toString();
        while (info.contains(",,"))
            info = info.replace(",,", ",");
        uidStats.put(info, new AtomicInteger(-1));

        String hdr = infoHeader.toString();
        while (hdr.contains(",,"))
            hdr = hdr.replace(",,", ",");
        uidStats.put(hdr, new AtomicInteger(-2));

        //construct the correct RecordWriter based on the supplied fileType
        final RecordWriterError rw = downloadParams.getFileType().equals("csv")
                ? new CSVRecordWriter(out, header, downloadParams.getSep(), downloadParams.getEsc())
                : (downloadParams.getFileType().equals("tsv") ? new TSVRecordWriter(out, header)
                        : new ShapeFileRecordWriter(tmpShapefileDir, downloadParams.getFile(), out,
                                (String[]) ArrayUtils.addAll(fields, qaFields)));

        // Requirement to be able to propagate interruptions to all other threads for this execution
        // Doing this via this variable
        final AtomicBoolean interruptFound = dd != null ? dd.getInterrupt() : new AtomicBoolean(false);

        // Create a fixed length blocking queue for buffering results before they are written
        // This also creates a push-back effect to throttle the results generating threads
        // when it fills and offers to it are delayed until the writer consumes elements from the queue
        final BlockingQueue<String[]> queue = new ArrayBlockingQueue<>(resultsQueueLength);

        // Create a sentinel that we can check for reference equality to signal the end of the queue
        final String[] sentinel = new String[0];

        // An implementation of RecordWriter that adds to an in-memory queue
        final RecordWriter concurrentWrapper = new RecordWriter() {
            private AtomicBoolean finalised = new AtomicBoolean(false);
            private AtomicBoolean finalisedComplete = new AtomicBoolean(false);

            @Override
            public void write(String[] nextLine) {
                try {
                    if (Thread.currentThread().isInterrupted() || interruptFound.get() || finalised.get()) {
                        finalise();
                        return;
                    }
                    while (!queue.offer(nextLine, writerTimeoutWaitMillis, TimeUnit.MILLISECONDS)) {
                        if (Thread.currentThread().isInterrupted() || interruptFound.get()
                                || finalised.get()) {
                            finalise();
                            break;
                        }
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    interruptFound.set(true);
                    if (logger.isDebugEnabled()) {
                        logger.debug(
                                "Queue failed to accept the next record due to a thread interrupt, calling finalise the cleanup: ",
                                e);
                    }
                    // If we were interrupted then we should call finalise to clean up
                    finalise();
                }
            }

            @Override
            public void finalise() {
                if (finalised.compareAndSet(false, true)) {
                    try {
                        // Offer the sentinel at least once, even when the thread is interrupted
                        while (!queue.offer(sentinel, writerTimeoutWaitMillis, TimeUnit.MILLISECONDS)) {
                            // If the thread is interrupted then the queue may not have any active consumers,
                            // so don't loop forever waiting for capacity in this case
                            // The hard shutdown phase will use queue.clear to ensure that the
                            // sentinel gets onto the queue at least once
                            if (Thread.currentThread().isInterrupted() || interruptFound.get()) {
                                break;
                            }
                        }
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        interruptFound.set(true);
                        if (logger.isDebugEnabled()) {
                            logger.debug(
                                    "Queue failed to accept the sentinel in finalise due to a thread interrupt: ",
                                    e);
                        }
                    } finally {
                        finalisedComplete.set(true);
                    }
                }
            }

            @Override
            public boolean finalised() {
                return finalisedComplete.get();
            }
        };

        // A single thread that consumes elements put onto the queue until it sees the sentinel,
        // finalising after the sentinel or an interrupt
        Runnable writerRunnable = new Runnable() {
            @Override
            public void run() {
                try {
                    long counter = 0;
                    while (true) {
                        counter = counter + 1;

                        if (Thread.currentThread().isInterrupted() || interruptFound.get()) {
                            break;
                        }

                        String[] take = queue.take();
                        // Sentinel object equality check to see if we are done
                        if (take == sentinel || Thread.currentThread().isInterrupted()
                                || interruptFound.get()) {
                            break;
                        }
                        // Otherwise write to the wrapped record writer
                        rw.write(take);

                        //test for errors. This can contain a flush so only test occasionally
                        if (counter % resultsQueueLength == 0 && rw.hasError()) {
                            throw RecordWriterException.newRecordWriterException(dd, downloadParams, true, rw);
                        }
                    }
                } catch (RecordWriterException e) {
                    //no trace information is available to print for these errors
                    logger.error(e.getMessage());
                    interruptFound.set(true);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    interruptFound.set(true);
                } catch (Exception e) {
                    // Reuse interruptFound variable to signal that the writer had issues
                    interruptFound.set(true);
                    logger.error("Download writer failed.", e);
                } finally {
                    rw.finalise();
                }
            }
        };
        Thread writerThread = new Thread(writerRunnable);
        writerThread.start();

        try {
            if (rw instanceof ShapeFileRecordWriter) {
                dd.setHeaderMap(((ShapeFileRecordWriter) rw).getHeaderMappings());
            }

            //order the query by _docid_ for faster paging
            solrQuery.addSortField("_docid_", ORDER.asc);

            //for each month create a separate query that pages through 500 records per page
            List<SolrQuery> queries = new ArrayList<SolrQuery>();
            if (splitByFacet != null) {
                for (Count facet : splitByFacet) {
                    if (facet.getCount() > 0) {
                        SolrQuery splitByFacetQuery;
                        //do not add remainderQuery here
                        if (facet.getName() != null) {
                            splitByFacetQuery = solrQuery.getCopy()
                                    .addFilterQuery(facet.getFacetField().getName() + ":" + facet.getName());
                            splitByFacetQuery.setFacet(false);
                            queries.add(splitByFacetQuery);
                        }
                    }
                }
                if (splitByFacet.size() > 0) {
                    SolrQuery remainderQuery = solrQuery.getCopy()
                            .addFilterQuery("-" + splitByFacet.get(0).getFacetField().getName() + ":[* TO *]");
                    queries.add(0, remainderQuery);
                }
            } else {
                queries.add(0, solrQuery);
            }

            //split into sensitive and non-sensitive queries when
            // - not including all sensitive values
            // - there is a sensitive fq
            final List<SolrQuery> sensitiveQ = new ArrayList<SolrQuery>();
            if (!includeSensitive && dd.getSensitiveFq() != null) {
                sensitiveQ.addAll(
                        splitQueries(queries, dd.getSensitiveFq(), sensitiveSOLRHdr, notSensitiveSOLRHdr));
            }

            //Set<Future<Integer>> futures = new HashSet<Future<Integer>>();
            final AtomicInteger resultsCount = new AtomicInteger(0);
            final boolean threadCheckLimit = checkLimit;

            List<Callable<Integer>> solrCallables = new ArrayList<>(queries.size());
            // execute each query, writing the results to stream
            for (final SolrQuery splitByFacetQuery : queries) {
                // define a thread
                Callable<Integer> solrCallable = new Callable<Integer>() {
                    @Override
                    public Integer call() throws Exception {
                        int startIndex = 0;
                        // Randomise the wakeup time so they don't all wake up on a periodic cycle
                        long localThrottle = throttle + Math.round(Math.random() * throttle);

                        String[] fq = downloadParams.getFormattedFq();
                        if (splitByFacetQuery.getFilterQueries() != null
                                && splitByFacetQuery.getFilterQueries().length > 0) {
                            if (fq == null) {
                                fq = new String[0];
                            }
                            fq = org.apache.commons.lang3.ArrayUtils.addAll(fq,
                                    splitByFacetQuery.getFilterQueries());
                        }

                        QueryResponse qr = runSolrQuery(splitByFacetQuery, fq, downloadBatchSize, startIndex,
                                "_docid_", "asc");
                        AtomicInteger recordsForThread = new AtomicInteger(0);
                        if (logger.isDebugEnabled()) {
                            logger.debug(
                                    splitByFacetQuery.getQuery() + " - results: " + qr.getResults().size());
                        }

                        while (qr != null && !qr.getResults().isEmpty() && !interruptFound.get()) {
                            if (logger.isDebugEnabled()) {
                                logger.debug(
                                        "Start index: " + startIndex + ", " + splitByFacetQuery.getQuery());
                            }

                            int count = 0;
                            if (sensitiveQ.contains(splitByFacetQuery)) {
                                count = processQueryResults(uidStats, sensitiveFields, qaFields,
                                        concurrentWrapper, qr, dd, threadCheckLimit, resultsCount,
                                        maxDownloadSize, analysisFields);
                            } else {
                                // write non-sensitive values into sensitive fields when not authorised
                                // for their sensitive values
                                count = processQueryResults(uidStats, notSensitiveFields, qaFields,
                                        concurrentWrapper, qr, dd, threadCheckLimit, resultsCount,
                                        maxDownloadSize, analysisFields);
                            }
                            recordsForThread.addAndGet(count);
                            startIndex += downloadBatchSize;
                            // we have already set the filter query the first time the query was constructed
                            // rerun with the same params but a different startIndex
                            if (!threadCheckLimit || resultsCount.get() < maxDownloadSize) {
                                if (!threadCheckLimit) {
                                    // throttle the download by sleeping
                                    Thread.sleep(localThrottle);
                                }
                                qr = runSolrQuery(splitByFacetQuery, null, downloadBatchSize, startIndex,
                                        "_docid_", "asc");
                            } else {
                                qr = null;
                            }
                        }
                        return recordsForThread.get();
                    }
                };
                solrCallables.add(solrCallable);
            }

            List<Future<Integer>> futures = new ArrayList<>(solrCallables.size());
            for (Callable<Integer> nextCallable : solrCallables) {
                futures.add(nextExecutor.submit(nextCallable));
            }

            // Busy wait because we need to be able to respond to an interrupt on any callable
            // and propagate it to all of the others for this particular query
            // Because the executor service is shared to prevent too many concurrent threads being run,
            // this requires a busy wait loop on the main thread to monitor state
            boolean waitAgain = false;
            do {
                waitAgain = false;
                for (Future<Integer> future : futures) {
                    if (!future.isDone()) {
                        // Wait again even if an interrupt flag is set, as it may have been set partway
                        // through the iteration
                        // The calls to future.cancel will occur next time if the interrupt is set up
                        // partway through an iteration
                        waitAgain = true;
                        // If one thread finds an interrupt it is propagated to others using the
                        // interruptFound AtomicBoolean
                        if (interruptFound.get()) {
                            future.cancel(true);
                        }
                    }
                }
                // Don't trigger the timeout interrupt if we don't have to wait again,
                // as we are already done at this point
                if (waitAgain && (System.currentTimeMillis() - start) > downloadMaxTime) {
                    interruptFound.set(true);
                    break;
                }
                if (waitAgain) {
                    Thread.sleep(downloadCheckBusyWaitSleep);
                }
            } while (waitAgain);

            AtomicInteger totalDownload = new AtomicInteger(0);
            for (Future<Integer> future : futures) {
                if (future.isDone()) {
                    totalDownload.addAndGet(future.get());
                } else {
                    // All incomplete futures that survived the loop above are cancelled here
                    future.cancel(true);
                }
            }

            long finish = System.currentTimeMillis();
            long timeTakenInSecs = (finish - start) / 1000;
            if (timeTakenInSecs <= 0)
                timeTakenInSecs = 1;
            if (logger.isInfoEnabled()) {
                logger.info("Download of " + resultsCount + " records in " + timeTakenInSecs
                        + " seconds. Record/sec: " + resultsCount.intValue() / timeTakenInSecs);
            }
        } finally {
            try {
                // Once we get here, we need to finalise starting at the concurrent wrapper,
                // as there are no more non-sentinel records to be added to the queue
                // This eventually triggers finalisation of the underlying writer when the queue empties
                // This is a soft shutdown, and hence we wait below for this stage to complete
                // in normal circumstances
                // Note, this blocks for writerTimeoutWaitMillis trying to legitimately add the sentinel
                // to the end of the queue
                // We force the sentinel to be added in the hard shutdown phase below
                concurrentWrapper.finalise();
            } finally {
                try {
                    // Track the current time right now so we can abort after downloadMaxCompletionTime
                    // milliseconds in this phase
                    final long completionStartTime = System.currentTimeMillis();
                    // Busy wait check for finalised to be called in the RecordWriter
                    // or for something to be interrupted
                    // By this stage, there are at maximum download.internal.queue.size items
                    // remaining (default 1000)
                    while (writerThread.isAlive() && !writerThread.isInterrupted() && !interruptFound.get()
                            && !Thread.currentThread().isInterrupted() && !rw.finalised()
                            && !((System.currentTimeMillis()
                                    - completionStartTime) > downloadMaxCompletionTime)) {
                        Thread.sleep(downloadCheckBusyWaitSleep);
                    }
                } finally {
                    try {
                        // Attempt all actions that could trigger the writer thread to finalise,
                        // as by this stage we are in hard shutdown mode
                        // Signal that we are in hard shutdown mode
                        interruptFound.set(true);
                        // Add the sentinel or clear the queue and try again until it gets onto the queue
                        // We are in hard shutdown mode, so the only priority is that the queue either
                        // gets the sentinel or the thread is interrupted to clean up resources
                        while (!queue.offer(sentinel)) {
                            queue.clear();
                        }
                        // Interrupt the single writer thread
                        writerThread.interrupt();
                        // Explicitly call finalise on the RecordWriter as a backup
                        // In normal circumstances it is called via the sentinel or the interrupt
                        // This will not block if finalise has been called previously
                        // in the current three implementations
                        rw.finalise();
                    } finally {
                        if (rw != null && rw.hasError()) {
                            throw RecordWriterException.newRecordWriterException(dd, downloadParams, true, rw);
                        } else {
                            // Flush whatever output was still pending for more deterministic debugging
                            out.flush();
                        }
                    }
                }
            }
        }
    } catch (SolrServerException ex) {
        logger.error("Problem communicating with SOLR server while processing download. " + ex.getMessage(),
                ex);
    }
    return uidStats;
}
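The split-by-month logic above depends on SolrQuery.getCopy() returning an independent clone that preserves the main query string, so getQuery() reports the same value on every per-facet copy while only the filter queries differ. A reduced sketch of that pattern; the facet field and values are illustrative:

import java.util.ArrayList;
import java.util.List;

import org.apache.solr.client.solrj.SolrQuery;

public class SplitByFacetSketch {
    public static void main(String[] args) {
        SolrQuery base = new SolrQuery("*:*");

        List<SolrQuery> queries = new ArrayList<SolrQuery>();
        for (String month : new String[] { "1", "2", "3" }) {
            // getCopy() clones the query; the clone keeps the original q parameter
            SolrQuery splitByFacetQuery = base.getCopy().addFilterQuery("month:" + month);
            splitByFacetQuery.setFacet(false);
            queries.add(splitByFacetQuery);
        }

        for (SolrQuery q : queries) {
            // every copy still reports the base query string
            System.out.println(q.getQuery() + " fq=" + q.getFilterQueries()[0]);
        }
    }
}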
From source file:au.org.ala.biocache.dao.SearchDAOImpl.java
License:Open Source License
/**
 * Get a distinct list of species and their counts using a facet search.
 *
 * @param queryString
 * @param pageSize
 * @param sortField
 * @param sortDirection
 * @return
 * @throws SolrServerException
 */
protected List<TaxaCountDTO> getSpeciesCounts(String queryString, List<String> filterQueries,
        List<String> facetFields, Integer pageSize, Integer startIndex, String sortField,
        String sortDirection) throws SolrServerException {

    List<TaxaCountDTO> speciesCounts = new ArrayList<TaxaCountDTO>();
    SolrQuery solrQuery = new SolrQuery();
    solrQuery.setQueryType("standard");
    solrQuery.setQuery(queryString);

    if (filterQueries != null && filterQueries.size() > 0) {
        //solrQuery.addFilterQuery("(" + StringUtils.join(filterQueries, " OR ") + ")");
        for (String fq : filterQueries) {
            solrQuery.addFilterQuery(fq);
        }
    }
    solrQuery.setRows(0);
    solrQuery.setFacet(true);
    solrQuery.setFacetSort(sortField);
    for (String facet : facetFields) {
        solrQuery.addFacetField(facet);
        if (logger.isDebugEnabled()) {
            logger.debug("adding facetField: " + facet);
        }
    }
    //set the facet starting point based on the paging information
    solrQuery.setFacetMinCount(1);
    solrQuery.setFacetLimit(pageSize); // unlimited = -1 | pageSize
    solrQuery.add("facet.offset", Integer.toString(startIndex));
    if (logger.isDebugEnabled()) {
        logger.debug("getSpeciesCount query :" + solrQuery.getQuery());
    }

    QueryResponse qr = runSolrQuery(solrQuery, null, 1, 0, "score", sortDirection);
    if (logger.isInfoEnabled()) {
        logger.info("SOLR query: " + solrQuery.getQuery() + "; total hits: "
                + qr.getResults().getNumFound());
    }
    List<FacetField> facets = qr.getFacetFields();
    java.util.regex.Pattern p = java.util.regex.Pattern.compile("\\|");

    if (facets != null && facets.size() > 0) {
        if (logger.isDebugEnabled()) {
            logger.debug("Facets: " + facets.size() + "; facet #1: " + facets.get(0).getName());
        }
        for (FacetField facet : facets) {
            List<FacetField.Count> facetEntries = facet.getValues();
            if ((facetEntries != null) && (facetEntries.size() > 0)) {
                for (FacetField.Count fcount : facetEntries) {
                    TaxaCountDTO tcDTO = null;
                    String name = fcount.getName() != null ? fcount.getName() : "";
                    if (fcount.getFacetField().getName().equals(NAMES_AND_LSID)) {
                        String[] values = p.split(name, 5);
                        if (values.length >= 5) {
                            if (!"||||".equals(name)) {
                                tcDTO = new TaxaCountDTO(values[0], fcount.getCount());
                                tcDTO.setGuid(StringUtils.trimToNull(values[1]));
                                tcDTO.setCommonName(values[2]);
                                tcDTO.setKingdom(values[3]);
                                tcDTO.setFamily(values[4]);
                                if (StringUtils.isNotEmpty(tcDTO.getGuid()))
                                    tcDTO.setRank(
                                            searchUtils.getTaxonSearch(tcDTO.getGuid())[1].split(":")[0]);
                            }
                        } else {
                            if (logger.isDebugEnabled()) {
                                logger.debug("The values length: " + values.length + " :" + name);
                            }
                            tcDTO = new TaxaCountDTO(name, fcount.getCount());
                        }
                        //speciesCounts.add(i, tcDTO);
                        if (tcDTO != null && tcDTO.getCount() > 0)
                            speciesCounts.add(tcDTO);
                    } else if (fcount.getFacetField().getName().equals(COMMON_NAME_AND_LSID)) {
                        String[] values = p.split(name, 6);
                        if (values.length >= 5) {
                            if (!"|||||".equals(name)) {
                                tcDTO = new TaxaCountDTO(values[1], fcount.getCount());
                                tcDTO.setGuid(StringUtils.trimToNull(values[2]));
                                tcDTO.setCommonName(values[0]);
                                //cater for the bug of extra vernacular name in the result
                                tcDTO.setKingdom(values[values.length - 2]);
                                tcDTO.setFamily(values[values.length - 1]);
                                if (StringUtils.isNotEmpty(tcDTO.getGuid()))
                                    tcDTO.setRank(
                                            searchUtils.getTaxonSearch(tcDTO.getGuid())[1].split(":")[0]);
                            }
                        } else {
                            if (logger.isDebugEnabled()) {
                                logger.debug("The values length: " + values.length + " :" + name);
                            }
                            tcDTO = new TaxaCountDTO(name, fcount.getCount());
                        }
                        //speciesCounts.add(i, tcDTO);
                        if (tcDTO != null && tcDTO.getCount() > 0) {
                            speciesCounts.add(tcDTO);
                        }
                    }
                }
            }
        }
    }
    return speciesCounts;
}
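A stripped-down version of the facet setup above, using getQuery() the same way for debug output. This is a minimal sketch; the facet field name is illustrative:

import org.apache.solr.client.solrj.SolrQuery;

public class SpeciesCountSketch {
    public static void main(String[] args) {
        SolrQuery solrQuery = new SolrQuery();
        solrQuery.setQuery("*:*");
        solrQuery.setRows(0);                  // facet counts only, no documents
        solrQuery.setFacet(true);
        solrQuery.addFacetField("names_and_lsid");
        solrQuery.setFacetMinCount(1);
        solrQuery.setFacetLimit(20);           // page size; -1 would be unlimited
        solrQuery.add("facet.offset", "0");    // facet paging offset

        // the stored q parameter is what getQuery() echoes back for logging
        System.out.println("getSpeciesCount query: " + solrQuery.getQuery());
    }
}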
From source file:com.doculibre.constellio.services.FacetServicesImpl.java
License:Open Source License
public static SolrQuery toSolrQuery(SimpleSearch simpleSearch, int start, int row,
        boolean includeSingleValueFacets, boolean notIncludedOnly, List<String> customFieldFacets,
        List<String> customQueryFacets, ConstellioUser user) {
    String solrServerName = simpleSearch.getCollectionName();
    RecordCollectionServices collectionServices = ConstellioSpringUtils.getRecordCollectionServices();
    RecordCollection collection = collectionServices.get(solrServerName);

    SolrServices solrServices = ConstellioSpringUtils.getSolrServices();
    Boolean usesDisMax = solrServices.usesDisMax(collection);

    SolrQuery query;
    if (!collection.isOpenSearch()) {
        query = SearchServicesImpl.toSolrQuery(simpleSearch, usesDisMax, true, includeSingleValueFacets,
                notIncludedOnly);
    } else {
        query = SearchServicesImpl.toSolrQuery(simpleSearch, usesDisMax, false, true, false);
    }
    query.setParam(ConstellioSolrQueryParams.COLLECTION_NAME, simpleSearch.getCollectionName());
    query.setParam(ConstellioSolrQueryParams.LUCENE_QUERY,
            simpleSearch.getLuceneQuery(includeSingleValueFacets, true));
    query.setParam(ConstellioSolrQueryParams.SIMPLE_SEARCH, simpleSearch.toSimpleParams().toString());
    if (user != null) {
        query.setParam(ConstellioSolrQueryParams.USER_ID, "" + user.getId());
    }

    if (StringUtils.isEmpty(query.getQuery())) {
        query.setQuery(SimpleSearch.SEARCH_ALL);
        query.setRequestHandler("/elevate");
    }

    query.set("shards.qt", "/elevate");
    query.setRequestHandler("/elevate");

    query.setRows(row);
    query.setStart(start);

    query.setHighlight(true);
    query.setHighlightFragsize(100);
    query.setHighlightSnippets(2);

    query.setFacet(true);
    query.setFacetLimit(400);
    query.setFacetMinCount(1);

    if (collection.isOpenSearch()) {
        query.setParam("openSearchURL", collection.getOpenSearchURL());
        Locale locale = simpleSearch.getSingleSearchLocale();
        if (locale != null) {
            query.setParam("lang", locale.getLanguage());
        }
    } else {
        for (CollectionFacet collectionFacet : collection.getCollectionFacets()) {
            if (customFieldFacets == null && collectionFacet.isFieldFacet()) {
                IndexField indexField = collectionFacet.getFacetField();
                String indexFieldName = indexField.getName();
                if (!notIncludedOnly) {
                    query.addFacetField(indexFieldName);
                } else {
                    SearchedFacet searchedFacet = simpleSearch.getSearchedFacet(indexFieldName);
                    if (searchedFacet != null) {
                        if (!searchedFacet.getIncludedValues().isEmpty()) {
                            StringBuffer sbEx = new StringBuffer();
                            sbEx.append("{!ex=dt}");
                            // sbEx.append("{!ex=");
                            // boolean first = true;
                            // for (String includedValue : searchedFacet.getIncludedValues()) {
                            //     if (first) {
                            //         first = false;
                            //     } else {
                            //         sbEx.append(",");
                            //     }
                            //     sbEx.append(includedValue);
                            // }
                            // sbEx.append("}");
                            // query.setParam("facet.field", sbEx.toString() + indexFieldName);
                            query.addFacetField(sbEx.toString() + indexFieldName);
                        } else {
                            query.addFacetField(indexFieldName);
                        }
                    }
                }
            } else if (customQueryFacets == null && collectionFacet.isQueryFacet()) {
                // Modification by Rida: replaced collectionFacet.getLabels() with
                // collectionFacet.getLabelledValues()
                // for (I18NLabel valueLabels : collectionFacet.getLabels()) {
                for (I18NLabel valueLabels : collectionFacet.getLabelledValues()) {
                    String facetQuery = valueLabels.getKey();
                    query.addFacetQuery(facetQuery);
                }
            }
        }

        if (customFieldFacets != null) {
            for (String facetField : customFieldFacets) {
                if (!notIncludedOnly) {
                    query.addFacetField(facetField);
                } else {
                    StringBuffer sbEx = new StringBuffer();
                    sbEx.append("{!ex=dt}");
                    // sbEx.append("{!ex=");
                    // boolean first = true;
                    // for (String includedValue : searchedFacet.getIncludedValues()) {
                    //     if (first) {
                    //         first = false;
                    //     } else {
                    //         sbEx.append(",");
                    //     }
                    //     sbEx.append(includedValue);
                    // }
                    // sbEx.append("}");
                    query.setParam("facet.field", sbEx.toString() + facetField);
                }
            }
        }

        if (customQueryFacets != null) {
            for (String facetQuery : customQueryFacets) {
                query.addFacetQuery(facetQuery);
            }
        }
    }
    return query;
}
From source file:com.doculibre.constellio.services.SearchServicesImpl.java
License:Open Source License
@Override
public QueryResponse search(SimpleSearch simpleSearch, int start, int rows, SearchParams searchParams,
        ConstellioUser user) {
    QueryResponse queryResponse;
    String collectionName = simpleSearch.getCollectionName();
    if (collectionName != null) {
        RecordCollectionServices collectionServices = ConstellioSpringUtils.getRecordCollectionServices();
        RecordServices recordServices = ConstellioSpringUtils.getRecordServices();
        RecordCollection collection = collectionServices.get(collectionName);

        SolrServices solrServices = ConstellioSpringUtils.getSolrServices();
        Boolean usesDisMax = solrServices.usesDisMax(collection);

        SolrQuery query;
        if (!collection.isOpenSearch()) {
            query = toSolrQuery(simpleSearch, usesDisMax, true, true);
        } else {
            query = toSolrQuery(simpleSearch, usesDisMax, false, true);
        }
        // displayQuery(query);

        String luceneQuery = simpleSearch.getLuceneQuery();
        query.setParam(ConstellioSolrQueryParams.LUCENE_QUERY, luceneQuery);
        query.setParam(ConstellioSolrQueryParams.SIMPLE_SEARCH, simpleSearch.toSimpleParams().toString());
        query.setParam(ConstellioSolrQueryParams.COLLECTION_NAME, collectionName);
        if (user != null) {
            query.setParam(ConstellioSolrQueryParams.USER_ID, "" + user.getId());
        }

        String queryString = query.getQuery();
        if (StringUtils.isEmpty(queryString)) {
            queryString = SimpleSearch.SEARCH_ALL;
        }

        List<Record> pendingExclusions = recordServices.getPendingExclusions(collection);
        while (!pendingExclusions.isEmpty()) {
            IndexingManager indexingManager = IndexingManager.get(collection);
            if (indexingManager.isActive()) {
                try {
                    Thread.sleep(100);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
                pendingExclusions = recordServices.getPendingExclusions(collection);
            } else {
                return null;
            }
        }

        // SolrQuery query = new SolrQuery();
        query.set("collectionName", simpleSearch.getCollectionName());
        // query.setQuery(luceneQuery);
        query.set("shards.qt", "/elevate");
        query.setRequestHandler("/elevate");

        // number of results per page
        query.setRows(rows);
        // start page
        query.setStart(start);

        query.setHighlight(searchParams.isHighlightingEnabled());
        if (searchParams.isHighlightingEnabled()) {
            query.setHighlightFragsize(searchParams.getFragsize());
            query.setHighlightSnippets(searchParams.getSnippets());
        }

        if (simpleSearch.getSortField() != null) {
            ORDER order = SimpleSearch.SORT_DESCENDING.equals(simpleSearch.getSortOrder()) ? ORDER.desc
                    : ORDER.asc;
            IndexFieldServices indexFieldServices = ConstellioSpringUtils.getIndexFieldServices();
            IndexField indexField = indexFieldServices.get(simpleSearch.getSortField(), collection);
            if (indexField != null) {
                IndexField sortIndexField = indexFieldServices.getSortFieldOf(indexField);
                if (sortIndexField != null) {
                    query.setSort(sortIndexField.getName(), order);
                }
            }
        }

        if (collection.isOpenSearch()) {
            query.setParam("openSearchURL", collection.getOpenSearchURL());
            Locale locale = simpleSearch.getSingleSearchLocale();
            if (locale != null) {
                query.setParam("lang", locale.getLanguage());
            }
        }

        if (searchParams.getHighlightedFields() == null) {
            IndexField defaultSearchField = collection.getDefaultSearchIndexField();
            query.addHighlightField(defaultSearchField.getName());
            for (CopyField copyFieldDest : defaultSearchField.getCopyFieldsDest()) {
                IndexField copyIndexFieldSource = copyFieldDest.getIndexFieldSource();
                if (copyIndexFieldSource != null && !copyIndexFieldSource.isTitleField()
                        && copyIndexFieldSource.isHighlighted()) {
                    query.addHighlightField(copyIndexFieldSource.getName());
                }
            }
            IndexField titleField = collection.getTitleIndexField();
            if (titleField != null && titleField.isHighlighted()) {
                query.addHighlightField(titleField.getName());
            }
        } else {
            for (String highlightedField : searchParams.getHighlightedFields()) {
                IndexField field = collection.getIndexField(highlightedField);
                if (field != null) {
                    query.addHighlightField(highlightedField);
                }
            }
        }

        SolrServer server = SolrCoreContext.getSolrServer(collectionName);
        if (server != null) {
            try {
                // displayQuery(query);
                queryResponse = server.query(query);
            } catch (SolrServerException e) {
                queryResponse = null;
            }
        } else {
            queryResponse = null;
        }

        // if (queryResponse != null && !collection.isOpenSearch()) {
        //     StatsCompiler statCompiler = StatsCompiler.getInstance();
        //     try {
        //         statCompiler.saveStats(collectionName, SolrLogContext.getStatsSolrServer(),
        //             SolrLogContext.getStatsCompileSolrServer(), queryResponse, luceneQuery);
        //     } catch (SolrServerException e) {
        //         throw new RuntimeException(e);
        //     } catch (IOException e) {
        //         throw new RuntimeException(e);
        //     }
        // }
    } else {
        queryResponse = null;
    }
    // improveQueryResponse(collectionName, queryResponse);
    // System.out.println("Response size" + queryResponse.getResults().getNumFound());
    return queryResponse;
}
From source file:com.liferay.portal.search.solr.internal.SolrIndexSearcher.java
License:Open Source License
protected QueryResponse doSearch(SearchContext searchContext, Query query, int start, int end, boolean count)
        throws Exception {

    QueryConfig queryConfig = query.getQueryConfig();
    SolrQuery solrQuery = new SolrQuery();

    if (!count) {
        addFacets(solrQuery, searchContext);
        addHighlights(solrQuery, queryConfig);
        addPagination(solrQuery, start, end);
        addSelectedFields(solrQuery, queryConfig);
        addSort(solrQuery, searchContext.getSorts());
        solrQuery.setIncludeScore(queryConfig.isScoreEnabled());
    } else {
        solrQuery.setRows(0);
    }

    String queryString = translateQuery(query, searchContext);
    solrQuery.setQuery(queryString);

    String filterQuery = _filterTranslator.translate(query.getPreBooleanFilter(), searchContext);
    solrQuery.setFilterQueries(filterQuery);

    QueryResponse queryResponse = executeSearchRequest(solrQuery);

    if (_log.isInfoEnabled()) {
        _log.info("The search engine processed " + solrQuery.getQuery() + " in "
                + queryResponse.getElapsedTime() + " ms");
    }

    return queryResponse;
}
From source file:com.mmj.app.lucene.solr.client.SolrClient.java
License:Open Source License
public boolean del(String corename, final SolrQuery solrQuery) {
    final HttpSolrServer server = getOrCreateSolrServer(corename);
    final String query = solrQuery.getQuery();
    Result result = exec(new Executor() {
        @Override
        public Result exec() throws SolrServerException, IOException {
            UpdateResponse deleteByQuery = server.deleteByQuery(query);
            server.commit();
            return new Result().setSuccess(deleteByQuery.getStatus() == ERROR_STATUS);
        }
    });
    return result.isSuccess();
}
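Outside the Executor/Result wrapper, the core of this example is: read the query string back with getQuery() and hand it to deleteByQuery(). A minimal sketch assuming the SolrJ 4.x HttpSolrServer client used above; the success check against status 0 is an assumption (the original compares against its own ERROR_STATUS constant):

import java.io.IOException;

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.response.UpdateResponse;

public class DeleteByQuerySketch {
    public static boolean del(HttpSolrServer server, SolrQuery solrQuery)
            throws SolrServerException, IOException {
        // getQuery() recovers the raw query string to reuse as a delete-by-query
        String query = solrQuery.getQuery();
        UpdateResponse response = server.deleteByQuery(query);
        server.commit();
        return response.getStatus() == 0; // 0 is Solr's normal success status
    }
}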
From source file:com.nridge.ds.solr.SolrDS.java
License:Open Source License
/**
 * Returns a <i>Document</i> representation of all documents
 * fetched from the underlying content source (using a wildcard
 * criteria).
 * <p>
 * <b>Note:</b> Depending on the size of the content source
 * behind this data source, this method could consume large
 * amounts of heap memory. Therefore, it should only be
 * used when the number of columns and rows is known to be
 * small in size.
 * </p>
 *
 * @return Document hierarchy representing all documents in
 * the content source.
 *
 * @throws com.nridge.core.base.ds.DSException Data source related exception.
 */
@Override
public Document fetch() throws DSException {
    Document solrDocument;
    Logger appLogger = mAppMgr.getLogger(this, "fetch");

    appLogger.trace(mAppMgr.LOGMSG_TRACE_ENTER);

    initialize();

    DSCriteria dsCriteria = new DSCriteria("Solr Query");
    dsCriteria.add(Solr.FIELD_QUERY_NAME, Field.Operator.EQUAL, Solr.QUERY_ALL_DOCUMENTS);
    SolrQuery solrQuery = mSolrQueryBuilder.create(dsCriteria);
    solrQuery.setStart(Solr.QUERY_OFFSET_DEFAULT);
    solrQuery.setRows(Solr.QUERY_PAGESIZE_DEFAULT);

    appLogger.debug(String.format("%s: %s %s", dsCriteria.getName(), mSolrIdentity, solrQuery.toString()));

    SolrResponseBuilder solrResponseBuilder = createResponseBuilder();
    QueryResponse queryResponse = queryExecute(solrQuery);
    solrDocument = solrResponseBuilder.extract(queryResponse);
    DataBag headerBag = Solr.getHeader(solrDocument);
    if (headerBag != null)
        headerBag.setValueByName("collection_name", getCollectionName());
    String requestHandler = solrQuery.getRequestHandler();
    solrResponseBuilder.updateHeader(mBaseSolrURL, solrQuery.getQuery(), requestHandler,
            solrQuery.getStart(), solrQuery.getRows());

    appLogger.trace(mAppMgr.LOGMSG_TRACE_DEPART);

    return solrDocument;
}
From source file:com.nridge.ds.solr.SolrDS.java
License:Open Source License
/**
 * Returns a <i>Document</i> representation of the documents
 * that match the <i>DSCriteria</i> specified in the parameter.
 * <p>
 * <b>Note:</b> Depending on the size of the content source
 * behind this data source and the criteria specified, this
 * method could consume large amounts of heap memory.
 * Therefore, the developer is encouraged to use the alternative
 * method for fetch where an offset and limit parameter can be
 * specified.
 * </p>
 *
 * @param aDSCriteria Data source criteria.
 *
 * @return Document hierarchy representing all documents that
 * match the criteria in the content source.
 *
 * @throws com.nridge.core.base.ds.DSException Data source related exception.
 *
 * @see <a href="http://lucene.apache.org/solr/guide/7_6/common-query-parameters.html">Solr Common Query Parameters</a>
 * @see <a href="https://lucene.apache.org/solr/guide/7_6/the-standard-query-parser.html">Solr Standard Query Parser</a>
 */
@Override
public Document fetch(DSCriteria aDSCriteria) throws DSException {
    Document solrDocument;
    Logger appLogger = mAppMgr.getLogger(this, "fetch");

    appLogger.trace(mAppMgr.LOGMSG_TRACE_ENTER);

    initialize();

    SolrQuery solrQuery = mSolrQueryBuilder.create(aDSCriteria);

    appLogger.debug(String.format("%s: %s %s", aDSCriteria.getName(), mSolrIdentity, solrQuery.toString()));

    SolrResponseBuilder solrResponseBuilder = createResponseBuilder();
    QueryResponse queryResponse = queryExecute(solrQuery);
    solrDocument = solrResponseBuilder.extract(queryResponse);
    DataBag headerBag = Solr.getHeader(solrDocument);
    if (headerBag != null)
        headerBag.setValueByName("collection_name", getCollectionName());
    String requestHandler = solrQuery.getRequestHandler();
    Integer startPosition = solrQuery.getStart();
    if (startPosition == null)
        startPosition = Solr.QUERY_OFFSET_DEFAULT;
    Integer totalRows = solrQuery.getRows();
    if (totalRows == null)
        totalRows = Solr.QUERY_PAGESIZE_DEFAULT;
    solrResponseBuilder.updateHeader(mBaseSolrURL, solrQuery.getQuery(), requestHandler, startPosition,
            totalRows);

    if (Solr.isCriteriaParentChild(aDSCriteria)) {
        SolrParentChild solrParentChild = new SolrParentChild(mAppMgr, this);
        solrParentChild.expand(solrDocument, aDSCriteria);
    }

    appLogger.trace(mAppMgr.LOGMSG_TRACE_DEPART);

    return solrDocument;
}
From source file:com.nridge.ds.solr.SolrDS.java
License:Open Source License
/**
 * Returns a <i>Document</i> representation of the documents
 * that match the <i>DSCriteria</i> specified in the parameter.
 * In addition, this method offers a paging mechanism where the
 * starting offset and a fetch limit can be applied to each
 * content fetch query.
 *
 * @param aDSCriteria Data source criteria.
 * @param anOffset Starting offset into the matching content rows.
 * @param aLimit Limit on the total number of rows to extract from
 * the content source during this fetch operation.
 *
 * @return Document hierarchy representing all documents that
 * match the criteria in the content source (based on the offset
 * and limit values).
 *
 * @throws com.nridge.core.base.ds.DSException Data source related exception.
 *
 * @see <a href="http://lucene.apache.org/solr/guide/7_6/common-query-parameters.html">Solr Common Query Parameters</a>
 * @see <a href="https://lucene.apache.org/solr/guide/7_6/the-standard-query-parser.html">Solr Standard Query Parser</a>
 */
@Override
public Document fetch(DSCriteria aDSCriteria, int anOffset, int aLimit) throws DSException {
    Document solrDocument;
    Logger appLogger = mAppMgr.getLogger(this, "fetch");

    appLogger.trace(mAppMgr.LOGMSG_TRACE_ENTER);

    initialize();

    SolrQuery solrQuery = mSolrQueryBuilder.create(aDSCriteria);
    solrQuery.setStart(anOffset);
    solrQuery.setRows(aLimit);

    appLogger.debug(String.format("%s: %s %s", aDSCriteria.getName(), mSolrIdentity, solrQuery.toString()));

    SolrResponseBuilder solrResponseBuilder = createResponseBuilder();
    QueryResponse queryResponse = queryExecute(solrQuery);
    solrDocument = solrResponseBuilder.extract(queryResponse, anOffset, aLimit);
    DataBag headerBag = Solr.getHeader(solrDocument);
    if (headerBag != null)
        headerBag.setValueByName("collection_name", getCollectionName());
    String requestHandler = solrQuery.getRequestHandler();
    solrResponseBuilder.updateHeader(mBaseSolrURL, solrQuery.getQuery(), requestHandler, anOffset, aLimit);

    if (Solr.isCriteriaParentChild(aDSCriteria)) {
        SolrParentChild solrParentChild = new SolrParentChild(mAppMgr, this);
        solrParentChild.expand(solrDocument, aDSCriteria);
    }

    appLogger.trace(mAppMgr.LOGMSG_TRACE_DEPART);

    return solrDocument;
}
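The offset/limit variant above illustrates a common paging idiom: setStart/setRows define the page window while the q parameter, and therefore getQuery(), is unaffected. A reduced sketch with illustrative values:

import org.apache.solr.client.solrj.SolrQuery;

public class PagedFetchSketch {
    public static void main(String[] args) {
        SolrQuery solrQuery = new SolrQuery("title:solr");

        int offset = 100;
        int limit = 50;
        solrQuery.setStart(offset); // starting offset into the matching rows
        solrQuery.setRows(limit);   // page size

        // paging parameters do not alter the stored query string
        System.out.println(String.format("query=%s start=%d rows=%d",
                solrQuery.getQuery(), solrQuery.getStart(), solrQuery.getRows()));
    }
}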