List of usage examples for org.apache.solr.client.solrj SolrQuery setFields
public SolrQuery setFields(String... fields)
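setFields controls the fl parameter of the request: only the named stored fields are returned for each matching document, and each call replaces any previously set field list. A minimal, self-contained sketch before the real-world examples below. The core URL and field names are assumptions for illustration only, and the HttpSolrClient builder assumes SolrJ 6+:

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;

public class SetFieldsExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical core URL - adjust for your deployment
        try (HttpSolrClient client = new HttpSolrClient.Builder(
                "http://localhost:8983/solr/techproducts").build()) {
            SolrQuery query = new SolrQuery("*:*");
            // Only id and name are fetched; all other stored fields are omitted from the response
            query.setFields("id", "name");
            query.setRows(10);
            QueryResponse rsp = client.query(query);
            for (SolrDocument doc : rsp.getResults()) {
                System.out.println(doc.getFieldValue("id") + " : " + doc.getFieldValue("name"));
            }
        }
    }
}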
From source file: at.newmedialab.lmf.search.services.indexing.SolrCoreRuntime.java
License: Apache License

/**
 * Ask the server to retrieve all documents that depend on the resource passed as argument;
 * this query is carried out by querying the dependencies field of a document.
 *
 * @param resource
 * @return
 */
public Collection<URI> listDependent(ValueFactory valueFactory, URI resource) {
    SolrQuery query = new SolrQuery();
    query.setQuery("lmf.dependencies:\"" + resource.stringValue() + "\"");
    query.setFields("lmf.uri");
    query.setRows(Integer.MAX_VALUE);
    try {
        SolrDocumentList docs = server.query(query).getResults();
        Set<URI> result = new HashSet<URI>();
        for (SolrDocument doc : docs) {
            result.add(valueFactory.createURI((String) doc.getFirstValue("lmf.uri")));
        }
        return result;
    } catch (SolrServerException e) {
        // query failures are swallowed; callers see an empty dependency list
        return Collections.emptyList();
    }
}
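A caveat on the example above: setRows(Integer.MAX_VALUE) asks Solr to materialise every match in a single response, which can be slow and memory-hungry on large indexes. A hedged alternative sketch using cursor paging, reusing server, valueFactory and resource from the example. It assumes SolrJ 4.7+ (for org.apache.solr.common.params.CursorMarkParams) and that the core's uniqueKey field is named id; neither assumption is confirmed by the original source:

SolrQuery query = new SolrQuery("lmf.dependencies:\"" + resource.stringValue() + "\"");
query.setFields("lmf.uri");
query.setRows(1000); // page size instead of Integer.MAX_VALUE
query.setSort(SolrQuery.SortClause.asc("id")); // cursor paging requires a sort on the uniqueKey

Set<URI> result = new HashSet<URI>();
String cursorMark = CursorMarkParams.CURSOR_MARK_START;
while (true) {
    query.set(CursorMarkParams.CURSOR_MARK_PARAM, cursorMark);
    QueryResponse rsp = server.query(query);
    for (SolrDocument doc : rsp.getResults()) {
        result.add(valueFactory.createURI((String) doc.getFirstValue("lmf.uri")));
    }
    String nextCursorMark = rsp.getNextCursorMark();
    if (cursorMark.equals(nextCursorMark)) {
        break; // cursor did not advance: no more pages
    }
    cursorMark = nextCursorMark;
}

Each page stays cheap because Solr never has to compute a deep result offset, unlike start/rows paging.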
From source file: au.org.ala.biocache.dao.SearchDAOImpl.java
License: Open Source License

/**
 * Writes the index fields to the supplied output stream in CSV format.
 * <p>
 * DM: refactored to split the query by month to improve performance.
 * Further enhancements possible:
 * 1) Multi-threaded
 * 2) More filtering, by year or decade..
 *
 * @param downloadParams
 * @param out
 * @param includeSensitive
 * @param dd The details of the download
 * @param checkLimit
 * @param nextExecutor The ExecutorService to use to process results on different threads
 * @throws Exception
 */
@Override
public ConcurrentMap<String, AtomicInteger> writeResultsFromIndexToStream(
        final DownloadRequestParams downloadParams, final OutputStream out, final boolean includeSensitive,
        final DownloadDetailsDTO dd, boolean checkLimit, final ExecutorService nextExecutor) throws Exception {
    expandRequestedFields(downloadParams, true);
    if (dd != null) {
        dd.resetCounts();
    }
    long start = System.currentTimeMillis();
    final ConcurrentMap<String, AtomicInteger> uidStats = new ConcurrentHashMap<>();
    getServer();
    try {
        SolrQuery solrQuery = new SolrQuery();
        queryFormatUtils.formatSearchQuery(downloadParams);
        String dFields = downloadParams.getFields();
        if (includeSensitive) {
            // include raw latitude and longitudes
            if (dFields.contains("decimalLatitude.p")) {
                dFields = dFields.replaceFirst("decimalLatitude.p",
                        "sensitive_latitude,sensitive_longitude,decimalLatitude.p");
            } else if (dFields.contains("decimalLatitude")) {
                dFields = dFields.replaceFirst("decimalLatitude",
                        "sensitive_latitude,sensitive_longitude,decimalLatitude");
            }
            if (dFields.contains(",locality,")) {
                dFields = dFields.replaceFirst(",locality,", ",locality,sensitive_locality,");
            }
            if (dFields.contains(",locality.p,")) {
                dFields = dFields.replaceFirst(",locality.p,", ",locality.p,sensitive_locality,");
            }
        }
        StringBuilder sb = new StringBuilder(dFields);
        if (!downloadParams.getExtra().isEmpty()) {
            sb.append(",").append(downloadParams.getExtra());
        }
        String[] requestedFields = sb.toString().split(",");
        List<String>[] indexedFields;
        if (downloadFields == null) {
            // default to include everything
            java.util.List<String> mappedNames = new java.util.LinkedList<String>();
            for (int i = 0; i < requestedFields.length; i++) {
                mappedNames.add(requestedFields[i]);
            }
            indexedFields = new List[] { mappedNames, new java.util.LinkedList<String>(), mappedNames,
                    mappedNames, new ArrayList(), new ArrayList() };
        } else {
            indexedFields = downloadFields.getIndexFields(requestedFields, downloadParams.getDwcHeaders(),
                    downloadParams.getLayersServiceUrl());
        }
        // apply custom header
        String[] customHeader = dd.getRequestParams().getCustomHeader().split(",");
        for (int i = 0; i + 1 < customHeader.length; i += 2) {
            for (int j = 0; j < indexedFields[0].size(); j++) {
                if (customHeader[i].equals(indexedFields[0].get(j))) {
                    indexedFields[2].set(j, customHeader[i + 1]);
                }
            }
            for (int j = 0; j < indexedFields[4].size(); j++) {
                if (customHeader[i].equals(indexedFields[5].get(j))) {
                    indexedFields[4].set(j, customHeader[i + 1]);
                }
            }
        }
        if (logger.isDebugEnabled()) {
            logger.debug("Fields included in download: " + indexedFields[0]);
            logger.debug("Fields excluded from download: " + indexedFields[1]);
            logger.debug("The headers in downloads: " + indexedFields[2]);
            logger.debug("Analysis headers: " + indexedFields[4]);
            logger.debug("Analysis fields: " + indexedFields[5]);
        }
        // set the fields to the ones that are available in the index
        String[] fields = indexedFields[0].toArray(new String[] {});
        solrQuery.setFields(fields);
        StringBuilder qasb = new StringBuilder();
        if (!"none".equals(downloadParams.getQa())) {
            solrQuery.addField("assertions");
            if (!"all".equals(downloadParams.getQa()) && !"includeall".equals(downloadParams.getQa())) {
                // add all the qa fields
                qasb.append(downloadParams.getQa());
            }
        }
        solrQuery.addField("institution_uid").addField("collection_uid").addField("data_resource_uid")
                .addField("data_provider_uid");
        solrQuery.setQuery(downloadParams.getFormattedQuery());
        solrQuery.setFacetMinCount(1);
        solrQuery.setFacetLimit(-1);
        // get the assertion facets to add them to the download fields
        boolean getAssertionsFromFacets = "all".equals(downloadParams.getQa())
                || "includeall".equals(downloadParams.getQa());
        SolrQuery monthAssertionsQuery = getAssertionsFromFacets
                ? solrQuery.getCopy().addFacetField("month", "assertions")
                : solrQuery.getCopy().addFacetField("month");
        if (getAssertionsFromFacets) {
            // set the order for the facet to be based on the index - this will force the assertions to be
            // returned in the same order each time, based on alphabetical sort. The number of QA's may
            // change between searches so we can't guarantee that the order won't change
            monthAssertionsQuery.add("f.assertions.facet.sort", "index");
        }
        QueryResponse facetQuery = runSolrQuery(monthAssertionsQuery, downloadParams.getFormattedFq(), 0, 0,
                "score", "asc");
        // set the totalrecords for the download details
        dd.setTotalRecords(facetQuery.getResults().getNumFound());
        // use a separately configured and smaller limit when output will be unzipped
        final long maxDownloadSize;
        if (MAX_DOWNLOAD_SIZE > unzippedLimit && out instanceof OptionalZipOutputStream
                && ((OptionalZipOutputStream) out).getType() == OptionalZipOutputStream.Type.unzipped) {
            maxDownloadSize = unzippedLimit;
        } else {
            maxDownloadSize = MAX_DOWNLOAD_SIZE;
        }
        if (checkLimit && dd.getTotalRecords() < maxDownloadSize) {
            checkLimit = false;
        }
        // get the month facets to add them to the download fields, and get the assertion facets
        List<Count> splitByFacet = null;
        for (FacetField facet : facetQuery.getFacetFields()) {
            if (facet.getName().equals("assertions") && facet.getValueCount() > 0) {
                qasb.append(getQAFromFacet(facet));
            }
            if (facet.getName().equals("month") && facet.getValueCount() > 0) {
                splitByFacet = facet.getValues();
            }
        }
        if ("includeall".equals(downloadParams.getQa())) {
            qasb = getAllQAFields();
        }
        String qas = qasb.toString();
        // include sensitive fields in the header when the output will be partially sensitive
        final String[] sensitiveFields;
        final String[] notSensitiveFields;
        if (dd.getSensitiveFq() != null) {
            List<String>[] sensitiveHdr = downloadFields.getIndexFields(sensitiveSOLRHdr,
                    downloadParams.getDwcHeaders(), downloadParams.getLayersServiceUrl());
            // header for the output file
            indexedFields[2].addAll(sensitiveHdr[2]);
            // lookup for fields from sensitive queries
            sensitiveFields = org.apache.commons.lang3.ArrayUtils.addAll(
                    indexedFields[0].toArray(new String[] {}), sensitiveHdr[0].toArray(new String[] {}));
            // use general fields when sensitive data is not permitted
            notSensitiveFields = org.apache.commons.lang3.ArrayUtils
                    .addAll(indexedFields[0].toArray(new String[] {}), notSensitiveSOLRHdr);
        } else {
            sensitiveFields = new String[0];
            notSensitiveFields = fields;
        }
        // add analysis headers
        indexedFields[2].addAll(indexedFields[4]);
        final String[] analysisFields = indexedFields[5].toArray(new String[0]);
        final String[] qaFields = qas.equals("") ? new String[] {} : qas.split(",");
        String[] qaTitles = downloadFields.getHeader(qaFields, false, false);
        String[] header = org.apache.commons.lang3.ArrayUtils
                .addAll(indexedFields[2].toArray(new String[] {}), qaTitles);
        // retain output header fields and field names for inclusion of header info in the download
        StringBuilder infoFields = new StringBuilder("infoFields");
        for (String h : indexedFields[3]) {
            infoFields.append(",").append(h);
        }
        for (String h : qaFields) {
            infoFields.append(",").append(h);
        }
        StringBuilder infoHeader = new StringBuilder("infoHeaders");
        for (String h : header) {
            infoHeader.append(",").append(h);
        }
        String info = infoFields.toString();
        while (info.contains(",,")) {
            info = info.replace(",,", ",");
        }
        uidStats.put(info, new AtomicInteger(-1));
        String hdr = infoHeader.toString();
        while (hdr.contains(",,")) {
            hdr = hdr.replace(",,", ",");
        }
        uidStats.put(hdr, new AtomicInteger(-2));
        // construct correct RecordWriter based on the supplied fileType
        final RecordWriterError rw = downloadParams.getFileType().equals("csv")
                ? new CSVRecordWriter(out, header, downloadParams.getSep(), downloadParams.getEsc())
                : (downloadParams.getFileType().equals("tsv") ? new TSVRecordWriter(out, header)
                        : new ShapeFileRecordWriter(tmpShapefileDir, downloadParams.getFile(), out,
                                (String[]) ArrayUtils.addAll(fields, qaFields)));
        // Requirement to be able to propagate interruptions to all other threads for this execution
        // Doing this via this variable
        final AtomicBoolean interruptFound = dd != null ? dd.getInterrupt() : new AtomicBoolean(false);
        // Create a fixed length blocking queue for buffering results before they are written
        // This also creates a push-back effect to throttle the results generating threads
        // when it fills and offers to it are delayed until the writer consumes elements from the queue
        final BlockingQueue<String[]> queue = new ArrayBlockingQueue<>(resultsQueueLength);
        // Create a sentinel that we can check for reference equality to signal the end of the queue
        final String[] sentinel = new String[0];
        // An implementation of RecordWriter that adds to an in-memory queue
        final RecordWriter concurrentWrapper = new RecordWriter() {
            private AtomicBoolean finalised = new AtomicBoolean(false);
            private AtomicBoolean finalisedComplete = new AtomicBoolean(false);

            @Override
            public void write(String[] nextLine) {
                try {
                    if (Thread.currentThread().isInterrupted() || interruptFound.get() || finalised.get()) {
                        finalise();
                        return;
                    }
                    while (!queue.offer(nextLine, writerTimeoutWaitMillis, TimeUnit.MILLISECONDS)) {
                        if (Thread.currentThread().isInterrupted() || interruptFound.get() || finalised.get()) {
                            finalise();
                            break;
                        }
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    interruptFound.set(true);
                    if (logger.isDebugEnabled()) {
                        logger.debug(
                                "Queue failed to accept the next record due to a thread interrupt, calling finalise the cleanup: ",
                                e);
                    }
                    // If we were interrupted then we should call finalise to cleanup
                    finalise();
                }
            }

            @Override
            public void finalise() {
                if (finalised.compareAndSet(false, true)) {
                    try {
                        // Offer the sentinel at least once, even when the thread is interrupted
                        while (!queue.offer(sentinel, writerTimeoutWaitMillis, TimeUnit.MILLISECONDS)) {
                            // If the thread is interrupted then the queue may not have any active consumers,
                            // so don't loop forever waiting for capacity in this case
                            // The hard shutdown phase will use queue.clear to ensure that the
                            // sentinel gets onto the queue at least once
                            if (Thread.currentThread().isInterrupted() || interruptFound.get()) {
                                break;
                            }
                        }
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        interruptFound.set(true);
                        if (logger.isDebugEnabled()) {
                            logger.debug(
                                    "Queue failed to accept the sentinel in finalise due to a thread interrupt: ",
                                    e);
                        }
                    } finally {
                        finalisedComplete.set(true);
                    }
                }
            }

            @Override
            public boolean finalised() {
                return finalisedComplete.get();
            }
        };
        // A single thread that consumes elements put onto the queue until it sees the sentinel,
        // finalising after the sentinel or an interrupt
        Runnable writerRunnable = new Runnable() {
            @Override
            public void run() {
                try {
                    long counter = 0;
                    while (true) {
                        counter = counter + 1;
                        if (Thread.currentThread().isInterrupted() || interruptFound.get()) {
                            break;
                        }
                        String[] take = queue.take();
                        // Sentinel object equality check to see if we are done
                        if (take == sentinel || Thread.currentThread().isInterrupted() || interruptFound.get()) {
                            break;
                        }
                        // Otherwise write to the wrapped record writer
                        rw.write(take);
                        // test for errors. This can contain a flush so only test occasionally
                        if (counter % resultsQueueLength == 0 && rw.hasError()) {
                            throw RecordWriterException.newRecordWriterException(dd, downloadParams, true, rw);
                        }
                    }
                } catch (RecordWriterException e) {
                    // no trace information is available to print for these errors
                    logger.error(e.getMessage());
                    interruptFound.set(true);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    interruptFound.set(true);
                } catch (Exception e) {
                    // Reuse interruptFound variable to signal that the writer had issues
                    interruptFound.set(true);
                    logger.error("Download writer failed.", e);
                } finally {
                    rw.finalise();
                }
            }
        };
        Thread writerThread = new Thread(writerRunnable);
        writerThread.start();
        try {
            if (rw instanceof ShapeFileRecordWriter) {
                dd.setHeaderMap(((ShapeFileRecordWriter) rw).getHeaderMappings());
            }
            // order the query by _docid_ for faster paging
            solrQuery.addSortField("_docid_", ORDER.asc);
            // for each month create a separate query that pages through 500 records per page
            List<SolrQuery> queries = new ArrayList<SolrQuery>();
            if (splitByFacet != null) {
                for (Count facet : splitByFacet) {
                    if (facet.getCount() > 0) {
                        SolrQuery splitByFacetQuery;
                        // do not add remainderQuery here
                        if (facet.getName() != null) {
                            splitByFacetQuery = solrQuery.getCopy()
                                    .addFilterQuery(facet.getFacetField().getName() + ":" + facet.getName());
                            splitByFacetQuery.setFacet(false);
                            queries.add(splitByFacetQuery);
                        }
                    }
                }
                if (splitByFacet.size() > 0) {
                    SolrQuery remainderQuery = solrQuery.getCopy()
                            .addFilterQuery("-" + splitByFacet.get(0).getFacetField().getName() + ":[* TO *]");
                    queries.add(0, remainderQuery);
                }
            } else {
                queries.add(0, solrQuery);
            }
            // split into sensitive and non-sensitive queries when
            // - not including all sensitive values
            // - there is a sensitive fq
            final List<SolrQuery> sensitiveQ = new ArrayList<SolrQuery>();
            if (!includeSensitive && dd.getSensitiveFq() != null) {
                sensitiveQ.addAll(
                        splitQueries(queries, dd.getSensitiveFq(), sensitiveSOLRHdr, notSensitiveSOLRHdr));
            }
            //Set<Future<Integer>> futures = new HashSet<Future<Integer>>();
            final AtomicInteger resultsCount = new AtomicInteger(0);
            final boolean threadCheckLimit = checkLimit;
            List<Callable<Integer>> solrCallables = new ArrayList<>(queries.size());
            // execute each query, writing the results to stream
            for (final SolrQuery splitByFacetQuery : queries) {
                // define a thread
                Callable<Integer> solrCallable = new Callable<Integer>() {
                    @Override
                    public Integer call() throws Exception {
                        int startIndex = 0;
                        // Randomise the wakeup time so they don't all wakeup on a periodic cycle
                        long localThrottle = throttle + Math.round(Math.random() * throttle);
                        String[] fq = downloadParams.getFormattedFq();
                        if (splitByFacetQuery.getFilterQueries() != null
                                && splitByFacetQuery.getFilterQueries().length > 0) {
                            if (fq == null) {
                                fq = new String[0];
                            }
                            fq = org.apache.commons.lang3.ArrayUtils.addAll(fq,
                                    splitByFacetQuery.getFilterQueries());
                        }
                        QueryResponse qr = runSolrQuery(splitByFacetQuery, fq, downloadBatchSize, startIndex,
                                "_docid_", "asc");
                        AtomicInteger recordsForThread = new AtomicInteger(0);
                        if (logger.isDebugEnabled()) {
                            logger.debug(splitByFacetQuery.getQuery() + " - results: " + qr.getResults().size());
                        }
                        while (qr != null && !qr.getResults().isEmpty() && !interruptFound.get()) {
                            if (logger.isDebugEnabled()) {
                                logger.debug("Start index: " + startIndex + ", " + splitByFacetQuery.getQuery());
                            }
                            int count = 0;
                            if (sensitiveQ.contains(splitByFacetQuery)) {
                                count = processQueryResults(uidStats, sensitiveFields, qaFields,
                                        concurrentWrapper, qr, dd, threadCheckLimit, resultsCount,
                                        maxDownloadSize, analysisFields);
                            } else {
                                // write non-sensitive values into sensitive fields when not authorised
                                // for their sensitive values
                                count = processQueryResults(uidStats, notSensitiveFields, qaFields,
                                        concurrentWrapper, qr, dd, threadCheckLimit, resultsCount,
                                        maxDownloadSize, analysisFields);
                            }
                            recordsForThread.addAndGet(count);
                            startIndex += downloadBatchSize;
                            // we have already set the Filter query the first time the query was constructed
                            // rerun with the same params but different startIndex
                            if (!threadCheckLimit || resultsCount.get() < maxDownloadSize) {
                                if (!threadCheckLimit) {
                                    // throttle the download by sleeping
                                    Thread.sleep(localThrottle);
                                }
                                qr = runSolrQuery(splitByFacetQuery, null, downloadBatchSize, startIndex,
                                        "_docid_", "asc");
                            } else {
                                qr = null;
                            }
                        }
                        return recordsForThread.get();
                    }
                };
                solrCallables.add(solrCallable);
            }
            List<Future<Integer>> futures = new ArrayList<>(solrCallables.size());
            for (Callable<Integer> nextCallable : solrCallables) {
                futures.add(nextExecutor.submit(nextCallable));
            }
            // Busy wait because we need to be able to respond to an interrupt on any callable
            // and propagate it to all of the others for this particular query
            // Because the executor service is shared to prevent too many concurrent threads being run,
            // this requires a busy wait loop on the main thread to monitor state
            boolean waitAgain = false;
            do {
                waitAgain = false;
                for (Future<Integer> future : futures) {
                    if (!future.isDone()) {
                        // Wait again even if an interrupt flag is set, as it may have been set partway
                        // through the iteration. The calls to future.cancel will occur next time if the
                        // interrupt is setup partway through an iteration
                        waitAgain = true;
                        // If one thread finds an interrupt it is propagated to others using the
                        // interruptFound AtomicBoolean
                        if (interruptFound.get()) {
                            future.cancel(true);
                        }
                    }
                }
                // Don't trigger the timeout interrupt if we don't have to wait again,
                // as we are already done at this point
                if (waitAgain && (System.currentTimeMillis() - start) > downloadMaxTime) {
                    interruptFound.set(true);
                    break;
                }
                if (waitAgain) {
                    Thread.sleep(downloadCheckBusyWaitSleep);
                }
            } while (waitAgain);
            AtomicInteger totalDownload = new AtomicInteger(0);
            for (Future<Integer> future : futures) {
                if (future.isDone()) {
                    totalDownload.addAndGet(future.get());
                } else {
                    // All incomplete futures that survived the loop above are cancelled here
                    future.cancel(true);
                }
            }
            long finish = System.currentTimeMillis();
            long timeTakenInSecs = (finish - start) / 1000;
            if (timeTakenInSecs <= 0) {
                timeTakenInSecs = 1;
            }
            if (logger.isInfoEnabled()) {
                logger.info("Download of " + resultsCount + " records in " + timeTakenInSecs
                        + " seconds. Record/sec: " + resultsCount.intValue() / timeTakenInSecs);
            }
        } finally {
            try {
                // Once we get here, we need to finalise starting at the concurrent wrapper,
                // as there are no more non-sentinel records to be added to the queue
                // This eventually triggers finalisation of the underlying writer when the queue empties
                // This is a soft shutdown, and hence we wait below for this stage to complete in normal circumstances
                // Note, this blocks for writerTimeoutWaitMillis trying to legitimately add the sentinel to the end of the queue
                // We force the sentinel to be added in the hard shutdown phase below
                concurrentWrapper.finalise();
            } finally {
                try {
                    // Track the current time right now so we can abort after downloadMaxCompletionTime
                    // milliseconds in this phase
                    final long completionStartTime = System.currentTimeMillis();
                    // Busy wait check for finalised to be called in the RecordWriter or something is interrupted
                    // By this stage, there are at maximum download.internal.queue.size items remaining (default 1000)
                    while (writerThread.isAlive() && !writerThread.isInterrupted() && !interruptFound.get()
                            && !Thread.currentThread().isInterrupted() && !rw.finalised()
                            && !((System.currentTimeMillis() - completionStartTime) > downloadMaxCompletionTime)) {
                        Thread.sleep(downloadCheckBusyWaitSleep);
                    }
                } finally {
                    try {
                        // Attempt all actions that could trigger the writer thread to finalise,
                        // as by this stage we are in hard shutdown mode
                        // Signal that we are in hard shutdown mode
                        interruptFound.set(true);
                        // Add the sentinel or clear the queue and try again until it gets onto the queue
                        // We are in hard shutdown mode, so the only priority is that the queue either
                        // gets the sentinel or the thread is interrupted to clean up resources
                        while (!queue.offer(sentinel)) {
                            queue.clear();
                        }
                        // Interrupt the single writer thread
                        writerThread.interrupt();
                        // Explicitly call finalise on the RecordWriter as a backup
                        // In normal circumstances it is called via the sentinel or the interrupt
                        // This will not block if finalise has been called previously in the current three implementations
                        rw.finalise();
                    } finally {
                        if (rw != null && rw.hasError()) {
                            throw RecordWriterException.newRecordWriterException(dd, downloadParams, true, rw);
                        } else {
                            // Flush whatever output was still pending for more deterministic debugging
                            out.flush();
                        }
                    }
                }
            }
        }
    } catch (SolrServerException ex) {
        logger.error("Problem communicating with SOLR server while processing download. " + ex.getMessage(), ex);
    }
    return uidStats;
}
From source file: au.org.ala.biocache.dao.SearchDAOImpl.java
License: Open Source License

/**
 * Helper method to create a SolrQuery object and add facet settings.
 *
 * @return solrQuery the SolrQuery
 */
protected SolrQuery initSolrQuery(SearchRequestParams searchParams, boolean substituteDefaultFacetOrder,
        Map<String, String[]> extraSolrParams) {
    SolrQuery solrQuery = new SolrQuery();
    solrQuery.setQueryType("standard");
    boolean rangeAdded = false;
    // Facets
    solrQuery.setFacet(searchParams.getFacet());
    if (searchParams.getFacet()) {
        for (String facet : searchParams.getFacets()) {
            if (facet.equals("date") || facet.equals("decade")) {
                String fname = facet.equals("decade") ? OCCURRENCE_YEAR_INDEX_FIELD : "occurrence_" + facet;
                initDecadeBasedFacet(solrQuery, fname);
            } else if (facet.equals("uncertainty")) {
                Map<String, String> rangeMap = rangeBasedFacets.getRangeMap("uncertainty");
                for (String range : rangeMap.keySet()) {
                    solrQuery.add("facet.query", range);
                }
            } else if (facet.endsWith(RANGE_SUFFIX)) {
                // this facet needs to have its ranges included
                if (!rangeAdded) {
                    solrQuery.add("facet.range.other", "before");
                    solrQuery.add("facet.range.other", "after");
                }
                String field = facet.replaceAll(RANGE_SUFFIX, "");
                StatsIndexFieldDTO details = getRangeFieldDetails(field);
                if (details != null) {
                    solrQuery.addNumericRangeFacet(field, details.getStart(), details.getEnd(),
                            details.getGap());
                }
            } else {
                solrQuery.addFacetField(facet);
                if ("".equals(searchParams.getFsort()) && substituteDefaultFacetOrder
                        && FacetThemes.getFacetsMap().containsKey(facet)) {
                    // now check if the sort order is different to supplied
                    String thisSort = FacetThemes.getFacetsMap().get(facet).getSort();
                    if (!searchParams.getFsort().equalsIgnoreCase(thisSort)) {
                        solrQuery.add("f." + facet + ".facet.sort", thisSort);
                    }
                }
            }
        }
        solrQuery.setFacetMinCount(1);
        solrQuery.setFacetLimit(searchParams.getFlimit());
        // include this so that the default fsort is still obeyed
        String fsort = "".equals(searchParams.getFsort()) ? "count" : searchParams.getFsort();
        solrQuery.setFacetSort(fsort);
        if (searchParams.getFoffset() > 0) {
            solrQuery.add("facet.offset", Integer.toString(searchParams.getFoffset()));
        }
        if (StringUtils.isNotEmpty(searchParams.getFprefix())) {
            solrQuery.add("facet.prefix", searchParams.getFprefix());
        }
    }
    solrQuery.setRows(10);
    solrQuery.setStart(0);
    if (searchParams.getFl().length() > 0) {
        solrQuery.setFields(searchParams.getFl());
    }
    // add the extra SOLR params
    if (extraSolrParams != null) {
        // automatically include the before and after params...
        if (!rangeAdded) {
            solrQuery.add("facet.range.other", "before");
            solrQuery.add("facet.range.other", "after");
        }
        for (String key : extraSolrParams.keySet()) {
            String[] values = extraSolrParams.get(key);
            solrQuery.add(key, values);
        }
    }
    return solrQuery;
}
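Note that this helper passes searchParams.getFl() - a single comma-separated string - as one varargs argument. SolrJ joins the varargs with commas when building the fl parameter, so in the SolrJ versions I am aware of both forms below produce the same request; the field names are illustrative only:

SolrQuery q = new SolrQuery("*:*");
q.setFields("id", "name", "score"); // varargs form -> fl=id,name,score
q.setFields("id,name,score");       // pre-joined string; each setFields call replaces the previous fl value

addField, by contrast, appends to the existing fl value instead of replacing it, which is why the download method above mixes one setFields call with several addField calls.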
From source file: au.org.ala.biocache.dao.SearchDAOImpl.java
License: Open Source License

/**
 * Returns the count of distinct values for the facets. Uses groups for group counts.
 * Supports foffset and flimit for paging. Supports fsort 'count' or 'index'.
 * <p/>
 * TODO work out whether or not we should allow facet ranges to be downloaded....
 */
public List<FacetResultDTO> getFacetCounts(SpatialSearchRequestParams searchParams) throws Exception {
    Collection<FacetResultDTO> facetResults = new ArrayList<FacetResultDTO>();
    queryFormatUtils.formatSearchQuery(searchParams);
    String queryString = searchParams.getFormattedQuery();
    searchParams.setFacet(false);
    searchParams.setPageSize(0);
    // get facet group counts
    SolrQuery query = initSolrQuery(searchParams, false, null);
    query.setQuery(queryString);
    query.setFields(null);
    query.setRows(0);
    searchParams.setPageSize(0);
    // exclude multivalue fields
    Set<IndexFieldDTO> fields = getIndexedFields();
    List<String> multivalueFields = new ArrayList<String>();
    for (String facet : searchParams.getFacets()) {
        boolean added = false;
        for (IndexFieldDTO f : fields) {
            if (f.getName().equals(facet) && !f.isMultivalue()) {
                query.add("group.field", facet);
                added = true;
            }
        }
        if (!added) {
            multivalueFields.add(facet);
        }
    }
    if (multivalueFields.size() > 0) {
        facetResults.addAll(getMultivalueFacetCounts(query, searchParams, multivalueFields));
    }
    if (multivalueFields.size() < searchParams.getFacets().length) {
        // now use the supplied facets to add groups to the query
        // facet totals are slow, so only fetch when foffset == 0 and flimit != -1
        Map<String, Integer> ngroups = new HashMap<String, Integer>();
        GroupResponse groupResponse = null;
        if (searchParams.getFlimit() != -1 && searchParams.getFoffset() == 0) {
            query.add("group", "true");
            query.add("group.ngroups", "true");
            query.add("group.limit", "0");
            QueryResponse response = runSolrQuery(query, searchParams);
            groupResponse = response.getGroupResponse();
            for (GroupCommand gc : groupResponse.getValues()) {
                ngroups.put(gc.getName(), gc.getNGroups());
            }
        }
        // include paged facets when flimit > 0 or flimit == -1
        if (searchParams.getFlimit() != 0) {
            searchParams.setFacet(true);
            SolrQuery facetQuery = initSolrQuery(searchParams, false, null);
            facetQuery.setQuery(queryString);
            facetQuery.setFields(null);
            facetQuery.setSortField(searchParams.getSort(), ORDER.valueOf(searchParams.getDir()));
            QueryResponse qr = runSolrQuery(facetQuery, searchParams);
            SearchResultDTO searchResults = processSolrResponse(searchParams, qr, facetQuery,
                    OccurrenceIndex.class);
            facetResults = searchResults.getFacetResults();
            if (facetResults != null) {
                for (FacetResultDTO fr : facetResults) {
                    if (searchParams.getFlimit() == -1) {
                        fr.setCount(fr.getFieldResult().size());
                    } else {
                        Integer count = ngroups.get(fr.getFieldName());
                        if (count != null) {
                            fr.setCount(count);
                        }
                    }
                }
            }
        } else if (groupResponse != null) {
            // only return group counts
            for (GroupCommand gc : groupResponse.getValues()) {
                facetResults.add(new FacetResultDTO(gc.getName(), null, gc.getNGroups()));
            }
        }
    }
    return new ArrayList<FacetResultDTO>(facetResults);
}
From source file: au.org.ala.biocache.dao.SearchDAOImpl.java
License: Open Source License

/**
 * @see au.org.ala.biocache.dao.SearchDAO#findByFulltext(SpatialSearchRequestParams)
 */
@Override
public SolrDocumentList findByFulltext(SpatialSearchRequestParams searchParams) throws Exception {
    SolrDocumentList sdl = null;
    try {
        queryFormatUtils.formatSearchQuery(searchParams);
        String queryString = searchParams.getFormattedQuery();
        SolrQuery solrQuery = new SolrQuery();
        solrQuery.setQuery(queryString);
        solrQuery.setFields(searchParams.getFl());
        solrQuery.setFacet(false);
        solrQuery.setRows(searchParams.getPageSize());
        sdl = runSolrQuery(solrQuery, searchParams).getResults();
    } catch (SolrServerException ex) {
        logger.error("Problem communicating with SOLR server. " + ex.getMessage(), ex);
    }
    return sdl;
}
From source file: au.org.ala.biocache.dao.SearchDAOImpl.java
License: Open Source License

/**
 * Perform grouped facet query.
 * <p>
 * facets is the list of grouped facets required
 * flimit restricts the number of groups returned
 * pageSize restricts the number of docs in each group returned
 * fl is the list of fields in the returned docs
 */
public List<GroupFacetResultDTO> searchGroupedFacets(SpatialSearchRequestParams searchParams) throws Exception {
    queryFormatUtils.formatSearchQuery(searchParams);
    String queryString = searchParams.getFormattedQuery();
    searchParams.setFacet(false);
    // get facet group counts
    SolrQuery query = initSolrQuery(searchParams, false, null);
    query.setQuery(queryString);
    query.setFields(null);
    // now use the supplied facets to add groups to the query
    query.add("group", "true");
    query.add("group.ngroups", "true");
    query.add("group.limit", String.valueOf(searchParams.getPageSize()));
    query.setRows(searchParams.getFlimit());
    query.setFields(searchParams.getFl());
    for (String facet : searchParams.getFacets()) {
        query.add("group.field", facet);
    }
    QueryResponse response = runSolrQuery(query, searchParams);
    GroupResponse groupResponse = response.getGroupResponse();
    List<GroupFacetResultDTO> output = new ArrayList();
    for (GroupCommand gc : groupResponse.getValues()) {
        List<GroupFieldResultDTO> list = new ArrayList<GroupFieldResultDTO>();
        String facet = gc.getName();
        for (Group v : gc.getValues()) {
            List<OccurrenceIndex> docs = (new DocumentObjectBinder()).getBeans(OccurrenceIndex.class,
                    v.getResult());
            // build facet displayName and fq
            String value = v.getGroupValue();
            Long count = v.getResult() != null ? v.getResult().getNumFound() : 0L;
            if (value == null) {
                list.add(new GroupFieldResultDTO("", count, "-" + facet + ":*", docs));
            } else {
                list.add(new GroupFieldResultDTO(getFacetValueDisplayName(facet, value), count,
                        facet + ":\"" + value + "\"", docs));
            }
        }
        output.add(new GroupFacetResultDTO(gc.getName(), list, gc.getNGroups()));
    }
    return output;
}
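One detail worth noticing in this example: setFields(null) first clears the field list, and the later setFields(searchParams.getFl()) sets it again. Since setFields replaces the fl parameter outright on every call (and, in the SolrJ sources I have seen, removes it entirely when passed null or no arguments), only the second call affects the request that is finally sent.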
From source file: au.org.ala.biocache.dao.SearchDAOImpl.java
License: Open Source License

/**
 * @see au.org.ala.biocache.dao.SearchDAO#searchPivot(au.org.ala.biocache.dto.SpatialSearchRequestParams)
 */
public List<FacetPivotResultDTO> searchPivot(SpatialSearchRequestParams searchParams) throws Exception {
    String pivot = StringUtils.join(searchParams.getFacets(), ",");
    searchParams.setFacets(new String[] {});
    queryFormatUtils.formatSearchQuery(searchParams);
    String queryString = searchParams.getFormattedQuery();
    searchParams.setFacet(true);
    // get facet group counts
    SolrQuery query = initSolrQuery(searchParams, false, null);
    query.setQuery(queryString);
    query.setFields(null);
    // now use the supplied facets to add groups to the query
    query.add("facet.pivot", pivot);
    query.add("facet.pivot.mincount", "1");
    query.add("facet.missing", "true");
    query.setRows(0);
    searchParams.setPageSize(0);
    QueryResponse response = runSolrQuery(query, searchParams);
    NamedList<List<PivotField>> result = response.getFacetPivot();
    List<FacetPivotResultDTO> output = new ArrayList();
    for (Entry<String, List<PivotField>> pfl : result) {
        List<PivotField> list = pfl.getValue();
        if (list != null && list.size() > 0) {
            output.add(new FacetPivotResultDTO(list.get(0).getField(), getFacetPivotResults(list), null,
                    (int) response.getResults().getNumFound()));
        }
        // should only be one result
        break;
    }
    return output;
}
From source file: au.org.ala.biocache.dao.SearchDAOImpl.java
License: Open Source License

/**
 * @see au.org.ala.biocache.dao.SearchDAO#searchStat
 */
public List<FieldStatsItem> searchStat(SpatialSearchRequestParams searchParams, String field, String facet)
        throws Exception {
    searchParams.setFacets(new String[] {});
    queryFormatUtils.formatSearchQuery(searchParams);
    String queryString = searchParams.getFormattedQuery();
    if (facet != null) {
        searchParams.setFacet(true);
    }
    // get facet group counts
    SolrQuery query = initSolrQuery(searchParams, false, null);
    query.setQuery(queryString);
    query.setFields(null);
    //query.setFacetLimit(-1);
    // stats parameters
    query.add("stats", "true");
    if (facet != null) {
        query.add("stats.facet", facet);
    }
    query.add("stats.field", field);
    query.setRows(0);
    searchParams.setPageSize(0);
    QueryResponse response = runSolrQuery(query, searchParams);
    List<FieldStatsItem> output = new ArrayList();
    if (facet != null && response.getFieldStatsInfo().size() > 0) {
        for (FieldStatsInfo f : response.getFieldStatsInfo().values().iterator().next().getFacets().values()
                .iterator().next()) {
            FieldStatsItem item = new FieldStatsItem(f);
            if (f.getName() == null) {
                item.setFq("-" + facet + ":*");
            } else {
                item.setFq(facet + ":\"" + f.getName() + "\"");
            }
            item.setLabel(f.getName());
            output.add(item);
        }
    } else {
        if (response.getFieldStatsInfo().size() > 0) {
            output.add(new FieldStatsItem(response.getFieldStatsInfo().values().iterator().next()));
        }
    }
    return output;
}
From source file: au.org.intersect.dms.catalogue.db.SolrIndexFacade.java
License: Open Source License

/**
 * Returns all datasets owned by the user matching the specified full text search query.
 *
 * @param username
 *            username of the owner in the DMS system
 * @param projects
 *            list of project codes from the booking system this username belongs to
 * @param query
 *            full text search query
 * @param startIndex
 *            index of the first dataset to display on the current page
 * @param pageSize
 *            max number of datasets to display on each page
 * @return datasets of this user (one page)
 */
public DatasetSearchResult findDatasets(String username, List<Long> projects, String query, int startIndex,
        int pageSize) {
    SolrQuery solrQuery = new SolrQuery();
    solrQuery.setFields(ID);
    solrQuery.setStart(startIndex);
    solrQuery.setRows(pageSize);
    solrQuery.setSortField(ID, ORDER.asc);
    StringBuilder queryString = new StringBuilder();
    if (query == null || "".equals(query)) {
        queryString.append("dataset.metadata_t:*");
    } else {
        queryString.append(query);
    }
    queryString.append(" AND (dataset.owner_s:").append(username);
    String projectQuery = buildProjectCriteria(projects);
    if (!projectQuery.isEmpty()) {
        queryString.append(" OR ").append(projectQuery);
    }
    queryString.append(")");
    solrQuery.setQuery(queryString.toString());
    QueryResponse solrResponse = DbDataset.search(solrQuery);
    SolrDocumentList docs = solrResponse.getResults();
    List<Dataset> datasets = convertSolrDocuments2Datasets(docs);
    DatasetSearchResult result = new DatasetSearchResult();
    result.setDatasets(datasets);
    result.setTotalSize(docs != null ? docs.getNumFound() : 0);
    return result;
}
From source file: bamboo.trove.rule.RuleChangeUpdateManager.java
License: Apache License

private SolrQuery createQuery(String query) {
    SolrQuery q = new SolrQuery("*:*");
    // TODO: Should we add a request handler to the solr cluster to get metrics
    // on the volume and/or performance of these searches in their own bucket?
    q.setFilterQueries(query);
    q.setFields(SOLR_FIELDS);
    q.setSort(SortClause.asc(SolrEnum.ID.toString()));
    q.setRows(solrReadSize);
    return q;
}
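Because setFields is declared with a String... parameter, an existing array such as SOLR_FIELDS can be passed directly and is expanded as if each element had been listed individually. A small illustration with hypothetical values (the real SOLR_FIELDS constant is defined elsewhere in the class):

String[] SOLR_FIELDS = { "id", "url", "date" }; // hypothetical values for illustration
q.setFields(SOLR_FIELDS);                       // equivalent to q.setFields("id", "url", "date")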