List of usage examples for org.apache.solr.schema.SchemaField.getName()
public String getName()
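Most of the examples below follow the same pattern: look up a SchemaField (typically the schema's unique key field) and call getName() to reference that field when building queries, fetching stored documents, or logging. The snippet below is a minimal sketch of that pattern, assuming a SolrCore is already available; the class and variable names are illustrative and not taken from any of the source files listed here.

import org.apache.solr.core.SolrCore;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.SchemaField;

public class UniqueKeyNameExample {
    /** Returns the name of the schema's unique key field, or null if no uniqueKey is configured. */
    public static String uniqueKeyFieldName(SolrCore core) {
        IndexSchema schema = core.getLatestSchema();        // current live schema for this core
        SchemaField keyField = schema.getUniqueKeyField();  // may be null if the schema defines no uniqueKey
        return keyField == null ? null : keyField.getName();
    }
}

The returned name is then typically used to fetch a stored field from a document (doc.getField(name)) or to build a TermQuery that excludes the matched document, as several of the handlers below do.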
From source file:com.github.le11.nls.solr.UIMAAsyncUpdateRequestProcessor.java
License:Apache License
@Override
public void processAdd(AddUpdateCommand cmd) throws IOException {
    String text = null;
    try {
        /* get Solr document */
        SolrInputDocument solrInputDocument = cmd.getSolrInputDocument();
        /* get the fields to analyze */
        String[] texts = getTextsToAnalyze(solrInputDocument);
        for (int i = 0; i < texts.length; i++) {
            text = texts[i];
            if (text != null && text.length() > 0) {
                /* process the text value */
                JCas jcas = UIMAAnalyzersUtils.getInstance()
                        .analyzeAsynchronously(new StringReader(text), solrUIMAConfiguration.getAePath())
                        .getJCas();
                UIMAToSolrMapper uimaToSolrMapper = new UIMAToSolrMapper(solrInputDocument, jcas);
                /* get field mapping from config */
                /* map type features on fields */
                for (String typeFQN : solrUIMAConfiguration.getTypesFeaturesFieldsMapping().keySet()) {
                    uimaToSolrMapper.map(typeFQN, solrUIMAConfiguration.getTypesFeaturesFieldsMapping().get(typeFQN));
                }
            }
        }
    } catch (Exception e) {
        String logField = solrUIMAConfiguration.getLogField();
        if (logField == null) {
            SchemaField uniqueKeyField = solrCore.getSchema().getUniqueKeyField();
            if (uniqueKeyField != null) {
                logField = uniqueKeyField.getName();
            }
        }
        String optionalFieldInfo = logField == null ? "."
                : new StringBuilder(". ").append(logField).append("=")
                        .append((String) cmd.getSolrInputDocument().getField(logField).getValue()).append(", ")
                        .toString();
        int len = Math.min(text.length(), 100);
        if (solrUIMAConfiguration.isIgnoreErrors()) {
            log.warn(new StringBuilder("skip the text processing due to ").append(e.getLocalizedMessage())
                    .append(optionalFieldInfo).append(" text=\"").append(text.substring(0, len)).append("...\"")
                    .toString());
        } else {
            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                    new StringBuilder("processing error: ").append(e.getLocalizedMessage())
                            .append(optionalFieldInfo).append(" text=\"").append(text.substring(0, len))
                            .append("...\"").toString(), e);
        }
    }
    super.processAdd(cmd);
}
From source file:com.grantingersoll.intell.clustering.KMeansClusteringEngine.java
License:Apache License
private void cluster(SolrIndexSearcher searcher, int k) {
    log.info("Clustering");
    // go and do the clustering. First, we need to export the fields
    SchemaField keyField = searcher.getSchema().getUniqueKeyField();
    // TODO: should we prevent overlaps here if there are too many commits? Clustering isn't something
    // that has to be fresh all the time and we likely can't sustain that anyway.
    if (keyField != null) { // we must have a key field
        // do this part synchronously here, and then spawn off a thread to do the clustering,
        // otherwise it will take too long
        String idName = keyField.getName();
        Weight weight = new TFIDF();
        SolrIndexReader reader = searcher.getReader();
        try {
            TermInfo termInfo = new CachedTermInfo(reader, "content", 1, 100);
            LuceneIterable li = new LuceneIterable(reader, idName, inputField,
                    new TFDFMapper(reader, weight, termInfo));
            Date now = new Date();
            String jobDir = clusterBaseDir.getAbsolutePath() + File.separator + "clusters-" + now.getTime();
            log.info("Dumping {} to {}", inputField, clusterBaseDir);
            File outFile = new File(jobDir, "index-" + inputField + ".vec");
            VectorWriter vectorWriter = getSeqFileWriter(outFile.getAbsolutePath());
            long numDocs = vectorWriter.write(li, Integer.MAX_VALUE);
            vectorWriter.close();
            log.info("Wrote: {} vectors", numDocs);
            File dictOutFile = new File(jobDir, "dict-" + inputField + ".txt");
            log.info("Dictionary Output file: {}", dictOutFile);
            BufferedWriter writer = new BufferedWriter(
                    new OutputStreamWriter(new FileOutputStream(dictOutFile), Charset.forName("UTF8")));
            JWriterTermInfoWriter tiWriter = new JWriterTermInfoWriter(writer, "\t", inputField);
            tiWriter.write(termInfo);
            tiWriter.close();
            writer.close();
            // OK, the dictionary is dumped, now we can cluster, do this via a thread in the background.
            // When it's done, we can switch to it
            ClusterJob clusterJob = new ClusterJob(k, jobDir, new Path(outFile.getAbsolutePath()),
                    new Path(jobDir + File.separator + "clusters"), new Path(jobDir + File.separator + "output"),
                    new Path(dictOutFile.getAbsolutePath()));
            writeJobDetails(clusterJob);
            theFuture = execService.submit(new ClusterCallable(clusterJob));
        } catch (IOException e) {
            log.error("Exception", e);
        }
    }
}
From source file:com.o19s.solr.swan.highlight.SwanHighlighter.java
License:Apache License
/**
 * Generates a list of Highlighted query fragments for each item in a list
 * of documents, or returns null if highlighting is disabled.
 *
 * @param docs query results
 * @param query the query
 * @param req the current request
 * @param defaultFields default list of fields to summarize
 *
 * @return NamedList containing a NamedList for each document, which in
 *         turn contains sets of (field, summary) pairs.
 */
@Override
@SuppressWarnings("unchecked")
public NamedList<Object> doHighlighting(DocList docs, Query query, SolrQueryRequest req, String[] defaultFields)
        throws IOException {
    NamedList fragments = new SimpleOrderedMap();
    SolrParams params = req.getParams();
    if (!isHighlightingEnabled(params))
        return null;

    SolrIndexSearcher searcher = req.getSearcher();
    IndexSchema schema = searcher.getSchema();
    String[] fieldNames = getHighlightFields(query, req, defaultFields);
    Set<String> fset = new HashSet<String>();
    { // pre-fetch documents using the Searcher's doc cache
        Collections.addAll(fset, fieldNames);
        // fetch unique key if one exists.
        SchemaField keyField = schema.getUniqueKeyField();
        if (null != keyField)
            fset.add(keyField.getName());
    }

    //CHANGE start
    // int[] docIds = new int[docs.swordize()];
    TreeSet<Integer> docIds = new TreeSet<Integer>();
    DocIterator iterator = docs.iterator();
    for (int i = 0; i < docs.size(); i++) {
        docIds.add(iterator.nextDoc());
    }

    // Get Frag list builder
    String fragListBuilderString = params.get(HighlightParams.FRAG_LIST_BUILDER).toLowerCase();
    FragListBuilder fragListBuilder;
    if (fragListBuilderString.equals("single")) {
        fragListBuilder = new SingleFragListBuilder();
    } else {
        fragListBuilder = new com.o19s.solr.swan.highlight.SimpleFragListBuilder();
    }

    // get FastVectorHighlighter instance out of the processing loop
    SpanAwareFastVectorHighlighter safvh = new SpanAwareFastVectorHighlighter(
            // FVH cannot process hl.usePhraseHighlighter parameter per-field basis
            params.getBool(HighlightParams.USE_PHRASE_HIGHLIGHTER, true),
            // FVH cannot process hl.requireFieldMatch parameter per-field basis
            params.getBool(HighlightParams.FIELD_MATCH, false),
            fragListBuilder,
            //new com.o19s.solr.swan.highlight.ScoreOrderFragmentsBuilder(),
            new WordHashFragmentsBuilder(),
            // List of docIds to filter spans
            docIds);
    safvh.setPhraseLimit(params.getInt(HighlightParams.PHRASE_LIMIT, Integer.MAX_VALUE));
    SpanAwareFieldQuery fieldQuery = safvh.getFieldQuery(query, searcher.getIndexReader(), docIds);

    // Highlight each document
    for (int docId : docIds) {
        Document doc = searcher.doc(docId, fset);
        NamedList docSummaries = new SimpleOrderedMap();
        for (String fieldName : fieldNames) {
            fieldName = fieldName.trim();
            if (useFastVectorHighlighter(params, schema, fieldName))
                doHighlightingByFastVectorHighlighter(safvh, fieldQuery, req, docSummaries, docId, doc, fieldName);
            else
                doHighlightingByHighlighter(query, req, docSummaries, docId, doc, fieldName);
        }
        String printId = schema.printableUniqueKey(doc);
        fragments.add(printId == null ? null : printId, docSummaries);
    }
    //CHANGE end
    return fragments;
}
From source file:com.search.MySearchHandler.java
License:Apache License
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    // int sleep = req.getParams().getInt("sleep",0);
    // if (sleep > 0) {log.error("SLEEPING for " + sleep);
    // Thread.sleep(sleep);}

    /*** Pre-processing of the Query by REGEX starts here -------------- ***/
    SolrParams originalParams = req.getOriginalParams();
    SolrParams psuedoParams = req.getParams(); // These psuedoParams keep changing
    if (originalParams.get(CommonParams.Q) != null) {
        String finalQuery;
        String originalQuery = originalParams.get(CommonParams.Q);
        rsp.add("Original query", originalQuery);

        SchemaField keyField = null;
        keyField = req.getCore().getLatestSchema().getUniqueKeyField();
        if (keyField != null) {
            fieldSet.add(keyField.getName());
        }

        /***
         * START CODE TO PARSE QUERY
         *
         * Arafath's code to prepare query starts here The query should be
         * in the following format ->
         *
         * Example : Original Query:
         * "Which musical object did russ conway play" Temporary Query :
         * "relations:instrument AND entity:enty" // Generate the relation
         * Final Query : "name:"russ conway" AND occupation:musician"
         */
        ParsedQuestion parsedq = new ParsedQuestion();
        parsedq = parseQues(originalQuery);
        if (parsedq != null) {
            System.out.println(parsedq);
            Map<Integer, String> relationstr = getRelation(parsedq.getRelationKeyWord(), parsedq.getWhtype(), req);
            /**
             * END CODE TO PARSE QUERY
             */

            /*** Final Phase starts here ***/
            finalQuery = "title:\"" + parsedq.getSearchName() + "\"";
            NamedList finalparamsList = req.getParams().toNamedList();
            finalparamsList.setVal(finalparamsList.indexOf(CommonParams.Q, 0), finalQuery);
            psuedoParams = SolrParams.toSolrParams(finalparamsList);
            if (psuedoParams.get(CommonParams.SORT) == null) {
                finalparamsList.add(CommonParams.SORT, "score desc");
            } else {
                finalparamsList.setVal(finalparamsList.indexOf(CommonParams.SORT, 0), "score desc");
            }

            int documentsRetrieved = 0;
            if (relationstr != null) {
                rsp.add("total relations retrieved", relationstr.size());
                rsp.add("relations", relationstr);
                NamedList finaldocresults = new NamedList();
                NamedList forwarddocresults = new NamedList();
                Set<String> checkDocuments = new HashSet<String>();

                for (int i = 0; i < relationstr.size() && (documentsRetrieved < 10); i++) {
                    NamedList relationdocresults = new NamedList();
                    String desiredField = relationstr.get(i);
                    Set<String> tempFieldSet = new HashSet<String>();
                    int docsRetrievedforThisRelation = 0;
                    tempFieldSet.add(desiredField);
                    psuedoParams = SolrParams.toSolrParams(finalparamsList);
                    if (psuedoParams.get(CommonParams.FL) == null) {
                        finalparamsList.add(CommonParams.FL, desiredField);
                    } else {
                        finalparamsList.setVal(finalparamsList.indexOf(CommonParams.FL, 0), desiredField);
                    }

                    SolrQueryRequest finalreq = new LocalSolrQueryRequest(req.getCore(), finalparamsList);
                    rsp.add("Final Query", finalreq.getParams().get(CommonParams.Q));
                    SolrQueryResponse finalrsp = new SolrQueryResponse();
                    ResponseBuilder finalrb = new ResponseBuilder(finalreq, finalrsp, components);
                    for (SearchComponent c : components) {
                        c.prepare(finalrb);
                        c.process(finalrb);
                    }

                    DocList finaldocs = finalrb.getResults().docList;
                    if (finaldocs == null || finaldocs.size() == 0) {
                        log.debug("No results"); // support for reverse query
                    } else {
                        DocIterator finaliterator = finaldocs.iterator();
                        Document finaldoc;
                        for (int j = 0; j < finaldocs.size(); j++) {
                            try {
                                if (finaliterator.hasNext()) {
                                    int finaldocid = finaliterator.nextDoc();
                                    finaldoc = finalrb.req.getSearcher().doc(finaldocid, tempFieldSet);
                                    if (!checkDocuments.contains(finaldoc.get("id"))) {
                                        if (finaldoc.get(desiredField) != null) {
                                            checkDocuments.add(finaldoc.get("id"));
                                            docsRetrievedforThisRelation++;
                                            documentsRetrieved++;
                                            relationdocresults.add(finaldoc.get("title"), finaldoc);
                                            if (documentsRetrieved >= 10) {
                                                break;
                                            }
                                        }
                                    }
                                }
                            } catch (IOException ex) {
                                java.util.logging.Logger.getLogger(MySearchHandler.class.getName())
                                        .log(Level.SEVERE, null, ex);
                            }
                        }
                        if (docsRetrievedforThisRelation > 0) {
                            rsp.add("docs retrieved for : " + desiredField, docsRetrievedforThisRelation);
                            forwarddocresults.add(desiredField, relationdocresults);
                        }
                    }
                    finalreq.close();
                    if (documentsRetrieved > 0) {
                        rsp.add("type", "forward");
                        rsp.add("final results", forwarddocresults);
                    }
                }

                if (documentsRetrieved == 0) {
                    NamedList reversedocresults = new NamedList();
                    relationstr = getRelationReverse(parsedq.getRelationKeyWord(), req);
                    System.out.println(relationstr);
                    StandardAnalyzer analyzer = new StandardAnalyzer(Version.LUCENE_46);
                    String reversequery = "";
                    for (int i = 0; i < relationstr.size(); i++) {
                        QueryParser relationsparser = new QueryParser(Version.LUCENE_46, relationstr.get(i), analyzer);
                        try {
                            reversequery += relationsparser.parse(parsedq.getSearchName()).toString() + " ";
                        } catch (ParseException e) {
                            e.printStackTrace();
                        }
                    }
                    QueryParser relationsparser = new QueryParser(Version.LUCENE_46, "infotype", analyzer);
                    reversequery += relationsparser.parse(parsedq.getWhtype().firstKey().toLowerCase());

                    NamedList reverseList = req.getParams().toNamedList();
                    psuedoParams = SolrParams.toSolrParams(reverseList);
                    reverseList.setVal(reverseList.indexOf(CommonParams.Q, 0), reversequery);
                    SolrQueryRequest reversereq = new LocalSolrQueryRequest(req.getCore(), reverseList);
                    SolrQueryResponse reversersp = new SolrQueryResponse();
                    ResponseBuilder reverserb = new ResponseBuilder(reversereq, reversersp, components);
                    for (SearchComponent c : components) {
                        try {
                            c.prepare(reverserb);
                            c.process(reverserb);
                        } catch (IOException ex) {
                            java.util.logging.Logger.getLogger(MySearchHandler.class.getName())
                                    .log(Level.SEVERE, null, ex);
                        }
                    }

                    DocList reversedocs = reverserb.getResults().docList;
                    if (reversedocs == null || reversedocs.size() == 0) {
                        log.debug("No results"); // GET SECOND entry from WHTYPE .. search with that ..
                    } else {
                        // NamedList docresults = new NamedList();
                        DocIterator reverseiterator = reversedocs.iterator();
                        Document reversedoc;
                        int docScore = 0;
                        for (int m = 0; m < reversedocs.size(); m++) {
                            try {
                                int reversedocid = reverseiterator.nextDoc();
                                reversedoc = reverserb.req.getSearcher().doc(reversedocid, fieldSet);
                                if (reversedoc.get("title") != null) {
                                    documentsRetrieved++;
                                    reversedocresults.add(reversedoc.get("title"), reversedoc);
                                    if (documentsRetrieved >= 10) {
                                        break;
                                    }
                                }
                            } catch (IOException ex) {
                                java.util.logging.Logger.getLogger(MySearchHandler.class.getName())
                                        .log(Level.SEVERE, null, ex);
                            }
                        }
                    }
                    if (documentsRetrieved == 0) {
                        rsp.add("message", "No Results found. Try another query!");
                    } else {
                        rsp.add("type", "reverse");
                        rsp.add("final results", reversedocresults);
                    }
                    reversereq.close();
                }
            } else {
                if (documentsRetrieved == 0) {
                    rsp.add("message", "No Results found. Please rephrase the query!");
                }
            }
        } else {
            rsp.add("message", "This is not a valid query!");
        }
    } else {
        rsp.add("message", "User should provide at least one word as a query!");
    }
}
From source file:com.searchbox.solr.SenseLikeThisHandler.java
License:Apache License
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    NamedList<Object> timinginfo = new NamedList<Object>();
    numRequests++;
    long startTime = System.currentTimeMillis();
    long lstartTime = System.currentTimeMillis();
    if (!keystate) {
        LOGGER.error(
                "License key failure, not performing sense query. Please email contact@searchbox.com for more information.");
        return;
    }
    boolean fromcache = false;
    try {
        SolrParams params = req.getParams();
        int start = params.getInt(CommonParams.START, 0);
        int rows = params.getInt(CommonParams.ROWS, 10);

        HashSet<String> toIgnore = (new HashSet<String>());
        toIgnore.add("start");
        toIgnore.add("rows");
        toIgnore.add("fl");
        toIgnore.add("wt");
        toIgnore.add("indent");
        SolrCacheKey key = new SolrCacheKey(params, toIgnore);

        // Set field flags
        ReturnFields returnFields = new SolrReturnFields(req);
        rsp.setReturnFields(returnFields);
        int flags = 0;
        if (returnFields.wantsScore()) {
            flags |= SolrIndexSearcher.GET_SCORES;
        }

        String defType = params.get(QueryParsing.DEFTYPE, QParserPlugin.DEFAULT_QTYPE);
        String q = params.get(CommonParams.Q);
        Query query = null;
        QueryReductionFilter qr = null;
        SortSpec sortSpec = null;
        List<Query> filters = new ArrayList<Query>();
        try {
            if (q != null) {
                QParser parser = QParser.getParser(q, defType, req);
                query = parser.getQuery();
                sortSpec = parser.getSort(true);
            }
            String[] fqs = req.getParams().getParams(CommonParams.FQ);
            if (fqs != null && fqs.length != 0) {
                for (String fq : fqs) {
                    if (fq != null && fq.trim().length() != 0) {
                        QParser fqp = QParser.getParser(fq, null, req);
                        filters.add(fqp.getQuery());
                    }
                }
            }
        } catch (Exception e) {
            numErrors++;
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
        }
        timinginfo.add("Parse Query time", System.currentTimeMillis() - lstartTime);
        LOGGER.debug("Parsed Query Time:\t" + (System.currentTimeMillis() - lstartTime));
        lstartTime = System.currentTimeMillis();

        SolrIndexSearcher searcher = req.getSearcher();
        SchemaField uniqueKeyField = searcher.getSchema().getUniqueKeyField();

        // Parse Required Params
        // This will either have a single Reader or valid query
        // Find documents SenseLikeThis - either with a reader or a query
        // --------------------------------------------------------------------------------
        SenseQuery slt = null;
        if (q == null) {
            numErrors++;
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                    "SenseLikeThis requires either a query (?q=) or text to find similar documents.");
        }

        // Matching options
        boolean includeMatch = params.getBool(MoreLikeThisParams.MATCH_INCLUDE, true);
        int matchOffset = params.getInt(MoreLikeThisParams.MATCH_OFFSET, 0);
        // Find the base match
        DocList match = searcher.getDocList(query, null, null, matchOffset, 1, flags); // only get the first one...
        if (includeMatch) {
            rsp.add("match", match);
        }

        DocIterator iterator = match.iterator();
        if (!iterator.hasNext()) {
            numErrors++;
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                    "SenseLikeThis no document found matching request.");
        }
        int id = iterator.nextDoc();
        timinginfo.add("Find Query Doc", System.currentTimeMillis() - lstartTime);
        LOGGER.debug("Find Query Doc:\t" + (System.currentTimeMillis() - lstartTime));
        lstartTime = System.currentTimeMillis();

        SolrCache sc = searcher.getCache("com.searchbox.sltcache");
        DocListAndSet sltDocs = null;
        if (sc != null) {
            // try to get from cache
            sltDocs = (DocListAndSet) sc.get(key.getSet());
        } else {
            LOGGER.error("com.searchbox.sltcache not defined, can't cache slt queries");
        }
        sltDocs = (DocListAndSet) sc.get(key.getSet());
        if (start + rows > 1000 || sltDocs == null || !params.getBool(CommonParams.CACHE, true)) {
            // not in cache, need to do search
            BooleanQuery bq = new BooleanQuery();
            Document doc = searcher.getIndexReader().document(id);
            bq.add(new TermQuery(new Term(uniqueKeyField.getName(),
                    uniqueKeyField.getType().storedToIndexed(doc.getField(uniqueKeyField.getName())))),
                    BooleanClause.Occur.MUST_NOT);
            filters.add(bq);

            String[] senseFields = splitList
                    .split(params.get(SenseParams.SENSE_FIELD, SenseParams.DEFAULT_SENSE_FIELD));
            String senseField = (senseFields[0] != null) ? senseFields[0] : SenseParams.DEFAULT_SENSE_FIELD;
            // TODO more intelligent handling of multiple fields, can probably do a boolean junction of
            // multiple sensequeries, but this will be slow
            long maxlength = -1;
            for (String possibleField : senseFields) {
                try {
                    long flength = doc.getField(possibleField).stringValue().length();
                    if (flength > maxlength) {
                        senseField = possibleField;
                        maxlength = flength;
                    }
                } catch (Exception e) {
                    System.out.println("Error: " + e.getMessage());
                }
            }
            LOGGER.debug("Using sense field :\t" + (senseField));

            String CKBid = params.get(SenseParams.SENSE_CKB, SenseParams.SENSE_CKB_DEFAULT);
            RealTermFreqVector rtv = new RealTermFreqVector(id, searcher.getIndexReader(), senseField);
            timinginfo.add("Make real term freq vector", System.currentTimeMillis() - lstartTime);
            lstartTime = System.currentTimeMillis();

            qr = new QueryReductionFilter(rtv, CKBid, searcher, senseField);
            qr.setNumtermstouse(params.getInt(SenseParams.SENSE_QR_NTU, SenseParams.SENSE_QR_NTU_DEFAULT));
            qr.setThreshold(params.getInt(SenseParams.SENSE_QR_THRESH, SenseParams.SENSE_QR_THRESH_DEFAULT));
            qr.setMaxDocSubSet(params.getInt(SenseParams.SENSE_QR_MAXDOC, SenseParams.SENSE_QR_MAXDOC_DEFAULT));
            qr.setMinDocSetSizeForFilter(
                    params.getInt(SenseParams.SENSE_MINDOC4QR, SenseParams.SENSE_MINDOC4QR_DEFAULT));
            numTermsUsed += qr.getNumtermstouse();
            numTermsConsidered += rtv.getSize();
            timinginfo.add("Setup SLT query", System.currentTimeMillis() - lstartTime);
            LOGGER.debug("Setup SLT query:\t" + (System.currentTimeMillis() - lstartTime));
            lstartTime = System.currentTimeMillis();

            DocList subFiltered = qr.getSubSetToSearchIn(filters);
            timinginfo.add("Do Query Redux", System.currentTimeMillis() - lstartTime);
            LOGGER.debug("Do query redux:\t" + (System.currentTimeMillis() - lstartTime));
            lstartTime = System.currentTimeMillis();

            numFiltered += qr.getFiltered().docList.size();
            numSubset += subFiltered.size();
            LOGGER.info("Number of documents to search:\t" + subFiltered.size());

            slt = new SenseQuery(rtv, senseField, CKBid,
                    params.getFloat(SenseParams.SENSE_WEIGHT, SenseParams.DEFAULT_SENSE_WEIGHT), null);
            LOGGER.debug("Setup sense query:\t" + (System.currentTimeMillis() - lstartTime));
            timinginfo.add("Setup sense query", System.currentTimeMillis() - lstartTime);
            lstartTime = System.currentTimeMillis();

            sltDocs = searcher.getDocListAndSet(slt, subFiltered, Sort.RELEVANCE, 0, 1000, flags);
            timinginfo.add("Do sense query", System.currentTimeMillis() - lstartTime);
            lstartTime = System.currentTimeMillis();

            LOGGER.debug("Adding this keyto cache:\t" + key.getSet().toString());
            searcher.getCache("com.searchbox.sltcache").put(key.getSet(), sltDocs);
        } else {
            fromcache = true;
            timinginfo.add("Getting from cache", System.currentTimeMillis() - lstartTime);
            LOGGER.debug("Got result from cache");
            lstartTime = System.currentTimeMillis();
        }

        if (sltDocs == null) {
            numEmpty++;
            sltDocs = new DocListAndSet(); // avoid NPE
        }
        rsp.add("response", sltDocs.docList.subset(start, rows));

        // maybe facet the results
        if (params.getBool(FacetParams.FACET, false)) {
            if (sltDocs.docSet == null) {
                rsp.add("facet_counts", null);
            } else {
                SimpleFacets f = new SimpleFacets(req, sltDocs.docSet, params);
                rsp.add("facet_counts", f.getFacetCounts());
            }
        }
        timinginfo.add("Facet parts", System.currentTimeMillis() - lstartTime);
        LOGGER.debug("Facet parts:\t" + (System.currentTimeMillis() - lstartTime));

        // Debug info, not doing it for the moment.
        boolean dbg = req.getParams().getBool(CommonParams.DEBUG_QUERY, false);
        boolean dbgQuery = false, dbgResults = false;
        if (dbg == false) { // if it's true, we are doing everything anyway.
            String[] dbgParams = req.getParams().getParams(CommonParams.DEBUG);
            if (dbgParams != null) {
                for (int i = 0; i < dbgParams.length; i++) {
                    if (dbgParams[i].equals(CommonParams.QUERY)) {
                        dbgQuery = true;
                    } else if (dbgParams[i].equals(CommonParams.RESULTS)) {
                        dbgResults = true;
                    }
                }
            }
        } else {
            dbgQuery = true;
            dbgResults = true;
        }

        // Copied from StandardRequestHandler... perhaps it should be added to doStandardDebug?
        if (dbg == true) {
            try {
                lstartTime = System.currentTimeMillis();
                NamedList<Object> dbgInfo = SolrPluginUtils.doStandardDebug(req, q, slt,
                        sltDocs.docList.subset(start, rows), dbgQuery, dbgResults);
                dbgInfo.add("Query freqs", slt.getAllTermsasString());
                if (null != dbgInfo) {
                    if (null != filters) {
                        dbgInfo.add("filter_queries", req.getParams().getParams(CommonParams.FQ));
                        List<String> fqs = new ArrayList<String>(filters.size());
                        for (Query fq : filters) {
                            fqs.add(QueryParsing.toString(fq, req.getSchema()));
                        }
                        dbgInfo.add("parsed_filter_queries", fqs);
                    }
                    if (null != qr) {
                        dbgInfo.add("QueryReduction", qr.getDbgInfo());
                    }
                    if (null != slt) {
                        dbgInfo.add("SLT", slt.getDbgInfo());
                    }
                    dbgInfo.add("fromcache", fromcache);
                    rsp.add("debug", dbgInfo);
                    timinginfo.add("Debugging parts", System.currentTimeMillis() - lstartTime);
                    dbgInfo.add("timings", timinginfo);
                }
            } catch (Exception e) {
                SolrException.log(SolrCore.log, "Exception during debug", e);
                rsp.add("exception_during_debug", SolrException.toStr(e));
            }
        }
    } catch (Exception e) {
        numErrors++;
        e.printStackTrace();
    } finally {
        totalTime += System.currentTimeMillis() - startTime;
    }
}
From source file:com.searchbox.solr.SenseLikeThisHandlerNoReduction.java
License:Apache License
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    SolrParams params = req.getParams();

    if (!keystate) {
        LOGGER.error(
                "License key failure, not performing sense query. Please email contact@searchbox.com for more information.");
        return;
    }

    int docID;
    // Set field flags
    ReturnFields returnFields = new SolrReturnFields(req);
    rsp.setReturnFields(returnFields);
    int flags = 0;
    if (returnFields.wantsScore()) {
        flags |= SolrIndexSearcher.GET_SCORES;
    }

    String defType = params.get(QueryParsing.DEFTYPE, QParserPlugin.DEFAULT_QTYPE);
    String q = params.get(CommonParams.Q);
    Query query = null;
    SortSpec sortSpec = null;
    List<Query> filters = new ArrayList<Query>();

    try {
        if (q != null) {
            QParser parser = QParser.getParser(q, defType, req);
            query = parser.getQuery();
            sortSpec = parser.getSort(true);
        }
        String[] fqs = req.getParams().getParams(CommonParams.FQ);
        if (fqs != null && fqs.length != 0) {
            for (String fq : fqs) {
                if (fq != null && fq.trim().length() != 0) {
                    QParser fqp = QParser.getParser(fq, null, req);
                    filters.add(fqp.getQuery());
                }
            }
        }
    } catch (Exception e) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
    }

    SolrIndexSearcher searcher = req.getSearcher();
    SchemaField uniqueKeyField = searcher.getSchema().getUniqueKeyField();
    DocListAndSet sltDocs = null;

    // Parse Required Params
    // This will either have a single Reader or valid query
    Reader reader = null;
    try {
        if (q == null || q.trim().length() < 1) {
            Iterable<ContentStream> streams = req.getContentStreams();
            if (streams != null) {
                Iterator<ContentStream> iter = streams.iterator();
                if (iter.hasNext()) {
                    reader = iter.next().getReader();
                }
                if (iter.hasNext()) {
                    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                            "SenseLikeThis does not support multiple ContentStreams");
                }
            }
        }

        int start = params.getInt(CommonParams.START, 0);
        int rows = params.getInt(CommonParams.ROWS, 10);

        // Find documents SenseLikeThis - either with a reader or a query
        // --------------------------------------------------------------------------------
        SenseQuery slt = null;
        if (reader != null) {
            throw new RuntimeException("SLT based on a reader is not yet implemented");
        } else if (q != null) {
            // Matching options
            boolean includeMatch = params.getBool(MoreLikeThisParams.MATCH_INCLUDE, true);
            int matchOffset = params.getInt(MoreLikeThisParams.MATCH_OFFSET, 0);
            // Find the base match
            DocList match = searcher.getDocList(query, null, null, matchOffset, 1, flags); // only get the first one...
            if (includeMatch) {
                rsp.add("match", match);
            }

            // Get docID
            DocIterator iterator = match.iterator();
            docID = iterator.nextDoc();

            BooleanQuery bq = new BooleanQuery();
            Document doc = searcher.getIndexReader().document(docID);
            bq.add(new TermQuery(new Term(uniqueKeyField.getName(),
                    uniqueKeyField.getType().storedToIndexed(doc.getField(uniqueKeyField.getName())))),
                    BooleanClause.Occur.MUST_NOT);
            filters.add(bq);
        } else {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                    "SenseLikeThis requires either a query (?q=) or text to find similar documents.");
        }

        String CKBid = params.get(SenseParams.SENSE_CKB, SenseParams.SENSE_CKB_DEFAULT);
        String senseField = params.get(SenseParams.SENSE_FIELD, SenseParams.DEFAULT_SENSE_FIELD);

        slt = new SenseQuery(new RealTermFreqVector(docID, searcher.getIndexReader(), senseField), senseField,
                CKBid, params.getFloat(SenseParams.SENSE_WEIGHT, SenseParams.DEFAULT_SENSE_WEIGHT), null);

        // Execute the SLT query
        //DocSet filtered = searcher.getDocSet(filters);
        //System.out.println("Number of documents to search:\t" + filtered.size());
        //sltDocs = searcher.getDocListAndSet(slt, filtered, Sort.RELEVANCE, start, rows, flags);
        sltDocs = searcher.getDocListAndSet(slt, filters, Sort.RELEVANCE, start, rows, flags);
    } finally {
        if (reader != null) {
            reader.close();
        }
    }

    if (sltDocs == null) {
        sltDocs = new DocListAndSet(); // avoid NPE
    }
    rsp.add("response", sltDocs.docList);

    // maybe facet the results
    if (params.getBool(FacetParams.FACET, false)) {
        if (sltDocs.docSet == null) {
            rsp.add("facet_counts", null);
        } else {
            SimpleFacets f = new SimpleFacets(req, sltDocs.docSet, params);
            rsp.add("facet_counts", f.getFacetCounts());
        }
    }

    // Debug info, not doing it for the moment.
    boolean dbg = req.getParams().getBool(CommonParams.DEBUG_QUERY, false);
    boolean dbgQuery = false, dbgResults = false;
    if (dbg == false) { // if it's true, we are doing everything anyway.
        String[] dbgParams = req.getParams().getParams(CommonParams.DEBUG);
        if (dbgParams != null) {
            for (int i = 0; i < dbgParams.length; i++) {
                if (dbgParams[i].equals(CommonParams.QUERY)) {
                    dbgQuery = true;
                } else if (dbgParams[i].equals(CommonParams.RESULTS)) {
                    dbgResults = true;
                }
            }
        }
    } else {
        dbgQuery = true;
        dbgResults = true;
    }

    // Copied from StandardRequestHandler... perhaps it should be added to doStandardDebug?
    if (dbg == true) {
        try {
            NamedList<Object> dbgInfo = SolrPluginUtils.doStandardDebug(req, q, query, sltDocs.docList, dbgQuery,
                    dbgResults);
            if (null != dbgInfo) {
                if (null != filters) {
                    dbgInfo.add("filter_queries", req.getParams().getParams(CommonParams.FQ));
                    List<String> fqs = new ArrayList<String>(filters.size());
                    for (Query fq : filters) {
                        fqs.add(QueryParsing.toString(fq, req.getSchema()));
                    }
                    dbgInfo.add("parsed_filter_queries", fqs);
                }
                rsp.add("debug", dbgInfo);
            }
        } catch (Exception e) {
            SolrException.log(SolrCore.log, "Exception during debug", e);
            rsp.add("exception_during_debug", SolrException.toStr(e));
        }
    }
}
From source file:com.searchbox.TaggerComponent.java
License:Apache License
private NamedList doDocuments(ResponseBuilder rb, SolrParams params, SolrIndexSearcher searcher, int lcount) {
    /*-----------------*/
    String[] localfields = params.getParams(TaggerComponentParams.QUERY_FIELDS);
    String[] fields = null;
    if (gfields != null) {
        fields = gfields;
    }
    if (localfields != null) {
        fields = localfields;
    }
    if (fields == null) {
        LOGGER.error("Fields aren't defined, not performing tagging.");
        return null;
    }

    DocList docs = rb.getResults().docList;
    if (docs == null || docs.size() == 0) {
        LOGGER.debug("No results");
    }
    LOGGER.debug("Doing This many docs:\t" + docs.size());

    Set<String> fset = new HashSet<String>();
    SchemaField keyField = rb.req.getCore().getSchema().getUniqueKeyField();
    if (null != keyField) {
        fset.add(keyField.getName());
    }
    for (String field : fields) {
        fset.add(field);
    }

    NamedList response = new SimpleOrderedMap();
    DocIterator iterator = docs.iterator();
    for (int i = 0; i < docs.size(); i++) {
        try {
            int docId = iterator.nextDoc();
            Document doc = searcher.doc(docId, fset);
            StringBuilder sb = new StringBuilder();
            for (String field : fields) {
                IndexableField[] multifield = doc.getFields(field);
                for (IndexableField singlefield : multifield) {
                    sb.append(singlefield.stringValue() + ". ");
                }
            }
            String q = sb.toString();
            String id = doc.getField(keyField.getName()).stringValue();
            // do work here
            TaggerResultSet trs = dfb.tagText(q, lcount);
            NamedList docresponse = new SimpleOrderedMap();
            for (TaggerResult tr : trs.suggestions) {
                docresponse.add(tr.suggestion, tr.score);
            }
            response.add(id, docresponse);
        } catch (IOException ex) {
            java.util.logging.Logger.getLogger(TaggerComponent.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
    // response.add(suggestion.suggestion, suggestion.probability);
    return response;
}
From source file:com.sindicetech.siren.solr.handler.mapper.FieldMappersHandler.java
License:Open Source License
private void addSchemaField(FieldMapper mapper, FieldEntry entry) {
    if (!core.getLatestSchema().isMutable()) {
        final String message = "This IndexSchema is not mutable.";
        throw new SolrException(BAD_REQUEST, message);
    }
    for (;;) {
        final IndexSchema oldSchema = core.getLatestSchema();
        if (oldSchema.getFieldTypeNoEx(mapper.getTargetFieldname(entry)) != null) {
            return; // the field already exists in the schema
        }
        try {
            SchemaField field = mapper.getSchemaField(core.getLatestSchema(), entry);
            IndexSchema newSchema = oldSchema.addField(field);
            if (newSchema != null) {
                core.setLatestSchema(newSchema);
                logger.debug("Successfully added field '{}' to the schema.", field.getName());
                return; // success - exit from the retry loop
            } else {
                throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Failed to add fields.");
            }
        } catch (ManagedIndexSchema.FieldExistsException e) {
            logger.debug("The field to be added already exists in the schema - retrying.");
            // No action: at least one field to be added already exists in the schema, so retry
            // We should never get here, since oldSchema.getFieldTypeNoEx(field) will exclude already existing fields
        } catch (ManagedIndexSchema.SchemaChangedInZkException e) {
            logger.debug("Schema changed while processing request - retrying.");
        }
    }
}
From source file:com.sindicetech.siren.solr.schema.ExtendedJsonField.java
License:Open Source License
@Override
public IndexableField createField(final SchemaField field, final Object value, final float boost) {
    if (!field.indexed()) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                "ExtendedJsonField instances must be indexed: " + field.getName());
    }
    if (field.multiValued()) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                "ExtendedJsonField instances can not be multivalued: " + field.getName());
    }
    if (!field.omitNorms()) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                "ExtendedJsonField instances must omit norms: " + field.getName());
    }
    if (field.omitTermFreqAndPositions()) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                "ExtendedJsonField instances must not omit term " + "frequencies and positions: " + field.getName());
    }
    if (field.omitPositions()) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                "ExtendedJsonField instances must not omit term " + "positions: " + field.getName());
    }
    if (field.storeTermVector()) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                "ExtendedJsonField instances can not store term vectors: " + field.getName());
    }
    return super.createField(field, value, boost);
}
From source file:com.sindicetech.siren.solr.schema.ExtendedJsonField.java
License:Open Source License
@Override
public SortField getSortField(final SchemaField field, final boolean reverse) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
            "Unsupported operation. Can not sort on SIREn field: " + field.getName());
}