List of usage examples for org.apache.lucene.search IndexSearcher doc
public Document doc(int docID) throws IOException
.getIndexReader().document(docID)
From source file:di.uniba.it.tri.shell.Command.java
License:Open Source License
private void sset(String command) throws Exception { String[] split = command.split("\\s+"); if (split.length > 3) { if (reader == null) { throw new Exception("no index in memory"); } else {/* ww w . j av a2 s . c om*/ Set<String> set = setmap.get(split[1]); if (set == null) { throw new Exception("no set for: " + split[1]); } if (!split[2].matches("[0-9]+")) { throw new Exception("no valid number of results"); } StringBuilder qs = new StringBuilder(); for (int i = 3; i < split.length; i++) { qs.append(split[i]).append(" "); } //String q = QueryParser.escape(qs.toString().trim()); Query query = parser.parse(qs.toString().trim()); IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(query, Integer.parseInt(split[2])); for (ScoreDoc scoreDoc : topDocs.scoreDocs) { TriShell.print("add to " + split[1] + "\t"); String word = searcher.doc(scoreDoc.doc).get("word"); TriShell.print(word); TriShell.print("\t"); TriShell.println(String.valueOf(scoreDoc.score)); set.add(word); } } } else { throw new Exception("sset syntax error"); } }
From source file:dk.defxws.fgslucene.Statement.java
License:Open Source License
public ResultSet executeQuery(IndexSearcher searcher, String queryString, int startRecord, int maxResults, int snippetsMax, int fieldMaxLength, Analyzer analyzer, String defaultQueryFields, boolean allowLeadingWildcard, boolean lowercaseExpandedTerms, String indexPath, String indexName, String snippetBegin, String snippetEnd, String sortFields) throws GenericSearchException { if (logger.isDebugEnabled()) logger.debug("executeQuery" + " query=" + queryString + " startRecord=" + startRecord + " maxResults=" + maxResults + " snippetsMax=" + snippetsMax + " fieldMaxLength=" + fieldMaxLength + " indexName=" + indexName + " sortFields=" + sortFields + " defaultQueryFields=" + defaultQueryFields + " allowLeadingWildcard=" + allowLeadingWildcard + " lowercaseExpandedTerms=" + lowercaseExpandedTerms); this.searcher = searcher; ResultSet rs = null;//from w ww . java2 s. com StringTokenizer defaultFieldNames = new StringTokenizer(defaultQueryFields); int countFields = defaultFieldNames.countTokens(); String[] defaultFields = new String[countFields]; for (int i = 0; i < countFields; i++) { defaultFields[i] = defaultFieldNames.nextToken(); } Query query = null; if (defaultFields.length == 1) { QueryParser queryParser = new QueryParser(Version.LUCENE_36, defaultFields[0], analyzer); queryParser.setAllowLeadingWildcard(allowLeadingWildcard); queryParser.setLowercaseExpandedTerms(lowercaseExpandedTerms); if (logger.isDebugEnabled()) logger.debug("executeQuery queryParser" + " allowLeadingWildcard=" + queryParser.getAllowLeadingWildcard() + " lowercaseExpandedTerms=" + queryParser.getLowercaseExpandedTerms()); try { query = queryParser.parse(queryString); } catch (ParseException e) { throw new GenericSearchException(e.toString()); } } else { MultiFieldQueryParser queryParser = new MultiFieldQueryParser(Version.LUCENE_36, defaultFields, analyzer); queryParser.setAllowLeadingWildcard(allowLeadingWildcard); queryParser.setLowercaseExpandedTerms(lowercaseExpandedTerms); if 
(logger.isDebugEnabled()) logger.debug("executeQuery mfqueryParser" + " allowLeadingWildcard=" + queryParser.getAllowLeadingWildcard() + " lowercaseExpandedTerms=" + queryParser.getLowercaseExpandedTerms()); try { query = queryParser.parse(queryString); } catch (ParseException e) { throw new GenericSearchException(e.toString()); } } if (logger.isDebugEnabled()) logger.debug("executeQuery after parse query=" + query); try { query.rewrite(searcher.getIndexReader()); } catch (Exception e) { throw new GenericSearchException(e.toString()); } if (logger.isDebugEnabled()) logger.debug("executeQuery after rewrite query=" + query); int start = Integer.parseInt(Integer.toString(startRecord)); TopDocs hits = getHits(query, start + maxResults - 1, sortFields); ScoreDoc[] docs = hits.scoreDocs; int end = Math.min(hits.totalHits, start + maxResults - 1); if (logger.isDebugEnabled()) logger.debug("executeQuery hits.totalHits=" + hits.totalHits); StringBuffer resultXml = new StringBuffer(); resultXml.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"); String queryStringEncoded = null; try { queryStringEncoded = URLEncoder.encode(queryString, "UTF-8"); } catch (UnsupportedEncodingException e) { errorExit(e.toString()); } resultXml.append("<lucenesearch " + " xmlns:dc=\"http://purl.org/dc/elements/1.1/" + "\" query=\"" + queryStringEncoded + "\" indexName=\"" + indexName + "\" sortFields=\"" + sortFields + "\" hitPageStart=\"" + startRecord + "\" hitPageSize=\"" + maxResults + "\" hitTotal=\"" + hits.totalHits + "\">"); ScoreDoc hit = null; Document doc = null; String hitsScore = null; for (int i = start; i <= end; i++) { try { hit = docs[i - 1]; doc = searcher.doc(hit.doc); hitsScore = "" + hit.score; } catch (CorruptIndexException e) { errorExit(e.toString()); } catch (IOException e) { errorExit(e.toString()); } resultXml.append("<hit no=\"" + i + "\" score=\"" + hitsScore + "\">"); for (ListIterator li = doc.getFields().listIterator(); li.hasNext();) { Fieldable f = (Fieldable) 
li.next(); resultXml.append("<field name=\"" + f.name() + "\""); String snippets = null; if (snippetsMax > 0) { SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("!!!SNIPPETBEGIN", "!!!SNIPPETEND"); QueryScorer scorer = new QueryScorer(query, f.name()); Highlighter highlighter = new Highlighter(formatter, scorer); Fragmenter fragmenter = new SimpleFragmenter(fieldMaxLength); highlighter.setTextFragmenter(fragmenter); TokenStream tokenStream = analyzer.tokenStream(f.name(), new StringReader(f.stringValue())); try { snippets = highlighter.getBestFragments(tokenStream, f.stringValue(), snippetsMax, " ... "); } catch (Exception e) { // all Exceptions to be caught, not just IOException errorExit(e.toString()); } snippets = checkTruncatedWords(snippets, " ... "); snippets = StreamUtility.enc(snippets); snippets = snippets.replaceAll("!!!SNIPPETBEGIN", snippetBegin); snippets = snippets.replaceAll("!!!SNIPPETEND", snippetEnd); if (snippets != null && !snippets.equals("")) { resultXml.append(" snippet=\"yes\">" + snippets); } } if (snippets == null || snippets.equals("")) if (fieldMaxLength > 0 && f.stringValue().length() > fieldMaxLength) { String snippet = f.stringValue().substring(0, fieldMaxLength); int iamp = snippet.lastIndexOf("&"); if (iamp > -1 && iamp > fieldMaxLength - 8) snippet = snippet.substring(0, iamp); resultXml.append(">" + StreamUtility.enc(snippet) + " ... "); } else resultXml.append(">" + StreamUtility.enc(f.stringValue())); resultXml.append("</field>"); } resultXml.append("</hit>"); } resultXml.append("</lucenesearch>"); if (logger.isDebugEnabled()) { int size = 500; if (resultXml.length() < size) size = resultXml.length(); String debugString = resultXml.substring(0, size); if (resultXml.length() > size) debugString += "..."; logger.debug("executeQuery resultXml=" + debugString); } rs = new ResultSet(resultXml); return rs; }
From source file:dk.defxws.fgssolr.Statement.java
License:Open Source License
public ResultSet executeQuery(IndexSearcher searcher, String queryString, int startRecord, int maxResults, int snippetsMax, int fieldMaxLength, Analyzer analyzer, String defaultQueryFields, String indexPath, String indexName, String snippetBegin, String snippetEnd, String sortFields) throws GenericSearchException { boolean allowLeadingWildcard = true; boolean lowercaseExpandedTerms = true; if (logger.isDebugEnabled()) logger.debug("executeQuery" + " query=" + queryString + " startRecord=" + startRecord + " maxResults=" + maxResults + " snippetsMax=" + snippetsMax + " fieldMaxLength=" + fieldMaxLength + " indexName=" + indexName + " sortFields=" + sortFields + " defaultQueryFields=" + defaultQueryFields + " allowLeadingWildcard=" + allowLeadingWildcard + " lowercaseExpandedTerms=" + lowercaseExpandedTerms); this.searcher = searcher; ResultSet rs = null;/*from www . jav a2 s .com*/ StringTokenizer defaultFieldNames = new StringTokenizer(defaultQueryFields); int countFields = defaultFieldNames.countTokens(); String[] defaultFields = new String[countFields]; for (int i = 0; i < countFields; i++) { defaultFields[i] = defaultFieldNames.nextToken(); } Query query = null; if (defaultFields.length == 1) { QueryParser queryParser = new QueryParser(Version.LUCENE_36, defaultFields[0], analyzer); queryParser.setAllowLeadingWildcard(allowLeadingWildcard); queryParser.setLowercaseExpandedTerms(lowercaseExpandedTerms); if (logger.isDebugEnabled()) logger.debug("executeQuery queryParser" + " allowLeadingWildcard=" + queryParser.getAllowLeadingWildcard() + " lowercaseExpandedTerms=" + queryParser.getLowercaseExpandedTerms()); try { query = queryParser.parse(queryString); } catch (ParseException e) { throw new GenericSearchException(e.toString()); } } else { MultiFieldQueryParser queryParser = new MultiFieldQueryParser(Version.LUCENE_36, defaultFields, analyzer); queryParser.setAllowLeadingWildcard(allowLeadingWildcard); queryParser.setLowercaseExpandedTerms(lowercaseExpandedTerms); 
if (logger.isDebugEnabled()) logger.debug("executeQuery mfqueryParser" + " allowLeadingWildcard=" + queryParser.getAllowLeadingWildcard() + " lowercaseExpandedTerms=" + queryParser.getLowercaseExpandedTerms()); try { query = queryParser.parse(queryString); } catch (ParseException e) { throw new GenericSearchException(e.toString()); } } if (logger.isDebugEnabled()) logger.debug("executeQuery after parse query=" + query); try { query.rewrite(searcher.getIndexReader()); } catch (Exception e) { throw new GenericSearchException(e.toString()); } if (logger.isDebugEnabled()) logger.debug("executeQuery after rewrite query=" + query); int start = Integer.parseInt(Integer.toString(startRecord)); TopDocs hits = getHits(query, start + maxResults - 1, sortFields); ScoreDoc[] docs = hits.scoreDocs; int end = Math.min(hits.totalHits, start + maxResults - 1); if (logger.isDebugEnabled()) logger.debug("executeQuery hits.totalHits=" + hits.totalHits); StringBuffer resultXml = new StringBuffer(); resultXml.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"); String queryStringEncoded = null; try { queryStringEncoded = URLEncoder.encode(queryString, "UTF-8"); } catch (UnsupportedEncodingException e) { errorExit(e.toString()); } resultXml.append("<solrsearch " + " xmlns:dc=\"http://purl.org/dc/elements/1.1/" + "\" query=\"" + queryStringEncoded + "\" indexName=\"" + indexName + "\" sortFields=\"" + sortFields + "\" hitPageStart=\"" + startRecord + "\" hitPageSize=\"" + maxResults + "\" hitTotal=\"" + hits.totalHits + "\">"); ScoreDoc hit = null; Document doc = null; String hitsScore = null; for (int i = start; i <= end; i++) { try { hit = docs[i - 1]; doc = searcher.doc(hit.doc); hitsScore = "" + hit.score; } catch (CorruptIndexException e) { errorExit(e.toString()); } catch (IOException e) { errorExit(e.toString()); } resultXml.append("<hit no=\"" + i + "\" score=\"" + hitsScore + "\">"); for (ListIterator li = doc.getFields().listIterator(); li.hasNext();) { Fieldable f = 
(Fieldable) li.next(); resultXml.append("<field name=\"" + f.name() + "\""); String snippets = null; if (snippetsMax > 0) { SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("!!!SNIPPETBEGIN", "!!!SNIPPETEND"); QueryScorer scorer = new QueryScorer(query, f.name()); Highlighter highlighter = new Highlighter(formatter, scorer); Fragmenter fragmenter = new SimpleFragmenter(fieldMaxLength); highlighter.setTextFragmenter(fragmenter); TokenStream tokenStream = analyzer.tokenStream(f.name(), new StringReader(f.stringValue())); try { snippets = highlighter.getBestFragments(tokenStream, f.stringValue(), snippetsMax, " ... "); } catch (Exception e) { // all Exceptions to be caught, not just IOException errorExit(e.toString()); } snippets = checkTruncatedWords(snippets, " ... "); snippets = StreamUtility.enc(snippets); snippets = snippets.replaceAll("!!!SNIPPETBEGIN", snippetBegin); snippets = snippets.replaceAll("!!!SNIPPETEND", snippetEnd); if (snippets != null && !snippets.equals("")) { resultXml.append(" snippet=\"yes\">" + snippets); } } if (snippets == null || snippets.equals("")) if (fieldMaxLength > 0 && f.stringValue().length() > fieldMaxLength) { String snippet = f.stringValue().substring(0, fieldMaxLength); int iamp = snippet.lastIndexOf("&"); if (iamp > -1 && iamp > fieldMaxLength - 8) snippet = snippet.substring(0, iamp); resultXml.append(">" + StreamUtility.enc(snippet) + " ... "); } else resultXml.append(">" + StreamUtility.enc(f.stringValue())); resultXml.append("</field>"); } resultXml.append("</hit>"); } resultXml.append("</solrsearch>"); if (logger.isDebugEnabled()) { int size = 500; if (resultXml.length() < size) size = resultXml.length(); String debugString = resultXml.substring(0, size); if (resultXml.length() > size) debugString += "..."; logger.debug("executeQuery resultXml=" + debugString); } rs = new ResultSet(resultXml); return rs; }
From source file:dk.dma.msinm.lucene.AbstractLuceneIndex.java
License:Open Source License
/**
 * Performs a search in the index and returns the ids of matching entities.
 *
 * @param freeTextSearch the search string; when blank, all documents match
 * @param field          the field to search; when blank, all documents match
 * @param filter         an optional filter, may be {@code null}
 * @param maxHits        the max number of hits to return
 * @return the matching ids, read from the {@code ID_FIELD} of each hit
 */
public List<Long> searchIndex(String freeTextSearch, String field, Filter filter, int maxHits)
        throws IOException, ParseException {
    Query query;
    if (StringUtils.isNotBlank(freeTextSearch) && StringUtils.isNotBlank(field)) {
        // Normalize query text
        freeTextSearch = LuceneUtils.normalizeQuery(freeTextSearch);

        // Create a complex-phrase query parser. Despite an older comment claiming
        // "and" semantics, the default operator is explicitly set to OR below.
        QueryParser parser = new ComplexPhraseQueryParser(LuceneUtils.LUCENE_VERSION, field,
                new StandardAnalyzer(LuceneUtils.LUCENE_VERSION));
        parser.setDefaultOperator(QueryParser.OR_OPERATOR);
        parser.setAllowLeadingWildcard(true); // NB: leading wildcards are expensive to evaluate!
        query = parser.parse(freeTextSearch);
    } else {
        // No usable query text/field: match everything (bounded by maxHits).
        query = new MatchAllDocsQuery();
    }

    // Perform the search and collect the ids
    IndexSearcher searcher = new IndexSearcher(getIndexReader());
    TopDocs results = (filter == null) ? searcher.search(query, maxHits)
            : searcher.search(query, filter, maxHits);
    List<Long> ids = new ArrayList<>();
    for (ScoreDoc hit : results.scoreDocs) {
        Document d = searcher.doc(hit.doc);
        // assumes every indexed document carries a numeric-string ID_FIELD — a
        // missing field would make Long.valueOf throw; TODO confirm with the indexer
        ids.add(Long.valueOf(d.get(ID_FIELD)));
    }
    return ids;
}
From source file:dk.dma.msinm.lucene.SpatialLuceneTest.java
License:Open Source License
/**
 * Asserts that the documents in {@code docs} carry exactly the expected numeric
 * "id" field values, in hit order.
 *
 * @param indexSearcher searcher used to load each hit's stored document
 * @param docs          the search results to check
 * @param ids           the expected id values, in order
 */
private void assertDocMatchedIds(IndexSearcher indexSearcher, TopDocs docs, int... ids) throws IOException {
    int[] actualIds = new int[docs.totalHits];
    for (int pos = 0; pos < actualIds.length; pos++) {
        Document stored = indexSearcher.doc(docs.scoreDocs[pos].doc);
        actualIds[pos] = stored.getField("id").numericValue().intValue();
    }
    assertArrayEquals(ids, actualIds);
}
From source file:dk.netarkivet.harvester.indexserver.DedupCrawlLogIndexCacheTester.java
License:Open Source License
private void verifySearchResult(Map<String, String> origins, IndexSearcher index) throws IOException { Set<String> urls = new HashSet<String>(origins.keySet()); for (String urlValue : urls) { BytesRef uriRef = new BytesRef(urlValue); Query q = new ConstantScoreQuery( new TermRangeFilter(DigestIndexer.FIELD_URL, uriRef, uriRef, true, true)); AllDocsCollector collector = new AllDocsCollector(); index.search(q, collector);//from w w w . j a v a 2 s .c o m List<ScoreDoc> hits = collector.getHits(); for (ScoreDoc hit : hits) { int docID = hit.doc; Document doc = index.doc(docID); String url = doc.get("url"); String origin = doc.get("origin"); assertEquals("Should have correct origin for url " + url, origins.get(url), origin); // Ensure that each occurs only once. String removedValue = origins.remove(url); if (removedValue == null) { // System.out.println("'" + url + "' not found in origins map"); } else { // System.out.println("'" + url + "' was found in origins map"); } } } }
From source file:Dl4j.TermInfo.java
public LuceneDocFetcher(Directory dir, ArrayList<String> docIds) throws Exception { globalTermId = 0;//from www . j a v a 2 s.co m termSeen = new HashMap<>(); IndexReader reader = DirectoryReader.open(dir); // totalExamples = reader.numDocs(); //++Procheta totalExamples = docIds.size(); docWordMaps = new ArrayList<>(totalExamples); // build the per-doc word maps for (int i = 0; i < totalExamples; i++) { IndexSearcher searcher = new IndexSearcher(reader); Similarity sm = new DefaultSimilarity(); searcher.setSimilarity(sm); Analyzer analyzer = new KeywordAnalyzer(); //System.out.println(id); QueryParser queryParser = new QueryParser("id", analyzer); Query query = queryParser.parse(docIds.get(i)); TopDocs topDocs = searcher.search(query, 3); //System.out.println(query.toString()); ScoreDoc[] hits = topDocs.scoreDocs; // System.out.println(hits.length); Document doc = searcher.doc(hits[0].doc); docWordMaps.add(buildTerms(reader, hits[0].doc)); } // iterate through the word maps and build the one-hot vectors List<DataSet> allDocVecs = new ArrayList<>(totalExamples); for (Map<String, TermInfo> docwordMap : docWordMaps) { allDocVecs.add(constructTermVector(docwordMap)); } // Merge all doc vecs into one dataset this.dataSet = DataSet.merge(allDocVecs); reader.close(); }
From source file:Dl4j.TermInfo.java
public LuceneDocFetcher(Directory dir, ArrayList<String> docIds, ArrayList<String> labels) throws Exception { globalTermId = 0;//from w w w .j a va2 s . c o m termSeen = new HashMap<>(); IndexReader reader = DirectoryReader.open(dir); // totalExamples = reader.numDocs(); //++Procheta totalExamples = docIds.size(); docWordMaps = new ArrayList<>(totalExamples); // build the per-doc word maps for (int i = 0; i < totalExamples; i++) { IndexSearcher searcher = new IndexSearcher(reader); Similarity sm = new DefaultSimilarity(); searcher.setSimilarity(sm); Analyzer analyzer = new KeywordAnalyzer(); //System.out.println(id); QueryParser queryParser = new QueryParser("id", analyzer); Query query = queryParser.parse(docIds.get(i)); TopDocs topDocs = searcher.search(query, 3); //System.out.println(query.toString()); ScoreDoc[] hits = topDocs.scoreDocs; // System.out.println(hits.length); Document doc = searcher.doc(hits[0].doc); docWordMaps.add(buildTerms(reader, hits[0].doc)); } // iterate through the word maps and build the one-hot vectors List<DataSet> allDocVecs = new ArrayList<>(totalExamples); for (Map<String, TermInfo> docwordMap : docWordMaps) { allDocVecs.add(constructTermVector(docwordMap, labels)); } // Merge all doc vecs into one dataset this.dataSet = DataSet.merge(allDocVecs); reader.close(); }
From source file:docet.engine.SimpleDocetDocSearcher.java
License:Apache License
/**
 * Full-text search over the docet index: parses the search text against the
 * language-specific content field, collects up to {@code maxNumResults} hits,
 * builds highlighted excerpt strings for each hit, and returns them as
 * {@link DocetPage} results with a 0-100 relevance score relative to the top hit.
 *
 * @param searchText    raw user search text
 * @param lang          requested language; a non-empty fallback language (if
 *                      configured) takes precedence
 * @param maxNumResults maximum number of hits to return
 * @throws DocetDocumentSearchException wrapping parse, I/O or highlighter errors
 */
@Override
public List<DocetPage> searchForMatchingDocuments(final String searchText, final String lang,
        final int maxNumResults) throws DocetDocumentSearchException {
    final List<DocetPage> results = new ArrayList<>();
    // Resolve the language actually searched: fallback wins when present.
    final String fallbackLang = this.getFallbackLangForLang(lang);
    final String actualSearchLang;
    if (fallbackLang.isEmpty()) {
        actualSearchLang = lang;
    } else {
        actualSearchLang = fallbackLang;
    }
    try {
        final IndexSearcher searcher = new IndexSearcher(reader);
        final Analyzer analyzer = new AnalyzerBuilder().language(actualSearchLang).build();
        // Content is stored per-language under LUCENE_QUERY_CONTENT_PREFIX + lang.
        QueryParser queryParser = new QueryParser(LUCENE_QUERY_CONTENT_PREFIX + actualSearchLang, analyzer);
        final Query query = queryParser.parse(constructLucenePhraseTermSearchQuery(searchText));
        // Highlighter set up to scan whole documents (no char limit).
        final QueryScorer queryScorer = new QueryScorer(query,
                LUCENE_QUERY_CONTENT_PREFIX + actualSearchLang);
        final Fragmenter fragmenter = new SimpleSpanFragmenter(queryScorer);
        final Highlighter highlighter = new Highlighter(queryScorer);
        highlighter.setMaxDocCharsToAnalyze(Integer.MAX_VALUE);
        highlighter.setTextFragmenter(fragmenter);
        final TopDocs res = searcher.search(query, maxNumResults);
        final float maxScore = res.getMaxScore();
        final List<ScoreDoc> scoreDocs = Arrays.asList(res.scoreDocs);
        // doc -> assembled excerpt string; docId -> score of its first (best) hit.
        Map<org.apache.lucene.document.Document, String> docs = new HashMap<>();
        Map<String, ScoreDoc> scoresForDocs = new HashMap<>();
        for (final ScoreDoc sd : scoreDocs) {
            final org.apache.lucene.document.Document doc = searcher.doc(sd.doc);
            final String contents = doc.get(LUCENE_QUERY_CONTENT_PREFIX + actualSearchLang);
            final String docId = doc.get("id");
            final String[] fragments = highlighter.getBestFragments(analyzer,
                    LUCENE_QUERY_CONTENT_PREFIX + actualSearchLang, contents, MAX_NUM_FRAGMENTS);
            List<String> fragmentList = Arrays.asList(fragments);
            // Per fragment: split on newlines, drop blank lines, and re-join the
            // remainder with the excerpt separator (note: "MACHING" typo is the
            // project-wide constant name and must stay as-is).
            fragmentList = fragmentList.stream().map(s1 -> s1.trim().split("\n"))
                    .map(s1 -> Arrays.asList(s1).stream().filter(s -> !s.trim().isEmpty())
                            .reduce((sa, sb) -> sa + MACHING_EXCERPTS_SEPARATOR + sb)
                            .orElse(MACHING_EXCERPTS_SEPARATOR))
                    .collect(Collectors.toList());
            // Join all fragments into one separator-delimited excerpt string.
            docs.put(doc, MACHING_EXCERPTS_SEPARATOR + fragmentList.stream().filter(s -> !s.isEmpty())
                    .reduce((s1, s2) -> s1 + "..." + s2).orElse("") + MACHING_EXCERPTS_SEPARATOR);
            // putIfAbsent: keep the first (highest-ranked) score per doc id.
            scoresForDocs.putIfAbsent(docId, sd);
        }
        // Relevance is the hit score normalized to the best score, as a 0-100 int.
        docs.entrySet().stream().forEach(e -> {
            final int relevance = Math.round((scoresForDocs.get(e.getKey().get("id")).score / maxScore) * 100);
            results.add(DocetPage.toDocetDocument(e.getKey(), e.getValue(), relevance));
        });
        return results;
    } catch (ParseException | IOException | InvalidTokenOffsetsException ex) {
        throw new DocetDocumentSearchException(
                "Error on searching query " + searchText + " for lang " + actualSearchLang, ex);
    }
}
From source file:dynamicrefactoring.interfaz.wizard.search.internal.SimpleElementSearcher.java
License:Open Source License
/** * Realiza una consulta sobre el indice de un tipo de elemento. * /*from www.ja v a 2 s. c o m*/ * @param element tipo de elemento a buscar * @param userQuery consulta a realizar * @param dir directorio del indice * @return resultados de la busqueda * @throws ParseException si la consulta no es valida */ protected Set<QueryResult> search(SearchableType element, String userQuery, Directory dir) throws ParseException { try { IndexSearcher is = new IndexSearcher(dir); QueryParser parser = new MultiFieldQueryParser(Version.LUCENE_30, new String[] { SearchableTypeIndexer.CLASS_DESCRIPTION_FIELD, SearchableTypeIndexer.CLASS_NAME_FIELD, SearchableTypeIndexer.PACKAGE_FIELD }, SearchableTypeIndexer.getTermsAnalyzer()); TopDocs hits = is.search(parser.parse(userQuery), 30); Set<QueryResult> results = new HashSet<QueryResult>(hits.totalHits); for (ScoreDoc scoreDoc : hits.scoreDocs) { Document doc = is.doc(scoreDoc.doc); results.add(new QueryResult(doc.get(SearchableTypeIndexer.FULLY_QUALIFIED_CLASS_NAME_FIELD), doc.get(SearchableTypeIndexer.CLASS_DESCRIPTION_FIELD))); } is.close(); return results; } catch (IOException e) { throw Throwables.propagate(e); } }