List of usage examples for the org.apache.lucene.store.RAMDirectory constructor
public RAMDirectory()
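Before the project-specific examples below, here is a minimal, self-contained sketch of the typical RAMDirectory lifecycle: create the in-memory directory, index a document, then open a reader and run a query. This is an illustrative sketch assuming a Lucene 4.x-era API (the Version.LUCENE_47 constant, the field name "content", and the sample text are placeholders chosen for this example); adapt the version constant and analyzer to the Lucene release you actually use.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

// Sketch: build a small index entirely in memory and search it.
public class RAMDirectoryExample {
    public static void main(String[] args) throws Exception {
        RAMDirectory directory = new RAMDirectory();
        StandardAnalyzer analyzer = new StandardAnalyzer(Version.LUCENE_47);

        // Write a single document into the in-memory index.
        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_47, analyzer);
        IndexWriter writer = new IndexWriter(directory, config);
        Document doc = new Document();
        doc.add(new Field("content", "hello lucene in memory", TextField.TYPE_STORED));
        writer.addDocument(doc);
        writer.close();

        // Open a reader on the same directory and run a query against it.
        DirectoryReader reader = DirectoryReader.open(directory);
        IndexSearcher searcher = new IndexSearcher(reader);
        QueryParser parser = new QueryParser(Version.LUCENE_47, "content", analyzer);
        TopDocs hits = searcher.search(parser.parse("lucene"), 10);
        System.out.println("hits: " + hits.totalHits);
        reader.close();
        directory.close();
    }
}

Because the index lives entirely on the heap and vanishes when the directory is discarded, RAMDirectory suits the uses shown below: unit tests, empty-index sentinels, and short-lived re-ranking indexes.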
From source file:net.conquiris.search.EmptyIndex.java
License:Apache License
/** Constructor. */
private EmptyIndex() throws IOException {
    this.directory = new RAMDirectory();
    IndexWriter w = new IndexWriter(directory, Conquiris.writerConfig());
    w.close();
}
From source file:net.conquiris.search.EmptyIndexTest.java
License:Apache License
/** Missing index. */
@Test(expectedExceptions = IndexNotFoundException.class)
public void missingRAM() throws Exception {
    final Closer closer = Closer.create();
    try {
        IndexReader reader = closer.register(IndexReader.open(new RAMDirectory()));
        IndexSearcher s = closer.register(new IndexSearcher(reader));
        s.search(new MatchAllDocsQuery(), 5);
    } finally {
        closer.close();
    }
}
From source file:net.conquiris.search.EmptyIndexTest.java
License:Apache License
/** Missing index. */
@Test
public void missingRAMnrt() throws Exception {
    final Directory d = new RAMDirectory();
    final IndexWriter w = new IndexWriter(d,
            new IndexWriterConfig(Version.LUCENE_34, new StandardAnalyzer(Version.LUCENE_34)));
    try {
        final IndexReader r = IndexReader.open(w, true);
        try {
            IndexSearcher s = new IndexSearcher(r);
            s.search(new MatchAllDocsQuery(), 5);
            s.close();
        } finally {
            r.close();
        }
    } finally {
    }
}
From source file:net.conquiris.search.SearchersTest.java
License:Apache License
private void create() {
    directory = new RAMDirectory();
    supplier = ReaderSuppliers.directory(directory);
    unmanaged = ReaderSuppliers.directory(directory);
    managed = ReaderSuppliers.managed(unmanaged, 50L);
    unmanagedSearcher = Searchers.service(supplier);
    managedSearcher = Searchers.service(managed);
}
From source file:net.conquiris.support.TestSupport.java
License:Apache License
public static Directory createRAMDirectory(String base, int from, int to) throws IOException {
    Directory directory = new RAMDirectory();
    write(directory, base, from, to);
    return directory;
}
From source file:net.di2e.ecdr.libs.result.relevance.RelevanceNormalizer.java
License:Apache License
/**
 * Normalize the relevance score for the results in the query response based on the contextual query criteria
 *
 * @param results
 * @param originalQuery
 * @return
 */
public List<Result> normalize(List<Result> results, Query originalQuery) {
    SortBy sortBy = originalQuery.getSortBy();
    // We want to do relevance sort if no sort order was specified or if Relevance sort was specified
    if (sortBy == null || sortBy.getPropertyName() == null
            || sortBy.getPropertyName().getPropertyName() == null
            || Result.RELEVANCE.equals(sortBy.getPropertyName().getPropertyName())) {
        Map<String, String> filterParameters = getFilterParameters(originalQuery);
        if (canNormalizeQuery(filterParameters)) {
            LOGGER.debug(
                    "Query contained search phrase and will be sorted by relevance, performing re-indexing to normalize relevance.");
            Directory directory = null;
            DirectoryReader iReader = null;
            Map<String, Result> docMap = new HashMap<>();
            List<Result> updatedResults = new ArrayList<>();
            StopWatch stopWatch = new StopWatch();
            stopWatch.start();
            try {
                Analyzer analyzer = new StandardAnalyzer();
                // create memory-stored index
                directory = new RAMDirectory();
                IndexWriterConfig config = new IndexWriterConfig(Version.LATEST, analyzer);
                IndexWriter iWriter = new IndexWriter(directory, config);
                // loop through all of the results and add them to the index
                for (Result curResult : results) {
                    Document doc = new Document();
                    String text = TextParser.parseTextFrom(curResult.getMetacard().getMetadata());
                    String uuid = UUID.randomUUID().toString();
                    doc.add(new Field(METADATA_FIELD, text, TextField.TYPE_STORED));
                    doc.add(new Field(ID_FIELD, uuid, TextField.TYPE_STORED));
                    iWriter.addDocument(doc);
                    docMap.put(uuid, curResult);
                }
                IOUtils.closeQuietly(iWriter);
                LOGGER.debug("{} Document indexing finished in {} seconds.", RELEVANCE_TIMER,
                        (double) stopWatch.getTime() / 1000.0);
                // Now search the index:
                iReader = DirectoryReader.open(directory);
                IndexSearcher iSearcher = new IndexSearcher(iReader);
                // Parse a simple query that searches for "text":
                QueryParser parser = new QueryParser(METADATA_FIELD, analyzer);
                org.apache.lucene.search.Query query = getQuery(parser, filterParameters);
                ScoreDoc[] hits = iSearcher.search(query, null, docMap.size()).scoreDocs;
                LOGGER.debug("Got back {} results", hits.length);
                // loop through the indexed search results and update the scores in the original query results
                for (ScoreDoc curHit : hits) {
                    Document doc = iSearcher.doc(curHit.doc);
                    String uuid = doc.getField(ID_FIELD).stringValue();
                    Result result = docMap.get(uuid);
                    docMap.remove(uuid);
                    updatedResults.add(updateResult(result, curHit.score));
                    LOGGER.debug("Relevance for result {} was changed FROM {} TO {}",
                            result.getMetacard().getId(), result.getRelevanceScore(), curHit.score);
                }
                // check if there are any results left that did not match the keyword query
                for (Map.Entry<String, Result> curEntry : docMap.entrySet()) {
                    // add result in with 0 relevance score
                    updatedResults.add(updateResult(curEntry.getValue(), 0));
                }
                // create new query response
                return updatedResults;
            } catch (ParseException | IOException | RuntimeException e) {
                LOGGER.warn(
                        "Received an exception while trying to perform re-indexing, sending original queryResponse on.",
                        e);
                return results;
            } finally {
                IOUtils.closeQuietly(iReader);
                IOUtils.closeQuietly(directory);
                stopWatch.stop();
                LOGGER.debug("{} Total relevance process took {} seconds.", RELEVANCE_TIMER,
                        (double) stopWatch.getTime() / 1000.0);
            }
        } else {
            LOGGER.debug(
                    "Query is not sorted based on relevance with contextual criteria. Skipping relevance normalization.");
        }
    } else {
        LOGGER.debug(
                "Query is not sorted based on relevance with contextual criteria. Skipping relevance normalization.");
    }
    return results;
}
From source file:net.mad.ads.db.db.index.AdDBLuceneIndex.java
License:Open Source License
@Override
public void open() throws IOException {
    index = new RAMDirectory();
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_33, new KeywordAnalyzer());
    config.setOpenMode(OpenMode.CREATE);
    writer = new IndexWriter(index, config);
    this.reader = IndexReader.open(this.writer, true);
    this.searcher = new IndexSearcher(this.reader);
}
From source file:net.schweerelos.parrot.model.TextSearchEngine.java
License:Open Source License
public TextSearchEngine() {
    index = new RAMDirectory();
    analyser = new StandardAnalyzer();
    try {
        writer = new IndexWriter(index, analyser, true);
    } catch (CorruptIndexException e) {
        // ignore
        e.printStackTrace();
    } catch (LockObtainFailedException e) {
        // ignore
        e.printStackTrace();
    } catch (IOException e) {
        // ignore
        e.printStackTrace();
    }
    hashToNodeWrapper = new HashMap<Integer, NodeWrapper>();
}
From source file:net.sf.jtmt.summarizers.LuceneSummarizer.java
License:Apache License
/**
 * Summarize.
 *
 * @param text the text
 * @return the string
 * @throws Exception the exception
 */
public String summarize(String text) throws Exception {
    RAMDirectory ramdir = new RAMDirectory();
    buildIndex(ramdir, text);
    Query topTermQuery = computeTopTermQuery(ramdir);
    String[] sentences = searchIndex(ramdir, topTermQuery);
    return StringUtils.join(sentences, " ... ");
}
From source file:net.sf.lucis.core.impl.EmptyDirectory.java
License:Apache License
private static synchronized void create() throws IOException {
    if (directory != null) {
        return;
    }
    final RAMDirectory ram = new RAMDirectory();
    IndexWriter w = new IndexWriter(ram, Factory.get().writerConfig());
    w.commit();
    w.close();
    directory = ram;
}