List of usage examples for the org.apache.lucene.store.RAMDirectory constructor
public RAMDirectory()
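Before the project-specific examples below, here is a minimal self-contained sketch of the usual RAMDirectory lifecycle: create the in-memory directory, index a document, and search it. This targets the Lucene 6.x/7.x API (RAMDirectory was deprecated in 7.x and removed in 9.0); the class name, field name, and query terms are illustrative, not taken from any of the examples.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.RAMDirectory;

public class RAMDirectoryExample {
    public static void main(String[] args) throws Exception {
        // All index files live on the heap; nothing touches disk.
        RAMDirectory dir = new RAMDirectory();

        // Write one document into the in-memory index.
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            Document doc = new Document();
            doc.add(new TextField("contents", "hello world", Field.Store.YES));
            writer.addDocument(doc);
        } // close() commits the pending changes

        // Read it back with a simple term query.
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            long hits = searcher.search(new TermQuery(new Term("contents", "hello")), 10).totalHits;
            System.out.println("hits: " + hits); // prints "hits: 1"
        }
        dir.close();
    }
}

The same pattern recurs throughout the examples below: a RAMDirectory is handed to an IndexWriter via an IndexWriterConfig, documents are added and committed, and a reader or SearcherManager is opened over the same directory.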
From source file:org.apache.carbondata.datamap.lucene.LuceneDataMapWriter.java
License:Apache License
/**
 * Start of new blocklet notification.
 */
public void onBlockletStart(int blockletId) throws IOException {
    if (null == analyzer) {
        if (CarbonProperties.getInstance()
                .getProperty(CarbonCommonConstants.CARBON_LUCENE_INDEX_STOP_WORDS,
                        CarbonCommonConstants.CARBON_LUCENE_INDEX_STOP_WORDS_DEFAULT)
                .equalsIgnoreCase("true")) {
            analyzer = new StandardAnalyzer(CharArraySet.EMPTY_SET);
        } else {
            analyzer = new StandardAnalyzer();
        }
    }
    // save index data into ram, write into disk after one page finished
    ramDir = new RAMDirectory();
    ramIndexWriter = new IndexWriter(ramDir, new IndexWriterConfig(analyzer));
    if (indexWriter != null) {
        return;
    }
    // get index path, put index data into segment's path
    String dataMapPath;
    if (storeBlockletWise) {
        dataMapPath = this.dataMapPath + File.separator + blockletId;
    } else {
        dataMapPath = this.dataMapPath;
    }
    Path indexPath = FileFactory.getPath(dataMapPath);
    FileSystem fs = FileFactory.getFileSystem(indexPath);
    // if the index path does not exist, create it
    if (!fs.exists(indexPath)) {
        if (!fs.mkdirs(indexPath)) {
            throw new IOException("Failed to create directory " + dataMapPath);
        }
    }
    // the IndexWriter closes its FileSystem when the writer is closed, so use a new
    // configuration with the FileSystem cache disabled; this writer then gets its own
    // instance, which is safe to close along with the writer
    Configuration conf = FileFactory.getConfiguration();
    conf.set("fs.hdfs.impl.disable.cache", "true");
    // create an index writer
    Directory indexDir = new HdfsDirectory(indexPath, conf);
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
    if (CarbonProperties.getInstance()
            .getProperty(CarbonCommonConstants.CARBON_LUCENE_COMPRESSION_MODE,
                    CarbonCommonConstants.CARBON_LUCENE_COMPRESSION_MODE_DEFAULT)
            .equalsIgnoreCase(CarbonCommonConstants.CARBON_LUCENE_COMPRESSION_MODE_DEFAULT)) {
        indexWriterConfig.setCodec(speedCodec);
    } else {
        indexWriterConfig.setCodec(compressionCodec);
    }
    indexWriter = new IndexWriter(indexDir, indexWriterConfig);
}
From source file:org.apache.clerezza.rdf.cris.GraphIndexer.java
License:Apache License
/**
 * Creates a new in-memory index with default {@code maxHits}.
 *
 * The {@code GraphIndexer} looks for specifications of what properties on
 * what resources to index in the {@code definitionGraph}.
 *
 * The {@code baseGraph} specifies the graph on which the index is built.
 *
 * <p>
 * Notes:
 * <p>
 * This is an expensive operation and it is advisable to call
 * {@link #closeLuceneIndex()} when this instance is no longer needed.
 * </p><p>
 * The GraphIndexer must have write-access to the index directory specified.
 * </p>
 *
 * @param definitionGraph where index definitions are stored
 * @param baseGraph where the resources to index are stored
 */
public GraphIndexer(TripleCollection definitionGraph, TripleCollection baseGraph) {
    this(definitionGraph, baseGraph, new RAMDirectory(), true);
}
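A hypothetical usage sketch of the constructor above, following the javadoc's advice to call closeLuceneIndex() when the instance is no longer needed; the variable names and the TripleCollection instances are assumed to come from the surrounding Clerezza application:

// definitions and data are pre-existing Clerezza TripleCollection instances (assumed)
GraphIndexer indexer = new GraphIndexer(definitions, data);
try {
    // ... index resources and run queries ...
} finally {
    // the javadoc above advises releasing the index explicitly
    indexer.closeLuceneIndex();
}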
From source file:org.apache.cxf.jaxrs.ext.search.lucene.AbstractLuceneQueryVisitorTest.java
License:Apache License
@Before
public void setUp() throws Exception {
    analyzer = new StandardAnalyzer(Version.LUCENE_4_9);
    directory = new RAMDirectory();
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_4_9, analyzer);
    IndexWriter iwriter = new IndexWriter(directory, config);

    Document doc = new Document();
    doc.add(new Field("contents", "name=text", TextField.TYPE_STORED));
    IntField intField = new IntField("intfield", 4, Field.Store.YES);
    doc.add(intField);
    iwriter.addDocument(doc);
    iwriter.close();

    ireader = DirectoryReader.open(directory);
    isearcher = new IndexSearcher(ireader);
}
From source file:org.apache.cxf.jaxrs.ext.search.tika.TikaLuceneContentExtractorTest.java
License:Apache License
@Before
public void setUp() throws Exception {
    final Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_4_9);
    directory = new RAMDirectory();
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_4_9, analyzer);
    writer = new IndexWriter(directory, config);
    writer.commit();

    parser = new FiqlParser<SearchBean>(SearchBean.class);
    extractor = new TikaLuceneContentExtractor(new PDFParser());
}
From source file:org.apache.geode.cache.lucene.internal.RawIndexRepositoryFactory.java
License:Apache License
public IndexRepository createIndexRepository(final Integer bucketId, LuceneSerializer serializer,
        LuceneIndexImpl index, PartitionedRegion userRegion) throws IOException {
    final IndexRepository repo;
    LuceneRawIndex indexForRaw = (LuceneRawIndex) index;
    BucketRegion dataBucket = getMatchingBucket(userRegion, bucketId);
    Directory dir = null;
    if (indexForRaw.withPersistence()) {
        String bucketLocation = LuceneServiceImpl.getUniqueIndexName(index.getName(),
                index.getRegionPath() + "_" + bucketId);
        File location = new File(index.getName(), bucketLocation);
        if (!location.exists()) {
            location.mkdirs();
        }
        dir = new NIOFSDirectory(location.toPath());
    } else {
        dir = new RAMDirectory();
    }
    IndexWriterConfig config = new IndexWriterConfig(indexForRaw.getAnalyzer());
    IndexWriter writer = new IndexWriter(dir, config);
    return new IndexRepositoryImpl(null, writer, serializer, indexForRaw.getIndexStats(), dataBucket);
}
From source file:org.apache.geode.cache.lucene.internal.repository.IndexRepositoryImplPerformanceTest.java
License:Apache License
@Test
public void testLucene() throws Exception {
    doTest("Lucene", new TestCallbacks() {
        private IndexWriter writer;
        private SearcherManager searcherManager;

        @Override
        public void init() throws Exception {
            RAMDirectory dir = new RAMDirectory();
            IndexWriterConfig config = new IndexWriterConfig(analyzer);
            writer = new IndexWriter(dir, config);
            searcherManager = new SearcherManager(writer, true, true, null);
        }

        @Override
        public void addObject(String key, String text) throws Exception {
            Document doc = new Document();
            doc.add(new TextField("key", key, Store.YES));
            doc.add(new TextField("text", text, Store.NO));
            writer.addDocument(doc);
        }

        @Override
        public void commit() throws Exception {
            writer.commit();
            searcherManager.maybeRefresh();
        }

        @Override
        public void cleanup() throws Exception {
            writer.close();
        }

        @Override
        public void waitForAsync() throws Exception {
            // do nothing
        }

        @Override
        public int query(Query query) throws Exception {
            IndexSearcher searcher = searcherManager.acquire();
            try {
                return searcher.count(query);
            } finally {
                searcherManager.release(searcher);
            }
        }
    });
}
From source file:org.apache.geode_examples.luceneSpatial.SpatialHelperTest.java
License:Apache License
@Test
public void queryFindsADocumentThatWasAdded() throws IOException {
    // Create an in memory lucene index to add a document to
    RAMDirectory directory = new RAMDirectory();
    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig());

    // Add a document to the lucene index
    Document document = new Document();
    document.add(new TextField("name", "name", Field.Store.YES));
    Field[] fields = SpatialHelper.getIndexableFields(-122.8515139, 45.5099231);
    for (Field field : fields) {
        document.add(field);
    }
    writer.addDocument(document);
    writer.commit();

    // Make sure a findWithin query locates the document
    Query query = SpatialHelper.findWithin(-122.8515239, 45.5099331, 1);
    SearcherManager searcherManager = new SearcherManager(writer, null);
    IndexSearcher searcher = searcherManager.acquire();
    TopDocs results = searcher.search(query, 100);
    assertEquals(1, results.totalHits);
}
From source file:org.apache.hadoop.contrib.index.lucene.TestMixedDirectory.java
License:Apache License
public void testMixedDirectoryAndPolicy() throws IOException {
    Directory readDir = new RAMDirectory();
    updateIndex(readDir, 0, numDocsPerUpdate, new KeepOnlyLastCommitDeletionPolicy());
    verify(readDir, numDocsPerUpdate);

    IndexOutput out = readDir.createOutput("_" + (numDocsPerUpdate / maxBufferedDocs + 2) + ".cfs");
    out.writeInt(0);
    out.close();

    Directory writeDir = new RAMDirectory();
    Directory mixedDir = new MixedDirectory(readDir, writeDir);
    updateIndex(mixedDir, numDocsPerUpdate, numDocsPerUpdate, new MixedDeletionPolicy());
    verify(readDir, numDocsPerUpdate);
    verify(mixedDir, 2 * numDocsPerUpdate);
}
From source file:org.apache.hadoop.contrib.index.mapred.IntermediateForm.java
License:Apache License
/**
 * Constructor
 * @throws IOException
 */
public IntermediateForm() throws IOException {
    deleteList = new ConcurrentLinkedQueue<Term>();
    dir = new RAMDirectory();
    writer = null;
    numDocs = 0;
}
From source file:org.apache.hadoop.contrib.index.mapred.IntermediateForm.java
License:Apache License
private void resetForm() throws IOException {
    deleteList.clear();
    if (dir.sizeInBytes() > 0) {
        // it's ok if we don't close a ram directory
        dir.close();
        // an alternative is to delete all the files and reuse the ram directory
        dir = new RAMDirectory();
    }
    assert (writer == null);
    numDocs = 0;
}
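A minimal sketch of the alternative mentioned in the comment above: delete every file and reuse the same RAMDirectory instance rather than closing it and allocating a new one. It assumes a Lucene version that provides Directory.listAll() and Directory.deleteFile(); the helper name clearDirectory is hypothetical.

// Hypothetical alternative to re-allocating the RAMDirectory:
// delete every file so the same instance can be reused.
private void clearDirectory() throws IOException {
    for (String file : dir.listAll()) {
        dir.deleteFile(file);
    }
}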