List of usage examples for org.apache.lucene.store.NRTCachingDirectory (constructor)
public NRTCachingDirectory(Directory delegate, double maxMergeSizeMB, double maxCachedMB)
From source file:com.lithium.flow.filer.lucene.LuceneFiler.java
License:Apache License
public LuceneFiler(@Nonnull Filer delegate, @Nonnull Config config) throws IOException { super(delegate); String path = config.getString("index.path"); maxAge = config.getTime("index.maxAge", "-1"); double maxMergeMb = config.getDouble("index.maxMergeMb", 4); double maxCachedMb = config.getDouble("index.maxCacheMb", 64); long targetMaxStale = config.getTime("index.targetMaxStale", "5s"); long targetMinStale = config.getTime("index.targetMinStale", "1s"); Version version = Version.LATEST;//from w ww . jav a 2 s. com Directory dir = FSDirectory.open(new File(path)); NRTCachingDirectory cachingDir = new NRTCachingDirectory(dir, maxMergeMb, maxCachedMb); IndexWriterConfig writerConfig = new IndexWriterConfig(version, null); writerConfig.setOpenMode(OpenMode.CREATE_OR_APPEND); writer = new TrackingIndexWriter(new IndexWriter(cachingDir, writerConfig)); manager = new SearcherManager(writer.getIndexWriter(), true, new SearcherFactory()); thread = new ControlledRealTimeReopenThread<>(writer, manager, targetMaxStale, targetMinStale); thread.start(); }
From source file:com.stratio.cassandra.index.LuceneIndex.java
License:Apache License
/** * Initializes this using the specified {@link Sort} for trying to keep the {@link Document}s sorted. * * @param sort The {@link Sort} to be used. *///from w w w . j av a 2 s .c om public void init(Sort sort) { Log.debug("Initializing index"); try { this.sort = sort; // Get directory file file = new File(path); // Open or create directory FSDirectory fsDirectory = FSDirectory.open(file); directory = new NRTCachingDirectory(fsDirectory, maxMergeMB, maxCachedMB); // Setup index writer IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_48, analyzer); config.setRAMBufferSizeMB(ramBufferMB); config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND); config.setUseCompoundFile(true); config.setMergePolicy(new SortingMergePolicy(config.getMergePolicy(), sort)); indexWriter = new IndexWriter(directory, config); // Setup NRT search SearcherFactory searcherFactory = new SearcherFactory() { public IndexSearcher newSearcher(IndexReader reader) throws IOException { IndexSearcher searcher = new IndexSearcher(reader); searcher.setSimilarity(new NoIDFSimilarity()); return searcher; } }; TrackingIndexWriter trackingIndexWriter = new TrackingIndexWriter(indexWriter); searcherManager = new SearcherManager(indexWriter, true, searcherFactory); searcherReopener = new ControlledRealTimeReopenThread<>(trackingIndexWriter, searcherManager, refreshSeconds, refreshSeconds); searcherReopener.start(); // Start the refresher thread } catch (IOException e) { Log.error(e, "Error while initializing index"); throw new RuntimeException(e); } }
From source file:com.stratio.cassandra.lucene.index.FSIndex.java
License:Apache License
/**
 * Builds a new {@link FSIndex}.
 *
 * @param name        the index name
 * @param mbeanName   the JMX MBean object name
 * @param path        the directory path
 * @param analyzer    the index writer analyzer
 * @param refresh     the index reader refresh frequency in seconds
 * @param ramBufferMB the index writer RAM buffer size in MB
 * @param maxMergeMB  the directory max merge size in MB
 * @param maxCachedMB the directory max cache size in MB
 * @param refreshTask action to be done during refresh (may be null)
 * @throws IndexException if any step of the index setup fails
 */
public FSIndex(String name, String mbeanName, Path path, Analyzer analyzer, double refresh, int ramBufferMB,
        int maxMergeMB, int maxCachedMB, Runnable refreshTask) {
    try {
        this.path = path;
        this.name = name;
        // Open or create directory
        FSDirectory fsDirectory = FSDirectory.open(path);
        directory = new NRTCachingDirectory(fsDirectory, maxMergeMB, maxCachedMB);
        // Setup index writer
        IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
        indexWriterConfig.setRAMBufferSizeMB(ramBufferMB);
        indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
        indexWriterConfig.setUseCompoundFile(true);
        indexWriterConfig.setMergePolicy(new TieredMergePolicy());
        indexWriter = new IndexWriter(directory, indexWriterConfig);
        // Setup NRT search: run the caller's refresh hook whenever a new searcher is built.
        SearcherFactory searcherFactory = new SearcherFactory() {
            @Override
            public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) {
                if (refreshTask != null) {
                    refreshTask.run();
                }
                IndexSearcher searcher = new IndexSearcher(reader);
                searcher.setSimilarity(new NoIDFSimilarity());
                return searcher;
            }
        };
        TrackingIndexWriter trackingWriter = new TrackingIndexWriter(indexWriter);
        searcherManager = new SearcherManager(indexWriter, true, searcherFactory);
        searcherReopener = new ControlledRealTimeReopenThread<>(trackingWriter, searcherManager, refresh,
                refresh);
        searcherReopener.start();
        // Register JMX MBean.
        // NOTE(review): registering `this` from inside the constructor publishes a
        // not-yet-fully-constructed object to JMX — confirm no MBean operation can race this.
        mbean = new ObjectName(mbeanName);
        ManagementFactory.getPlatformMBeanServer().registerMBean(this, this.mbean);
    } catch (Exception e) {
        throw new IndexException(logger, e, "Error while creating index %s", name);
    }
}
From source file:com.stratio.cassandra.lucene.service.LuceneIndex.java
License:Apache License
/**
 * Builds a new {@code RowDirectory} using the specified directory path and analyzer.
 *
 * @param keyspace        The keyspace name.
 * @param table           The table name.
 * @param name            The index name.
 * @param path            The path of the directory in where the Lucene files will be stored.
 * @param ramBufferMB     The index writer buffer size in MB.
 * @param maxMergeMB      NRTCachingDirectory max merge size in MB.
 * @param maxCachedMB     NRTCachingDirectory max cached MB.
 * @param analyzer        The default {@link Analyzer}.
 * @param refreshSeconds  The index readers refresh time in seconds. Writings are not visible
 *                        until this time.
 * @param refreshCallback A runnable to be run on index refresh.
 * @throws IOException If Lucene throws IO errors.
 */
public LuceneIndex(String keyspace, String table, String name, Path path, Integer ramBufferMB,
        Integer maxMergeMB, Integer maxCachedMB, Analyzer analyzer, Double refreshSeconds,
        Runnable refreshCallback) throws IOException {
    this.path = path;
    this.refreshCallback = refreshCallback;
    this.logName = String.format("Lucene index %s.%s.%s", keyspace, table, name);
    // Open or create directory
    FSDirectory fsDirectory = FSDirectory.open(path);
    directory = new NRTCachingDirectory(fsDirectory, maxMergeMB, maxCachedMB);
    // Setup index writer
    IndexWriterConfig config = new IndexWriterConfig(analyzer);
    config.setRAMBufferSizeMB(ramBufferMB);
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    config.setUseCompoundFile(true);
    config.setMergePolicy(new TieredMergePolicy());
    indexWriter = new IndexWriter(directory, config);
    // Setup NRT search: the refresh callback fires each time a new searcher is created.
    SearcherFactory searcherFactory = new SearcherFactory() {
        public IndexSearcher newSearcher(IndexReader reader) throws IOException {
            // NOTE(review): method name uses capital 'B' (refreshCallBack) while the field is
            // refreshCallback — presumably a wrapper method elsewhere in this class; verify.
            LuceneIndex.this.refreshCallBack();
            IndexSearcher searcher = new IndexSearcher(reader);
            searcher.setSimilarity(new NoIDFSimilarity());
            return searcher;
        }
    };
    TrackingIndexWriter trackingIndexWriter = new TrackingIndexWriter(indexWriter);
    searcherManager = new SearcherManager(indexWriter, true, searcherFactory);
    searcherReopener = new ControlledRealTimeReopenThread<>(trackingIndexWriter, searcherManager,
            refreshSeconds, refreshSeconds);
    searcherReopener.start(); // Start the refresher thread
    // Register JMX MBean; registration failure is logged but does not abort index creation.
    try {
        objectName = new ObjectName(
                String.format("com.stratio.cassandra.lucene:type=LuceneIndexes,keyspace=%s,table=%s,index=%s",
                        keyspace, table, name));
        ManagementFactory.getPlatformMBeanServer().registerMBean(this, objectName);
    } catch (MBeanException | OperationsException e) {
        Log.error(e, "Error while registering MBean");
    }
}
From source file:com.tuplejump.stargate.lucene.NearRealTimeIndexer.java
License:Apache License
private IndexWriter getIndexWriter(Version luceneV) throws IOException { file = Utils.getDirectory(keyspaceName, cfName, indexName, vNodeName); IndexWriterConfig config = new IndexWriterConfig(luceneV, analyzer); config.setRAMBufferSizeMB(256);/*from ww w.j a v a 2s. c om*/ config.setOpenMode(OPEN_MODE); directory = new NRTCachingDirectory(FSDirectory.open(file), 100, 100); logger.warn( indexName + " SG Index - Opened dir[" + file.getAbsolutePath() + "] - Openmode[" + OPEN_MODE + "]"); return new IndexWriter(directory, config); }
From source file:com.xiaomi.linden.core.search.LindenCoreImpl.java
License:Apache License
public Directory createIndexDirectory(String directory, LindenConfig.IndexType indexType) throws IOException { switch (indexType) { case RAM:/*from ww w. j a v a 2s .com*/ return new RAMDirectory(); default: Preconditions.checkNotNull(directory, "index directory can not be null"); return new NRTCachingDirectory(FSDirectory.open(new File(directory)), maxMergeSizeMB, maxCachedMB); } }
From source file:com.xiaomi.linden.core.search.LindenCoreImpl.java
License:Apache License
public Directory createTaxoIndexDirectory(String directory, LindenConfig.IndexType indexType) throws IOException { switch (indexType) { case RAM:/* w w w . j a v a 2s. c om*/ return new RAMDirectory(); default: Preconditions.checkNotNull(directory, "index directory can not be null"); return new NRTCachingDirectory(FSDirectory.open(new File(directory + ".taxonomy")), maxMergeSizeMB, maxCachedMB); } }
From source file:de.mirkosertic.desktopsearch.LuceneIndexHandler.java
License:Open Source License
public LuceneIndexHandler(Configuration aConfiguration, AnalyzerCache aAnalyzerCache, ExecutorPool aExecutorPool, PreviewProcessor aPreviewProcessor) throws IOException { previewProcessor = aPreviewProcessor; configuration = aConfiguration;/*from www .j ava2s .c om*/ analyzerCache = aAnalyzerCache; executorPool = aExecutorPool; contentFieldType = new FieldType(); contentFieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS); contentFieldType.setStored(true); contentFieldType.setTokenized(true); contentFieldType.setStoreTermVectorOffsets(true); contentFieldType.setStoreTermVectorPayloads(true); contentFieldType.setStoreTermVectorPositions(true); contentFieldType.setStoreTermVectors(true); analyzer = analyzerCache.getAnalyzer(); File theIndexDirectory = new File(aConfiguration.getConfigDirectory(), "index"); theIndexDirectory.mkdirs(); Directory theIndexFSDirectory = new NRTCachingDirectory(FSDirectory.open(theIndexDirectory.toPath()), 100, 100); IndexWriterConfig theConfig = new IndexWriterConfig(analyzer); theConfig.setSimilarity(new CustomSimilarity()); indexWriter = new IndexWriter(theIndexFSDirectory, theConfig); searcherManager = new SearcherManager(indexWriter, true, new SearcherFactory()); commitThread = new Thread("Lucene Commit Thread") { @Override public void run() { while (!isInterrupted()) { if (indexWriter.hasUncommittedChanges()) { try { indexWriter.commit(); } catch (IOException e) { throw new RuntimeException(e); } } try { Thread.sleep(2000); } catch (InterruptedException e) { // Do nothing here } } } }; commitThread.start(); facetsConfig = new FacetsConfig(); }
From source file:io.puntanegra.fhir.index.lucene.LuceneService.java
License:Apache License
/**
 * Initializes this Lucene service (mirrors the {@code FSIndex} setup).
 *
 * @param name        the index name
 * @param mbeanName   the JMX MBean object name (currently unused — registration is disabled below)
 * @param path        the directory path
 * @param analyzer    the index writer analyzer
 * @param refresh     the index reader refresh frequency in seconds
 * @param ramBufferMB the index writer RAM buffer size in MB
 * @param maxMergeMB  the directory max merge size in MB
 * @param maxCachedMB the directory max cache size in MB
 * @param refreshTask action to be done during refresh (may be null)
 * @throws FhirIndexException if any step of the index setup fails
 */
public void init(String name, String mbeanName, Path path, Analyzer analyzer, double refresh, int ramBufferMB,
        int maxMergeMB, int maxCachedMB, Runnable refreshTask) {
    try {
        this.path = path;
        this.name = name;
        // Open or create directory
        FSDirectory fsDirectory = FSDirectory.open(path);
        this.directory = new NRTCachingDirectory(fsDirectory, maxMergeMB, maxCachedMB);
        // Setup index writer
        IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
        indexWriterConfig.setRAMBufferSizeMB(ramBufferMB);
        indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
        indexWriterConfig.setUseCompoundFile(true);
        indexWriterConfig.setMergePolicy(new TieredMergePolicy());
        this.indexWriter = new IndexWriter(this.directory, indexWriterConfig);
        // Setup NRT search: run the caller's refresh hook whenever a new searcher is built.
        SearcherFactory searcherFactory = new SearcherFactory() {
            @Override
            public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) {
                if (refreshTask != null) {
                    refreshTask.run();
                }
                IndexSearcher searcher = new IndexSearcher(reader);
                searcher.setSimilarity(new NoIDFSimilarity());
                return searcher;
            }
        };
        TrackingIndexWriter trackingWriter = new TrackingIndexWriter(this.indexWriter);
        this.searcherManager = new SearcherManager(this.indexWriter, true, searcherFactory);
        this.searcherReopener = new ControlledRealTimeReopenThread<>(trackingWriter, this.searcherManager,
                refresh, refresh);
        this.searcherReopener.start();
        // JMX MBean registration is intentionally disabled; mbeanName is kept in the
        // signature for compatibility.
        // mbean = new ObjectName(mbeanName);
        // ManagementFactory.getPlatformMBeanServer().registerMBean(service, this.mbean);
    } catch (Exception e) {
        throw new FhirIndexException(e, "Error while creating index %s", name);
    }
}
From source file:org.apache.solr.core.HdfsDirectoryFactory.java
License:Apache License
/**
 * Creates an HDFS-backed Lucene {@link Directory} for {@code path}.
 *
 * <p>When the block cache is enabled (and this is not a metadata directory), the HDFS
 * directory is wrapped in a {@code BlockDirectory} backed by an off-heap (or heap)
 * {@code BlockCache}. Independently, the result may be wrapped in an
 * {@link NRTCachingDirectory} to keep small NRT files in RAM.
 *
 * @param path       the HDFS path for the directory
 * @param dirContext distinguishes data directories from metadata directories
 * @return the configured directory, possibly wrapped in caching layers
 * @throws IOException if the HDFS directory cannot be created
 */
@Override
protected Directory create(String path, DirContext dirContext) throws IOException {
    LOG.info("creating directory factory for path {}", path);
    Configuration conf = getConf();
    if (metrics == null) {
        metrics = new Metrics(conf);
    }
    boolean blockCacheEnabled = params.getBool(BLOCKCACHE_ENABLED, true);
    boolean blockCacheReadEnabled = params.getBool(BLOCKCACHE_READ_ENABLED, true);
    boolean blockCacheWriteEnabled = params.getBool(BLOCKCACHE_WRITE_ENABLED, true);
    Directory dir = null;
    // Metadata directories never use the block cache.
    if (blockCacheEnabled && dirContext != DirContext.META_DATA) {
        int numberOfBlocksPerBank = params.getInt(NUMBEROFBLOCKSPERBANK, 16384);
        int blockSize = BlockDirectory.BLOCK_SIZE;
        int bankCount = params.getInt(BLOCKCACHE_SLAB_COUNT, 1);
        boolean directAllocation = params.getBool(BLOCKCACHE_DIRECT_MEMORY_ALLOCATION, true);
        BlockCache blockCache;
        int slabSize = numberOfBlocksPerBank * blockSize;
        LOG.info("Number of slabs of block cache [{}] with direct memory allocation set to [{}]", bankCount,
                directAllocation);
        LOG.info("Block cache target memory usage, slab size of [{}] will allocate [{}] slabs and use ~[{}] bytes",
                // Widen to long before multiplying to avoid int overflow in the log message.
                new Object[] { slabSize, bankCount, ((long) bankCount * (long) slabSize) });
        int _1024Size = params.getInt("solr.hdfs.blockcache.bufferstore.1024", 8192);
        int _8192Size = params.getInt("solr.hdfs.blockcache.bufferstore.8192", 8192);
        BufferStore.init(_1024Size, _8192Size, metrics);
        long totalMemory = (long) bankCount * (long) numberOfBlocksPerBank * (long) blockSize;
        try {
            blockCache = new BlockCache(metrics, directAllocation, totalMemory, slabSize, blockSize);
        } catch (OutOfMemoryError e) {
            // Direct-memory allocation failed; surface actionable guidance to the operator.
            throw new RuntimeException(
                    "The max direct memory is likely too low. Either increase it (by adding -XX:MaxDirectMemorySize=<size>g -XX:+UseLargePages to your containers startup args)"
                            + " or disable direct allocation using solr.hdfs.blockcache.direct.memory.allocation=false in solrconfig.xml. If you are putting the block cache on the heap,"
                            + " your java heap size might not be large enough." + " Failed allocating ~"
                            + totalMemory / 1000000.0 + " MB.",
                    e);
        }
        Cache cache = new BlockDirectoryCache(blockCache, metrics);
        HdfsDirectory hdfsDirectory = new HdfsDirectory(new Path(path), conf);
        dir = new BlockDirectory("solrcore", hdfsDirectory, cache, null, blockCacheReadEnabled,
                blockCacheWriteEnabled);
    } else {
        dir = new HdfsDirectory(new Path(path), conf);
    }
    boolean nrtCachingDirectory = params.getBool(NRTCACHINGDIRECTORY_ENABLE, true);
    if (nrtCachingDirectory) {
        // NOTE(review): these are read with getInt() into doubles, so fractional MB values
        // configured by users are silently truncated — confirm that is intended.
        double nrtCacheMaxMergeSizeMB = params.getInt(NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 16);
        double nrtCacheMaxCacheMB = params.getInt(NRTCACHINGDIRECTORY_MAXCACHEMB, 192);
        return new NRTCachingDirectory(dir, nrtCacheMaxMergeSizeMB, nrtCacheMaxCacheMB);
    }
    return dir;
}