List of usage examples for org.apache.lucene.index.IndexWriterConfig#setMergeScheduler(MergeScheduler)
public IndexWriterConfig setMergeScheduler(MergeScheduler mergeScheduler)
From source file:org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexEditorContext.java
License:Apache License
static IndexWriterConfig getIndexWriterConfig(IndexDefinition definition, boolean remoteDir) { // FIXME: Hack needed to make Lucene work in an OSGi environment Thread thread = Thread.currentThread(); ClassLoader loader = thread.getContextClassLoader(); thread.setContextClassLoader(IndexWriterConfig.class.getClassLoader()); try {/*from w w w . j a va 2s .c om*/ Analyzer definitionAnalyzer = definition.getAnalyzer(); Map<String, Analyzer> analyzers = new HashMap<String, Analyzer>(); analyzers.put(FieldNames.SPELLCHECK, new ShingleAnalyzerWrapper(LuceneIndexConstants.ANALYZER, 3)); if (!definition.isSuggestAnalyzed()) { analyzers.put(FieldNames.SUGGEST, SuggestHelper.getAnalyzer()); } Analyzer analyzer = new PerFieldAnalyzerWrapper(definitionAnalyzer, analyzers); IndexWriterConfig config = new IndexWriterConfig(VERSION, analyzer); if (remoteDir) { config.setMergeScheduler(new SerialMergeScheduler()); } if (definition.getCodec() != null) { config.setCodec(definition.getCodec()); } return config; } finally { thread.setContextClassLoader(loader); } }
From source file:org.apache.maven.index.context.NexusIndexWriter.java
License:Apache License
public static IndexWriterConfig defaultConfig() { final IndexWriterConfig config = new IndexWriterConfig(new NexusAnalyzer()); // default open mode is CreateOrAppend which suits us config.setRAMBufferSizeMB(2.0); // old default config.setMergeScheduler(new SerialMergeScheduler()); // merging serially config.setWriteLockTimeout(IndexWriterConfig.WRITE_LOCK_TIMEOUT); return config; }
From source file:org.apache.nifi.provenance.lucene.SimpleIndexManager.java
License:Apache License
/**
 * Opens a new Lucene index writer for the given directory and wraps it in an
 * {@link IndexWriterCount} with an initial reference count of 1.
 *
 * <p>If any step fails, every resource opened so far is closed and the close
 * failures are attached to the original exception as suppressed exceptions.</p>
 *
 * @param indexDirectory directory holding the Lucene index
 * @return a new writer/count holder for the directory
 * @throws IOException if the directory or writer cannot be opened
 */
private IndexWriterCount createWriter(final File indexDirectory) throws IOException {
    final List<Closeable> openedResources = new ArrayList<>();
    final Directory dir = FSDirectory.open(indexDirectory);
    openedResources.add(dir);
    try {
        final Analyzer analyzer = new StandardAnalyzer();
        openedResources.add(analyzer);

        final IndexWriterConfig writerConfig = new IndexWriterConfig(LuceneUtil.LUCENE_VERSION, analyzer);
        // Cap both merge count and merge threads at the configured value.
        final int mergeThreads = repoConfig.getConcurrentMergeThreads();
        final ConcurrentMergeScheduler scheduler = new ConcurrentMergeScheduler();
        scheduler.setMaxMergesAndThreads(mergeThreads, mergeThreads);
        writerConfig.setMergeScheduler(scheduler);

        final IndexWriter indexWriter = new IndexWriter(dir, writerConfig);
        final EventIndexWriter eventWriter = new LuceneEventIndexWriter(indexWriter, indexDirectory);
        logger.debug("Providing new index writer for {}", indexDirectory);
        return new IndexWriterCount(eventWriter, analyzer, dir, 1, false);
    } catch (final IOException ioe) {
        // Best-effort cleanup of everything opened so far; keep the original failure.
        for (final Closeable resource : openedResources) {
            try {
                resource.close();
            } catch (final IOException suppressed) {
                ioe.addSuppressed(suppressed);
            }
        }
        throw ioe;
    }
}
From source file:org.apache.nifi.provenance.lucene.StandardIndexManager.java
License:Apache License
/**
 * Creates an index writer (and its analyzer/directory) for the given index
 * directory, returning them bundled in an {@link IndexWriterCount} whose
 * reference count starts at 1.
 *
 * <p>On failure, all resources opened up to that point are closed; close
 * errors are recorded as suppressed exceptions on the original failure.</p>
 *
 * @param indexDirectory directory holding the Lucene index
 * @return a new writer/count holder for the directory
 * @throws IOException if opening the directory or writer fails
 */
private IndexWriterCount createWriter(final File indexDirectory) throws IOException {
    final List<Closeable> toCloseOnFailure = new ArrayList<>();
    final Directory directory = FSDirectory.open(indexDirectory.toPath());
    toCloseOnFailure.add(directory);
    try {
        final Analyzer analyzer = new StandardAnalyzer();
        toCloseOnFailure.add(analyzer);

        final IndexWriterConfig config = new IndexWriterConfig(analyzer);
        // Use the configured thread count for both max merges and merge threads.
        final int threads = repoConfig.getConcurrentMergeThreads();
        final ConcurrentMergeScheduler mergeScheduler = new ConcurrentMergeScheduler();
        mergeScheduler.setMaxMergesAndThreads(threads, threads);
        config.setMergeScheduler(mergeScheduler);

        final IndexWriter writer = new IndexWriter(directory, config);
        final EventIndexWriter eventIndexWriter = new LuceneEventIndexWriter(writer, indexDirectory);
        logger.debug("Providing new index writer for {}", indexDirectory);
        return new IndexWriterCount(eventIndexWriter, analyzer, directory, 1, false);
    } catch (final IOException ioe) {
        // Unwind: close whatever was opened, preserving the original exception.
        for (final Closeable closeable : toCloseOnFailure) {
            try {
                closeable.close();
            } catch (final IOException closeFailure) {
                ioe.addSuppressed(closeFailure);
            }
        }
        throw ioe;
    }
}
From source file:org.apache.solr.update.SolrIndexConfig.java
License:Apache License
public IndexWriterConfig toIndexWriterConfig(IndexSchema schema) { // so that we can update the analyzer on core reload, we pass null // for the default analyzer, and explicitly pass an analyzer on // appropriate calls to IndexWriter IndexWriterConfig iwc = new IndexWriterConfig(luceneVersion, null); if (maxBufferedDocs != -1) iwc.setMaxBufferedDocs(maxBufferedDocs); if (ramBufferSizeMB != -1) iwc.setRAMBufferSizeMB(ramBufferSizeMB); if (termIndexInterval != -1) iwc.setTermIndexInterval(termIndexInterval); if (writeLockTimeout != -1) iwc.setWriteLockTimeout(writeLockTimeout); iwc.setSimilarity(schema.getSimilarity()); iwc.setMergePolicy(buildMergePolicy(schema)); iwc.setMergeScheduler(buildMergeScheduler(schema)); iwc.setInfoStream(infoStream);/*from www . j av a 2s .co m*/ // do this after buildMergePolicy since the backcompat logic // there may modify the effective useCompoundFile iwc.setUseCompoundFile(getUseCompoundFile()); if (maxIndexingThreads != -1) { iwc.setMaxThreadStates(maxIndexingThreads); } if (mergedSegmentWarmerInfo != null) { // TODO: add infostream -> normal logging system (there is an issue somewhere) IndexReaderWarmer warmer = schema.getResourceLoader().newInstance(mergedSegmentWarmerInfo.className, IndexReaderWarmer.class, null, new Class[] { InfoStream.class }, new Object[] { iwc.getInfoStream() }); iwc.setMergedSegmentWarmer(warmer); } return iwc; }
From source file:org.d2.plugins.lucene.LuceneIndexer.java
License:Apache License
/** * TODO stop the background thread and lock everything (including all reads) *///from www. ja v a 2 s. c om public void rebuildIndex(Collection<Object> objList) { try { manager.close(); manager = new LuceneManager(index); //writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_36, analyzer); if (index instanceof NRTCachingDirectory) config.setMergeScheduler(((NRTCachingDirectory) index).getMergeScheduler()); IndexWriter writer = new IndexWriter(manager.getIndex(), config); for (Object obj : objList) { Document d = (Document) docBuilder.toDocument(obj); writer.addDocument(d); } //writer.optimize(); writer.commit(); manager.close(); manager = new LuceneManager(index); } catch (CorruptIndexException e) { throw Util.wrap(e); } catch (IOException e) { throw Util.wrap(e); } }
From source file:org.eclipse.dltk.internal.core.index.lucene.IndexContainer.java
License:Open Source License
private IndexWriter createWriter(Path path) throws IOException { Directory indexDir = new IndexDirectory(path, SimpleFSLockFactory.INSTANCE); purgeLocks(path);// w w w . j a va 2 s . co m IndexWriterConfig config = new IndexWriterConfig(new SimpleAnalyzer()); ConcurrentMergeScheduler mergeScheduler = new ConcurrentMergeScheduler(); mergeScheduler.setDefaultMaxMergesAndThreads(true); config.setMergeScheduler(mergeScheduler); config.setOpenMode(OpenMode.CREATE_OR_APPEND); config.setWriteLockTimeout(WRITE_LOCK_TIMEOUT); config.setCommitOnClose(false); return new IndexWriter(indexDir, config); }
From source file:org.elasticsearch.index.engine.internal.AsynchronousEngine.java
License:Apache License
/**
 * Creates the Lucene {@link IndexWriter} for this engine's shard: opens in
 * CREATE mode only when no index exists yet, wires in the deletion policy,
 * merge scheduler/policy, similarity, codec and buffer sizes, and installs a
 * merged-segment warmer. On lock failure, logs the lock state and rethrows.
 *
 * @return a configured {@link IndexWriter} over the store's directory
 * @throws IOException if the writer cannot be created
 */
private IndexWriter createWriter() throws IOException {
    try {
        // APPEND when an index already exists in the store, otherwise CREATE.
        boolean create = !Lucene.indexExists(store.directory());
        IndexWriterConfig config = new IndexWriterConfig(Lucene.VERSION, analysisService.defaultIndexAnalyzer());
        config.setOpenMode(create ? IndexWriterConfig.OpenMode.CREATE : IndexWriterConfig.OpenMode.APPEND);
        config.setIndexDeletionPolicy(deletionPolicy);
        config.setInfoStream(new LoggerInfoStream(indexSettings, shardId));
        config.setMergeScheduler(mergeScheduler.newMergeScheduler());
        MergePolicy mergePolicy = mergePolicyProvider.getMergePolicy();
        // Give us the opportunity to upgrade old segments while performing
        // background merges.
        mergePolicy = new ElasticsearchMergePolicy(mergePolicy);
        config.setMergePolicy(mergePolicy);
        config.setSimilarity(similarityService.similarity());
        config.setRAMBufferSizeMB(indexingBufferSize.mbFrac());
        config.setMaxThreadStates(indexConcurrency);
        config.setCodec(codecService.codec(codecName));
        /* We set this timeout to a highish value to work around the default poll
         * interval in the Lucene lock that is 1000ms by default. We might need to
         * poll multiple times here but with 1s poll this is only executed twice at
         * most in combination with the default writelock timeout. */
        config.setWriteLockTimeout(5000);
        config.setUseCompoundFile(this.compoundOnFlush);
        config.setCheckIntegrityAtMerge(checksumOnMerge);
        // Warm-up hook for newly-merged segments. Warming up segments here is
        // better since it will be performed at the end of the merge operation
        // and won't slow down _refresh.
        config.setMergedSegmentWarmer(new IndexReaderWarmer() {
            @Override
            public void warm(AtomicReader reader) throws IOException {
                try {
                    assert isMergedSegment(reader);
                    if (warmer != null) {
                        final Engine.Searcher searcher = new SimpleSearcher("warmer", new IndexSearcher(reader));
                        final IndicesWarmer.WarmerContext context = new IndicesWarmer.WarmerContext(shardId, searcher);
                        warmer.warmNewReaders(context);
                    }
                } catch (Throwable t) {
                    // Don't fail a merge if the warm-up failed.
                    if (!closed) {
                        logger.warn("Warm-up failed", t);
                    }
                    if (t instanceof Error) {
                        // Assertion/out-of-memory error — don't ignore those.
                        throw (Error) t;
                    }
                }
            }
        });
        return new IndexWriter(store.directory(), config);
    } catch (LockObtainFailedException ex) {
        boolean isLocked = IndexWriter.isLocked(store.directory());
        logger.warn("Could not lock IndexWriter isLocked [{}]", ex, isLocked);
        throw ex;
    }
}
From source file:org.elasticsearch.index.engine.internal.InternalEngine.java
License:Apache License
/**
 * Creates the Lucene {@link IndexWriter} for this engine's shard. Any stale
 * write lock on the store directory is forcibly released first (assumes the
 * previous holder is gone — e.g. after an unclean shutdown). The writer is
 * opened in CREATE mode only when no index exists yet; merge, similarity,
 * buffer, codec and warm-up settings are then applied.
 *
 * @return a configured {@link IndexWriter} over the store's directory
 * @throws IOException if the writer cannot be created
 */
private IndexWriter createWriter() throws IOException {
    try {
        // Release a leftover write lock from a previous (dead) writer before starting.
        if (IndexWriter.isLocked(store.directory())) {
            logger.warn("shard is locked, releasing lock");
            IndexWriter.unlock(store.directory());
        }
        // APPEND when an index already exists in the store, otherwise CREATE.
        boolean create = !Lucene.indexExists(store.directory());
        IndexWriterConfig config = new IndexWriterConfig(Lucene.VERSION, analysisService.defaultIndexAnalyzer());
        config.setOpenMode(create ? IndexWriterConfig.OpenMode.CREATE : IndexWriterConfig.OpenMode.APPEND);
        config.setIndexDeletionPolicy(deletionPolicy);
        config.setMergeScheduler(mergeScheduler.newMergeScheduler());
        MergePolicy mergePolicy = mergePolicyProvider.newMergePolicy();
        // Give us the opportunity to upgrade old segments while performing
        // background merges.
        mergePolicy = new IndexUpgraderMergePolicy(mergePolicy);
        config.setMergePolicy(mergePolicy);
        config.setSimilarity(similarityService.similarity());
        config.setRAMBufferSizeMB(indexingBufferSize.mbFrac());
        config.setMaxThreadStates(indexConcurrency);
        config.setCodec(codecService.codec(codecName));
        /* We set this timeout to a highish value to work around the default poll
         * interval in the Lucene lock that is 1000ms by default. We might need to
         * poll multiple times here but with 1s poll this is only executed twice at
         * most in combination with the default writelock timeout. */
        config.setWriteLockTimeout(5000);
        config.setUseCompoundFile(this.compoundOnFlush);
        // Warm-up hook for newly-merged segments. Warming up segments here is
        // better since it will be performed at the end of the merge operation
        // and won't slow down _refresh.
        config.setMergedSegmentWarmer(new IndexReaderWarmer() {
            @Override
            public void warm(AtomicReader reader) throws IOException {
                try {
                    assert isMergedSegment(reader);
                    final Engine.Searcher searcher = new SimpleSearcher("warmer", new IndexSearcher(reader));
                    final IndicesWarmer.WarmerContext context = new IndicesWarmer.WarmerContext(shardId, searcher);
                    if (warmer != null)
                        warmer.warm(context);
                } catch (Throwable t) {
                    // Don't fail a merge if the warm-up failed.
                    if (!closed) {
                        logger.warn("Warm-up failed", t);
                    }
                    if (t instanceof Error) {
                        // Assertion/out-of-memory error — don't ignore those.
                        throw (Error) t;
                    }
                }
            }
        });
        return new IndexWriter(store.directory(), config);
    } catch (LockObtainFailedException ex) {
        boolean isLocked = IndexWriter.isLocked(store.directory());
        logger.warn("Could not lock IndexWriter isLocked [{}]", ex, isLocked);
        throw ex;
    }
}
From source file:org.elasticsearch.index.engine.InternalEngine.java
License:Apache License
/**
 * Creates the Lucene {@link IndexWriter} for this engine's shard. Commit-on-close
 * is disabled (the engine manages commits itself); open mode is chosen by the
 * caller via {@code create}. Wires in the deletion policy, merge scheduler and
 * (Elasticsearch-wrapped) merge policy, similarity, RAM buffer, codec, and a
 * merged-segment warmer; an info stream is plumbed in when tests.verbose is set.
 *
 * @param create true to create a fresh index, false to append to an existing one
 * @return a configured {@link IndexWriter} over the store's directory
 * @throws IOException if the writer cannot be created
 */
private IndexWriter createWriter(boolean create) throws IOException {
    try {
        final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer());
        iwc.setCommitOnClose(false); // we by default don't commit on close
        iwc.setOpenMode(create ? IndexWriterConfig.OpenMode.CREATE : IndexWriterConfig.OpenMode.APPEND);
        iwc.setIndexDeletionPolicy(deletionPolicy);
        // With tests.verbose, Lucene sets this up: plumb to align with filesystem stream.
        boolean verbose = false;
        try {
            verbose = Boolean.parseBoolean(System.getProperty("tests.verbose"));
        } catch (Throwable ignore) {
            // Best-effort: a SecurityManager may forbid reading system properties.
        }
        iwc.setInfoStream(verbose ? InfoStream.getDefault() : new LoggerInfoStream(logger));
        iwc.setMergeScheduler(mergeScheduler);
        MergePolicy mergePolicy = config().getMergePolicy();
        // Give us the opportunity to upgrade old segments while performing
        // background merges.
        mergePolicy = new ElasticsearchMergePolicy(mergePolicy);
        iwc.setMergePolicy(mergePolicy);
        iwc.setSimilarity(engineConfig.getSimilarity());
        iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().mbFrac());
        iwc.setCodec(engineConfig.getCodec());
        /* We set this timeout to a highish value to work around the default poll
         * interval in the Lucene lock that is 1000ms by default. We might need to
         * poll multiple times here but with 1s poll this is only executed twice at
         * most in combination with the default writelock timeout. */
        iwc.setWriteLockTimeout(5000);
        iwc.setUseCompoundFile(this.engineConfig.isCompoundOnFlush());
        // Warm-up hook for newly-merged segments. Warming up segments here is
        // better since it will be performed at the end of the merge operation
        // and won't slow down _refresh.
        iwc.setMergedSegmentWarmer(new IndexReaderWarmer() {
            @Override
            public void warm(LeafReader reader) throws IOException {
                try {
                    LeafReader esLeafReader = new ElasticsearchLeafReader(reader, shardId);
                    assert isMergedSegment(esLeafReader);
                    if (warmer != null) {
                        final Engine.Searcher searcher = new Searcher("warmer", searcherFactory.newSearcher(esLeafReader, null));
                        final IndicesWarmer.WarmerContext context = new IndicesWarmer.WarmerContext(shardId, searcher);
                        warmer.warmNewReaders(context);
                    }
                } catch (Throwable t) {
                    // Don't fail a merge if the warm-up failed.
                    if (isClosed.get() == false) {
                        logger.warn("Warm-up failed", t);
                    }
                    if (t instanceof Error) {
                        // Assertion/out-of-memory error — don't ignore those.
                        throw (Error) t;
                    }
                }
            }
        });
        return new IndexWriter(store.directory(), iwc);
    } catch (LockObtainFailedException ex) {
        boolean isLocked = IndexWriter.isLocked(store.directory());
        logger.warn("Could not lock IndexWriter isLocked [{}]", ex, isLocked);
        throw ex;
    }
}