List of usage examples for org.apache.lucene.index.IndexWriter.maybeMerge()

public final void maybeMerge() throws IOException

maybeMerge() asks the writer's configured MergePolicy whether any merges are necessary now and, if so, hands them to the MergeScheduler to run; unlike forceMerge(), it never insists on a particular segment count.
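Before the project-specific examples, here is a minimal, self-contained sketch of the pattern most of them share: fold an external index into a writer with addIndexes() and then call maybeMerge() so the merge policy can schedule any merges it now considers necessary. The paths and class name are hypothetical, and the snippet assumes Lucene 5.x or later (FSDirectory.open(Path), IndexWriterConfig(Analyzer)).

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class MaybeMergeExample {
    public static void main(String[] args) throws IOException {
        // Hypothetical paths: a main on-disk index and a staged index to fold in.
        try (Directory main = FSDirectory.open(Paths.get("/tmp/main-index"));
                Directory staged = FSDirectory.open(Paths.get("/tmp/staged-index"));
                IndexWriter writer = new IndexWriter(main,
                        new IndexWriterConfig(new StandardAnalyzer()))) {
            // Copy the staged segments into the main index.
            writer.addIndexes(staged);
            // Ask the MergePolicy whether any merges are now necessary;
            // if so, the MergeScheduler runs them (often in the background).
            writer.maybeMerge();
            writer.commit();
        }
    }
}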
From source file:cn.hbu.cs.esearch.index.BaseSearchIndex.java
License:Apache License
public void loadFromIndex(BaseSearchIndex<R> index) throws IOException {
    LongSet delDocs = null;
    // delete docs in disk index first
    synchronized (this) {
        if (_delDocs != null && _delDocs.size() > 0) {
            delDocs = _delDocs;
            clearDeletes();
        }
    }
    deleteDocs(delDocs);

    // open readOnly ram index reader
    EsearchMultiReader<R> reader = index.openIndexReader();
    if (reader == null) {
        return;
    }

    // merge the readOnly ram index with the disk index
    IndexWriter writer = null;
    try {
        writer = openIndexWriter(null, null);
        writer.addIndexes(reader.directory());
        writer.maybeMerge();
    } finally {
        closeIndexWriter();
    }
}
From source file:org.apache.blur.manager.writer.IndexImporter.java
License:Apache License
private IndexAction getIndexAction(final HdfsDirectory directory, final FileSystem fileSystem) {
    return new IndexAction() {

        @Override
        public void performMutate(IndexSearcherCloseable searcher, IndexWriter writer) throws IOException {
            LOG.info("About to import [{0}] into [{1}/{2}]", directory, _shard, _table);
            boolean emitDeletes = searcher.getIndexReader().numDocs() != 0;
            applyDeletes(directory, writer, _shard, emitDeletes);
            LOG.info("Add index [{0}] [{1}/{2}]", directory, _shard, _table);
            writer.addIndexes(directory);
            LOG.info("Removing delete markers [{0}] on [{1}/{2}]", directory, _shard, _table);
            writer.deleteDocuments(new Term(BlurConstants.DELETE_MARKER, BlurConstants.DELETE_MARKER_VALUE));
            LOG.info("Finishing import [{0}], committing on [{1}/{2}]", directory, _shard, _table);
        }

        @Override
        public void doPreCommit(IndexSearcherCloseable indexSearcher, IndexWriter writer) throws IOException {
        }

        @Override
        public void doPostCommit(IndexWriter writer) throws IOException {
            Path path = directory.getPath();
            fileSystem.delete(new Path(path, INPROGRESS), false);
            LOG.info("Import complete on [{0}/{1}]", _shard, _table);
            writer.maybeMerge();
        }

        @Override
        public void doPreRollback(IndexWriter writer) throws IOException {
            LOG.info("Starting rollback on [{0}/{1}]", _shard, _table);
        }

        @Override
        public void doPostRollback(IndexWriter writer) throws IOException {
            LOG.info("Finished rollback on [{0}/{1}]", _shard, _table);
            Path path = directory.getPath();
            String name = path.getName();
            fileSystem.rename(path, new Path(path.getParent(), rename(name, BADROWIDS)));
        }
    };
}
From source file:org.elasticsearch.index.engine.internal.AsynchronousEngine.java
License:Apache License
@Override
public void optimize(Optimize optimize) throws EngineException {
    if (optimizeMutex.compareAndSet(false, true)) {
        try (InternalLock _ = readLock.acquire()) {
            final IndexWriter writer = currentIndexWriter();
            /*
             * The way we implement upgrades is a bit hackish in the sense that we set an instance
             * variable and that this setting will thus apply to the next forced merge that will be run.
             * This is ok because (1) this is the only place we call forceMerge, (2) we have a single
             * thread for optimize, and the 'optimizeMutex' guarding this code, and (3) ConcurrentMergeScheduler
             * syncs calls to findForcedMerges.
             */
            MergePolicy mp = writer.getConfig().getMergePolicy();
            assert mp instanceof ElasticsearchMergePolicy : "MergePolicy is " + mp.getClass().getName();
            if (optimize.upgrade()) {
                ((ElasticsearchMergePolicy) mp).setUpgradeInProgress(true);
            }
            if (optimize.onlyExpungeDeletes()) {
                writer.forceMergeDeletes(false);
            } else if (optimize.maxNumSegments() <= 0) {
                writer.maybeMerge();
                possibleMergeNeeded = false;
            } else {
                writer.forceMerge(optimize.maxNumSegments(), false);
            }
        } catch (Throwable t) {
            maybeFailEngine(t, "optimize");
            throw new OptimizeFailedEngineException(shardId, t);
        } finally {
            optimizeMutex.set(false);
        }
    }
    // wait for the merges outside of the read lock
    if (optimize.waitForMerge()) {
        waitForMerges(optimize.flush());
    } else if (optimize.flush()) {
        // we only need to monitor merges for async calls if we are going to flush
        threadPool.executor(ThreadPool.Names.OPTIMIZE).execute(new AbstractRunnable() {
            @Override
            public void run() {
                try {
                    waitForMerges(true);
                } catch (Exception e) {
                    logger.error("Exception while waiting for merges asynchronously after optimize", e);
                }
            }
        });
    }
}
From source file:org.elasticsearch.index.merge.Merges.java
License:Apache License
/**
 * See {@link org.apache.lucene.index.IndexWriter#maybeMerge()}, with the additional
 * logic of explicitly enabling merges if the scheduler is
 * {@link org.elasticsearch.index.merge.EnableMergeScheduler}.
 */
public static void maybeMerge(IndexWriter writer) throws IOException {
    MergeScheduler mergeScheduler = writer.getConfig().getMergeScheduler();
    if (mergeScheduler instanceof EnableMergeScheduler) {
        ((EnableMergeScheduler) mergeScheduler).enableMerge();
        try {
            writer.maybeMerge();
        } finally {
            ((EnableMergeScheduler) mergeScheduler).disableMerge();
        }
    } else {
        writer.maybeMerge();
    }
}
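For context, a hypothetical call site for this helper (the enable/disable behavior is taken from the javadoc and snippet above, not from Elasticsearch documentation):

// Route merge triggering through the helper so that, under
// EnableMergeScheduler, merges are enabled only for the duration of this call.
Merges.maybeMerge(writer);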
From source file:org.ms123.common.data.lucene.LuceneServiceImpl.java
License:Open Source License
public synchronized void commit(LuceneSession session) {
    IndexWriter tiw = session.getIndexWriter();
    String namespace = session.getStoreDesc().getNamespace();
    try {
        IndexReader ir = IndexReader.open(tiw, true);
        IndexWriter riw = getRealIndexWriter(namespace);
        deleteExistingDocs(session, riw);
        riw.addIndexes(ir);
        riw.maybeMerge();
        riw.commit();
        tiw.getDirectory().close();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
From source file:proj.zoie.impl.indexing.internal.BaseSearchIndex.java
License:Apache License
public void loadFromIndex(BaseSearchIndex<R> index) throws IOException {
    // hao: open readOnly ram index reader
    ZoieIndexReader<R> reader = index.openIndexReader();
    if (reader == null)
        return;

    Directory dir = reader.directory();

    // hao: delete docs in disk index
    LongSet delDocs = _delDocs;
    clearDeletes();
    deleteDocs(delDocs);

    // hao: merge the readOnly ram index with the disk index
    IndexWriter writer = null;
    try {
        writer = openIndexWriter(null, null);
        writer.addIndexes(new Directory[] { dir });
        writer.maybeMerge();
    } finally {
        closeIndexWriter();
    }
}