Example usage for org.apache.lucene.index IndexWriter forceMergeDeletes

List of usage examples for org.apache.lucene.index IndexWriter forceMergeDeletes

Introduction

In this page you can find the example usage for org.apache.lucene.index IndexWriter forceMergeDeletes.

Prototype

public void forceMergeDeletes(boolean doWait) throws IOException 

Source Link

Document

Just like #forceMergeDeletes(), except you can specify whether the call should block until the operation completes.

Usage

From source file: collene.TestIndexing.java

License: Apache License

@Test
public void test() throws IOException, ParseException {
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_4_9);

    // Write 100 documents, committing and force-merging after each add so the
    // index is repeatedly collapsed to a single segment with deletes expunged.
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_4_9, analyzer);
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    IndexWriter writer = new IndexWriter(directory, config);

    for (int i = 0; i < 100; i++) {
        Collection<Document> documents = new ArrayList<Document>();
        Document doc = new Document();
        doc.add(new Field("key", "aaa_" + i, TextField.TYPE_STORED));
        doc.add(new Field("not", "notaaa", TextField.TYPE_NOT_STORED));
        doc.add(new Field("meta", "aaa_meta_aaa_" + i, TextField.TYPE_STORED));
        documents.add(doc);

        writer.addDocuments(documents);

        writer.commit();
        writer.forceMerge(1);
        // doWait=true: block until the expunge-deletes merge finishes.
        writer.forceMergeDeletes(true);
    }

    // Read back through a near-real-time reader opened on the live writer.
    IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false));
    QueryParser parser = new QueryParser(Version.LUCENE_4_9, "key", analyzer);

    Query query = parser.parse("aaa_4");
    TopDocs docs = searcher.search(query, 1);
    // Assert that a hit exists BEFORE dereferencing scoreDocs[0]; the original
    // order would throw ArrayIndexOutOfBoundsException instead of failing the
    // assertion when there are no hits.
    Assert.assertTrue(docs.totalHits > 0);
    int idToDelete = docs.scoreDocs[0].doc;

    // A nonsense term must produce no hits.
    query = parser.parse("fersoius");
    docs = searcher.search(query, 1);
    Assert.assertFalse(docs.totalHits > 0);

    // Delete the matched document by its docid via tryDeleteDocument.
    DirectoryReader reader = DirectoryReader.open(writer, true);
    writer.tryDeleteDocument(reader, idToDelete);

    reader.close();
    writer.close();

    // List the files left behind in the directory.
    Set<String> files = new HashSet<String>();
    System.out.println("Listing files for " + directory.toString());
    for (String file : directory.listAll()) {
        files.add(file);
        System.out.println(" " + file);
    }

    if (strictFileChecking) {
        System.out.println("String file checking...");
        // Every expected file must be present; report any that are missing.
        Sets.SetView<String> difference = Sets.difference(expectedFiles, files);
        Assert.assertEquals(Joiner.on(",").join(difference), 0, difference.size());
    }

    // Reopen from the directory: the deleted document must no longer match.
    reader = DirectoryReader.open(directory);
    searcher = new IndexSearcher(reader);
    query = parser.parse("aaa_4");
    docs = searcher.search(query, 1);
    reader.close();
    Assert.assertFalse(docs.totalHits > 0);

    directory.close();
}

From source file: org.eclipse.dltk.internal.core.index.lucene.IndexContainer.java

License: Open Source License

/**
 * Commits pending changes on every managed index writer, first asking Lucene
 * to merge away segments containing deleted documents.
 *
 * NOTE(review): {@code mergeDeletes} is forwarded to
 * IndexWriter#forceMergeDeletes(boolean), whose parameter is {@code doWait}
 * (block until the merge completes) — not "whether to merge deletes". As
 * written, deletes are always expunged and the flag only controls blocking;
 * confirm this is the intended semantics.
 *
 * @param monitor progress monitor; one tick is reported per committed writer
 * @param mergeDeletes passed as the doWait argument of forceMergeDeletes
 */
synchronized void commit(IProgressMonitor monitor, boolean mergeDeletes) {
    // Budget one tick per data writer plus one for the timestamps writer.
    int ticks = 1;
    for (Map<?, ?> dataWriters : fIndexWriters.values()) {
        ticks += dataWriters.size();
    }
    SubMonitor subMonitor = SubMonitor.convert(monitor, ticks);
    try {
        for (Map<Integer, IndexWriter> dataWriters : fIndexWriters.values()) {
            for (IndexWriter writer : dataWriters.values()) {
                // Skip uninitialized slots; stop doing work once cancelled
                // (remaining writers are left uncommitted).
                if (writer != null && !subMonitor.isCanceled()) {
                    writer.forceMergeDeletes(mergeDeletes);
                    writer.commit();
                    subMonitor.worked(1);
                }
            }
        }
        if (fTimestampsWriter != null && !subMonitor.isCanceled()) {
            fTimestampsWriter.forceMergeDeletes(mergeDeletes);
            fTimestampsWriter.commit();
            subMonitor.worked(1);
        }
        subMonitor.done();
    } catch (IOException e) {
        // Commit failures are logged rather than propagated: indexing here is
        // best-effort and must not take down the caller.
        Logger.logException(e);
    }
}

From source file: org.elasticsearch.index.engine.internal.AsynchronousEngine.java

License: Apache License

/**
 * Runs a forced merge ("optimize") against the current index writer.
 *
 * Only one optimize runs at a time: the CAS on {@code optimizeMutex} makes
 * concurrent calls no-ops for the merge-triggering phase, while the
 * wait/flush phase at the bottom runs for every caller.
 *
 * NOTE(review): the lock variable is named {@code _}, which is a reserved
 * identifier from Java 9 onward — this code only compiles on Java 8.
 */
@Override
public void optimize(Optimize optimize) throws EngineException {
    if (optimizeMutex.compareAndSet(false, true)) {
        try (InternalLock _ = readLock.acquire()) {
            final IndexWriter writer = currentIndexWriter();

            /*
             * The way we implement upgrades is a bit hackish in the sense that we set an instance
             * variable and that this setting will thus apply to the next forced merge that will be run.
             * This is ok because (1) this is the only place we call forceMerge, (2) we have a single
             * thread for optimize, and the 'optimizeMutex' guarding this code, and (3) ConcurrentMergeScheduler
             * syncs calls to findForcedMerges.
             */
            MergePolicy mp = writer.getConfig().getMergePolicy();
            assert mp instanceof ElasticsearchMergePolicy : "MergePolicy is " + mp.getClass().getName();
            if (optimize.upgrade()) {
                ((ElasticsearchMergePolicy) mp).setUpgradeInProgress(true);
            }

            if (optimize.onlyExpungeDeletes()) {
                // doWait=false: blocking (if requested) happens below, outside the read lock.
                writer.forceMergeDeletes(false);
            } else if (optimize.maxNumSegments() <= 0) {
                // No segment target: let the merge policy decide whether to merge at all.
                writer.maybeMerge();
                possibleMergeNeeded = false;
            } else {
                writer.forceMerge(optimize.maxNumSegments(), false);
            }
        } catch (Throwable t) {
            maybeFailEngine(t, "optimize");
            throw new OptimizeFailedEngineException(shardId, t);
        } finally {
            // Always release the mutex so later optimize calls are not locked out.
            optimizeMutex.set(false);
        }
    }

    // wait for the merges outside of the read lock
    if (optimize.waitForMerge()) {
        waitForMerges(optimize.flush());
    } else if (optimize.flush()) {
        // we only need to monitor merges for async calls if we are going to flush
        threadPool.executor(ThreadPool.Names.OPTIMIZE).execute(new AbstractRunnable() {
            @Override
            public void run() {
                try {
                    waitForMerges(true);
                } catch (Exception e) {
                    logger.error("Exception while waiting for merges asynchronously after optimize", e);
                }
            }
        });
    }
}

From source file: org.elasticsearch.index.merge.Merges.java

License: Apache License

/**
 * See {@link org.apache.lucene.index.IndexWriter#forceMergeDeletes(boolean)}, with the additional
 * logic of explicitly enabling merges if the scheduler is {@link org.elasticsearch.index.merge.EnableMergeScheduler}.
 *
 * @param writer the index writer whose segments with deletes should be merged away
 * @param doWait whether the call blocks until the merge completes
 * @throws IOException if the underlying merge fails
 */
public static void forceMergeDeletes(IndexWriter writer, boolean doWait) throws IOException {
    MergeScheduler mergeScheduler = writer.getConfig().getMergeScheduler();
    if (mergeScheduler instanceof EnableMergeScheduler) {
        // Cast once instead of repeating it; merges are enabled only for the
        // duration of this call.
        EnableMergeScheduler enableMergeScheduler = (EnableMergeScheduler) mergeScheduler;
        enableMergeScheduler.enableMerge();
        try {
            writer.forceMergeDeletes(doWait);
        } finally {
            // Always restore the disabled state, even if the merge throws.
            enableMergeScheduler.disableMerge();
        }
    } else {
        writer.forceMergeDeletes(doWait);
    }
}