Example usage for org.apache.lucene.index IndexWriterConfig setIndexDeletionPolicy


Introduction

This page lists usage examples for org.apache.lucene.index.IndexWriterConfig.setIndexDeletionPolicy.

Prototype

public IndexWriterConfig setIndexDeletionPolicy(IndexDeletionPolicy delPolicy) 

Document

Expert: allows an optional IndexDeletionPolicy implementation to be specified.
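
Before the real-world examples, a minimal sketch of the call in isolation may help (assuming Lucene 5.x-style APIs; the method and parameter names are illustrative). It wraps the default KeepOnlyLastCommitDeletionPolicy in a SnapshotDeletionPolicy so a backup can pin a commit point while the writer keeps running:

private IndexWriter newWriterWithSnapshots(Directory dir, Analyzer analyzer) throws IOException {
    // Wrap the default policy so commit points can be pinned for backups.
    SnapshotDeletionPolicy snapshotPolicy =
            new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
    iwc.setIndexDeletionPolicy(snapshotPolicy);
    return new IndexWriter(dir, iwc);
}

After a commit, snapshotPolicy.snapshot() returns a pinned IndexCommit whose files survive until snapshotPolicy.release(commit) is called; the Elasticsearch Store tests below rely on exactly this pattern.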

Usage

From source file: org.elasticsearch.index.engine.InternalEngine.java

License: Apache License

private IndexWriter createWriter(boolean create) throws IOException {
    try {
        final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer());
        iwc.setCommitOnClose(false); // we by default don't commit on close
        iwc.setOpenMode(create ? IndexWriterConfig.OpenMode.CREATE : IndexWriterConfig.OpenMode.APPEND);
        iwc.setIndexDeletionPolicy(deletionPolicy);
        // with tests.verbose, lucene sets this up: plumb to align with filesystem stream
        boolean verbose = false;
        try {
            verbose = Boolean.parseBoolean(System.getProperty("tests.verbose"));
        } catch (Throwable ignore) {
        }
        iwc.setInfoStream(verbose ? InfoStream.getDefault() : new LoggerInfoStream(logger));
        iwc.setMergeScheduler(mergeScheduler);
        MergePolicy mergePolicy = config().getMergePolicy();
        // Give us the opportunity to upgrade old segments while performing
        // background merges
        mergePolicy = new ElasticsearchMergePolicy(mergePolicy);
        iwc.setMergePolicy(mergePolicy);
        iwc.setSimilarity(engineConfig.getSimilarity());
        iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().mbFrac());
        iwc.setCodec(engineConfig.getCodec());
        /* We set this timeout to a highish value to work around
         * the default poll interval in the Lucene lock that is
         * 1000ms by default. We might need to poll multiple times
         * here but with 1s poll this is only executed twice at most
         * in combination with the default writelock timeout*/
        iwc.setWriteLockTimeout(5000);
        iwc.setUseCompoundFile(this.engineConfig.isCompoundOnFlush());
        // Warm-up hook for newly-merged segments. Warming up segments here is better since it will be performed at the end
        // of the merge operation and won't slow down _refresh
        iwc.setMergedSegmentWarmer(new IndexReaderWarmer() {
            @Override
            public void warm(LeafReader reader) throws IOException {
                try {
                    LeafReader esLeafReader = new ElasticsearchLeafReader(reader, shardId);
                    assert isMergedSegment(esLeafReader);
                    if (warmer != null) {
                        final Engine.Searcher searcher = new Searcher("warmer",
                                searcherFactory.newSearcher(esLeafReader, null));
                        final IndicesWarmer.WarmerContext context = new IndicesWarmer.WarmerContext(shardId,
                                searcher);
                        warmer.warmNewReaders(context);
                    }
                } catch (Throwable t) {
                    // Don't fail a merge if the warm-up failed
                    if (isClosed.get() == false) {
                        logger.warn("Warm-up failed", t);
                    }
                    if (t instanceof Error) {
                        // assertion/out-of-memory error, don't ignore those
                        throw (Error) t;
                    }
                }
            }
        });
        return new IndexWriter(store.directory(), iwc);
    } catch (LockObtainFailedException ex) {
        boolean isLocked = IndexWriter.isLocked(store.directory());
        logger.warn("Could not lock IndexWriter isLocked [{}]", ex, isLocked);
        throw ex;
    }
}
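
This engine routes IndexWriter's diagnostic output into its own logger via setInfoStream. For comparison, a bare-bones InfoStream (org.apache.lucene.util.InfoStream is the real base class; this stdout version is purely illustrative) needs only three methods:

final class StdoutInfoStream extends InfoStream {
    @Override
    public void message(String component, String message) {
        System.out.println("[" + component + "] " + message); // components like "IW", "MS", "MP"
    }

    @Override
    public boolean isEnabled(String component) {
        return true; // a real implementation would filter noisy components
    }

    @Override
    public void close() {
        // nothing to release for stdout
    }
}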

From source file: org.elasticsearch.index.engine.robin.RobinEngine.java

License: Apache License

private IndexWriter createWriter() throws IOException {
    IndexWriter indexWriter = null;
    try {
        // release locks when started
        if (IndexWriter.isLocked(store.directory())) {
            logger.warn("shard is locked, releasing lock");
            IndexWriter.unlock(store.directory());
        }
        boolean create = !IndexReader.indexExists(store.directory());
        IndexWriterConfig config = new IndexWriterConfig(Lucene.VERSION,
                analysisService.defaultIndexAnalyzer());
        config.setOpenMode(create ? IndexWriterConfig.OpenMode.CREATE : IndexWriterConfig.OpenMode.APPEND);
        config.setIndexDeletionPolicy(deletionPolicy);
        config.setMergeScheduler(mergeScheduler.newMergeScheduler());
        config.setMergePolicy(mergePolicyProvider.newMergePolicy());
        config.setSimilarity(similarityService.defaultIndexSimilarity());
        config.setRAMBufferSizeMB(indexingBufferSize.mbFrac());
        config.setTermIndexInterval(termIndexInterval);
        config.setReaderTermsIndexDivisor(termIndexDivisor);
        config.setMaxThreadStates(indexConcurrency);

        indexWriter = new IndexWriter(store.directory(), config);
    } catch (IOException e) {
        safeClose(indexWriter);
        throw e;
    }
    return indexWriter;
}

From source file: org.elasticsearch.index.store.StoreTest.java

License: Apache License

@Test
public void testCleanupFromSnapshot() throws IOException {
    final ShardId shardId = new ShardId(new Index("index"), 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    Store store = new Store(shardId, ImmutableSettings.EMPTY, directoryService,
            randomDistributor(directoryService), new DummyShardLock(shardId));
    // this time random codec....
    IndexWriterConfig indexWriterConfig = newIndexWriterConfig(random(), TEST_VERSION_CURRENT,
            new MockAnalyzer(random())).setCodec(actualDefaultCodec());
    // we keep all commits and that allows us clean based on multiple snapshots
    indexWriterConfig.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
    IndexWriter writer = new IndexWriter(store.directory(), indexWriterConfig);
    int docs = 1 + random().nextInt(100);
    int numCommits = 0;
    for (int i = 0; i < docs; i++) {
        if (i > 0 && randomIntBetween(0, 10) == 0) {
            writer.commit();
            numCommits++;
        }
        Document doc = new Document();
        doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()),
                random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        writer.addDocument(doc);

    }
    if (numCommits < 1) {
        writer.commit();
        Document doc = new Document();
        doc.add(new TextField("id", "" + docs++, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()),
                random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        writer.addDocument(doc);
    }

    Store.MetadataSnapshot firstMeta = store.getMetadata();

    if (random().nextBoolean()) {
        for (int i = 0; i < docs; i++) {
            if (random().nextBoolean()) {
                Document doc = new Document();
                doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
                doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()),
                        random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
                writer.updateDocument(new Term("id", "" + i), doc);
            }
        }
    }
    writer.commit();
    writer.close();

    Store.MetadataSnapshot secondMeta = store.getMetadata();

    Store.LegacyChecksums checksums = new Store.LegacyChecksums();
    Map<String, StoreFileMetaData> legacyMeta = new HashMap<>();
    for (String file : store.directory().listAll()) {
        if (file.equals("write.lock") || file.equals(IndexFileNames.SEGMENTS_GEN)) {
            continue;
        }
        BytesRef hash = new BytesRef();
        if (file.startsWith("segments")) {
            hash = Store.MetadataSnapshot.hashFile(store.directory(), file);
        }
        StoreFileMetaData storeFileMetaData = new StoreFileMetaData(file, store.directory().fileLength(file),
                file + "checksum", null, hash);
        legacyMeta.put(file, storeFileMetaData);
        checksums.add(storeFileMetaData);
    }
    checksums.write(store); // write one checksum file here - we expect it to survive all the cleanups

    if (randomBoolean()) {
        store.cleanupAndVerify("test", firstMeta);
        String[] strings = store.directory().listAll();
        int numChecksums = 0;
        int numNotFound = 0;
        for (String file : strings) {
            assertTrue(firstMeta.contains(file) || Store.isChecksum(file) || file.equals("write.lock"));
            if (Store.isChecksum(file)) {
                numChecksums++;
            } else if (secondMeta.contains(file) == false) {
                numNotFound++;
            }

        }
        assertTrue("at least one file must not be in here since we have two commits?", numNotFound > 0);
        assertEquals("we wrote one checksum but it's gone now? - checksums are supposed to be kept",
                numChecksums, 1);
    } else {
        store.cleanupAndVerify("test", secondMeta);
        String[] strings = store.directory().listAll();
        int numChecksums = 0;
        int numNotFound = 0;
        for (String file : strings) {
            assertTrue(file, secondMeta.contains(file) || Store.isChecksum(file) || file.equals("write.lock"));
            if (Store.isChecksum(file)) {
                numChecksums++;
            } else if (firstMeta.contains(file) == false) {
                numNotFound++;
            }

        }
        assertTrue("at least one file must not be in here since we have two commits?", numNotFound > 0);
        assertEquals("we wrote one checksum but it's gone now? - checksums are supposed to be kept",
                numChecksums, 1);
    }

    deleteContent(store.directory());
    IOUtils.close(store);
}
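
The test installs NoDeletionPolicy.INSTANCE so every commit point survives, which is what makes cleaning against two different metadata snapshots meaningful. Sketched with plain Lucene (DirectoryReader.listCommits is the real API; the printout is illustrative), the retained commits could be enumerated like this:

// Under NoDeletionPolicy every commit survives, sorted oldest first.
List<IndexCommit> commits = DirectoryReader.listCommits(store.directory());
for (IndexCommit commit : commits) {
    System.out.println(commit.getSegmentsFileName()
            + " generation=" + commit.getGeneration()
            + " files=" + commit.getFileNames().size());
}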

From source file: org.elasticsearch.index.store.StoreTest.java

License: Apache License

@Test
public void testUserDataRead() throws IOException {
    final ShardId shardId = new ShardId(new Index("index"), 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    IndexWriterConfig iwc = newIndexWriterConfig(random(), TEST_VERSION_CURRENT, new MockAnalyzer(random()))
            .setCodec(actualDefaultCodec());
    Store store = new Store(shardId, ImmutableSettings.EMPTY, directoryService,
            randomDistributor(directoryService), new DummyShardLock(shardId));
    SnapshotDeletionPolicy deletionPolicy = new SnapshotDeletionPolicy(
            new KeepOnlyLastDeletionPolicy(shardId, EMPTY_SETTINGS));
    iwc.setIndexDeletionPolicy(deletionPolicy);
    IndexWriter writer = new IndexWriter(store.directory(), iwc);
    Document doc = new Document();
    doc.add(new TextField("id", "1", Field.Store.NO));
    writer.addDocument(doc);
    Map<String, String> commitData = new HashMap<>(2);
    String syncId = "a sync id";
    String translogId = "a translog id";
    commitData.put(Engine.SYNC_COMMIT_ID, syncId);
    commitData.put(Translog.TRANSLOG_ID_KEY, translogId);
    writer.setCommitData(commitData);
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata;
    if (randomBoolean()) {
        metadata = store.getMetadata();
    } else {
        metadata = store.getMetadata(deletionPolicy.snapshot());
    }
    assertFalse(metadata.asMap().isEmpty());
    // do not check for correct files, we have enough tests for that above
    assertThat(metadata.getCommitUserData().get(Engine.SYNC_COMMIT_ID), equalTo(syncId));
    assertThat(metadata.getCommitUserData().get(Translog.TRANSLOG_ID_KEY), equalTo(translogId));
    TestUtil.checkIndex(store.directory());
    assertDeleteContent(store, directoryService);
    IOUtils.close(store);
}
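
The same commit user data can also be read back with plain Lucene, without going through Store metadata. A short sketch, assuming the index has at least one commit:

for (IndexCommit commit : DirectoryReader.listCommits(store.directory())) {
    Map<String, String> userData = commit.getUserData(); // what setCommitData() wrote
    System.out.println(commit.getSegmentsFileName() + " -> " + userData);
}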

From source file: org.elasticsearch.index.store.StoreTests.java

License: Apache License

@Test
public void testCleanupFromSnapshot() throws IOException {
    final ShardId shardId = new ShardId(new Index("index"), 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    Store store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId));
    // this time random codec....
    IndexWriterConfig indexWriterConfig = newIndexWriterConfig(random(), new MockAnalyzer(random()))
            .setCodec(TestUtil.getDefaultCodec());
    // we keep all commits and that allows us clean based on multiple snapshots
    indexWriterConfig.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
    IndexWriter writer = new IndexWriter(store.directory(), indexWriterConfig);
    int docs = 1 + random().nextInt(100);
    int numCommits = 0;
    for (int i = 0; i < docs; i++) {
        if (i > 0 && randomIntBetween(0, 10) == 0) {
            writer.commit();
            numCommits++;
        }
        Document doc = new Document();
        doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()),
                random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        writer.addDocument(doc);

    }
    if (numCommits < 1) {
        writer.commit();
        Document doc = new Document();
        doc.add(new TextField("id", "" + docs++, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()),
                random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        writer.addDocument(doc);
    }

    Store.MetadataSnapshot firstMeta = store.getMetadata();

    if (random().nextBoolean()) {
        for (int i = 0; i < docs; i++) {
            if (random().nextBoolean()) {
                Document doc = new Document();
                doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
                doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()),
                        random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
                writer.updateDocument(new Term("id", "" + i), doc);
            }
        }
    }
    writer.commit();
    writer.close();

    Store.MetadataSnapshot secondMeta = store.getMetadata();

    Store.LegacyChecksums checksums = new Store.LegacyChecksums();
    Map<String, StoreFileMetaData> legacyMeta = new HashMap<>();
    for (String file : store.directory().listAll()) {
        if (file.equals("write.lock") || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)
                || file.startsWith("extra")) {
            continue;
        }
        BytesRef hash = new BytesRef();
        if (file.startsWith("segments")) {
            hash = Store.MetadataSnapshot.hashFile(store.directory(), file);
        }
        StoreFileMetaData storeFileMetaData = new StoreFileMetaData(file, store.directory().fileLength(file),
                file + "checksum", null, hash);
        legacyMeta.put(file, storeFileMetaData);
        checksums.add(storeFileMetaData);
    }
    checksums.write(store); // write one checksum file here - we expect it to survive all the cleanups

    if (randomBoolean()) {
        store.cleanupAndVerify("test", firstMeta);
        String[] strings = store.directory().listAll();
        int numChecksums = 0;
        int numNotFound = 0;
        for (String file : strings) {
            if (file.startsWith("extra")) {
                continue;
            }
            assertTrue(firstMeta.contains(file) || Store.isChecksum(file) || file.equals("write.lock"));
            if (Store.isChecksum(file)) {
                numChecksums++;
            } else if (secondMeta.contains(file) == false) {
                numNotFound++;
            }

        }
        assertTrue("at least one file must not be in here since we have two commits?", numNotFound > 0);
        assertEquals("we wrote one checksum but it's gone now? - checksums are supposed to be kept",
                numChecksums, 1);
    } else {
        store.cleanupAndVerify("test", secondMeta);
        String[] strings = store.directory().listAll();
        int numChecksums = 0;
        int numNotFound = 0;
        for (String file : strings) {
            if (file.startsWith("extra")) {
                continue;
            }
            assertTrue(file, secondMeta.contains(file) || Store.isChecksum(file) || file.equals("write.lock"));
            if (Store.isChecksum(file)) {
                numChecksums++;
            } else if (firstMeta.contains(file) == false) {
                numNotFound++;
            }

        }
        assertTrue("at least one file must not be in here since we have two commits?", numNotFound > 0);
        assertEquals("we wrote one checksum but it's gone now? - checksums are supposed to be kept",
                numChecksums, 1);
    }

    deleteContent(store.directory());
    IOUtils.close(store);
}

From source file: org.elasticsearch.index.store.StoreTests.java

License: Apache License

@Test
public void testUserDataRead() throws IOException {
    final ShardId shardId = new ShardId(new Index("index"), 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    Store store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId));
    IndexWriterConfig config = newIndexWriterConfig(random(), new MockAnalyzer(random()))
            .setCodec(TestUtil.getDefaultCodec());
    SnapshotDeletionPolicy deletionPolicy = new SnapshotDeletionPolicy(
            new KeepOnlyLastDeletionPolicy(shardId, EMPTY_SETTINGS));
    config.setIndexDeletionPolicy(deletionPolicy);
    IndexWriter writer = new IndexWriter(store.directory(), config);
    Document doc = new Document();
    doc.add(new TextField("id", "1", Field.Store.NO));
    writer.addDocument(doc);
    Map<String, String> commitData = new HashMap<>(2);
    String syncId = "a sync id";
    String translogId = "a translog id";
    commitData.put(Engine.SYNC_COMMIT_ID, syncId);
    commitData.put(Translog.TRANSLOG_GENERATION_KEY, translogId);
    writer.setCommitData(commitData);
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata;
    if (randomBoolean()) {
        metadata = store.getMetadata();
    } else {
        metadata = store.getMetadata(deletionPolicy.snapshot());
    }
    assertFalse(metadata.asMap().isEmpty());
    // do not check for correct files, we have enough tests for that above
    assertThat(metadata.getCommitUserData().get(Engine.SYNC_COMMIT_ID), equalTo(syncId));
    assertThat(metadata.getCommitUserData().get(Translog.TRANSLOG_GENERATION_KEY), equalTo(translogId));
    TestUtil.checkIndex(store.directory());
    assertDeleteContent(store, directoryService);
    IOUtils.close(store);
}

From source file: org.eu.bitzone.Leia.java

License: Apache License

private IndexWriter createIndexWriter() {
    try {
        final IndexWriterConfig cfg = new IndexWriterConfig(LV, new WhitespaceAnalyzer(LV));
        IndexDeletionPolicy policy;
        if (keepCommits) {
            policy = new KeepAllIndexDeletionPolicy();
        } else {
            policy = new KeepLastIndexDeletionPolicy();
        }
        cfg.setIndexDeletionPolicy(policy);
        final MergePolicy mp = cfg.getMergePolicy();
        cfg.setUseCompoundFile(IndexGate.preferCompoundFormat(dir));
        final IndexWriter iw = new IndexWriter(dir, cfg);
        return iw;
    } catch (final Exception e) {
        errorMsg("Error creating IndexWriter: " + e.toString());
        return null;
    }
}
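
KeepAllIndexDeletionPolicy and KeepLastIndexDeletionPolicy are Luke-side helpers rather than Lucene classes. For orientation, here is a hypothetical middle ground written against the real IndexDeletionPolicy contract (commits arrive sorted oldest first), keeping only the newest n commits:

final class KeepLastNDeletionPolicy extends IndexDeletionPolicy {
    private final int n;

    KeepLastNDeletionPolicy(int n) {
        this.n = n;
    }

    @Override
    public void onInit(List<? extends IndexCommit> commits) {
        onCommit(commits); // apply the same pruning rule at startup
    }

    @Override
    public void onCommit(List<? extends IndexCommit> commits) {
        // Commits are sorted oldest first; delete all but the newest n.
        for (int i = 0; i < commits.size() - n; i++) {
            commits.get(i).delete();
        }
    }
}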

From source file: org.eu.bitzone.Leia.java

License: Apache License

/**
 * Optimize the index.
 */
public void optimize(final Object dialog) {
    final Thread t = new Thread() {

        @Override
        public void run() {
            IndexWriter iw = null;
            final Object optimizeButton = find(dialog, "optimizeButton");
            setBoolean(optimizeButton, "enabled", false);
            final Object closeButton = find(dialog, "closeButton");
            setBoolean(closeButton, "enabled", false);
            final Object msg = find(dialog, "msg");
            final Object stat = find(dialog, "stat");
            setString(stat, "text", "Running ...");
            final PanelPrintWriter ppw = new PanelPrintWriter(Leia.this, msg);
            final boolean useCompound = getBoolean(find(dialog, "optCompound"), "selected");
            final boolean expunge = getBoolean(find(dialog, "optExpunge"), "selected");
            final boolean keep = getBoolean(find(dialog, "optKeepAll"), "selected");
            final boolean useLast = getBoolean(find(dialog, "optLastCommit"), "selected");
            final Object tiiSpin = find(dialog, "tii");
            final Object segnumSpin = find(dialog, "segnum");
            final int tii = Integer.parseInt(getString(tiiSpin, "text"));
            final int segnum = Integer.parseInt(getString(segnumSpin, "text"));
            try {
                if (is != null) {
                    is = null;
                }
                if (ir != null) {
                    ir.close();
                }
                if (ar != null) {
                    ar.close();
                }
                IndexDeletionPolicy policy;
                if (keep) {
                    policy = new KeepAllIndexDeletionPolicy();
                } else {
                    policy = new KeepLastIndexDeletionPolicy();
                }
                final IndexWriterConfig cfg = new IndexWriterConfig(LV, new WhitespaceAnalyzer(LV));
                if (!useLast) {
                    final IndexCommit ic = ((DirectoryReader) ir).getIndexCommit();
                    if (ic != null) {
                        cfg.setIndexCommit(ic);
                    }
                }
                cfg.setIndexDeletionPolicy(policy);
                cfg.setTermIndexInterval(tii);
                final MergePolicy p = cfg.getMergePolicy();
                cfg.setUseCompoundFile(useCompound);
                if (useCompound) {
                    p.setNoCFSRatio(1.0);
                }
                cfg.setInfoStream(ppw);
                iw = new IndexWriter(dir, cfg);
                final long startSize = Util.calcTotalFileSize(pName, dir);
                final long startTime = System.currentTimeMillis();
                if (expunge) {
                    iw.forceMergeDeletes();
                } else {
                    if (segnum > 1) {
                        iw.forceMerge(segnum, true);
                    } else {
                        iw.forceMerge(1, true);
                    }
                }
                iw.commit();
                final long endTime = System.currentTimeMillis();
                final long endSize = Util.calcTotalFileSize(pName, dir);
                final long deltaSize = startSize - endSize;
                final String sign = deltaSize < 0 ? " Increased " : " Reduced ";
                final String sizeMsg = sign + Util.normalizeSize(Math.abs(deltaSize))
                        + Util.normalizeUnit(Math.abs(deltaSize));
                final String timeMsg = String.valueOf(endTime - startTime) + " ms";
                showStatus(sizeMsg + " in " + timeMsg);
                iw.close();
                setString(stat, "text", "Finished OK.");
            } catch (final Exception e) {
                e.printStackTrace(ppw);
                setString(stat, "text", "ERROR - aborted.");
                errorMsg("ERROR optimizing: " + e.toString());
                if (iw != null) {
                    try {
                        iw.close();
                    } catch (final Exception e1) {
                    }
                }
            } finally {
                setBoolean(closeButton, "enabled", true);
            }
            try {
                actionReopen();
                is = new IndexSearcher(ir);
                // add dialog again
                add(dialog);
            } catch (final Exception e) {
                e.printStackTrace(ppw);
                errorMsg("ERROR reopening after optimize:\n" + e.getMessage());
            }
        }
    };
    t.start();
}
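
When "use last commit" is unchecked, the code above opens the writer on the commit the current reader is sitting on via cfg.setIndexCommit(ic); the same mechanism is the standard way to roll an index back to an earlier commit. A standalone sketch, assuming a deletion policy has retained older commits and a recent Lucene where analyzers no longer take a Version argument:

List<IndexCommit> commits = DirectoryReader.listCommits(dir); // sorted oldest first
IndexCommit older = commits.get(0);
IndexWriterConfig cfg = new IndexWriterConfig(new WhitespaceAnalyzer());
cfg.setIndexCommit(older); // open the writer on the old commit point
try (IndexWriter iw = new IndexWriter(dir, cfg)) {
    iw.commit(); // this commit makes the rollback the new head of the index
}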

From source file: org.getopt.luke.Luke.java

License: Apache License

private IndexWriter createIndexWriter() {
    try {
        IndexWriterConfig cfg = new IndexWriterConfig(LV, new WhitespaceAnalyzer(LV));
        IndexDeletionPolicy policy;
        if (keepCommits) {
            policy = new KeepAllIndexDeletionPolicy();
        } else {
            policy = new KeepLastIndexDeletionPolicy();
        }
        cfg.setIndexDeletionPolicy(policy);
        cfg.setUseCompoundFile(IndexGate.preferCompoundFormat(dir));
        IndexWriter iw = new IndexWriter(dir, cfg);
        return iw;
    } catch (Exception e) {
        errorMsg("Error creating IndexWriter: " + e.toString());
        return null;
    }
}

From source file: org.getopt.luke.Luke.java

License: Apache License

/**
 * Optimize the index.
 */
public void optimize(final Object dialog) {
    Thread t = new Thread() {
        public void run() {
            IndexWriter iw = null;
            Object optimizeButton = find(dialog, "optimizeButton");
            setBoolean(optimizeButton, "enabled", false);
            Object closeButton = find(dialog, "closeButton");
            setBoolean(closeButton, "enabled", false);
            Object msg = find(dialog, "msg");
            Object stat = find(dialog, "stat");
            setString(stat, "text", "Running ...");
            PanelPrintWriter ppw = new PanelPrintWriter(Luke.this, msg);
            boolean useCompound = getBoolean(find(dialog, "optCompound"), "selected");
            boolean expunge = getBoolean(find(dialog, "optExpunge"), "selected");
            boolean keep = getBoolean(find(dialog, "optKeepAll"), "selected");
            boolean useLast = getBoolean(find(dialog, "optLastCommit"), "selected");
            Object tiiSpin = find(dialog, "tii");
            Object segnumSpin = find(dialog, "segnum");
            int tii = Integer.parseInt(getString(tiiSpin, "text"));
            int segnum = Integer.parseInt(getString(segnumSpin, "text"));
            try {
                if (is != null)
                    is = null;
                if (ir != null)
                    ir.close();
                if (ar != null)
                    ar.close();
                IndexDeletionPolicy policy;
                if (keep) {
                    policy = new KeepAllIndexDeletionPolicy();
                } else {
                    policy = new KeepLastIndexDeletionPolicy();
                }
                IndexWriterConfig cfg = new IndexWriterConfig(LV, new WhitespaceAnalyzer(LV));
                if (!useLast) {
                    IndexCommit ic = ((DirectoryReader) ir).getIndexCommit();
                    if (ic != null) {
                        cfg.setIndexCommit(ic);
                    }
                }
                cfg.setIndexDeletionPolicy(policy);
                cfg.setTermIndexInterval(tii);
                cfg.setUseCompoundFile(useCompound);
                cfg.setInfoStream(ppw);
                iw = new IndexWriter(dir, cfg);
                long startSize = Util.calcTotalFileSize(pName, dir);
                long startTime = System.currentTimeMillis();
                if (expunge) {
                    iw.forceMergeDeletes();
                } else {
                    if (segnum > 1) {
                        iw.forceMerge(segnum, true);
                    } else {
                        iw.forceMerge(1, true);
                    }
                }
                iw.commit();
                long endTime = System.currentTimeMillis();
                long endSize = Util.calcTotalFileSize(pName, dir);
                long deltaSize = startSize - endSize;
                String sign = deltaSize < 0 ? " Increased " : " Reduced ";
                String sizeMsg = sign + Util.normalizeSize(Math.abs(deltaSize))
                        + Util.normalizeUnit(Math.abs(deltaSize));
                String timeMsg = String.valueOf(endTime - startTime) + " ms";
                showStatus(sizeMsg + " in " + timeMsg);
                iw.close();
                setString(stat, "text", "Finished OK.");
            } catch (Exception e) {
                e.printStackTrace(ppw);
                setString(stat, "text", "ERROR - aborted.");
                errorMsg("ERROR optimizing: " + e.toString());
                if (iw != null)
                    try {
                        iw.close();
                    } catch (Exception e1) {
                    }
            } finally {
                setBoolean(closeButton, "enabled", true);
            }
            try {
                actionReopen();
                is = new IndexSearcher(ir);
                // add dialog again
                add(dialog);
            } catch (Exception e) {
                e.printStackTrace(ppw);
                errorMsg("ERROR reopening after optimize:\n" + e.getMessage());
            }
        }
    };
    t.start();
}