Example usage for org.apache.lucene.index CheckIndex setInfoStream

List of usage examples for org.apache.lucene.index CheckIndex setInfoStream

Introduction

On this page you can find example usages of org.apache.lucene.index CheckIndex setInfoStream.

Prototype

public void setInfoStream(PrintStream out) 

Source Link

Document

Set infoStream where messages should go.

Usage

From source file:com.zimbra.cs.index.LuceneIndex.java

License:Open Source License

/**
 * Run a sanity check for the index. Callers are responsible to make sure the index is not opened by any writer.
 *
 * @param out info stream where messages should go. If null, no messages are printed.
 * @return true if no problems were found, otherwise false
 * @throws IOException failed to verify, but it doesn't necessarily mean the index is corrupted.
 */
@Override
public boolean verify(PrintStream out) throws IOException {
    if (!IndexReader.indexExists(luceneDirectory)) {
        // A missing index is not treated as corruption; report it only when a stream was given.
        // (The original dereferenced "out" unconditionally here, throwing NPE for null despite
        // the documented "If null, no messages are printed" contract.)
        if (out != null) {
            out.println("index does not exist or no segments file found: " + luceneDirectory.getDirectory());
        }
        return true;
    }
    CheckIndex check = new CheckIndex(luceneDirectory);
    if (out != null) {
        check.setInfoStream(out);
    }
    CheckIndex.Status status = check.checkIndex();
    return status.clean;
}

From source file:com.zimbra.cs.index.LuceneViewer.java

License:Open Source License

/**
 * CLI handler: opens the index directory named by the input option and runs
 * Lucene's CheckIndex over it, reporting the verdict on the console.
 * Exits the JVM with status 1 if the directory cannot be opened.
 *
 * @param cl parsed command line carrying the input-directory and verbose options
 * @throws Exception if CheckIndex itself fails
 */
private static void doCheck(CommandLine cl) throws Exception {
    final Console console = new Console(cl.hasOption(CLI.O_VERBOSE));
    final String indexDir = cl.getOptionValue(CLI.O_INPUT);
    console.info("Checking index " + indexDir);

    Directory dir = null;
    try {
        dir = LuceneDirectory.open(new File(indexDir));
    } catch (Throwable t) {
        console.info("ERROR: could not open directory \"" + indexDir + "\"; exiting");
        t.printStackTrace(System.out);
        System.exit(1);
    }

    final CheckIndex checker = new CheckIndex(dir);
    checker.setInfoStream(System.out);

    final Status result = checker.checkIndex();
    final String verdict;
    if (result.clean) {
        verdict = "clean";
    } else {
        verdict = "not clean";
    }
    console.info("Result:" + verdict);
}

From source file:io.anserini.integration.EndToEndTest.java

License:Apache License

/**
 * Verifies the freshly built index with Lucene's CheckIndex and asserts the
 * per-segment statistics (field norms, term index, stored fields) against the
 * expected values configured on this test instance.
 *
 * @throws IOException if the index directory cannot be opened or checked
 */
protected void checkIndex() throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
    Directory dir = FSDirectory.open(Paths.get(this.indexOutputPrefix + this.collectionClass));
    CheckIndex checker = new CheckIndex(dir);
    // Keep a handle to the stream: autoFlush is off, so we must flush explicitly
    // before reading "bos" — the original never flushed and could lose buffered output.
    PrintStream infoStream = new PrintStream(bos, false, IOUtils.UTF_8);
    checker.setInfoStream(infoStream);
    if (VERBOSE) {
        checker.setInfoStream(System.out);
    }
    CheckIndex.Status indexStatus = checker.checkIndex();
    infoStream.flush();
    if (!indexStatus.clean) {
        System.out.println("CheckIndex failed");
        System.out.println(bos.toString(IOUtils.UTF_8));
        fail();
    }

    // Detailed per-segment sanity checks on the first (and only expected) segment.
    final CheckIndex.Status.SegmentInfoStatus seg = indexStatus.segmentInfos.get(0);
    assertTrue(seg.openReaderPassed);

    assertNotNull(seg.diagnostics);

    assertNotNull(seg.fieldNormStatus);
    assertNull(seg.fieldNormStatus.error);
    assertEquals(this.fieldNormStatusTotalFields, seg.fieldNormStatus.totFields);

    assertNotNull(seg.termIndexStatus);
    assertNull(seg.termIndexStatus.error);
    assertEquals(this.termIndexStatusTermCount, seg.termIndexStatus.termCount);
    assertEquals(this.termIndexStatusTotFreq, seg.termIndexStatus.totFreq);
    assertEquals(this.termIndexStatusTotPos, seg.termIndexStatus.totPos);

    assertNotNull(seg.storedFieldStatus);
    assertNull(seg.storedFieldStatus.error);
    assertEquals(this.storedFieldStatusTotalDocCounts, seg.storedFieldStatus.docCount);
    assertEquals(this.storedFieldStatusTotFields, seg.storedFieldStatus.totFields);

    assertTrue(seg.diagnostics.size() > 0);
    // Checking a named subset of segments must also come back clean.
    final List<String> onlySegments = new ArrayList<>();
    onlySegments.add("_0");

    assertTrue(checker.checkIndex(onlySegments).clean);
    checker.close();
}

From source file:org.elasticsearch.index.shard.service.InternalIndexShard.java

License:Apache License

/**
 * Runs Lucene's CheckIndex over the shard's store, optionally repairing a dirty
 * index (when checkIndexOnStartup is "fix") or raising an IndexShardException.
 * Records the elapsed time in {@code checkIndexTook}. All failures of the check
 * machinery itself are logged and swallowed (best-effort verification).
 *
 * @param throwException surface a dirty index as an exception instead of only logging,
 *                       unless the "fix" mode repairs it first
 * @throws IndexShardException if the index is not clean, fixing was not requested,
 *                             and {@code throwException} is true
 */
private void checkIndex(boolean throwException) throws IndexShardException {
    try {
        checkIndexTook = 0;
        long time = System.currentTimeMillis();
        if (!Lucene.indexExists(store.directory())) {
            // nothing on disk yet — nothing to verify
            return;
        }
        CheckIndex checkIndex = new CheckIndex(store.directory());
        BytesStreamOutput os = new BytesStreamOutput();
        PrintStream out = new PrintStream(os, false, Charsets.UTF_8.name());
        checkIndex.setInfoStream(out);
        CheckIndex.Status status = checkIndex.checkIndex();
        // Flush AFTER checkIndex has written its report. The original flushed before
        // running the check, so buffered PrintStream output could be missing from
        // "os" when it is logged below.
        out.flush();
        if (!status.clean) {
            if (state == IndexShardState.CLOSED) {
                // ignore if closed....
                return;
            }
            logger.warn("check index [failure]\n{}", new String(os.bytes().toBytes(), Charsets.UTF_8));
            if ("fix".equalsIgnoreCase(checkIndexOnStartup)) {
                if (logger.isDebugEnabled()) {
                    logger.debug("fixing index, writing new segments file ...");
                }
                checkIndex.fixIndex(status);
                if (logger.isDebugEnabled()) {
                    logger.debug("index fixed, wrote new segments file \"{}\"", status.segmentsFileName);
                }
            } else {
                // only throw a failure if we are not going to fix the index
                if (throwException) {
                    throw new IndexShardException(shardId, "index check failure");
                }
            }
        } else {
            if (logger.isDebugEnabled()) {
                logger.debug("check index [success]\n{}", new String(os.bytes().toBytes(), Charsets.UTF_8));
            }
        }
        checkIndexTook = System.currentTimeMillis() - time;
    } catch (Exception e) {
        logger.warn("failed to check index", e);
    }
}

From source file:org.elasticsearch.index.store.CorruptedFileTest.java

License:Apache License

/**
 * Tests that we can actually recover from a corruption on the primary given that we have replica shards around.
 */
@Test
public void testCorruptFileAndRecover() throws ExecutionException, InterruptedException, IOException {
    int numDocs = scaledRandomIntBetween(100, 1000);
    // have enough space for 3 copies
    internalCluster().ensureAtLeastNumDataNodes(3);
    if (cluster().numDataNodes() == 3) {
        logger.info("--> cluster has [3] data nodes, corrupted primary will be overwritten");
    }

    final boolean failOnCorruption = randomBoolean();
    assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1").put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
            .put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, NoMergePolicyProvider.class)
            .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false) // no checkindex - we corrupt shards on purpose
            .put(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, failOnCorruption)
            .put(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH, true) // no translog based flush - it might change the .liv / segments.N files
            .put("indices.recovery.concurrent_streams", 10)));
    if (failOnCorruption == false) { // test the dynamic setting
        client().admin().indices().prepareUpdateSettings("test")
                .setSettings(
                        ImmutableSettings.builder().put(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, true))
                .get();
    }
    ensureGreen();
    disableAllocation("test");
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
    }
    indexRandom(true, builders);
    ensureGreen();
    assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true)
            .execute().actionGet());
    // we have to flush at least once here since we don't corrupt the translog
    CountResponse countResponse = client().prepareCount().get();
    assertHitCount(countResponse, numDocs);

    final int numShards = numShards("test");
    ShardRouting corruptedShardRouting = corruptRandomPrimaryFile();
    logger.info("--> {} corrupted", corruptedShardRouting);
    enableAllocation("test");
    /*
    * we corrupted the primary shard - now lets make sure we never recover from it successfully
    */
    Settings build = ImmutableSettings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "2").build();
    client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
    ClusterHealthResponse health = client().admin().cluster()
            .health(Requests.clusterHealthRequest("test").waitForGreenStatus().timeout("5m") // sometimes due to cluster rebalacing and random settings default timeout is just not enough.
                    .waitForRelocatingShards(0))
            .actionGet();
    if (health.isTimedOut()) {
        logger.info("cluster state:\n{}\n{}",
                client().admin().cluster().prepareState().get().getState().prettyPrint(),
                client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
        assertThat("timed out waiting for green state", health.isTimedOut(), equalTo(false));
    }
    assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN));
    final int numIterations = scaledRandomIntBetween(5, 20);
    for (int i = 0; i < numIterations; i++) {
        SearchResponse response = client().prepareSearch().setSize(numDocs).get();
        assertHitCount(response, numDocs);
    }

    /*
     * now hook into the IndicesService and register a close listener to
     * run the checkindex. if the corruption is still there we will catch it.
     */
    final CountDownLatch latch = new CountDownLatch(numShards * 3); // primary + 2 replicas
    final CopyOnWriteArrayList<Throwable> exception = new CopyOnWriteArrayList<>();
    final IndicesLifecycle.Listener listener = new IndicesLifecycle.Listener() {
        @Override
        public void beforeIndexShardClosed(ShardId sid, @Nullable IndexShard indexShard,
                @IndexSettings Settings indexSettings) {
            if (indexShard != null) {
                Store store = ((IndexShard) indexShard).store();
                store.incRef();
                try {
                    if (!Lucene.indexExists(store.directory())
                            && indexShard.state() == IndexShardState.STARTED) {
                        return;
                    }
                    CheckIndex checkIndex = new CheckIndex(store.directory());
                    BytesStreamOutput os = new BytesStreamOutput();
                    PrintStream out = new PrintStream(os, false, Charsets.UTF_8.name());
                    checkIndex.setInfoStream(out);
                    CheckIndex.Status status = checkIndex.checkIndex();
                    // flush only after checkIndex has produced its report — flushing before
                    // the check (as the original did) can leave buffered output out of "os"
                    out.flush();
                    if (!status.clean) {
                        logger.warn("check index [failure]\n{}",
                                new String(os.bytes().toBytes(), Charsets.UTF_8));
                        throw new IndexShardException(sid, "index check failure");
                    }
                } catch (Throwable t) {
                    // collected and asserted empty after all shards closed
                    exception.add(t);
                } finally {
                    store.decRef();
                    latch.countDown();
                }
            }
        }
    };

    for (IndicesService service : internalCluster().getDataNodeInstances(IndicesService.class)) {
        service.indicesLifecycle().addListener(listener);
    }
    try {
        client().admin().indices().prepareDelete("test").get();
        latch.await();
        assertThat(exception, empty());
    } finally {
        for (IndicesService service : internalCluster().getDataNodeInstances(IndicesService.class)) {
            service.indicesLifecycle().removeListener(listener);
        }
    }
}

From source file:org.elasticsearch.test.stress.compress.LuceneCompressionStressTest.java

License:Apache License

/**
 * Stress entry point: indexes synthetic JSON documents into a compressed Lucene
 * directory forever, and every 10000 documents commits, closes the writer, runs
 * CheckIndex over the directory, then reopens the writer. Intended to be killed
 * manually; the loop never terminates on its own.
 */
public static void main(String[] args) throws Exception {
    final boolean USE_COMPOUND = false;
    final Compressor compressor = CompressorFactory.defaultCompressor();

    File testFile = new File("target/bench/compress/lucene");
    FileSystemUtils.deleteRecursively(testFile);
    testFile.mkdirs();

    // compress stored fields (.fdt) and term vectors (.tvf) only
    Directory dir = new CompressedDirectory(new NIOFSDirectory(new File(testFile, "compressed")), compressor,
            false, "fdt", "tvf");
    TieredMergePolicy mergePolicy = new TieredMergePolicy();
    mergePolicy.setUseCompoundFile(USE_COMPOUND);
    IndexWriter writer = new IndexWriter(dir,
            new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER).setMergePolicy(mergePolicy));

    System.out.println("feeding data...");
    TestData testData = new TestData();
    long count = 0;
    long round = 0;
    while (true) {
        // json
        XContentBuilder builder = XContentFactory.jsonBuilder();
        testData.current(builder);
        builder.close();
        Document doc = new Document();
        doc.add(new Field("_source", builder.bytes().array(), builder.bytes().arrayOffset(),
                builder.bytes().length()));
        if (true) {
            Field field = new Field("text", builder.string(), Field.Store.NO, Field.Index.ANALYZED,
                    Field.TermVector.WITH_POSITIONS_OFFSETS);
            doc.add(field);
        }
        writer.addDocument(doc);

        if ((++count % 10000) == 0) {
            writer.commit();
            ++round;
            System.out.println(DateTime.now() + "[" + round + "] closing");
            writer.close(true);
            System.out.println(DateTime.now() + "[" + round + "] closed");
            CheckIndex checkIndex = new CheckIndex(dir);
            FastByteArrayOutputStream os = new FastByteArrayOutputStream();
            PrintStream out = new PrintStream(os);
            checkIndex.setInfoStream(out);
            CheckIndex.Status status = checkIndex.checkIndex();
            // flush AFTER the check so the buffered report is fully in "os";
            // the original flushed before checkIndex ran, risking truncated output
            out.flush();
            if (!status.clean) {
                System.out.println("check index [failure]\n" + new String(os.bytes().toBytes()));
            } else {
                System.out.println(DateTime.now() + "[" + round + "] checked");
            }
            mergePolicy = new TieredMergePolicy();
            mergePolicy.setUseCompoundFile(USE_COMPOUND);
            writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER)
                    .setMergePolicy(mergePolicy));
        }
    }
}

From source file:org.elasticsearch.test.stress.compress.LuceneCompressionStressTests.java

License:Apache License

/**
 * Bounded variant of the compression stress run: indexes synthetic JSON
 * documents into a compressed directory for 100 rounds of 10000 documents,
 * running CheckIndex after each round's commit/close cycle.
 */
@Test
public void test() throws Exception {
    final boolean USE_COMPOUND = false;
    final Compressor compressor = CompressorFactory.defaultCompressor();

    File testFile = new File("target/bench/compress/lucene");
    FileSystemUtils.deleteRecursively(testFile);
    testFile.mkdirs();

    // compress stored fields (.fdt) and term vectors (.tvf) only
    Directory dir = new CompressedDirectory(new NIOFSDirectory(new File(testFile, "compressed")), compressor,
            false, "fdt", "tvf");
    TieredMergePolicy mergePolicy = new TieredMergePolicy();
    mergePolicy.setUseCompoundFile(USE_COMPOUND);
    IndexWriter writer = new IndexWriter(dir,
            new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER).setMergePolicy(mergePolicy));

    logger.info("feeding data...");
    TestData testData = new TestData();
    long count = 0;
    long round = 0;
    while (round < 100) {
        // json
        XContentBuilder builder = XContentFactory.jsonBuilder();
        testData.current(builder);
        builder.close();
        Document doc = new Document();
        doc.add(new Field("_source", builder.bytes().array(), builder.bytes().arrayOffset(),
                builder.bytes().length()));
        if (true) {
            Field field = new Field("text", builder.string(), Field.Store.NO, Field.Index.ANALYZED,
                    Field.TermVector.WITH_POSITIONS_OFFSETS);
            doc.add(field);
        }
        writer.addDocument(doc);

        if ((++count % 10000) == 0) {
            writer.commit();
            ++round;
            logger.info("[" + round + "] closing");
            writer.close(true);
            logger.info("[" + round + "] closed");
            CheckIndex checkIndex = new CheckIndex(dir);
            FastByteArrayOutputStream os = new FastByteArrayOutputStream();
            PrintStream out = new PrintStream(os);
            checkIndex.setInfoStream(out);
            CheckIndex.Status status = checkIndex.checkIndex();
            // flush AFTER the check so the buffered report is fully in "os";
            // the original flushed before checkIndex ran, risking truncated output
            out.flush();
            if (!status.clean) {
                logger.warn("check index [failure]\n" + new String(os.bytes().toBytes()));
            } else {
                logger.info("[" + round + "] checked");
            }
            mergePolicy = new TieredMergePolicy();
            mergePolicy.setUseCompoundFile(USE_COMPOUND);
            writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER)
                    .setMergePolicy(mergePolicy));
        }
    }
}

From source file:org.eu.bitzone.Leia.java

License:Apache License

/**
 * Runs Lucene's CheckIndex against the currently opened directory on a
 * background thread. Progress messages stream into the dialog's "msg" panel;
 * a one-line verdict is written to the dialog's "ckRes" widget, and the
 * "fixPanel" repair controls are revealed when the index is found dirty.
 * The CheckIndex instance and its writer are stashed on the dialog so a later
 * "fix" action can reuse them.
 *
 * @param dialog the Thinlet check-index dialog holding the output widgets
 */
public void checkIndex(final Object dialog) {
    final Thread t = new Thread() {

        @Override
        public void run() {
            final Object panel = find(dialog, "msg");
            final Object fixPanel = find(dialog, "fixPanel");
            // route CheckIndex's info stream into the dialog's message panel
            final PanelPrintWriter ppw = new PanelPrintWriter(Leia.this, panel);
            final Object ckRes = find(dialog, "ckRes");
            CheckIndex.Status status = null;
            final CheckIndex ci = new CheckIndex(dir);
            ci.setInfoStream(ppw);
            // remember checker + writer so the fix action can pick them up later
            putProperty(dialog, "checkIndex", ci);
            putProperty(dialog, "ppw", ppw);
            try {
                status = ci.checkIndex();
            } catch (final Exception e) {
                // surface the failure both on stderr and in the dialog panel
                ppw.println("ERROR: caught exception, giving up.\n\n");
                e.printStackTrace();
                e.printStackTrace(ppw);
            }
            if (status != null) {
                Leia.this.putProperty(dialog, "checkStatus", status);
                String statMsg;
                if (status.clean) {
                    statMsg = "OK";
                } else if (status.toolOutOfDate) {
                    statMsg = "ERROR: Can't check - tool out-of-date";
                } else {
                    // show fixPanel
                    setBoolean(fixPanel, "visible", true);
                    repaint(dialog);
                    // build a summary of every failure category CheckIndex reported
                    statMsg = "BAD: ";
                    if (status.cantOpenSegments) {
                        statMsg += "cantOpenSegments ";
                    }
                    if (status.missingSegments) {
                        statMsg += "missingSegments ";
                    }
                    if (status.missingSegmentVersion) {
                        statMsg += "missingSegVersion ";
                    }
                    if (status.numBadSegments > 0) {
                        statMsg += "numBadSegments=" + status.numBadSegments + " ";
                    }
                    if (status.totLoseDocCount > 0) {
                        statMsg += "lostDocCount=" + status.totLoseDocCount + " ";
                    }
                }
                setString(ckRes, "text", statMsg);
            }
        }
    };
    t.start();
}

From source file:org.getopt.luke.Luke.java

License:Apache License

/**
 * Launches a background thread that runs CheckIndex over the open directory,
 * streaming its report to the dialog's message panel and writing a short
 * verdict to the "ckRes" widget. When the index is dirty, the repair panel
 * is made visible. The checker and its writer are stored on the dialog for
 * a later fix step.
 *
 * @param dialog the Thinlet check-index dialog holding the output widgets
 */
public void checkIndex(final Object dialog) {
    Thread worker = new Thread() {
        @Override
        public void run() {
            Object msgPanel = find(dialog, "msg");
            Object fixPanel = find(dialog, "fixPanel");
            PanelPrintWriter report = new PanelPrintWriter(Luke.this, msgPanel);
            Object resultWidget = find(dialog, "ckRes");

            CheckIndex ci = new CheckIndex(dir);
            ci.setInfoStream(report);
            // stash checker + writer so the fix action can reuse them
            putProperty(dialog, "checkIndex", ci);
            putProperty(dialog, "ppw", report);

            CheckIndex.Status status = null;
            try {
                status = ci.checkIndex();
            } catch (Exception e) {
                report.println("ERROR: caught exception, giving up.\n\n");
                e.printStackTrace();
                e.printStackTrace(report);
            }
            if (status == null) {
                return; // check failed outright; error already reported
            }
            Luke.this.putProperty(dialog, "checkStatus", status);

            String statMsg;
            if (status.clean) {
                statMsg = "OK";
            } else if (status.toolOutOfDate) {
                statMsg = "ERROR: Can't check - tool out-of-date";
            } else {
                // index is dirty: reveal the repair controls and summarize the damage
                setBoolean(fixPanel, "visible", true);
                repaint(dialog);
                StringBuilder bad = new StringBuilder("BAD: ");
                if (status.cantOpenSegments) {
                    bad.append("cantOpenSegments ");
                }
                if (status.missingSegments) {
                    bad.append("missingSegments ");
                }
                if (status.missingSegmentVersion) {
                    bad.append("missingSegVersion ");
                }
                if (status.numBadSegments > 0) {
                    bad.append("numBadSegments=").append(status.numBadSegments).append(" ");
                }
                if (status.totLoseDocCount > 0) {
                    bad.append("lostDocCount=").append(status.totLoseDocCount).append(" ");
                }
                statMsg = bad.toString();
            }
            setString(resultWidget, "text", statMsg);
        }
    };
    worker.start();
}

From source file:org.hibernate.search.test.performance.util.CheckerLuceneIndex.java

License:LGPL

@SuppressWarnings("deprecation") // performance tests can be run against older hsearch versions, where isn't getIndexManagerHolder yet
public static void printIndexReport(TestContext ctx, PrintStream out) throws IOException {
    if (!CHECK_INDEX_STATE) {
        return;//ww w  .j a v a  2  s.  c o m
    }

    out.println("INDEX CHECK...");
    out.println("");

    Session s = ctx.sf.openSession();
    FullTextSession fts = Search.getFullTextSession(s);
    ExtendedSearchIntegrator integrator = fts.getSearchFactory().unwrap(ExtendedSearchIntegrator.class);
    Collection<IndexManager> indexManagers = integrator.getIndexManagerHolder().getIndexManagers();

    for (IndexManager indexManager : indexManagers) {
        DirectoryBasedIndexManager directoryBasedIndexManager = (DirectoryBasedIndexManager) indexManager;
        stopBackend(directoryBasedIndexManager);
        DirectoryProvider<?> directoryProvider = directoryBasedIndexManager.getDirectoryProvider();
        Directory directory = directoryProvider.getDirectory();

        out.println("directory : " + directory.toString());
        out.println("");

        CheckIndex checkIndex = new CheckIndex(directory);
        checkIndex.setInfoStream(out);
        Status status;
        try {
            status = checkIndex.checkIndex();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }

        assertTrue(status.clean);
    }

    out.println("==================================================================");
    out.flush();
}