Example usage for org.apache.lucene.index LiveIndexWriterConfig setMaxBufferedDocs

List of usage examples for org.apache.lucene.index LiveIndexWriterConfig setMaxBufferedDocs

Introduction

In this page you can find the example usage for org.apache.lucene.index LiveIndexWriterConfig setMaxBufferedDocs.

Prototype

public synchronized LiveIndexWriterConfig setMaxBufferedDocs(int maxBufferedDocs) 

Source Link

Document

Determines the minimal number of documents required before the buffered in-memory documents are flushed as a new Segment.

Usage

From source file: stroom.index.server.IndexShardWriterImpl.java

License: Apache License

/**
 * Opens the Lucene index for this shard, optionally creating it on disk first.
 *
 * <p>Refuses to open shards that are marked deleted or were written with a
 * different Lucene version. On success the shard status is set to
 * {@code OPEN}, the committed document count is cached, and {@code true} is
 * returned. All failures are logged and reported as {@code false} rather
 * than thrown, so callers can treat "could not open" uniformly.
 *
 * @param create {@code true} to create a brand-new index; the target
 *               directory must not already contain index content.
 * @return {@code true} if the writer was opened successfully.
 */
private synchronized boolean doOpen(final boolean create) {
    boolean success = false;

    try {
        // Never open deleted index shards.
        if (IndexShardStatus.DELETED.equals(indexShard.getStatus())) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Shard is deleted " + indexShard);
            }
            return false;
        }

        // Don't open old index shards for writing.
        final Version currentVersion = LuceneVersionUtil
                .getLuceneVersion(LuceneVersionUtil.getCurrentVersion());
        final Version shardVersion = LuceneVersionUtil.getLuceneVersion(indexShard.getIndexVersion());
        if (!shardVersion.equals(currentVersion)) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Shard version is different to current version " + indexShard);
            }
            return false;
        }

        final long startMs = System.currentTimeMillis();
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Opening " + indexShard);
        }

        if (create) {
            // Make sure the index directory does not exist. If one does
            // then throw an exception as we don't want to overwrite an
            // existing index.
            if (Files.isDirectory(dir)) {
                // Workaround for lingering .nfs files: tolerate hidden
                // dot-files (e.g. ".nfsXXXX") but refuse if any real content
                // exists. The Files.list stream must be closed or it leaks a
                // directory handle, hence try-with-resources.
                try (final java.util.stream.Stream<Path> children = Files.list(dir)) {
                    children.forEach(file -> {
                        // Path.startsWith() compares path elements, not the
                        // leading characters of the file name, so convert to
                        // String before the prefix check.
                        if (Files.isDirectory(file)
                                || !file.getFileName().toString().startsWith(".")) {
                            throw new IndexException("Attempting to create a new index in \""
                                    + dir.toAbsolutePath().toString() + "\" but one already exists.");
                        }
                    });
                }
            } else {
                // Try and make all required directories.
                try {
                    Files.createDirectories(dir);
                } catch (final IOException e) {
                    // NOTE(review): the IOException cause is dropped here —
                    // pass 'e' through if IndexException has a cause-taking
                    // constructor, to keep the root-cause stack trace.
                    throw new IndexException("Unable to create directories for new index in \""
                            + dir.toAbsolutePath().toString() + "\"");
                }
            }
        }

        // Create lucene directory object.
        directory = new NIOFSDirectory(dir, SimpleFSLockFactory.INSTANCE);

        analyzerWrapper.setVersion(shardVersion);
        final IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzerWrapper);

        // In debug mode we do extra trace in LUCENE and we also count
        // certain logging info like merge and flush counts, so you can get
        // this later using the trace method.
        if (LOGGER.isDebugEnabled()) {
            loggerPrintStream = new LoggerPrintStream(LOGGER);
            for (final String term : LOG_WATCH_TERMS.values()) {
                loggerPrintStream.addWatchTerm(term);
            }
            indexWriterConfig.setInfoStream(loggerPrintStream);
        }

        // IndexWriter to use for adding data to the index.
        indexWriter = new IndexWriter(directory, indexWriterConfig);

        final LiveIndexWriterConfig liveIndexWriterConfig = indexWriter.getConfig();
        liveIndexWriterConfig.setRAMBufferSizeMB(ramBufferSizeMB);

        // TODO : We might still want to write separate segments I'm not
        // sure on pros/cons?
        liveIndexWriterConfig.setUseCompoundFile(false);
        // Flush on RAM usage only, never on a document count threshold.
        liveIndexWriterConfig.setMaxBufferedDocs(Integer.MAX_VALUE);

        // Check the number of committed docs in this shard and cross-check
        // against what the DB record claims.
        documentCount.set(indexWriter.numDocs());
        lastDocumentCount = documentCount.get();
        if (create) {
            if (lastDocumentCount != 0) {
                LOGGER.error("Index should be new but already contains docs: " + lastDocumentCount);
            }
        } else if (indexShard.getDocumentCount() != lastDocumentCount) {
            LOGGER.error("Mismatch document count.  Index says " + lastDocumentCount + " DB says "
                    + indexShard.getDocumentCount());
        }

        // We have opened the index so update the DB object.
        setStatus(IndexShardStatus.OPEN);

        // Output some debug.
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("getIndexWriter() - Opened " + indexShard + " in "
                    + (System.currentTimeMillis() - startMs) + "ms");
        }

        success = true;
    } catch (final LockObtainFailedException t) {
        // Lock contention is an expected condition (another writer holds the
        // shard), so warn without a stack trace.
        LOGGER.warn(t.getMessage());
    } catch (final Throwable t) {
        // Boundary method: any other failure is logged in full and reported
        // via the boolean return rather than propagated.
        LOGGER.error(t.getMessage(), t);
    }

    return success;
}