Example usage for org.apache.lucene.index IndexWriterConfig DISABLE_AUTO_FLUSH


Introduction

On this page you can find example usages of org.apache.lucene.index IndexWriterConfig DISABLE_AUTO_FLUSH.

Prototype

int DISABLE_AUTO_FLUSH


Document

Denotes a flush trigger is disabled.
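
In the examples below, DISABLE_AUTO_FLUSH is passed to setMaxBufferedDocs (and occasionally setMaxBufferedDeleteTerms) so that flushing is governed by RAM usage alone rather than by a document count. As a minimal sketch of this pattern, assuming the Lucene 4.10 API used in the first example (the class and method names here are illustrative, not taken from any of the source files):

import java.io.File;
import java.io.IOException;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

public class DisableAutoFlushSketch {

    // Opens an IndexWriter whose flushes are triggered by RAM usage only:
    // the document-count trigger is switched off with DISABLE_AUTO_FLUSH.
    public static IndexWriter openWriter(String indexPath) throws IOException {
        Directory directory = FSDirectory.open(new File(indexPath));
        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_4_10_1, new StandardAnalyzer());
        config.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH); // disable the doc-count flush trigger
        config.setRAMBufferSizeMB(64.0); // flush when roughly 64 MB of buffered documents accumulate
        return new IndexWriter(directory, config);
    }
}

With the document-count trigger disabled, segments are flushed only when the RAM buffer fills or on an explicit commit, which is why the batch-indexing examples below pair DISABLE_AUTO_FLUSH with a generous RAM buffer (64 MB or 256 MB).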

Usage

From source file: com.baidu.rigel.biplatform.tesseract.isservice.index.service.IndexWriterFactory.java

License: Open Source License

/**
 * Returns the IndexWriter for the given index path, reusing a cached
 * instance when one exists and creating (and caching) a new one otherwise.
 * 
 * @param idxPath
 *            the index directory path
 * @return IndexWriter for the given path
 * @throws IOException
 *             if the index directory cannot be opened
 */
public static synchronized IndexWriter getIndexWriter(String idxPath) throws IOException {
    LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_BEGIN, "getIndexWriter",
            "[idxPath:" + idxPath + "]"));
    IndexWriter indexWriter = null;
    if (INSTANCE.idxWriterMaps.containsKey(idxPath)) {
        indexWriter = INSTANCE.idxWriterMaps.get(idxPath);
        LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_PROCESS_NO_PARAM, "getIndexWriter",
                "return exist IndexWriter "));
    } else {
        File indexFile = new File(idxPath);
        Directory directory = FSDirectory.open(indexFile);
        IndexWriterConfig indexWriterConfig = new IndexWriterConfig(Version.LUCENE_4_10_1,
                new StandardAnalyzer());
        indexWriterConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
        indexWriterConfig.setRAMBufferSizeMB(64.0);
        indexWriterConfig.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
        indexWriter = new IndexWriter(directory, indexWriterConfig);

        INSTANCE.idxWriterMaps.put(idxPath, indexWriter);
        LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_PROCESS_NO_PARAM, "getIndexWriter",
                "create new IndexWriter "));
    }
    LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_END, "getIndexWriter",
            "[idxPath:" + idxPath + "]"));
    return indexWriter;
}

From source file: com.rocana.lucene.codec.v1.TestRocanaPerFieldPostingsFormat2.java

License: Apache License

@Test
public void testChangeCodecAndMerge() throws IOException {
    Directory dir = newDirectory();
    if (VERBOSE) {
        System.out.println("TEST: make new index");
    }
    IndexWriterConfig iwconf = newIndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE)
            .setCodec(new MockCodec());
    iwconf.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
    //((LogMergePolicy) iwconf.getMergePolicy()).setMergeFactor(10);
    IndexWriter writer = newWriter(dir, iwconf);

    addDocs(writer, 10);
    writer.commit();
    assertQuery(new Term("content", "aaa"), dir, 10);
    if (VERBOSE) {
        System.out.println("TEST: addDocs3");
    }
    addDocs3(writer, 10);
    writer.commit();
    writer.close();

    assertQuery(new Term("content", "ccc"), dir, 10);
    assertQuery(new Term("content", "aaa"), dir, 10);
    Codec codec = iwconf.getCodec();

    iwconf = newIndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND).setCodec(codec);
    //((LogMergePolicy) iwconf.getMergePolicy()).setNoCFSRatio(0.0);
    //((LogMergePolicy) iwconf.getMergePolicy()).setMergeFactor(10);
    iwconf.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);

    iwconf.setCodec(new MockCodec2()); // uses standard for field content
    writer = newWriter(dir, iwconf);
    // swap in new codec for currently written segments
    if (VERBOSE) {
        System.out.println("TEST: add docs w/ Standard codec for content field");
    }
    addDocs2(writer, 10);
    writer.commit();
    codec = iwconf.getCodec();
    assertEquals(30, writer.maxDoc());
    assertQuery(new Term("content", "bbb"), dir, 10);
    assertQuery(new Term("content", "ccc"), dir, 10); ////
    assertQuery(new Term("content", "aaa"), dir, 10);

    if (VERBOSE) {
        System.out.println("TEST: add more docs w/ new codec");
    }
    addDocs2(writer, 10);
    writer.commit();
    assertQuery(new Term("content", "ccc"), dir, 10);
    assertQuery(new Term("content", "bbb"), dir, 20);
    assertQuery(new Term("content", "aaa"), dir, 10);
    assertEquals(40, writer.maxDoc());

    if (VERBOSE) {
        System.out.println("TEST: now optimize");
    }
    writer.forceMerge(1);
    assertEquals(40, writer.maxDoc());
    writer.close();
    assertQuery(new Term("content", "ccc"), dir, 10);
    assertQuery(new Term("content", "bbb"), dir, 20);
    assertQuery(new Term("content", "aaa"), dir, 10);

    dir.close();
}

From source file: edu.udel.ece.infolab.btc.Indexing.java

License: Apache License

/**
 * Create an index writer that uses a #TupleAnalyzer on the triple fields, with
 * tokenization of the URI's localname, and the default #WhitespaceAnalyzer
 * on the other fields.
 * @param dir the directory to store the index in
 * @return the configured IndexWriter
 * @throws IOException if the index writer cannot be created
 */
@SuppressWarnings("deprecation")
private IndexWriter initializeIndexWriter(final Directory dir) throws IOException {
    final Analyzer defaultAnalyzer = new WhitespaceAnalyzer(Version.LUCENE_31);
    final Map<String, Analyzer> fieldAnalyzers = new HashMap<String, Analyzer>();
    final TupleAnalyzer tuple = new TupleAnalyzer(new StandardAnalyzer(Version.LUCENE_31));
    tuple.setURINormalisation(URINormalisation.LOCALNAME);
    fieldAnalyzers.put(OUTGOING_TRIPLE, tuple);
    fieldAnalyzers.put(INCOMING_TRIPLE, tuple);

    final IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_31,
            new PerFieldAnalyzerWrapper(defaultAnalyzer, fieldAnalyzers));

    // Disable compound file
    ((LogMergePolicy) config.getMergePolicy()).setUseCompoundFile(false);
    // Increase merge factor to 20 - more adapted to batch creation
    ((LogMergePolicy) config.getMergePolicy()).setMergeFactor(20);

    config.setRAMBufferSizeMB(256);
    config.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
    config.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);

    final IndexWriter writer = new IndexWriter(dir, config);
    writer.setMaxFieldLength(Integer.MAX_VALUE);
    return writer;
}

From source file: org.apache.solr.core.TestMergePolicyConfig.java

License: Apache License

public void testLogMergePolicyConfig() throws Exception {

    final Class<? extends LogMergePolicy> mpClass = random().nextBoolean() ? LogByteSizeMergePolicy.class
            : LogDocMergePolicy.class;

    System.setProperty("solr.test.log.merge.policy", mpClass.getName());

    initCore("solrconfig-logmergepolicy.xml", "schema-minimal.xml");
    IndexWriterConfig iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore().getLatestSchema());

    // verify some props set to -1 get lucene internal defaults
    assertEquals(-1, solrConfig.indexConfig.maxBufferedDocs);
    assertEquals(IndexWriterConfig.DISABLE_AUTO_FLUSH, iwc.getMaxBufferedDocs());
    assertEquals(-1, solrConfig.indexConfig.maxIndexingThreads);
    assertEquals(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES, iwc.getMaxThreadStates());
    assertEquals(-1, solrConfig.indexConfig.ramBufferSizeMB, 0.0D);
    assertEquals(IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB, iwc.getRAMBufferSizeMB(), 0.0D);

    LogMergePolicy logMP = assertAndCast(mpClass, iwc.getMergePolicy());

    // set by legacy <mergeFactor> setting
    assertEquals(11, logMP.getMergeFactor());
    // set by legacy <maxMergeDocs> setting
    assertEquals(456, logMP.getMaxMergeDocs());

}

From source file: org.lahab.clucene.server.indexer.Indexer.java

License: Apache License

/**
 * Opens an index writer on the current directory
 * @throws CorruptIndexException
 * @throws IOException
 * @throws ParametizerException 
 */
public void open() throws CorruptIndexException, IOException, ParametizerException {
    StandardAnalyzer analyzer = new StandardAnalyzer(Version.LUCENE_36);
    IndexWriterConfig configWriter = new IndexWriterConfig(Version.LUCENE_36, analyzer);
    configWriter.setRAMBufferSizeMB(_params.getDouble("bufferSize"));
    configWriter.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
    configWriter.setOpenMode(IndexWriterConfig.OpenMode.CREATE);

    try {
        _index = new IndexWriter(_directory, configWriter);
        _nbLastCommit = _index.maxDoc();
        _close = false;
    } catch (LockObtainFailedException e) {
        System.out.println("Lock is taken trying again");
        _directory.clearLock("write.lock");
    }
}

From source file: org.neo4j.kernel.api.impl.index.IndexWriterConfigs.java

License: Open Source License

public static IndexWriterConfig population() {
    IndexWriterConfig writerConfig = standard();
    writerConfig.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
    writerConfig.setRAMBufferSizeMB(POPULATION_RAM_BUFFER_SIZE_MB);
    return writerConfig;
}