Example usage for org.apache.lucene.index IndexWriter forceMerge

Introduction

This page collects example usages of org.apache.lucene.index.IndexWriter#forceMerge from open-source projects.

Prototype

public void forceMerge(int maxNumSegments) throws IOException 

Document

Forces merge policy to merge segments until there are <= maxNumSegments.
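
Before the project examples below, here is a minimal sketch of the call pattern. It assumes a Lucene 6.x-style API and a hypothetical index path; forceMerge(1) rewrites the whole index into a single segment, so it is usually reserved for indexes that have stopped changing.

import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class ForceMergeSketch {
    public static void main(String[] args) throws Exception {
        Directory dir = FSDirectory.open(Paths.get("/tmp/example-index")); // hypothetical location
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
        // ... add or update documents here ...
        writer.forceMerge(1); // merge down to one segment; CPU- and I/O-intensive on large indexes
        writer.commit();
        writer.close();
        dir.close();
    }
}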

Usage

From source file: org.apache.solr.uninverting.TestFieldCacheWithThreads.java

License: Apache License
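
This Solr test writes numeric, binary, and sorted doc-values fields for a random number of documents, calls forceMerge(1) so the index collapses to a single segment (asserted via r.leaves().size()), and then reads the values back through the FieldCache from several concurrent threads.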

public void test() throws Exception {
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir,
            newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));

    final List<Long> numbers = new ArrayList<>();
    final List<BytesRef> binary = new ArrayList<>();
    final List<BytesRef> sorted = new ArrayList<>();
    final int numDocs = atLeast(100);
    for (int i = 0; i < numDocs; i++) {
        Document d = new Document();
        long number = random().nextLong();
        d.add(new NumericDocValuesField("number", number));
        BytesRef bytes = new BytesRef(TestUtil.randomRealisticUnicodeString(random()));
        d.add(new BinaryDocValuesField("bytes", bytes));
        binary.add(bytes);
        bytes = new BytesRef(TestUtil.randomRealisticUnicodeString(random()));
        d.add(new SortedDocValuesField("sorted", bytes));
        sorted.add(bytes);
        w.addDocument(d);
        numbers.add(number);
    }

    w.forceMerge(1);
    final IndexReader r = DirectoryReader.open(w);
    w.close();

    assertEquals(1, r.leaves().size());
    final LeafReader ar = r.leaves().get(0).reader();

    int numThreads = TestUtil.nextInt(random(), 2, 5);
    List<Thread> threads = new ArrayList<>();
    final CountDownLatch startingGun = new CountDownLatch(1);
    for (int t = 0; t < numThreads; t++) {
        final Random threadRandom = new Random(random().nextLong());
        Thread thread = new Thread() {
            @Override
            public void run() {
                try {
                    startingGun.await();
                    int iters = atLeast(1000);
                    for (int iter = 0; iter < iters; iter++) {
                        int docID = threadRandom.nextInt(numDocs);
                        switch (threadRandom.nextInt(4)) {
                        case 0: {
                            NumericDocValues values = FieldCache.DEFAULT.getNumerics(ar, "number",
                                    FieldCache.INT_POINT_PARSER);
                            assertEquals(docID, values.advance(docID));
                            assertEquals(numbers.get(docID).longValue(), values.longValue());
                        }
                            break;
                        case 1: {
                            NumericDocValues values = FieldCache.DEFAULT.getNumerics(ar, "number",
                                    FieldCache.LONG_POINT_PARSER);
                            assertEquals(docID, values.advance(docID));
                            assertEquals(numbers.get(docID).longValue(), values.longValue());
                        }
                            break;
                        case 2: {
                            NumericDocValues values = FieldCache.DEFAULT.getNumerics(ar, "number",
                                    FieldCache.FLOAT_POINT_PARSER);
                            assertEquals(docID, values.advance(docID));
                            assertEquals(numbers.get(docID).longValue(), values.longValue());
                        }
                            break;
                        case 3: {
                            NumericDocValues values = FieldCache.DEFAULT.getNumerics(ar, "number",
                                    FieldCache.DOUBLE_POINT_PARSER);
                            assertEquals(docID, values.advance(docID));
                            assertEquals(numbers.get(docID).longValue(), values.longValue());
                        }
                            break;
                        }
                        BinaryDocValues bdv = FieldCache.DEFAULT.getTerms(ar, "bytes");
                        assertEquals(docID, bdv.advance(docID));
                        assertEquals(binary.get(docID), bdv.binaryValue());
                        SortedDocValues sdv = FieldCache.DEFAULT.getTermsIndex(ar, "sorted");
                        assertEquals(docID, sdv.advance(docID));
                        assertEquals(sorted.get(docID), sdv.binaryValue());
                    }
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
        };
        thread.start();
        threads.add(thread);
    }

    startingGun.countDown();

    for (Thread thread : threads) {
        thread.join();
    }

    r.close();
    dir.close();
}

From source file: org.apache.solr.uninverting.TestUninvertingReader.java

License: Apache License
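
Two documents carrying multi-valued LegacyIntField entries are force-merged into one segment, then exposed as SORTED_SET_INTEGER doc values through UninvertingReader and checked ordinal by ordinal.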

public void testSortedSetInteger() throws IOException {
    Directory dir = newDirectory();
    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));

    Document doc = new Document();
    doc.add(new LegacyIntField("foo", 5, Field.Store.NO));
    iw.addDocument(doc);

    doc = new Document();
    doc.add(new LegacyIntField("foo", 5, Field.Store.NO));
    doc.add(new LegacyIntField("foo", -3, Field.Store.NO));
    iw.addDocument(doc);

    iw.forceMerge(1);
    iw.close();

    DirectoryReader ir = UninvertingReader.wrap(DirectoryReader.open(dir),
            Collections.singletonMap("foo", Type.SORTED_SET_INTEGER));
    LeafReader ar = ir.leaves().get(0).reader();
    SortedSetDocValues v = ar.getSortedSetDocValues("foo");
    assertEquals(2, v.getValueCount());

    assertEquals(0, v.nextDoc());
    assertEquals(1, v.nextOrd());
    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

    assertEquals(1, v.nextDoc());
    assertEquals(0, v.nextOrd());
    assertEquals(1, v.nextOrd());
    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

    BytesRef value = v.lookupOrd(0);
    assertEquals(-3, LegacyNumericUtils.prefixCodedToInt(value));

    value = v.lookupOrd(1);
    assertEquals(5, LegacyNumericUtils.prefixCodedToInt(value));
    TestUtil.checkReader(ir);
    ir.close();
    dir.close();
}

From source file: org.apache.solr.uninverting.TestUninvertingReader.java

License: Apache License
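
The float variant of the previous test: values are encoded with Float.floatToRawIntBits and verified through the same single-segment SortedSetDocValues walk.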

public void testSortedSetFloat() throws IOException {
    Directory dir = newDirectory();
    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));

    Document doc = new Document();
    doc.add(new LegacyIntField("foo", Float.floatToRawIntBits(5f), Field.Store.NO));
    iw.addDocument(doc);

    doc = new Document();
    doc.add(new LegacyIntField("foo", Float.floatToRawIntBits(5f), Field.Store.NO));
    doc.add(new LegacyIntField("foo", Float.floatToRawIntBits(-3f), Field.Store.NO));
    iw.addDocument(doc);

    iw.forceMerge(1);
    iw.close();

    DirectoryReader ir = UninvertingReader.wrap(DirectoryReader.open(dir),
            Collections.singletonMap("foo", Type.SORTED_SET_FLOAT));
    LeafReader ar = ir.leaves().get(0).reader();

    SortedSetDocValues v = ar.getSortedSetDocValues("foo");
    assertEquals(2, v.getValueCount());

    assertEquals(0, v.nextDoc());
    assertEquals(1, v.nextOrd());
    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

    assertEquals(1, v.nextDoc());
    assertEquals(0, v.nextOrd());
    assertEquals(1, v.nextOrd());
    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

    BytesRef value = v.lookupOrd(0);
    assertEquals(Float.floatToRawIntBits(-3f), LegacyNumericUtils.prefixCodedToInt(value));

    value = v.lookupOrd(1);
    assertEquals(Float.floatToRawIntBits(5f), LegacyNumericUtils.prefixCodedToInt(value));
    TestUtil.checkReader(ir);
    ir.close();
    dir.close();
}

From source file: org.apache.solr.uninverting.TestUninvertingReader.java

License: Apache License
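
The same pattern again with LegacyLongField and Type.SORTED_SET_LONG; forceMerge(1) guarantees a single leaf reader before the ordinals are checked.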

public void testSortedSetLong() throws IOException {
    Directory dir = newDirectory();
    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));

    Document doc = new Document();
    doc.add(new LegacyLongField("foo", 5, Field.Store.NO));
    iw.addDocument(doc);

    doc = new Document();
    doc.add(new LegacyLongField("foo", 5, Field.Store.NO));
    doc.add(new LegacyLongField("foo", -3, Field.Store.NO));
    iw.addDocument(doc);

    iw.forceMerge(1);
    iw.close();

    DirectoryReader ir = UninvertingReader.wrap(DirectoryReader.open(dir),
            Collections.singletonMap("foo", Type.SORTED_SET_LONG));
    LeafReader ar = ir.leaves().get(0).reader();
    SortedSetDocValues v = ar.getSortedSetDocValues("foo");
    assertEquals(2, v.getValueCount());

    assertEquals(0, v.nextDoc());
    assertEquals(1, v.nextOrd());
    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

    assertEquals(1, v.nextDoc());
    assertEquals(0, v.nextOrd());
    assertEquals(1, v.nextOrd());
    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

    BytesRef value = v.lookupOrd(0);
    assertEquals(-3, LegacyNumericUtils.prefixCodedToLong(value));

    value = v.lookupOrd(1);
    assertEquals(5, LegacyNumericUtils.prefixCodedToLong(value));
    TestUtil.checkReader(ir);
    ir.close();
    dir.close();
}

From source file: org.apache.solr.uninverting.TestUninvertingReader.java

License: Apache License
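
The double variant, encoding values with Double.doubleToRawLongBits before uninverting.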

public void testSortedSetDouble() throws IOException {
    Directory dir = newDirectory();
    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));

    Document doc = new Document();
    doc.add(new LegacyLongField("foo", Double.doubleToRawLongBits(5d), Field.Store.NO));
    iw.addDocument(doc);

    doc = new Document();
    doc.add(new LegacyLongField("foo", Double.doubleToRawLongBits(5d), Field.Store.NO));
    doc.add(new LegacyLongField("foo", Double.doubleToRawLongBits(-3d), Field.Store.NO));
    iw.addDocument(doc);

    iw.forceMerge(1);
    iw.close();

    DirectoryReader ir = UninvertingReader.wrap(DirectoryReader.open(dir),
            Collections.singletonMap("foo", Type.SORTED_SET_DOUBLE));
    LeafReader ar = ir.leaves().get(0).reader();
    SortedSetDocValues v = ar.getSortedSetDocValues("foo");
    assertEquals(2, v.getValueCount());

    assertEquals(0, v.nextDoc());
    assertEquals(1, v.nextOrd());
    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

    assertEquals(1, v.nextDoc());
    assertEquals(0, v.nextOrd());
    assertEquals(1, v.nextOrd());
    assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

    BytesRef value = v.lookupOrd(0);
    assertEquals(Double.doubleToRawLongBits(-3d), LegacyNumericUtils.prefixCodedToLong(value));

    value = v.lookupOrd(1);
    assertEquals(Double.doubleToRawLongBits(5d), LegacyNumericUtils.prefixCodedToLong(value));
    TestUtil.checkReader(ir);
    ir.close();
    dir.close();
}

From source file: org.apache.solr.uninverting.TestUninvertingReader.java

License: Apache License
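
After merging to one segment, this test inspects FieldInfos to confirm which fields UninvertingReader reports as NUMERIC doc values and what point-dimension metadata survives.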

public void testFieldInfos() throws IOException {
    Directory dir = newDirectory();
    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));

    Document doc = new Document();
    BytesRef idBytes = new BytesRef("id");
    doc.add(new StringField("id", idBytes, Store.YES));
    doc.add(new LegacyIntField("int", 5, Store.YES));
    doc.add(new NumericDocValuesField("dv", 5));
    doc.add(new IntPoint("dint", 5));
    doc.add(new StoredField("stored", 5)); // not indexed
    iw.addDocument(doc);

    iw.forceMerge(1);
    iw.close();

    Map<String, Type> uninvertingMap = new HashMap<>();
    uninvertingMap.put("int", Type.LEGACY_INTEGER);
    uninvertingMap.put("dv", Type.LEGACY_INTEGER);
    uninvertingMap.put("dint", Type.INTEGER_POINT);

    DirectoryReader ir = UninvertingReader.wrap(DirectoryReader.open(dir), uninvertingMap);
    LeafReader leafReader = ir.leaves().get(0).reader();

    FieldInfo intFInfo = leafReader.getFieldInfos().fieldInfo("int");
    assertEquals(DocValuesType.NUMERIC, intFInfo.getDocValuesType());
    assertEquals(0, intFInfo.getPointDimensionCount());
    assertEquals(0, intFInfo.getPointNumBytes());

    FieldInfo dintFInfo = leafReader.getFieldInfos().fieldInfo("dint");
    assertEquals(DocValuesType.NUMERIC, dintFInfo.getDocValuesType());
    assertEquals(1, dintFInfo.getPointDimensionCount());
    assertEquals(4, dintFInfo.getPointNumBytes());

    FieldInfo dvFInfo = leafReader.getFieldInfos().fieldInfo("dv");
    assertEquals(DocValuesType.NUMERIC, dvFInfo.getDocValuesType());

    FieldInfo storedFInfo = leafReader.getFieldInfos().fieldInfo("stored");
    assertEquals(DocValuesType.NONE, storedFInfo.getDocValuesType());

    TestUtil.checkReader(ir);
    ir.close();
    dir.close();
}

From source file: org.apache.solr.update.DirectUpdateHandler2.java

License: Apache License
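
A production call site: Solr's DirectUpdateHandler2 maps an optimize command onto writer.forceMerge(cmd.maxOptimizeSegments) inside its commit path, taking the core's commit lock for hard commits.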

@Override
public void commit(CommitUpdateCommand cmd) throws IOException {
    if (cmd.prepareCommit) {
        prepareCommit(cmd);
        return;
    }

    if (cmd.optimize) {
        optimizeCommands.incrementAndGet();
    } else {
        commitCommands.incrementAndGet();
        if (cmd.expungeDeletes)
            expungeDeleteCommands.incrementAndGet();
    }

    Future[] waitSearcher = null;
    if (cmd.waitSearcher) {
        waitSearcher = new Future[1];
    }

    boolean error = true;
    try {
        // only allow one hard commit to proceed at once
        if (!cmd.softCommit) {
            solrCoreState.getCommitLock().lock();
        }

        log.info("start " + cmd);

        // We must cancel pending commits *before* we actually execute the commit.

        if (cmd.openSearcher) {
            // we can cancel any pending soft commits if this commit will open a new searcher
            softCommitTracker.cancelPendingCommit();
        }
        if (!cmd.softCommit && (cmd.openSearcher || !commitTracker.getOpenSearcher())) {
            // cancel a pending hard commit if this commit is of equal or greater "strength"...
            // If the autoCommit has openSearcher=true, then this commit must have openSearcher=true
            // to cancel.
            commitTracker.cancelPendingCommit();
        }

        RefCounted<IndexWriter> iw = solrCoreState.getIndexWriter(core);
        try {
            IndexWriter writer = iw.get();
            if (cmd.optimize) {
                writer.forceMerge(cmd.maxOptimizeSegments);
            } else if (cmd.expungeDeletes) {
                writer.forceMergeDeletes();
            }

            if (!cmd.softCommit) {
                synchronized (solrCoreState.getUpdateLock()) { // sync is currently needed to prevent preCommit
                    // from being called between preSoft and
                    // postSoft... see postSoft comments.
                    if (ulog != null)
                        ulog.preCommit(cmd);
                }

                // SolrCore.verbose("writer.commit() start writer=",writer);

                if (writer.hasUncommittedChanges()) {
                    final Map<String, String> commitData = new HashMap<String, String>();
                    commitData.put(SolrIndexWriter.COMMIT_TIME_MSEC_KEY,
                            String.valueOf(System.currentTimeMillis()));
                    writer.setCommitData(commitData);
                    writer.commit();
                } else {
                    log.info("No uncommitted changes. Skipping IW.commit.");
                }

                // SolrCore.verbose("writer.commit() end");
                numDocsPending.set(0);
                callPostCommitCallbacks();
            } else {
                callPostSoftCommitCallbacks();
            }
        } finally {
            iw.decref();
        }

        if (cmd.optimize) {
            callPostOptimizeCallbacks();
        }

        if (cmd.softCommit) {
            // ulog.preSoftCommit();
            synchronized (solrCoreState.getUpdateLock()) {
                if (ulog != null)
                    ulog.preSoftCommit(cmd);
                core.getSearcher(true, false, waitSearcher, true);
                if (ulog != null)
                    ulog.postSoftCommit(cmd);
            }
            // ulog.postSoftCommit();
        } else {
            synchronized (solrCoreState.getUpdateLock()) {
                if (ulog != null)
                    ulog.preSoftCommit(cmd);
                if (cmd.openSearcher) {
                    core.getSearcher(true, false, waitSearcher);
                } else {
                    // force open a new realtime searcher so realtime-get and versioning code can see the latest
                    RefCounted<SolrIndexSearcher> searchHolder = core.openNewSearcher(true, true);
                    searchHolder.decref();
                }
                if (ulog != null)
                    ulog.postSoftCommit(cmd);
            }
            if (ulog != null)
                ulog.postCommit(cmd); // postCommit currently means new searcher has
            // also been opened
        }

        // reset commit tracking

        if (cmd.softCommit) {
            softCommitTracker.didCommit();
        } else {
            commitTracker.didCommit();
        }

        log.info("end_commit_flush");

        error = false;
    } finally {
        if (!cmd.softCommit) {
            solrCoreState.getCommitLock().unlock();
        }

        addCommands.set(0);
        deleteByIdCommands.set(0);
        deleteByQueryCommands.set(0);
        if (error)
            numErrors.incrementAndGet();
    }

    // if we are supposed to wait for the searcher to be registered, then we should do it
    // outside any synchronized block so that other update operations can proceed.
    if (waitSearcher != null && waitSearcher[0] != null) {
        try {
            waitSearcher[0].get();
        } catch (InterruptedException e) {
            SolrException.log(log, e);
        } catch (ExecutionException e) {
            SolrException.log(log, e);
        }
    }
}

From source file: org.apache.vxquery.runtime.functions.index.IndexConstructorUtil.java

License: Apache License
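
VXQuery's index builder force-merges to a single segment once all XML files are indexed; as its own comment notes, this makes writes slower but searches faster.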

public void evaluate(String collectioFolder, String indexFolder, IPointable result,
        ArrayBackedValueStorage abvs, ITreeNodeIdProvider nodeIdProvider, ArrayBackedValueStorage abvsFileNode,
        boolean isElementPath, String nodeId) throws IOException {

    metaFileUtil = new MetaFileUtil(indexFolder);
    isMetaFilePresent = metaFileUtil.isMetaFilePresent();
    metaFileUtil.setCollection(collectioFolder);

    File collectionDirectory = new File(collectioFolder);
    if (!collectionDirectory.exists()) {
        throw new IOException("The collection directory (" + collectioFolder + ") does not exist.");
    }

    try {
        abvs.reset();
        sb.reset(abvs);

        Directory dir = FSDirectory.open(Paths.get(indexFolder));
        Analyzer analyzer = new CaseSensitiveAnalyzer();
        IndexWriterConfig iwc = new IndexWriterConfig(analyzer);

        // Create will overwrite the index everytime
        iwc.setOpenMode(OpenMode.CREATE);

        //Create an index writer
        IndexWriter writer = new IndexWriter(dir, iwc);

        //Add files to index
        indexXmlFiles(collectionDirectory, writer, isElementPath, abvsFileNode, nodeIdProvider, sb, nodeId);

        if (!isMetaFilePresent) {
            // Write metadata map to a file.
            metaFileUtil.updateMetadataMap(metadataMap, indexFolder);
            metaFileUtil.writeMetadataToFile();
        }

        //This makes write slower but search faster.
        writer.forceMerge(1);

        writer.close();

        sb.finish();
        result.set(abvs);
    } catch (IOException e) {
        throw new SystemException(ErrorCode.SYSE0001, e);
    }
}

From source file: org.elasticsearch.benchmark.compress.LuceneCompressionBenchmark.java

License: Apache License
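
An Elasticsearch benchmark that feeds identical documents into uncompressed, LZF-compressed, and Snappy-compressed directories, then force-merges each index to one segment and waits for the merges to complete.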

public static void main(String[] args) throws Exception {
    final long MAX_SIZE = ByteSizeValue.parseBytesSizeValue("50mb").bytes();
    final boolean WITH_TV = true;

    File testFile = new File("target/test/compress/lucene");
    FileSystemUtils.deleteRecursively(testFile);
    testFile.mkdirs();

    FSDirectory uncompressedDir = new NIOFSDirectory(new File(testFile, "uncompressed"));
    IndexWriter uncompressedWriter = new IndexWriter(uncompressedDir,
            new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));

    Directory compressedLzfDir = new CompressedDirectory(
            new NIOFSDirectory(new File(testFile, "compressed_lzf")), new LZFCompressor(), false, "fdt", "tvf");
    IndexWriter compressedLzfWriter = new IndexWriter(compressedLzfDir,
            new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));

    Directory compressedSnappyDir = new CompressedDirectory(
            new NIOFSDirectory(new File(testFile, "compressed_snappy")), new XerialSnappyCompressor(), false,
            "fdt", "tvf");
    IndexWriter compressedSnappyWriter = new IndexWriter(compressedSnappyDir,
            new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));

    System.out.println("feeding data...");
    TestData testData = new TestData();
    while (testData.next() && testData.getTotalSize() < MAX_SIZE) {
        // json
        XContentBuilder builder = XContentFactory.jsonBuilder();
        testData.current(builder);
        builder.close();
        Document doc = new Document();
        doc.add(new Field("_source", builder.bytes().array(), builder.bytes().arrayOffset(),
                builder.bytes().length()));
        if (WITH_TV) {
            Field field = new Field("text", builder.string(), Field.Store.NO, Field.Index.ANALYZED,
                    Field.TermVector.WITH_POSITIONS_OFFSETS);
            doc.add(field);
        }
        uncompressedWriter.addDocument(doc);
        compressedLzfWriter.addDocument(doc);
        compressedSnappyWriter.addDocument(doc);
    }
    System.out.println("optimizing...");
    uncompressedWriter.forceMerge(1);
    compressedLzfWriter.forceMerge(1);
    compressedSnappyWriter.forceMerge(1);
    uncompressedWriter.waitForMerges();
    compressedLzfWriter.waitForMerges();
    compressedSnappyWriter.waitForMerges();

    System.out.println("done");
    uncompressedWriter.close();
    compressedLzfWriter.close();
    compressedSnappyWriter.close();

    compressedLzfDir.close();
    compressedSnappyDir.close();
    uncompressedDir.close();
}

From source file: org.elasticsearch.benchmark.compress.LuceneCompressionBenchmarkTests.java

License: Apache License
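
The test-suite variant of the benchmark above; the Snappy writer is commented out, but the uncompressed and LZF writers are still force-merged to a single segment.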

@Test
public void test() throws Exception {
    final long MAX_SIZE = ByteSizeValue.parseBytesSizeValue("50mb").bytes();
    final boolean WITH_TV = true;

    File testFile = new File("target/test/compress/lucene");
    FileSystemUtils.deleteRecursively(testFile);
    testFile.mkdirs();

    FSDirectory uncompressedDir = new NIOFSDirectory(new File(testFile, "uncompressed"));
    IndexWriter uncompressedWriter = new IndexWriter(uncompressedDir,
            new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));

    Compressor lzf = CompressorFactory.compressor("lzf");
    Directory compressedLzfDir = new CompressedDirectory(
            new NIOFSDirectory(new File(testFile, "compressed_lzf")), lzf, false, "fdt", "tvf");
    IndexWriter compressedLzfWriter = new IndexWriter(compressedLzfDir,
            new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));

    //Directory compressedSnappyDir = new CompressedDirectory(new NIOFSDirectory(new File(testFile, "compressed_snappy")), new XerialSnappyCompressor(), false, "fdt", "tvf");
    //IndexWriter compressedSnappyWriter = new IndexWriter(compressedSnappyDir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));

    logger.info("feeding data...");
    TestData testData = new TestData();
    while (testData.next() && testData.getTotalSize() < MAX_SIZE) {
        // json
        XContentBuilder builder = XContentFactory.jsonBuilder();
        testData.current(builder);
        builder.close();
        Document doc = new Document();
        doc.add(new Field("_source", builder.bytes().array(), builder.bytes().arrayOffset(),
                builder.bytes().length()));
        if (WITH_TV) {
            Field field = new Field("text", builder.string(), Field.Store.NO, Field.Index.ANALYZED,
                    Field.TermVector.WITH_POSITIONS_OFFSETS);
            doc.add(field);
        }
        uncompressedWriter.addDocument(doc);
        compressedLzfWriter.addDocument(doc);
        //compressedSnappyWriter.addDocument(doc);
    }
    logger.info("optimizing...");
    uncompressedWriter.forceMerge(1);
    compressedLzfWriter.forceMerge(1);
    //compressedSnappyWriter.forceMerge(1);
    uncompressedWriter.waitForMerges();
    compressedLzfWriter.waitForMerges();
    //compressedSnappyWriter.waitForMerges();

    logger.info("done");
    uncompressedWriter.close();
    compressedLzfWriter.close();
    //compressedSnappyWriter.close();

    compressedLzfDir.close();
    //compressedSnappyDir.close();
    uncompressedDir.close();
}