Example usage for org.apache.lucene.index IndexWriterConfig setMergePolicy

Introduction

On this page you can find example usages of org.apache.lucene.index.IndexWriterConfig#setMergePolicy, which installs the MergePolicy that an IndexWriter uses to select and merge index segments.

Prototype

@Override
public IndexWriterConfig setMergePolicy(MergePolicy mergePolicy)
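
Before the project examples below, here is a minimal, self-contained sketch of the typical call site. It is not taken from any of the sources on this page: the in-memory RAMDirectory, the StandardAnalyzer, the TieredMergePolicy, and its 512 MB segment cap are illustrative assumptions against a Lucene 5/6-era API. Note that setMergePolicy returns the config itself, so it can be chained with the other setters.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class SetMergePolicyExample {
    public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory(); // in-memory directory, for illustration only
        IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());

        TieredMergePolicy mergePolicy = new TieredMergePolicy();
        mergePolicy.setMaxMergedSegmentMB(512.0); // assumed cap; tune for real indexes
        config.setMergePolicy(mergePolicy);       // returns the config, so calls chain

        try (IndexWriter writer = new IndexWriter(dir, config)) {
            Document doc = new Document();
            doc.add(new TextField("body", "hello merge policy", Field.Store.NO));
            writer.addDocument(doc);
            // segments written from here on are merged according to mergePolicy
        }
        dir.close();
    }
}

Several of the examples below pass NoMergePolicy.INSTANCE instead, which disables merging entirely so that tests keep a deterministic segment layout, or newLogMergePolicy(), the randomized LogMergePolicy factory from Lucene's test framework (LuceneTestCase).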

Usage

From source file: org.apache.solr.uninverting.TestFieldCache.java

License: Apache License

public void testLongFieldCache() throws IOException {
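    // Index random long values ("f" as both a LongPoint and a StoredField),
    // occasionally leaving a document empty to model a missing value, then
    // force-merge and verify FieldCache uninverts the expected values.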
    Directory dir = newDirectory();
    IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
    cfg.setMergePolicy(newLogMergePolicy());
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
    Document doc = new Document();
    LongPoint field = new LongPoint("f", 0L);
    StoredField field2 = new StoredField("f", 0L);
    doc.add(field);
    doc.add(field2);
    final long[] values = new long[TestUtil.nextInt(random(), 1, 10)];
    Set<Integer> missing = new HashSet<>();
    for (int i = 0; i < values.length; ++i) {
        final long v;
        switch (random().nextInt(10)) {
        case 0:
            v = Long.MIN_VALUE;
            break;
        case 1:
            v = 0;
            break;
        case 2:
            v = Long.MAX_VALUE;
            break;
        default:
            v = TestUtil.nextLong(random(), -10, 10);
            break;
        }
        values[i] = v;
        if (v == 0 && random().nextBoolean()) {
            // missing
            iw.addDocument(new Document());
            missing.add(i);
        } else {
            field.setLongValue(v);
            field2.setLongValue(v);
            iw.addDocument(doc);
        }
    }
    iw.forceMerge(1);
    final DirectoryReader reader = iw.getReader();
    final NumericDocValues longs = FieldCache.DEFAULT.getNumerics(getOnlyLeafReader(reader), "f",
            FieldCache.LONG_POINT_PARSER);
    for (int i = 0; i < values.length; ++i) {
        if (missing.contains(i) == false) {
            assertEquals(i, longs.nextDoc());
            assertEquals(values[i], longs.longValue());
        }
    }
    assertEquals(NO_MORE_DOCS, longs.nextDoc());
    reader.close();
    iw.close();
    dir.close();
}

From source file: org.apache.solr.uninverting.TestFieldCache.java

License: Apache License

public void testIntFieldCache() throws IOException {
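    // Same pattern for ints: index random IntPoint values with some docs left
    // empty, force-merge, and check the uninverted values via INT_POINT_PARSER.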
    Directory dir = newDirectory();
    IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
    cfg.setMergePolicy(newLogMergePolicy());
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
    Document doc = new Document();
    IntPoint field = new IntPoint("f", 0);
    doc.add(field);
    final int[] values = new int[TestUtil.nextInt(random(), 1, 10)];
    Set<Integer> missing = new HashSet<>();
    for (int i = 0; i < values.length; ++i) {
        final int v;
        switch (random().nextInt(10)) {
        case 0:
            v = Integer.MIN_VALUE;
            break;
        case 1:
            v = 0;
            break;
        case 2:
            v = Integer.MAX_VALUE;
            break;
        default:
            v = TestUtil.nextInt(random(), -10, 10);
            break;
        }
        values[i] = v;
        if (v == 0 && random().nextBoolean()) {
            // missing
            iw.addDocument(new Document());
            missing.add(i);
        } else {
            field.setIntValue(v);
            iw.addDocument(doc);
        }
    }
    iw.forceMerge(1);
    final DirectoryReader reader = iw.getReader();
    final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(getOnlyLeafReader(reader), "f",
            FieldCache.INT_POINT_PARSER);
    for (int i = 0; i < values.length; ++i) {
        if (missing.contains(i) == false) {
            assertEquals(i, ints.nextDoc());
            assertEquals(values[i], ints.longValue());
        }
    }
    assertEquals(NO_MORE_DOCS, ints.nextDoc());
    reader.close();
    iw.close();
    dir.close();
}

From source file: org.apache.solr.uninverting.TestLegacyFieldCache.java

License: Apache License

public void testLongFieldCache() throws IOException {
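    // Legacy variant: index random LegacyLongField values, leave some docs
    // empty, force-merge, and verify FieldCache via LEGACY_LONG_PARSER.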
    Directory dir = newDirectory();
    IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
    cfg.setMergePolicy(newLogMergePolicy());
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
    Document doc = new Document();
    LegacyLongField field = new LegacyLongField("f", 0L, Store.YES);
    doc.add(field);
    final long[] values = new long[TestUtil.nextInt(random(), 1, 10)];
    Set<Integer> missing = new HashSet<>();
    for (int i = 0; i < values.length; ++i) {
        final long v;
        switch (random().nextInt(10)) {
        case 0:
            v = Long.MIN_VALUE;
            break;
        case 1:
            v = 0;
            break;
        case 2:
            v = Long.MAX_VALUE;
            break;
        default:
            v = TestUtil.nextLong(random(), -10, 10);
            break;
        }
        values[i] = v;
        if (v == 0 && random().nextBoolean()) {
            // missing
            iw.addDocument(new Document());
            missing.add(i);
        } else {
            field.setLongValue(v);
            iw.addDocument(doc);
        }
    }
    iw.forceMerge(1);
    final DirectoryReader reader = iw.getReader();
    final NumericDocValues longs = FieldCache.DEFAULT.getNumerics(getOnlyLeafReader(reader), "f",
            FieldCache.LEGACY_LONG_PARSER);
    for (int i = 0; i < values.length; ++i) {
        if (missing.contains(i) == false) {
            assertEquals(i, longs.nextDoc());
            assertEquals(values[i], longs.longValue());
        }
    }
    assertEquals(NO_MORE_DOCS, longs.nextDoc());
    reader.close();
    iw.close();
    dir.close();
}

From source file: org.apache.solr.uninverting.TestLegacyFieldCache.java

License: Apache License

public void testIntFieldCache() throws IOException {
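    // Legacy variant for ints, verified via LEGACY_INT_PARSER.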
    Directory dir = newDirectory();
    IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
    cfg.setMergePolicy(newLogMergePolicy());
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
    Document doc = new Document();
    LegacyIntField field = new LegacyIntField("f", 0, Store.YES);
    doc.add(field);
    final int[] values = new int[TestUtil.nextInt(random(), 1, 10)];
    Set<Integer> missing = new HashSet<>();
    for (int i = 0; i < values.length; ++i) {
        final int v;
        switch (random().nextInt(10)) {
        case 0:
            v = Integer.MIN_VALUE;
            break;
        case 1:
            v = 0;
            break;
        case 2:
            v = Integer.MAX_VALUE;
            break;
        default:
            v = TestUtil.nextInt(random(), -10, 10);
            break;
        }
        values[i] = v;
        if (v == 0 && random().nextBoolean()) {
            // missing
            iw.addDocument(new Document());
            missing.add(i);
        } else {
            field.setIntValue(v);
            iw.addDocument(doc);
        }
    }
    iw.forceMerge(1);
    final DirectoryReader reader = iw.getReader();
    final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(getOnlyLeafReader(reader), "f",
            FieldCache.LEGACY_INT_PARSER);
    for (int i = 0; i < values.length; ++i) {
        if (missing.contains(i) == false) {
            assertEquals(i, ints.nextDoc());
            assertEquals(values[i], ints.longValue());
        }
    }
    assertEquals(NO_MORE_DOCS, ints.nextDoc());
    reader.close();
    iw.close();
    dir.close();
}

From source file: org.apache.solr.update.SolrIndexConfig.java

License: Apache License

public IndexWriterConfig toIndexWriterConfig(IndexSchema schema) {
    // so that we can update the analyzer on core reload, we pass null
    // for the default analyzer, and explicitly pass an analyzer on 
    // appropriate calls to IndexWriter

    IndexWriterConfig iwc = new IndexWriterConfig(luceneVersion, null);
    if (maxBufferedDocs != -1)
        iwc.setMaxBufferedDocs(maxBufferedDocs);

    if (ramBufferSizeMB != -1)
        iwc.setRAMBufferSizeMB(ramBufferSizeMB);

    if (termIndexInterval != -1)
        iwc.setTermIndexInterval(termIndexInterval);

    if (writeLockTimeout != -1)
        iwc.setWriteLockTimeout(writeLockTimeout);

    iwc.setSimilarity(schema.getSimilarity());
    iwc.setMergePolicy(buildMergePolicy(schema));
    iwc.setMergeScheduler(buildMergeScheduler(schema));
    iwc.setInfoStream(infoStream);

    // do this after buildMergePolicy since the backcompat logic 
    // there may modify the effective useCompoundFile
    iwc.setUseCompoundFile(getUseCompoundFile());

    if (maxIndexingThreads != -1) {
        iwc.setMaxThreadStates(maxIndexingThreads);
    }

    if (mergedSegmentWarmerInfo != null) {
        // TODO: add infostream -> normal logging system (there is an issue somewhere)
        IndexReaderWarmer warmer = schema.getResourceLoader().newInstance(mergedSegmentWarmerInfo.className,
                IndexReaderWarmer.class, null, new Class[] { InfoStream.class },
                new Object[] { iwc.getInfoStream() });
        iwc.setMergedSegmentWarmer(warmer);
    }

    return iwc;
}

From source file: org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReaderTests.java

License: Apache License

/** Test that core cache key (needed for NRT) is working */
public void testCoreCacheKey() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(null);
    iwc.setMaxBufferedDocs(100);
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    IndexWriter iw = new IndexWriter(dir, iwc);

    // add two docs, id:0 and id:1
    Document doc = new Document();
    Field idField = new StringField("id", "", Field.Store.NO);
    doc.add(idField);
    idField.setStringValue("0");
    iw.addDocument(doc);
    idField.setStringValue("1");
    iw.addDocument(doc);

    // open reader
    ShardId shardId = new ShardId(new Index("fake"), 1);
    DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(iw, true), shardId);
    assertEquals(2, ir.numDocs());
    assertEquals(1, ir.leaves().size());

    // delete id:0 and reopen
    iw.deleteDocuments(new Term("id", "0"));
    DirectoryReader ir2 = DirectoryReader.openIfChanged(ir);

    // we should have the same cache key as before
    assertEquals(1, ir2.numDocs());
    assertEquals(1, ir2.leaves().size());
    assertSame(ir.leaves().get(0).reader().getCoreCacheKey(), ir2.leaves().get(0).reader().getCoreCacheKey());

    // this is kind of stupid, but for now it's here
    assertNotSame(ir.leaves().get(0).reader().getCombinedCoreAndDeletesKey(),
            ir2.leaves().get(0).reader().getCombinedCoreAndDeletesKey());

    IOUtils.close(ir, ir2, iw, dir);
}

From source file: org.elasticsearch.common.lucene.index.ESDirectoryReaderTests.java

License: Apache License

/** Test that core cache key (needed for NRT) is working */
public void testCoreCacheKey() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(null);
    iwc.setMaxBufferedDocs(100);
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    IndexWriter iw = new IndexWriter(dir, iwc);

    // add two docs, id:0 and id:1
    Document doc = new Document();
    Field idField = new StringField("id", "", Field.Store.NO);
    doc.add(idField);
    idField.setStringValue("0");
    iw.addDocument(doc);
    idField.setStringValue("1");
    iw.addDocument(doc);

    // open reader
    ShardId shardId = new ShardId(new Index("fake"), 1);
    DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(iw, true), shardId);
    assertEquals(2, ir.numDocs());
    assertEquals(1, ir.leaves().size());

    // delete id:0 and reopen
    iw.deleteDocuments(new Term("id", "0"));
    DirectoryReader ir2 = DirectoryReader.openIfChanged(ir);

    // we should have the same cache key as before
    assertEquals(1, ir2.numDocs());
    assertEquals(1, ir2.leaves().size());
    assertSame(ir.leaves().get(0).reader().getCoreCacheKey(), ir2.leaves().get(0).reader().getCoreCacheKey());
    IOUtils.close(ir, ir2, iw, dir);
}

From source file: org.elasticsearch.common.lucene.index.FreqTermsEnumTests.java

License: Apache License

@Before
@Override
public void setUp() throws Exception {
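    // Build a randomized index: each doc carries a random subset of terms with
    // frequencies 1-3, some docs are later deleted, and reference frequency
    // maps (all docs, live docs only, and a filtered subset) are recorded for
    // the test to compare against.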
    super.setUp();
    referenceAll = Maps.newHashMap();
    referenceNotDeleted = Maps.newHashMap();
    referenceFilter = Maps.newHashMap();

    Directory dir = newDirectory();
    IndexWriterConfig conf = newIndexWriterConfig(new KeywordAnalyzer()); // use a keyword analyzer; we rely on the stored field holding the exact term.
    if (frequently()) {
        // we don't want to do any merges, so we won't expunge deletes
        conf.setMergePolicy(NoMergePolicy.INSTANCE);
    }

    iw = new IndexWriter(dir, conf);
    terms = new String[scaledRandomIntBetween(10, 300)];
    for (int i = 0; i < terms.length; i++) {
        terms[i] = randomAsciiOfLength(5);
    }

    int numberOfDocs = scaledRandomIntBetween(30, 300);
    Document[] docs = new Document[numberOfDocs];
    for (int i = 0; i < numberOfDocs; i++) {
        Document doc = new Document();
        doc.add(new StringField("id", Integer.toString(i), Field.Store.YES));
        docs[i] = doc;
        for (String term : terms) {
            if (randomBoolean()) {
                continue;
            }
            int freq = randomIntBetween(1, 3);
            for (int j = 0; j < freq; j++) {
                doc.add(new TextField("field", term, Field.Store.YES));
            }
        }
    }

    // add all docs

    for (int i = 0; i < docs.length; i++) {
        Document doc = docs[i];
        iw.addDocument(doc);
        if (rarely()) {
            iw.commit();
        }
    }

    Set<String> deletedIds = Sets.newHashSet();
    for (int i = 0; i < docs.length; i++) {
        Document doc = docs[i];
        if (randomInt(5) == 2) {
            Term idTerm = new Term("id", doc.getField("id").stringValue());
            deletedIds.add(idTerm.text());
            iw.deleteDocuments(idTerm);
        }
    }

    for (String term : terms) {
        referenceAll.put(term, new FreqHolder());
        referenceFilter.put(term, new FreqHolder());
        referenceNotDeleted.put(term, new FreqHolder());
    }

    // now go over each doc, build the relevant references and filter
    reader = DirectoryReader.open(iw, true);
    List<Term> filterTerms = new ArrayList<>();
    for (int docId = 0; docId < reader.maxDoc(); docId++) {
        Document doc = reader.document(docId);
        addFreqs(doc, referenceAll);
        if (!deletedIds.contains(doc.getField("id").stringValue())) {
            addFreqs(doc, referenceNotDeleted);
            if (randomBoolean()) {
                filterTerms.add(new Term("id", doc.getField("id").stringValue()));
                addFreqs(doc, referenceFilter);
            }
        }
    }
    filter = new TermsQuery(filterTerms);
}

From source file: org.elasticsearch.common.lucene.LuceneTests.java

License: Apache License

public void testWaitForIndex() throws Exception {
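    // A background thread blocks in Lucene.waitForIndex (up to 5s) while the
    // main thread creates the index; once the first commit lands, the wait
    // must succeed.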
    final MockDirectoryWrapper dir = newMockDirectory();

    final AtomicBoolean succeeded = new AtomicBoolean(false);
    final CountDownLatch latch = new CountDownLatch(1);

    // Create a shadow Engine, which will freak out because there is no
    // index yet
    Thread t = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                latch.await();
                if (Lucene.waitForIndex(dir, 5000)) {
                    succeeded.set(true);
                } else {
                    fail("index should have eventually existed!");
                }
            } catch (InterruptedException e) {
                // ignore interruptions
            } catch (Exception e) {
                fail("should have been able to create the engine! " + e.getMessage());
            }
        }
    });
    t.start();

    // count down latch
    // now shadow engine should try to be created
    latch.countDown();

    dir.setEnableVirusScanner(false);
    IndexWriterConfig iwc = newIndexWriterConfig();
    iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setMaxBufferedDocs(2);
    IndexWriter writer = new IndexWriter(dir, iwc);
    Document doc = new Document();
    doc.add(new TextField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    writer.addDocument(doc);
    writer.commit();

    t.join();

    writer.close();
    dir.close();
    assertTrue("index should have eventually existed", succeeded.get());
}

From source file: org.elasticsearch.common.lucene.LuceneTests.java

License: Apache License

public void testCleanIndex() throws IOException {
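    // Write a few segments including one deletion, then verify that
    // Lucene.cleanLuceneIndex removes every index file except the write lock.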
    MockDirectoryWrapper dir = newMockDirectory();
    dir.setEnableVirusScanner(false);
    IndexWriterConfig iwc = newIndexWriterConfig();
    iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setMaxBufferedDocs(2);
    IndexWriter writer = new IndexWriter(dir, iwc);
    Document doc = new Document();
    doc.add(new TextField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    writer.addDocument(doc);
    writer.commit();

    doc = new Document();
    doc.add(new TextField("id", "2", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    writer.addDocument(doc);

    doc = new Document();
    doc.add(new TextField("id", "3", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    writer.addDocument(doc);

    writer.commit();
    doc = new Document();
    doc.add(new TextField("id", "4", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    writer.addDocument(doc);

    writer.deleteDocuments(new Term("id", "2"));
    writer.commit();
    try (DirectoryReader open = DirectoryReader.open(writer, true)) {
        assertEquals(3, open.numDocs());
        assertEquals(1, open.numDeletedDocs());
        assertEquals(4, open.maxDoc());
    }
    writer.close();
    if (random().nextBoolean()) {
        for (String file : dir.listAll()) {
            if (file.startsWith("_1")) {
                // delete a random file
                dir.deleteFile(file);
                break;
            }
        }
    }
    Lucene.cleanLuceneIndex(dir);
    if (dir.listAll().length > 0) {
        for (String file : dir.listAll()) {
            if (file.startsWith("extra") == false) {
                assertEquals(file, "write.lock");
            }
        }
    }
    dir.close();
}