Example usage for org.apache.lucene.index NoMergePolicy INSTANCE

Introduction

This page lists usage examples for org.apache.lucene.index.NoMergePolicy.INSTANCE.

Prototype

public static final MergePolicy INSTANCE

Document

Singleton instance.
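
Before the collected examples, here is a minimal, self-contained sketch of the typical pattern: setting NoMergePolicy.INSTANCE on an IndexWriterConfig so that segments written by the IndexWriter are never merged. The class name and the use of RAMDirectory are illustrative only and assume a Lucene 5.x-era API, matching the examples below.

import java.io.IOException;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.store.RAMDirectory;

public class NoMergePolicyExample {
    public static void main(String[] args) throws IOException {
        RAMDirectory dir = new RAMDirectory();
        IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
        // Disable merging entirely: every flushed segment stays as written.
        config.setMergePolicy(NoMergePolicy.INSTANCE);
        try (IndexWriter writer = new IndexWriter(dir, config)) {
            for (int i = 0; i < 3; i++) {
                Document doc = new Document();
                doc.add(new StringField("id", Integer.toString(i), Field.Store.YES));
                writer.addDocument(doc);
                writer.commit(); // each commit leaves its own segment; none are merged away
            }
        }
    }
}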

Usage

From source file: io.druid.extension.lucene.LuceneDruidSegment.java

License: Apache License

private static IndexWriter buildRamWriter(RAMDirectory dir, Analyzer analyzer, int maxDocsPerSegment)
        throws IOException {
    IndexWriterConfig writerConfig = new IndexWriterConfig(analyzer);
    writerConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
    // some arbitrary large numbers
    writerConfig.setMaxBufferedDocs(maxDocsPerSegment * 2);
    writerConfig.setRAMBufferSizeMB(5000);
    writerConfig.setUseCompoundFile(false);
    writerConfig.setCommitOnClose(true);
    writerConfig.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
    writerConfig.setMergePolicy(NoMergePolicy.INSTANCE);
    writerConfig.setMergeScheduler(NoMergeScheduler.INSTANCE);
    return new IndexWriter(dir, writerConfig);
}

From source file: io.druid.extension.lucene.LuceneDruidSegment.java

License: Apache License

private static IndexWriter buildPersistWriter(Directory dir) throws IOException {
    IndexWriterConfig writerConfig = new IndexWriterConfig(null);
    writerConfig.setUseCompoundFile(false);
    writerConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
    writerConfig.setMergePolicy(NoMergePolicy.INSTANCE);
    writerConfig.setMergeScheduler(NoMergeScheduler.INSTANCE);
    return new IndexWriter(dir, writerConfig);
}

From source file: org.apache.solr.core.snapshots.SolrSnapshotManager.java

License: Apache License

/**
 * This method deletes index files of the {@linkplain IndexCommit} for the specified generation number.
 *
 * @param core The Solr core.
 * @param dir The index directory storing the snapshot.
 * @throws IOException in case of I/O errors.
 */
private static void deleteSnapshotIndexFiles(SolrCore core, Directory dir, IndexDeletionPolicy delPolicy)
        throws IOException {
    IndexWriterConfig conf = core.getSolrConfig().indexConfig.toIndexWriterConfig(core);
    conf.setOpenMode(OpenMode.APPEND);
    conf.setMergePolicy(NoMergePolicy.INSTANCE);//Don't want to merge any commits here!
    conf.setIndexDeletionPolicy(delPolicy);
    conf.setCodec(core.getCodec());

    try (SolrIndexWriter iw = new SolrIndexWriter("SolrSnapshotCleaner", dir, conf)) {
        // Do nothing. The only purpose of opening index writer is to invoke the Lucene IndexDeletionPolicy#onInit
        // method so that we can clean up the files associated with the specified index commit.
        // Note the index writer creates a new commit during the close() operation (which is harmless).
    }
}
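
Note the combination of OpenMode.APPEND with NoMergePolicy.INSTANCE here: the writer is opened only so that the deletion policy's onInit hook runs over the existing commits, and disabling merges guarantees the writer does not rewrite or combine any segments as a side effect of that cleanup.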

From source file: org.apache.solr.index.NoMergePolicyFactory.java

License: Apache License

@Override
protected MergePolicy getMergePolicyInstance() {
    return NoMergePolicy.INSTANCE;
}
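
In Solr, a factory like this is normally wired up through the mergePolicyFactory element of indexConfig in solrconfig.xml rather than instantiated directly; the exact element and attribute names vary by Solr version, so treat this as a pointer to the Solr reference guide rather than an exact configuration.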

From source file: org.apache.solr.index.WrapperMergePolicyFactoryTest.java

License: Apache License

public void testReturnsDefaultMergePolicyIfNoneSpecified() {
    final MergePolicyFactoryArgs args = new MergePolicyFactoryArgs();
    MergePolicyFactory mpf = new DefaultingWrapperMergePolicyFactory(resourceLoader, args, null);
    assertSame(mpf.getMergePolicy(), NoMergePolicy.INSTANCE);
}

From source file: org.codelibs.elasticsearch.common.lucene.Lucene.java

License: Apache License

/**
 * This method removes all files from the given directory that are not referenced by the given segments file.
 * This method will open an IndexWriter and relies on index file deleter to remove all unreferenced files. Segment files
 * that are newer than the given segments file are removed forcefully to prevent problems with IndexWriter opening a potentially
 * broken commit point / leftover.
 * <b>Note:</b> this method will fail if there is another IndexWriter open on the given directory. This method will also acquire
 * a write lock from the directory while pruning unused files. This method expects an existing index in the given directory that has
 * the given segments file.
 */
public static SegmentInfos pruneUnreferencedFiles(String segmentsFileName, Directory directory)
        throws IOException {
    final SegmentInfos si = readSegmentInfos(segmentsFileName, directory);
    try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        int foundSegmentFiles = 0;
        for (final String file : directory.listAll()) {
            /**
             * we could also use a deletion policy here but in the case of snapshot and restore
             * sometimes we restore an index and override files that were referenced by a "future"
             * commit. If such a commit is opened by the IW it would likely throw a corrupted index exception
             * since checksums don't match anymore. That's why we prune the name here directly.
             * We also want the caller to know if we were not able to remove a segments_N file.
             */
            if (file.startsWith(IndexFileNames.SEGMENTS) || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
                foundSegmentFiles++;
                if (file.equals(si.getSegmentsFileName()) == false) {
                    directory.deleteFile(file); // remove all segments_N files except the one we want to keep
                }
            }
        }
        assert SegmentInfos.getLastCommitSegmentsFileName(directory).equals(segmentsFileName);
        if (foundSegmentFiles == 0) {
            throw new IllegalStateException("no commit found in the directory");
        }
    }
    final CommitPoint cp = new CommitPoint(si, directory);
    try (IndexWriter writer = new IndexWriter(directory,
            new IndexWriterConfig(Lucene.STANDARD_ANALYZER).setIndexCommit(cp).setCommitOnClose(false)
                    .setMergePolicy(NoMergePolicy.INSTANCE).setOpenMode(IndexWriterConfig.OpenMode.APPEND))) {
        // do nothing; closing the writer kicks off IndexFileDeleter, which removes all pending files
    }
    return si;
}
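
A hypothetical call site for the helper above might look like the following fragment; the index path is a placeholder, the Lucene class is the utility class shown above, and the usual imports (FSDirectory, Paths, SegmentInfos) are assumed. Per the Javadoc, no other IndexWriter may be open on the directory.

// Hypothetical usage of pruneUnreferencedFiles; the path is a placeholder.
try (Directory directory = FSDirectory.open(Paths.get("/path/to/index"))) {
    String segmentsFile = SegmentInfos.getLastCommitSegmentsFileName(directory);
    SegmentInfos kept = Lucene.pruneUnreferencedFiles(segmentsFile, directory);
    System.out.println("kept commit: " + kept.getSegmentsFileName());
}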

From source file: org.codelibs.elasticsearch.common.lucene.Lucene.java

License: Apache License

/**
 * This method removes all lucene files from the given directory. It will first try to delete all commit points / segments
 * files to ensure broken commits or corrupted indices will not be opened in the future. If any of the segment files can't be deleted
 * this operation fails.
 */
public static void cleanLuceneIndex(Directory directory) throws IOException {
    try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        for (final String file : directory.listAll()) {
            if (file.startsWith(IndexFileNames.SEGMENTS) || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
                directory.deleteFile(file); // remove all segment_N files
            }
        }
    }
    try (IndexWriter writer = new IndexWriter(directory,
            new IndexWriterConfig(Lucene.STANDARD_ANALYZER).setMergePolicy(NoMergePolicy.INSTANCE) // no merges
                    .setCommitOnClose(false) // no commits
                    .setOpenMode(IndexWriterConfig.OpenMode.CREATE))) // force creation - don't append...
    {
        // do nothing; closing the writer kicks off IndexFileDeleter, which removes all pending files
    }
}
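
As with the previous helper, a hedged usage sketch (the directory path is a placeholder and the same imports are assumed):

// Hypothetical cleanup of a possibly broken index; the path is a placeholder.
try (Directory dir = FSDirectory.open(Paths.get("/path/to/index"))) {
    Lucene.cleanLuceneIndex(dir); // deletes all commit points and unreferenced files
}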

From source file: org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReaderTests.java

License: Apache License

/** Test that core cache key (needed for NRT) is working */
public void testCoreCacheKey() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(null);
    iwc.setMaxBufferedDocs(100);
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    IndexWriter iw = new IndexWriter(dir, iwc);

    // add two docs, id:0 and id:1
    Document doc = new Document();
    Field idField = new StringField("id", "", Field.Store.NO);
    doc.add(idField);
    idField.setStringValue("0");
    iw.addDocument(doc);
    idField.setStringValue("1");
    iw.addDocument(doc);

    // open reader
    ShardId shardId = new ShardId(new Index("fake"), 1);
    DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(iw, true), shardId);
    assertEquals(2, ir.numDocs());
    assertEquals(1, ir.leaves().size());

    // delete id:0 and reopen
    iw.deleteDocuments(new Term("id", "0"));
    DirectoryReader ir2 = DirectoryReader.openIfChanged(ir);

    // we should have the same cache key as before
    assertEquals(1, ir2.numDocs());
    assertEquals(1, ir2.leaves().size());
    assertSame(ir.leaves().get(0).reader().getCoreCacheKey(), ir2.leaves().get(0).reader().getCoreCacheKey());

    // this is kind of stupid, but for now it's here
    assertNotSame(ir.leaves().get(0).reader().getCombinedCoreAndDeletesKey(),
            ir2.leaves().get(0).reader().getCombinedCoreAndDeletesKey());

    IOUtils.close(ir, ir2, iw, dir);
}

From source file: org.elasticsearch.common.lucene.index.ESDirectoryReaderTests.java

License: Apache License

/** Test that core cache key (needed for NRT) is working */
public void testCoreCacheKey() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(null);
    iwc.setMaxBufferedDocs(100);
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    IndexWriter iw = new IndexWriter(dir, iwc);

    // add two docs, id:0 and id:1
    Document doc = new Document();
    Field idField = new StringField("id", "", Field.Store.NO);
    doc.add(idField);
    idField.setStringValue("0");
    iw.addDocument(doc);
    idField.setStringValue("1");
    iw.addDocument(doc);

    // open reader
    ShardId shardId = new ShardId(new Index("fake"), 1);
    DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(iw, true), shardId);
    assertEquals(2, ir.numDocs());
    assertEquals(1, ir.leaves().size());

    // delete id:0 and reopen
    iw.deleteDocuments(new Term("id", "0"));
    DirectoryReader ir2 = DirectoryReader.openIfChanged(ir);

    // we should have the same cache key as before
    assertEquals(1, ir2.numDocs());
    assertEquals(1, ir2.leaves().size());
    assertSame(ir.leaves().get(0).reader().getCoreCacheKey(), ir2.leaves().get(0).reader().getCoreCacheKey());
    IOUtils.close(ir, ir2, iw, dir);
}

From source file: org.elasticsearch.common.lucene.index.FreqTermsEnumTests.java

License: Apache License

@Before
@Override
public void setUp() throws Exception {
    super.setUp();
    referenceAll = Maps.newHashMap();
    referenceNotDeleted = Maps.newHashMap();
    referenceFilter = Maps.newHashMap();

    Directory dir = newDirectory();
    IndexWriterConfig conf = newIndexWriterConfig(new KeywordAnalyzer()); // use keyword analyzer; we rely on the stored field holding the exact term
    if (frequently()) {
        // we don't want to do any merges, so we won't expunge deletes
        conf.setMergePolicy(NoMergePolicy.INSTANCE);
    }

    iw = new IndexWriter(dir, conf);
    terms = new String[scaledRandomIntBetween(10, 300)];
    for (int i = 0; i < terms.length; i++) {
        terms[i] = randomAsciiOfLength(5);
    }

    int numberOfDocs = scaledRandomIntBetween(30, 300);
    Document[] docs = new Document[numberOfDocs];
    for (int i = 0; i < numberOfDocs; i++) {
        Document doc = new Document();
        doc.add(new StringField("id", Integer.toString(i), Field.Store.YES));
        docs[i] = doc;
        for (String term : terms) {
            if (randomBoolean()) {
                continue;
            }
            int freq = randomIntBetween(1, 3);
            for (int j = 0; j < freq; j++) {
                doc.add(new TextField("field", term, Field.Store.YES));
            }
        }
    }

    // add all docs

    for (int i = 0; i < docs.length; i++) {
        Document doc = docs[i];
        iw.addDocument(doc);
        if (rarely()) {
            iw.commit();
        }
    }

    Set<String> deletedIds = Sets.newHashSet();
    for (int i = 0; i < docs.length; i++) {
        Document doc = docs[i];
        if (randomInt(5) == 2) {
            Term idTerm = new Term("id", doc.getField("id").stringValue());
            deletedIds.add(idTerm.text());
            iw.deleteDocuments(idTerm);
        }
    }

    for (String term : terms) {
        referenceAll.put(term, new FreqHolder());
        referenceFilter.put(term, new FreqHolder());
        referenceNotDeleted.put(term, new FreqHolder());
    }

    // now go over each doc, build the relevant references and filter
    reader = DirectoryReader.open(iw, true);
    List<Term> filterTerms = new ArrayList<>();
    for (int docId = 0; docId < reader.maxDoc(); docId++) {
        Document doc = reader.document(docId);
        addFreqs(doc, referenceAll);
        if (!deletedIds.contains(doc.getField("id").stringValue())) {
            addFreqs(doc, referenceNotDeleted);
            if (randomBoolean()) {
                filterTerms.add(new Term("id", doc.getField("id").stringValue()));
                addFreqs(doc, referenceFilter);
            }
        }
    }
    filter = new TermsQuery(filterTerms);
}