Example usage for org.apache.lucene.index IndexWriter flush

List of usage examples for org.apache.lucene.index IndexWriter flush

Introduction

In this page you can find the example usage for org.apache.lucene.index IndexWriter flush.

Prototype

public final void flush() throws IOException 

Source Link

Document

Moves all in-memory segments to the Directory, but does not commit (fsync) them; call IndexWriter#commit for that.

Usage

From source file:org.elasticsearch.index.percolator.PercolatorQueryCacheTests.java

License:Apache License

public void testGetQueries() throws Exception {
    Directory directory = newDirectory();
    // NoMergePolicy keeps the three flushed segments separate so the reader
    // sees exactly three leaves below.
    IndexWriter indexWriter = new IndexWriter(directory,
            new IndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE));

    // Store nine queries in three batches, flushing after every third one.
    for (int id = 0; id < 9; id++) {
        String value = Integer.toString(id);
        storeQuery(value, indexWriter, termQuery("a", value), true, false);
        if (id % 3 == 2) {
            indexWriter.flush();
        }
    }
    indexWriter.close();

    ShardId shardId = new ShardId("_index", ClusterState.UNKNOWN_UUID, 0);
    IndexReader indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(directory), shardId);
    assertThat(indexReader.leaves().size(), equalTo(3));
    assertThat(indexReader.maxDoc(), equalTo(9));

    initialize("a", "type=keyword");

    // Before warming, asking the cache for queries must fail loudly.
    try {
        cache.getQueries(indexReader.leaves().get(0));
        fail("IllegalStateException expected");
    } catch (IllegalStateException e) {
        assertThat(e.getMessage(),
                equalTo("queries not loaded, queries should be have been preloaded during index warming..."));
    }

    // Warm the reader so the cache is populated for every segment.
    IndexShard indexShard = mockIndexShard(Version.CURRENT, false);
    ThreadPool threadPool = mockThreadPool();
    IndexWarmer.Listener listener = cache.createListener(threadPool);
    listener.warmReader(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader)));
    PercolatorQueryCacheStats stats = cache.getStats(shardId);
    assertThat(stats.getNumQueries(), equalTo(9L));

    // Each segment holds three consecutive queries in insertion order.
    for (int segment = 0; segment < 3; segment++) {
        PercolateQuery.QueryRegistry.Leaf leaf = cache.getQueries(indexReader.leaves().get(segment));
        for (int slot = 0; slot < 3; slot++) {
            String value = Integer.toString(segment * 3 + slot);
            assertThat(leaf.getQuery(slot), equalTo(new TermQuery(new Term("a", value))));
        }
    }

    indexReader.close();
    directory.close();
}

From source file:org.elasticsearch.index.percolator.PercolatorQueryCacheTests.java

License:Apache License

public void testInvalidateEntries() throws Exception {
    Directory directory = newDirectory();
    // NoMergePolicy keeps each flushed document in its own segment so the
    // first reader below sees exactly three leaves.
    IndexWriter indexWriter = new IndexWriter(directory,
            new IndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE));

    // One query per segment: flush after every stored query.
    storeQuery("0", indexWriter, termQuery("a", "0"), true, false);
    indexWriter.flush();
    storeQuery("1", indexWriter, termQuery("a", "1"), true, false);
    indexWriter.flush();
    storeQuery("2", indexWriter, termQuery("a", "2"), true, false);
    indexWriter.flush();

    ShardId shardId = new ShardId("_index", ClusterState.UNKNOWN_UUID, 0);
    // Open a near-real-time reader from the writer (the writer stays open so
    // the test can keep mutating the index below).
    IndexReader indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId);
    assertThat(indexReader.leaves().size(), equalTo(3));
    assertThat(indexReader.maxDoc(), equalTo(3));

    initialize("a", "type=keyword");

    // Warm the reader to populate the percolator query cache for all segments.
    IndexShard indexShard = mockIndexShard(Version.CURRENT, false);
    ThreadPool threadPool = mockThreadPool();
    IndexWarmer.Listener listener = cache.createListener(threadPool);
    listener.warmReader(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader)));
    assertThat(cache.getStats(shardId).getNumQueries(), equalTo(3L));

    PercolateQuery.QueryRegistry.Leaf leaf = cache.getQueries(indexReader.leaves().get(0));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "0"))));

    leaf = cache.getQueries(indexReader.leaves().get(1));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "1"))));

    leaf = cache.getQueries(indexReader.leaves().get(2));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "2"))));

    // change merge policy, so that merges will actually happen:
    indexWriter.getConfig().setMergePolicy(new TieredMergePolicy());
    indexWriter.deleteDocuments(new Term("id", "1"));
    indexWriter.forceMergeDeletes();
    // Reopen: deleting doc "1" and merging away deletes leaves two segments,
    // and the cache entry for the dropped segment must be invalidated.
    indexReader.close();
    indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId);
    assertThat(indexReader.leaves().size(), equalTo(2));
    assertThat(indexReader.maxDoc(), equalTo(2));
    listener.warmReader(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader)));
    assertThat(cache.getStats(shardId).getNumQueries(), equalTo(2L));

    leaf = cache.getQueries(indexReader.leaves().get(0));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "0"))));

    leaf = cache.getQueries(indexReader.leaves().get(1));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "2"))));

    // Force-merge down to a single segment: both surviving queries end up in
    // one leaf, and the per-segment cache entries collapse accordingly.
    indexWriter.forceMerge(1);
    indexReader.close();
    indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId);
    assertThat(indexReader.leaves().size(), equalTo(1));
    assertThat(indexReader.maxDoc(), equalTo(2));
    listener.warmReader(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader)));
    assertThat(cache.getStats(shardId).getNumQueries(), equalTo(2L));

    leaf = cache.getQueries(indexReader.leaves().get(0));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "0"))));
    assertThat(leaf.getQuery(1), equalTo(new TermQuery(new Term("a", "2"))));

    indexWriter.close();
    indexReader.close();
    directory.close();
}

From source file:org.hibernate.search.backend.lucene.work.impl.FlushIndexLuceneWork.java

License:LGPL

/**
 * Flushes the writer's buffered documents to the underlying directory,
 * translating any {@link IOException} into the backend's logged exception.
 * Note: flush does not commit/fsync the index.
 */
private void flushIndex(IndexWriter indexWriter) {
    try {
        indexWriter.flush();
    } catch (IOException ioe) {
        throw log.unableToFlushIndex(getEventContext(), ioe);
    }
}

From source file:org.sonatype.nexus.index.context.DefaultIndexingContext.java

License:Open Source License

/**
 * Writes (or rewrites) the single descriptor document that identifies this
 * indexing context, then flushes the writer.
 *
 * @throws IOException if the writer fails to update or flush
 */
private void storeDescriptor() throws IOException {
    Document descriptor = new Document();

    // Marker field used to locate the descriptor document later.
    descriptor.add(new Field(FLD_DESCRIPTOR, FLD_DESCRIPTOR_CONTENTS, Field.Store.YES, Field.Index.UN_TOKENIZED));

    // Version + repository id payload; stored only, never searched.
    descriptor.add(new Field(FLD_IDXINFO, VERSION + ArtifactInfo.FS + getRepositoryId(), Field.Store.YES,
            Field.Index.NO));

    IndexWriter writer = getIndexWriter();

    // updateDocument replaces any existing descriptor instead of duplicating it.
    writer.updateDocument(DESCRIPTOR_TERM, descriptor);

    writer.flush();
}

From source file:org.sonatype.nexus.index.context.DefaultIndexingContext.java

License:Open Source License

/**
 * Optimizes and flushes the index, always closing the writer afterwards —
 * even when optimize/flush fails.
 *
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException           on any other I/O failure
 */
public void optimize() throws CorruptIndexException, IOException {
    IndexWriter writer = getIndexWriter();

    try {
        writer.optimize();
        writer.flush();
    } finally {
        // Close unconditionally so the index lock is released.
        writer.close();
    }
}

From source file:org.sonatype.nexus.index.context.IndexUtils.java

License:Open Source License

/**
 * Replaces the groups document identified by {@code groupField:groupFieldValue}
 * with a freshly built one, then flushes the context's writer.
 *
 * @throws IOException           on write failure
 * @throws CorruptIndexException if the index is corrupt
 */
static void setGroups(IndexingContext context, Collection<String> groups, String groupField,
        String groupFieldValue, String groupListField) throws IOException, CorruptIndexException {
    Term groupsTerm = new Term(groupField, groupFieldValue);
    Document groupsDocument = createGroupsDocument(groups, groupField, groupFieldValue, groupListField);

    IndexWriter writer = context.getIndexWriter();

    // updateDocument = delete-then-add: there is at most one groups document.
    writer.updateDocument(groupsTerm, groupsDocument);

    writer.flush();
}

From source file:org.sonatype.nexus.index.DefaultIndexerEngine.java

License:Open Source License

/**
 * Re-indexes the given artifact: replaces its document (keyed by UINFO),
 * refreshes group membership, flushes, and bumps the context timestamp.
 * Does nothing when the artifact yields no document.
 *
 * @throws IOException on write failure
 */
public void update(IndexingContext context, ArtifactContext ac) throws IOException {
    Document d = ac.createDocument(context);

    if (d == null) {
        // Nothing to index for this artifact.
        return;
    }

    IndexWriter writer = context.getIndexWriter();

    writer.updateDocument(new Term(ArtifactInfo.UINFO, ac.getArtifactInfo().getUinfo()), d);

    updateGroups(context, ac);

    writer.flush();

    context.updateTimestamp();
}

From source file:org.sonatype.nexus.index.DefaultIndexerEngine.java

License:Open Source License

/**
 * Removes an artifact from the index: writes a deletion-marker document
 * (so incremental consumers can observe the removal), deletes the artifact's
 * own document by UINFO, flushes, and bumps the context timestamp.
 * A {@code null} artifact context is a no-op.
 *
 * @throws IOException on write failure
 */
public void remove(IndexingContext context, ArtifactContext ac) throws IOException {
    if (ac == null) {
        return;
    }

    String uinfo = ac.getArtifactInfo().getUinfo();

    // Deletion marker: stored-only fields recording what was removed and when.
    Document deletionMarker = new Document();
    deletionMarker.add(new Field(ArtifactInfo.DELETED, uinfo, Field.Store.YES, Field.Index.NO));
    deletionMarker.add(new Field(ArtifactInfo.LAST_MODIFIED,
            Long.toString(System.currentTimeMillis()), Field.Store.YES, Field.Index.NO));

    IndexWriter writer = context.getIndexWriter();
    writer.addDocument(deletionMarker);
    writer.deleteDocuments(new Term(ArtifactInfo.UINFO, uinfo));
    writer.flush();
    context.updateTimestamp();
}

From source file:org.sonatype.nexus.index.packer.DefaultIndexPacker.java

License:Open Source License

/**
 * Copies all live (non-deleted) documents from the reader into a new
 * legacy-format index in {@code targetdir}, converting each document via
 * {@code updateLegacyDocument}, then optimizes and flushes. The writer is
 * always closed, even on failure.
 *
 * @throws CorruptIndexException     if the source index is corrupt
 * @throws LockObtainFailedException if the target index is locked
 * @throws IOException               on any other I/O failure
 */
static void copyLegacyDocuments(IndexReader r, Directory targetdir, IndexingContext context)
        throws CorruptIndexException, LockObtainFailedException, IOException {
    IndexWriter writer = null;
    try {
        writer = new IndexWriter(targetdir, false, new NexusLegacyAnalyzer(), true);

        int maxDoc = r.maxDoc();
        for (int docId = 0; docId < maxDoc; docId++) {
            if (r.isDeleted(docId)) {
                continue; // skip tombstoned documents
            }
            writer.addDocument(updateLegacyDocument(r.document(docId), context));
        }

        writer.optimize();
        writer.flush();
    } finally {
        IndexUtils.close(writer);
    }
}

From source file:org.sonatype.nexus.index.updater.DefaultIndexUpdater.java

License:Open Source License

/**
 * Copies all live (non-deleted) documents from {@code sourcedir} into a new
 * index in {@code targetdir}, rewriting each via {@code IndexUtils.updateDocument},
 * then optimizes and flushes. Reader and writer are always closed.
 *
 * @throws CorruptIndexException     if the source index is corrupt
 * @throws LockObtainFailedException if the target index is locked
 * @throws IOException               on any other I/O failure
 */
private static void copyUpdatedDocuments(final Directory sourcedir, final Directory targetdir,
        final IndexingContext context) throws CorruptIndexException, LockObtainFailedException, IOException {
    IndexWriter w = null;
    IndexReader r = null;
    try {
        r = IndexReader.open(sourcedir);
        w = new IndexWriter(targetdir, false, new NexusAnalyzer(), true);

        int maxDoc = r.maxDoc();
        for (int docId = 0; docId < maxDoc; docId++) {
            if (r.isDeleted(docId)) {
                continue; // skip tombstoned documents
            }
            w.addDocument(IndexUtils.updateDocument(r.document(docId), context));
        }

        w.optimize();
        w.flush();
    } finally {
        // Close both ends regardless of success.
        IndexUtils.close(w);
        IndexUtils.close(r);
    }
}