// (stray non-Java header line neutralized — file proper begins with the license block below)
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.index.engine; import com.carrotsearch.randomizedtesting.LifecycleScope; import org.apache.lucene.codecs.Codec; import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexDeletionPolicy; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LiveIndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.codec.CodecService; import 
org.elasticsearch.index.deletionpolicy.KeepOnlyLastDeletionPolicy; import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy; import org.elasticsearch.index.indexing.ShardIndexingService; import org.elasticsearch.index.indexing.slowlog.ShardSlowLogIndexingService; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.internal.SourceFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.merge.policy.LogByteSizeMergePolicyProvider; import org.elasticsearch.index.merge.policy.MergePolicyProvider; import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider; import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider; import org.elasticsearch.index.settings.IndexSettingsService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.DirectoryUtils; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.distributor.LeastUsedDistributor; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.fs.FsTranslog; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ElasticsearchLuceneTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.hamcrest.MatcherAssert; import org.junit.After; import org.junit.Before; import org.junit.Test; import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; import java.util.Arrays; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import static com.carrotsearch.randomizedtesting.RandomizedTest.*; import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS; import static org.elasticsearch.test.ElasticsearchTestCase.terminate; import static org.hamcrest.Matchers.*; /** * 
Tests for ShadowEngine: a read-only engine layered over the same filesystem
directory that a separate primary InternalEngine writes to. Documents written
on the primary become visible on the shadow replica only after a primary
flush followed by a replica refresh (see the visibility tests below). */
public class ShadowEngineTests extends ElasticsearchLuceneTestCase {

    protected final ShardId shardId = new ShardId(new Index("index"), 1);

    protected ThreadPool threadPool;

    // Primary and replica stores are both opened over the same path (dirPath),
    // which is what lets the shadow engine read the primary's segments.
    private Store store;                // primary store
    private Store storeReplica;         // replica store (same underlying path)
    protected Translog translog;        // primary translog
    protected Translog replicaTranslog; // replica translog
    protected Engine primaryEngine;     // InternalEngine that performs all writes
    protected Engine replicaEngine;     // ShadowEngine under test (rejects writes)
    private Settings defaultSettings;   // randomized per-test engine settings
    private int indexConcurrency;
    private String codecName;
    private Path dirPath;               // shared directory for both stores

    /**
     * Builds a primary InternalEngine and a ShadowEngine over the same
     * directory, with randomized codec, compound-on-flush, fail-on-corruption
     * and index-concurrency settings.
     */
    @Before
    public void setUp() throws Exception {
        super.setUp();
        CodecService codecService = new CodecService(shardId.index());
        indexConcurrency = randomIntBetween(1, 20);
        String name = Codec.getDefault().getName();
        if (Arrays.asList(codecService.availableCodecs()).contains(name)) {
            // some codecs are read only so we only take the ones that we have in the service and randomly
            // selected by lucene test case.
            codecName = name;
        } else {
            codecName = "default";
        }
        defaultSettings = ImmutableSettings.builder()
                .put(EngineConfig.INDEX_COMPOUND_ON_FLUSH, randomBoolean())
                .put(EngineConfig.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us
                .put(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, randomBoolean())
                .put(EngineConfig.INDEX_CODEC_SETTING, codecName)
                .put(EngineConfig.INDEX_CONCURRENCY_SETTING, indexConcurrency)
                .build(); // TODO randomize more settings
        threadPool = new ThreadPool(getClass().getName());
        // Both stores deliberately share one temp directory — the shadow
        // replica reads the primary's files in place.
        dirPath = newTempDir(LifecycleScope.TEST).toPath();
        store = createStore(dirPath);
        storeReplica = createStore(dirPath);
        Lucene.cleanLuceneIndex(store.directory());
        Lucene.cleanLuceneIndex(storeReplica.directory());
        translog = createTranslog();
        primaryEngine = createInternalEngine(store, translog);
        // Sanity-check that the randomized codec actually reached the writer config.
        LiveIndexWriterConfig currentIndexWriterConfig = ((InternalEngine) primaryEngine).getCurrentIndexWriterConfig();
        assertEquals(primaryEngine.config().getCodec().getName(), codecService.codec(codecName).getName());
        assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName());
        if (randomBoolean()) {
            primaryEngine.config().setEnableGcDeletes(false);
        }
        replicaTranslog = createTranslogReplica();
        replicaEngine = createShadowEngine(storeReplica, replicaTranslog);
        assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName());
        if (randomBoolean()) {
            replicaEngine.config().setEnableGcDeletes(false);
        }
    }

    /** Closes engines, stores and translogs, then stops the thread pool. */
    @After
    public void tearDown() throws Exception {
        super.tearDown();
        replicaEngine.close();
        storeReplica.close();
        translog.close();
        replicaTranslog.close();
        primaryEngine.close();
        store.close();
        terminate(threadPool);
    }

    /** A fresh document with a single stored text field {@code value:"test"}. */
    private ParseContext.Document testDocumentWithTextField() {
        ParseContext.Document document = testDocument();
        document.add(new TextField("value", "test", Field.Store.YES));
        return document;
    }

    /** An empty Lucene document wrapper. */
    private ParseContext.Document testDocument() {
        return new ParseContext.Document();
    }

    /**
     * Wraps {@code document} into a ParsedDocument, adding the _uid and
     * _version fields the engine requires.
     */
    private ParsedDocument testParsedDocument(String uid, String id, String type, String routing,
                                              long timestamp, long ttl, ParseContext.Document document,
                                              BytesReference source, boolean mappingsModified) {
        Field uidField = new Field("_uid", uid, UidFieldMapper.Defaults.FIELD_TYPE);
        Field versionField = new NumericDocValuesField("_version", 0);
        document.add(uidField);
        document.add(versionField);
        return new ParsedDocument(uidField, versionField, id, type, routing, timestamp, ttl,
                Arrays.asList(document), Lucene.STANDARD_ANALYZER, source, mappingsModified);
    }

    /** Store over a mock FS directory at {@code p}. */
    protected Store createStore(Path p) throws IOException {
        return createStore(newMockFSDirectory(p.toFile()));
    }

    /** Store wrapping the given directory with a copy-then-delete rename. */
    protected Store createStore(final Directory directory) throws IOException {
        final DirectoryService directoryService = new DirectoryService(shardId, EMPTY_SETTINGS) {
            @Override
            public void renameFile(Directory dir, String from, String to) throws IOException {
                // emulate rename as copy + delete
                dir.copy(dir, from, to, IOContext.DEFAULT);
                dir.deleteFile(from);
            }
            @Override
            public Directory[] build() throws IOException {
                return new Directory[] { directory };
            }
            @Override
            public long throttleTimeInNanos() {
                return 0; // no throttling in tests
            }
        };
        return new Store(shardId, EMPTY_SETTINGS, directoryService,
                new LeastUsedDistributor(directoryService), new DummyShardLock(shardId));
    }

    // NOTE(review): both translog factories point at the same hard-coded
    // relative path — presumably harmless because the shadow replica does not
    // read its translog (see testSimpleOperations), but verify.
    protected Translog createTranslog() throws IOException {
        return new FsTranslog(shardId, EMPTY_SETTINGS, Paths.get("work/fs-translog/").toFile());
    }

    protected Translog createTranslogReplica() throws IOException {
        return new FsTranslog(shardId, EMPTY_SETTINGS, Paths.get("work/fs-translog/").toFile());
    }

    protected IndexDeletionPolicy createIndexDeletionPolicy() {
        return new KeepOnlyLastDeletionPolicy(shardId, EMPTY_SETTINGS);
    }

    protected SnapshotDeletionPolicy createSnapshotDeletionPolicy() {
        return new SnapshotDeletionPolicy(createIndexDeletionPolicy());
    }

    protected MergePolicyProvider<?> createMergePolicy() {
        // NOTE(review): uses the primary 'store' and a fresh Index("test")
        // rather than shardId.index() — looks intentional for tests; confirm.
        return new LogByteSizeMergePolicyProvider(store, new IndexSettingsService(new Index("test"), EMPTY_SETTINGS));
    }

    protected MergeSchedulerProvider createMergeScheduler(IndexSettingsService indexSettingsService) {
        return new ConcurrentMergeSchedulerProvider(shardId, EMPTY_SETTINGS, threadPool, indexSettingsService);
    }

    /** Shadow engine with the test's randomized default settings applied. */
    protected ShadowEngine createShadowEngine(Store store, Translog translog) {
        IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(),
                ImmutableSettings.builder().put(defaultSettings)
                        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
        return createShadowEngine(indexSettingsService, store, translog, createMergeScheduler(indexSettingsService));
    }

    /** Internal (writing) engine with the test's randomized default settings applied. */
    protected InternalEngine createInternalEngine(Store store, Translog translog) {
        IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(),
                ImmutableSettings.builder().put(defaultSettings)
                        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
        return createInternalEngine(indexSettingsService, store, translog, createMergeScheduler(indexSettingsService));
    }

    protected ShadowEngine createShadowEngine(IndexSettingsService indexSettingsService, Store store,
                                              Translog translog, MergeSchedulerProvider mergeSchedulerProvider) {
        return new ShadowEngine(config(indexSettingsService, store, translog, mergeSchedulerProvider));
    }

    protected InternalEngine createInternalEngine(IndexSettingsService indexSettingsService, Store store,
                                                  Translog translog, MergeSchedulerProvider mergeSchedulerProvider) {
        return new InternalEngine(config(indexSettingsService, store, translog, mergeSchedulerProvider));
    }

    /** Builds an EngineConfig shared by both engine flavors; failures are swallowed. */
    public EngineConfig config(IndexSettingsService indexSettingsService, Store store, Translog translog,
                               MergeSchedulerProvider mergeSchedulerProvider) {
        IndexWriterConfig iwc = newIndexWriterConfig(Lucene.STANDARD_ANALYZER);
        EngineConfig config = new EngineConfig(shardId, false/*per default optimization for auto generated ids is disabled*/,
                threadPool,
                new ShardIndexingService(shardId, EMPTY_SETTINGS,
                        new ShardSlowLogIndexingService(shardId, EMPTY_SETTINGS, indexSettingsService)),
                indexSettingsService, null, store, createSnapshotDeletionPolicy(), translog,
                createMergePolicy(), mergeSchedulerProvider,
                iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(shardId.index()),
                new Engine.FailedEngineListener() {
                    @Override
                    public void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable t) {
                        // we don't need to notify anybody in this test
                    }
                });
        return config;
    }

    /** Term on the _uid field for the given id. */
    protected Term newUid(String id) {
        return new Term("_uid", id);
    }

    // Tiny distinct source payloads used across tests.
    protected static final BytesReference B_1 = new BytesArray(new byte[] { 1 });
    protected static final BytesReference B_2 = new BytesArray(new byte[] { 2 });
    protected static final BytesReference B_3 = new BytesArray(new byte[] { 3 });

    // NOTE(review): unlike the other tests this method has no @Test annotation —
    // it runs only if the runner picks up test* methods by name; confirm.
    public void testCommitStats() {
        // create a doc and refresh
        ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, false);
        primaryEngine.create(new Engine.Create(null, newUid("1"), doc));

        CommitStats stats1 = replicaEngine.commitStats();
        assertThat(stats1.getGeneration(), greaterThan(0l));
        assertThat(stats1.getUserData(), hasKey(Translog.TRANSLOG_ID_KEY));

        // flush the primary engine
        primaryEngine.flush();
        // flush on replica to make flush visible
        replicaEngine.flush();

        CommitStats stats2 = replicaEngine.commitStats();
        assertThat(stats2.getGeneration(), greaterThan(stats1.getGeneration()));
        assertThat(stats2.getUserData(), hasKey(Translog.TRANSLOG_ID_KEY));
        assertThat(stats2.getUserData().get(Translog.TRANSLOG_ID_KEY),
                not(equalTo(stats1.getUserData().get(Translog.TRANSLOG_ID_KEY))));
    }

    /**
     * Segment accounting: the shadow replica reports no segments until the
     * primary flushes and the replica refreshes; thereafter both sides agree
     * on committed/search/compound flags and doc counts.
     */
    @Test
    public void testSegments() throws Exception {
        List<Segment> segments = primaryEngine.segments();
        assertThat(segments.isEmpty(), equalTo(true));
        assertThat(primaryEngine.segmentsStats().getCount(), equalTo(0l));
        assertThat(primaryEngine.segmentsStats().getMemoryInBytes(), equalTo(0l));
        final boolean defaultCompound = defaultSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, true);

        // create a doc and refresh
        ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, false);
        primaryEngine.create(new Engine.Create(null, newUid("1"), doc));
        ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), B_2, false);
        primaryEngine.create(new Engine.Create(null, newUid("2"), doc2));
        primaryEngine.refresh("test");

        segments = primaryEngine.segments();
        assertThat(segments.size(), equalTo(1));
        SegmentsStats stats = primaryEngine.segmentsStats();
        assertThat(stats.getCount(), equalTo(1l));
        assertThat(segments.get(0).isCommitted(), equalTo(false));
        assertThat(segments.get(0).isSearch(), equalTo(true));
        assertThat(segments.get(0).getNumDocs(), equalTo(2));
        assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
        assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));

        // Check that the replica sees nothing
        segments = replicaEngine.segments();
        assertThat(segments.size(), equalTo(0));
        stats = replicaEngine.segmentsStats();
        assertThat(stats.getCount(), equalTo(0l));
        assertThat(segments.size(), equalTo(0));

        // flush the primary engine
        primaryEngine.flush();
        // refresh the replica
        replicaEngine.refresh("tests");

        // Check that the primary AND replica sees segments now
        segments = primaryEngine.segments();
        assertThat(segments.size(), equalTo(1));
        assertThat(primaryEngine.segmentsStats().getCount(), equalTo(1l));
        assertThat(segments.get(0).isCommitted(), equalTo(true));
        assertThat(segments.get(0).isSearch(), equalTo(true));
        assertThat(segments.get(0).getNumDocs(), equalTo(2));
        assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
        assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));

        segments = replicaEngine.segments();
        assertThat(segments.size(), equalTo(1));
        assertThat(replicaEngine.segmentsStats().getCount(), equalTo(1l));
        assertThat(segments.get(0).isCommitted(), equalTo(true));
        assertThat(segments.get(0).isSearch(), equalTo(true));
        assertThat(segments.get(0).getNumDocs(), equalTo(2));
        assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
        assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));

        // second segment, written with compound files disabled
        primaryEngine.config().setCompoundOnFlush(false);
        ParsedDocument doc3 = testParsedDocument("3", "3", "test", null, -1, -1, testDocumentWithTextField(), B_3, false);
        primaryEngine.create(new Engine.Create(null, newUid("3"), doc3));
        primaryEngine.refresh("test");

        segments = primaryEngine.segments();
        assertThat(segments.size(), equalTo(2));
        assertThat(primaryEngine.segmentsStats().getCount(), equalTo(2l));
        assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
        assertThat(segments.get(0).isCommitted(), equalTo(true));
        assertThat(segments.get(0).isSearch(), equalTo(true));
        assertThat(segments.get(0).getNumDocs(), equalTo(2));
        assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
        assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
        assertThat(segments.get(1).isCommitted(), equalTo(false));
        assertThat(segments.get(1).isSearch(), equalTo(true));
        assertThat(segments.get(1).getNumDocs(), equalTo(1));
        assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
        assertThat(segments.get(1).isCompound(), equalTo(false));

        // Make visible to shadow replica
        primaryEngine.flush();
        replicaEngine.refresh("test");

        segments = replicaEngine.segments();
        assertThat(segments.size(), equalTo(2));
        assertThat(replicaEngine.segmentsStats().getCount(), equalTo(2l));
        assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
        assertThat(segments.get(0).isCommitted(), equalTo(true));
        assertThat(segments.get(0).isSearch(), equalTo(true));
        assertThat(segments.get(0).getNumDocs(), equalTo(2));
        assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
        assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
        assertThat(segments.get(1).isCommitted(), equalTo(true));
        assertThat(segments.get(1).isSearch(), equalTo(true));
        assertThat(segments.get(1).getNumDocs(), equalTo(1));
        assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
        assertThat(segments.get(1).isCompound(), equalTo(false));

        // delete one doc; the delete shows up as a deleted doc in segment 0
        primaryEngine.delete(new Engine.Delete("test", "1", newUid("1")));
        primaryEngine.refresh("test");

        segments = primaryEngine.segments();
        assertThat(segments.size(), equalTo(2));
        assertThat(primaryEngine.segmentsStats().getCount(), equalTo(2l));
        assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
        assertThat(segments.get(0).isCommitted(), equalTo(true));
        assertThat(segments.get(0).isSearch(), equalTo(true));
        assertThat(segments.get(0).getNumDocs(), equalTo(1));
        assertThat(segments.get(0).getDeletedDocs(), equalTo(1));
        assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
        assertThat(segments.get(1).isCommitted(), equalTo(true));
        assertThat(segments.get(1).isSearch(), equalTo(true));
        assertThat(segments.get(1).getNumDocs(), equalTo(1));
        assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
        assertThat(segments.get(1).isCompound(), equalTo(false));
        // Make
        // visible to shadow replica
        primaryEngine.flush();
        replicaEngine.refresh("test");

        // third segment, compound files re-enabled
        primaryEngine.config().setCompoundOnFlush(true);
        ParsedDocument doc4 = testParsedDocument("4", "4", "test", null, -1, -1, testDocumentWithTextField(), B_3, false);
        primaryEngine.create(new Engine.Create(null, newUid("4"), doc4));
        primaryEngine.refresh("test");

        segments = primaryEngine.segments();
        assertThat(segments.size(), equalTo(3));
        assertThat(primaryEngine.segmentsStats().getCount(), equalTo(3l));
        assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
        assertThat(segments.get(0).isCommitted(), equalTo(true));
        assertThat(segments.get(0).isSearch(), equalTo(true));
        assertThat(segments.get(0).getNumDocs(), equalTo(1));
        assertThat(segments.get(0).getDeletedDocs(), equalTo(1));
        assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
        assertThat(segments.get(1).isCommitted(), equalTo(true));
        assertThat(segments.get(1).isSearch(), equalTo(true));
        assertThat(segments.get(1).getNumDocs(), equalTo(1));
        assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
        assertThat(segments.get(1).isCompound(), equalTo(false));
        assertThat(segments.get(2).isCommitted(), equalTo(false));
        assertThat(segments.get(2).isSearch(), equalTo(true));
        assertThat(segments.get(2).getNumDocs(), equalTo(1));
        assertThat(segments.get(2).getDeletedDocs(), equalTo(0));
        assertThat(segments.get(2).isCompound(), equalTo(true));
    }

    /**
     * Write operations (create/index/delete) against the shadow engine must
     * throw UnsupportedOperationException and leave the index unchanged.
     */
    @Test
    public void testShadowEngineIgnoresWriteOperations() throws Exception {
        // create a document
        ParseContext.Document document = testDocumentWithTextField();
        document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
        ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, false);
        try {
            replicaEngine.create(new Engine.Create(null, newUid("1"), doc));
            fail("should have thrown an exception");
        } catch (UnsupportedOperationException e) {
            // expected: shadow engine rejects writes
        }
        replicaEngine.refresh("test");

        // its not there...
        Engine.Searcher searchResult = replicaEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
        searchResult.close();
        Engine.GetResult getResult = replicaEngine.get(new Engine.Get(true, newUid("1")));
        assertThat(getResult.exists(), equalTo(false));
        getResult.release();

        // index a document
        document = testDocument();
        document.add(new TextField("value", "test1", Field.Store.YES));
        doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, false);
        try {
            replicaEngine.index(new Engine.Index(null, newUid("1"), doc));
            fail("should have thrown an exception");
        } catch (UnsupportedOperationException e) {
            // expected: shadow engine rejects writes
        }
        replicaEngine.refresh("test");

        // its still not there...
        searchResult = replicaEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
        searchResult.close();
        getResult = replicaEngine.get(new Engine.Get(true, newUid("1")));
        assertThat(getResult.exists(), equalTo(false));
        getResult.release();

        // Now, add a document to the primary so we can test shadow engine deletes
        document = testDocumentWithTextField();
        document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
        doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, false);
        primaryEngine.create(new Engine.Create(null, newUid("1"), doc));
        primaryEngine.flush();
        replicaEngine.refresh("test");

        // Now the replica can see it
        searchResult = replicaEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
        searchResult.close();

        // And the replica can retrieve it
        getResult = replicaEngine.get(new Engine.Get(false, newUid("1")));
        assertThat(getResult.exists(), equalTo(true));
        assertThat(getResult.docIdAndVersion(), notNullValue());
        getResult.release();

        // try to delete it on the replica
        try {
            replicaEngine.delete(new Engine.Delete("test", "1", newUid("1")));
            fail("should have thrown an exception");
        } catch (UnsupportedOperationException e) {
            // expected: shadow engine rejects deletes too
        }
        replicaEngine.flush();
        replicaEngine.refresh("test");
        primaryEngine.refresh("test");

        // it's still there!
        searchResult = replicaEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
        searchResult.close();
        getResult = replicaEngine.get(new Engine.Get(false, newUid("1")));
        assertThat(getResult.exists(), equalTo(true));
        assertThat(getResult.docIdAndVersion(), notNullValue());
        getResult.release();

        // it's still there on the primary also!
        searchResult = primaryEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
        searchResult.close();
        getResult = primaryEngine.get(new Engine.Get(false, newUid("1")));
        assertThat(getResult.exists(), equalTo(true));
        assertThat(getResult.docIdAndVersion(), notNullValue());
        getResult.release();
    }

    /**
     * End-to-end create/update/delete cycle on the primary, verifying at each
     * step what is visible via search and realtime get on both primary and
     * shadow replica (replica only after flush + refresh).
     */
    @Test
    public void testSimpleOperations() throws Exception {
        Engine.Searcher searchResult = primaryEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
        searchResult.close();

        // create a document
        ParseContext.Document document = testDocumentWithTextField();
        document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
        ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, false);
        primaryEngine.create(new Engine.Create(null, newUid("1"), doc));

        // its not there...
        searchResult = primaryEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
        searchResult.close();

        // not on the replica either...
        searchResult = replicaEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
        searchResult.close();

        // but, we can still get it (in realtime)
        Engine.GetResult getResult = primaryEngine.get(new Engine.Get(true, newUid("1")));
        assertThat(getResult.exists(), equalTo(true));
        assertThat(getResult.source().source.toBytesArray(), equalTo(B_1.toBytesArray()));
        assertThat(getResult.docIdAndVersion(), nullValue());
        getResult.release();

        // can't get it from the replica, because it's not in the translog for a shadow replica
        getResult = replicaEngine.get(new Engine.Get(true, newUid("1")));
        assertThat(getResult.exists(), equalTo(false));
        getResult.release();

        // but, not there non realtime
        getResult = primaryEngine.get(new Engine.Get(false, newUid("1")));
        assertThat(getResult.exists(), equalTo(false));
        getResult.release();

        // refresh and it should be there
        primaryEngine.refresh("test");

        // now its there...
        searchResult = primaryEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
        searchResult.close();

        // also in non realtime
        getResult = primaryEngine.get(new Engine.Get(false, newUid("1")));
        assertThat(getResult.exists(), equalTo(true));
        assertThat(getResult.docIdAndVersion(), notNullValue());
        getResult.release();

        // still not in the replica because no flush
        searchResult = replicaEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
        searchResult.close();

        // now do an update
        document = testDocument();
        document.add(new TextField("value", "test1", Field.Store.YES));
        document.add(new Field(SourceFieldMapper.NAME, B_2.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
        doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_2, false);
        primaryEngine.index(new Engine.Index(null, newUid("1"), doc));

        // its not updated yet...
        searchResult = primaryEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
        searchResult.close();

        // but, we can still get it (in realtime)
        getResult = primaryEngine.get(new Engine.Get(true, newUid("1")));
        assertThat(getResult.exists(), equalTo(true));
        assertThat(getResult.source().source.toBytesArray(), equalTo(B_2.toBytesArray()));
        assertThat(getResult.docIdAndVersion(), nullValue());
        getResult.release();

        // refresh and it should be updated
        primaryEngine.refresh("test");

        searchResult = primaryEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
        searchResult.close();

        // flush, now shadow replica should have the files
        primaryEngine.flush();

        // still not in the replica because the replica hasn't refreshed
        searchResult = replicaEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
        searchResult.close();

        replicaEngine.refresh("test");

        // the replica finally sees it because primary has flushed and replica refreshed
        searchResult = replicaEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
        searchResult.close();

        // now delete
        primaryEngine.delete(new Engine.Delete("test", "1", newUid("1")));

        // its not deleted yet
        searchResult = primaryEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
        searchResult.close();

        // but, get should not see it (in realtime)
        getResult = primaryEngine.get(new Engine.Get(true, newUid("1")));
        assertThat(getResult.exists(), equalTo(false));
        getResult.release();

        // refresh and it should be deleted
        primaryEngine.refresh("test");
        searchResult = primaryEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
        MatcherAssert.assertThat(searchResult,
                EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
        searchResult.close();

        // add it back
        document = testDocumentWithTextField();
        document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
        doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, false);
        primaryEngine.create(new Engine.Create(null, newUid("1"), doc));

        // its not there...
searchResult = primaryEngine.acquireSearcher("test"); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher .engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher .engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); searchResult.close(); // refresh and it should be there primaryEngine.refresh("test"); // now its there... searchResult = primaryEngine.acquireSearcher("test"); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher .engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher .engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); searchResult.close(); // now flush primaryEngine.flush(); // and, verify get (in real time) getResult = primaryEngine.get(new Engine.Get(true, newUid("1"))); assertThat(getResult.exists(), equalTo(true)); assertThat(getResult.source(), nullValue()); assertThat(getResult.docIdAndVersion(), notNullValue()); getResult.release(); // the replica should see it if we refresh too! 
replicaEngine.refresh("test"); searchResult = replicaEngine.acquireSearcher("test"); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher .engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher .engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); searchResult.close(); getResult = replicaEngine.get(new Engine.Get(true, newUid("1"))); assertThat(getResult.exists(), equalTo(true)); assertThat(getResult.source(), nullValue()); assertThat(getResult.docIdAndVersion(), notNullValue()); getResult.release(); // make sure we can still work with the engine // now do an update document = testDocument(); document.add(new TextField("value", "test1", Field.Store.YES)); doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, false); primaryEngine.index(new Engine.Index(null, newUid("1"), doc)); // its not updated yet... 
        // pre-refresh: searchers still see the old "test" value for uid "1"
        searchResult = primaryEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher
                .engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher
                .engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
        searchResult.close();

        // refresh and it should be updated: the doc now matches "test1", not "test"
        primaryEngine.refresh("test");
        searchResult = primaryEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher
                .engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher
                .engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
        searchResult.close();

        // Make visible to shadow replica (flush on the primary, then refresh the replica)
        primaryEngine.flush();
        replicaEngine.refresh("test");
        searchResult = replicaEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher
                .engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher
                .engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
        searchResult.close();
    }

    /**
     * Verifies that an acquired searcher is a stable point-in-time view: a
     * replica searcher held open across a delete + flush + refresh must keep
     * seeing the deleted document until it is closed.
     */
    @Test
    public void testSearchResultRelease() throws Exception {
        Engine.Searcher searchResult = replicaEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
        searchResult.close();

        // create a document
        ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, false);
        primaryEngine.create(new Engine.Create(null, newUid("1"), doc));

        // its not there... (not flushed/refreshed yet, so invisible on both engines)
        searchResult = primaryEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher
                .engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
        searchResult.close();
        searchResult = replicaEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher
                .engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
        searchResult.close();

        // flush & refresh and it should be visible everywhere
        primaryEngine.flush();
        primaryEngine.refresh("test");
        replicaEngine.refresh("test");

        // now its there...
        searchResult = primaryEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher
                .engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
        searchResult.close();
        searchResult = replicaEngine.acquireSearcher("test");
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
        MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher
                .engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
        // don't release the replica search result yet...

        // delete, refresh and do a new search, it should not be there
        primaryEngine.delete(new Engine.Delete("test", "1", newUid("1")));
        primaryEngine.flush();
        primaryEngine.refresh("test");
        replicaEngine.refresh("test");
        Engine.Searcher updateSearchResult = primaryEngine.acquireSearcher("test");
        MatcherAssert.assertThat(updateSearchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
        updateSearchResult.close();

        // the non released replica search result should not see the deleted yet...
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher .engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); searchResult.close(); } @Test public void testFailEngineOnCorruption() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, false); primaryEngine.create(new Engine.Create(null, newUid("1"), doc)); primaryEngine.flush(); MockDirectoryWrapper leaf = DirectoryUtils.getLeaf(replicaEngine.config().getStore().directory(), MockDirectoryWrapper.class); leaf.setRandomIOExceptionRate(1.0); leaf.setRandomIOExceptionRateOnOpen(1.0); try { replicaEngine.refresh("foo"); fail("exception expected"); } catch (Exception ex) { } try { Engine.Searcher searchResult = replicaEngine.acquireSearcher("test"); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher .engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); searchResult.close(); fail("exception expected"); } catch (EngineClosedException ex) { // all is well } } /** * Random test that throws random exception and ensures all references are * counted down / released and resources are closed. 
*/ @Test public void testFailStart() throws IOException { // Need a commit point for this ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, false); primaryEngine.create(new Engine.Create(null, newUid("1"), doc)); primaryEngine.flush(); // this test fails if any reader, searcher or directory is not closed - MDW FTW final int iters = scaledRandomIntBetween(10, 100); for (int i = 0; i < iters; i++) { MockDirectoryWrapper wrapper = newMockFSDirectory(dirPath.toFile()); wrapper.setFailOnOpenInput(randomBoolean()); wrapper.setAllowRandomFileNotFoundException(randomBoolean()); wrapper.setRandomIOExceptionRate(randomDouble()); wrapper.setRandomIOExceptionRateOnOpen(randomDouble()); try (Store store = createStore(wrapper)) { int refCount = store.refCount(); assertTrue("refCount: " + store.refCount(), store.refCount() > 0); Translog translog = createTranslog(); ShadowEngine holder; try { holder = createShadowEngine(store, translog); } catch (EngineCreationFailureException ex) { assertEquals(store.refCount(), refCount); continue; } holder.config().setFailEngineOnCorruption(true); assertEquals(store.refCount(), refCount + 1); final int numStarts = scaledRandomIntBetween(1, 5); for (int j = 0; j < numStarts; j++) { try { assertEquals(store.refCount(), refCount + 1); holder.close(); holder = createShadowEngine(store, translog); holder.config().setFailEngineOnCorruption(true); assertEquals(store.refCount(), refCount + 1); } catch (EngineCreationFailureException ex) { // all is fine assertEquals(store.refCount(), refCount); break; } } translog.close(); holder.close(); assertEquals(store.refCount(), refCount); } } } @Test public void testSettings() { CodecService codecService = new CodecService(shardId.index()); assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName()); assertEquals(replicaEngine.config().getIndexConcurrency(), indexConcurrency); } @Test public void 
testShadowEngineCreationRetry() throws Exception { final Path srDir = newTempDir(LifecycleScope.TEST).toPath(); final Store srStore = createStore(srDir); Lucene.cleanLuceneIndex(srStore.directory()); final Translog srTranslog = createTranslogReplica(); final AtomicBoolean succeeded = new AtomicBoolean(false); final CountDownLatch latch = new CountDownLatch(1); // Create a shadow Engine, which will freak out because there is no // index yet Thread t = new Thread(new Runnable() { @Override public void run() { try { latch.await(); } catch (InterruptedException e) { // ignore interruptions } try (ShadowEngine srEngine = createShadowEngine(srStore, srTranslog)) { succeeded.set(true); } catch (Exception e) { fail("should have been able to create the engine!"); } } }); t.start(); // count down latch // now shadow engine should try to be created latch.countDown(); // Create an InternalEngine, which creates the index so the shadow // replica will handle it correctly Store pStore = createStore(srDir); Translog pTranslog = createTranslog(); InternalEngine pEngine = createInternalEngine(pStore, pTranslog); // create a document ParseContext.Document document = testDocumentWithTextField(); document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE)); ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, false); pEngine.create(new Engine.Create(null, newUid("1"), doc)); pEngine.flush(true, true); t.join(); assertTrue("ShadowEngine should have been able to be created", succeeded.get()); // (shadow engine is already shut down in the try-with-resources) IOUtils.close(srTranslog, srStore, pTranslog, pEngine, pStore); } }