Example usage for org.apache.lucene.index IndexWriter deleteDocuments

Introduction

On this page you can find usage examples for the deleteDocuments method of org.apache.lucene.index.IndexWriter.

Prototype

public long deleteDocuments(Query... queries) throws IOException 

Document

Deletes the document(s) matching any of the provided queries.
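
A minimal, self-contained sketch of calling this method (the index path, field names, and values here are illustrative assumptions, not taken from the examples below; it also shows the companion Term... overload that several of the examples use):

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class DeleteDocumentsSketch {
    public static void main(String[] args) throws IOException {
        // hypothetical index directory and field names, for illustration only
        try (Directory directory = FSDirectory.open(Paths.get("example-index"));
                IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()))) {
            // Query... overload from the prototype above: deletes every document matching the query
            writer.deleteDocuments(new TermQuery(new Term("status", "obsolete")));
            // Term... overload: deletes every document containing the exact term
            writer.deleteDocuments(new Term("id", "42"));
            // deletes become visible to readers only after a commit or a reader reopen
            writer.commit();
        }
    }
}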

Usage

From source file: org.elasticsearch.index.engine.internal.AsynchronousEngine.java

License: Apache License

@Override
public void delete(DeleteByQuery delete) throws EngineException {
    try (InternalLock _ = readLock.acquire()) {
        IndexWriter writer = this.indexWriter;
        if (writer == null) {
            throw new EngineClosedException(shardId);
        }

        Query query;
        if (delete.nested() && delete.aliasFilter() != null) {
            query = new IncludeNestedDocsQuery(new XFilteredQuery(delete.query(), delete.aliasFilter()),
                    delete.parentFilter());
        } else if (delete.nested()) {
            query = new IncludeNestedDocsQuery(delete.query(), delete.parentFilter());
        } else if (delete.aliasFilter() != null) {
            query = new XFilteredQuery(delete.query(), delete.aliasFilter());
        } else {
            query = delete.query();
        }

        writer.deleteDocuments(query);
        translog.add(new Translog.DeleteByQuery(delete));
        dirty = true;
        possibleMergeNeeded = true;
        flushNeeded = true;
    } catch (Throwable t) {
        maybeFailEngine(t, "delete_by_query");
        throw new DeleteByQueryFailedEngineException(shardId, delete, t);
    }

    // TODO: This is heavy, since we refresh, but we must do this because we don't know which documents were in fact deleted (i.e., our
    // versionMap isn't updated), so we must force a cutover to a new reader to "see" the deletions:
    refresh(new Refresh("delete_by_query").force(true));
}

From source file: org.elasticsearch.index.engine.internal.InternalEngine.java

License: Apache License

private void innerDelete(Delete delete, IndexWriter writer) throws IOException {
    synchronized (dirtyLock(delete.uid())) {
        final long currentVersion;
        HashedBytesRef versionKey = versionKey(delete.uid());
        VersionValue versionValue = versionMap.get(versionKey);
        if (versionValue == null) {
            currentVersion = loadCurrentVersionFromIndex(delete.uid());
        } else {
            if (enableGcDeletes && versionValue.delete()
                    && (threadPool.estimatedTimeInMillis() - versionValue.time()) > gcDeletesInMillis) {
                currentVersion = Versions.NOT_FOUND; // deleted, and GC
            } else {
                currentVersion = versionValue.version();
            }
        }

        long updatedVersion;
        long expectedVersion = delete.version();
        if (delete.origin() == Operation.Origin.PRIMARY) {
            if (delete.versionType().isVersionConflict(currentVersion, expectedVersion)) {
                throw new VersionConflictEngineException(shardId, delete.type(), delete.id(), currentVersion,
                        expectedVersion);
            }

            updatedVersion = delete.versionType().updateVersion(currentVersion, expectedVersion);

        } else { // if (delete.origin() == Operation.Origin.REPLICA || delete.origin() == Operation.Origin.RECOVERY) {
            // replicas treat the version as "external" as it comes from the primary ->
            // only exploding if the version they got is lower or equal to what they know.
            if (VersionType.EXTERNAL.isVersionConflict(currentVersion, expectedVersion)) {
                if (delete.origin() == Operation.Origin.RECOVERY) {
                    return;
                } else {
                    throw new VersionConflictEngineException(shardId, delete.type(), delete.id(),
                            currentVersion - 1, expectedVersion);
                }
            }
            updatedVersion = VersionType.EXTERNAL.updateVersion(currentVersion, expectedVersion);
        }

        if (currentVersion == Versions.NOT_FOUND) {
            // doc does not exist and no prior deletes
            delete.version(updatedVersion).found(false);
            Translog.Location translogLocation = translog.add(new Translog.Delete(delete));
            versionMap.put(versionKey, new VersionValue(updatedVersion, true,
                    threadPool.estimatedTimeInMillis(), translogLocation));
        } else if (versionValue != null && versionValue.delete()) {
            // a "delete on delete", in this case, we still increment the version, log it, and return that version
            delete.version(updatedVersion).found(false);
            Translog.Location translogLocation = translog.add(new Translog.Delete(delete));
            versionMap.put(versionKey, new VersionValue(updatedVersion, true,
                    threadPool.estimatedTimeInMillis(), translogLocation));
        } else {
            delete.version(updatedVersion).found(true);
            writer.deleteDocuments(delete.uid());
            Translog.Location translogLocation = translog.add(new Translog.Delete(delete));
            versionMap.put(versionKey, new VersionValue(updatedVersion, true,
                    threadPool.estimatedTimeInMillis(), translogLocation));
        }

        indexingService.postDeleteUnderLock(delete);
    }
}

From source file: org.elasticsearch.index.engine.internal.InternalEngine.java

License: Apache License

@Override
public void delete(DeleteByQuery delete) throws EngineException {
    rwl.readLock().lock();
    try {
        IndexWriter writer = this.indexWriter;
        if (writer == null) {
            throw new EngineClosedException(shardId);
        }

        Query query;
        if (delete.nested() && delete.aliasFilter() != null) {
            query = new IncludeNestedDocsQuery(new XFilteredQuery(delete.query(), delete.aliasFilter()),
                    delete.parentFilter());
        } else if (delete.nested()) {
            query = new IncludeNestedDocsQuery(delete.query(), delete.parentFilter());
        } else if (delete.aliasFilter() != null) {
            query = new XFilteredQuery(delete.query(), delete.aliasFilter());
        } else {
            query = delete.query();
        }

        writer.deleteDocuments(query);
        translog.add(new Translog.DeleteByQuery(delete));
        dirty = true;
        possibleMergeNeeded = true;
        flushNeeded = true;
    } catch (IOException e) {
        throw new DeleteByQueryFailedEngineException(shardId, delete, e);
    } finally {
        rwl.readLock().unlock();
    }
    //TODO: This is heavy, since we refresh, but we really have to...
    refreshVersioningTable(System.currentTimeMillis());
}

From source file: org.elasticsearch.index.engine.robin.RobinEngine.java

License: Apache License

private void innerDelete(Delete delete, IndexWriter writer) throws IOException {
    synchronized (dirtyLock(delete.uid())) {
        if (delete.origin() == Operation.Origin.RECOVERY) {
            writer.deleteDocuments(delete.uid());
            Translog.Location translogLocation = translog.add(new Translog.Delete(delete));
            // update the version with the exact version from recovery, assuming we have it
            if (delete.version() != 0) {
                versionMap.put(delete.uid().text(), new VersionValue(delete.version(), true,
                        threadPool.estimatedTimeInMillis(), translogLocation));
            }
        } else {
            long currentVersion;
            VersionValue versionValue = versionMap.get(delete.uid().text());
            if (versionValue == null) {
                currentVersion = loadCurrentVersionFromIndex(delete.uid());
            } else {
                if (versionValue.delete()
                        && (threadPool.estimatedTimeInMillis() - versionValue.time()) > gcDeletesInMillis) {
                    currentVersion = -1; // deleted, and GC
                } else {
                    currentVersion = versionValue.version();
                }
            }

            long updatedVersion;
            if (delete.origin() == Operation.Origin.PRIMARY) {
                if (delete.versionType() == VersionType.INTERNAL) { // internal version type
                    if (delete.version() != 0 && currentVersion != -2) { // -2 means we don't have a version, so ignore...
                        // an explicit version is provided, see if there is a conflict
                        // if the current version is -1, means we did not find anything, and
                        // a version is provided, so we do expect to find a doc under that version
                        if (currentVersion == -1) {
                            throw new VersionConflictEngineException(shardId, delete.type(), delete.id(), -1,
                                    delete.version());
                        } else if (delete.version() != currentVersion) {
                            throw new VersionConflictEngineException(shardId, delete.type(), delete.id(),
                                    currentVersion, delete.version());
                        }
                    }
                    updatedVersion = currentVersion < 0 ? 1 : currentVersion + 1;
                } else { // External
                    if (currentVersion == -1) {
                        throw new VersionConflictEngineException(shardId, delete.type(), delete.id(), -1,
                                delete.version());
                    } else if (currentVersion >= delete.version()) {
                        throw new VersionConflictEngineException(shardId, delete.type(), delete.id(),
                                currentVersion, delete.version());
                    }
                    updatedVersion = delete.version();
                }
            } else { // if (delete.origin() == Operation.Origin.REPLICA) {
                // on replica, the version is the future value expected (returned from the operation on the primary)
                if (currentVersion != -2) { // -2 means we don't have a version in the index, ignore
                    // only check if we have a version for it, otherwise, ignore (see later)
                    if (currentVersion != -1) {
                        // with replicas, we only check for previous version, we allow to set a future version
                        if (delete.version() <= currentVersion) {
                            throw new VersionConflictEngineException(shardId, delete.type(), delete.id(),
                                    currentVersion - 1, delete.version());
                        }
                    }
                }
                // replicas already hold the "future" version
                updatedVersion = delete.version();
            }

            if (currentVersion == -1) {
                // if the doc does not exist, just respond with version 0 and mark it not found
                delete.version(0).notFound(true);
            } else if (versionValue != null && versionValue.delete()) {
                // if its a delete on delete and we have the current delete version, return it
                delete.version(versionValue.version()).notFound(true);
            } else {
                delete.version(updatedVersion);
                writer.deleteDocuments(delete.uid());
                Translog.Location translogLocation = translog.add(new Translog.Delete(delete));
                versionMap.put(delete.uid().text(), new VersionValue(updatedVersion, true,
                        threadPool.estimatedTimeInMillis(), translogLocation));
            }
        }
    }
}

From source file: org.elasticsearch.index.engine.robin.RobinEngine.java

License: Apache License

@Override
public void delete(DeleteByQuery delete) throws EngineException {
    rwl.readLock().lock();
    try {
        IndexWriter writer = this.indexWriter;
        if (writer == null) {
            throw new EngineClosedException(shardId);
        }
        Query query;
        if (delete.aliasFilter() == null) {
            query = delete.query();
        } else {
            query = new FilteredQuery(delete.query(), delete.aliasFilter());
        }
        writer.deleteDocuments(query);
        translog.add(new Translog.DeleteByQuery(delete));
        dirty = true;
        possibleMergeNeeded = true;
        flushNeeded = true;
    } catch (IOException e) {
        throw new DeleteByQueryFailedEngineException(shardId, delete, e);
    } finally {
        rwl.readLock().unlock();
    }
    //TODO: This is heavy, since we refresh, but we really have to...
    refreshVersioningTable(System.currentTimeMillis());
}

From source file: org.elasticsearch.index.percolator.PercolatorQueryCacheTests.java

License: Apache License

public void testLoadQueries() throws Exception {
    Directory directory = newDirectory();
    IndexWriter indexWriter = new IndexWriter(directory,
            new IndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE));

    boolean legacyFormat = randomBoolean();
    Version version = legacyFormat ? Version.V_2_0_0 : Version.CURRENT;
    IndexShard indexShard = mockIndexShard(version, legacyFormat);

    storeQuery("0", indexWriter, termQuery("field1", "value1"), true, legacyFormat);
    storeQuery("1", indexWriter, wildcardQuery("field1", "v*"), true, legacyFormat);
    storeQuery("2", indexWriter,
            boolQuery().must(termQuery("field1", "value1")).must(termQuery("field2", "value2")), true,
            legacyFormat);
    // dummy docs should be skipped during loading:
    Document doc = new Document();
    doc.add(new StringField("dummy", "value", Field.Store.YES));
    indexWriter.addDocument(doc);
    storeQuery("4", indexWriter, termQuery("field2", "value2"), true, legacyFormat);
    // only documents of the .percolator type should be loaded:
    storeQuery("5", indexWriter, termQuery("field2", "value2"), false, legacyFormat);
    storeQuery("6", indexWriter, termQuery("field3", "value3"), true, legacyFormat);
    indexWriter.forceMerge(1);

    // also include queries for percolator docs marked as deleted:
    indexWriter.deleteDocuments(new Term("id", "6"));
    indexWriter.close();

    ShardId shardId = new ShardId("_index", ClusterState.UNKNOWN_UUID, 0);
    IndexReader indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(directory), shardId);
    assertThat(indexReader.leaves().size(), equalTo(1));
    assertThat(indexReader.numDeletedDocs(), equalTo(1));
    assertThat(indexReader.maxDoc(), equalTo(7));

    initialize("field1", "type=keyword", "field2", "type=keyword", "field3", "type=keyword");

    PercolatorQueryCache.QueriesLeaf leaf = cache.loadQueries(indexReader.leaves().get(0), indexShard);
    assertThat(leaf.queries.size(), equalTo(5));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("field1", "value1"))));
    assertThat(leaf.getQuery(1), equalTo(new WildcardQuery(new Term("field1", "v*"))));
    assertThat(leaf.getQuery(2),
            equalTo(new BooleanQuery.Builder()
                    .add(new TermQuery(new Term("field1", "value1")), BooleanClause.Occur.MUST)
                    .add(new TermQuery(new Term("field2", "value2")), BooleanClause.Occur.MUST).build()));
    assertThat(leaf.getQuery(4), equalTo(new TermQuery(new Term("field2", "value2"))));
    assertThat(leaf.getQuery(6), equalTo(new TermQuery(new Term("field3", "value3"))));

    indexReader.close();
    directory.close();
}

From source file: org.elasticsearch.index.percolator.PercolatorQueryCacheTests.java

License: Apache License

public void testInvalidateEntries() throws Exception {
    Directory directory = newDirectory();
    IndexWriter indexWriter = new IndexWriter(directory,
            new IndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE));

    storeQuery("0", indexWriter, termQuery("a", "0"), true, false);
    indexWriter.flush();
    storeQuery("1", indexWriter, termQuery("a", "1"), true, false);
    indexWriter.flush();
    storeQuery("2", indexWriter, termQuery("a", "2"), true, false);
    indexWriter.flush();

    ShardId shardId = new ShardId("_index", ClusterState.UNKNOWN_UUID, 0);
    IndexReader indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId);
    assertThat(indexReader.leaves().size(), equalTo(3));
    assertThat(indexReader.maxDoc(), equalTo(3));

    initialize("a", "type=keyword");

    IndexShard indexShard = mockIndexShard(Version.CURRENT, false);
    ThreadPool threadPool = mockThreadPool();
    IndexWarmer.Listener listener = cache.createListener(threadPool);
    listener.warmReader(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader)));
    assertThat(cache.getStats(shardId).getNumQueries(), equalTo(3L));

    PercolateQuery.QueryRegistry.Leaf leaf = cache.getQueries(indexReader.leaves().get(0));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "0"))));

    leaf = cache.getQueries(indexReader.leaves().get(1));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "1"))));

    leaf = cache.getQueries(indexReader.leaves().get(2));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "2"))));

    // change merge policy, so that merges will actually happen:
    indexWriter.getConfig().setMergePolicy(new TieredMergePolicy());
    indexWriter.deleteDocuments(new Term("id", "1"));
    indexWriter.forceMergeDeletes();
    indexReader.close();
    indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId);
    assertThat(indexReader.leaves().size(), equalTo(2));
    assertThat(indexReader.maxDoc(), equalTo(2));
    listener.warmReader(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader)));
    assertThat(cache.getStats(shardId).getNumQueries(), equalTo(2L));

    leaf = cache.getQueries(indexReader.leaves().get(0));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "0"))));

    leaf = cache.getQueries(indexReader.leaves().get(1));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "2"))));

    indexWriter.forceMerge(1);
    indexReader.close();
    indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId);
    assertThat(indexReader.leaves().size(), equalTo(1));
    assertThat(indexReader.maxDoc(), equalTo(2));
    listener.warmReader(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader)));
    assertThat(cache.getStats(shardId).getNumQueries(), equalTo(2L));

    leaf = cache.getQueries(indexReader.leaves().get(0));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "0"))));
    assertThat(leaf.getQuery(1), equalTo(new TermQuery(new Term("a", "2"))));

    indexWriter.close();
    indexReader.close();
    directory.close();
}

From source file: org.elasticsearch.index.store.StoreTest.java

License: Apache License

@Test
public void testRecoveryDiff() throws IOException, InterruptedException {
    int numDocs = 2 + random().nextInt(100);
    List<Document> docs = new ArrayList<>();
    for (int i = 0; i < numDocs; i++) {
        Document doc = new Document();
        doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()),
                random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        docs.add(doc);
    }
    long seed = random().nextLong();
    Store.MetadataSnapshot first;
    {
        Random random = new Random(seed);
        IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
                .setCodec(actualDefaultCodec());
        iwc.setMergePolicy(NoMergePolicy.INSTANCE);
        iwc.setUseCompoundFile(random.nextBoolean());
        iwc.setMaxThreadStates(1);
        final ShardId shardId = new ShardId(new Index("index"), 1);
        DirectoryService directoryService = new LuceneManagedDirectoryService(random);
        Store store = new Store(shardId, ImmutableSettings.EMPTY, directoryService,
                randomDistributor(random, directoryService), new DummyShardLock(shardId));
        IndexWriter writer = new IndexWriter(store.directory(), iwc);
        final boolean lotsOfSegments = rarely(random);
        for (Document d : docs) {
            writer.addDocument(d);
            if (lotsOfSegments && random.nextBoolean()) {
                writer.commit();
            } else if (rarely(random)) {
                writer.commit();
            }
        }
        writer.close();
        first = store.getMetadata();
        assertDeleteContent(store, directoryService);
        store.close();
    }
    long time = new Date().getTime();
    while (time == new Date().getTime()) {
        Thread.sleep(10); // bump the time
    }
    Store.MetadataSnapshot second;
    Store store;
    {
        Random random = new Random(seed);
        IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
                .setCodec(actualDefaultCodec());
        iwc.setMergePolicy(NoMergePolicy.INSTANCE);
        iwc.setUseCompoundFile(random.nextBoolean());
        iwc.setMaxThreadStates(1);
        final ShardId shardId = new ShardId(new Index("index"), 1);
        DirectoryService directoryService = new LuceneManagedDirectoryService(random);
        store = new Store(shardId, ImmutableSettings.EMPTY, directoryService,
                randomDistributor(random, directoryService), new DummyShardLock(shardId));
        IndexWriter writer = new IndexWriter(store.directory(), iwc);
        final boolean lotsOfSegments = rarely(random);
        for (Document d : docs) {
            writer.addDocument(d);
            if (lotsOfSegments && random.nextBoolean()) {
                writer.commit();
            } else if (rarely(random)) {
                writer.commit();
            }
        }
        writer.close();
        second = store.getMetadata();
    }
    Store.RecoveryDiff diff = first.recoveryDiff(second);
    assertThat(first.size(), equalTo(second.size()));
    for (StoreFileMetaData md : first) {
        assertThat(second.get(md.name()), notNullValue());
        // si files are different - containing timestamps etc
        assertThat(second.get(md.name()).isSame(md), equalTo(md.name().endsWith(".si") == false));
    }
    assertThat(diff.different.size(), equalTo(first.size() - 1));
    assertThat(diff.identical.size(), equalTo(1)); // commit point is identical
    assertThat(diff.missing, empty());

    // check the self diff
    Store.RecoveryDiff selfDiff = first.recoveryDiff(first);
    assertThat(selfDiff.identical.size(), equalTo(first.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());

    // let's add some deletes
    Random random = new Random(seed);
    IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
            .setCodec(actualDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setUseCompoundFile(random.nextBoolean());
    iwc.setMaxThreadStates(1);
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    IndexWriter writer = new IndexWriter(store.directory(), iwc);
    writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(numDocs))));
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata();
    StoreFileMetaData delFile = null;
    for (StoreFileMetaData md : metadata) {
        if (md.name().endsWith(".del")) {
            delFile = md;
            break;
        }
    }
    Store.RecoveryDiff afterDeleteDiff = metadata.recoveryDiff(second);
    if (delFile != null) {
        assertThat(afterDeleteDiff.identical.size(), equalTo(metadata.size() - 2)); // segments_N + del file
        assertThat(afterDeleteDiff.different.size(), equalTo(0));
        assertThat(afterDeleteDiff.missing.size(), equalTo(2));
    } else {
        // an entire segment must be missing (single doc segment got dropped)
        assertThat(afterDeleteDiff.identical.size(), greaterThan(0));
        assertThat(afterDeleteDiff.different.size(), equalTo(0));
        assertThat(afterDeleteDiff.missing.size(), equalTo(1)); // the commit file is different
    }

    // check the self diff
    selfDiff = metadata.recoveryDiff(metadata);
    assertThat(selfDiff.identical.size(), equalTo(metadata.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());

    // add a new commit
    iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodec(actualDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setUseCompoundFile(true); // force CFS - easier to test here since we know it will add 3 files
    iwc.setMaxThreadStates(1);
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    writer = new IndexWriter(store.directory(), iwc);
    writer.addDocument(docs.get(0));
    writer.close();

    Store.MetadataSnapshot newCommitMetaData = store.getMetadata();
    Store.RecoveryDiff newCommitDiff = newCommitMetaData.recoveryDiff(metadata);
    if (delFile != null) {
        assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 5)); // segments_N, del file, cfs, cfe, si for the new segment
        assertThat(newCommitDiff.different.size(), equalTo(1)); // the del file must be different
        assertThat(newCommitDiff.different.get(0).name(), endsWith(".del"));
        assertThat(newCommitDiff.missing.size(), equalTo(4)); // segments_N,cfs, cfe, si for the new segment
    } else {
        assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 4)); // segments_N, cfs, cfe, si for the new segment
        assertThat(newCommitDiff.different.size(), equalTo(0));
        assertThat(newCommitDiff.missing.size(), equalTo(4)); // an entire segment must be missing (single doc segment got dropped)  plus the commit is different
    }

    deleteContent(store.directory());
    IOUtils.close(store);
}

From source file: org.elasticsearch.index.store.StoreTests.java

License: Apache License

@Test
public void testRecoveryDiff() throws IOException, InterruptedException {
    int numDocs = 2 + random().nextInt(100);
    List<Document> docs = new ArrayList<>();
    for (int i = 0; i < numDocs; i++) {
        Document doc = new Document();
        doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()),
                random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
        docs.add(doc);
    }
    long seed = random().nextLong();
    Store.MetadataSnapshot first;
    {
        Random random = new Random(seed);
        IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random))
                .setCodec(TestUtil.getDefaultCodec());
        iwc.setMergePolicy(NoMergePolicy.INSTANCE);
        iwc.setUseCompoundFile(random.nextBoolean());
        final ShardId shardId = new ShardId(new Index("index"), 1);
        DirectoryService directoryService = new LuceneManagedDirectoryService(random);
        Store store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId));
        IndexWriter writer = new IndexWriter(store.directory(), iwc);
        final boolean lotsOfSegments = rarely(random);
        for (Document d : docs) {
            writer.addDocument(d);
            if (lotsOfSegments && random.nextBoolean()) {
                writer.commit();
            } else if (rarely(random)) {
                writer.commit();
            }
        }
        writer.commit();
        writer.close();
        first = store.getMetadata();
        assertDeleteContent(store, directoryService);
        store.close();
    }
    long time = new Date().getTime();
    while (time == new Date().getTime()) {
        Thread.sleep(10); // bump the time
    }
    Store.MetadataSnapshot second;
    Store store;
    {
        Random random = new Random(seed);
        IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random))
                .setCodec(TestUtil.getDefaultCodec());
        iwc.setMergePolicy(NoMergePolicy.INSTANCE);
        iwc.setUseCompoundFile(random.nextBoolean());
        final ShardId shardId = new ShardId(new Index("index"), 1);
        DirectoryService directoryService = new LuceneManagedDirectoryService(random);
        store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId));
        IndexWriter writer = new IndexWriter(store.directory(), iwc);
        final boolean lotsOfSegments = rarely(random);
        for (Document d : docs) {
            writer.addDocument(d);
            if (lotsOfSegments && random.nextBoolean()) {
                writer.commit();
            } else if (rarely(random)) {
                writer.commit();
            }
        }
        writer.commit();
        writer.close();
        second = store.getMetadata();
    }
    Store.RecoveryDiff diff = first.recoveryDiff(second);
    assertThat(first.size(), equalTo(second.size()));
    for (StoreFileMetaData md : first) {
        assertThat(second.get(md.name()), notNullValue());
        // si files are different - containing timestamps etc
        assertThat(second.get(md.name()).isSame(md), equalTo(false));
    }
    assertThat(diff.different.size(), equalTo(first.size()));
    assertThat(diff.identical.size(), equalTo(0)); // in lucene 5 nothing is identical - we use random ids in file headers
    assertThat(diff.missing, empty());

    // check the self diff
    Store.RecoveryDiff selfDiff = first.recoveryDiff(first);
    assertThat(selfDiff.identical.size(), equalTo(first.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());

    // let's add some deletes
    Random random = new Random(seed);
    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random))
            .setCodec(TestUtil.getDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setUseCompoundFile(random.nextBoolean());
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    IndexWriter writer = new IndexWriter(store.directory(), iwc);
    writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(numDocs))));
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata();
    StoreFileMetaData delFile = null;
    for (StoreFileMetaData md : metadata) {
        if (md.name().endsWith(".liv")) {
            delFile = md;
            break;
        }
    }
    Store.RecoveryDiff afterDeleteDiff = metadata.recoveryDiff(second);
    if (delFile != null) {
        assertThat(afterDeleteDiff.identical.size(), equalTo(metadata.size() - 2)); // segments_N + del file
        assertThat(afterDeleteDiff.different.size(), equalTo(0));
        assertThat(afterDeleteDiff.missing.size(), equalTo(2));
    } else {
        // an entire segment must be missing (single doc segment got dropped)
        assertThat(afterDeleteDiff.identical.size(), greaterThan(0));
        assertThat(afterDeleteDiff.different.size(), equalTo(0));
        assertThat(afterDeleteDiff.missing.size(), equalTo(1)); // the commit file is different
    }

    // check the self diff
    selfDiff = metadata.recoveryDiff(metadata);
    assertThat(selfDiff.identical.size(), equalTo(metadata.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());

    // add a new commit
    iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setUseCompoundFile(true); // force CFS - easier to test here since we know it will add 3 files
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    writer = new IndexWriter(store.directory(), iwc);
    writer.addDocument(docs.get(0));
    writer.close();

    Store.MetadataSnapshot newCommitMetaData = store.getMetadata();
    Store.RecoveryDiff newCommitDiff = newCommitMetaData.recoveryDiff(metadata);
    if (delFile != null) {
        assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 5)); // segments_N, del file, cfs, cfe, si for the new segment
        assertThat(newCommitDiff.different.size(), equalTo(1)); // the del file must be different
        assertThat(newCommitDiff.different.get(0).name(), endsWith(".liv"));
        assertThat(newCommitDiff.missing.size(), equalTo(4)); // segments_N,cfs, cfe, si for the new segment
    } else {
        assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 4)); // segments_N, cfs, cfe, si for the new segment
        assertThat(newCommitDiff.different.size(), equalTo(0));
        assertThat(newCommitDiff.missing.size(), equalTo(4)); // an entire segment must be missing (single doc segment got dropped)  plus the commit is different
    }

    deleteContent(store.directory());
    IOUtils.close(store);
}

From source file: org.elasticsearch.test.unit.common.lucene.uid.UidFieldTests.java

License: Apache License

@Test
public void testUidField() throws Exception {
    IndexWriter writer = new IndexWriter(new RAMDirectory(),
            new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));

    DirectoryReader directoryReader = DirectoryReader.open(writer, true);
    AtomicReader atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
    MatcherAssert.assertThat(UidField.loadVersion(atomicReader.getContext(), new Term("_uid", "1")),
            equalTo(-1l));

    Document doc = new Document();
    doc.add(new Field("_uid", "1", UidFieldMapper.Defaults.FIELD_TYPE));
    writer.addDocument(doc);
    directoryReader = DirectoryReader.openIfChanged(directoryReader);
    atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
    assertThat(UidField.loadVersion(atomicReader.getContext(), new Term("_uid", "1")), equalTo(-2l));
    assertThat(UidField.loadDocIdAndVersion(atomicReader.getContext(), new Term("_uid", "1")).version,
            equalTo(-2l));

    doc = new Document();
    doc.add(new UidField("_uid", "1", 1));
    writer.updateDocument(new Term("_uid", "1"), doc);
    directoryReader = DirectoryReader.openIfChanged(directoryReader);
    atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
    assertThat(UidField.loadVersion(atomicReader.getContext(), new Term("_uid", "1")), equalTo(1l));
    assertThat(UidField.loadDocIdAndVersion(atomicReader.getContext(), new Term("_uid", "1")).version,
            equalTo(1l));

    doc = new Document();
    UidField uid = new UidField("_uid", "1", 2);
    doc.add(uid);
    writer.updateDocument(new Term("_uid", "1"), doc);
    directoryReader = DirectoryReader.openIfChanged(directoryReader);
    atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
    assertThat(UidField.loadVersion(atomicReader.getContext(), new Term("_uid", "1")), equalTo(2l));
    assertThat(UidField.loadDocIdAndVersion(atomicReader.getContext(), new Term("_uid", "1")).version,
            equalTo(2l));

    // test reuse of uid field
    doc = new Document();
    uid.version(3);
    doc.add(uid);
    writer.updateDocument(new Term("_uid", "1"), doc);
    directoryReader = DirectoryReader.openIfChanged(directoryReader);
    atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
    assertThat(UidField.loadVersion(atomicReader.getContext(), new Term("_uid", "1")), equalTo(3l));
    assertThat(UidField.loadDocIdAndVersion(atomicReader.getContext(), new Term("_uid", "1")).version,
            equalTo(3l));

    writer.deleteDocuments(new Term("_uid", "1"));
    directoryReader = DirectoryReader.openIfChanged(directoryReader);
    atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
    assertThat(UidField.loadVersion(atomicReader.getContext(), new Term("_uid", "1")), equalTo(-1l));
    assertThat(UidField.loadDocIdAndVersion(atomicReader.getContext(), new Term("_uid", "1")), nullValue());
}