Example usage for org.apache.lucene.index RandomIndexWriter deleteDocuments

List of usage examples for org.apache.lucene.index RandomIndexWriter deleteDocuments

Introduction

On this page you can find example usages of org.apache.lucene.index.RandomIndexWriter.deleteDocuments.

Prototype

public long deleteDocuments(Query q) throws IOException 
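
For orientation, here is a minimal, self-contained sketch of the call (not taken from any of the source files below; it assumes the Lucene test framework helpers newDirectory(), random() and newStringField() inherited from LuceneTestCase):

public void deleteByQuerySketch() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

    // index two documents with different ids
    Document doc = new Document();
    doc.add(newStringField("id", "1", Field.Store.YES));
    writer.addDocument(doc);

    doc = new Document();
    doc.add(newStringField("id", "2", Field.Store.YES));
    writer.addDocument(doc);

    // delete every document matching id:1
    writer.deleteDocuments(new TermQuery(new Term("id", "1")));

    // readers obtained after the delete no longer see the deleted document
    IndexReader reader = writer.getReader();
    writer.close();
    reader.close();
    dir.close();
}

Note that RandomIndexWriter also offers a deleteDocuments(Term term) overload, which several of the examples below use.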

Usage

From source file: org.apache.solr.search.TestQueryWrapperFilter.java

License: Apache License

public void testRandom() throws Exception {
    final Directory d = newDirectory();
    final RandomIndexWriter w = new RandomIndexWriter(random(), d);
    w.w.getConfig().setMaxBufferedDocs(17);
    final int numDocs = atLeast(100);
    final Set<String> aDocs = new HashSet<>();
    for (int i = 0; i < numDocs; i++) {
        final Document doc = new Document();
        final String v;
        if (random().nextInt(5) == 4) {
            v = "a";
            aDocs.add("" + i);
        } else {
            v = "b";
        }
        final Field f = newStringField("field", v, Field.Store.NO);
        doc.add(f);
        doc.add(newStringField("id", "" + i, Field.Store.YES));
        w.addDocument(doc);
    }

    final int numDelDocs = atLeast(10);
    for (int i = 0; i < numDelDocs; i++) {
        final String delID = "" + random().nextInt(numDocs);
        w.deleteDocuments(new Term("id", delID));
        aDocs.remove(delID);
    }

    final IndexReader r = w.getReader();
    w.close();
    final TopDocs hits = newSearcher(r).search(new QueryWrapperFilter(new TermQuery(new Term("field", "a"))),
            numDocs);
    assertEquals(aDocs.size(), hits.totalHits);
    for (ScoreDoc sd : hits.scoreDocs) {
        assertTrue(aDocs.contains(r.document(sd.doc).get("id")));
    }
    r.close();
    d.close();
}

From source file: org.apache.solr.search.TestStressLucene.java

License: Apache License

@Test
public void testStressLuceneNRT() throws Exception {
    final int commitPercent = 5 + random().nextInt(20);
    final int softCommitPercent = 30 + random().nextInt(75); // what percent of the commits are soft
    final int deletePercent = 4 + random().nextInt(25);
    final int deleteByQueryPercent = 1 + random().nextInt(5);
    final int ndocs = 5 + (random().nextBoolean() ? random().nextInt(25) : random().nextInt(200));
    int nWriteThreads = 5 + random().nextInt(25);

    final int maxConcurrentCommits = nWriteThreads; // number of committers at a time... it should be <= maxWarmingSearchers

    final AtomicLong operations = new AtomicLong(100000); // number of query operations to perform in total
    int nReadThreads = 5 + random().nextInt(25);
    final boolean tombstones = random().nextBoolean();
    final boolean syncCommits = random().nextBoolean();

    verbose("commitPercent=", commitPercent);
    verbose("softCommitPercent=", softCommitPercent);
    verbose("deletePercent=", deletePercent);
    verbose("deleteByQueryPercent=", deleteByQueryPercent);
    verbose("ndocs=", ndocs);
    verbose("nWriteThreads=", nWriteThreads);
    verbose("nReadThreads=", nReadThreads);
    verbose("maxConcurrentCommits=", maxConcurrentCommits);
    verbose("operations=", operations);
    verbose("tombstones=", tombstones);
    verbose("syncCommits=", syncCommits);

    initModel(ndocs);

    final AtomicInteger numCommitting = new AtomicInteger();

    List<Thread> threads = new ArrayList<Thread>();

    final FieldType idFt = new FieldType();
    idFt.setIndexed(true);
    idFt.setStored(true);
    idFt.setOmitNorms(true);
    idFt.setTokenized(false);
    idFt.setIndexOptions(FieldInfo.IndexOptions.DOCS_ONLY);

    final FieldType ft2 = new FieldType();
    ft2.setIndexed(false);
    ft2.setStored(true);

    // model how solr does locking - only allow one thread to do a hard commit at once, and only one thread to do a soft commit, but
    // a hard commit in progress does not stop a soft commit.
    final Lock hardCommitLock = syncCommits ? new ReentrantLock() : null;
    final Lock reopenLock = syncCommits ? new ReentrantLock() : null;

    // RAMDirectory dir = new RAMDirectory();
    // final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_40, new WhitespaceAnalyzer(Version.LUCENE_40)));

    Directory dir = newDirectory();

    final RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
    writer.setDoRandomForceMergeAssert(false);

    // writer.commit();
    // reader = IndexReader.open(dir);
    // make this reader an NRT reader from the start to avoid having the first non-writer
    // openIfChanged only open at the last commit point.
    reader = DirectoryReader.open(writer.w, true);

    for (int i = 0; i < nWriteThreads; i++) {
        Thread thread = new Thread("WRITER" + i) {
            Random rand = new Random(random().nextInt());

            @Override
            public void run() {
                try {
                    while (operations.get() > 0) {
                        int oper = rand.nextInt(100);

                        if (oper < commitPercent) {
                            if (numCommitting.incrementAndGet() <= maxConcurrentCommits) {
                                Map<Integer, DocInfo> newCommittedModel;
                                long version;
                                DirectoryReader oldReader;

                                boolean softCommit = rand.nextInt(100) < softCommitPercent;

                                if (!softCommit) {
                                    // only allow one hard commit to proceed at once
                                    if (hardCommitLock != null)
                                        hardCommitLock.lock();
                                    verbose("hardCommit start");

                                    writer.commit();
                                }

                                if (reopenLock != null)
                                    reopenLock.lock();

                                synchronized (globalLock) {
                                    newCommittedModel = new HashMap<Integer, DocInfo>(model); // take a snapshot
                                    version = snapshotCount++;
                                    oldReader = reader;
                                    oldReader.incRef(); // increment the reference since we will use this for reopening
                                }

                                if (!softCommit) {
                                    // must commit after taking a snapshot of the model
                                    // writer.commit();
                                }

                                verbose("reopen start using", oldReader);

                                DirectoryReader newReader;
                                if (softCommit) {
                                    newReader = DirectoryReader.openIfChanged(oldReader, writer.w, true);
                                } else {
                                    // will only open to last commit
                                    newReader = DirectoryReader.openIfChanged(oldReader);
                                }

                                if (newReader == null) {
                                    oldReader.incRef();
                                    newReader = oldReader;
                                }
                                oldReader.decRef();

                                verbose("reopen result", newReader);

                                synchronized (globalLock) {
                                    assert newReader.getRefCount() > 0;
                                    assert reader.getRefCount() > 0;

                                    // install the new reader if it's newest (and check the current version since another reader may have already been installed)
                                    if (newReader.getVersion() > reader.getVersion()) {
                                        reader.decRef();
                                        reader = newReader;

                                        // install this snapshot only if it's newer than the current one
                                        if (version >= committedModelClock) {
                                            committedModel = newCommittedModel;
                                            committedModelClock = version;
                                        }

                                    } else {
                                        // close if unused
                                        newReader.decRef();
                                    }

                                }

                                if (reopenLock != null)
                                    reopenLock.unlock();

                                if (!softCommit) {
                                    if (hardCommitLock != null)
                                        hardCommitLock.unlock();
                                }

                            }
                            numCommitting.decrementAndGet();
                            continue;
                        }

                        int id = rand.nextInt(ndocs);
                        Object sync = syncArr[id];

                        // set the lastId before we actually change it sometimes to try and
                        // uncover more race conditions between writing and reading
                        boolean before = rand.nextBoolean();
                        if (before) {
                            lastId = id;
                        }

                        // We can't concurrently update the same document and retain our invariants of increasing values
                        // since we can't guarantee what order the updates will be executed.
                        synchronized (sync) {
                            DocInfo info = model.get(id);
                            long val = info.val;
                            long nextVal = Math.abs(val) + 1;

                            if (oper < commitPercent + deletePercent) {
                                // add tombstone first
                                if (tombstones) {
                                    Document d = new Document();
                                    d.add(new Field("id", "-" + Integer.toString(id), idFt));
                                    d.add(new Field(field, Long.toString(nextVal), ft2));
                                    verbose("adding tombstone for id", id, "val=", nextVal);
                                    writer.updateDocument(new Term("id", "-" + Integer.toString(id)), d);
                                }

                                verbose("deleting id", id, "val=", nextVal);
                                writer.deleteDocuments(new Term("id", Integer.toString(id)));
                                model.put(id, new DocInfo(0, -nextVal));
                                verbose("deleting id", id, "val=", nextVal, "DONE");

                            } else if (oper < commitPercent + deletePercent + deleteByQueryPercent) {
                                //assertU("<delete><query>id:" + id + "</query></delete>");

                                // add tombstone first
                                if (tombstones) {
                                    Document d = new Document();
                                    d.add(new Field("id", "-" + Integer.toString(id), idFt));
                                    d.add(new Field(field, Long.toString(nextVal), ft2));
                                    verbose("adding tombstone for id", id, "val=", nextVal);
                                    writer.updateDocument(new Term("id", "-" + Integer.toString(id)), d);
                                }

                                verbose("deleteByQuery", id, "val=", nextVal);
                                writer.deleteDocuments(new TermQuery(new Term("id", Integer.toString(id))));
                                model.put(id, new DocInfo(0, -nextVal));
                                verbose("deleteByQuery", id, "val=", nextVal, "DONE");
                            } else {
                                // model.put(id, nextVal);   // uncomment this and this test should fail.

                                // assertU(adoc("id",Integer.toString(id), field, Long.toString(nextVal)));
                                Document d = new Document();
                                d.add(new Field("id", Integer.toString(id), idFt));
                                d.add(new Field(field, Long.toString(nextVal), ft2));
                                verbose("adding id", id, "val=", nextVal);
                                writer.updateDocument(new Term("id", Integer.toString(id)), d);
                                if (tombstones) {
                                    // remove tombstone after new addition (this should be optional?)
                                    verbose("deleting tombstone for id", id);
                                    writer.deleteDocuments(new Term("id", "-" + Integer.toString(id)));
                                    verbose("deleting tombstone for id", id, "DONE");
                                }

                                model.put(id, new DocInfo(0, nextVal));
                                verbose("adding id", id, "val=", nextVal, "DONE");
                            }
                        }

                        if (!before) {
                            lastId = id;
                        }
                    }
                } catch (Exception ex) {
                    throw new RuntimeException(ex);
                }
            }
        };

        threads.add(thread);
    }

    for (int i = 0; i < nReadThreads; i++) {
        Thread thread = new Thread("READER" + i) {
            Random rand = new Random(random().nextInt());

            @Override
            public void run() {
                try {
                    while (operations.decrementAndGet() >= 0) {
                        // bias toward a recently changed doc
                        int id = rand.nextInt(100) < 25 ? lastId : rand.nextInt(ndocs);

                        // when indexing, we update the index, then the model
                        // so when querying, we should first check the model, and then the index

                        DocInfo info;
                        synchronized (globalLock) {
                            info = committedModel.get(id);
                        }
                        long val = info.val;

                        IndexReader r;
                        synchronized (globalLock) {
                            r = reader;
                            r.incRef();
                        }

                        int docid = getFirstMatch(r, new Term("id", Integer.toString(id)));

                        if (docid < 0 && tombstones) {
                            // if we couldn't find the doc, look for its tombstone
                            docid = getFirstMatch(r, new Term("id", "-" + Integer.toString(id)));
                            if (docid < 0) {
                                if (val == -1L) {
                                    // expected... no doc was added yet
                                    r.decRef();
                                    continue;
                                }
                                verbose("ERROR: Couldn't find a doc  or tombstone for id", id, "using reader",
                                        r, "expected value", val);
                                fail("No documents or tombstones found for id " + id + ", expected at least "
                                        + val);
                            }
                        }

                        if (docid < 0 && !tombstones) {
                            // nothing to do - we can't tell anything from a deleted doc without tombstones
                        } else {
                            if (docid < 0) {
                                verbose("ERROR: Couldn't find a doc for id", id, "using reader", r);
                            }
                            assertTrue(docid >= 0); // we should have found the document, or its tombstone
                            Document doc = r.document(docid);
                            long foundVal = Long.parseLong(doc.get(field));
                            if (foundVal < Math.abs(val)) {
                                verbose("ERROR: id", id, "model_val=", val, " foundVal=", foundVal, "reader=",
                                        reader);
                            }
                            assertTrue(foundVal >= Math.abs(val));
                        }

                        r.decRef();
                    }
                } catch (Throwable e) {
                    operations.set(-1L);
                    throw new RuntimeException(e);
                }
            }
        };

        threads.add(thread);
    }

    for (Thread thread : threads) {
        thread.start();
    }

    for (Thread thread : threads) {
        thread.join();
    }

    writer.close();
    reader.close();
    dir.close();
}

From source file: org.apache.solr.uninverting.TestFieldCacheVsDocValues.java

License: Apache License

private void doTestSortedVsFieldCache(int minLength, int maxLength) throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
    Document doc = new Document();
    Field idField = new StringField("id", "", Field.Store.NO);
    Field indexedField = new StringField("indexed", "", Field.Store.NO);
    Field dvField = new SortedDocValuesField("dv", new BytesRef());
    doc.add(idField);
    doc.add(indexedField);
    doc.add(dvField);

    // index some docs
    int numDocs = atLeast(300);
    for (int i = 0; i < numDocs; i++) {
        idField.setStringValue(Integer.toString(i));
        final int length;
        if (minLength == maxLength) {
            length = minLength; // fixed length
        } else {
            length = TestUtil.nextInt(random(), minLength, maxLength);
        }
        String value = TestUtil.randomSimpleString(random(), length);
        indexedField.setStringValue(value);
        dvField.setBytesValue(new BytesRef(value));
        writer.addDocument(doc);
        if (random().nextInt(31) == 0) {
            writer.commit();
        }
    }

    // delete some docs
    int numDeletions = random().nextInt(numDocs / 10);
    for (int i = 0; i < numDeletions; i++) {
        int id = random().nextInt(numDocs);
        writer.deleteDocuments(new Term("id", Integer.toString(id)));
    }
    writer.close();

    // compare
    DirectoryReader ir = DirectoryReader.open(dir);
    for (LeafReaderContext context : ir.leaves()) {
        LeafReader r = context.reader();
        SortedDocValues expected = FieldCache.DEFAULT.getTermsIndex(r, "indexed");
        SortedDocValues actual = r.getSortedDocValues("dv");
        assertEquals(r.maxDoc(), expected, actual);
    }
    ir.close();
    dir.close();
}

From source file: org.apache.solr.uninverting.TestFieldCacheVsDocValues.java

License: Apache License

private void doTestSortedSetVsUninvertedField(int minLength, int maxLength) throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(new MockAnalyzer(random()));
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);

    // index some docs
    int numDocs = atLeast(300);
    for (int i = 0; i < numDocs; i++) {
        Document doc = new Document();
        Field idField = new StringField("id", Integer.toString(i), Field.Store.NO);
        doc.add(idField);
        final int length = TestUtil.nextInt(random(), minLength, maxLength);
        int numValues = random().nextInt(17);
        // create a random list of strings
        List<String> values = new ArrayList<>();
        for (int v = 0; v < numValues; v++) {
            values.add(TestUtil.randomSimpleString(random(), minLength, length));
        }

        // add in any order to the indexed field
        ArrayList<String> unordered = new ArrayList<>(values);
        Collections.shuffle(unordered, random());
        for (String v : unordered) {
            doc.add(newStringField("indexed", v, Field.Store.NO));
        }

        // add in any order to the dv field
        ArrayList<String> unordered2 = new ArrayList<>(values);
        Collections.shuffle(unordered2, random());
        for (String v : unordered2) {
            doc.add(new SortedSetDocValuesField("dv", new BytesRef(v)));
        }

        writer.addDocument(doc);
        if (random().nextInt(31) == 0) {
            writer.commit();
        }
    }

    // delete some docs
    int numDeletions = random().nextInt(numDocs / 10);
    for (int i = 0; i < numDeletions; i++) {
        int id = random().nextInt(numDocs);
        writer.deleteDocuments(new Term("id", Integer.toString(id)));
    }

    // compare per-segment
    DirectoryReader ir = writer.getReader();
    for (LeafReaderContext context : ir.leaves()) {
        LeafReader r = context.reader();
        SortedSetDocValues expected = FieldCache.DEFAULT.getDocTermOrds(r, "indexed", null);
        SortedSetDocValues actual = r.getSortedSetDocValues("dv");
        assertEquals(r.maxDoc(), expected, actual);
    }
    ir.close();

    writer.forceMerge(1);

    // now compare again after the merge
    ir = writer.getReader();
    LeafReader ar = getOnlyLeafReader(ir);
    SortedSetDocValues expected = FieldCache.DEFAULT.getDocTermOrds(ar, "indexed", null);
    SortedSetDocValues actual = ar.getSortedSetDocValues("dv");
    assertEquals(ir.maxDoc(), expected, actual);
    ir.close();

    writer.close();
    dir.close();
}

From source file: org.apache.solr.uninverting.TestFieldCacheVsDocValues.java

License: Apache License

private void doTestMissingVsFieldCache(LongProducer longs) throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
    Field idField = new StringField("id", "", Field.Store.NO);
    Field indexedField = newStringField("indexed", "", Field.Store.NO);
    Field dvField = new NumericDocValuesField("dv", 0);

    // index some docs
    int numDocs = atLeast(300);
    // numDocs should always be > 256 so that, in case of a codec that optimizes
    // for numbers of values <= 256, all storage layouts are tested
    assert numDocs > 256;
    for (int i = 0; i < numDocs; i++) {
        idField.setStringValue(Integer.toString(i));
        long value = longs.next();
        indexedField.setStringValue(Long.toString(value));
        dvField.setLongValue(value);
        Document doc = new Document();
        doc.add(idField);
        // 1/4 of the time we neglect to add the fields
        if (random().nextInt(4) > 0) {
            doc.add(indexedField);
            doc.add(dvField);
        }
        writer.addDocument(doc);
        if (random().nextInt(31) == 0) {
            writer.commit();
        }
    }

    // delete some docs
    int numDeletions = random().nextInt(numDocs / 10);
    for (int i = 0; i < numDeletions; i++) {
        int id = random().nextInt(numDocs);
        writer.deleteDocuments(new Term("id", Integer.toString(id)));
    }

    // merge some segments and ensure that at least one of them has more than
    // 256 values
    writer.forceMerge(numDocs / 256);

    writer.close();

    // compare
    DirectoryReader ir = DirectoryReader.open(dir);
    for (LeafReaderContext context : ir.leaves()) {
        LeafReader r = context.reader();
        Bits expected = FieldCache.DEFAULT.getDocsWithField(r, "indexed", null);
        Bits actual = FieldCache.DEFAULT.getDocsWithField(r, "dv", null);
        assertEquals(expected, actual);
    }
    ir.close();
    dir.close();
}

From source file: org.elasticsearch.common.lucene.LuceneTests.java

License: Apache License

public void testCount() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir);

    try (DirectoryReader reader = w.getReader()) {
        // match_all does not match anything on an empty index
        IndexSearcher searcher = newSearcher(reader);
        assertFalse(Lucene.exists(searcher, new MatchAllDocsQuery()));
    }

    Document doc = new Document();
    w.addDocument(doc);

    doc.add(new StringField("foo", "bar", Store.NO));
    w.addDocument(doc);

    try (DirectoryReader reader = w.getReader()) {
        IndexSearcher searcher = newSearcher(reader);
        assertTrue(Lucene.exists(searcher, new MatchAllDocsQuery()));
        assertFalse(Lucene.exists(searcher, new TermQuery(new Term("baz", "bar"))));
        assertTrue(Lucene.exists(searcher, new TermQuery(new Term("foo", "bar"))));
    }

    w.deleteDocuments(new Term("foo", "bar"));
    try (DirectoryReader reader = w.getReader()) {
        IndexSearcher searcher = newSearcher(reader);
        assertFalse(Lucene.exists(searcher, new TermQuery(new Term("foo", "bar"))));
    }

    w.close();
    dir.close();
}

From source file: org.elasticsearch.index.search.child.ChildrenConstantScoreQueryTests.java

License: Apache License

@Test
public void testRandom() throws Exception {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    int numUniqueChildValues = 1 + random().nextInt(TEST_NIGHTLY ? 10000 : 1000);
    String[] childValues = new String[numUniqueChildValues];
    for (int i = 0; i < numUniqueChildValues; i++) {
        childValues[i] = Integer.toString(i);
    }

    IntOpenHashSet filteredOrDeletedDocs = new IntOpenHashSet();
    int childDocId = 0;
    int numParentDocs = 1 + random().nextInt(TEST_NIGHTLY ? 20000 : 1000);
    ObjectObjectOpenHashMap<String, NavigableSet<String>> childValueToParentIds = new ObjectObjectOpenHashMap<String, NavigableSet<String>>();
    for (int parentDocId = 0; parentDocId < numParentDocs; parentDocId++) {
        boolean markParentAsDeleted = rarely();
        boolean filterMe = rarely();
        String parent = Integer.toString(parentDocId);
        Document document = new Document();
        document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.YES));
        document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
        if (markParentAsDeleted) {
            filteredOrDeletedDocs.add(parentDocId);
            document.add(new StringField("delete", "me", Field.Store.NO));
        }
        if (filterMe) {
            filteredOrDeletedDocs.add(parentDocId);
            document.add(new StringField("filter", "me", Field.Store.NO));
        }
        indexWriter.addDocument(document);

        int numChildDocs;
        if (rarely()) {
            numChildDocs = random().nextInt(TEST_NIGHTLY ? 100 : 25);
        } else {
            numChildDocs = random().nextInt(TEST_NIGHTLY ? 40 : 10);
        }
        for (int i = 0; i < numChildDocs; i++) {
            boolean markChildAsDeleted = rarely();
            String childValue = childValues[random().nextInt(childValues.length)];

            document = new Document();
            document.add(new StringField(UidFieldMapper.NAME,
                    Uid.createUid("child", Integer.toString(childDocId)), Field.Store.NO));
            document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
            document.add(
                    new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
            document.add(new StringField("field1", childValue, Field.Store.NO));
            if (markChildAsDeleted) {
                document.add(new StringField("delete", "me", Field.Store.NO));
            }
            indexWriter.addDocument(document);

            if (!markChildAsDeleted) {
                NavigableSet<String> parentIds;
                if (childValueToParentIds.containsKey(childValue)) {
                    parentIds = childValueToParentIds.lget();
                } else {
                    childValueToParentIds.put(childValue, parentIds = new TreeSet<String>());
                }
                if (!markParentAsDeleted && !filterMe) {
                    parentIds.add(parent);
                }
            }
        }
    }

    // Delete docs that are marked to be deleted.
    indexWriter.deleteDocuments(new Term("delete", "me"));

    indexWriter.commit();
    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    Engine.Searcher engineSearcher = new Engine.SimpleSearcher(
            ChildrenConstantScoreQueryTests.class.getSimpleName(), searcher);
    ((TestSearchContext) SearchContext.current())
            .setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));

    ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child")
            .parentFieldMapper();
    ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData()
            .getForField(parentFieldMapper);
    Filter rawParentFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "parent"));
    Filter rawFilterMe = new NotFilter(new TermFilter(new Term("filter", "me")));
    int max = numUniqueChildValues / 4;
    for (int i = 0; i < max; i++) {
        // Randomly pick a cached version: there is specific logic inside ChildrenQuery that deals with the fact
        // that deletes are applied at the top level when filters are cached.
        Filter parentFilter;
        if (random().nextBoolean()) {
            parentFilter = SearchContext.current().filterCache().cache(rawParentFilter);
        } else {
            parentFilter = rawParentFilter;
        }

        // Using this in a FilteredQuery will invoke / test the Scorer#advance(..) path and also let the Weight#scorer not get live docs as acceptedDocs
        Filter filterMe;
        if (random().nextBoolean()) {
            filterMe = SearchContext.current().filterCache().cache(rawFilterMe);
        } else {
            filterMe = rawFilterMe;
        }

        // Simulate a parent update
        if (random().nextBoolean()) {
            int numberOfUpdates = 1 + random().nextInt(TEST_NIGHTLY ? 25 : 5);
            for (int j = 0; j < numberOfUpdates; j++) {
                int parentId;
                do {
                    parentId = random().nextInt(numParentDocs);
                } while (filteredOrDeletedDocs.contains(parentId));

                String parentUid = Uid.createUid("parent", Integer.toString(parentId));
                indexWriter.deleteDocuments(new Term(UidFieldMapper.NAME, parentUid));

                Document document = new Document();
                document.add(new StringField(UidFieldMapper.NAME, parentUid, Field.Store.YES));
                document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
                indexWriter.addDocument(document);
            }

            indexReader.close();
            indexReader = DirectoryReader.open(indexWriter.w, true);
            searcher = new IndexSearcher(indexReader);
            engineSearcher = new Engine.SimpleSearcher(ChildrenConstantScoreQueryTests.class.getSimpleName(),
                    searcher);
            ((TestSearchContext) SearchContext.current())
                    .setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
        }

        String childValue = childValues[random().nextInt(numUniqueChildValues)];
        TermQuery childQuery = new TermQuery(new Term("field1", childValue));
        int shortCircuitParentDocSet = random().nextInt(numParentDocs);
        Filter nonNestedDocsFilter = random().nextBoolean() ? NonNestedDocsFilter.INSTANCE : null;
        Query query;
        if (random().nextBoolean()) {
            // Usage in HasChildQueryParser
            query = new ChildrenConstantScoreQuery(parentChildIndexFieldData, childQuery, "parent", "child",
                    parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter);
        } else {
            // Usage in HasChildFilterParser
            query = new XConstantScoreQuery(new CustomQueryWrappingFilter(
                    new ChildrenConstantScoreQuery(parentChildIndexFieldData, childQuery, "parent", "child",
                            parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter)));
        }
        query = new XFilteredQuery(query, filterMe);
        BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
        searcher.search(query, collector);
        FixedBitSet actualResult = collector.getResult();

        FixedBitSet expectedResult = new FixedBitSet(indexReader.maxDoc());
        if (childValueToParentIds.containsKey(childValue)) {
            AtomicReader slowAtomicReader = SlowCompositeReaderWrapper.wrap(indexReader);
            Terms terms = slowAtomicReader.terms(UidFieldMapper.NAME);
            if (terms != null) {
                NavigableSet<String> parentIds = childValueToParentIds.lget();
                TermsEnum termsEnum = terms.iterator(null);
                DocsEnum docsEnum = null;
                for (String id : parentIds) {
                    TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("parent", id));
                    if (seekStatus == TermsEnum.SeekStatus.FOUND) {
                        docsEnum = termsEnum.docs(slowAtomicReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
                        expectedResult.set(docsEnum.nextDoc());
                    } else if (seekStatus == TermsEnum.SeekStatus.END) {
                        break;
                    }
                }
            }
        }

        assertBitSet(actualResult, expectedResult, searcher);
    }

    indexWriter.close();
    indexReader.close();
    directory.close();
}

From source file: org.elasticsearch.index.search.child.ChildrenQueryTests.java

License: Apache License

@Test
public void testRandom() throws Exception {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    int numUniqueChildValues = 1 + random().nextInt(TEST_NIGHTLY ? 6000 : 600);
    String[] childValues = new String[numUniqueChildValues];
    for (int i = 0; i < numUniqueChildValues; i++) {
        childValues[i] = Integer.toString(i);
    }

    IntOpenHashSet filteredOrDeletedDocs = new IntOpenHashSet();

    int childDocId = 0;
    int numParentDocs = 1 + random().nextInt(TEST_NIGHTLY ? 20000 : 1000);
    ObjectObjectOpenHashMap<String, NavigableMap<String, FloatArrayList>> childValueToParentIds = new ObjectObjectOpenHashMap<String, NavigableMap<String, FloatArrayList>>();
    for (int parentDocId = 0; parentDocId < numParentDocs; parentDocId++) {
        boolean markParentAsDeleted = rarely();
        boolean filterMe = rarely();
        String parent = Integer.toString(parentDocId);
        Document document = new Document();
        document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.YES));
        document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
        if (markParentAsDeleted) {
            filteredOrDeletedDocs.add(parentDocId);
            document.add(new StringField("delete", "me", Field.Store.NO));
        }
        if (filterMe) {
            filteredOrDeletedDocs.add(parentDocId);
            document.add(new StringField("filter", "me", Field.Store.NO));
        }
        indexWriter.addDocument(document);

        int numChildDocs;
        if (rarely()) {
            numChildDocs = random().nextInt(TEST_NIGHTLY ? 100 : 25);
        } else {
            numChildDocs = random().nextInt(TEST_NIGHTLY ? 40 : 10);
        }
        for (int i = 0; i < numChildDocs; i++) {
            boolean markChildAsDeleted = rarely();
            String childValue = childValues[random().nextInt(childValues.length)];

            document = new Document();
            document.add(new StringField(UidFieldMapper.NAME,
                    Uid.createUid("child", Integer.toString(childDocId)), Field.Store.NO));
            document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
            document.add(
                    new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
            document.add(new StringField("field1", childValue, Field.Store.NO));
            if (markChildAsDeleted) {
                document.add(new StringField("delete", "me", Field.Store.NO));
            }
            indexWriter.addDocument(document);

            if (!markChildAsDeleted) {
                NavigableMap<String, FloatArrayList> parentIdToChildScores;
                if (childValueToParentIds.containsKey(childValue)) {
                    parentIdToChildScores = childValueToParentIds.lget();
                } else {
                    childValueToParentIds.put(childValue,
                            parentIdToChildScores = new TreeMap<String, FloatArrayList>());
                }
                if (!markParentAsDeleted && !filterMe) {
                    FloatArrayList childScores = parentIdToChildScores.get(parent);
                    if (childScores == null) {
                        parentIdToChildScores.put(parent, childScores = new FloatArrayList());
                    }
                    childScores.add(1f);
                }
            }
        }
    }

    // Delete docs that are marked to be deleted.
    indexWriter.deleteDocuments(new Term("delete", "me"));
    indexWriter.commit();

    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    Engine.Searcher engineSearcher = new Engine.SimpleSearcher(ChildrenQueryTests.class.getSimpleName(),
            searcher);
    ((TestSearchContext) SearchContext.current())
            .setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));

    ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child")
            .parentFieldMapper();
    ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData()
            .getForField(parentFieldMapper);
    Filter rawParentFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "parent"));
    Filter rawFilterMe = new NotFilter(new TermFilter(new Term("filter", "me")));
    int max = numUniqueChildValues / 4;
    for (int i = 0; i < max; i++) {
        // Randomly pick a cached version: there is specific logic inside ChildrenQuery that deals with the fact
        // that deletes are applied at the top level when filters are cached.
        Filter parentFilter;
        if (random().nextBoolean()) {
            parentFilter = SearchContext.current().filterCache().cache(rawParentFilter);
        } else {
            parentFilter = rawParentFilter;
        }

        // Using this in a FilteredQuery will invoke / test the Scorer#advance(..) path and also let the Weight#scorer not get live docs as acceptedDocs
        Filter filterMe;
        if (random().nextBoolean()) {
            filterMe = SearchContext.current().filterCache().cache(rawFilterMe);
        } else {
            filterMe = rawFilterMe;
        }

        // Simulate a parent update
        if (random().nextBoolean()) {
            int numberOfUpdates = 1 + random().nextInt(TEST_NIGHTLY ? 25 : 5);
            for (int j = 0; j < numberOfUpdates; j++) {
                int parentId;
                do {
                    parentId = random().nextInt(numParentDocs);
                } while (filteredOrDeletedDocs.contains(parentId));

                String parentUid = Uid.createUid("parent", Integer.toString(parentId));
                indexWriter.deleteDocuments(new Term(UidFieldMapper.NAME, parentUid));

                Document document = new Document();
                document.add(new StringField(UidFieldMapper.NAME, parentUid, Field.Store.YES));
                document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
                indexWriter.addDocument(document);
            }

            indexReader.close();
            indexReader = DirectoryReader.open(indexWriter.w, true);
            searcher = new IndexSearcher(indexReader);
            engineSearcher = new Engine.SimpleSearcher(ChildrenConstantScoreQueryTests.class.getSimpleName(),
                    searcher);
            ((TestSearchContext) SearchContext.current())
                    .setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
        }

        String childValue = childValues[random().nextInt(numUniqueChildValues)];
        Query childQuery = new ConstantScoreQuery(new TermQuery(new Term("field1", childValue)));
        int shortCircuitParentDocSet = random().nextInt(numParentDocs);
        ScoreType scoreType = ScoreType.values()[random().nextInt(ScoreType.values().length)];
        Filter nonNestedDocsFilter = random().nextBoolean() ? NonNestedDocsFilter.INSTANCE : null;
        Query query = new ChildrenQuery(parentChildIndexFieldData, "parent", "child", parentFilter, childQuery,
                scoreType, shortCircuitParentDocSet, nonNestedDocsFilter);
        query = new XFilteredQuery(query, filterMe);
        BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
        int numHits = 1 + random().nextInt(25);
        TopScoreDocCollector actualTopDocsCollector = TopScoreDocCollector.create(numHits, false);
        searcher.search(query, MultiCollector.wrap(collector, actualTopDocsCollector));
        FixedBitSet actualResult = collector.getResult();

        FixedBitSet expectedResult = new FixedBitSet(indexReader.maxDoc());
        MockScorer mockScorer = new MockScorer(scoreType);
        TopScoreDocCollector expectedTopDocsCollector = TopScoreDocCollector.create(numHits, false);
        expectedTopDocsCollector.setScorer(mockScorer);
        if (childValueToParentIds.containsKey(childValue)) {
            AtomicReader slowAtomicReader = SlowCompositeReaderWrapper.wrap(indexReader);
            Terms terms = slowAtomicReader.terms(UidFieldMapper.NAME);
            if (terms != null) {
                NavigableMap<String, FloatArrayList> parentIdToChildScores = childValueToParentIds.lget();
                TermsEnum termsEnum = terms.iterator(null);
                DocsEnum docsEnum = null;
                for (Map.Entry<String, FloatArrayList> entry : parentIdToChildScores.entrySet()) {
                    TermsEnum.SeekStatus seekStatus = termsEnum
                            .seekCeil(Uid.createUidAsBytes("parent", entry.getKey()));
                    if (seekStatus == TermsEnum.SeekStatus.FOUND) {
                        docsEnum = termsEnum.docs(slowAtomicReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
                        expectedResult.set(docsEnum.nextDoc());
                        mockScorer.scores = entry.getValue();
                        expectedTopDocsCollector.collect(docsEnum.docID());
                    } else if (seekStatus == TermsEnum.SeekStatus.END) {
                        break;
                    }
                }
            }
        }

        assertBitSet(actualResult, expectedResult, searcher);
        assertTopDocs(actualTopDocsCollector.topDocs(), expectedTopDocsCollector.topDocs());
    }

    indexWriter.close();
    indexReader.close();
    directory.close();
}

From source file: org.elasticsearch.index.search.child.ParentConstantScoreQueryTests.java

License: Apache License

@Test
public void testRandom() throws Exception {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    int numUniqueParentValues = 1 + random().nextInt(TEST_NIGHTLY ? 20000 : 1000);
    String[] parentValues = new String[numUniqueParentValues];
    for (int i = 0; i < numUniqueParentValues; i++) {
        parentValues[i] = Integer.toString(i);
    }

    int childDocId = 0;
    int numParentDocs = 1 + random().nextInt(TEST_NIGHTLY ? 10000 : 1000);
    ObjectObjectOpenHashMap<String, NavigableSet<String>> parentValueToChildDocIds = new ObjectObjectOpenHashMap<String, NavigableSet<String>>();
    IntIntOpenHashMap childIdToParentId = new IntIntOpenHashMap();
    for (int parentDocId = 0; parentDocId < numParentDocs; parentDocId++) {
        boolean markParentAsDeleted = rarely();
        String parentValue = parentValues[random().nextInt(parentValues.length)];
        String parent = Integer.toString(parentDocId);
        Document document = new Document();
        document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
        document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
        document.add(new StringField("field1", parentValue, Field.Store.NO));
        if (markParentAsDeleted) {
            document.add(new StringField("delete", "me", Field.Store.NO));
        }
        indexWriter.addDocument(document);

        int numChildDocs;
        if (rarely()) {
            numChildDocs = random().nextInt(TEST_NIGHTLY ? 100 : 25);
        } else {
            numChildDocs = random().nextInt(TEST_NIGHTLY ? 40 : 10);
        }
        if (parentDocId == numParentDocs - 1 && childIdToParentId.isEmpty()) {
            // ensure there is at least one child in the index
            numChildDocs = Math.max(1, numChildDocs);
        }
        for (int i = 0; i < numChildDocs; i++) {
            boolean markChildAsDeleted = rarely();
            boolean filterMe = rarely();
            String child = Integer.toString(childDocId++);

            document = new Document();
            document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("child", child), Field.Store.YES));
            document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
            document.add(
                    new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
            if (markChildAsDeleted) {
                document.add(new StringField("delete", "me", Field.Store.NO));
            }
            if (filterMe) {
                document.add(new StringField("filter", "me", Field.Store.NO));
            }
            indexWriter.addDocument(document);

            if (!markParentAsDeleted) {
                NavigableSet<String> childIds;
                if (parentValueToChildDocIds.containsKey(parentValue)) {
                    childIds = parentValueToChildDocIds.lget();
                } else {
                    parentValueToChildDocIds.put(parentValue, childIds = new TreeSet<String>());
                }
                if (!markChildAsDeleted && !filterMe) {
                    childIdToParentId.put(Integer.valueOf(child), parentDocId);
                    childIds.add(child);
                }
            }
        }
    }

    // Delete docs that are marked to be deleted.
    indexWriter.deleteDocuments(new Term("delete", "me"));
    indexWriter.commit();

    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    Engine.Searcher engineSearcher = new Engine.SimpleSearcher(ParentConstantScoreQuery.class.getSimpleName(),
            searcher);
    ((TestSearchContext) SearchContext.current())
            .setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));

    ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child")
            .parentFieldMapper();
    ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData()
            .getForField(parentFieldMapper);
    TermFilter rawChildrenFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "child"));
    Filter rawFilterMe = new NotFilter(new TermFilter(new Term("filter", "me")));
    int max = numUniqueParentValues / 4;
    for (int i = 0; i < max; i++) {
        // Randomly pick a cached version: there is specific logic inside ChildrenQuery that deals with the fact
        // that deletes are applied at the top level when filters are cached.
        Filter childrenFilter;
        if (random().nextBoolean()) {
            childrenFilter = SearchContext.current().filterCache().cache(rawChildrenFilter);
        } else {
            childrenFilter = rawChildrenFilter;
        }

        // Using this in a FilteredQuery will invoke / test the Scorer#advance(..) path and also let the Weight#scorer not get live docs as acceptedDocs
        Filter filterMe;
        if (random().nextBoolean()) {
            filterMe = SearchContext.current().filterCache().cache(rawFilterMe);
        } else {
            filterMe = rawFilterMe;
        }

        // Simulate a child update
        if (random().nextBoolean()) {
            int numberOfUpdates = 1 + random().nextInt(TEST_NIGHTLY ? 25 : 5);
            int[] childIds = childIdToParentId.keys().toArray();
            for (int j = 0; j < numberOfUpdates; j++) {
                int childId = childIds[random().nextInt(childIds.length)];
                String childUid = Uid.createUid("child", Integer.toString(childId));
                indexWriter.deleteDocuments(new Term(UidFieldMapper.NAME, childUid));

                Document document = new Document();
                document.add(new StringField(UidFieldMapper.NAME, childUid, Field.Store.YES));
                document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
                String parentUid = Uid.createUid("parent", Integer.toString(childIdToParentId.get(childId)));
                document.add(new StringField(ParentFieldMapper.NAME, parentUid, Field.Store.NO));
                indexWriter.addDocument(document);
            }

            indexReader.close();
            indexReader = DirectoryReader.open(indexWriter.w, true);
            searcher = new IndexSearcher(indexReader);
            engineSearcher = new Engine.SimpleSearcher(ParentConstantScoreQueryTests.class.getSimpleName(),
                    searcher);
            ((TestSearchContext) SearchContext.current())
                    .setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
        }

        String parentValue = parentValues[random().nextInt(numUniqueParentValues)];
        TermQuery parentQuery = new TermQuery(new Term("field1", parentValue));
        Query query;
        if (random().nextBoolean()) {
            // Usage in HasParentQueryParser
            query = new ParentConstantScoreQuery(parentChildIndexFieldData, parentQuery, "parent",
                    childrenFilter);
        } else {
            // Usage in HasParentFilterParser
            query = new XConstantScoreQuery(new CustomQueryWrappingFilter(new ParentConstantScoreQuery(
                    parentChildIndexFieldData, parentQuery, "parent", childrenFilter)));
        }
        query = new XFilteredQuery(query, filterMe);
        BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
        searcher.search(query, collector);
        FixedBitSet actualResult = collector.getResult();

        FixedBitSet expectedResult = new FixedBitSet(indexReader.maxDoc());
        if (parentValueToChildDocIds.containsKey(parentValue)) {
            AtomicReader slowAtomicReader = SlowCompositeReaderWrapper.wrap(indexReader);
            Terms terms = slowAtomicReader.terms(UidFieldMapper.NAME);
            if (terms != null) {
                NavigableSet<String> childIds = parentValueToChildDocIds.lget();
                TermsEnum termsEnum = terms.iterator(null);
                DocsEnum docsEnum = null;
                for (String id : childIds) {
                    TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("child", id));
                    if (seekStatus == TermsEnum.SeekStatus.FOUND) {
                        docsEnum = termsEnum.docs(slowAtomicReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
                        expectedResult.set(docsEnum.nextDoc());
                    } else if (seekStatus == TermsEnum.SeekStatus.END) {
                        break;
                    }
                }
            }
        }

        assertBitSet(actualResult, expectedResult, searcher);
    }

    indexWriter.close();
    indexReader.close();
    directory.close();
}

From source file: org.elasticsearch.index.search.child.ParentQueryTests.java

License: Apache License

@Test
public void testRandom() throws Exception {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    int numUniqueParentValues = 1 + random().nextInt(TEST_NIGHTLY ? 6000 : 600);
    String[] parentValues = new String[numUniqueParentValues];
    for (int i = 0; i < numUniqueParentValues; i++) {
        parentValues[i] = Integer.toString(i);
    }

    int childDocId = 0;
    int numParentDocs = 1 + random().nextInt(TEST_NIGHTLY ? 20000 : 1000);
    ObjectObjectOpenHashMap<String, NavigableMap<String, Float>> parentValueToChildIds = new ObjectObjectOpenHashMap<String, NavigableMap<String, Float>>();
    IntIntOpenHashMap childIdToParentId = new IntIntOpenHashMap();
    for (int parentDocId = 0; parentDocId < numParentDocs; parentDocId++) {
        boolean markParentAsDeleted = rarely();
        String parentValue = parentValues[random().nextInt(parentValues.length)];
        String parent = Integer.toString(parentDocId);
        Document document = new Document();
        document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
        document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
        document.add(new StringField("field1", parentValue, Field.Store.NO));
        if (markParentAsDeleted) {
            document.add(new StringField("delete", "me", Field.Store.NO));
        }
        indexWriter.addDocument(document);

        int numChildDocs;
        if (rarely()) {
            numChildDocs = random().nextInt(TEST_NIGHTLY ? 100 : 25);
        } else {
            numChildDocs = random().nextInt(TEST_NIGHTLY ? 40 : 10);
        }
        if (parentDocId == numParentDocs - 1 && childIdToParentId.isEmpty()) {
            // ensure there is at least one child in the index
            numChildDocs = Math.max(1, numChildDocs);
        }
        for (int i = 0; i < numChildDocs; i++) {
            String child = Integer.toString(childDocId++);
            boolean markChildAsDeleted = rarely();
            boolean filterMe = rarely();
            document = new Document();
            document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("child", child), Field.Store.YES));
            document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
            document.add(
                    new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
            if (markChildAsDeleted) {
                document.add(new StringField("delete", "me", Field.Store.NO));
            }
            if (filterMe) {
                document.add(new StringField("filter", "me", Field.Store.NO));
            }
            indexWriter.addDocument(document);

            if (!markParentAsDeleted) {
                NavigableMap<String, Float> childIdToScore;
                if (parentValueToChildIds.containsKey(parentValue)) {
                    childIdToScore = parentValueToChildIds.lget();
                } else {
                    parentValueToChildIds.put(parentValue, childIdToScore = new TreeMap<String, Float>());
                }
                if (!markChildAsDeleted && !filterMe) {
                    assertFalse("child [" + child + "] already has a score", childIdToScore.containsKey(child));
                    childIdToScore.put(child, 1f);
                    childIdToParentId.put(Integer.valueOf(child), parentDocId);
                }
            }
        }
    }

    // Delete docs that are marked to be deleted.
    indexWriter.deleteDocuments(new Term("delete", "me"));
    indexWriter.commit();

    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    Engine.Searcher engineSearcher = new Engine.SimpleSearcher(ParentQueryTests.class.getSimpleName(),
            searcher);
    ((TestSearchContext) SearchContext.current())
            .setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));

    ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child")
            .parentFieldMapper();
    ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData()
            .getForField(parentFieldMapper);
    TermFilter rawChildrenFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "child"));
    Filter rawFilterMe = new NotFilter(new TermFilter(new Term("filter", "me")));
    int max = numUniqueParentValues / 4;
    for (int i = 0; i < max; i++) {
        // Randomly pick a cached version: there is specific logic inside ChildrenQuery that deals with the fact
        // that deletes are applied at the top level when filters are cached.
        Filter childrenFilter;
        if (random().nextBoolean()) {
            childrenFilter = SearchContext.current().filterCache().cache(rawChildrenFilter);
        } else {
            childrenFilter = rawChildrenFilter;
        }

        // Using this in a FilteredQuery will invoke / test the Scorer#advance(..) path and also let the Weight#scorer not get live docs as acceptedDocs
        Filter filterMe;
        if (random().nextBoolean()) {
            filterMe = SearchContext.current().filterCache().cache(rawFilterMe);
        } else {
            filterMe = rawFilterMe;
        }

        // Simulate a child update
        if (random().nextBoolean()) {
            int numberOfUpdates = 1 + random().nextInt(TEST_NIGHTLY ? 25 : 5);
            int[] childIds = childIdToParentId.keys().toArray();
            for (int j = 0; j < numberOfUpdates; j++) {
                int childId = childIds[random().nextInt(childIds.length)];
                String childUid = Uid.createUid("child", Integer.toString(childId));
                indexWriter.deleteDocuments(new Term(UidFieldMapper.NAME, childUid));

                Document document = new Document();
                document.add(new StringField(UidFieldMapper.NAME, childUid, Field.Store.YES));
                document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
                String parentUid = Uid.createUid("parent", Integer.toString(childIdToParentId.get(childId)));
                document.add(new StringField(ParentFieldMapper.NAME, parentUid, Field.Store.NO));
                indexWriter.addDocument(document);
            }

            indexReader.close();
            indexReader = DirectoryReader.open(indexWriter.w, true);
            searcher = new IndexSearcher(indexReader);
            engineSearcher = new Engine.SimpleSearcher(ParentConstantScoreQueryTests.class.getSimpleName(),
                    searcher);
            ((TestSearchContext) SearchContext.current())
                    .setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
        }

        String parentValue = parentValues[random().nextInt(numUniqueParentValues)];
        Query parentQuery = new ConstantScoreQuery(new TermQuery(new Term("field1", parentValue)));
        Query query = new ParentQuery(parentChildIndexFieldData, parentQuery, "parent", childrenFilter);
        query = new XFilteredQuery(query, filterMe);
        BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
        int numHits = 1 + random().nextInt(25);
        TopScoreDocCollector actualTopDocsCollector = TopScoreDocCollector.create(numHits, false);
        searcher.search(query, MultiCollector.wrap(collector, actualTopDocsCollector));
        FixedBitSet actualResult = collector.getResult();

        FixedBitSet expectedResult = new FixedBitSet(indexReader.maxDoc());
        MockScorer mockScorer = new MockScorer(ScoreType.MAX); // just save one score per parent...
        mockScorer.scores = new FloatArrayList();
        TopScoreDocCollector expectedTopDocsCollector = TopScoreDocCollector.create(numHits, false);
        expectedTopDocsCollector.setScorer(mockScorer);
        if (parentValueToChildIds.containsKey(parentValue)) {
            AtomicReader slowAtomicReader = SlowCompositeReaderWrapper.wrap(indexReader);
            Terms terms = slowAtomicReader.terms(UidFieldMapper.NAME);
            if (terms != null) {
                NavigableMap<String, Float> childIdsAndScore = parentValueToChildIds.lget();
                TermsEnum termsEnum = terms.iterator(null);
                DocsEnum docsEnum = null;
                for (Map.Entry<String, Float> entry : childIdsAndScore.entrySet()) {
                    TermsEnum.SeekStatus seekStatus = termsEnum
                            .seekCeil(Uid.createUidAsBytes("child", entry.getKey()));
                    if (seekStatus == TermsEnum.SeekStatus.FOUND) {
                        docsEnum = termsEnum.docs(slowAtomicReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
                        expectedResult.set(docsEnum.nextDoc());
                        mockScorer.scores.add(entry.getValue());
                        expectedTopDocsCollector.collect(docsEnum.docID());
                        mockScorer.scores.clear();
                    } else if (seekStatus == TermsEnum.SeekStatus.END) {
                        break;
                    }
                }
            }
        }

        assertBitSet(actualResult, expectedResult, searcher);
        assertTopDocs(actualTopDocsCollector.topDocs(), expectedTopDocsCollector.topDocs());
    }

    indexWriter.close();
    indexReader.close();
    directory.close();
}