Example usage for org.apache.lucene.index RandomIndexWriter addDocuments

List of usage examples for org.apache.lucene.index RandomIndexWriter addDocuments

Introduction

On this page you can find example usage for org.apache.lucene.index RandomIndexWriter addDocuments.

Prototype

public long addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException 

Source Link

Usage

From source file:com.sindicetech.siren.util.SirenTestCase.java

License:Open Source License

/**
 * Atomically indexes one document per entry of {@code data} (the nested block
 * receives sequentially assigned document IDs), storing each entry in
 * {@link #DEFAULT_TEST_FIELD}, then commits.
 * <br>
 * See also {@link IndexWriter#addDocuments(Iterable)}
 */
protected static void addDocuments(final RandomIndexWriter writer, final String[] data) throws IOException {
    final List<Document> block = new ArrayList<Document>(data.length);
    for (int i = 0; i < data.length; i++) {
        final Document document = new Document();
        document.add(new Field(DEFAULT_TEST_FIELD, data[i], newStoredFieldType()));
        block.add(document);
    }
    writer.addDocuments(block);
    writer.commit();
}

From source file:com.sindicetech.siren.util.SirenTestCase.java

License:Open Source License

/**
 * Atomically indexes one document per {@link MockSirenDocument}, wrapping each
 * in a {@link MockSirenReader} under {@link #DEFAULT_TEST_FIELD}, then commits.
 */
protected static void addDocuments(final RandomIndexWriter writer, final MockSirenDocument... sdocs)
        throws IOException {
    final List<Document> block = new ArrayList<Document>(sdocs.length);
    for (int i = 0; i < sdocs.length; i++) {
        final Document document = new Document();
        document.add(new Field(DEFAULT_TEST_FIELD, new MockSirenReader(sdocs[i]), newFieldType()));
        block.add(document);
    }
    writer.addDocuments(block);
    writer.commit();
}

From source file:org.elasticsearch.index.shard.ShardSplittingQueryTests.java

License:Apache License

/**
 * Indexes documents routed purely by their id (no custom routing) — optionally as
 * nested blocks — and verifies the shard-splitting query selects the right docs.
 */
public void testSplitOnID() throws IOException {
    SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
    Directory directory = newFSDirectory(createTempDir());
    final int docCount = randomIntBetween(50, 100);
    RandomIndexWriter iw = new RandomIndexWriter(random(), directory);
    int shardCount = randomIntBetween(2, 10);
    IndexMetaData indexMetaData = IndexMetaData.builder("test")
            .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
            .numberOfShards(shardCount).setRoutingNumShards(shardCount * 1000000).numberOfReplicas(0).build();
    int targetShard = randomIntBetween(0, shardCount - 1);
    boolean nested = randomBoolean();
    for (int docId = 0; docId < docCount; docId++) {
        String id = Integer.toString(docId);
        int shard = OperationRouting.generateShardId(indexMetaData, id, null);
        if (nested == false) {
            iw.addDocument(Arrays.asList(
                    new StringField(IdFieldMapper.NAME, Uid.encodeId(id), Field.Store.YES),
                    new SortedNumericDocValuesField("shard_id", shard), seqID.primaryTerm));
        } else {
            // Nested children first, root document last in the block.
            List<Iterable<IndexableField>> block = new ArrayList<>();
            final int childCount = randomIntBetween(0, 10);
            for (int child = 0; child < childCount; child++) {
                block.add(Arrays.asList(
                        new StringField(IdFieldMapper.NAME, Uid.encodeId(id), Field.Store.YES),
                        new StringField(TypeFieldMapper.NAME, "__nested", Field.Store.YES),
                        new SortedNumericDocValuesField("shard_id", shard)));
            }
            block.add(Arrays.asList(
                    new StringField(IdFieldMapper.NAME, Uid.encodeId(id), Field.Store.YES),
                    new SortedNumericDocValuesField("shard_id", shard), seqID.primaryTerm));
            iw.addDocuments(block);
        }
    }
    iw.commit();
    iw.close();

    assertSplit(directory, indexMetaData, targetShard, nested);
    directory.close();
}

From source file:org.elasticsearch.index.shard.ShardSplittingQueryTests.java

License:Apache License

/**
 * Indexes documents with an explicit routing value — optionally as nested blocks —
 * and verifies the shard-splitting query selects the right docs.
 */
public void testSplitOnRouting() throws IOException {
    SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
    Directory directory = newFSDirectory(createTempDir());
    final int docCount = randomIntBetween(50, 100);
    RandomIndexWriter iw = new RandomIndexWriter(random(), directory);
    int shardCount = randomIntBetween(2, 10);
    IndexMetaData indexMetaData = IndexMetaData.builder("test")
            .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
            .numberOfShards(shardCount).setRoutingNumShards(shardCount * 1000000).numberOfReplicas(0).build();
    boolean nested = randomBoolean();
    int targetShard = randomIntBetween(0, shardCount - 1);
    for (int docId = 0; docId < docCount; docId++) {
        String routing = randomRealisticUnicodeOfCodepointLengthBetween(1, 5);
        String id = Integer.toString(docId);
        final int shard = OperationRouting.generateShardId(indexMetaData, null, routing);
        if (nested == false) {
            iw.addDocument(Arrays.asList(
                    new StringField(IdFieldMapper.NAME, Uid.encodeId(id), Field.Store.YES),
                    new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
                    new SortedNumericDocValuesField("shard_id", shard), seqID.primaryTerm));
        } else {
            // Nested children first, root document last in the block.
            List<Iterable<IndexableField>> block = new ArrayList<>();
            final int childCount = randomIntBetween(0, 10);
            for (int child = 0; child < childCount; child++) {
                block.add(Arrays.asList(
                        new StringField(IdFieldMapper.NAME, Uid.encodeId(id), Field.Store.YES),
                        new StringField(TypeFieldMapper.NAME, "__nested", Field.Store.YES),
                        new SortedNumericDocValuesField("shard_id", shard)));
            }
            block.add(Arrays.asList(
                    new StringField(IdFieldMapper.NAME, Uid.encodeId(id), Field.Store.YES),
                    new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
                    new SortedNumericDocValuesField("shard_id", shard), seqID.primaryTerm));
            iw.addDocuments(block);
        }
    }
    iw.commit();
    iw.close();
    assertSplit(directory, indexMetaData, targetShard, nested);
    directory.close();
}

From source file:org.elasticsearch.index.shard.ShardSplittingQueryTests.java

License:Apache License

/**
 * Mixes id-routed and routing-routed documents in one index — optionally as nested
 * blocks — and verifies the shard-splitting query selects the right docs.
 */
public void testSplitOnIdOrRouting() throws IOException {
    SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
    Directory directory = newFSDirectory(createTempDir());
    final int docCount = randomIntBetween(50, 100);
    RandomIndexWriter iw = new RandomIndexWriter(random(), directory);
    int shardCount = randomIntBetween(2, 10);
    IndexMetaData indexMetaData = IndexMetaData.builder("test")
            .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
            .numberOfShards(shardCount).setRoutingNumShards(shardCount * 1000000).numberOfReplicas(0).build();
    boolean nested = randomBoolean();
    int targetShard = randomIntBetween(0, shardCount - 1);
    for (int docId = 0; docId < docCount; docId++) {
        String id = Integer.toString(docId);
        final Iterable<IndexableField> root;
        final int shard;
        // Per document, randomly pick routing-based or id-based shard assignment.
        if (randomBoolean()) {
            String routing = randomRealisticUnicodeOfCodepointLengthBetween(1, 5);
            shard = OperationRouting.generateShardId(indexMetaData, null, routing);
            root = Arrays.asList(
                    new StringField(IdFieldMapper.NAME, Uid.encodeId(id), Field.Store.YES),
                    new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
                    new SortedNumericDocValuesField("shard_id", shard), seqID.primaryTerm);
        } else {
            shard = OperationRouting.generateShardId(indexMetaData, id, null);
            root = Arrays.asList(
                    new StringField(IdFieldMapper.NAME, Uid.encodeId(id), Field.Store.YES),
                    new SortedNumericDocValuesField("shard_id", shard), seqID.primaryTerm);
        }

        if (nested == false) {
            iw.addDocument(root);
        } else {
            // Nested children first, root document last in the block.
            List<Iterable<IndexableField>> block = new ArrayList<>();
            final int childCount = randomIntBetween(0, 10);
            for (int child = 0; child < childCount; child++) {
                block.add(Arrays.asList(
                        new StringField(IdFieldMapper.NAME, Uid.encodeId(id), Field.Store.YES),
                        new StringField(TypeFieldMapper.NAME, "__nested", Field.Store.YES),
                        new SortedNumericDocValuesField("shard_id", shard)));
            }
            block.add(root);
            iw.addDocuments(block);
        }
    }
    iw.commit();
    iw.close();
    assertSplit(directory, indexMetaData, targetShard, nested);
    directory.close();
}

From source file:org.elasticsearch.index.shard.ShardSplittingQueryTests.java

License:Apache License

/**
 * Like testSplitOnRouting, but with a routing partition size configured so the
 * shard is derived from both the document id and the routing value.
 */
public void testSplitOnRoutingPartitioned() throws IOException {
    SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
    Directory directory = newFSDirectory(createTempDir());
    final int docCount = randomIntBetween(50, 100);
    RandomIndexWriter iw = new RandomIndexWriter(random(), directory);
    int shardCount = randomIntBetween(2, 10);
    IndexMetaData indexMetaData = IndexMetaData.builder("test")
            .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
            .numberOfShards(shardCount).setRoutingNumShards(shardCount * 1000000)
            .routingPartitionSize(randomIntBetween(1, 10)).numberOfReplicas(0).build();
    boolean nested = randomBoolean();
    int targetShard = randomIntBetween(0, shardCount - 1);
    for (int docId = 0; docId < docCount; docId++) {
        String routing = randomRealisticUnicodeOfCodepointLengthBetween(1, 5);
        String id = Integer.toString(docId);
        final int shard = OperationRouting.generateShardId(indexMetaData, id, routing);

        if (nested == false) {
            iw.addDocument(Arrays.asList(
                    new StringField(IdFieldMapper.NAME, Uid.encodeId(id), Field.Store.YES),
                    new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
                    new SortedNumericDocValuesField("shard_id", shard), seqID.primaryTerm));
        } else {
            // Nested children first, root document last in the block.
            List<Iterable<IndexableField>> block = new ArrayList<>();
            final int childCount = randomIntBetween(0, 10);
            for (int child = 0; child < childCount; child++) {
                block.add(Arrays.asList(
                        new StringField(IdFieldMapper.NAME, Uid.encodeId(id), Field.Store.YES),
                        new StringField(TypeFieldMapper.NAME, "__nested", Field.Store.YES),
                        new SortedNumericDocValuesField("shard_id", shard)));
            }
            block.add(Arrays.asList(
                    new StringField(IdFieldMapper.NAME, Uid.encodeId(id), Field.Store.YES),
                    new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
                    new SortedNumericDocValuesField("shard_id", shard), seqID.primaryTerm));
            iw.addDocuments(block);
        }
    }
    iw.commit();
    iw.close();
    assertSplit(directory, indexMetaData, targetShard, nested);
    directory.close();
}

From source file:org.elasticsearch.search.aggregations.bucket.nested.NestedAggregatorTest.java

License:Apache License

/**
 * Regression test: the nested aggregator must reset its current root doc id when
 * moving to a new segment, otherwise child docs from the previous segment are
 * emitted again (the bug surfaces as a doc count of 6 instead of 4).
 */
@Test
public void testResetRootDocId() throws Exception {
    Directory directory = newDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, null);
    // Disable merging so each commit keeps its own segment; the bug only shows across segments.
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, iwc);

    List<Document> documents = new ArrayList<>();

    // Segment 1: root document type#1 with 3 nested sub docs.
    documents.add(newNestedDoc("type#1"));
    documents.add(newNestedDoc("type#1"));
    documents.add(newNestedDoc("type#1"));
    documents.add(newRootDoc("type#1"));
    indexWriter.addDocuments(documents);
    indexWriter.commit();

    documents.clear();
    // Segment 2: root documents type#2 and type#3, each with 1 nested sub doc.
    documents.add(newNestedDoc("type#2"));
    documents.add(newRootDoc("type#2"));
    indexWriter.addDocuments(documents);
    documents.clear();
    documents.add(newNestedDoc("type#3"));
    documents.add(newRootDoc("type#3"));
    indexWriter.addDocuments(documents);

    indexWriter.commit();
    indexWriter.close();

    DirectoryReader directoryReader = DirectoryReader.open(directory);
    IndexSearcher searcher = new IndexSearcher(directoryReader);

    IndexService indexService = createIndex("test");
    indexService.mapperService().merge("test",
            new CompressedString(
                    PutMappingRequest.buildFromSimplifiedDef("test", "nested_field", "type=nested").string()),
            true);
    AggregationContext context = new AggregationContext(createSearchContext(indexService));

    AggregatorFactories.Builder builder = AggregatorFactories.builder();
    builder.add(new NestedAggregator.Factory("test", "nested_field"));
    AggregatorFactories factories = builder.build();
    Aggregator[] aggs = factories.createTopLevelAggregators(context);
    AggregationPhase.AggregationsCollector collector = new AggregationPhase.AggregationsCollector(
            Arrays.asList(aggs), context);
    // A regular search always exclude nested docs, so we use NonNestedDocsFilter.INSTANCE here (otherwise MatchAllDocsQuery would be sufficient)
    // We exclude root doc with uid type#2, this will trigger the bug if we don't reset the root doc when we process a new segment, because
    // root doc type#3 and root doc type#1 have the same segment docid
    searcher.search(new XConstantScoreQuery(new AndFilter(Arrays.asList(NonNestedDocsFilter.INSTANCE,
            new NotFilter(new TermFilter(new Term(UidFieldMapper.NAME, "type#2")))))), collector);
    collector.postCollection();

    Nested nested = (Nested) aggs[0].buildAggregation(0);
    // The bug manifests if 6 docs are returned, because currentRootDoc isn't reset the previous child docs from the first segment are emitted as hits.
    assertThat(nested.getDocCount(), equalTo(4L));

    directoryReader.close();
    directory.close();
}

/** Builds a nested sub document with the given uid and the "__nested_field" type marker. */
private static Document newNestedDoc(String uid) {
    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, uid, UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
    doc.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
    return doc;
}

/** Builds a root (non-nested) document with the given uid and type value "test". */
private static Document newRootDoc(String uid) {
    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, uid, UidFieldMapper.Defaults.FIELD_TYPE));
    doc.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
    return doc;
}

From source file:org.elasticsearch.search.aggregations.bucket.nested.NestedAggregatorTests.java

License:Apache License

/**
 * Regression test: the nested aggregator must reset its current root doc id when
 * moving to a new segment, otherwise child docs from the previous segment are
 * emitted again (the bug surfaces as a doc count of 6 instead of 4).
 */
@Test
public void testResetRootDocId() throws Exception {
    Directory directory = newDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(null);
    // Disable merging so each commit keeps its own segment; the bug only shows across segments.
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, iwc);

    List<Document> documents = new ArrayList<>();

    // Segment 1: root document type#1 with 3 nested sub docs.
    documents.add(createNestedDoc("type#1"));
    documents.add(createNestedDoc("type#1"));
    documents.add(createNestedDoc("type#1"));
    documents.add(createRootDoc("type#1"));
    indexWriter.addDocuments(documents);
    indexWriter.commit();

    documents.clear();
    // Segment 2: root documents type#2 and type#3, each with 1 nested sub doc.
    documents.add(createNestedDoc("type#2"));
    documents.add(createRootDoc("type#2"));
    indexWriter.addDocuments(documents);
    documents.clear();
    documents.add(createNestedDoc("type#3"));
    documents.add(createRootDoc("type#3"));
    indexWriter.addDocuments(documents);

    indexWriter.commit();
    indexWriter.close();

    DirectoryReader directoryReader = DirectoryReader.open(directory);
    directoryReader = ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(new Index("test"), 0));
    IndexSearcher searcher = new IndexSearcher(directoryReader);

    IndexService indexService = createIndex("test");
    indexService.mapperService().merge("test",
            new CompressedXContent(
                    PutMappingRequest.buildFromSimplifiedDef("test", "nested_field", "type=nested").string()),
            MapperService.MergeReason.MAPPING_UPDATE, false);
    SearchContext searchContext = createSearchContext(indexService);
    AggregationContext context = new AggregationContext(searchContext);

    AggregatorFactories.Builder builder = AggregatorFactories.builder();
    builder.addAggregator(new NestedAggregator.Factory("test", "nested_field"));
    AggregatorFactories factories = builder.build();
    searchContext.aggregations(new SearchContextAggregations(factories));
    Aggregator[] aggs = factories.createTopLevelAggregators(context);
    BucketCollector collector = BucketCollector.wrap(Arrays.asList(aggs));
    collector.preCollection();
    // A regular search always exclude nested docs, so we use NonNestedDocsFilter.INSTANCE here (otherwise MatchAllDocsQuery would be sufficient)
    // We exclude root doc with uid type#2, this will trigger the bug if we don't reset the root doc when we process a new segment, because
    // root doc type#3 and root doc type#1 have the same segment docid
    BooleanQuery.Builder bq = new BooleanQuery.Builder();
    bq.add(Queries.newNonNestedFilter(), Occur.MUST);
    bq.add(new TermQuery(new Term(UidFieldMapper.NAME, "type#2")), Occur.MUST_NOT);
    searcher.search(new ConstantScoreQuery(bq.build()), collector);
    collector.postCollection();

    Nested nested = (Nested) aggs[0].buildAggregation(0);
    // The bug manifests if 6 docs are returned, because currentRootDoc isn't reset the previous child docs from the first segment are emitted as hits.
    assertThat(nested.getDocCount(), equalTo(4L));

    directoryReader.close();
    directory.close();
}

/** Builds a nested sub document with the given uid and the "__nested_field" type marker. */
private static Document createNestedDoc(String uid) {
    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, uid, UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
    doc.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
    return doc;
}

/** Builds a root (non-nested) document with the given uid and type value "test". */
private static Document createRootDoc(String uid) {
    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, uid, UidFieldMapper.Defaults.FIELD_TYPE));
    doc.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
    return doc;
}

From source file:org.elasticsearch.search.fetch.innerhits.NestedChildrenFilterTest.java

License:Apache License

/**
 * Indexes random parent/child document blocks and verifies that NestedChildrenFilter
 * matches exactly the child documents belonging to each parent.
 */
@Test
public void testNestedChildrenFilter() throws Exception {
    final int parentCount = scaledRandomIntBetween(0, 32);
    final int maxChildren = scaledRandomIntBetween(8, 16);

    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    for (int parent = 0; parent < parentCount; parent++) {
        final int childCount = scaledRandomIntBetween(0, maxChildren);
        List<Document> block = new ArrayList<>(childCount + 1);
        for (int child = 0; child < childCount; child++) {
            Document childDoc = new Document();
            childDoc.add(new StringField("type", "child", Field.Store.NO));
            block.add(childDoc);
        }

        // Parent document goes last in the block; it records its child count for later verification.
        Document parentDocument = new Document();
        parentDocument.add(new StringField("type", "parent", Field.Store.NO));
        parentDocument.add(new IntField("num_child_docs", childCount, Field.Store.YES));
        block.add(parentDocument);
        writer.addDocuments(block);
    }

    IndexReader reader = writer.getReader();
    writer.close();

    IndexSearcher searcher = new IndexSearcher(reader);
    FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
    FixedBitSetFilterCache bitsetCache = new FixedBitSetFilterCache(new Index("test"),
            ImmutableSettings.EMPTY);
    FixedBitSetFilter parentFilter = bitsetCache
            .getFixedBitSetFilter(new TermFilter(new Term("type", "parent")));
    Filter childFilter = new TermFilter(new Term("type", "child"));
    int verifiedParents = 0;
    for (AtomicReaderContext leaf : reader.leaves()) {
        DocIdSetIterator parents = parentFilter.getDocIdSet(leaf, null).iterator();
        int parentDoc = parents.nextDoc();
        while (parentDoc != DocIdSetIterator.NO_MORE_DOCS) {
            int expectedChildren = leaf.reader().document(parentDoc).getField("num_child_docs")
                    .numericValue().intValue();
            hitContext.reset(null, leaf, parentDoc, reader);
            NestedChildrenFilter childrenFilter = new NestedChildrenFilter(parentFilter, childFilter,
                    hitContext);
            TotalHitCountCollector hitCounter = new TotalHitCountCollector();
            searcher.search(new ConstantScoreQuery(childrenFilter), hitCounter);
            assertThat(hitCounter.getTotalHits(), equalTo(expectedChildren));
            verifiedParents++;
            parentDoc = parents.nextDoc();
        }
    }
    assertThat(verifiedParents, equalTo(parentCount));
    reader.close();
    dir.close();
}

From source file:org.elasticsearch.search.fetch.innerhits.NestedChildrenFilterTests.java

License:Apache License

/**
 * Indexes random parent/child document blocks and verifies that NestedChildrenQuery
 * matches exactly the child documents belonging to each parent.
 */
@Test
public void testNestedChildrenFilter() throws Exception {
    final int parentCount = scaledRandomIntBetween(0, 32);
    final int maxChildren = scaledRandomIntBetween(8, 16);

    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    for (int parent = 0; parent < parentCount; parent++) {
        final int childCount = scaledRandomIntBetween(0, maxChildren);
        List<Document> block = new ArrayList<>(childCount + 1);
        for (int child = 0; child < childCount; child++) {
            Document childDoc = new Document();
            childDoc.add(new StringField("type", "child", Field.Store.NO));
            block.add(childDoc);
        }

        // Parent document goes last in the block; it records its child count for later verification.
        Document parentDocument = new Document();
        parentDocument.add(new StringField("type", "parent", Field.Store.NO));
        parentDocument.add(new IntField("num_child_docs", childCount, Field.Store.YES));
        block.add(parentDocument);
        writer.addDocuments(block);
    }

    IndexReader reader = writer.getReader();
    writer.close();

    IndexSearcher searcher = new IndexSearcher(reader);
    FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
    BitSetProducer parentFilter = new QueryBitSetProducer(new TermQuery(new Term("type", "parent")));
    Query childFilter = new TermQuery(new Term("type", "child"));
    int verifiedParents = 0;
    final Weight parentsWeight = searcher.createNormalizedWeight(new TermQuery(new Term("type", "parent")),
            false);
    for (LeafReaderContext leaf : reader.leaves()) {
        DocIdSetIterator parents = parentsWeight.scorer(leaf).iterator();
        int parentDoc = parents.nextDoc();
        while (parentDoc != DocIdSetIterator.NO_MORE_DOCS) {
            int expectedChildren = leaf.reader().document(parentDoc).getField("num_child_docs")
                    .numericValue().intValue();
            hitContext.reset(null, leaf, parentDoc, searcher);
            NestedChildrenQuery childrenQuery = new NestedChildrenQuery(parentFilter, childFilter,
                    hitContext);
            TotalHitCountCollector hitCounter = new TotalHitCountCollector();
            searcher.search(new ConstantScoreQuery(childrenQuery), hitCounter);
            assertThat(hitCounter.getTotalHits(), equalTo(expectedChildren));
            verifiedParents++;
            parentDoc = parents.nextDoc();
        }
    }
    assertThat(verifiedParents, equalTo(parentCount));
    reader.close();
    dir.close();
}