Example usage for org.apache.lucene.document StoredField StoredField

List of usage examples for org.apache.lucene.document StoredField StoredField

Introduction

On this page you can find example usages of the org.apache.lucene.document StoredField constructor.

Prototype

public StoredField(String name, double value) 

Source Link

Document

Create a stored-only field with the given double value.

Usage

From source file:org.elasticsearch.index.query.PercolatorQueryTests.java

License:Apache License

/**
 * Registers {@code query} under {@code id} and indexes a matching percolator document.
 *
 * @param id          percolator entry id; also used to build the stored uid field
 * @param query       query to register and to extract indexable terms from
 * @param extraFields even-length list of name/value pairs, each indexed as an unstored StringField
 * @throws IOException if the IndexWriter fails to add the document
 */
void addPercolatorQuery(String id, Query query, String... extraFields) throws IOException {
    queries.put(id, query);
    ParseContext.Document document = new ParseContext.Document();
    ExtractQueryTermsService.extractQueryTerms(query, document, EXTRACTED_TERMS_FIELD_NAME,
            UNKNOWN_QUERY_FIELD_NAME, EXTRACTED_TERMS_FIELD_TYPE);
    document.add(new StoredField(UidFieldMapper.NAME, Uid.createUid(PercolatorFieldMapper.TYPE_NAME, id)));
    assert extraFields.length % 2 == 0 : "extraFields must be name/value pairs";
    // Step by two so pair indices are explicit, instead of mutating i inside the body (extraFields[++i]).
    for (int i = 0; i < extraFields.length; i += 2) {
        document.add(new StringField(extraFields[i], extraFields[i + 1], Field.Store.NO));
    }
    indexWriter.addDocument(document);
}

From source file:org.elasticsearch.percolator.PercolatorQueryTests.java

License:Apache License

/**
 * Registers {@code query} under {@code id} (keyed as a BytesRef) and indexes a
 * matching percolator document.
 *
 * @param id          percolator entry id; also used to build the stored uid field
 * @param query       query to register and to extract indexable terms from
 * @param extraFields even-length list of name/value pairs, each indexed as an unstored StringField
 * @throws IOException if the IndexWriter fails to add the document
 */
void addPercolatorQuery(String id, Query query, String... extraFields) throws IOException {
    queries.put(new BytesRef(id), query);
    ParseContext.Document document = new ParseContext.Document();
    ExtractQueryTermsService.extractQueryTerms(query, document, EXTRACTED_TERMS_FIELD_NAME,
            UNKNOWN_QUERY_FIELD_NAME, EXTRACTED_TERMS_FIELD_TYPE);
    document.add(new StoredField(UidFieldMapper.NAME, Uid.createUid(PercolatorService.TYPE_NAME, id)));
    assert extraFields.length % 2 == 0 : "extraFields must be name/value pairs";
    // Step by two so pair indices are explicit, instead of mutating i inside the body (extraFields[++i]).
    for (int i = 0; i < extraFields.length; i += 2) {
        document.add(new StringField(extraFields[i], extraFields[i + 1], Field.Store.NO));
    }
    indexWriter.addDocument(document);
}

From source file:org.elasticsearch.percolator.PercolatorServiceTests.java

License:Apache License

/**
 * Adds {@code query} to the registry and writes a document carrying the extracted
 * query terms plus a stored uid field identifying the percolator entry.
 */
void addPercolatorQuery(String id, Query query, IndexWriter writer, PercolatorQueriesRegistry registry)
        throws IOException {
    registry.getPercolateQueries().put(new BytesRef(id), query);

    // untokenized, docs-only field type for the extracted terms
    FieldType termsFieldType = new FieldType();
    termsFieldType.setTokenized(false);
    termsFieldType.setIndexOptions(IndexOptions.DOCS);
    termsFieldType.freeze();

    ParseContext.Document doc = new ParseContext.Document();
    ExtractQueryTermsService.extractQueryTerms(query, doc,
            PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME,
            PercolatorFieldMapper.UNKNOWN_QUERY_FULL_FIELD_NAME, termsFieldType);
    doc.add(new StoredField(UidFieldMapper.NAME, Uid.createUid(PercolatorService.TYPE_NAME, id)));
    writer.addDocument(doc);
}

From source file:org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregatorTests.java

License:Apache License

/**
 * Indexes ten documents whose text is "common " followed by either "odd " or
 * "even ", and stores an equivalent _source JSON payload for each.
 */
private void addMixedTextDocs(TextFieldType textFieldType, IndexWriter w) throws IOException {
    for (int i = 0; i < 10; i++) {
        // NOTE(review): even i gets "odd " and odd i gets "even " — looks inverted,
        // but preserved as-is since assertions elsewhere may rely on this mapping.
        String text = "common " + (i % 2 == 0 ? "odd " : "even ");

        Document doc = new Document();
        doc.add(new Field("text", text, textFieldType));
        doc.add(new StoredField("_source", new BytesRef("{ \"text\" : \"" + text + "\" }")));
        w.addDocument(doc);
    }
}

From source file:org.elasticsearch.search.aggregations.bucket.significant.SignificantTextAggregatorTests.java

License:Apache License

/**
 * Uses the significant text aggregation to find the keywords in text fields.
 *
 * Indexes ten documents: even i produce "common odd "; odd i produce
 * "common even separator&lt;i&gt; " followed by six repetitions of "duplicate".
 * With {@code filterDuplicateText(true)}, searching "odd" must surface only
 * "odd", and searching "even" must surface "even" while the repeated
 * "duplicate" tokens and the per-document "separator&lt;i&gt;" tokens are
 * filtered out of the significant terms.
 */
public void testSignificance() throws IOException {
    TextFieldType textFieldType = new TextFieldType();
    textFieldType.setName("text");
    textFieldType
            .setIndexAnalyzer(new NamedAnalyzer("my_analyzer", AnalyzerScope.GLOBAL, new StandardAnalyzer()));

    IndexWriterConfig indexWriterConfig = newIndexWriterConfig();
    indexWriterConfig.setMaxBufferedDocs(100);
    indexWriterConfig.setRAMBufferSizeMB(100); // flush on open to have a single segment
    try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, indexWriterConfig)) {
        for (int i = 0; i < 10; i++) {
            Document doc = new Document();
            StringBuilder text = new StringBuilder("common ");
            // NOTE(review): even i gets "odd" and odd i gets "even" — looks inverted,
            // but the assertions below are written against this mapping.
            if (i % 2 == 0) {
                text.append("odd ");
            } else {
                text.append(
                        "even separator" + i + " duplicate duplicate duplicate duplicate duplicate duplicate ");
            }

            doc.add(new Field("text", text.toString(), textFieldType));
            String json = "{ \"text\" : \"" + text.toString() + "\"," + " \"json_only_field\" : \""
                    + text.toString() + "\"" + " }";
            doc.add(new StoredField("_source", new BytesRef(json)));
            w.addDocument(doc);
        }

        SignificantTextAggregationBuilder sigAgg = new SignificantTextAggregationBuilder("sig_text", "text")
                .filterDuplicateText(true);
        if (randomBoolean()) {
            // randomly exercise the alternate source field, which carries identical text
            sigAgg.sourceFieldNames(Arrays.asList(new String[] { "json_only_field" }));
        }
        SamplerAggregationBuilder aggBuilder = new SamplerAggregationBuilder("sampler").subAggregation(sigAgg);

        try (IndexReader reader = DirectoryReader.open(w)) {
            assertEquals("test expects a single segment", 1, reader.leaves().size());
            IndexSearcher searcher = new IndexSearcher(reader);

            // Search "odd" which should have no duplication
            Sampler sampler = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), aggBuilder,
                    textFieldType);
            SignificantTerms terms = sampler.getAggregations().get("sig_text");

            assertNull(terms.getBucketByKey("even"));
            assertNull(terms.getBucketByKey("duplicate"));
            assertNull(terms.getBucketByKey("common"));
            assertNotNull(terms.getBucketByKey("odd"));

            // Search "even" which will have duplication
            sampler = searchAndReduce(searcher, new TermQuery(new Term("text", "even")), aggBuilder,
                    textFieldType);
            terms = sampler.getAggregations().get("sig_text");

            assertNull(terms.getBucketByKey("odd"));
            // duplicate-text filtering must drop the repeated token and the unique separators
            assertNull(terms.getBucketByKey("duplicate"));
            assertNull(terms.getBucketByKey("common"));
            assertNull(terms.getBucketByKey("separator2"));
            assertNull(terms.getBucketByKey("separator4"));
            assertNull(terms.getBucketByKey("separator6"));

            assertNotNull(terms.getBucketByKey("even"));

        }
    }
}

From source file:org.elasticsearch.search.aggregations.bucket.significant.SignificantTextAggregatorTests.java

License:Apache License

/**
 * Regression test for array-valued source fields: running significant_text over
 * documents whose _source holds JSON arrays must complete without the internal
 * exception reported in https://github.com/elastic/elasticsearch/issues/25029.
 * No buckets are asserted; finishing cleanly is the success criterion.
 */
public void testSignificanceOnTextArrays() throws IOException {
    TextFieldType textFieldType = new TextFieldType();
    textFieldType.setName("text");
    textFieldType
            .setIndexAnalyzer(new NamedAnalyzer("my_analyzer", AnalyzerScope.GLOBAL, new StandardAnalyzer()));

    IndexWriterConfig config = newIndexWriterConfig();
    config.setMaxBufferedDocs(100);
    config.setRAMBufferSizeMB(100); // flush on open to have a single segment
    try (Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, config)) {
        // every document carries the same array-valued _source payload
        String json = "{ \"text\" : [\"foo\",\"foo\"], \"title\" : [\"foo\", \"foo\"]}";
        for (int i = 0; i < 10; i++) {
            Document doc = new Document();
            doc.add(new Field("text", "foo", textFieldType));
            doc.add(new StoredField("_source", new BytesRef(json)));
            writer.addDocument(doc);
        }

        SignificantTextAggregationBuilder sigAgg = new SignificantTextAggregationBuilder("sig_text", "text");
        sigAgg.sourceFieldNames(Arrays.asList(new String[] { "title", "text" }));
        try (IndexReader reader = DirectoryReader.open(writer)) {
            assertEquals("test expects a single segment", 1, reader.leaves().size());
            IndexSearcher searcher = new IndexSearcher(reader);
            searchAndReduce(searcher, new TermQuery(new Term("text", "foo")), sigAgg, textFieldType);
        }
    }
}

From source file:org.elasticsearch.xpack.core.security.authz.accesscontrol.FieldSubsetReaderTests.java

License:Open Source License

/**
 * Verifies that FieldSubsetReader hides a stored String field outside the
 * allowed set while keeping the permitted one readable.
 */
public void testStoredFieldsString() throws Exception {
    Directory dir = newDirectory();
    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));

    // index one document carrying two stored string fields
    Document doc = new Document();
    doc.add(new StoredField("fieldA", "testA"));
    doc.add(new StoredField("fieldB", "testB"));
    iw.addDocument(doc);

    // wrap the reader so that only "fieldA" is visible
    CharacterRunAutomaton allowed = new CharacterRunAutomaton(Automata.makeString("fieldA"));
    DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), allowed);

    // the filtered document exposes exactly the permitted field
    Document visible = ir.document(0);
    assertEquals(1, visible.getFields().size());
    assertEquals("testA", visible.get("fieldA"));

    TestUtil.checkReader(ir);
    IOUtils.close(ir, iw, dir);
}

From source file:org.elasticsearch.xpack.core.security.authz.accesscontrol.FieldSubsetReaderTests.java

License:Open Source License

/**
 * Verifies that FieldSubsetReader hides a stored binary field outside the
 * allowed set while keeping the permitted one readable.
 */
public void testStoredFieldsBinary() throws Exception {
    Directory dir = newDirectory();
    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));

    // index one document carrying two stored binary fields
    Document doc = new Document();
    doc.add(new StoredField("fieldA", new BytesRef("testA")));
    doc.add(new StoredField("fieldB", new BytesRef("testB")));
    iw.addDocument(doc);

    // wrap the reader so that only "fieldA" is visible
    CharacterRunAutomaton allowed = new CharacterRunAutomaton(Automata.makeString("fieldA"));
    DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), allowed);

    // the filtered document exposes exactly the permitted field
    Document visible = ir.document(0);
    assertEquals(1, visible.getFields().size());
    assertEquals(new BytesRef("testA"), visible.getBinaryValue("fieldA"));

    TestUtil.checkReader(ir);
    IOUtils.close(ir, iw, dir);
}

From source file:org.elasticsearch.xpack.core.security.authz.accesscontrol.FieldSubsetReaderTests.java

License:Open Source License

/**
 * Verifies that FieldSubsetReader hides a stored int field outside the
 * allowed set while keeping the permitted one readable.
 */
public void testStoredFieldsInt() throws Exception {
    Directory dir = newDirectory();
    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));

    // index one document carrying two stored int fields
    Document doc = new Document();
    doc.add(new StoredField("fieldA", 1));
    doc.add(new StoredField("fieldB", 2));
    iw.addDocument(doc);

    // wrap the reader so that only "fieldA" is visible
    CharacterRunAutomaton allowed = new CharacterRunAutomaton(Automata.makeString("fieldA"));
    DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), allowed);

    // the filtered document exposes exactly the permitted field
    Document visible = ir.document(0);
    assertEquals(1, visible.getFields().size());
    assertEquals(1, visible.getField("fieldA").numericValue());

    TestUtil.checkReader(ir);
    IOUtils.close(ir, iw, dir);
}

From source file:org.elasticsearch.xpack.core.security.authz.accesscontrol.FieldSubsetReaderTests.java

License:Open Source License

/**
 * Verifies that FieldSubsetReader hides a stored long field outside the
 * allowed set while keeping the permitted one readable.
 */
public void testStoredFieldsLong() throws Exception {
    Directory dir = newDirectory();
    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));

    // index one document carrying two stored long fields
    Document doc = new Document();
    doc.add(new StoredField("fieldA", 1L));
    doc.add(new StoredField("fieldB", 2L));
    iw.addDocument(doc);

    // wrap the reader so that only "fieldA" is visible
    CharacterRunAutomaton allowed = new CharacterRunAutomaton(Automata.makeString("fieldA"));
    DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), allowed);

    // the filtered document exposes exactly the permitted field
    Document visible = ir.document(0);
    assertEquals(1, visible.getFields().size());
    assertEquals(1L, visible.getField("fieldA").numericValue());

    TestUtil.checkReader(ir);
    IOUtils.close(ir, iw, dir);
}