Example usage for org.apache.lucene.index IndexWriter updateDocument

Introduction

This page collects example usages of org.apache.lucene.index.IndexWriter#updateDocument.

Prototype

private long updateDocument(final DocumentsWriterDeleteQueue.Node<?> delNode,
            Iterable<? extends IndexableField> doc) throws IOException 
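
Note: the prototype above is the private worker method inside IndexWriter; every example on this page calls the public overload updateDocument(Term term, Iterable<? extends IndexableField> doc), which deletes all documents containing the given term and then adds the new document as one atomic operation. Below is a minimal, self-contained sketch of that call. It is illustrative only: the field names and values are invented here, and it assumes Lucene 6.x-8.x, where IndexWriterConfig takes just an analyzer and RAMDirectory still exists (on Lucene 9+ substitute ByteBuffersDirectory).

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class UpdateDocumentSketch {
    public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory(); // in-memory index, enough for a sketch
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));

        // Index the first version of the document, keyed by its "id" term.
        Document doc = new Document();
        doc.add(new StringField("id", "42", Field.Store.YES));
        doc.add(new TextField("body", "first version", Field.Store.YES));
        writer.addDocument(doc);

        // Replace it: the delete-by-term and the add happen atomically,
        // so a reader opened afterwards sees exactly one "id:42" document.
        Document updated = new Document();
        updated.add(new StringField("id", "42", Field.Store.YES));
        updated.add(new TextField("body", "second version", Field.Store.YES));
        writer.updateDocument(new Term("id", "42"), updated);

        try (DirectoryReader reader = DirectoryReader.open(writer)) {
            System.out.println("docs visible: " + reader.numDocs()); // prints 1
        }
        writer.close();
        dir.close();
    }
}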

Usage

From source file:org.elasticsearch.indices.IndicesRequestCacheTests.java

License:Apache License

public void testCacheDifferentReaders() throws Exception {
    IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY);
    AtomicBoolean indexShard = new AtomicBoolean(true);
    ShardRequestCache requestCacheStats = new ShardRequestCache();
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig());

    writer.addDocument(newDoc(0, "foo"));
    DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer),
            new ShardId("foo", "bar", 1));
    TermQueryBuilder termQuery = new TermQueryBuilder("id", "0");

    // replace the "id:0" document: delete-by-term and add in one atomic step
    writer.updateDocument(new Term("id", "0"), newDoc(0, "bar"));
    DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer),
            new ShardId("foo", "bar", 1));

    // initial cache
    TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0);
    BytesReference value = cache.getOrCompute(entity, reader, termQuery.buildAsBytes());
    assertEquals("foo", value.streamInput().readString());
    assertEquals(0, requestCacheStats.stats().getHitCount());
    assertEquals(1, requestCacheStats.stats().getMissCount());
    assertEquals(0, requestCacheStats.stats().getEvictions());
    assertFalse(entity.loadedFromCache());
    assertEquals(1, cache.count());
    assertTrue(requestCacheStats.stats().getMemorySize().bytesAsInt() > value.length());
    final int cacheSize = requestCacheStats.stats().getMemorySize().bytesAsInt();
    assertEquals(1, cache.numRegisteredCloseListeners());

    // cache the second
    TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0);
    value = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes());
    assertEquals("bar", value.streamInput().readString());
    assertEquals(0, requestCacheStats.stats().getHitCount());
    assertEquals(2, requestCacheStats.stats().getMissCount());
    assertEquals(0, requestCacheStats.stats().getEvictions());
    assertFalse(secondEntity.loadedFromCache());
    assertEquals(2, cache.count());
    assertTrue(requestCacheStats.stats().getMemorySize().bytesAsInt() > cacheSize + value.length());
    assertEquals(2, cache.numRegisteredCloseListeners());

    secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0);
    value = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes());
    assertEquals("bar", value.streamInput().readString());
    assertEquals(1, requestCacheStats.stats().getHitCount());
    assertEquals(2, requestCacheStats.stats().getMissCount());
    assertEquals(0, requestCacheStats.stats().getEvictions());
    assertTrue(secondEntity.loadedFromCache());
    assertEquals(2, cache.count());

    entity = new TestEntity(requestCacheStats, reader, indexShard, 0);
    value = cache.getOrCompute(entity, reader, termQuery.buildAsBytes());
    assertEquals("foo", value.streamInput().readString());
    assertEquals(2, requestCacheStats.stats().getHitCount());
    assertEquals(2, requestCacheStats.stats().getMissCount());
    assertEquals(0, requestCacheStats.stats().getEvictions());
    assertTrue(entity.loadedFromCache());
    assertEquals(2, cache.count());

    // closing the reader and cleaning the cache evicts its entry but doesn't change already-returned entities
    reader.close();
    cache.cleanCache();
    assertEquals(2, requestCacheStats.stats().getMissCount());
    assertEquals(0, requestCacheStats.stats().getEvictions());
    assertTrue(entity.loadedFromCache());
    assertTrue(secondEntity.loadedFromCache());
    assertEquals(1, cache.count());
    assertEquals(cacheSize, requestCacheStats.stats().getMemorySize().bytesAsInt());
    assertEquals(1, cache.numRegisteredCloseListeners());

    // release
    if (randomBoolean()) {
        secondReader.close();
    } else {
        indexShard.set(false); // closed shard but reader is still open
        cache.clear(secondEntity);
    }
    cache.cleanCache();
    assertEquals(2, requestCacheStats.stats().getMissCount());
    assertEquals(0, requestCacheStats.stats().getEvictions());
    assertTrue(entity.loadedFromCache());
    assertTrue(secondEntity.loadedFromCache());
    assertEquals(0, cache.count());
    assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt());

    IOUtils.close(secondReader, writer, dir, cache);
    assertEquals(0, cache.numRegisteredCloseListeners());
}
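
The newDoc(int, String) helper used above is not part of this listing. A hypothetical stand-in, consistent with how the tests look up documents by the "id" term and read back a stored value, could look like this (the "value" field name is a guess, not from the original source, and the usual org.apache.lucene.document imports are assumed):

private static Document newDoc(int id, String value) {
    Document doc = new Document();
    // hypothetical: the id term used for updateDocument and the TermQueryBuilder lookups
    doc.add(new StringField("id", Integer.toString(id), Field.Store.YES));
    // hypothetical: a stored field holding the value ("foo", "bar", ...) the tests read back
    doc.add(new StringField("value", value, Field.Store.YES));
    return doc;
}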

From source file:org.elasticsearch.indices.IndicesRequestCacheTests.java

License:Apache License

public void testEviction() throws Exception {
    final ByteSizeValue size;
    {
        IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY);
        AtomicBoolean indexShard = new AtomicBoolean(true);
        ShardRequestCache requestCacheStats = new ShardRequestCache();
        Directory dir = newDirectory();
        IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig());

        writer.addDocument(newDoc(0, "foo"));
        DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer),
                new ShardId("foo", "bar", 1));
        TermQueryBuilder termQuery = new TermQueryBuilder("id", "0");
        TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0);

        writer.updateDocument(new Term("id", "0"), newDoc(0, "bar"));
        DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer),
                new ShardId("foo", "bar", 1));
        TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0);

        BytesReference value1 = cache.getOrCompute(entity, reader, termQuery.buildAsBytes());
        assertEquals("foo", value1.streamInput().readString());
        BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes());
        assertEquals("bar", value2.streamInput().readString());
        size = requestCacheStats.stats().getMemorySize();
        IOUtils.close(reader, secondReader, writer, dir, cache);
    }
    IndicesRequestCache cache = new IndicesRequestCache(Settings.builder()
            .put(IndicesRequestCache.INDICES_CACHE_QUERY_SIZE.getKey(), size.bytes() + 1 + "b").build());
    AtomicBoolean indexShard = new AtomicBoolean(true);
    ShardRequestCache requestCacheStats = new ShardRequestCache();
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig());

    writer.addDocument(newDoc(0, "foo"));
    DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer),
            new ShardId("foo", "bar", 1));
    TermQueryBuilder termQuery = new TermQueryBuilder("id", "0");
    TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0);

    writer.updateDocument(new Term("id", "0"), newDoc(0, "bar"));
    DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer),
            new ShardId("foo", "bar", 1));
    TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0);

    writer.updateDocument(new Term("id", "0"), newDoc(0, "baz"));
    DirectoryReader thirdReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer),
            new ShardId("foo", "bar", 1));
    TestEntity thirdEntity = new TestEntity(requestCacheStats, thirdReader, indexShard, 0);

    BytesReference value1 = cache.getOrCompute(entity, reader, termQuery.buildAsBytes());
    assertEquals("foo", value1.streamInput().readString());
    BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes());
    assertEquals("bar", value2.streamInput().readString());
    logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize());
    BytesReference value3 = cache.getOrCompute(thirdEntity, thirdReader, termQuery.buildAsBytes());
    assertEquals("baz", value3.streamInput().readString());
    assertEquals(2, cache.count());
    assertEquals(1, requestCacheStats.stats().getEvictions());
    IOUtils.close(reader, secondReader, thirdReader, writer, dir, cache);
}

From source file:org.elasticsearch.indices.IndicesRequestCacheTests.java

License:Apache License

public void testClearAllEntityIdentity() throws Exception {
    IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY);
    AtomicBoolean indexShard = new AtomicBoolean(true);

    ShardRequestCache requestCacheStats = new ShardRequestCache();
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig());

    writer.addDocument(newDoc(0, "foo"));
    DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer),
            new ShardId("foo", "bar", 1));
    TermQueryBuilder termQuery = new TermQueryBuilder("id", "0");
    TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0);

    writer.updateDocument(new Term("id", "0"), newDoc(0, "bar"));
    DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer),
            new ShardId("foo", "bar", 1));
    TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0);

    writer.updateDocument(new Term("id", "0"), newDoc(0, "baz"));
    DirectoryReader thirdReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer),
            new ShardId("foo", "bar", 1));
    AtomicBoolean differentIdentity = new AtomicBoolean(true);
    TestEntity thirdEntity = new TestEntity(requestCacheStats, thirdReader, differentIdentity, 0);

    BytesReference value1 = cache.getOrCompute(entity, reader, termQuery.buildAsBytes());
    assertEquals("foo", value1.streamInput().readString());
    BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes());
    assertEquals("bar", value2.streamInput().readString());
    logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize());
    BytesReference value3 = cache.getOrCompute(thirdEntity, thirdReader, termQuery.buildAsBytes());
    assertEquals("baz", value3.streamInput().readString());
    assertEquals(3, cache.count());
    final long hitCount = requestCacheStats.stats().getHitCount();
    // clear all entries for the indexShard identity even though it's still open
    cache.clear(randomFrom(entity, secondEntity));
    cache.cleanCache();
    assertEquals(1, cache.count());
    // the third entry was not invalidated since it has a different identity
    value3 = cache.getOrCompute(thirdEntity, thirdReader, termQuery.buildAsBytes());
    assertEquals(hitCount + 1, requestCacheStats.stats().getHitCount());
    assertEquals("baz", value3.streamInput().readString());

    IOUtils.close(reader, secondReader, thirdReader, writer, dir, cache);

}

From source file:org.elasticsearch.termvectors.AbstractTermVectorTests.java

License:Apache License

protected DirectoryReader indexDocsWithLucene(TestDoc[] testDocs) throws IOException {

    Map<String, Analyzer> mapping = new HashMap<String, Analyzer>();
    for (TestFieldSetting field : testDocs[0].fieldSettings) {
        if (field.storedPayloads) {
            mapping.put(field.name, new Analyzer() {
                @Override
                protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
                    Tokenizer tokenizer = new StandardTokenizer(Version.CURRENT.luceneVersion, reader);
                    TokenFilter filter = new LowerCaseFilter(Version.CURRENT.luceneVersion, tokenizer);
                    filter = new TypeAsPayloadTokenFilter(filter);
                    return new TokenStreamComponents(tokenizer, filter);
                }

            });
        }
    }
    PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(
            new StandardAnalyzer(Version.CURRENT.luceneVersion), mapping);

    Directory dir = new RAMDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(Version.CURRENT.luceneVersion, wrapper);

    conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);

    for (TestDoc doc : testDocs) {
        Document d = new Document();
        d.add(new Field("id", doc.id, StringField.TYPE_STORED));
        for (int i = 0; i < doc.fieldContent.length; i++) {
            FieldType type = new FieldType(TextField.TYPE_STORED);
            TestFieldSetting fieldSetting = doc.fieldSettings[i];

            type.setStoreTermVectorOffsets(fieldSetting.storedOffset);
            type.setStoreTermVectorPayloads(fieldSetting.storedPayloads);
            type.setStoreTermVectorPositions(
                    fieldSetting.storedPositions || fieldSetting.storedPayloads || fieldSetting.storedOffset);
            type.setStoreTermVectors(true);
            type.freeze();
            d.add(new Field(fieldSetting.name, doc.fieldContent[i], type));
        }
        writer.updateDocument(new Term("id", doc.id), d);
        writer.commit();
    }
    writer.close();

    return DirectoryReader.open(dir);
}

From source file:org.elasticsearch.test.integration.termvectors.GetTermVectorTests.java

License:Apache License

private void writeStandardTermVector(TermVectorResponse outResponse) throws IOException {

    Directory dir = FSDirectory.open(new File("/tmp/foo"));
    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT,
            new StandardAnalyzer(TEST_VERSION_CURRENT));
    conf.setOpenMode(OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);
    FieldType type = new FieldType(TextField.TYPE_STORED);
    type.setStoreTermVectorOffsets(true);
    type.setStoreTermVectorPayloads(false);
    type.setStoreTermVectorPositions(true);
    type.setStoreTermVectors(true);
    type.freeze();
    Document d = new Document();
    d.add(new Field("id", "abc", StringField.TYPE_STORED));
    d.add(new Field("title", "the1 quick brown fox jumps over  the1 lazy dog", type));
    d.add(new Field("desc", "the1 quick brown fox jumps over  the1 lazy dog", type));

    writer.updateDocument(new Term("id", "abc"), d);
    writer.commit();
    writer.close();
    DirectoryReader dr = DirectoryReader.open(dir);
    IndexSearcher s = new IndexSearcher(dr);
    TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
    ScoreDoc[] scoreDocs = search.scoreDocs;
    int doc = scoreDocs[0].doc;
    Fields fields = dr.getTermVectors(doc);
    EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
    outResponse.setFields(fields, null, flags, fields);

}

From source file:org.elasticsearch.test.integration.termvectors.GetTermVectorTests.java

License:Apache License

private Fields buildWithLuceneAndReturnFields(String docId, String[] fields, String[] content,
        boolean[] withPositions, boolean[] withOffsets, boolean[] withPayloads) throws IOException {
    assert (fields.length == withPayloads.length);
    assert (content.length == withPayloads.length);
    assert (withPositions.length == withPayloads.length);
    assert (withOffsets.length == withPayloads.length);

    Map<String, Analyzer> mapping = new HashMap<String, Analyzer>();
    for (int i = 0; i < withPayloads.length; i++) {
        if (withPayloads[i]) {
            mapping.put(fields[i], new Analyzer() {
                @Override
                protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
                    Tokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
                    TokenFilter filter = new LowerCaseFilter(TEST_VERSION_CURRENT, tokenizer);
                    filter = new TypeAsPayloadTokenFilter(filter);
                    return new TokenStreamComponents(tokenizer, filter);
                }

            });
        }
    }
    PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(TEST_VERSION_CURRENT),
            mapping);

    Directory dir = FSDirectory.open(new File("/tmp/foo"));
    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, wrapper);

    conf.setOpenMode(OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);

    Document d = new Document();
    for (int i = 0; i < fields.length; i++) {
        d.add(new Field("id", docId, StringField.TYPE_STORED));
        FieldType type = new FieldType(TextField.TYPE_STORED);
        type.setStoreTermVectorOffsets(withOffsets[i]);
        type.setStoreTermVectorPayloads(withPayloads[i]);
        type.setStoreTermVectorPositions(withPositions[i] || withOffsets[i] || withPayloads[i]);
        type.setStoreTermVectors(true);
        type.freeze();
        d.add(new Field(fields[i], content[i], type));
        writer.updateDocument(new Term("id", docId), d);
        writer.commit();
    }
    writer.close();

    DirectoryReader dr = DirectoryReader.open(dir);
    IndexSearcher s = new IndexSearcher(dr);
    TopDocs search = s.search(new TermQuery(new Term("id", docId)), 1);

    ScoreDoc[] scoreDocs = search.scoreDocs;
    assert (scoreDocs.length == 1);
    int doc = scoreDocs[0].doc;
    Fields returnFields = dr.getTermVectors(doc);
    return returnFields;

}

From source file:org.elasticsearch.test.unit.common.lucene.uid.UidFieldTests.java

License:Apache License

@Test
public void testUidField() throws Exception {
    IndexWriter writer = new IndexWriter(new RAMDirectory(),
            new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));

    DirectoryReader directoryReader = DirectoryReader.open(writer, true);
    AtomicReader atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
    MatcherAssert.assertThat(UidField.loadVersion(atomicReader.getContext(), new Term("_uid", "1")),
            equalTo(-1L));

    Document doc = new Document();
    doc.add(new Field("_uid", "1", UidFieldMapper.Defaults.FIELD_TYPE));
    writer.addDocument(doc);
    directoryReader = DirectoryReader.openIfChanged(directoryReader);
    atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
    assertThat(UidField.loadVersion(atomicReader.getContext(), new Term("_uid", "1")), equalTo(-2L));
    assertThat(UidField.loadDocIdAndVersion(atomicReader.getContext(), new Term("_uid", "1")).version,
            equalTo(-2L));

    doc = new Document();
    doc.add(new UidField("_uid", "1", 1));
    writer.updateDocument(new Term("_uid", "1"), doc);
    directoryReader = DirectoryReader.openIfChanged(directoryReader);
    atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
    assertThat(UidField.loadVersion(atomicReader.getContext(), new Term("_uid", "1")), equalTo(1L));
    assertThat(UidField.loadDocIdAndVersion(atomicReader.getContext(), new Term("_uid", "1")).version,
            equalTo(1L));

    doc = new Document();
    UidField uid = new UidField("_uid", "1", 2);
    doc.add(uid);
    writer.updateDocument(new Term("_uid", "1"), doc);
    directoryReader = DirectoryReader.openIfChanged(directoryReader);
    atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
    assertThat(UidField.loadVersion(atomicReader.getContext(), new Term("_uid", "1")), equalTo(2L));
    assertThat(UidField.loadDocIdAndVersion(atomicReader.getContext(), new Term("_uid", "1")).version,
            equalTo(2L));

    // test reuse of uid field
    doc = new Document();
    uid.version(3);
    doc.add(uid);
    writer.updateDocument(new Term("_uid", "1"), doc);
    directoryReader = DirectoryReader.openIfChanged(directoryReader);
    atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
    assertThat(UidField.loadVersion(atomicReader.getContext(), new Term("_uid", "1")), equalTo(3L));
    assertThat(UidField.loadDocIdAndVersion(atomicReader.getContext(), new Term("_uid", "1")).version,
            equalTo(3L));

    writer.deleteDocuments(new Term("_uid", "1"));
    directoryReader = DirectoryReader.openIfChanged(directoryReader);
    atomicReader = SlowCompositeReaderWrapper.wrap(directoryReader);
    assertThat(UidField.loadVersion(atomicReader.getContext(), new Term("_uid", "1")), equalTo(-1L));
    assertThat(UidField.loadDocIdAndVersion(atomicReader.getContext(), new Term("_uid", "1")), nullValue());
}

From source file:org.elasticsearch.test.unit.common.lucene.uid.VersionsTests.java

License:Apache License

@Test
public void testVersions() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
    DirectoryReader directoryReader = DirectoryReader.open(writer, true);
    MatcherAssert.assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")),
            equalTo(Versions.NOT_FOUND));

    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
    writer.addDocument(doc);
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")),
            equalTo(Versions.NOT_SET));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version,
            equalTo(Versions.NOT_SET));

    doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
    doc.add(new NumericDocValuesField(UidFieldMapper.VERSION, 1));
    writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(1L));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version,
            equalTo(1L));

    doc = new Document();
    Field uid = new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE);
    Field version = new NumericDocValuesField(UidFieldMapper.VERSION, 2);
    doc.add(uid);
    doc.add(version);
    writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(2L));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version,
            equalTo(2L));

    // test reuse of uid field
    doc = new Document();
    version.setLongValue(3);
    doc.add(uid);
    doc.add(version);
    writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);

    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(3L));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version,
            equalTo(3L));

    writer.deleteDocuments(new Term(UidFieldMapper.NAME, "1"));
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")),
            equalTo(Versions.NOT_FOUND));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), nullValue());
    directoryReader.close();
    writer.close();
    dir.close();
}

From source file:org.elasticsearch.test.unit.termvectors.TermVectorUnitTests.java

License:Apache License

private void writeEmptyTermVector(TermVectorResponse outResponse) throws IOException {

    Directory dir = new RAMDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT,
            new StandardAnalyzer(TEST_VERSION_CURRENT));
    conf.setOpenMode(OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);
    FieldType type = new FieldType(TextField.TYPE_STORED);
    type.setStoreTermVectorOffsets(true);
    type.setStoreTermVectorPayloads(false);
    type.setStoreTermVectorPositions(true);
    type.setStoreTermVectors(true);
    type.freeze();
    Document d = new Document();
    d.add(new Field("id", "abc", StringField.TYPE_STORED));

    writer.updateDocument(new Term("id", "abc"), d);
    writer.commit();
    writer.close();
    DirectoryReader dr = DirectoryReader.open(dir);
    IndexSearcher s = new IndexSearcher(dr);
    TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
    ScoreDoc[] scoreDocs = search.scoreDocs;
    int doc = scoreDocs[0].doc;
    Fields fields = dr.getTermVectors(doc);
    EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
    outResponse.setFields(fields, null, flags, fields);
    outResponse.setExists(true);

}

From source file:org.elasticsearch.test.unit.termvectors.TermVectorUnitTests.java

License:Apache License

private void writeStandardTermVector(TermVectorResponse outResponse) throws IOException {

    Directory dir = FSDirectory.open(new File("/tmp/foo"));
    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT,
            new StandardAnalyzer(TEST_VERSION_CURRENT));

    conf.setOpenMode(OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);
    FieldType type = new FieldType(TextField.TYPE_STORED);
    type.setStoreTermVectorOffsets(true);
    type.setStoreTermVectorPayloads(false);
    type.setStoreTermVectorPositions(true);
    type.setStoreTermVectors(true);
    type.freeze();
    Document d = new Document();
    d.add(new Field("id", "abc", StringField.TYPE_STORED));
    d.add(new Field("title", "the1 quick brown fox jumps over  the1 lazy dog", type));
    d.add(new Field("desc", "the1 quick brown fox jumps over  the1 lazy dog", type));

    writer.updateDocument(new Term("id", "abc"), d);
    writer.commit();
    writer.close();
    DirectoryReader dr = DirectoryReader.open(dir);
    IndexSearcher s = new IndexSearcher(dr);
    TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
    ScoreDoc[] scoreDocs = search.scoreDocs;
    int doc = scoreDocs[0].doc;
    Fields termVectors = dr.getTermVectors(doc);
    EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
    outResponse.setFields(termVectors, null, flags, termVectors);

}