Example usage for org.apache.lucene.document Field setLongValue

List of usage examples for org.apache.lucene.document Field setLongValue

Introduction

On this page you can find usage examples for org.apache.lucene.document.Field#setLongValue.

Prototype

public void setLongValue(long value) 

Source Link

Document

Expert: change the value of this field.

Usage

From source file:DVBench.java

License:Apache License

/**
 * Indexes 400k synthetic long values at the given bit width and benchmarks
 * four retrieval strategies against them: native numeric doc values,
 * uninverted field cache, and two vlong-in-binary encodings (variable and
 * fixed length).
 *
 * @param bpv bits per value of the generated longs (64 triggers full-range extremes)
 */
static void doBench(int bpv) throws Exception {
    File indexPath = new File("/data/indices/dvbench");
    indexPath.mkdirs();
    Directory dir = FSDirectory.open(indexPath);
    IndexWriterConfig iwc = new IndexWriterConfig(null);
    iwc.setOpenMode(OpenMode.CREATE);
    iwc.setMergeScheduler(new SerialMergeScheduler());
    iwc.setMergePolicy(new LogDocMergePolicy());
    iwc.setMaxBufferedDocs(25000);
    IndexWriter writer = new IndexWriter(dir, iwc);

    MyRandom rng = new MyRandom();
    final int numDocs = 400000;

    // One reusable Document/Field set; values are overwritten per doc.
    Document doc = new Document();
    Field dv = new NumericDocValuesField("dv", 0);
    Field inv = new LongField("inv", 0, Field.Store.NO);
    Field boxed = new BinaryDocValuesField("boxed", new BytesRef(8));
    Field boxed2 = new BinaryDocValuesField("boxed2", new BytesRef(8));
    doc.add(dv);
    doc.add(inv);
    doc.add(boxed);
    doc.add(boxed2);

    for (int i = 0; i < numDocs; i++) {
        // Periodically force extreme values so BlockPackedWriter cannot
        // collapse whole blocks into a narrower representation.
        final long value;
        if (i % 8192 == 0) {
            value = (bpv == 64) ? Long.MIN_VALUE : 0;
        } else if (i % 8192 == 1) {
            value = (bpv == 64) ? Long.MAX_VALUE : (1L << bpv) - 1;
        } else {
            value = rng.nextLong(bpv);
        }
        dv.setLongValue(value);
        inv.setLongValue(value);
        box(value, boxed.binaryValue());
        box(value, boxed2.binaryValue());
        boxed2.binaryValue().length = (bpv + 7) / 8; // pad to fixed length
        writer.addDocument(doc);
    }
    writer.close();

    // Benchmark 1: native numeric doc values.
    String description = "dv (bpv=" + bpv + ")";
    DirectoryReader reader = DirectoryReader.open(dir);
    IndexSearcher searcher = new IndexSearcher(reader);
    searcher.setQueryCache(null); // don't bench the cache

    int hash = 0;
    // warmup
    hash += search(description, searcher, "dv", 300, true);
    hash += search(description, searcher, "dv", 300, false);

    // Benchmark 2: uninverting the indexed long field (FieldCache-style).
    Map<String, UninvertingReader.Type> mapping = Collections.singletonMap("inv", UninvertingReader.Type.LONG);
    DirectoryReader uninv = UninvertingReader.wrap(reader, mapping);
    IndexSearcher searcher2 = new IndexSearcher(uninv);
    searcher2.setQueryCache(null); // don't bench the cache

    description = "fc (bpv=" + bpv + ")";
    // warmup
    hash += search(description, searcher2, "inv", 300, true);
    hash += search(description, searcher2, "inv", 300, false);

    // Benchmarks 3 & 4: vlongs boxed inside binary doc values.
    DirectoryReader boxedReader = new BinaryAsVLongReader(reader);
    IndexSearcher searcher3 = new IndexSearcher(boxedReader);
    searcher3.setQueryCache(null); // don't bench the cache
    description = "boxed (bpv=" + bpv + ")";
    // warmup
    hash += search(description, searcher3, "boxed", 300, true);
    hash += search(description, searcher3, "boxed", 300, false);

    description = "boxed fixed-length (bpv=" + bpv + ")";
    // warmup
    hash += search(description, searcher3, "boxed2", 300, true);
    hash += search(description, searcher3, "boxed2", 300, false);

    // Consume the accumulated hash so the JIT can't dead-code-eliminate the searches.
    if (hash == 3) {
        // wont happen
        System.out.println("hash=" + hash);
    }
    reader.close();
    dir.close();
}

From source file:com.bewsia.script.LuceneHandler.java

License:Open Source License

/**
 * Populates a Lucene document from an entity according to its schema string.
 * The schema is a '|'-separated list of alternating (kind, fieldName) pairs;
 * an incomplete trailing pair is silently ignored.
 */
protected void write(SEntity entity, Document doc) {
    String schema = entity.getSchema();
    if (schema == null)
        schema = "";
    String[] parts = schema.split("\\|");
    // i+1 < parts.length alone guarantees both pair members exist.
    for (int i = 0; i + 1 < parts.length; i += 2) {
        String kind = parts[i];
        String fieldName = parts[i + 1];
        if (SEntity.STRING.equalsIgnoreCase(kind)) {
            doc.add(new Field(fieldName, entity.getString(fieldName), Store.YES, Index.NOT_ANALYZED_NO_NORMS));
        } else if (SEntity.DOUBLE.equalsIgnoreCase(kind)) {
            NumericField f = new NumericField(fieldName, Store.YES, true);
            f.setDoubleValue(entity.getDouble(fieldName));
            doc.add(f);
        } else if (SEntity.FLOAT.equalsIgnoreCase(kind)) {
            NumericField f = new NumericField(fieldName, Store.YES, true);
            f.setFloatValue(entity.getFloat(fieldName));
            doc.add(f);
        } else if (SEntity.INTEGER.equalsIgnoreCase(kind)) {
            NumericField f = new NumericField(fieldName, Store.YES, true);
            f.setIntValue(entity.getInteger(fieldName));
            doc.add(f);
        } else if (SEntity.LONG.equalsIgnoreCase(kind)) {
            NumericField f = new NumericField(fieldName, Store.YES, true);
            f.setLongValue(entity.getLong(fieldName));
            doc.add(f);
        } else if (SEntity.ANALYZED.equalsIgnoreCase(kind)) {
            doc.add(new Field(fieldName, entity.getString(fieldName), Store.YES, Index.ANALYZED));
        }
    }
}

From source file:com.vmware.xenon.services.common.LuceneIndexDocumentHelper.java

License:Open Source License

/**
 * Returns the cached {@link StoredField} for {@code name}, creating it on
 * first use, and updates it to hold {@code value}. Reusing one field instance
 * per name avoids re-allocating a field object on every indexing request.
 */
private Field getAndSetStoredField(String name, Long value) {
    Field field = this.storedFields.computeIfAbsent(name, key -> new StoredField(name, value));
    field.setLongValue(value);
    return field;
}

From source file:de.tudarmstadt.lt.lm.app.GenerateNgramIndex.java

License:Apache License

/**
 * Builds the Lucene ngram index from a joined-counts file (optionally gzipped,
 * UTF-8, tab-separated: {@code ngram \t count [\t type:v0,v1,... ]*}) and writes
 * per-cardinality occurrence statistics to {@code __sum_ngrams__}.
 * <p>
 * One reusable Document/Field set is overwritten per line (standard Lucene
 * bulk-indexing pattern). Malformed lines are logged and skipped.
 *
 * @param ngram_joined_counts_file input counts file; ".gz" suffix enables gzip decoding
 * @throws IOException if the index directory or output files cannot be written
 */
public void create_ngram_index(File ngram_joined_counts_file) throws IOException {
    File index_dir = new File(_index_dir, "ngram");
    if (index_dir.exists()) {
        LOG.info("Ngram index already exists in directory '{}'.", index_dir.getAbsolutePath());
        if (_overwrite) {
            LOG.info("Overwriting index '{}',", index_dir);
            // BUGFIX: File.delete() fails silently on a non-empty directory, so the
            // stale index was never actually removed; delete the whole tree instead.
            FileUtils.deleteDirectory(index_dir);
        } else
            return;
    }
    index_dir.mkdirs();

    Analyzer analyzer = new KeywordAnalyzer();
    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_4_9, analyzer);
    iwc.setOpenMode(OpenMode.CREATE);
    // Use the configured percentage of the available total memory as RAM buffer.
    double total_mem_mb = (double) Runtime.getRuntime().maxMemory() / 1e6;
    double percentage_ram_buffer = Properties.ramBufferPercentage();
    if (percentage_ram_buffer > 0) {
        double percentage_ram_buffer_mb = total_mem_mb * percentage_ram_buffer;
        LOG.info(String.format("Setting ram buffer size to %.2f MB (%.2f%% from %.2f MB)",
                percentage_ram_buffer_mb, percentage_ram_buffer * 100, total_mem_mb));
        iwc.setRAMBufferSizeMB(percentage_ram_buffer_mb);
    }

    Directory directory = new MMapDirectory(index_dir);
    IndexWriter writer_ngram = new IndexWriter(directory, iwc);

    InputStream in = new FileInputStream(ngram_joined_counts_file);
    if (ngram_joined_counts_file.getName().endsWith(".gz"))
        in = new GZIPInputStream(in);
    LineIterator iter = new LineIterator(new BufferedReader(new InputStreamReader(in, "UTF-8")));

    // Reusable document and fields, overwritten for every input line.
    Document doc = new Document();
    Field f_ngram = new StringField("ngram", "", Store.YES);
    doc.add(f_ngram);
    Field f_n = new IntField("cardinality", 0, Store.YES);
    doc.add(f_n);
    Field f_word = new StringField("word", "", Store.YES);
    doc.add(f_word);
    Field f_hist = new StringField("history", "", Store.YES);
    doc.add(f_hist);
    Field f_lower = new StringField("lower", "", Store.YES);
    doc.add(f_lower);
    Field f_count = new StoredField("num", 0L);
    doc.add(f_count);

    // Follower / preceder / follower-preceder count fields: slot 0 is the sum,
    // slots 1..3 hold the N1..N3 counts-of-counts.
    Field[] f_follow = new Field[4];
    f_follow[0] = new StoredField("nf_s", 0L);
    doc.add(f_follow[0]);
    f_follow[1] = new StoredField("nf_N1", 0L);
    doc.add(f_follow[1]);
    f_follow[2] = new StoredField("nf_N2", 0L);
    doc.add(f_follow[2]);
    f_follow[3] = new StoredField("nf_N3", 0L);
    doc.add(f_follow[3]);
    Field[] f_precede = new Field[4];
    f_precede[0] = new StoredField("np_s", 0L);
    doc.add(f_precede[0]);
    f_precede[1] = new StoredField("np_N1", 0L);
    doc.add(f_precede[1]);
    f_precede[2] = new StoredField("np_N2", 0L);
    doc.add(f_precede[2]);
    f_precede[3] = new StoredField("np_N3", 0L);
    doc.add(f_precede[3]);
    Field[] f_followerprecede = new Field[4];
    f_followerprecede[0] = new StoredField("nfp_s", 0L);
    doc.add(f_followerprecede[0]);
    f_followerprecede[1] = new StoredField("nfp_N1", 0L);
    doc.add(f_followerprecede[1]);
    f_followerprecede[2] = new StoredField("nfp_N2", 0L);
    doc.add(f_followerprecede[2]);
    f_followerprecede[3] = new StoredField("nfp_N3", 0L);
    doc.add(f_followerprecede[3]);

    // N[n][c]: per cardinality n, counts of ngrams seen exactly c times
    // (c in 1..4, 5 = five-or-more, 0 = at least once); S[n]: total token count.
    Long[][] N = new Long[][] { { 0L, 0L, 0L, 0L, 0L, 0L } };
    Long[] S = new Long[] { 0L };
    long c = 0;
    while (iter.hasNext()) {
        if (++c % 100000 == 0)
            LOG.info("Adding {}'th ngram.", c);
        String line = iter.next();
        try {
            String[] splits = de.tudarmstadt.lt.utilities.StringUtils.rtrim(line).split("\t");
            String ngram_str = splits[0];
            if (de.tudarmstadt.lt.utilities.StringUtils.trim(ngram_str).isEmpty()) {
                LOG.warn("Ngram is empty, skipping line {}: '{}' (file '{}').", c, line,
                        ngram_joined_counts_file);
                continue;
            }

            List<String> ngram = Arrays.asList(ngram_str.split(" "));
            long num = Long.parseLong(splits[1]);
            int n = ngram.size();

            f_ngram.setStringValue(ngram_str);
            f_n.setIntValue(n);
            f_word.setStringValue(ngram.get(ngram.size() - 1));
            f_hist.setStringValue(StringUtils.join(ngram.subList(0, ngram.size() - 1), " "));
            f_lower.setStringValue(StringUtils.join(ngram.subList(1, ngram.size()), " "));
            f_count.setLongValue(num);

            // Reset all follower/preceder counts before applying this line's specs.
            for (int j = 0; j < f_follow.length; j++) {
                f_follow[j].setLongValue(0L);
                f_precede[j].setLongValue(0L);
                f_followerprecede[j].setLongValue(0L);
            }

            // Columns 3..5 each optionally carry one "type:v0,v1,..." spec
            // (n_f / n_p / n_fp in any of the three slots).
            if (splits.length > 2 && !splits[2].isEmpty())
                applyCountValues(splits[2], f_follow, f_precede, f_followerprecede);
            if (splits.length > 3 && !splits[3].isEmpty())
                applyCountValues(splits[3], f_follow, f_precede, f_followerprecede);
            if (splits.length > 4 && !splits[4].isEmpty())
                applyCountValues(splits[4], f_follow, f_precede, f_followerprecede);

            writer_ngram.addDocument(doc);

            // Grow the statistics arrays on demand up to the current cardinality.
            while (N.length <= n) {
                N = ArrayUtils.getConcatinatedArray(N, new Long[][] { { 0L, 0L, 0L, 0L, 0L, 0L } });
                S = ArrayUtils.getConcatinatedArray(S, new Long[] { 0L });
            }

            if (num == 1L)
                N[n][1]++;
            else if (num == 2L)
                N[n][2]++;
            else if (num == 3L)
                N[n][3]++;
            else if (num == 4L)
                N[n][4]++;
            else
                N[n][5]++;
            N[n][0]++;
            S[n] += num;

        } catch (Exception e) {
            LOG.error("Could not process line '{}' in file '{}:{}', malformed line.", line,
                    ngram_joined_counts_file, c, e);
        }
    }
    // Close the line iterator (and with it the underlying reader/stream).
    iter.close();

    writer_ngram.forceMergeDeletes();
    writer_ngram.commit();
    writer_ngram.close();
    directory.close();

    StringBuilder b = new StringBuilder(String.format(
            "#%n# Number of times where an ngram occurred: %n#  at_least_once, exactly_once, exactly_twice, exactly_three_times, exactly_four_times, five_times_or_more.%n#%nmax_n=%d%nmax_c=6%n",
            N.length - 1));
    for (int n = 1; n < N.length; n++)
        b.append(String.format("n%d=%s%n", n, StringUtils.join(N[n], ',')));
    for (int n = 1; n < S.length; n++)
        b.append(String.format("s%d=%d%n", n, S[n]));
    FileUtils.writeStringToFile(new File(_index_dir, "__sum_ngrams__"), b.toString());

}

/**
 * Parses one {@code "type:v0,v1,..."} count spec and writes the values into the
 * field array selected by type ({@code n_f} = follower, {@code n_p} = preceder,
 * {@code n_fp} = follower-preceder); unknown types are ignored. Extracted from
 * three previously duplicated parsing blocks. Malformed specs throw and are
 * handled by the caller's per-line catch.
 */
private static void applyCountValues(String spec, Field[] f_follow, Field[] f_precede,
        Field[] f_followerprecede) {
    String[] parts = spec.split(":");
    String type = parts[0];
    String[] count_values = parts[1].split(",");
    Field[] target;
    if ("n_f".equals(type))
        target = f_follow;
    else if ("n_p".equals(type))
        target = f_precede;
    else if ("n_fp".equals(type))
        target = f_followerprecede;
    else
        return;
    for (int i = 0; i < count_values.length; i++)
        target[i].setLongValue(Long.parseLong(count_values[i]));
}

From source file:org.apache.solr.legacy.TestLegacyField.java

License:Apache License

/** A LegacyLongField must accept only long values, stored or not. */
public void testLegacyLongField() throws Exception {
    Field[] fields = {
            new LegacyLongField("foo", 5L, Field.Store.NO),
            new LegacyLongField("foo", 5L, Field.Store.YES),
    };

    for (Field f : fields) {
        // every setter of the wrong type must throw ...
        trySetByteValue(f);
        trySetBytesValue(f);
        trySetBytesRefValue(f);
        trySetDoubleValue(f);
        trySetIntValue(f);
        trySetFloatValue(f);
        f.setLongValue(6); // ... while the long setter succeeds
        trySetReaderValue(f);
        trySetShortValue(f);
        trySetStringValue(f);
        trySetTokenStreamValue(f);

        assertEquals(6L, f.numericValue().longValue());
    }
}

From source file:org.apache.solr.legacy.TestLegacyField.java

License:Apache License

/** Asserts that a field of a non-long type rejects setLongValue. */
private void trySetLongValue(Field f) {
    expectThrows(IllegalArgumentException.class, () -> f.setLongValue(Long.MAX_VALUE));
}

From source file:org.apache.solr.uninverting.TestFieldCacheVsDocValues.java

License:Apache License

/**
 * Verifies that the uninverted FieldCache view and native doc values agree on
 * which documents carry a value, across random commits, deletions and merges.
 */
private void doTestMissingVsFieldCache(LongProducer longs) throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, conf);
    Field id = new StringField("id", "", Field.Store.NO);
    Field indexed = newStringField("indexed", "", Field.Store.NO);
    Field dv = new NumericDocValuesField("dv", 0);

    // Index more than 256 docs so that codecs optimizing for <= 256 distinct
    // values still exercise every storage layout.
    int numDocs = atLeast(300);
    assert numDocs > 256;
    for (int i = 0; i < numDocs; i++) {
        id.setStringValue(Integer.toString(i));
        long value = longs.next();
        indexed.setStringValue(Long.toString(value));
        dv.setLongValue(value);
        Document doc = new Document();
        doc.add(id);
        // leave both value fields out of roughly 1/4 of the docs
        if (random().nextInt(4) > 0) {
            doc.add(indexed);
            doc.add(dv);
        }
        iw.addDocument(doc);
        if (random().nextInt(31) == 0) {
            iw.commit();
        }
    }

    // randomly delete up to ~10% of the documents
    int numDeletions = random().nextInt(numDocs / 10);
    for (int i = 0; i < numDeletions; i++) {
        iw.deleteDocuments(new Term("id", Integer.toString(random().nextInt(numDocs))));
    }

    // merge so that at least one resulting segment has more than 256 values
    iw.forceMerge(numDocs / 256);

    iw.close();

    // both "docs with field" views must match on every leaf
    DirectoryReader ir = DirectoryReader.open(dir);
    for (LeafReaderContext ctx : ir.leaves()) {
        LeafReader leaf = ctx.reader();
        Bits expected = FieldCache.DEFAULT.getDocsWithField(leaf, "indexed", null);
        Bits actual = FieldCache.DEFAULT.getDocsWithField(leaf, "dv", null);
        assertEquals(expected, actual);
    }
    ir.close();
    dir.close();
}

From source file:org.elasticsearch.common.lucene.uid.VersionsTests.java

License:Apache License

@Test
public void testVersions() throws Exception {
    Directory dir = newDirectory();//  ww w .j a  v  a2s. c  om
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
    DirectoryReader directoryReader = DirectoryReader.open(writer, true);
    MatcherAssert.assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")),
            equalTo(Versions.NOT_FOUND));

    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
    writer.addDocument(doc);
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")),
            equalTo(Versions.NOT_SET));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version,
            equalTo(Versions.NOT_SET));

    doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
    doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 1));
    writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(1l));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version,
            equalTo(1l));

    doc = new Document();
    Field uid = new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE);
    Field version = new NumericDocValuesField(VersionFieldMapper.NAME, 2);
    doc.add(uid);
    doc.add(version);
    writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(2l));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version,
            equalTo(2l));

    // test reuse of uid field
    doc = new Document();
    version.setLongValue(3);
    doc.add(uid);
    doc.add(version);
    writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);

    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(3l));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version,
            equalTo(3l));

    writer.deleteDocuments(new Term(UidFieldMapper.NAME, "1"));
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")),
            equalTo(Versions.NOT_FOUND));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), nullValue());
    directoryReader.close();
    writer.close();
    dir.close();
}

From source file:org.elasticsearch.common.lucene.uid.VersionsTests.java

License:Apache License

@Test
public void testMergingOldIndices() throws Exception {
    final IndexWriterConfig iwConf = new IndexWriterConfig(Lucene.VERSION, new KeywordAnalyzer());
    iwConf.setMergePolicy(new IndexUpgraderMergePolicy(iwConf.getMergePolicy()));
    final Directory dir = newDirectory();
    final IndexWriter iw = new IndexWriter(dir, iwConf);

    // 1st segment, no _version
    Document document = new Document();
    // Add a dummy field (enough to trigger #3237)
    document.add(new StringField("a", "b", Store.NO));
    StringField uid = new StringField(UidFieldMapper.NAME, "1", Store.YES);
    document.add(uid);/*w  w w . j av a2s. co  m*/
    iw.addDocument(document);
    uid.setStringValue("2");
    iw.addDocument(document);
    iw.commit();

    // 2nd segment, old layout
    document = new Document();
    UidField uidAndVersion = new UidField("3", 3L);
    document.add(uidAndVersion);
    iw.addDocument(document);
    uidAndVersion.uid = "4";
    uidAndVersion.version = 4L;
    iw.addDocument(document);
    iw.commit();

    // 3rd segment new layout
    document = new Document();
    uid.setStringValue("5");
    Field version = new NumericDocValuesField(VersionFieldMapper.NAME, 5L);
    document.add(uid);
    document.add(version);
    iw.addDocument(document);
    uid.setStringValue("6");
    version.setLongValue(6L);
    iw.addDocument(document);
    iw.commit();

    final Map<String, Long> expectedVersions = ImmutableMap.<String, Long>builder().put("1", 0L).put("2", 0L)
            .put("3", 0L).put("4", 4L).put("5", 5L).put("6", 6L).build();

    // Force merge and check versions
    iw.forceMerge(1);
    final AtomicReader ir = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(iw.getDirectory()));
    final NumericDocValues versions = ir.getNumericDocValues(VersionFieldMapper.NAME);
    assertThat(versions, notNullValue());
    for (int i = 0; i < ir.maxDoc(); ++i) {
        final String uidValue = ir.document(i).get(UidFieldMapper.NAME);
        final long expectedVersion = expectedVersions.get(uidValue);
        assertThat(versions.get(i), equalTo(expectedVersion));
    }

    iw.close();
    assertThat(IndexWriter.isLocked(iw.getDirectory()), is(false));
    ir.close();
    dir.close();
}

From source file:org.elasticsearch.test.unit.common.lucene.uid.VersionsTests.java

License:Apache License

@Test
public void testVersions() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
    DirectoryReader directoryReader = DirectoryReader.open(writer, true);
    // Term is immutable, so one instance can address doc "1" throughout.
    final Term uidTerm = new Term(UidFieldMapper.NAME, "1");

    // no document yet -> NOT_FOUND
    MatcherAssert.assertThat(Versions.loadVersion(directoryReader, uidTerm), equalTo(Versions.NOT_FOUND));

    // document without a version doc value -> NOT_SET
    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
    writer.addDocument(doc);
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, uidTerm), equalTo(Versions.NOT_SET));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, uidTerm).version, equalTo(Versions.NOT_SET));

    // update with an explicit version doc value
    doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
    doc.add(new NumericDocValuesField(UidFieldMapper.VERSION, 1));
    writer.updateDocument(uidTerm, doc);
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, uidTerm), equalTo(1L));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, uidTerm).version, equalTo(1L));

    // fresh field instances, version 2
    doc = new Document();
    Field uid = new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE);
    Field version = new NumericDocValuesField(UidFieldMapper.VERSION, 2);
    doc.add(uid);
    doc.add(version);
    writer.updateDocument(uidTerm, doc);
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, uidTerm), equalTo(2L));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, uidTerm).version, equalTo(2L));

    // test reuse of uid field
    doc = new Document();
    version.setLongValue(3);
    doc.add(uid);
    doc.add(version);
    writer.updateDocument(uidTerm, doc);

    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, uidTerm), equalTo(3L));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, uidTerm).version, equalTo(3L));

    // deleting the document restores NOT_FOUND
    writer.deleteDocuments(uidTerm);
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, uidTerm), equalTo(Versions.NOT_FOUND));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, uidTerm), nullValue());
    directoryReader.close();
    writer.close();
    dir.close();
}