Example usage for org.apache.lucene.index FieldInfo getPointNumBytes

List of usage examples for org.apache.lucene.index FieldInfo getPointNumBytes

Introduction

In this page you can find the example usage for org.apache.lucene.index FieldInfo getPointNumBytes.

Prototype

public int getPointNumBytes() 

Source Link

Document

Returns the number of bytes per dimension.

Usage

From source file:com.vmware.xenon.services.common.FieldInfoCache.java

License:Open Source License

/**
 * Structural equality check between two {@link FieldInfo} instances, used to
 * decide whether a cached FieldInfo can stand in for another.
 *
 * <p>Compares field number, name, point dimension count and bytes, doc-values
 * type and generation, index options, and the payloads/vectors/norms flags.
 * NOTE(review): the doc-values type comparison was added here — without it two
 * fields differing only in {@code DocValuesType} compared equal. The
 * {@code attributes()} map is still not compared; confirm that is intentional
 * for the cache's reuse semantics.
 *
 * @param a first field info; must be non-null
 * @param b second field info; must be non-null
 * @return {@code true} when every compared property matches
 */
public static boolean equals(FieldInfo a, FieldInfo b) {
    return a.number == b.number && a.name.equals(b.name)
            && a.getPointDimensionCount() == b.getPointDimensionCount()
            && a.getPointNumBytes() == b.getPointNumBytes()
            && a.getDocValuesType() == b.getDocValuesType()
            && a.getDocValuesGen() == b.getDocValuesGen()
            && a.getIndexOptions() == b.getIndexOptions() && a.hasPayloads() == b.hasPayloads()
            && a.hasVectors() == b.hasVectors() && a.omitsNorms() == b.omitsNorms()
            && a.hasNorms() == b.hasNorms();
}

From source file:com.vmware.xenon.services.common.Lucene60FieldInfosFormatWithCache.java

License:Open Source License

@Override
public void write(Directory directory, SegmentInfo segmentInfo, String segmentSuffix, FieldInfos infos,
        IOContext context) throws IOException {
    // Serializes per-field metadata in the Lucene60 field-infos layout:
    // index header, field count, one record per field, checksum footer.
    // The byte layout below is the on-disk format — order matters.
    final String fileName = IndexFileNames.segmentFileName(segmentInfo.name, segmentSuffix, EXTENSION);
    try (IndexOutput output = directory.createOutput(fileName, context)) {
        CodecUtil.writeIndexHeader(output, Lucene60FieldInfosFormatWithCache.CODEC_NAME,
                Lucene60FieldInfosFormatWithCache.FORMAT_CURRENT, segmentInfo.getId(), segmentSuffix);
        output.writeVInt(infos.size());
        for (FieldInfo fi : infos) {
            // Fail fast on internally inconsistent field metadata before any bytes are written.
            fi.checkConsistency();

            output.writeString(fi.name);
            output.writeVInt(fi.number);

            // Boolean field properties packed as flag bits into a single byte.
            byte bits = 0x0;
            if (fi.hasVectors()) {
                bits |= STORE_TERMVECTOR;
            }
            if (fi.omitsNorms()) {
                bits |= OMIT_NORMS;
            }
            if (fi.hasPayloads()) {
                bits |= STORE_PAYLOADS;
            }
            output.writeByte(bits);

            output.writeByte(indexOptionsByte(fi.getIndexOptions()));

            // Doc-values type as one byte, followed by its generation and the attribute map.
            output.writeByte(docValuesByte(fi.getDocValuesType()));
            output.writeLong(fi.getDocValuesGen());
            output.writeMapOfStrings(fi.attributes());
            // Point metadata: bytes-per-dimension is only written when the field indexes points.
            int pointDimensionCount = fi.getPointDimensionCount();
            output.writeVInt(pointDimensionCount);
            if (pointDimensionCount != 0) {
                output.writeVInt(fi.getPointNumBytes());
            }
        }
        CodecUtil.writeFooter(output);
    }
}

From source file:org.apache.solr.uninverting.TestUninvertingReader.java

License:Apache License

public void testFieldInfos() throws IOException {
    // Verifies that UninvertingReader exposes the expected FieldInfo metadata
    // (doc-values type and point dimensions/bytes) for uninverted and
    // pass-through fields.
    Directory dir = newDirectory();
    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));

    // One document covering the field flavors under test: an indexed string,
    // a legacy numeric, a real doc-values field, a point field, and a
    // stored-only (not indexed) field.
    Document doc = new Document();
    BytesRef idBytes = new BytesRef("id");
    doc.add(new StringField("id", idBytes, Store.YES));
    doc.add(new LegacyIntField("int", 5, Store.YES));
    doc.add(new NumericDocValuesField("dv", 5));
    doc.add(new IntPoint("dint", 5));
    doc.add(new StoredField("stored", 5)); // not indexed
    iw.addDocument(doc);

    // Single segment so ir.leaves().get(0) covers everything.
    iw.forceMerge(1);
    iw.close();

    Map<String, Type> uninvertingMap = new HashMap<>();
    uninvertingMap.put("int", Type.LEGACY_INTEGER);
    uninvertingMap.put("dv", Type.LEGACY_INTEGER);
    uninvertingMap.put("dint", Type.INTEGER_POINT);

    DirectoryReader ir = UninvertingReader.wrap(DirectoryReader.open(dir), uninvertingMap);
    LeafReader leafReader = ir.leaves().get(0).reader();

    // Uninverted legacy field: numeric doc values, no point metadata.
    FieldInfo intFInfo = leafReader.getFieldInfos().fieldInfo("int");
    assertEquals(DocValuesType.NUMERIC, intFInfo.getDocValuesType());
    assertEquals(0, intFInfo.getPointDimensionCount());
    assertEquals(0, intFInfo.getPointNumBytes());

    // Uninverted point field: numeric doc values plus 1D / 4-byte (int) point metadata.
    FieldInfo dintFInfo = leafReader.getFieldInfos().fieldInfo("dint");
    assertEquals(DocValuesType.NUMERIC, dintFInfo.getDocValuesType());
    assertEquals(1, dintFInfo.getPointDimensionCount());
    assertEquals(4, dintFInfo.getPointNumBytes());

    // Field with real doc values keeps its native type.
    FieldInfo dvFInfo = leafReader.getFieldInfos().fieldInfo("dv");
    assertEquals(DocValuesType.NUMERIC, dvFInfo.getDocValuesType());

    // Stored-only field is not uninverted.
    FieldInfo storedFInfo = leafReader.getFieldInfos().fieldInfo("stored");
    assertEquals(DocValuesType.NONE, storedFInfo.getDocValuesType());

    TestUtil.checkReader(ir);
    ir.close();
    dir.close();
}

From source file:org.apache.solr.uninverting.UninvertingReader.java

License:Apache License

/**
 * Create a new UninvertingReader with the specified mapping.
 * <p>
 * Expert: This should almost never be used. Use {@link #wrap(DirectoryReader, Map)}
 * instead.
 *
 * @lucene.internal
 */
public UninvertingReader(LeafReader in, Map<String, Type> mapping) {
    super(in);
    this.mapping = mapping;

    // Rebuild the field infos: fields eligible for uninverting are advertised
    // with a synthetic doc-values type; everything else passes through as-is.
    final ArrayList<FieldInfo> wrapped = new ArrayList<>();
    for (FieldInfo info : in.getFieldInfos()) {
        DocValuesType dvType = info.getDocValuesType();
        // Only fields without native doc values are candidates for uninverting.
        final Type target = (dvType == DocValuesType.NONE) ? mapping.get(info.name) : null;
        if (target != null) {
            final boolean usesPoints = target == Type.INTEGER_POINT || target == Type.LONG_POINT
                    || target == Type.FLOAT_POINT || target == Type.DOUBLE_POINT;
            if (usesPoints) {
                // Point-backed types need indexed point data to uninvert from.
                if (info.getPointDimensionCount() == 0) {
                    continue;
                }
            } else if (info.getIndexOptions() == IndexOptions.NONE) {
                // All other types need an inverted index to uninvert from.
                continue;
            }
            // Map the requested uninverting type onto the doc-values type we expose.
            switch (target) {
            case INTEGER_POINT:
            case LONG_POINT:
            case FLOAT_POINT:
            case DOUBLE_POINT:
            case LEGACY_INTEGER:
            case LEGACY_LONG:
            case LEGACY_FLOAT:
            case LEGACY_DOUBLE:
                dvType = DocValuesType.NUMERIC;
                break;
            case BINARY:
                dvType = DocValuesType.BINARY;
                break;
            case SORTED:
                dvType = DocValuesType.SORTED;
                break;
            case SORTED_SET_BINARY:
            case SORTED_SET_INTEGER:
            case SORTED_SET_FLOAT:
            case SORTED_SET_LONG:
            case SORTED_SET_DOUBLE:
                dvType = DocValuesType.SORTED_SET;
                break;
            default:
                throw new AssertionError();
            }
        }
        wrapped.add(new FieldInfo(info.name, info.number, info.hasVectors(), info.omitsNorms(),
                info.hasPayloads(), info.getIndexOptions(), dvType, info.getDocValuesGen(),
                info.attributes(), info.getPointDimensionCount(), info.getPointNumBytes()));
    }
    fieldInfos = new FieldInfos(wrapped.toArray(new FieldInfo[wrapped.size()]));
}

From source file:perf.DiskUsage.java

License:Apache License

/**
 * Renders a short space-separated summary of the index features enabled on a
 * field: index options, payloads, norms, term vectors, point metadata, and
 * doc-values type.
 */
static String features(FieldInfo fi) {
    StringBuilder out = new StringBuilder();
    IndexOptions opts = fi.getIndexOptions();
    if (opts != IndexOptions.NONE) {
        // Keep only the last component of the enum name, lower-cased,
        // e.g. DOCS_AND_FREQS -> "freqs".
        String[] parts = opts.toString().split("_");
        out.append(parts[parts.length - 1].toLowerCase()).append(" ");
    }
    if (fi.hasPayloads()) {
        out.append("payloads ");
    }
    if (fi.hasNorms()) {
        out.append("norms ");
    }
    if (fi.hasVectors()) {
        out.append("vectors ");
    }
    if (fi.getPointDimensionCount() != 0) {
        // e.g. "4bytes/1D " for a single-dimension int point field.
        out.append(fi.getPointNumBytes()).append("bytes/").append(fi.getPointDimensionCount()).append("D ");
    }
    DocValuesType dvType = fi.getDocValuesType();
    if (dvType != DocValuesType.NONE) {
        out.append(dvType.toString().toLowerCase());
    }
    return out.toString().trim();
}