Example usage for org.apache.lucene.index LeafReader getBinaryDocValues

List of usage examples for org.apache.lucene.index LeafReader getBinaryDocValues

Introduction

On this page you can find example usages of org.apache.lucene.index LeafReader getBinaryDocValues.

Prototype

public abstract BinaryDocValues getBinaryDocValues(String field) throws IOException;

Source Link

Document

Returns BinaryDocValues for this field, or null if no binary doc values were indexed for this field.

Usage

From source file:com.b2international.index.lucene.DocValuesStringIndexField.java

License:Apache License

@Override
public BinaryDocValues getDocValues(LeafReader reader) throws IOException {
    // Delegate straight to the reader; may be null if the field has no binary doc values.
    final BinaryDocValues docValues = reader.getBinaryDocValues(fieldName());
    return docValues;
}

From source file:com.qwazr.search.field.BinaryDocValuesType.java

License:Apache License

@Override
public final ValueConverter getConverter(final LeafReader reader) throws IOException {
    // Use a binary doc values converter when the field has binary doc values,
    // otherwise fall back to the parent implementation.
    final BinaryDocValues docValues = reader.getBinaryDocValues(fieldName);
    return docValues != null ? new ValueConverter.BinaryDVConverter(docValues) : super.getConverter(reader);
}

From source file:com.qwazr.search.field.ValueConverter.java

License:Apache License

/**
 * Builds a {@link ValueConverter} for the given field based on its doc values type.
 *
 * @param fieldDef the field definition used to pick numeric converter flavours
 * @param dvReader the leaf reader to pull doc values from
 * @param fieldInfo the field's index metadata; may be null
 * @return a converter, or null when the field is absent, has no doc values,
 *         or its doc values type has no converter implementation
 * @throws IOException if reading doc values fails, or the doc values type is unknown
 */
static final ValueConverter newConverter(FieldDefinition fieldDef, LeafReader dvReader, FieldInfo fieldInfo)
        throws IOException {
    if (fieldInfo == null)
        return null;
    DocValuesType type = fieldInfo.getDocValuesType();
    if (type == null)
        return null;
    switch (type) {
    case BINARY:
        BinaryDocValues binaryDocValue = dvReader.getBinaryDocValues(fieldInfo.name);
        if (binaryDocValue == null)
            return null;
        return new BinaryDVConverter(binaryDocValue);
    case SORTED:
        SortedDocValues sortedDocValues = dvReader.getSortedDocValues(fieldInfo.name);
        if (sortedDocValues == null)
            return null;
        return new SortedDVConverter(sortedDocValues);
    case NONE:
        break;
    case NUMERIC:
        NumericDocValues numericDocValues = dvReader.getNumericDocValues(fieldInfo.name);
        if (numericDocValues == null)
            return null;
        return newNumericConverter(fieldDef, numericDocValues);
    case SORTED_NUMERIC:
        SortedNumericDocValues sortedNumericDocValues = dvReader.getSortedNumericDocValues(fieldInfo.name);
        if (sortedNumericDocValues == null)
            return null;
        return newSortedNumericConverter(fieldDef, sortedNumericDocValues);
    case SORTED_SET:
        // NOTE(review): no SortedSetDVConverter exists yet. The original code fetched
        // the sorted-set doc values and returned null regardless of the result, so the
        // lookup was dead code; we simply report the type as unconvertible.
        break;
    default:
        throw new IOException("Unsupported doc value type: " + type + " for field: " + fieldInfo.name);
    }
    return null;
}

From source file:org.apache.solr.search.SolrDocumentFetcher.java

License:Apache License

/**
 * This will fetch and add the docValues fields to a given SolrDocument/SolrInputDocument
 *
 * @param doc/*from  w ww .  j av a 2 s.com*/
 *          A SolrDocument or SolrInputDocument instance where docValues will be added
 * @param docid
 *          The lucene docid of the document to be populated
 * @param fields
 *          The list of docValues fields to be decorated
 */
public void decorateDocValueFields(@SuppressWarnings("rawtypes") SolrDocumentBase doc, int docid,
        Set<String> fields) throws IOException {
    final List<LeafReaderContext> leafContexts = searcher.getLeafContexts();
    final int subIndex = ReaderUtil.subIndex(docid, leafContexts);
    final int localId = docid - leafContexts.get(subIndex).docBase;
    final LeafReader leafReader = leafContexts.get(subIndex).reader();
    for (String fieldName : fields) {
        final SchemaField schemaField = searcher.getSchema().getFieldOrNull(fieldName);
        if (schemaField == null || !schemaField.hasDocValues() || doc.containsKey(fieldName)) {
            log.warn("Couldn't decorate docValues for field: [{}], schemaField: [{}]", fieldName, schemaField);
            continue;
        }
        FieldInfo fi = searcher.getFieldInfos().fieldInfo(fieldName);
        if (fi == null) {
            continue; // Searcher doesn't have info about this field, hence ignore it.
        }
        final DocValuesType dvType = fi.getDocValuesType();
        switch (dvType) {
        case NUMERIC:
            final NumericDocValues ndv = leafReader.getNumericDocValues(fieldName);
            if (ndv == null) {
                continue;
            }
            Long val;
            if (ndv.advanceExact(localId)) {
                val = ndv.longValue();
            } else {
                continue;
            }
            Object newVal = val;
            if (schemaField.getType().isPointField()) {
                // TODO: Maybe merge PointField with TrieFields here
                NumberType type = schemaField.getType().getNumberType();
                switch (type) {
                case INTEGER:
                    newVal = val.intValue();
                    break;
                case LONG:
                    newVal = val.longValue();
                    break;
                case FLOAT:
                    newVal = Float.intBitsToFloat(val.intValue());
                    break;
                case DOUBLE:
                    newVal = Double.longBitsToDouble(val);
                    break;
                case DATE:
                    newVal = new Date(val);
                    break;
                default:
                    throw new AssertionError("Unexpected PointType: " + type);
                }
            } else {
                if (schemaField.getType() instanceof TrieIntField) {
                    newVal = val.intValue();
                } else if (schemaField.getType() instanceof TrieFloatField) {
                    newVal = Float.intBitsToFloat(val.intValue());
                } else if (schemaField.getType() instanceof TrieDoubleField) {
                    newVal = Double.longBitsToDouble(val);
                } else if (schemaField.getType() instanceof TrieDateField) {
                    newVal = new Date(val);
                } else if (schemaField.getType() instanceof EnumField) {
                    newVal = ((EnumField) schemaField.getType()).intValueToStringValue(val.intValue());
                }
            }
            doc.addField(fieldName, newVal);
            break;
        case BINARY:
            BinaryDocValues bdv = leafReader.getBinaryDocValues(fieldName);
            if (bdv == null) {
                continue;
            }
            BytesRef value;
            if (bdv.advanceExact(localId)) {
                value = BytesRef.deepCopyOf(bdv.binaryValue());
            } else {
                continue;
            }
            doc.addField(fieldName, value);
            break;
        case SORTED:
            SortedDocValues sdv = leafReader.getSortedDocValues(fieldName);
            if (sdv == null) {
                continue;
            }
            if (sdv.advanceExact(localId)) {
                final BytesRef bRef = sdv.binaryValue();
                // Special handling for Boolean fields since they're stored as 'T' and 'F'.
                if (schemaField.getType() instanceof BoolField) {
                    doc.addField(fieldName, schemaField.getType().toObject(schemaField, bRef));
                } else {
                    doc.addField(fieldName, bRef.utf8ToString());
                }
            }
            break;
        case SORTED_NUMERIC:
            final SortedNumericDocValues numericDv = leafReader.getSortedNumericDocValues(fieldName);
            NumberType type = schemaField.getType().getNumberType();
            if (numericDv != null) {
                if (numericDv.advance(localId) == localId) {
                    final List<Object> outValues = new ArrayList<Object>(numericDv.docValueCount());
                    for (int i = 0; i < numericDv.docValueCount(); i++) {
                        long number = numericDv.nextValue();
                        switch (type) {
                        case INTEGER:
                            outValues.add((int) number);
                            break;
                        case LONG:
                            outValues.add(number);
                            break;
                        case FLOAT:
                            outValues.add(NumericUtils.sortableIntToFloat((int) number));
                            break;
                        case DOUBLE:
                            outValues.add(NumericUtils.sortableLongToDouble(number));
                            break;
                        case DATE:
                            outValues.add(new Date(number));
                            break;
                        default:
                            throw new AssertionError("Unexpected PointType: " + type);
                        }
                    }
                    assert outValues.size() > 0;
                    doc.addField(fieldName, outValues);
                }
            }
        case SORTED_SET:
            final SortedSetDocValues values = leafReader.getSortedSetDocValues(fieldName);
            if (values != null && values.getValueCount() > 0) {
                if (values.advance(localId) == localId) {
                    final List<Object> outValues = new LinkedList<>();
                    for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values
                            .nextOrd()) {
                        value = values.lookupOrd(ord);
                        outValues.add(schemaField.getType().toObject(schemaField, value));
                    }
                    assert outValues.size() > 0;
                    doc.addField(fieldName, outValues);
                }
            }
        case NONE:
            break;
        }
    }
}

From source file:org.apache.solr.uninverting.FieldCacheImpl.java

License:Apache License

/**
 * Returns BinaryDocValues for the given field, preferring real (indexed) doc
 * values and falling back to an uninverted, cached view built from the field's
 * postings.
 *
 * @param reader the leaf reader to pull values from
 * @param field the field name
 * @param acceptableOverheadRatio memory/speed trade-off passed to the uninverting cache
 * @return binary doc values for the field; empty (never null) when the field
 *         is absent or unindexed
 * @throws IOException if reading the index fails
 * @throws IllegalStateException if the field was indexed with doc values of a
 *         different type
 */
public BinaryDocValues getTerms(LeafReader reader, String field, float acceptableOverheadRatio)
        throws IOException {
    BinaryDocValues valuesIn = reader.getBinaryDocValues(field);
    if (valuesIn == null) {
        // SortedDocValues is a BinaryDocValues subtype, so it can serve directly.
        valuesIn = reader.getSortedDocValues(field);
    }

    if (valuesIn != null) {
        // Not cached here by FieldCacheImpl (cached instead
        // per-thread by SegmentReader):
        return valuesIn;
    }

    final FieldInfo info = reader.getFieldInfos().fieldInfo(field);
    if (info == null) {
        return DocValues.emptyBinary();
    } else if (info.getDocValuesType() != DocValuesType.NONE) {
        // The field has doc values of some other type; uninverting would be wrong.
        throw new IllegalStateException(
                "Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
    } else if (info.getIndexOptions() == IndexOptions.NONE) {
        // Not indexed at all: nothing to uninvert from.
        return DocValues.emptyBinary();
    }

    // Build (or fetch) the uninverted view from the cache keyed by field + overhead ratio.
    BinaryDocValuesImpl impl = (BinaryDocValuesImpl) caches.get(BinaryDocValues.class).get(reader,
            new CacheKey(field, acceptableOverheadRatio));
    return impl.iterator();
}

From source file:org.elasticsearch.index.percolator.PercolatorQueryCache.java

License:Apache License

/**
 * Loads all percolator queries stored in the given leaf (segment) into memory,
 * keyed by segment-local doc id.
 *
 * Two loading strategies are used depending on the index-created version:
 * pre-5.0 indices store query sources as stored fields (walked via postings),
 * newer indices store them in binary doc values on the percolator field.
 *
 * @param context the leaf reader context to load queries from
 * @param indexShard the shard owning the leaf; supplies settings and mappings
 * @return the parsed queries for this leaf, wrapped with the shard id
 * @throws IOException if reading the segment fails
 * @throws IllegalStateException if the shard id cannot be resolved or belongs
 *         to a different index than this cache
 */
QueriesLeaf loadQueries(LeafReaderContext context, IndexShard indexShard) throws IOException {
    Version indexVersionCreated = indexShard.indexSettings().getIndexVersionCreated();
    MapperService mapperService = indexShard.mapperService();
    LeafReader leafReader = context.reader();
    ShardId shardId = ShardUtils.extractShardId(leafReader);
    if (shardId == null) {
        throw new IllegalStateException("can't resolve shard id");
    }
    if (indexSettings.getIndex().equals(shardId.getIndex()) == false) {
        // percolator cache insanity
        String message = "Trying to load queries for index " + shardId.getIndex() + " with cache of index "
                + indexSettings.getIndex();
        throw new IllegalStateException(message);
    }

    IntObjectHashMap<Query> queries = new IntObjectHashMap<>();
    boolean legacyLoading = indexVersionCreated.before(Version.V_5_0_0_alpha1);
    if (legacyLoading) {
        // Legacy path: iterate every doc of the legacy percolator type via its
        // postings and parse the query from the stored _source.
        PostingsEnum postings = leafReader.postings(new Term(TypeFieldMapper.NAME, LEGACY_TYPE_NAME),
                PostingsEnum.NONE);
        if (postings != null) {
            LegacyQueryFieldVisitor visitor = new LegacyQueryFieldVisitor();
            for (int docId = postings.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = postings
                    .nextDoc()) {
                leafReader.document(docId, visitor);
                queries.put(docId, parseLegacyPercolatorDocument(docId, visitor.source));
                visitor.source = null; // reset
            }
        }
    } else {
        // Each type can have one percolator field mapper,
        // So for each type we check if there is a percolator field mapper
        // and parse all the queries for the documents of that type.
        IndexSearcher indexSearcher = new IndexSearcher(leafReader);
        for (DocumentMapper documentMapper : mapperService.docMappers(false)) {
            Weight queryWeight = indexSearcher.createNormalizedWeight(documentMapper.typeFilter(), false);
            for (FieldMapper fieldMapper : documentMapper.mappers()) {
                if (fieldMapper instanceof PercolatorFieldMapper) {
                    PercolatorFieldType fieldType = (PercolatorFieldType) fieldMapper.fieldType();
                    BinaryDocValues binaryDocValues = leafReader
                            .getBinaryDocValues(fieldType.getQueryBuilderFieldName());
                    if (binaryDocValues != null) {
                        // use the same leaf reader context the indexSearcher is using too:
                        Scorer scorer = queryWeight.scorer(leafReader.getContext());
                        if (scorer != null) {
                            DocIdSetIterator iterator = scorer.iterator();
                            for (int docId = iterator
                                    .nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = iterator
                                            .nextDoc()) {
                                // Empty value means no query was stored for this doc.
                                BytesRef qbSource = binaryDocValues.get(docId);
                                if (qbSource.length > 0) {
                                    queries.put(docId, parseQueryBuilder(docId, qbSource));
                                }
                            }
                        }
                    }
                    // at most one percolator field mapper per type
                    break;
                }
            }
        }
    }
    // Register for segment-close notification so the cache entry can be evicted.
    leafReader.addCoreClosedListener(this);
    return new QueriesLeaf(shardId, queries);
}

From source file:org.elasticsearch.percolator.PercolateQueryBuilder.java

License:Apache License

/**
 * Creates a per-leaf query store that lazily parses percolator queries from the
 * query-builder field's binary doc values.
 */
private static PercolateQuery.QueryStore createStore(PercolatorFieldMapper.FieldType fieldType,
        QueryShardContext context, boolean mapUnmappedFieldsAsString) {
    return ctx -> {
        final LeafReader leafReader = ctx.reader();
        final String queryBuilderFieldName = fieldType.queryBuilderField.name();
        final BinaryDocValues queryBuilderValues = leafReader.getBinaryDocValues(queryBuilderFieldName);
        if (queryBuilderValues == null) {
            // No queries stored in this segment at all.
            return docId -> null;
        }

        final Bits docsWithQuery = leafReader.getDocsWithField(queryBuilderFieldName);
        return docId -> {
            // Guard clauses: no value for this doc, or an empty stored query source.
            if (docsWithQuery.get(docId) == false) {
                return null;
            }
            final BytesRef qbSource = queryBuilderValues.get(docId);
            if (qbSource.length == 0) {
                return null;
            }
            final XContent xContent = PercolatorFieldMapper.QUERY_BUILDER_CONTENT_TYPE.xContent();
            try (XContentParser sourceParser = xContent.createParser(qbSource.bytes, qbSource.offset,
                    qbSource.length)) {
                return parseQuery(context, mapUnmappedFieldsAsString, sourceParser);
            }
        };
    };
}

From source file:org.elasticsearch.xpack.core.security.authz.accesscontrol.FieldSubsetReaderTests.java

License:Open Source License

/**
 * test filtering two binary dv fields/*from  w ww. j  a  v  a 2s.c  o m*/
 */
public void testBinaryDocValues() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(null);
    IndexWriter iw = new IndexWriter(dir, iwc);

    // add document with 2 fields
    Document doc = new Document();
    doc.add(new BinaryDocValuesField("fieldA", new BytesRef("testA")));
    doc.add(new BinaryDocValuesField("fieldB", new BytesRef("testB")));
    iw.addDocument(doc);

    // open reader
    DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw),
            new CharacterRunAutomaton(Automata.makeString("fieldA")));

    // see only one field
    LeafReader segmentReader = ir.leaves().get(0).reader();
    BinaryDocValues values = segmentReader.getBinaryDocValues("fieldA");
    assertNotNull(values);
    assertTrue(values.advanceExact(0));
    assertEquals(new BytesRef("testA"), values.binaryValue());
    assertNull(segmentReader.getBinaryDocValues("fieldB"));

    TestUtil.checkReader(ir);
    IOUtils.close(ir, iw, dir);
}