com.rocana.lucene.codec.v1.RocanaBlockTreeTermsReader.java Source code

Introduction

Here is the source code for com.rocana.lucene.codec.v1.RocanaBlockTreeTermsReader.java

Source

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.rocana.lucene.codec.v1;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.TreeMap;

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsReaderBase;
import org.apache.lucene.codecs.blocktree.BlockTreeTermsWriter;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.Terms;
import org.apache.lucene.search.PrefixQuery; // javadocs
import org.apache.lucene.search.TermRangeQuery; // javadocs
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.fst.ByteSequenceOutputs;
import org.apache.lucene.util.fst.Outputs;

/**
 * Fork of Lucene's {@link org.apache.lucene.codecs.blocktree.BlockTreeTermsReader}
 * from Lucene's git repository, tag: releases/lucene-solr/5.5.0
 *
 * Why we forked:
 *   - We can't extend the original class: it has final fields that must be set in the
 *     constructor, and the constructor is exactly the code we need to modify.
 *
 * What changed in the fork?
 *   - Commented out the constructor's call to {@link CodecUtil#checksumEntireFile(IndexInput)}
 *     for performance. The checksum is redundant because we store indexes on HDFS,
 *     which checksums data itself. Worse, the checksum runs at an inopportune time:
 *     while we're opening a Rocana Search Searcher, which may happen during a
 *     rebalance. We'd rather eliminate that penalty altogether, or at least
 *     delay it until the rebalance finishes.
 *   - Use the other forked classes, like {@link RocanaFieldReader}.
 *   - Removed trailing whitespace.
 *   - Changed these javadocs.
 *   - Renamed class to have 'Rocana' in the name.
 *   - Moved to a different package.
 *
 * To see a full diff of changes in our fork: compare this version to the very first
 * commit in git history. That first commit is the exact file from Lucene with no
 * modifications.
 *
 *
 * Original Lucene documentation:
 *  A block-based terms index and dictionary that assigns
 *  terms to variable length blocks according to how they
 *  share prefixes.  The terms index is a prefix trie
 *  whose leaves are term blocks.  The advantage of this
 *  approach is that seekExact is often able to
 *  determine that a term cannot exist without doing any IO, and
 *  intersection with Automata is very fast.  Note that this
 *  terms dictionary has its own fixed terms index (i.e., it
 *  does not support a pluggable terms index
 *  implementation).
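 *
 *  <p>For instance, a lookup miss can often be answered from the in-memory
 *  prefix index alone. A minimal sketch (the {@code terms} instance and the
 *  term text are made up):</p>
 *
 *  <pre>{@code
 *  TermsEnum termsEnum = terms.iterator();
 *  boolean found = termsEnum.seekExact(new BytesRef("nosuchterm")); // often zero disk IO
 *  }</pre>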
 *
 *  <p><b>NOTE</b>: this terms dictionary supports
 *  min/maxItemsPerBlock during indexing to control how
 *  much memory the terms index uses.</p>
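 *
 *  <p>For illustration, a minimal writer-side sketch (assuming Lucene 5.5's
 *  {@code Lucene50PostingsWriter} and the stock {@link BlockTreeTermsWriter}
 *  constructor; 25/48 are Lucene's default block sizes, not values from this
 *  fork):</p>
 *
 *  <pre>{@code
 *  PostingsWriterBase postingsWriter = new Lucene50PostingsWriter(writeState);
 *  // Smaller blocks cost more RAM in the terms index; larger blocks cost
 *  // more scanning per lookup. 25/48 are the defaults.
 *  FieldsConsumer fieldsConsumer =
 *      new BlockTreeTermsWriter(writeState, postingsWriter, 25, 48);
 *  }</pre>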
 *
 *  <p>If auto-prefix terms were indexed (see
 *  {@link BlockTreeTermsWriter}), then the {@link Terms#intersect}
 *  implementation here will make use of these terms only if the
 *  automaton has a binary sink state, i.e. an accept state
 *  which has a transition to itself accepting all byte values.
 *  For example, both {@link PrefixQuery} and {@link TermRangeQuery}
 *  pass such automata to {@link Terms#intersect}.</p>
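 *
 *  <p>For example, a minimal sketch using the standard query API (the field
 *  and term values are made up):</p>
 *
 *  <pre>{@code
 *  // Both queries compile to automata with such a sink state, so
 *  // Terms#intersect can exploit auto-prefix terms when they were indexed:
 *  Query byPrefix = new PrefixQuery(new Term("body", "fast"));
 *  Query byRange = TermRangeQuery.newStringRange("body", "apple", "banana", true, false);
 *  }</pre>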
 *
 *  <p>The data structure used by this implementation is very
 *  similar to a burst trie
 *  (http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.18.3499),
 *  but with added logic to break up too-large blocks of all
 *  terms sharing a given prefix into smaller ones.</p>
 *
 *  <p>Use {@link org.apache.lucene.index.CheckIndex} with the <code>-verbose</code>
 *  option to see summary statistics on the blocks in the
 *  dictionary.</p>
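 *
 *  <p>A programmatic equivalent, as a sketch (assuming Lucene 5.5's
 *  {@code CheckIndex} API; the index path is hypothetical):</p>
 *
 *  <pre>{@code
 *  try (Directory dir = FSDirectory.open(Paths.get("/path/to/index"));
 *       CheckIndex checker = new CheckIndex(dir)) {
 *    checker.setInfoStream(System.out, true); // true = verbose block statistics
 *    CheckIndex.Status status = checker.checkIndex();
 *  }
 *  }</pre>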
 *
 *  See {@link BlockTreeTermsWriter}.
 *
 * @see RocanaSearchCodecV1
 *
 * @lucene.experimental
 */

public final class RocanaBlockTreeTermsReader extends FieldsProducer {

    static final Outputs<BytesRef> FST_OUTPUTS = ByteSequenceOutputs.getSingleton();

    static final BytesRef NO_OUTPUT = FST_OUTPUTS.getNoOutput();

    static final int OUTPUT_FLAGS_NUM_BITS = 2;
    static final int OUTPUT_FLAGS_MASK = 0x3;
    static final int OUTPUT_FLAG_IS_FLOOR = 0x1;
    static final int OUTPUT_FLAG_HAS_TERMS = 0x2;

    /** Extension of terms file */
    static final String TERMS_EXTENSION = "tim";
    static final String TERMS_CODEC_NAME = "BlockTreeTermsDict";

    /** Initial terms format. */
    public static final int VERSION_START = 0;

    /** Auto-prefix terms. */
    public static final int VERSION_AUTO_PREFIX_TERMS = 1;

    /** Conditional auto-prefix terms: we record at write time whether
     *  this field did write any auto-prefix terms. */
    public static final int VERSION_AUTO_PREFIX_TERMS_COND = 2;

    /** Current terms format. */
    public static final int VERSION_CURRENT = VERSION_AUTO_PREFIX_TERMS_COND;

    /** Extension of terms index file */
    static final String TERMS_INDEX_EXTENSION = "tip";
    static final String TERMS_INDEX_CODEC_NAME = "BlockTreeTermsIndex";

    // Open input to the main terms dict file (_X.tim)
    final IndexInput termsIn;

    //private static final boolean DEBUG = BlockTreeTermsWriter.DEBUG;

    // Reads the terms dict entries, to gather state to
    // produce PostingsEnum on demand
    final PostingsReaderBase postingsReader;

    private final TreeMap<String, RocanaFieldReader> fields = new TreeMap<>();

    /** File offset where the directory starts in the terms file. */
    private long dirOffset;

    /** File offset where the directory starts in the index file. */
    private long indexDirOffset;

    final String segment;

    final int version;

    final boolean anyAutoPrefixTerms;

    /** Sole constructor. */
    public RocanaBlockTreeTermsReader(PostingsReaderBase postingsReader, SegmentReadState state)
            throws IOException {
        boolean success = false;
        IndexInput indexIn = null;

        this.postingsReader = postingsReader;
        this.segment = state.segmentInfo.name;

        String termsName = IndexFileNames.segmentFileName(segment, state.segmentSuffix, TERMS_EXTENSION);
        try {
            termsIn = state.directory.openInput(termsName, state.context);
            version = CodecUtil.checkIndexHeader(termsIn, TERMS_CODEC_NAME, VERSION_START, VERSION_CURRENT,
                    state.segmentInfo.getId(), state.segmentSuffix);

            if (version < VERSION_AUTO_PREFIX_TERMS) {
                // Old (pre-5.2.0) index, no auto-prefix terms:
                this.anyAutoPrefixTerms = false;
            } else if (version == VERSION_AUTO_PREFIX_TERMS) {
                // 5.2.x index, might have auto-prefix terms:
                this.anyAutoPrefixTerms = true;
            } else {
                // 5.3.x index, we record up front if we may have written any auto-prefix terms:
                assert version >= VERSION_AUTO_PREFIX_TERMS_COND;
                byte b = termsIn.readByte();
                if (b == 0) {
                    this.anyAutoPrefixTerms = false;
                } else if (b == 1) {
                    this.anyAutoPrefixTerms = true;
                } else {
                    throw new CorruptIndexException("invalid anyAutoPrefixTerms: expected 0 or 1 but got " + b,
                            termsIn);
                }
            }

            String indexName = IndexFileNames.segmentFileName(segment, state.segmentSuffix, TERMS_INDEX_EXTENSION);
            indexIn = state.directory.openInput(indexName, state.context);
            CodecUtil.checkIndexHeader(indexIn, TERMS_INDEX_CODEC_NAME, version, version, state.segmentInfo.getId(),
                    state.segmentSuffix);

            // IMPORTANT: the one line below is commented out to prevent checksumming the
            //            entire file. This is the reason we have a custom Lucene codec and
            //            forked Lucene classes.
            //CodecUtil.checksumEntireFile(indexIn);

            // Have PostingsReader init itself
            postingsReader.init(termsIn, state);

            // NOTE: data file is too costly to verify checksum against all the bytes on open,
            // but for now we at least verify proper structure of the checksum footer: which looks
            // for FOOTER_MAGIC + algorithmID. This is cheap and can detect some forms of corruption
            // such as file truncation.
            CodecUtil.retrieveChecksum(termsIn);

            // Read per-field details
            seekDir(termsIn, dirOffset);
            seekDir(indexIn, indexDirOffset);

            final int numFields = termsIn.readVInt();
            if (numFields < 0) {
                throw new CorruptIndexException("invalid numFields: " + numFields, termsIn);
            }

            for (int i = 0; i < numFields; ++i) {
                final int field = termsIn.readVInt();
                final long numTerms = termsIn.readVLong();
                if (numTerms <= 0) {
                    throw new CorruptIndexException("Illegal numTerms for field number: " + field, termsIn);
                }
                final int numBytes = termsIn.readVInt();
                if (numBytes < 0) {
                    throw new CorruptIndexException(
                            "invalid rootCode for field number: " + field + ", numBytes=" + numBytes, termsIn);
                }
                final BytesRef rootCode = new BytesRef(new byte[numBytes]);
                termsIn.readBytes(rootCode.bytes, 0, numBytes);
                rootCode.length = numBytes;
                final FieldInfo fieldInfo = state.fieldInfos.fieldInfo(field);
                if (fieldInfo == null) {
                    throw new CorruptIndexException("invalid field number: " + field, termsIn);
                }
                final long sumTotalTermFreq = fieldInfo.getIndexOptions() == IndexOptions.DOCS ? -1
                        : termsIn.readVLong();
                final long sumDocFreq = termsIn.readVLong();
                final int docCount = termsIn.readVInt();
                final int longsSize = termsIn.readVInt();
                if (longsSize < 0) {
                    throw new CorruptIndexException(
                            "invalid longsSize for field: " + fieldInfo.name + ", longsSize=" + longsSize, termsIn);
                }
                BytesRef minTerm = readBytesRef(termsIn);
                BytesRef maxTerm = readBytesRef(termsIn);
                if (docCount < 0 || docCount > state.segmentInfo.maxDoc()) { // #docs with field must be <= #docs
                    throw new CorruptIndexException(
                            "invalid docCount: " + docCount + " maxDoc: " + state.segmentInfo.maxDoc(), termsIn);
                }
                if (sumDocFreq < docCount) { // #postings must be >= #docs with field
                    throw new CorruptIndexException("invalid sumDocFreq: " + sumDocFreq + " docCount: " + docCount,
                            termsIn);
                }
                if (sumTotalTermFreq != -1 && sumTotalTermFreq < sumDocFreq) { // #positions must be >= #postings
                    throw new CorruptIndexException(
                            "invalid sumTotalTermFreq: " + sumTotalTermFreq + " sumDocFreq: " + sumDocFreq,
                            termsIn);
                }
                final long indexStartFP = indexIn.readVLong();
                RocanaFieldReader previous = fields.put(fieldInfo.name,
                        new RocanaFieldReader(this, fieldInfo, numTerms, rootCode, sumTotalTermFreq, sumDocFreq,
                                docCount, indexStartFP, longsSize, indexIn, minTerm, maxTerm));
                if (previous != null) {
                    throw new CorruptIndexException("duplicate field: " + fieldInfo.name, termsIn);
                }
            }

            indexIn.close();
            success = true;
        } finally {
            if (!success) {
                // this.close() will close termsIn:
                IOUtils.closeWhileHandlingException(indexIn, this);
            }
        }
    }

    private static BytesRef readBytesRef(IndexInput in) throws IOException {
        BytesRef bytes = new BytesRef();
        bytes.length = in.readVInt();
        bytes.bytes = new byte[bytes.length];
        in.readBytes(bytes.bytes, 0, bytes.length);
        return bytes;
    }

    /** Seek {@code input} to the directory offset recorded just before the codec footer. */
    private void seekDir(IndexInput input, long dirOffset) throws IOException {
        // The directory offset lives in the 8 bytes immediately preceding the footer.
        // Note the passed-in dirOffset is overwritten before use, never read.
        input.seek(input.length() - CodecUtil.footerLength() - 8);
        dirOffset = input.readLong();
        input.seek(dirOffset);
    }

    // for debugging
    // private static String toHex(int v) {
    //   return "0x" + Integer.toHexString(v);
    // }

    @Override
    public void close() throws IOException {
        try {
            IOUtils.close(termsIn, postingsReader);
        } finally {
            // Clear so refs to the terms index are GCable even if
            // the app hangs onto us:
            fields.clear();
        }
    }

    @Override
    public Iterator<String> iterator() {
        return Collections.unmodifiableSet(fields.keySet()).iterator();
    }

    @Override
    public Terms terms(String field) throws IOException {
        assert field != null;
        return fields.get(field);
    }

    @Override
    public int size() {
        return fields.size();
    }

    // for debugging
    String brToString(BytesRef b) {
        if (b == null) {
            return "null";
        } else {
            try {
                return b.utf8ToString() + " " + b;
            } catch (Throwable t) {
                // If the BytesRef isn't actually UTF-8, or is e.g. a
                // prefix of UTF-8 that ends mid-character, we
                // fall back to hex:
                return b.toString();
            }
        }
    }

    @Override
    public long ramBytesUsed() {
        long sizeInBytes = postingsReader.ramBytesUsed();
        for (RocanaFieldReader reader : fields.values()) {
            sizeInBytes += reader.ramBytesUsed();
        }
        return sizeInBytes;
    }

    @Override
    public Collection<Accountable> getChildResources() {
        List<Accountable> resources = new ArrayList<>();
        resources.addAll(Accountables.namedAccountables("field", fields));
        resources.add(Accountables.namedAccountable("delegate", postingsReader));
        return Collections.unmodifiableList(resources);
    }

    @Override
    public void checkIntegrity() throws IOException {
        // term dictionary
        CodecUtil.checksumEntireFile(termsIn);

        // postings
        postingsReader.checkIntegrity();
    }

    @Override
    public String toString() {
        return getClass().getSimpleName() + "(fields=" + fields.size() + ",delegate=" + postingsReader + ")";
    }
}
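
For context, here is a minimal sketch (not Rocana's actual codec wiring) of how a custom PostingsFormat could hand segment state to this reader, mirroring the way Lucene 5.5's Lucene50PostingsFormat wires the stock BlockTreeTermsReader. The class name SketchPostingsFormat and the format name "Sketch" are hypothetical; the rest is stock Lucene 5.5 API plus the forked reader above.

package com.rocana.lucene.codec.v1;

import java.io.IOException;

import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.PostingsReaderBase;
import org.apache.lucene.codecs.PostingsWriterBase;
import org.apache.lucene.codecs.blocktree.BlockTreeTermsWriter;
import org.apache.lucene.codecs.lucene50.Lucene50PostingsReader;
import org.apache.lucene.codecs.lucene50.Lucene50PostingsWriter;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.SegmentWriteState;
import org.apache.lucene.util.IOUtils;

// Hypothetical format; a real one would be registered via
// META-INF/services/org.apache.lucene.codecs.PostingsFormat.
public final class SketchPostingsFormat extends PostingsFormat {

    public SketchPostingsFormat() {
        super("Sketch");
    }

    @Override
    public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
        PostingsWriterBase postingsWriter = new Lucene50PostingsWriter(state);
        boolean success = false;
        try {
            // 25/48 are Lucene's default min/max items per block.
            FieldsConsumer ret = new BlockTreeTermsWriter(state, postingsWriter, 25, 48);
            success = true;
            return ret;
        } finally {
            if (!success) {
                IOUtils.closeWhileHandlingException(postingsWriter);
            }
        }
    }

    @Override
    public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
        PostingsReaderBase postingsReader = new Lucene50PostingsReader(state);
        boolean success = false;
        try {
            // The forked reader skips the whole-file checksum of the
            // terms index file; see the class javadocs above.
            FieldsProducer ret = new RocanaBlockTreeTermsReader(postingsReader, state);
            success = true;
            return ret;
        } finally {
            if (!success) {
                IOUtils.closeWhileHandlingException(postingsReader);
            }
        }
    }
}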