Example usage for org.apache.lucene.codecs.compressing CompressingStoredFieldsReader getChunkSize

List of usage examples for org.apache.lucene.codecs.compressing CompressingStoredFieldsReader getChunkSize

Introduction

On this page you can find an example usage for org.apache.lucene.codecs.compressing CompressingStoredFieldsReader getChunkSize.

Prototype

int getChunkSize() 

Source Link

Usage

From source file: com.lucure.core.codec.CompressingStoredFieldsWriter.java

License:Apache License

/**
 * Merges the stored fields of all readers in {@code mergeState} into this writer and
 * returns the number of documents written.
 *
 * <p>For each incoming segment, compressed chunks are bulk-copied without decompressing
 * per-document fields when the matching reader is a {@link CompressingStoredFieldsReader}
 * with the same version, compression mode, and chunk size as this writer; otherwise a
 * naive document-by-document merge is performed.
 *
 * <p>Merge-time authorization filtering is disabled for the duration of the merge via
 * {@code AccessFilteredDocsAndPositionsEnum}, and re-enabled in a {@code finally} block
 * so a failed merge cannot leave authorizations bypassed.
 *
 * @param mergeState the merge state describing the readers to merge
 * @return the number of live documents merged
 * @throws IOException if an I/O error occurs while reading or writing stored fields
 * @throws CorruptIndexException if a decompressed chunk's size does not match the sum
 *         of its per-document lengths
 */
@Override
public int merge(MergeState mergeState) throws IOException {
    int docCount = 0;
    int idx = 0;

    AccessFilteredDocsAndPositionsEnum.enableMergeAuthorizations();
    // Ensure merge authorizations are always switched back off, even if the merge
    // aborts with an exception part-way through a segment.
    try {
        for (AtomicReader reader : mergeState.readers) {
            final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
            CompressingStoredFieldsReader matchingFieldsReader = null;
            if (matchingSegmentReader != null) {
                final StoredFieldsReader fieldsReader = matchingSegmentReader.getFieldsReader();
                // we can only bulk-copy if the matching reader is also a CompressingStoredFieldsReader
                if (fieldsReader != null && fieldsReader instanceof CompressingStoredFieldsReader) {
                    matchingFieldsReader = (CompressingStoredFieldsReader) fieldsReader;
                }
            }

            final int maxDoc = reader.maxDoc();
            final Bits liveDocs = reader.getLiveDocs();

            if (matchingFieldsReader == null || matchingFieldsReader.getVersion() != VERSION_CURRENT // means reader version is not the same as the writer version
                    || matchingFieldsReader.getCompressionMode() != compressionMode
                    || matchingFieldsReader.getChunkSize() != chunkSize) { // the way data is decompressed depends on the chunk size
                // naive merge: re-read each live document and re-add it through this writer
                for (int i = nextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = nextLiveDoc(i + 1, liveDocs,
                        maxDoc)) {
                    Document doc = reader.document(i);
                    addDocument(doc, mergeState.fieldInfos);
                    ++docCount;
                    mergeState.checkAbort.work(300);
                }
            } else {
                // bulk merge: copy compressed chunk bytes directly, skipping deleted docs
                int docID = nextLiveDoc(0, liveDocs, maxDoc);
                if (docID < maxDoc) {
                    // not all docs were deleted
                    final CompressingStoredFieldsReader.ChunkIterator it = matchingFieldsReader
                            .chunkIterator(docID);
                    int[] startOffsets = new int[0];
                    do {
                        // go to the next chunk that contains docID
                        it.next(docID);
                        // transform lengths into offsets
                        if (startOffsets.length < it.chunkDocs) {
                            startOffsets = new int[ArrayUtil.oversize(it.chunkDocs, 4)];
                        }
                        for (int i = 1; i < it.chunkDocs; ++i) {
                            startOffsets[i] = startOffsets[i - 1] + it.lengths[i - 1];
                        }

                        // decompress
                        it.decompress();
                        // sanity check: last offset + last length must equal total decompressed size
                        if (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] != it.bytes.length) {
                            // parenthesized so the message shows the numeric sum rather than
                            // the two values concatenated as strings
                            throw new CorruptIndexException("Corrupted: expected chunk size="
                                    + (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1])
                                    + ", got " + it.bytes.length);
                        }
                        // copy non-deleted docs
                        for (; docID < it.docBase + it.chunkDocs; docID = nextLiveDoc(docID + 1, liveDocs,
                                maxDoc)) {
                            final int diff = docID - it.docBase;
                            startDocument();
                            bufferedDocs.writeBytes(it.bytes.bytes, it.bytes.offset + startOffsets[diff],
                                    it.lengths[diff]);
                            numStoredFieldsInDoc = it.numStoredFields[diff];
                            finishDocument();
                            ++docCount;
                            mergeState.checkAbort.work(300);
                        }
                    } while (docID < maxDoc);

                    it.checkIntegrity();
                }
            }
        }
    } finally {
        AccessFilteredDocsAndPositionsEnum.disableMergeAuthorizations();
    }

    finish(mergeState.fieldInfos, docCount);
    return docCount;
}