Example usage for org.apache.lucene.codecs CodecUtil writeHeader

List of usage examples for org.apache.lucene.codecs CodecUtil writeHeader

Introduction

On this page you can find example usages of org.apache.lucene.codecs CodecUtil writeHeader.

Prototype

public static void writeHeader(DataOutput out, String codec, int version) throws IOException 

Source Link

Document

Writes a codec header, which records both a string to identify the file and a version number.

Usage

From source file:com.lucure.core.codec.CompressingStoredFieldsWriter.java

License:Apache License

/** Sole constructor. */
public CompressingStoredFieldsWriter(Directory directory, SegmentInfo si, String segmentSuffix,
        IOContext context, String formatName, CompressionMode compressionMode, int chunkSize)
        throws IOException {
    assert directory != null;
    this.directory = directory;
    this.segment = si.name;
    this.segmentSuffix = segmentSuffix;
    this.compressionMode = compressionMode;
    this.compressor = compressionMode.newCompressor();
    this.chunkSize = chunkSize;
    this.docBase = 0;
    this.bufferedDocs = new GrowableByteArrayDataOutput(chunkSize);
    this.numStoredFields = new int[16];
    this.endOffsets = new int[16];
    this.numBufferedDocs = 0;

    boolean success = false;
    IndexOutput indexStream = directory.createOutput(
            IndexFileNames.segmentFileName(segment, segmentSuffix, FIELDS_INDEX_EXTENSION), context);
    try {/*from   ww w  .  j a  v  a 2 s .co  m*/
        fieldsStream = directory.createOutput(
                IndexFileNames.segmentFileName(segment, segmentSuffix, FIELDS_EXTENSION), context);

        final String codecNameIdx = formatName + CODEC_SFX_IDX;
        final String codecNameDat = formatName + CODEC_SFX_DAT;
        CodecUtil.writeHeader(indexStream, codecNameIdx, VERSION_CURRENT);
        CodecUtil.writeHeader(fieldsStream, codecNameDat, VERSION_CURRENT);
        assert CodecUtil.headerLength(codecNameDat) == fieldsStream.getFilePointer();
        assert CodecUtil.headerLength(codecNameIdx) == indexStream.getFilePointer();

        indexWriter = new CompressingStoredFieldsIndexWriter(indexStream);
        indexStream = null;

        fieldsStream.writeVInt(chunkSize);
        fieldsStream.writeVInt(PackedInts.VERSION_CURRENT);

        success = true;
    } finally {
        if (!success) {
            IOUtils.closeWhileHandlingException(indexStream);
            abort();
        }
    }
}

From source file:com.lucure.core.codec.LucurePostingsWriter.java

License:Apache License

/**
 * Creates a postings writer with the specified PackedInts overhead ratio.
 * Always opens the .doc output; additionally opens the .pos output when the
 * segment has proximity data, and the .pay output when it also has payloads
 * or offsets. A codec header is written to each file as it is created; on
 * failure all opened streams are closed.
 *
 * @param state                   segment write state supplying directory, names and field infos
 * @param acceptableOverheadRatio packed-ints memory/speed trade-off passed to {@code ForUtil}
 * @throws IOException if any output file cannot be created or written
 */
// TODO: does this ctor even make sense?
public LucurePostingsWriter(SegmentWriteState state, float acceptableOverheadRatio) throws IOException {
    super();

    docOut = state.directory.createOutput(IndexFileNames.segmentFileName(state.segmentInfo.name,
            state.segmentSuffix, LucurePostingsFormat.DOC_EXTENSION), state.context);
    IndexOutput posOut = null;
    IndexOutput payOut = null;
    boolean success = false;
    try {
        CodecUtil.writeHeader(docOut, DOC_CODEC, VERSION_CURRENT);
        forUtil = new ForUtil(acceptableOverheadRatio, docOut);
        if (state.fieldInfos.hasProx()) {
            // Positions present: allocate the position buffer and open the .pos file.
            posDeltaBuffer = new int[MAX_DATA_SIZE];
            posOut = state.directory.createOutput(IndexFileNames.segmentFileName(state.segmentInfo.name,
                    state.segmentSuffix, LucurePostingsFormat.POS_EXTENSION), state.context);
            CodecUtil.writeHeader(posOut, POS_CODEC, VERSION_CURRENT);

            if (state.fieldInfos.hasPayloads()) {
                payloadBytes = new byte[128];
                payloadLengthBuffer = new int[MAX_DATA_SIZE];
            } else {
                payloadBytes = null;
                payloadLengthBuffer = null;
            }

            if (state.fieldInfos.hasOffsets()) {
                offsetStartDeltaBuffer = new int[MAX_DATA_SIZE];
                offsetLengthBuffer = new int[MAX_DATA_SIZE];
            } else {
                offsetStartDeltaBuffer = null;
                offsetLengthBuffer = null;
            }

            // A .pay file is only needed when payloads or offsets exist.
            if (state.fieldInfos.hasPayloads() || state.fieldInfos.hasOffsets()) {
                payOut = state.directory.createOutput(IndexFileNames.segmentFileName(state.segmentInfo.name,
                        state.segmentSuffix, LucurePostingsFormat.PAY_EXTENSION), state.context);
                CodecUtil.writeHeader(payOut, PAY_CODEC, VERSION_CURRENT);
            }
        } else {
            // No proximity data: none of the position/payload/offset buffers are needed.
            posDeltaBuffer = null;
            payloadLengthBuffer = null;
            offsetStartDeltaBuffer = null;
            offsetLengthBuffer = null;
            payloadBytes = null;
        }
        this.payOut = payOut;
        this.posOut = posOut;
        success = true;
    } finally {
        if (!success) {
            // Close any streams opened so far without masking the original exception.
            IOUtils.closeWhileHandlingException(docOut, posOut, payOut);
        }
    }

    docDeltaBuffer = new int[MAX_DATA_SIZE];
    freqBuffer = new int[MAX_DATA_SIZE];

    // TODO: should we try skipping every 2/4 blocks...?
    skipWriter = new LucureSkipWriter(maxSkipLevels, BLOCK_SIZE, state.segmentInfo.getDocCount(), docOut,
            posOut, payOut);

    encoded = new byte[MAX_ENCODED_SIZE];
}

From source file:com.lucure.core.codec.LucurePostingsWriter.java

License:Apache License

/**
 * Writes the terms-dictionary file header (codec name + version) followed by
 * the block size. The write order here is part of the on-disk format.
 *
 * @param termsOut output for the terms dictionary file
 * @throws IOException if writing to {@code termsOut} fails
 */
@Override
public void init(IndexOutput termsOut) throws IOException {
    CodecUtil.writeHeader(termsOut, TERMS_CODEC, VERSION_CURRENT);
    termsOut.writeVInt(BLOCK_SIZE);
}

From source file:com.sindicetech.siren.index.codecs.siren10.Siren10PostingsWriter.java

License:Open Source License

/**
 * Writes the codec header followed by the four skip/block configuration
 * values (skip interval, max skip levels, skip minimum, max block size).
 * The write order here is part of the on-disk format.
 *
 * @param termsOut output for the terms file
 * @throws IOException if writing to {@code termsOut} fails
 */
@Override
public void init(final IndexOutput termsOut) throws IOException {
    CodecUtil.writeHeader(termsOut, CODEC, VERSION_CURRENT);
    termsOut.writeInt(blockSkipInterval); // write skipInterval
    termsOut.writeInt(maxSkipLevels); // write maxSkipLevels
    termsOut.writeInt(blockSkipMinimum); // write skipMinimum
    termsOut.writeInt(maxBlockSize); // write maxBlockSize
}

From source file:org.apache.blur.lucene.codec.Blur022SegmentInfoWriter.java

License:Apache License

/**
 * Serializes {@code si} to a new segment-info (.si) file in {@code dir},
 * forcing Blur's stored-fields chunk-size and compression-mode settings into
 * the segment attributes before writing. On failure the partially written
 * file is closed and deleted; on success the output is closed normally.
 *
 * @throws IOException if the file cannot be created or written
 */
@Override
public void write(Directory dir, SegmentInfo si, FieldInfos fis, IOContext ioContext) throws IOException {
    final String fileName = IndexFileNames.segmentFileName(si.name, "", Blur022SegmentInfoFormat.SI_EXTENSION);
    si.addFile(fileName);

    final IndexOutput output = dir.createOutput(fileName, ioContext);

    boolean wroteCompletely = false;
    try {
        CodecUtil.writeHeader(output, Blur022SegmentInfoFormat.CODEC_NAME,
                Blur022SegmentInfoFormat.VERSION_CURRENT);
        output.writeString(si.getVersion());
        output.writeInt(si.getDocCount());

        output.writeByte((byte) (si.getUseCompoundFile() ? SegmentInfo.YES : SegmentInfo.NO));
        output.writeStringStringMap(si.getDiagnostics());

        // Build a sorted copy of the attributes with Blur's stored-fields settings forced in,
        // so the map is written in a deterministic order.
        final TreeMap<String, String> mergedAttributes = new TreeMap<String, String>();
        final Map<String, String> existing = si.attributes();
        if (existing != null) {
            mergedAttributes.putAll(existing);
        }
        mergedAttributes.put(Blur022StoredFieldsFormat.STORED_FIELDS_FORMAT_CHUNK_SIZE,
                Integer.toString(_compressionChunkSize));
        mergedAttributes.put(Blur022StoredFieldsFormat.STORED_FIELDS_FORMAT_COMPRESSION_MODE, _compressionMode);
        output.writeStringStringMap(mergedAttributes);
        output.writeStringSet(si.files());

        wroteCompletely = true;
    } finally {
        if (wroteCompletely) {
            output.close();
        } else {
            // Best-effort cleanup: close and remove the partial file without masking the exception.
            IOUtils.closeWhileHandlingException(output);
            si.dir.deleteFile(fileName);
        }
    }
}

From source file:org.apache.blur.lucene.codec.DiskDocValuesConsumer.java

License:Apache License

/**
 * Opens the doc-values data and metadata outputs and writes a codec header to
 * each. On failure this consumer closes itself, which releases whichever
 * outputs were already opened, without masking the original exception.
 *
 * @param state         segment write state supplying directory, names and doc count
 * @param dataCodec     codec name written to the data file header
 * @param dataExtension file extension for the data file
 * @param metaCodec     codec name written to the metadata file header
 * @param metaExtension file extension for the metadata file
 * @throws IOException if either file cannot be created or written
 */
public DiskDocValuesConsumer(SegmentWriteState state, String dataCodec, String dataExtension, String metaCodec,
        String metaExtension) throws IOException {
    boolean success = false;
    try {
        String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix,
                dataExtension);
        data = state.directory.createOutput(dataName, state.context);
        CodecUtil.writeHeader(data, dataCodec, DiskDocValuesFormat.VERSION_CURRENT);
        String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix,
                metaExtension);
        meta = state.directory.createOutput(metaName, state.context);
        CodecUtil.writeHeader(meta, metaCodec, DiskDocValuesFormat.VERSION_CURRENT);
        maxDoc = state.segmentInfo.getDocCount();
        success = true;
    } finally {
        if (!success) {
            // Closing 'this' releases data/meta — whichever were opened before the failure.
            IOUtils.closeWhileHandlingException(this);
        }
    }
}

From source file:org.apache.solr.core.snapshots.SolrSnapshotMetaDataManager.java

License:Apache License

/**
 * Persists the snapshot metadata to a new generation file
 * ({@code SNAPSHOTS_PREFIX + nextWriteGen}), syncs it, removes the previous
 * generation (best effort), and advances the write generation counter.
 * On failure the partial generation file is closed and deleted.
 *
 * @throws IOException if the metadata file cannot be written or synced
 */
private synchronized void persist() throws IOException {
    final String genFileName = SNAPSHOTS_PREFIX + nextWriteGen;
    IndexOutput out = dir.createOutput(genFileName, IOContext.DEFAULT);
    boolean written = false;
    try {
        CodecUtil.writeHeader(out, CODEC_NAME, VERSION_CURRENT);
        // Entry count, then one (name, index dir path, generation) triple per snapshot.
        out.writeVInt(nameToDetailsMapping.size());
        for (Entry<String, SnapshotMetaData> entry : nameToDetailsMapping.entrySet()) {
            SnapshotMetaData details = entry.getValue();
            out.writeString(entry.getKey());
            out.writeString(details.getIndexDirPath());
            out.writeVLong(details.getGenerationNumber());
        }
        written = true;
    } finally {
        if (written) {
            IOUtils.close(out);
        } else {
            // Never leave a partial generation file behind.
            IOUtils.closeWhileHandlingException(out);
            IOUtils.deleteFilesIgnoringExceptions(dir, genFileName);
        }
    }

    // Durably record the new generation before discarding the old one.
    dir.sync(Collections.singletonList(genFileName));

    if (nextWriteGen > 0) {
        // exception OK: likely it didn't exist
        IOUtils.deleteFilesIgnoringExceptions(dir, SNAPSHOTS_PREFIX + (nextWriteGen - 1));
    }

    nextWriteGen++;
}

From source file:org.codelibs.elasticsearch.search.suggest.completion2x.AnalyzingCompletionLookupProvider.java

License:Apache License

/**
 * Writes the completion codec header to {@code output} and returns a
 * {@link FieldsConsumer} that, on close, appends the per-field FST offset
 * table, a trailing pointer to that table, and a footer. Note that
 * {@code write(Fields)} below is currently a disabled stub that always throws
 * {@link UnsupportedOperationException}.
 */
@Override
public FieldsConsumer consumer(final IndexOutput output) throws IOException {
    CodecUtil.writeHeader(output, CODEC_NAME, CODEC_VERSION_LATEST);
    return new FieldsConsumer() {
        // Maps field name -> file offset of its serialized FST (intended to be filled by write()).
        private Map<String, Long> fieldOffsets = new HashMap<>();

        @Override
        public void close() throws IOException {
            try {
                /*
                 * write the offsets per field such that we know where
                 * we need to load the FSTs from
                 */
                long pointer = output.getFilePointer();
                output.writeVInt(fieldOffsets.size());
                for (Map.Entry<String, Long> entry : fieldOffsets.entrySet()) {
                    output.writeString(entry.getKey());
                    output.writeVLong(entry.getValue());
                }
                // Trailing pointer lets readers seek back to the offset table from the end.
                output.writeLong(pointer);
                CodecUtil.writeFooter(output);
            } finally {
                IOUtils.close(output);
            }
        }

        @Override
        public void write(Fields fields) throws IOException {
            for (String field : fields) {
                Terms terms = fields.terms(field);
                if (terms == null) {
                    continue;
                }
                // NOTE(review): the two statements below discard their results and are
                // immediately followed by an unconditional throw — this method is a
                // disabled stub; the former implementation is preserved commented out below.
                terms.iterator();
                new SuggestPayload();
                throw new UnsupportedOperationException("QueryBuilders does not support this operation.");
                //                    final XAnalyzingSuggester.XBuilder builder = new XAnalyzingSuggester.XBuilder(
                //                        maxSurfaceFormsPerAnalyzedForm, hasPayloads, XAnalyzingSuggester.PAYLOAD_SEP);
                //                    int docCount = 0;
                //                    while (true) {
                //                        BytesRef term = termsEnum.next();
                //                        if (term == null) {
                //                            break;
                //                        }
                //                        docsEnum = termsEnum.postings(docsEnum, PostingsEnum.PAYLOADS);
                //                        builder.startTerm(term);
                //                        int docFreq = 0;
                //                        while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                //                            for (int i = 0; i < docsEnum.freq(); i++) {
                //                                final int position = docsEnum.nextPosition();
                //                                AnalyzingCompletionLookupProvider.this.parsePayload(docsEnum.getPayload(), spare);
                //                                builder.addSurface(spare.surfaceForm.get(), spare.payload.get(), spare.weight);
                //                                // multi fields have the same surface form so we sum up here
                //                                maxAnalyzedPathsForOneInput = Math.max(maxAnalyzedPathsForOneInput, position + 1);
                //                            }
                //                            docFreq++;
                //                            docCount = Math.max(docCount, docsEnum.docID()+1);
                //                        }
                //                        builder.finishTerm(docFreq);
                //                    }
                //                    /*
                //                     * Here we are done processing the field and we can
                //                     * buid the FST and write it to disk.
                //                     */
                //                    FST<Pair<Long, BytesRef>> build = builder.build();
                //                    assert build != null || docCount == 0: "the FST is null but docCount is != 0 actual value: [" + docCount + "]";
                //                    /*
                //                     * it's possible that the FST is null if we have 2 segments that get merged
                //                     * and all docs that have a value in this field are deleted. This will cause
                //                     * a consumer to be created but it doesn't consume any values causing the FSTBuilder
                //                     * to return null.
                //                     */
                //                    if (build != null) {
                //                        fieldOffsets.put(field, output.getFilePointer());
                //                        build.save(output);
                //                        /* write some more meta-info */
                //                        output.writeVInt(maxAnalyzedPathsForOneInput);
                //                        output.writeVInt(maxSurfaceFormsPerAnalyzedForm);
                //                        output.writeInt(maxGraphExpansions); // can be negative
                //                        int options = 0;
                //                        options |= preserveSep ? SERIALIZE_PRESERVE_SEPARATORS : 0;
                //                        options |= hasPayloads ? SERIALIZE_HAS_PAYLOADS : 0;
                //                        options |= preservePositionIncrements ? SERIALIZE_PRESERVE_POSITION_INCREMENTS : 0;
                //                        output.writeVInt(options);
                //                        output.writeVInt(XAnalyzingSuggester.SEP_LABEL);
                //                        output.writeVInt(XAnalyzingSuggester.END_BYTE);
                //                        output.writeVInt(XAnalyzingSuggester.PAYLOAD_SEP);
                //                        output.writeVInt(XAnalyzingSuggester.HOLE_CHARACTER);
                //                    }
            }
        }
    };
}

From source file:org.elasticsearch.common.settings.KeyStoreWrapper.java

License:Apache License

/**
 * Writes the keystore to the given config directory.
 *
 * <p>The contents are first written to a {@code .tmp} sibling file and then
 * atomically moved over the real keystore file, so a crash mid-write never
 * leaves a corrupt keystore in place. On any failure before the move, the
 * temporary file is removed instead of being left behind.
 *
 * @param configDir directory that holds the keystore file
 * @throws Exception if serializing, writing, or moving the keystore fails
 */
void save(Path configDir) throws Exception {
    char[] password = this.keystorePassword.get().getPassword();

    SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
    // write to tmp file first, then atomically rename over the real file
    String tmpFile = KEYSTORE_FILENAME + ".tmp";
    boolean moved = false;
    try {
        try (IndexOutput output = directory.createOutput(tmpFile, IOContext.DEFAULT)) {
            CodecUtil.writeHeader(output, KEYSTORE_FILENAME, FORMAT_VERSION);
            // flag byte: 1 when the keystore is password protected, 0 otherwise
            output.writeByte(password.length == 0 ? (byte) 0 : (byte) 1);
            output.writeString(type);
            output.writeString(secretFactory.getAlgorithm());

            // serialize the underlying KeyStore as a length-prefixed byte blob
            ByteArrayOutputStream keystoreBytesStream = new ByteArrayOutputStream();
            keystore.get().store(keystoreBytesStream, password);
            byte[] keystoreBytes = keystoreBytesStream.toByteArray();
            output.writeInt(keystoreBytes.length);
            output.writeBytes(keystoreBytes, keystoreBytes.length);
            // footer checksum lets readers detect truncation/corruption
            CodecUtil.writeFooter(output);
        }

        Path keystoreFile = keystorePath(configDir);
        Files.move(configDir.resolve(tmpFile), keystoreFile, StandardCopyOption.REPLACE_EXISTING,
                StandardCopyOption.ATOMIC_MOVE);
        moved = true;
        PosixFileAttributeView attrs = Files.getFileAttributeView(keystoreFile, PosixFileAttributeView.class);
        if (attrs != null) {
            // don't rely on umask: ensure the keystore has minimal permissions
            attrs.setPermissions(PosixFilePermissions.fromString("rw-------"));
        }
    } finally {
        if (!moved) {
            // fix: don't leave a half-written .tmp keystore behind on failure
            Files.deleteIfExists(configDir.resolve(tmpFile));
        }
    }
}

From source file:org.elasticsearch.gateway.local.state.meta.MetaDataStateFormat.java

License:Apache License

/**
 * Writes the given state to the given directories. The state is written to a
 * state directory ({@value #STATE_DIR_NAME}) underneath each of the given file locations and is created if it
 * doesn't exist. The state is serialized to a temporary file in that directory and is then atomically moved to
 * its target filename of the pattern <tt>{prefix}{version}.st</tt>.
 *
 * @param state the state object to write
 * @param version the version of the state
 * @param locations the locations where the state should be written to.
 * @throws IOException if an IOException occurs
 */
public final void write(final T state, final long version, final File... locations) throws IOException {
    Preconditions.checkArgument(locations != null, "Locations must not be null");
    Preconditions.checkArgument(locations.length > 0, "One or more locations required");
    // The next state id is one greater than the largest id already on disk.
    final long maxStateId = findMaxStateId(prefix, locations) + 1;
    assert maxStateId >= 0 : "maxStateId must be positive but was: [" + maxStateId + "]";
    final String fileName = prefix + maxStateId + STATE_FILE_EXTENSION;
    Path stateLocation = Paths.get(locations[0].getPath(), STATE_DIR_NAME);
    Files.createDirectories(stateLocation);
    final Path tmpStatePath = stateLocation.resolve(fileName + ".tmp");
    final Path finalStatePath = stateLocation.resolve(fileName);
    try {
        // Serialize header + format index + version + XContent payload + footer to a tmp file.
        try (OutputStreamIndexOutput out = new OutputStreamIndexOutput(Files.newOutputStream(tmpStatePath),
                BUFFER_SIZE)) {
            CodecUtil.writeHeader(out, STATE_FILE_CODEC, STATE_FILE_VERSION);
            out.writeInt(format.index());
            out.writeLong(version);
            try (XContentBuilder builder = newXContentBuilder(format,
                    new org.elasticsearch.common.lucene.store.OutputStreamIndexOutput(out) {
                        @Override
                        public void close() throws IOException {
                            // this is important since some of the XContentBuilders write bytes on close.
                            // in order to write the footer we need to prevent closing the actual index input.
                        }
                    })) {

                builder.startObject();
                {
                    toXContent(builder, state);
                }
                builder.endObject();
            }
            CodecUtil.writeFooter(out);
        }
        IOUtils.fsync(tmpStatePath.toFile(), false); // fsync the state file
        // Atomic rename publishes the fully written state file in the first location.
        Files.move(tmpStatePath, finalStatePath, StandardCopyOption.ATOMIC_MOVE);
        IOUtils.fsync(stateLocation.toFile(), true);
        // Replicate the published file to the remaining locations via copy + atomic rename.
        for (int i = 1; i < locations.length; i++) {
            stateLocation = Paths.get(locations[i].getPath(), STATE_DIR_NAME);
            Files.createDirectories(stateLocation);
            Path tmpPath = stateLocation.resolve(fileName + ".tmp");
            Path finalPath = stateLocation.resolve(fileName);
            try {
                Files.copy(finalStatePath, tmpPath);
                Files.move(tmpPath, finalPath, StandardCopyOption.ATOMIC_MOVE); // we are on the same FileSystem / Partition here we can do an atomic move
                IOUtils.fsync(stateLocation.toFile(), true); // we just fsync the dir here..
            } finally {
                Files.deleteIfExists(tmpPath);
            }
        }
    } finally {
        // Safety net: the tmp file in the first location is gone after a successful move,
        // so this only cleans up after a failure.
        Files.deleteIfExists(tmpStatePath);
    }
    cleanupOldFiles(prefix, fileName, locations);
}