List of usage examples for org.apache.lucene.store.IndexOutput#writeVInt

public final void writeVInt(int i) throws IOException
From source file: com.lucure.core.codec.ForUtil.java
License: Apache License
/** * Write a block of data (<code>For</code> format). * * @param data the data to write//www . j a va 2 s. c om * @param encoded a buffer to use to encode data * @param out the destination output * @throws IOException If there is a low-level I/O error */ void writeBlock(int[] data, byte[] encoded, IndexOutput out) throws IOException { if (isAllEqual(data)) { out.writeByte((byte) ALL_VALUES_EQUAL); out.writeVInt(data[0]); return; } final int numBits = bitsRequired(data); assert numBits > 0 && numBits <= 32 : numBits; final PackedInts.Encoder encoder = encoders[numBits]; final int iters = iterations[numBits]; assert iters * encoder.byteValueCount() >= BLOCK_SIZE; final int encodedSize = encodedSizes[numBits]; assert iters * encoder.byteBlockCount() >= encodedSize; out.writeByte((byte) numBits); encoder.encode(data, 0, encoded, 0, iters); out.writeBytes(encoded, encodedSize); }
From source file: com.lucure.core.codec.LucurePostingsWriter.java
License: Apache License
@Override public void init(IndexOutput termsOut) throws IOException { CodecUtil.writeHeader(termsOut, TERMS_CODEC, VERSION_CURRENT); termsOut.writeVInt(BLOCK_SIZE); }
From source file: com.lucure.core.codec.LucureSkipWriter.java
License: Apache License
@Override protected void writeSkipData(int level, IndexOutput skipBuffer) throws IOException { int delta = curDoc - lastSkipDoc[level]; // if (DEBUG) { // System.out.println("writeSkipData level=" + level + " lastDoc=" + curDoc + " delta=" + delta + " curDocPointer=" + curDocPointer); // }//from w w w. ja va2 s. c o m skipBuffer.writeVInt(delta); lastSkipDoc[level] = curDoc; skipBuffer.writeVInt((int) (curDocPointer - lastSkipDocPointer[level])); lastSkipDocPointer[level] = curDocPointer; if (fieldHasPositions) { // if (DEBUG) { // System.out.println(" curPosPointer=" + curPosPointer + " curPosBufferUpto=" + curPosBufferUpto); // } skipBuffer.writeVInt((int) (curPosPointer - lastSkipPosPointer[level])); lastSkipPosPointer[level] = curPosPointer; skipBuffer.writeVInt(curPosBufferUpto); if (fieldHasPayloads) { skipBuffer.writeVInt(curPayloadByteUpto); } if (fieldHasOffsets || fieldHasPayloads) { skipBuffer.writeVInt((int) (curPayPointer - lastSkipPayPointer[level])); lastSkipPayPointer[level] = curPayPointer; } } }
From source file: com.sindicetech.siren.index.codecs.siren10.Siren10SkipListWriter.java
License: Open Source License
@Override protected void writeSkipData(final int level, final IndexOutput skipBuffer) throws IOException { skipBuffer.writeVInt(curDoc - lastSkipDoc[level]); docIndex[level].mark();//ww w . ja v a 2 s . co m docIndex[level].write(skipBuffer, false); lastSkipDoc[level] = curDoc; }
From source file: org.apache.blur.lucene.warmup.IndexTracerResult.java
License: Apache License
private static void writeBoolean(IndexOutput output, boolean b) throws IOException { output.writeVInt(b ? 1 : 0); }
From source file: org.apache.blur.lucene.warmup.IndexWarmup.java
License: Apache License
private void write(List<IndexTracerResult> segmentTraces, IndexOutput output) throws IOException { output.writeVInt(segmentTraces.size()); for (IndexTracerResult r : segmentTraces) { r.write(output);// w w w . j av a 2s.c o m } }
From source file: org.apache.solr.core.snapshots.SolrSnapshotMetaDataManager.java
License: Apache License
private synchronized void persist() throws IOException { String fileName = SNAPSHOTS_PREFIX + nextWriteGen; IndexOutput out = dir.createOutput(fileName, IOContext.DEFAULT); boolean success = false; try {//ww w . j a v a 2s.c o m CodecUtil.writeHeader(out, CODEC_NAME, VERSION_CURRENT); out.writeVInt(nameToDetailsMapping.size()); for (Entry<String, SnapshotMetaData> ent : nameToDetailsMapping.entrySet()) { out.writeString(ent.getKey()); out.writeString(ent.getValue().getIndexDirPath()); out.writeVLong(ent.getValue().getGenerationNumber()); } success = true; } finally { if (!success) { IOUtils.closeWhileHandlingException(out); IOUtils.deleteFilesIgnoringExceptions(dir, fileName); } else { IOUtils.close(out); } } dir.sync(Collections.singletonList(fileName)); if (nextWriteGen > 0) { String lastSaveFile = SNAPSHOTS_PREFIX + (nextWriteGen - 1); // exception OK: likely it didn't exist IOUtils.deleteFilesIgnoringExceptions(dir, lastSaveFile); } nextWriteGen++; }
From source file: org.codelibs.elasticsearch.search.suggest.completion2x.AnalyzingCompletionLookupProvider.java
License: Apache License
@Override public FieldsConsumer consumer(final IndexOutput output) throws IOException { CodecUtil.writeHeader(output, CODEC_NAME, CODEC_VERSION_LATEST); return new FieldsConsumer() { private Map<String, Long> fieldOffsets = new HashMap<>(); @Override/*from w w w . ja v a 2s .c o m*/ public void close() throws IOException { try { /* * write the offsets per field such that we know where * we need to load the FSTs from */ long pointer = output.getFilePointer(); output.writeVInt(fieldOffsets.size()); for (Map.Entry<String, Long> entry : fieldOffsets.entrySet()) { output.writeString(entry.getKey()); output.writeVLong(entry.getValue()); } output.writeLong(pointer); CodecUtil.writeFooter(output); } finally { IOUtils.close(output); } } @Override public void write(Fields fields) throws IOException { for (String field : fields) { Terms terms = fields.terms(field); if (terms == null) { continue; } terms.iterator(); new SuggestPayload(); throw new UnsupportedOperationException("QueryBuilders does not support this operation."); // final XAnalyzingSuggester.XBuilder builder = new XAnalyzingSuggester.XBuilder( // maxSurfaceFormsPerAnalyzedForm, hasPayloads, XAnalyzingSuggester.PAYLOAD_SEP); // int docCount = 0; // while (true) { // BytesRef term = termsEnum.next(); // if (term == null) { // break; // } // docsEnum = termsEnum.postings(docsEnum, PostingsEnum.PAYLOADS); // builder.startTerm(term); // int docFreq = 0; // while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { // for (int i = 0; i < docsEnum.freq(); i++) { // final int position = docsEnum.nextPosition(); // AnalyzingCompletionLookupProvider.this.parsePayload(docsEnum.getPayload(), spare); // builder.addSurface(spare.surfaceForm.get(), spare.payload.get(), spare.weight); // // multi fields have the same surface form so we sum up here // maxAnalyzedPathsForOneInput = Math.max(maxAnalyzedPathsForOneInput, position + 1); // } // docFreq++; // docCount = Math.max(docCount, docsEnum.docID()+1); // } // 
builder.finishTerm(docFreq); // } // /* // * Here we are done processing the field and we can // * buid the FST and write it to disk. // */ // FST<Pair<Long, BytesRef>> build = builder.build(); // assert build != null || docCount == 0: "the FST is null but docCount is != 0 actual value: [" + docCount + "]"; // /* // * it's possible that the FST is null if we have 2 segments that get merged // * and all docs that have a value in this field are deleted. This will cause // * a consumer to be created but it doesn't consume any values causing the FSTBuilder // * to return null. // */ // if (build != null) { // fieldOffsets.put(field, output.getFilePointer()); // build.save(output); // /* write some more meta-info */ // output.writeVInt(maxAnalyzedPathsForOneInput); // output.writeVInt(maxSurfaceFormsPerAnalyzedForm); // output.writeInt(maxGraphExpansions); // can be negative // int options = 0; // options |= preserveSep ? SERIALIZE_PRESERVE_SEPARATORS : 0; // options |= hasPayloads ? SERIALIZE_HAS_PAYLOADS : 0; // options |= preservePositionIncrements ? SERIALIZE_PRESERVE_POSITION_INCREMENTS : 0; // output.writeVInt(options); // output.writeVInt(XAnalyzingSuggester.SEP_LABEL); // output.writeVInt(XAnalyzingSuggester.END_BYTE); // output.writeVInt(XAnalyzingSuggester.PAYLOAD_SEP); // output.writeVInt(XAnalyzingSuggester.HOLE_CHARACTER); // } } } }; }
From source file: org.elasticsearch.common.compress.snappy.SnappyCompressedIndexOutput.java
License: Apache License
@Override protected void writeHeader(IndexOutput out) throws IOException { out.writeBytes(SnappyCompressor.HEADER, SnappyCompressor.HEADER.length); out.writeVInt(context.compressChunkLength()); out.writeVInt(context.compressMaxCompressedChunkLength()); }
From source file: org.elasticsearch.common.compress.snappy.xerial.XerialSnappyCompressedIndexOutput.java
License: Apache License
@Override protected void compress(byte[] data, int offset, int len, IndexOutput out) throws IOException { int compressedLength = Snappy.rawCompress(data, offset, len, compressedBuffer, 0); // use uncompressed input if less than 12.5% compression if (compressedLength >= (len - (len / 8))) { out.writeByte((byte) 0); out.writeVInt(len); out.writeBytes(data, offset, len); } else {/* w w w. java 2 s.c o m*/ out.writeByte((byte) 1); out.writeVInt(compressedLength); out.writeBytes(compressedBuffer, 0, compressedLength); } }