List of usage examples for org.apache.lucene.store IndexInput readByte
public abstract byte readByte() throws IOException;
From source file:com.bah.lucene.BaseDirectoryTestSuite.java
License:Apache License
/**
 * Asserts that reading one byte past the end of {@code name} throws an IOException.
 *
 * @param name      file in the directory to probe
 * @param directory directory to open the file from
 * @param length    known length of the file; seeking here positions the pointer at EOF
 * @throws IOException if opening or closing the input fails
 */
private void testEof(String name, Directory directory, long length) throws IOException {
    IndexInput input = directory.openInput(name, IOContext.DEFAULT);
    try {
        input.seek(length);
        input.readByte();
        fail("should throw eof");
    } catch (IOException ignored) {
        // expected: a read at offset == length is past EOF
    } finally {
        input.close(); // original leaked the input on every path
    }
}
From source file:com.bah.lucene.blockcache.BlockDirectoryTest.java
License:Apache License
/**
 * Asserts that reading one byte past the end of {@code name} throws an IOException.
 * The seek is deliberately outside the try: a failure to seek should fail the test,
 * not be swallowed as the expected EOF.
 *
 * @param name      file in the directory to probe
 * @param directory directory to open the file from
 * @param length    known length of the file; seeking here positions the pointer at EOF
 * @throws IOException if seeking, opening, or closing the input fails
 */
private void testEof(String name, Directory directory, long length) throws IOException {
    IndexInput input = directory.openInput(name, IOContext.DEFAULT);
    try {
        input.seek(length);
        try {
            input.readByte();
            fail("should throw eof");
        } catch (IOException ignored) {
            // expected: a read at offset == length is past EOF
        }
    } finally {
        input.close(); // original leaked the input on every path
    }
}
From source file:com.bah.lucene.blockcache_v2.CacheIndexOutputTest.java
License:Apache License
/**
 * Round-trips two marker bytes plus a 16000-byte random payload through a
 * {@link CacheIndexOutput} backed by a RAMDirectory, then reads everything
 * back and verifies length and content.
 */
@Test
public void test1() throws IOException {
    final Random rnd = new Random(seed);
    final RAMDirectory dir = new RAMDirectory();
    final IndexOutput rawOutput = dir.createOutput("test", IOContext.DEFAULT);
    final Cache cache = CacheIndexInputTest.getCache();
    final CacheIndexOutput cacheOutput = new CacheIndexOutput(null, "test", rawOutput, cache);

    // Write: two single bytes followed by the random payload.
    cacheOutput.writeByte((byte) 1);
    cacheOutput.writeByte((byte) 2);
    final byte[] payload = new byte[16000];
    rnd.nextBytes(payload);
    cacheOutput.writeBytes(payload, 16000);
    cacheOutput.close();

    // Read back and verify: 2 marker bytes + 16000 payload bytes = 16002.
    final IndexInput input = dir.openInput("test", IOContext.DEFAULT);
    assertEquals(16002, input.length());
    assertEquals(1, input.readByte());
    assertEquals(2, input.readByte());
    final byte[] readBack = new byte[16000];
    input.readBytes(readBack, 0, 16000);
    input.close();
    assertArrayEquals(payload, readBack);
    dir.close();
}
From source file:com.github.lucene.store.jdbc.index.AbstractIndexInputOutputITest.java
License:Apache License
private void verifyData() throws IOException { final byte[] test = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 }; Assert.assertTrue(jdbcDirectory.fileExists("value1")); Assert.assertEquals(36, jdbcDirectory.fileLength("value1")); final IndexInput indexInput = jdbcDirectory.openInput("value1", new IOContext()); Assert.assertEquals(-1, indexInput.readInt()); Assert.assertEquals(10, indexInput.readLong()); Assert.assertEquals(0, indexInput.readInt()); Assert.assertEquals(0, indexInput.readInt()); indexInput.readBytes(test, 0, 8);// www. ja va 2s .co m Assert.assertEquals((byte) 1, test[0]); Assert.assertEquals((byte) 8, test[7]); indexInput.readBytes(test, 0, 5); Assert.assertEquals((byte) 1, test[0]); Assert.assertEquals((byte) 5, test[4]); indexInput.seek(28); Assert.assertEquals((byte) 1, indexInput.readByte()); indexInput.seek(30); Assert.assertEquals((byte) 3, indexInput.readByte()); indexInput.close(); }
From source file:com.liferay.portal.search.lucene.dump.DumpIndexDeletionPolicyTest.java
License:Open Source License
private void _assertContent(String fileName, IndexInput sourceIndexInput, IndexInput targetIndexInput) throws Exception { for (long i = 0; i < sourceIndexInput.length(); i++) { if (sourceIndexInput.readByte() != targetIndexInput.readByte()) { fail(fileName + " has different source and target byte value at " + i); }// w w w.j a v a 2 s . c o m } sourceIndexInput.close(); targetIndexInput.close(); }
From source file:com.lucure.core.codec.ForUtil.java
License:Apache License
/**
 * Read the next block of data (<code>For</code> format).
 *
 * The block starts with a single header byte giving the bit width. The special
 * width {@code ALL_VALUES_EQUAL} marks a run-length block: one vInt value is
 * read and replicated across all {@code BLOCK_SIZE} slots. Otherwise the
 * pre-computed tables ({@code encodedSizes}, {@code decoders}, {@code iterations})
 * indexed by the bit width drive a bulk read plus packed-ints decode.
 *
 * @param in the input to use to read data
 * @param encoded a buffer that can be used to store encoded data; must hold at
 *                least {@code encodedSizes[numBits]} bytes
 * @param decoded where to write decoded data; at least {@code BLOCK_SIZE} ints
 * @throws IOException If there is a low-level I/O error
 */
void readBlock(IndexInput in, byte[] encoded, int[] decoded) throws IOException {
    final int numBits = in.readByte();
    assert numBits <= 32 : numBits;

    if (numBits == ALL_VALUES_EQUAL) {
        // Run-length block: a single value repeated BLOCK_SIZE times.
        final int value = in.readVInt();
        Arrays.fill(decoded, 0, BLOCK_SIZE, value);
        return;
    }

    // Bit-packed block: bulk-read the exact encoded size, then decode.
    final int encodedSize = encodedSizes[numBits];
    in.readBytes(encoded, 0, encodedSize);

    final PackedInts.Decoder decoder = decoders[numBits];
    final int iters = iterations[numBits];
    // iters iterations must produce at least a full block of values.
    assert iters * decoder.byteValueCount() >= BLOCK_SIZE;
    decoder.decode(encoded, 0, decoded, 0, iters);
}
From source file:com.lucure.core.codec.ForUtil.java
License:Apache License
/**
 * Skip the next block of data without decoding it.
 *
 * Mirrors the framing used by {@code readBlock}: a width byte, then either a
 * single vInt (run-length block) or {@code encodedSizes[numBits]} packed bytes,
 * which are skipped with a seek.
 *
 * @param in the input where to read data
 * @throws IOException If there is a low-level I/O error
 */
void skipBlock(IndexInput in) throws IOException {
    final int numBits = in.readByte();
    if (numBits == ALL_VALUES_EQUAL) {
        // Run-length block: only the repeated value follows; consume it.
        in.readVInt();
        return;
    }
    assert numBits > 0 && numBits <= 32 : numBits;
    // Bit-packed block: jump over the encoded payload in one seek.
    in.seek(in.getFilePointer() + encodedSizes[numBits]);
}
From source file:com.nearinfinity.mele.zookeeper.ZookeeperWrapperDirectory.java
License:Apache License
/**
 * Wraps an IndexInput so that a zookeeper reference node tracks the input's
 * lifetime: the ref is created up front and removed when the input is closed.
 * All read/seek/position calls are delegated unchanged to the wrapped input.
 *
 * NOTE(review): if close() is never called (e.g. the input is abandoned), the
 * zookeeper ref is not removed — confirm callers always close.
 */
private IndexInput wrapRef(final String name, final IndexInput indexInput) {
    // Create the zookeeper ref eagerly; it pins the underlying file while open.
    final String refPath = ZookeeperIndexDeletionPolicy.createRef(zk, indexRefPath, name);
    return new IndexInput() {
        @Override
        public void close() throws IOException {
            // Close the delegate first, then release the zookeeper ref.
            indexInput.close();
            ZookeeperIndexDeletionPolicy.removeRef(zk, refPath);
        }

        @Override
        public long getFilePointer() {
            return indexInput.getFilePointer();
        }

        @Override
        public long length() {
            return indexInput.length();
        }

        @Override
        public byte readByte() throws IOException {
            return indexInput.readByte();
        }

        @Override
        public void readBytes(byte[] b, int offset, int len) throws IOException {
            indexInput.readBytes(b, offset, len);
        }

        @Override
        public void seek(long pos) throws IOException {
            indexInput.seek(pos);
        }

        @Override
        public Object clone() {
            // Clones share the single ref created above; only this wrapper's
            // close() removes it.
            return indexInput.clone();
        }
    };
}
From source file:org.apache.blur.lucene.codec.Blur022SegmentInfoReader.java
License:Apache License
/**
 * Reads a Blur022-format segment-info file for {@code segment} and returns the
 * reconstructed SegmentInfo. The file layout, in order: codec header, version
 * string, doc count, compound-file flag byte, diagnostics map, attributes map,
 * file-name set. The read must consume the file exactly; trailing bytes mean
 * corruption.
 */
@Override
public SegmentInfo read(Directory dir, String segment, IOContext context) throws IOException {
    final String fileName = IndexFileNames.segmentFileName(segment, "", Blur022SegmentInfoFormat.SI_EXTENSION);
    final IndexInput input = dir.openInput(fileName, context);
    // success flag drives the cleanup choice in the finally block below.
    boolean success = false;
    try {
        CodecUtil.checkHeader(input, Blur022SegmentInfoFormat.CODEC_NAME,
                Blur022SegmentInfoFormat.VERSION_START, Blur022SegmentInfoFormat.VERSION_CURRENT);
        final String version = input.readString();
        final int docCount = input.readInt();
        if (docCount < 0) {
            throw new CorruptIndexException("invalid docCount: " + docCount + " (resource=" + input + ")");
        }
        // Single byte flag: SegmentInfo.YES marks a compound-file segment.
        final boolean isCompoundFile = input.readByte() == SegmentInfo.YES;
        final Map<String, String> diagnostics = input.readStringStringMap();
        final Map<String, String> attributes = input.readStringStringMap();
        final Set<String> files = input.readStringSet();

        // The file must be fully consumed; leftovers indicate corruption.
        if (input.getFilePointer() != input.length()) {
            throw new CorruptIndexException("did not read all bytes from file \"" + fileName + "\": read "
                    + input.getFilePointer() + " vs size " + input.length() + " (resource: " + input + ")");
        }

        final SegmentInfo si = new SegmentInfo(dir, version, segment, docCount, isCompoundFile, null, diagnostics,
                Collections.unmodifiableMap(attributes));
        si.setFiles(files);
        success = true;
        return si;
    } finally {
        if (!success) {
            // On failure, close without letting a close() exception mask the cause.
            IOUtils.closeWhileHandlingException(input);
        } else {
            input.close();
        }
    }
}
From source file:org.apache.blur.lucene.codec.DiskDocValuesProducer.java
License:Apache License
private void readFields(IndexInput meta, FieldInfos infos) throws IOException { int fieldNumber = meta.readVInt(); while (fieldNumber != -1) { byte type = meta.readByte(); if (type == DiskDocValuesFormat.NUMERIC) { numerics.put(fieldNumber, readNumericEntry(meta)); } else if (type == DiskDocValuesFormat.BINARY) { BinaryEntry b = readBinaryEntry(meta); binaries.put(fieldNumber, b); } else if (type == DiskDocValuesFormat.SORTED) { // sorted = binary + numeric if (meta.readVInt() != fieldNumber) { throw new CorruptIndexException("sorted entry for field: " + fieldNumber + " is corrupt"); }/* w w w .j a v a 2 s . c om*/ if (meta.readByte() != DiskDocValuesFormat.BINARY) { throw new CorruptIndexException("sorted entry for field: " + fieldNumber + " is corrupt"); } BinaryEntry b = readBinaryEntry(meta); binaries.put(fieldNumber, b); if (meta.readVInt() != fieldNumber) { throw new CorruptIndexException("sorted entry for field: " + fieldNumber + " is corrupt"); } if (meta.readByte() != DiskDocValuesFormat.NUMERIC) { throw new CorruptIndexException("sorted entry for field: " + fieldNumber + " is corrupt"); } NumericEntry n = readNumericEntry(meta); ords.put(fieldNumber, n); } else if (type == DiskDocValuesFormat.SORTED_SET) { // sortedset = binary + numeric + ordIndex if (meta.readVInt() != fieldNumber) { throw new CorruptIndexException("sortedset entry for field: " + fieldNumber + " is corrupt"); } if (meta.readByte() != DiskDocValuesFormat.BINARY) { throw new CorruptIndexException("sortedset entry for field: " + fieldNumber + " is corrupt"); } BinaryEntry b = readBinaryEntry(meta); binaries.put(fieldNumber, b); if (meta.readVInt() != fieldNumber) { throw new CorruptIndexException("sortedset entry for field: " + fieldNumber + " is corrupt"); } if (meta.readByte() != DiskDocValuesFormat.NUMERIC) { throw new CorruptIndexException("sortedset entry for field: " + fieldNumber + " is corrupt"); } NumericEntry n1 = readNumericEntry(meta); ords.put(fieldNumber, n1); if 
(meta.readVInt() != fieldNumber) { throw new CorruptIndexException("sortedset entry for field: " + fieldNumber + " is corrupt"); } if (meta.readByte() != DiskDocValuesFormat.NUMERIC) { throw new CorruptIndexException("sortedset entry for field: " + fieldNumber + " is corrupt"); } NumericEntry n2 = readNumericEntry(meta); ordIndexes.put(fieldNumber, n2); } else { throw new CorruptIndexException("invalid type: " + type + ", resource=" + meta); } fieldNumber = meta.readVInt(); } }