Example usage for org.apache.hadoop.fs FSDataInputStream read

List of usage examples for org.apache.hadoop.fs FSDataInputStream read

Introduction

On this page you can find example usage of org.apache.hadoop.fs FSDataInputStream read, drawn from open-source projects.

Prototype

@Override
public ByteBuffer read(ByteBufferPool bufferPool, int maxLength, EnumSet<ReadOption> opts)
        throws IOException, UnsupportedOperationException
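
This ByteBuffer overload hands out a pooled buffer (or null at end of stream), and the caller is expected to return every buffer through releaseBuffer once it has been consumed. The minimal sketch below is not taken from the examples that follow; the path /tmp/bytebuffer-read-demo and the 64 KB maxLength are placeholder values chosen only for illustration.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.io.ByteBufferPool;
import org.apache.hadoop.io.ElasticByteBufferPool;

public class ByteBufferReadSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        ByteBufferPool pool = new ElasticByteBufferPool();

        // Placeholder path; replace with a real file on the target file system.
        try (FSDataInputStream in = fs.open(new Path("/tmp/bytebuffer-read-demo"))) {
            long total = 0;
            ByteBuffer buf;
            // read(...) returns null once the end of the stream is reached.
            while ((buf = in.read(pool, 64 * 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS))) != null) {
                total += buf.remaining();
                // Every buffer obtained from read(pool, ...) must be handed back.
                in.releaseBuffer(buf);
            }
            System.out.println("bytes read: " + total);
        }
    }
}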

Usage

From source file:FormatStorageBasicTest.java

License:Open Source License

public void testChunkToRecord() {
    try {
        String fileName = prefix + "testChunkToRecord";
        Path path = new Path(fileName);
        FileSystem fs = FileSystem.get(new Configuration());
        FSDataOutputStream out = fs.create(path);

        short fieldNum = 3;
        Record record = new Record(fieldNum);

        byte[] lb = new byte[ConstVar.Sizeof_Long];
        long l = 4;
        Util.long2bytes(lb, l);
        FieldValue fieldValue4 = new FieldValue(ConstVar.FieldType_Long, ConstVar.Sizeof_Long, lb, (short) 13);
        record.addValue(fieldValue4);

        byte[] fb = new byte[ConstVar.Sizeof_Float];
        float f = (float) 5.5;
        Util.float2bytes(fb, f);
        FieldValue fieldValue5 = new FieldValue(ConstVar.FieldType_Float, ConstVar.Sizeof_Float, fb,
                (short) 14);
        record.addValue(fieldValue5);

        String str = "hello konten";
        FieldValue fieldValue7 = new FieldValue(ConstVar.FieldType_String, (short) str.length(), str.getBytes(),
                (short) 16);
        record.addValue(fieldValue7);

        DataChunk chunk = new DataChunk(record);

        out.write(chunk.values, 0, (int) chunk.len);

        if (out.getPos() != chunk.len) {
            fail("error pos:" + out.getPos() + "chunk.len:" + chunk.len);
        }
        out.close();

        // Read the chunk back directly: the null bitmap first, then the long, float, and string fields.
        FSDataInputStream in = fs.open(path);

        FixedBitSet bitSet = new FixedBitSet(fieldNum);
        in.read(bitSet.bytes(), 0, bitSet.size());
        for (int i = 0; i < fieldNum; i++) {
            if (!bitSet.get(i)) {
                fail("should set:" + i);
            }
        }

        byte[] value = new byte[8];
        in.readFully(value);
        long lv = Util.bytes2long(value, 0, 8);
        if (lv != 4) {
            fail("error long value:" + lv);
        }

        value = new byte[4];
        in.readFully(value);
        float fv = Util.bytes2float(value, 0);
        if (fv != 5.5) {
            fail("error float value:" + fv);
        }

        short strLen = in.readShort();
        if (strLen != str.length()) {
            fail("error strLen:" + strLen);
        }
        value = new byte[strLen];
        in.readFully(value);
        String strv = new String(value);
        if (!strv.equals(str)) {
            fail("error strv:" + strv);
        }

        FieldMap fieldMap = new FieldMap();
        fieldMap.addField(new Field(ConstVar.FieldType_Long, 8, (short) 13));
        fieldMap.addField(new Field(ConstVar.FieldType_Float, 4, (short) 14));
        fieldMap.addField(new Field(ConstVar.FieldType_String, 8, (short) 16));

        in.seek(0);
        // 1-byte null bitmap + 8-byte long + 4-byte float + 2-byte string length + 12-byte string
        int valuelen = 1 + 8 + 4 + 2 + 12;
        DataChunk chunk2 = new DataChunk(fieldNum);

        ArrayList<byte[]> arrayList = new ArrayList<byte[]>(64);
        DataInputBuffer inputBuffer = new DataInputBuffer();
        byte[] buf = new byte[valuelen];
        in.read(buf, 0, valuelen);
        inputBuffer.reset(buf, 0, valuelen);
        chunk2.unpersistent(0, valuelen, inputBuffer);
        Record record2 = chunk2.toRecord(fieldMap, true, arrayList);

        bitSet = chunk2.fixedBitSet;
        if (bitSet.length() != (fieldNum / 8 + 1) * 8) {
            fail("bitSet.len:" + bitSet.length());
        }

        for (int i = 0; i < fieldNum; i++) {
            if (!bitSet.get(i)) {
                fail("bitSet should set:" + i);
            }
        }
        record = record2;

        int index = 0;
        byte type = record2.fieldValues().get(index).type;
        int len = record2.fieldValues().get(index).len;
        short idx = record2.fieldValues().get(index).idx;
        value = record2.fieldValues().get(index).value;
        if (len != ConstVar.Sizeof_Long) {
            fail("error len:" + len);
        }
        if (type != ConstVar.FieldType_Long) {
            fail("error fieldType:" + type);
        }
        if (idx != 13) {
            fail("error idx:" + idx);
        }
        if (value == null) {
            fail("error value null");
        }

        lv = Util.bytes2long(value, 0, len);
        if (lv != 4) {
            fail("error long value:" + lv);
        }

        index = 1;
        type = record.fieldValues().get(index).type;
        len = record.fieldValues().get(index).len;
        idx = record.fieldValues().get(index).idx;
        value = record.fieldValues().get(index).value;

        if (len != ConstVar.Sizeof_Float) {
            fail("error len:" + len);
        }
        if (type != ConstVar.FieldType_Float) {
            fail("error fieldType:" + type);
        }
        if (idx != 14) {
            fail("error idx:" + idx);
        }
        if (value == null) {
            fail("error value null");
        }
        fv = Util.bytes2float(value, 0);
        if (fv != 5.5) {
            fail("error float value:" + fv);
        }

        index = 2;
        type = record.fieldValues().get(index).type;
        len = record.fieldValues().get(index).len;
        idx = record.fieldValues().get(index).idx;
        value = record.fieldValues().get(index).value;

        str = "hello konten";
        if (len != str.length()) {
            fail("error len:" + len);
        }
        if (type != ConstVar.FieldType_String) {
            fail("error fieldType:" + type);
        }
        if (idx != 16) {
            fail("error idx:" + idx);
        }
        if (value == null) {
            fail("error value null");
        }
        String sv = new String(value, 0, len);
        if (!str.equals(sv)) {
            fail("error string value:" + sv);
        }

    } catch (Exception e) {
        fail("should not exception:" + e.getMessage());
    }
}

From source file:FormatStorageBasicTest.java

License:Open Source License

public void testChunkToRecordNull() {
    try {
        String fileName = prefix + "testChunkToRecord2";
        Path path = new Path(fileName);
        FileSystem fs = FileSystem.get(new Configuration());
        FSDataOutputStream out = fs.create(path);

        short fieldNum = 3;
        Record record = new Record(fieldNum);

        byte[] lb = new byte[ConstVar.Sizeof_Long];
        long l = 4;
        Util.long2bytes(lb, l);
        FieldValue fieldValue4 = new FieldValue(ConstVar.FieldType_Long, ConstVar.Sizeof_Long, lb, (short) 13);
        record.addValue(fieldValue4);

        FieldValue fieldValue5 = new FieldValue(ConstVar.FieldType_Float, ConstVar.Sizeof_Float, null,
                (short) 14);
        record.addValue(fieldValue5);

        String str = "hello konten";
        FieldValue fieldValue7 = new FieldValue(ConstVar.FieldType_String, (short) str.length(), str.getBytes(),
                (short) 16);
        record.addValue(fieldValue7);

        DataChunk chunk = new DataChunk(record);

        out.write(chunk.values, 0, (int) chunk.len);

        if (out.getPos() != chunk.len) {
            fail("error pos:" + out.getPos() + "chunk.len:" + chunk.len);
        }
        out.close();

        FSDataInputStream in = fs.open(path);

        FixedBitSet bitSet = new FixedBitSet(fieldNum);
        in.read(bitSet.bytes(), 0, bitSet.size());

        for (int i = 0; i < fieldNum; i++) {
            if (bitSet.get(1)) {
                fail("shoud not set");
            }

            if (!bitSet.get(i) && i != 1) {
                fail("should set:" + i);
            }
        }

        byte[] value = new byte[8];
        in.readFully(value);
        long lv = Util.bytes2long(value, 0, 8);
        if (lv != 4) {
            fail("error long value:" + lv);
        }

        in.readFloat();

        short strLen = in.readShort();
        if (strLen != str.length()) {
            fail("error strLen:" + strLen);
        }
        value = new byte[strLen];
        in.readFully(value);
        String strv = new String(value, 0, strLen);
        if (!strv.equals(str)) {
            fail("error strv:" + strv);
        }

        FieldMap fieldMap = new FieldMap();
        fieldMap.addField(new Field(ConstVar.FieldType_Long, 8, (short) 13));
        fieldMap.addField(new Field(ConstVar.FieldType_Float, 4, (short) 14));
        fieldMap.addField(new Field(ConstVar.FieldType_String, 8, (short) 16));

        in.seek(0);
        int valuelen = 1 + 8 + 4 + 2 + 12;
        DataChunk chunk2 = new DataChunk(fieldNum);

        ArrayList<byte[]> arrayList = new ArrayList<byte[]>(64);

        DataInputBuffer inputBuffer = new DataInputBuffer();
        byte[] buf = new byte[valuelen];
        in.read(buf, 0, valuelen);
        inputBuffer.reset(buf, 0, valuelen);
        chunk2.unpersistent(0, valuelen, inputBuffer);
        Record record2 = chunk2.toRecord(fieldMap, true, arrayList);

        bitSet = chunk2.fixedBitSet;

        for (int i = 0; i < fieldNum; i++) {
            if (bitSet.get(1)) {
                fail("shoud not set");
            }

            if (!bitSet.get(i) && i != 1) {
                fail("should set:" + i);
            }
        }
        record = record2;

        int index = 0;
        byte type = record2.fieldValues().get(index).type;
        int len = record2.fieldValues().get(index).len;
        short idx = record2.fieldValues().get(index).idx;
        value = record2.fieldValues().get(index).value;
        if (len != ConstVar.Sizeof_Long) {
            fail("error len:" + len);
        }
        if (type != ConstVar.FieldType_Long) {
            fail("error fieldType:" + type);
        }
        if (idx != 13) {
            fail("error idx:" + idx);
        }
        if (value == null) {
            fail("error value null");
        }
        lv = Util.bytes2long(value, 0, 8);
        if (lv != 4) {
            fail("error long value:" + lv);
        }

        index = 1;
        type = record.fieldValues().get(index).type;
        len = record.fieldValues().get(index).len;
        idx = record.fieldValues().get(index).idx;
        value = record.fieldValues().get(index).value;

        if (len != ConstVar.Sizeof_Float) {
            fail("error len:" + len);
        }
        if (type != ConstVar.FieldType_Float) {
            fail("error fieldType:" + type);
        }
        if (idx != 14) {
            fail("error idx:" + idx);
        }
        if (value != null) {
            fail("error value not null");
        }

        index = 2;
        type = record.fieldValues().get(index).type;
        len = record.fieldValues().get(index).len;
        idx = record.fieldValues().get(index).idx;
        value = record.fieldValues().get(index).value;

        str = "hello konten";
        if (len != str.length()) {
            fail("error len:" + len);
        }
        if (type != ConstVar.FieldType_String) {
            fail("error fieldType:" + type);
        }
        if (idx != 16) {
            fail("error idx:" + idx);
        }
        if (value == null) {
            fail("error value null");
        }
        String sv = new String(value, 0, len);
        if (!str.equals(sv)) {
            fail("error string value:" + sv);
        }

    } catch (Exception e) {
        e.printStackTrace();
        fail("should not exception:" + e.getMessage());
    }
}

From source file:HdfsCacheReader.java

License:Apache License

public int run(String[] args) throws Exception {
    if (args.length < 1) {
        System.err.println("HdfsReader [FileSize i.e. 1g/10g/100g/200g]");
        return 1;
    }

    double fileSize;
    double fileSizeInMB;
    if (args[0].equals("1g")) {
        fileSize = 1073741824.0;
        fileSizeInMB = 1024.0;
    } else if (args[0].equals("10g")) {
        fileSize = 10737418240.0;
        fileSizeInMB = 10240.0;
    } else if (args[0].equals("100g")) {
        fileSize = 107374182400.0;
        fileSizeInMB = 102400.0;
    } else if (args[0].equals("200g")) {
        fileSize = 214748364800.0;
        fileSizeInMB = 204800.0;
    } else {
        throw new IllegalArgumentException("Invalid arg: " + args[0]);
    }

    String fileName = "cacheRead-" + args[0] + "-avg.txt";
    File avgFile = new File(fileName);
    PrintWriter avgPW = new PrintWriter(avgFile);
    fileName = "cacheRead-" + args[0] + "-min.txt";
    File minFile = new File(fileName);
    PrintWriter minPW = new PrintWriter(minFile);
    fileName = "cacheRead-" + args[0] + "-max.txt";
    File maxFile = new File(fileName);
    PrintWriter maxPW = new PrintWriter(maxFile);

    int numIters = 10;
    int bufferSize = 65536;
    long blockSize[] = new long[] { 67108864, 134217728, 268435456, 536870912, 1073741824 };
    short replication[] = new short[] { 1, 4 };
    String hdfsFile = "/hdfs_test/" + args[0] + "/1.in";
    Configuration conf = getConf();
    FileSystem fs = FileSystem.get(conf);
    Path hdfsFilePath = new Path(hdfsFile);

    for (int i = 0; i < 5; i++) { // blockSize
        for (int j = 0; j < 2; j++) { // replication
            OutputStream os = fs.create(hdfsFilePath, true, bufferSize, replication[j], blockSize[i]);
            byte[] buf = new byte[bufferSize];
            for (int m = 0; m < bufferSize; m += 4) {
                buf[m] = (byte) m;
            }
            double numBufPerFile = fileSize / (double) bufferSize;

            for (double m = 0.0; m < numBufPerFile; m++) {
                os.write(buf);
            }
            os.close();
            String cmdStr = "/usr/local/hadoop/bin/hdfs cacheadmin -addDirective -path " + hdfsFile
                    + " -pool hdfs_test";
            Process p = Runtime.getRuntime().exec(cmdStr);
            p.waitFor();
            String cmdOutLine = "";
            StringBuffer cmdOut = new StringBuffer();
            BufferedReader cmdOutReader = new BufferedReader(new InputStreamReader(p.getInputStream()));
            while ((cmdOutLine = cmdOutReader.readLine()) != null) {
                cmdOut.append(cmdOutLine + "\n");
            }
            // System.out.println (cmdOut.toString());

            long avg = 0, min = Long.MAX_VALUE, max = Long.MIN_VALUE;
            for (int k = 0; k < numIters; k++) {
                FSDataInputStream in = fs.open(hdfsFilePath);
                ByteBuffer bbuf = null;
                ElasticByteBufferPool ebbp = new ElasticByteBufferPool();
                long startTime = System.currentTimeMillis();
                // The ByteBuffer read returns null at end of stream; each buffer must be released after use.
                while ((bbuf = in.read(ebbp, bufferSize, EnumSet.of(ReadOption.SKIP_CHECKSUMS))) != null) {
                    in.releaseBuffer(bbuf);
                }
                long endTime = System.currentTimeMillis();
                in.close();
                long duration = (endTime - startTime);
                avg += duration;
                if (duration < min) {
                    min = duration;
                }
                if (duration > max) {
                    max = duration;
                }
            }
            // write result to output
            double avgBW = fileSizeInMB * 1000.0 * (double) numIters / (double) avg;
            avgPW.print(avgBW);
            avgPW.print("\t");
            double minBW = fileSizeInMB * 1000.0 / (double) max;
            minPW.print(minBW);
            minPW.print("\t");
            double maxBW = fileSizeInMB * 1000.0 / (double) min;
            maxPW.print(maxBW);
            maxPW.print("\t");
            cmdStr = "/usr/local/hadoop/bin/hdfs cacheadmin -removeDirectives -path " + hdfsFile;
            p = Runtime.getRuntime().exec(cmdStr);
            p.waitFor();
            cmdOutLine = "";
            cmdOut.setLength(0);
            cmdOutReader = new BufferedReader(new InputStreamReader(p.getInputStream()));
            while ((cmdOutLine = cmdOutReader.readLine()) != null) {
                cmdOut.append(cmdOutLine + "\n");
            }
            // System.out.println (cmdOut.toString());
            fs.delete(hdfsFilePath, true);
        }
        avgPW.println();
        minPW.println();
        maxPW.println();
    }
    avgPW.close();
    minPW.close();
    maxPW.close();
    return 0;
}

From source file:co.cask.tigon.logging.LogFileReader.java

License:Apache License

private long determineTrueFileSize(Path path, FileStatus status) throws IOException {
    FSDataInputStream stream = fileSystem.open(path);
    try {
        stream.seek(status.getLen());
        // we need to read repeatedly until we reach the end of the file
        byte[] buffer = new byte[1024 * 1024];
        while (stream.read(buffer, 0, buffer.length) >= 0) {
            // empty body.
        }
        long trueSize = stream.getPos();
        return trueSize;
    } finally {
        stream.close();
    }
}

From source file:com.aliyun.fs.oss.TestAliyunOSSInputStream.java

License:Apache License

@Test
public void testReadFile() throws Exception {
    final int bufLen = 256;
    final int sizeFlag = 5;
    String filename = "readTestFile_" + sizeFlag + ".txt";
    Path readTestFile = setPath("/test/" + filename);
    long size = sizeFlag * 1024 * 1024;

    ContractTestUtils.generateTestFile(this.fs, readTestFile, size, 256, 255);
    LOG.info(sizeFlag + "MB file created: /test/" + filename);

    FSDataInputStream instream = this.fs.open(readTestFile);
    byte[] buf = new byte[bufLen];
    long bytesRead = 0;
    while (bytesRead < size) {
        int bytes;
        if (size - bytesRead < bufLen) {
            int remaining = (int) (size - bytesRead);
            bytes = instream.read(buf, 0, remaining);
        } else {
            bytes = instream.read(buf, 0, bufLen);
        }
        bytesRead += bytes;

        if (bytesRead % (1024 * 1024) == 0) {
            int available = instream.available();
            int remaining = (int) (size - bytesRead);
            assertTrue("expected remaining:" + remaining + ", but got:" + available, remaining == available);
            LOG.info("Bytes read: " + Math.round((double) bytesRead / (1024 * 1024)) + " MB");
        }
    }
    assertTrue(instream.available() == 0);
    IOUtils.closeStream(instream);
}

From source file:com.cloudera.ByteBufferRecordReader.java

License:Apache License

@Override
public synchronized boolean nextKeyValue() throws IOException {
    if (key == null) {
        key = new LongWritable();
    }
    if (value == null) {
        value = new ByteBufferWritable();
    }
    if (pos >= end) {
        return false;
    }

    int numBytesRead = 0;
    // Use zero-copy ByteBuffer reads if available
    if (inputStream instanceof FSDataInputStream) {
        FSDataInputStream fsIn = (FSDataInputStream) inputStream;
        ByteBuffer buf = fsIn.read(bufferPool, (int) (end - start), readOption);
        numBytesRead += buf.limit();
        pos += buf.limit();
        // Update stats
        InputStream wrappedStream = fsIn.getWrappedStream();
        if (wrappedStream instanceof DFSInputStream) {
            DFSInputStream dfsIn = (DFSInputStream) wrappedStream;
            updateStats(dfsIn.getReadStatistics());
        }
        // Switch out the buffers
        if (value.getBuffer() != null) {
            fsIn.releaseBuffer(value.getBuffer());
        }
        value.setByteBuffer(buf);
    }
    // Fallback to normal byte[] based reads with a copy to the ByteBuffer
    else {
        byte[] b = new byte[(int) (end - start)];
        IOUtils.readFully(inputStream, b);
        numBytesRead += b.length;
        pos += b.length;
        value.setByteBuffer(ByteBuffer.wrap(b));
    }

    return numBytesRead > 0;
}

From source file:com.google.cloud.hadoop.fs.gcs.HadoopFileSystemTestBase.java

License:Open Source License

/**
 * Tests read() when invalid arguments are passed.
 */
@Test
public void testReadInvalidArgs() throws IOException {
    URI path = GoogleCloudStorageFileSystemIntegrationTest.getTempFilePath();
    Path hadoopPath = ghfsHelper.castAsHadoopPath(path);
    ghfsHelper.writeFile(hadoopPath, "file text", 1, true);
    FSDataInputStream readStream = ghfs.open(hadoopPath, GoogleHadoopFileSystemBase.BUFFERSIZE_DEFAULT);
    byte[] buffer = new byte[1];

    // Verify that normal read works.
    int numBytesRead = readStream.read(buffer, 0, buffer.length);
    Assert.assertEquals("Expected exactly 1 byte to be read", 1, numBytesRead);

    // Null buffer.
    testReadInvalidArgsHelper(readStream, null, 0, 1, NullPointerException.class);

    // offset < 0
    testReadInvalidArgsHelper(readStream, buffer, -1, 1, IndexOutOfBoundsException.class);

    // length < 0
    testReadInvalidArgsHelper(readStream, buffer, 0, -1, IndexOutOfBoundsException.class);

    // length > buffer.length - offset
    testReadInvalidArgsHelper(readStream, buffer, 0, 2, IndexOutOfBoundsException.class);
}

From source file:com.google.cloud.hadoop.fs.gcs.HadoopFileSystemTestBase.java

License:Open Source License

private void testReadInvalidArgsHelper(FSDataInputStream readStream, byte[] buffer, int offset, int length,
        Class<? extends Exception> exceptionClass) {
    try {
        readStream.read(buffer, offset, length);
        Assert.fail("Expected " + exceptionClass.getName());
    } catch (Exception e) {
        if (e.getClass() != exceptionClass) {
            Assert.fail("Unexpected exception: " + e);
        }
    }
}

From source file:com.ibm.crail.hdfs.tools.HdfsIOBenchmark.java

License:Apache License

private int read(FSDataInputStream stream, byte[] buf) throws IOException {
    int off = 0;
    int len = buf.length;
    int ret = stream.read(buf, off, len);
    while (ret > 0 && len - ret > 0) {
        len -= ret;
        off += ret;
        ret = stream.read(buf, off, len);
    }
    return off > 0 || ret > 0 ? ret : -1;
}

From source file:com.linkedin.cubert.block.BlockUtils.java

License:Open Source License

@SuppressWarnings("unchecked")
public static Block loadBlock(BlockProperties props, IndexEntry indexEntry, Configuration conf, JsonNode json,
        BlockSerializationType serializationType, boolean isInMemoryBlock) throws IOException,
        ClassNotFoundException, InstantiationException, IllegalAccessException, InterruptedException {
    Block block;
    if (indexEntry == null) {
        if (emptyForMissing)
            return new EmptyBlock(props);

        throw new IOException(String.format("Index entry is null"));
    }

    // populate props
    props.setBlockId(indexEntry.getBlockId());
    props.setNumRecords(indexEntry.getNumRecords());

    // Open the file and seek to the offset for this block
    Path file = new Path(indexEntry.getFile());
    FileSystem fs = file.getFileSystem(conf);
    FSDataInputStream fsin = fs.open(file, BLOCK_BUFFER_SIZE);
    fsin.seek(indexEntry.getOffset());

    // Gather information needed to read this block
    Class<Tuple> valueClass = (Class<Tuple>) TupleFactory.getInstance().newTuple().getClass();
    CompressionCodec codec = new CompressionCodecFactory(conf).getCodec(file);

    // Load the block now
    if (isInMemoryBlock) {
        print.f("LOADING IN MEMORY the block %d", indexEntry.getBlockId());

        ByteBuffer byteBuffer = inMemoryBlockCache.get(indexEntry);

        if (byteBuffer == null) {
            int read = 0;
            byte[] data = new byte[(int) indexEntry.getLength()];
            while (read != data.length) {
                read += fsin.read(data, read, data.length - read);
            }
            fsin.close();

            byteBuffer = ByteBuffer.wrap(data);

            inMemoryBlockCache.put(indexEntry, byteBuffer);
        } else {
            print.f("REUSED FROM CACHE!!");
            byteBuffer.rewind();
        }

        block = new RubixMemoryBlock(props, conf, byteBuffer, valueClass, codec, serializationType);
        block.configure(json);
        return block;
    } else {
        print.f("STREAMING the block %d", indexEntry.getBlockId());
        InputStream in = new BlockInputStream(fsin, indexEntry.getLength());

        if (codec != null) {
            in = codec.createInputStream(in);
        }

        block = new CubertBlock(props,
                new BlockIterator<Tuple>(conf, in, valueClass, serializationType, props.getSchema()));
        block.configure(json);

        print.f("Loaded block id=%d from file=%s offset=%d length=%d", indexEntry.getBlockId(), file.toString(),
                indexEntry.getOffset(), indexEntry.getLength());

        return block;
    }
}