Example usage for org.apache.hadoop.fs FileStatus getBlockSize

List of usage examples for org.apache.hadoop.fs FileStatus getBlockSize

Introduction

On this page you can find example usage for org.apache.hadoop.fs FileStatus getBlockSize.

Prototype

public long getBlockSize() 

Document

Get the block size of the file.
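A minimal sketch of the call pattern the examples below build on: obtain a FileStatus for a path and combine getBlockSize() with getLen() to derive block counts and offsets. The class name and path here are placeholders for illustration, not taken from the projects listed under Usage.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockSizeExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Placeholder path used only for illustration.
        Path file = new Path("/tmp/example.dat");

        FileStatus status = fs.getFileStatus(file);
        long blockSize = status.getBlockSize(); // block size of this file, in bytes
        long fileLen = status.getLen();         // total file length, in bytes

        // Number of blocks, rounding up for a partially filled last block.
        long blockCount = (fileLen + blockSize - 1) / blockSize;
        System.out.println("blockSize=" + blockSize + ", blocks=" + blockCount);
    }
}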

Usage

From source file: io.hops.erasure_coding.ReedSolomonDecoder.java

License: Apache License

protected int[] buildInputs(FileSystem fs, Path srcFile, FileSystem parityFs, Path parityFile,
        boolean fixSource, long errorOffset, FSDataInputStream[] inputs) throws IOException {
    LOG.info("Building inputs to recover block starting at " + errorOffset);
    try {
        FileStatus srcStat = fs.getFileStatus(srcFile);
        FileStatus parityStat = fs.getFileStatus(parityFile);
        long blockSize = srcStat.getBlockSize();
        long blockIdx = (int) (errorOffset / blockSize);
        long stripeIdx;
        if (fixSource) {
            stripeIdx = blockIdx / stripeSize;
        } else {
            stripeIdx = blockIdx / paritySize;
        }

        LOG.info("FileSize = " + srcStat.getLen() + ", blockSize = " + blockSize + ", blockIdx = " + blockIdx
                + ", stripeIdx = " + stripeIdx);
        ArrayList<Integer> erasedLocations = new ArrayList<Integer>();
        // First open streams to the parity blocks.
        for (int i = 0; i < paritySize; i++) {
            long offset = blockSize * (stripeIdx * paritySize + i);
            if ((!fixSource) && offset == errorOffset) {
                LOG.info(parityFile + ":" + offset + " is known to have error, adding zeros as input " + i);
                inputs[i] = new FSDataInputStream(new RaidUtils.ZeroInputStream(offset + blockSize));
                erasedLocations.add(i);
            } else if (offset > parityStat.getLen()) {
                LOG.info(parityFile + ":" + offset + " is past file size, adding zeros as input " + i);
                inputs[i] = new FSDataInputStream(new RaidUtils.ZeroInputStream(offset + blockSize));
            } else {
                FSDataInputStream in = parityFs.open(parityFile, conf.getInt("io.file.buffer.size", 64 * 1024));
                in.seek(offset);
                LOG.info("Adding " + parityFile + ":" + offset + " as input " + i);
                inputs[i] = in;
            }
        }
        // Now open streams to the data blocks.
        for (int i = paritySize; i < paritySize + stripeSize; i++) {
            long offset = blockSize * (stripeIdx * stripeSize + i - paritySize);
            if (fixSource && offset == errorOffset) {
                LOG.info(srcFile + ":" + offset + " is known to have error, adding zeros as input " + i);
                inputs[i] = new FSDataInputStream(new RaidUtils.ZeroInputStream(offset + blockSize));
                erasedLocations.add(i);
            } else if (offset > srcStat.getLen()) {
                LOG.info(srcFile + ":" + offset + " is past file size, adding zeros as input " + i);
                inputs[i] = new FSDataInputStream(new RaidUtils.ZeroInputStream(offset + blockSize));
            } else {
                FSDataInputStream in = fs.open(srcFile, conf.getInt("io.file.buffer.size", 64 * 1024));
                in.seek(offset);
                LOG.info("Adding " + srcFile + ":" + offset + " as input " + i);
                inputs[i] = in;
            }
        }
        if (erasedLocations.size() > paritySize) {
            String msg = "Too many erased locations: " + erasedLocations.size();
            LOG.error(msg);
            throw new IOException(msg);
        }
        int[] locs = new int[erasedLocations.size()];
        for (int i = 0; i < locs.length; i++) {
            locs[i] = erasedLocations.get(i);
        }
        return locs;
    } catch (IOException e) {
        RaidUtils.closeStreams(inputs);
        throw e;
    }

}

From source file: io.hops.erasure_coding.StripeReader.java

License: Apache License

/**
 * Builds (codec.stripeLength + codec.parityLength) inputs given some erased
 * locations.
 * Outputs:
 * - the array of input streams @param inputs
 * - the list of erased locations @param erasedLocations.
 * - the list of locations that are not read @param locationsToNotRead.
 */
public InputStream[] buildInputs(FileSystem srcFs, Path srcFile, FileStatus srcStat, FileSystem parityFs,
        Path parityFile, FileStatus parityStat, int stripeIdx, long offsetInBlock,
        List<Integer> erasedLocations, List<Integer> locationsToRead, ErasureCode code) throws IOException {
    InputStream[] inputs = new InputStream[codec.stripeLength + codec.parityLength];
    boolean redo = false;
    do {
        /*
         * In the first iteration locationsToRead is empty.
         * It is populated according to locationsToReadForDecode.
         * In consecutive iterations (if a stream failed to open)
         * the list is cleared and re-populated.
         */
        locationsToRead.clear();
        locationsToRead.addAll(code.locationsToReadForDecode(erasedLocations));

        for (int i = 0; i < inputs.length; i++) {
            boolean isErased = (erasedLocations.indexOf(i) != -1);
            boolean shouldRead = (locationsToRead.indexOf(i) != -1);
            try {
                InputStream stm = null;
                if (isErased || !shouldRead) {
                    if (isErased) {
                        LOG.info("Location " + i + " is erased, using zeros");
                    } else {
                        LOG.info("Location " + i + " need not be read, using zeros");
                    }

                    stm = new RaidUtils.ZeroInputStream(srcStat.getBlockSize()
                            * ((i < codec.parityLength) ? stripeIdx * codec.parityLength + i
                                    : stripeIdx * codec.stripeLength + i - codec.parityLength));
                } else {
                    stm = buildOneInput(i, offsetInBlock, srcFs, srcFile, srcStat, parityFs, parityFile,
                            parityStat);
                }
                inputs[i] = stm;
            } catch (IOException e) {
                if (e instanceof BlockMissingException || e instanceof ChecksumException) {
                    erasedLocations.add(i);
                    redo = true;
                    RaidUtils.closeStreams(inputs);
                    break;
                } else {
                    throw e;
                }
            }
        }
    } while (redo);
    return inputs;
}

From source file: io.hops.erasure_coding.StripeReader.java

License: Apache License

protected InputStream getParityFileInput(int locationIndex, Path parityFile, FileSystem parityFs,
        FileStatus parityStat, long offsetInBlock) throws IOException {
    // Dealing with a parity file here.
    int parityBlockIdx = (int) (codec.parityLength * stripeStartIdx + locationIndex);
    long offset = parityStat.getBlockSize() * parityBlockIdx + offsetInBlock;
    assert (offset < parityStat.getLen());
    LOG.info("Opening " + parityFile + ":" + offset + " for location " + locationIndex);
    FSDataInputStream s = parityFs.open(parityFile, conf.getInt("io.file.buffer.size", 64 * 1024));
    s.seek(offset);
    return s;
}

From source file: io.hops.erasure_coding.TestBlockReconstructor.java

License: Apache License

@Test
public void testSourceBlockRepair() throws IOException, InterruptedException {
    DistributedFileSystem dfs = (DistributedFileSystem) getFileSystem();
    TestDfsClient testDfsClient = new TestDfsClient(getConfig());
    testDfsClient.injectIntoDfs(dfs);
    FileStatus testFileStatus = dfs.getFileStatus(testFile);

    String path = testFileStatus.getPath().toUri().getPath();
    int blockToLoose = new Random(seed)
            .nextInt((int) (testFileStatus.getLen() / testFileStatus.getBlockSize()));
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(path, 0, Long.MAX_VALUE).get(blockToLoose);
    DataNodeUtil.loseBlock(getCluster(), lb);
    List<LocatedBlock> lostBlocks = new ArrayList<LocatedBlock>();
    lostBlocks.add(lb);
    LocatedBlocks locatedBlocks = new LocatedBlocks(0, false, lostBlocks, null, true);
    testDfsClient.setMissingLocatedBlocks(locatedBlocks);

    LocatedBlocks missingBlocks = new LocatedBlocks(testFileStatus.getLen(), false,
            new ArrayList<LocatedBlock>(), null, true);
    missingBlocks.getLocatedBlocks().add(lb);
    BlockReconstructor blockReconstructor = new BlockReconstructor(conf);
    Decoder decoder = new Decoder(conf, Util.getCodec(Util.Codecs.SRC));
    blockReconstructor.processFile(testFile, testParityFile, missingBlocks, decoder, null);

    // Block is recovered to the same data node so no need to wait for the block report
    try {
        FSDataInputStream in = dfs.open(testFile);
        byte[] buff = new byte[TEST_BLOCK_COUNT * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
    } catch (BlockMissingException e) {
        LOG.error("Reading failed", e);
        fail("Repair failed. Missing a block.");
    }
}

From source file: io.hops.erasure_coding.TestBlockReconstructor.java

License: Apache License

@Test
public void testParityBlockRepair() throws IOException, InterruptedException {
    DistributedFileSystem dfs = (DistributedFileSystem) getFileSystem();
    TestDfsClient testDfsClient = new TestDfsClient(getConfig());
    testDfsClient.injectIntoDfs(dfs);
    FileStatus parityFileStatus = dfs.getFileStatus(testParityFile);

    String path = parityFileStatus.getPath().toUri().getPath();
    int blockToLoose = new Random(seed)
            .nextInt((int) (parityFileStatus.getLen() / parityFileStatus.getBlockSize()));
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(path, 0, Long.MAX_VALUE).get(blockToLoose);
    DataNodeUtil.loseBlock(getCluster(), lb);
    List<LocatedBlock> lostBlocks = new ArrayList<LocatedBlock>();
    lostBlocks.add(lb);
    LocatedBlocks locatedBlocks = new LocatedBlocks(0, false, lostBlocks, null, true);
    testDfsClient.setMissingLocatedBlocks(locatedBlocks);

    LocatedBlocks missingBlocks = new LocatedBlocks(parityFileStatus.getLen(), false,
            new ArrayList<LocatedBlock>(), null, true);
    missingBlocks.getLocatedBlocks().add(lb);
    BlockReconstructor blockReconstructor = new BlockReconstructor(conf);
    Decoder decoder = new Decoder(conf, Util.getCodec(Util.Codecs.SRC));
    blockReconstructor.processParityFile(testFile, testParityFile, missingBlocks, decoder, null);

    // Block is recovered to the same data node so no need to wait for the block report
    try {
        FSDataInputStream in = dfs.open(testParityFile);
        byte[] buff = new byte[DFS_TEST_BLOCK_SIZE * codec.parityLength];
        in.readFully(0, buff);
    } catch (BlockMissingException e) {
        LOG.error("Reading failed", e);
        fail("Repair failed. Missing a block.");
    }
}

From source file: io.hops.erasure_coding.TestErasureCodingManager.java

License: Apache License

@Test
public void testSourceRepair() throws IOException, InterruptedException {
    Codec.initializeCodecs(getConfig());
    EncodingPolicy policy = new EncodingPolicy("src", (short) 1);
    Util.createRandomFile(dfs, testFile, seed, TEST_BLOCK_COUNT, DFS_TEST_BLOCK_SIZE, policy);
    FileStatus testFileStatus = dfs.getFileStatus(testFile);

    while (!dfs.getEncodingStatus(testFile.toUri().getPath()).isEncoded()) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            LOG.error("Wait for encoding thread was interrupted.");
        }
    }

    Thread.sleep(2 * conf.getInt("dfs.blockreport.intervalMsec", 0));

    EncodingStatus status = dfs.getEncodingStatus(testFile.toUri().getPath());
    Path parityPath = new Path("/parity/" + status.getParityFileName());
    FileStatus parityStatus = dfs.getFileStatus(parityPath);
    assertEquals(parityStatus.getLen(), TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE);
    try {
        FSDataInputStream in = dfs.open(parityPath);
        byte[] buff = new byte[TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
    } catch (BlockMissingException e) {
        LOG.error("Reading parity failed", e);
        fail("Parity could not be read.");
    }

    String path = testFileStatus.getPath().toUri().getPath();
    int blockToLoose = new Random(seed)
            .nextInt((int) (testFileStatus.getLen() / testFileStatus.getBlockSize()));
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(path, 0, Long.MAX_VALUE).get(blockToLoose);
    DataNodeUtil.loseBlock(getCluster(), lb);
    LOG.info("Losing block " + lb.toString());

    Thread.sleep(2 * conf.getInt("dfs.blockreport.intervalMsec", 0)
            + 2 * conf.getInt(DFSConfigKeys.RECHECK_INTERVAL_KEY, 0));

    while (true) {
        Thread.sleep(10000);
        EncodingStatus status2 = dfs.getEncodingStatus(testFile.toUri().getPath());
        LOG.info("Current status is " + status2.getStatus());
        if (status2.getStatus() == EncodingStatus.Status.ENCODED) {
            break;
        }
    }

    try {
        FSDataInputStream in = dfs.open(testFile);
        byte[] buff = new byte[TEST_BLOCK_COUNT * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
    } catch (BlockMissingException e) {
        fail("Repair failed. Missing a block.");
    }
}

From source file: io.hops.erasure_coding.TestErasureCodingManager.java

License: Apache License

@Test
public void testParityRepair() throws IOException, InterruptedException {
    Codec.initializeCodecs(getConfig());
    EncodingPolicy policy = new EncodingPolicy("src", (short) 1);
    Util.createRandomFile(dfs, testFile, seed, TEST_BLOCK_COUNT, DFS_TEST_BLOCK_SIZE, policy);

    while (!dfs.getEncodingStatus(testFile.toUri().getPath()).isEncoded()) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            LOG.error("Wait for encoding thread was interrupted.");
        }
    }

    Thread.sleep(2 * conf.getInt("dfs.blockreport.intervalMsec", 0));

    EncodingStatus status = dfs.getEncodingStatus(testFile.toUri().getPath());
    Path parityPath = new Path(conf.get(DFSConfigKeys.PARITY_FOLDER, DFSConfigKeys.DEFAULT_PARITY_FOLDER),
            status.getParityFileName());
    FileStatus parityStatus = dfs.getFileStatus(parityPath);
    assertEquals(parityStatus.getLen(), TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE);
    try {
        FSDataInputStream in = dfs.open(parityPath);
        byte[] buff = new byte[TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
    } catch (BlockMissingException e) {
        LOG.error("Reading parity failed", e);
        fail("Parity could not be read.");
    }

    int blockToLoose = new Random(seed).nextInt((int) (parityStatus.getLen() / parityStatus.getBlockSize()));
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(parityPath.toUri().getPath(), 0, Long.MAX_VALUE)
            .get(blockToLoose);
    DataNodeUtil.loseBlock(getCluster(), lb);
    LOG.info("Losing block " + lb.toString());

    try {
        FSDataInputStream in = dfs.open(parityPath);
        byte[] buff = new byte[TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
        fail("Successfully read parity file which should have been broken.");
    } catch (BlockMissingException e) {
    }

    Thread.sleep(2 * conf.getInt("dfs.blockreport.intervalMsec", 0)
            + 2 * conf.getInt(DFSConfigKeys.RECHECK_INTERVAL_KEY, 0));

    while (true) {
        Thread.sleep(10000);
        EncodingStatus status2 = dfs.getEncodingStatus(testFile.toUri().getPath());
        LOG.info("Current status is " + status2);
        if (status2.getParityStatus() == EncodingStatus.ParityStatus.HEALTHY) {
            break;
        }
    }

    try {
        FSDataInputStream in = dfs.open(parityPath);
        byte[] buff = new byte[TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
    } catch (BlockMissingException e) {
        fail("Repair failed. Missing a block.");
    }
}

From source file: io.hops.erasure_coding.TestErasureCodingManagerEndless.java

License: Apache License

@Ignore
public void endlessSourceTest() throws IOException, InterruptedException {
    DistributedFileSystem dfs = (DistributedFileSystem) getFileSystem();

    Codec.initializeCodecs(getConfig());
    EncodingPolicy policy = new EncodingPolicy("src", (short) 1);
    Util.createRandomFile(dfs, testFile, seed, TEST_BLOCK_COUNT, DFS_TEST_BLOCK_SIZE, policy);
    FileStatus testFileStatus = dfs.getFileStatus(testFile);

    while (!dfs.getEncodingStatus(testFile.toUri().getPath()).isEncoded()) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            LOG.error("Wait for encoding thread was interrupted.");
        }
    }

    EncodingStatus status = dfs.getEncodingStatus(testFile.toUri().getPath());
    Path parityPath = new Path("/parity/" + status.getParityFileName());
    FileStatus parityStatus = dfs.getFileStatus(parityPath);
    assertEquals(parityStatus.getLen(), TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE);
    try {
        FSDataInputStream in = dfs.open(parityPath);
        byte[] buff = new byte[TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
    } catch (BlockMissingException e) {
        LOG.error("Reading parity failed", e);
        fail("Parity could not be read.");
    }

    String path = testFileStatus.getPath().toUri().getPath();
    int blockToLoose = new Random(seed)
            .nextInt((int) (testFileStatus.getLen() / testFileStatus.getBlockSize()));
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(path, 0, Long.MAX_VALUE).get(blockToLoose);
    DataNodeUtil.loseBlock(getCluster(), lb);
    LOG.info("Loosing block " + lb.toString());

    EncodingStatus lastStatus = null;
    while (true) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            LOG.warn("Was interrupted", e);
        }
        EncodingStatus status2 = dfs.getEncodingStatus(testFile.toUri().getPath());
        if (!status2.equals(lastStatus)) {
            LOG.info("New status is " + status2.getStatus());
            lastStatus = status2;
        }
    }
}

From source file: io.hops.erasure_coding.TestErasureCodingManagerEndless.java

License: Apache License

@Ignore
public void endlessParityTest() throws IOException, InterruptedException {
    DistributedFileSystem dfs = (DistributedFileSystem) getFileSystem();

    Codec.initializeCodecs(getConfig());
    EncodingPolicy policy = new EncodingPolicy("src", (short) 1);
    Util.createRandomFile(dfs, testFile, seed, TEST_BLOCK_COUNT, DFS_TEST_BLOCK_SIZE, policy);
    FileStatus testFileStatus = dfs.getFileStatus(testFile);

    while (!dfs.getEncodingStatus(testFile.toUri().getPath()).isEncoded()) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            LOG.error("Wait for encoding thread was interrupted.");
        }
    }

    EncodingStatus status = dfs.getEncodingStatus(testFile.toUri().getPath());
    Path parityPath = new Path("/parity/" + status.getParityFileName());
    FileStatus parityStatus = dfs.getFileStatus(parityPath);
    assertEquals(parityStatus.getLen(), TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE);
    try {
        FSDataInputStream in = dfs.open(parityPath);
        byte[] buff = new byte[TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
    } catch (BlockMissingException e) {
        LOG.error("Reading parity failed", e);
        fail("Parity could not be read.");
    }

    int blockToLoose = new Random(seed).nextInt((int) (parityStatus.getLen() / parityStatus.getBlockSize()));
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(parityPath.toUri().getPath(), 0, Long.MAX_VALUE)
            .get(blockToLoose);
    DataNodeUtil.loseBlock(getCluster(), lb);
    LOG.info("Loosing block " + lb.toString());

    try {
        FSDataInputStream in = dfs.open(parityPath);
        byte[] buff = new byte[TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
        fail("Successfully read parity file which should have been broken.");
    } catch (BlockMissingException e) {
    }

    EncodingStatus lastStatus = null;
    while (true) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            LOG.warn("Was interrupted", e);
        }
        EncodingStatus status2 = dfs.getEncodingStatus(testFile.toUri().getPath());
        if (!status2.equals(lastStatus)) {
            LOG.info("New status is " + status2);
            lastStatus = status2;
        }
    }
}

From source file: io.hops.erasure_coding.TestMapReduceBlockRepairManager.java

License: Apache License

@Test
public void testBlockRepair() throws IOException, InterruptedException {
    DistributedFileSystem dfs = (DistributedFileSystem) getFileSystem();
    TestDfsClient testDfsClient = new TestDfsClient(getConfig());
    testDfsClient.injectIntoDfs(dfs);

    MapReduceEncodingManager encodingManager = new MapReduceEncodingManager(conf);

    Util.createRandomFile(dfs, testFile, seed, TEST_BLOCK_COUNT, DFS_TEST_BLOCK_SIZE);
    Codec.initializeCodecs(conf);
    FileStatus testFileStatus = dfs.getFileStatus(testFile);
    EncodingPolicy policy = new EncodingPolicy("src", (short) 1);
    encodingManager.encodeFile(policy, testFile, parityFile);

    // Busy waiting until the encoding is done
    while (encodingManager.computeReports().size() > 0) {
        ;
    }

    String path = testFileStatus.getPath().toUri().getPath();
    int blockToLoose = new Random(seed)
            .nextInt((int) (testFileStatus.getLen() / testFileStatus.getBlockSize()));
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(path, 0, Long.MAX_VALUE).get(blockToLoose);
    DataNodeUtil.loseBlock(getCluster(), lb);
    List<LocatedBlock> lostBlocks = new ArrayList<LocatedBlock>();
    lostBlocks.add(lb);
    LocatedBlocks locatedBlocks = new LocatedBlocks(0, false, lostBlocks, null, true);
    testDfsClient.setMissingLocatedBlocks(locatedBlocks);
    LOG.info("Loosing block " + lb.toString());
    getCluster().triggerBlockReports();

    MapReduceBlockRepairManager repairManager = new MapReduceBlockRepairManager(conf);
    repairManager.repairSourceBlocks("src", testFile, parityFile);

    while (true) {
        List<Report> reports = repairManager.computeReports();
        if (reports.size() == 0) {
            break;
        }
        LOG.info(reports.get(0).getStatus());
        System.out.println("WAIT");
        Thread.sleep(1000);
    }

    try {
        FSDataInputStream in = dfs.open(testFile);
        byte[] buff = new byte[TEST_BLOCK_COUNT * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
    } catch (BlockMissingException e) {
        fail("Repair failed. Missing a block.");
    }
}