Example usage for org.apache.hadoop.hdfs.protocol LocatedBlock toString

Introduction

On this page you can find example usage of org.apache.hadoop.hdfs.protocol.LocatedBlock#toString().

Prototype

@Override
public String toString()
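
A typical use is to log a block's state while walking a file's block locations; LocatedBlock#toString() renders the underlying block together with details such as its offset, corrupt flag, and replica locations. The sketch below shows such a call under stated assumptions: the path "/user/demo/data.bin" is a hypothetical file, and the cast to DistributedFileSystem assumes fs.defaultFS points at an HDFS cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class LocatedBlockToStringDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("/user/demo/data.bin"); // hypothetical file
        DistributedFileSystem dfs = (DistributedFileSystem) path.getFileSystem(conf);
        FileStatus status = dfs.getFileStatus(path);

        // Fetch the located blocks for the whole file and print each block's
        // string form, as the usage examples below do when logging lost or
        // corrupt blocks.
        LocatedBlocks blocks = dfs.getClient()
                .getLocatedBlocks(path.toUri().getPath(), 0, status.getLen());
        for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
            System.out.println(lBlk.toString());
        }
    }
}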

Usage

From source file:fm.last.hadoop.tools.ReplicationPolicyFixer.java

License:Apache License

private void findMissReplicatedFiles(FileStatus file, Set<Path> missReplicatedFiles) throws IOException {
    Path path = file.getPath();

    if (file.isDir()) {
        FileStatus[] files = fs.listStatus(path);
        if (files == null) {
            return;
        }
        for (FileStatus subFile : files) {
            findMissReplicatedFiles(subFile, missReplicatedFiles);
        }
        return;
    }

    int pathNameLength = path.toUri().getPath().length();
    String padding = StringUtils.repeat(" ", Math.max(0, lastPathNameLength - pathNameLength));
    lastPathNameLength = pathNameLength;
    out.print(path.toUri().getPath() + padding + "\r");
    out.flush();

    LocatedBlocks blocks = nameNode.getBlockLocations(path.toUri().getPath(), 0, file.getLen());
    if (blocks == null) { // the file is deleted
        return;
    }
    if (blocks.isUnderConstruction()) {
        out.println("\nNot checking open file : " + path.toString());
        return;
    }

    for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
        if (lBlk.isCorrupt()) {
            out.println("\n" + lBlk.toString() + " is corrupt so skipping file : " + path.toString());
            return;
        }

        Block block = lBlk.getBlock();
        DatanodeInfo[] locs = lBlk.getLocations();
        short targetFileReplication = file.getReplication();
        // verify block placement policy
        int missingRacks = verifyBlockPlacement(lBlk, targetFileReplication, cluster);
        if (missingRacks > 0 && locs.length > 0) {
            out.println("\nReplica placement policy is violated for " + block.toString() + " of file "
                    + path.toString() + ". Block should be additionally replicated on " + missingRacks
                    + " more rack(s).");
            missReplicatedFiles.add(path);
        }
    }
}

From source file:io.hops.erasure_coding.TestErasureCodingManager.java

License:Apache License

@Test
public void testSourceRepair() throws IOException, InterruptedException {
    Codec.initializeCodecs(getConfig());
    EncodingPolicy policy = new EncodingPolicy("src", (short) 1);
    Util.createRandomFile(dfs, testFile, seed, TEST_BLOCK_COUNT, DFS_TEST_BLOCK_SIZE, policy);
    FileStatus testFileStatus = dfs.getFileStatus(testFile);

    while (!dfs.getEncodingStatus(testFile.toUri().getPath()).isEncoded()) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            LOG.error("Wait for encoding thread was interrupted.");
        }
    }

    Thread.sleep(2 * conf.getInt("dfs.blockreport.intervalMsec", 0));

    EncodingStatus status = dfs.getEncodingStatus(testFile.toUri().getPath());
    Path parityPath = new Path("/parity/" + status.getParityFileName());
    FileStatus parityStatus = dfs.getFileStatus(parityPath);
    assertEquals(TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE, parityStatus.getLen());
    try {
        FSDataInputStream in = dfs.open(parityPath);
        byte[] buff = new byte[TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
    } catch (BlockMissingException e) {
        LOG.error("Reading parity failed", e);
        fail("Parity could not be read.");
    }

    String path = testFileStatus.getPath().toUri().getPath();
    int blockToLoose = new Random(seed)
            .nextInt((int) (testFileStatus.getLen() / testFileStatus.getBlockSize()));
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(path, 0, Long.MAX_VALUE).get(blockToLoose);
    DataNodeUtil.loseBlock(getCluster(), lb);
    LOG.info("Losing block " + lb.toString());

    Thread.sleep(2 * conf.getInt("dfs.blockreport.intervalMsec", 0)
            + 2 * conf.getInt(DFSConfigKeys.RECHECK_INTERVAL_KEY, 0));

    while (true) {
        Thread.sleep(10000);
        EncodingStatus status2 = dfs.getEncodingStatus(testFile.toUri().getPath());
        LOG.info("Current status is " + status2.getStatus());
        if (status2.getStatus() == EncodingStatus.Status.ENCODED) {
            break;
        }
    }

    try {
        FSDataInputStream in = dfs.open(testFile);
        byte[] buff = new byte[TEST_BLOCK_COUNT * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
    } catch (BlockMissingException e) {
        fail("Repair failed. Missing a block.");
    }
}

From source file:io.hops.erasure_coding.TestErasureCodingManager.java

License:Apache License

@Test
public void testParityRepair() throws IOException, InterruptedException {
    Codec.initializeCodecs(getConfig());
    EncodingPolicy policy = new EncodingPolicy("src", (short) 1);
    Util.createRandomFile(dfs, testFile, seed, TEST_BLOCK_COUNT, DFS_TEST_BLOCK_SIZE, policy);

    while (!dfs.getEncodingStatus(testFile.toUri().getPath()).isEncoded()) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            LOG.error("Wait for encoding thread was interrupted.");
        }
    }

    Thread.sleep(2 * conf.getInt("dfs.blockreport.intervalMsec", 0));

    EncodingStatus status = dfs.getEncodingStatus(testFile.toUri().getPath());
    Path parityPath = new Path(conf.get(DFSConfigKeys.PARITY_FOLDER, DFSConfigKeys.DEFAULT_PARITY_FOLDER),
            status.getParityFileName());
    FileStatus parityStatus = dfs.getFileStatus(parityPath);
    assertEquals(TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE, parityStatus.getLen());
    try {
        FSDataInputStream in = dfs.open(parityPath);
        byte[] buff = new byte[TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
    } catch (BlockMissingException e) {
        LOG.error("Reading parity failed", e);
        fail("Parity could not be read.");
    }

    int blockToLoose = new Random(seed).nextInt((int) (parityStatus.getLen() / parityStatus.getBlockSize()));
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(parityPath.toUri().getPath(), 0, Long.MAX_VALUE)
            .get(blockToLoose);
    DataNodeUtil.loseBlock(getCluster(), lb);
    LOG.info("Losing block " + lb.toString());

    try {
        FSDataInputStream in = dfs.open(parityPath);
        byte[] buff = new byte[TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
        fail("Successfully read parity file which should have been broken.");
    } catch (BlockMissingException e) {
        // Expected: the parity block was deliberately lost, so this read must fail.
    }

    Thread.sleep(2 * conf.getInt("dfs.blockreport.intervalMsec", 0)
            + 2 * conf.getInt(DFSConfigKeys.RECHECK_INTERVAL_KEY, 0));

    while (true) {
        Thread.sleep(10000);
        EncodingStatus status2 = dfs.getEncodingStatus(testFile.toUri().getPath());
        LOG.info("Current status is " + status2);
        if (status2.getParityStatus() == EncodingStatus.ParityStatus.HEALTHY) {
            break;
        }
    }

    try {
        FSDataInputStream in = dfs.open(parityPath);
        byte[] buff = new byte[TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
    } catch (BlockMissingException e) {
        fail("Repair failed. Missing a block.");
    }
}

From source file:io.hops.erasure_coding.TestErasureCodingManagerEndless.java

License:Apache License

@Ignore
public void endlessSourceTest() throws IOException, InterruptedException {
    DistributedFileSystem dfs = (DistributedFileSystem) getFileSystem();

    Codec.initializeCodecs(getConfig());
    EncodingPolicy policy = new EncodingPolicy("src", (short) 1);
    Util.createRandomFile(dfs, testFile, seed, TEST_BLOCK_COUNT, DFS_TEST_BLOCK_SIZE, policy);
    FileStatus testFileStatus = dfs.getFileStatus(testFile);

    while (!dfs.getEncodingStatus(testFile.toUri().getPath()).isEncoded()) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            LOG.error("Wait for encoding thread was interrupted.");
        }
    }

    EncodingStatus status = dfs.getEncodingStatus(testFile.toUri().getPath());
    Path parityPath = new Path("/parity/" + status.getParityFileName());
    FileStatus parityStatus = dfs.getFileStatus(parityPath);
    assertEquals(TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE, parityStatus.getLen());
    try {
        FSDataInputStream in = dfs.open(parityPath);
        byte[] buff = new byte[TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
    } catch (BlockMissingException e) {
        LOG.error("Reading parity failed", e);
        fail("Parity could not be read.");
    }

    String path = testFileStatus.getPath().toUri().getPath();
    int blockToLoose = new Random(seed)
            .nextInt((int) (testFileStatus.getLen() / testFileStatus.getBlockSize()));
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(path, 0, Long.MAX_VALUE).get(blockToLoose);
    DataNodeUtil.loseBlock(getCluster(), lb);
    LOG.info("Loosing block " + lb.toString());

    EncodingStatus lastStatus = null;
    while (true) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            LOG.warn("Was interrupted", e);
        }
        EncodingStatus status2 = dfs.getEncodingStatus(testFile.toUri().getPath());
        if (!status2.equals(lastStatus)) {
            LOG.info("New status is " + status2.getStatus());
            lastStatus = status2;
        }
    }
}

From source file:io.hops.erasure_coding.TestErasureCodingManagerEndless.java

License:Apache License

@Ignore
public void endlessParityTest() throws IOException, InterruptedException {
    DistributedFileSystem dfs = (DistributedFileSystem) getFileSystem();

    Codec.initializeCodecs(getConfig());
    EncodingPolicy policy = new EncodingPolicy("src", (short) 1);
    Util.createRandomFile(dfs, testFile, seed, TEST_BLOCK_COUNT, DFS_TEST_BLOCK_SIZE, policy);
    FileStatus testFileStatus = dfs.getFileStatus(testFile);

    while (!dfs.getEncodingStatus(testFile.toUri().getPath()).isEncoded()) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            LOG.error("Wait for encoding thread was interrupted.");
        }
    }

    EncodingStatus status = dfs.getEncodingStatus(testFile.toUri().getPath());
    Path parityPath = new Path("/parity/" + status.getParityFileName());
    FileStatus parityStatus = dfs.getFileStatus(parityPath);
    assertEquals(TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE, parityStatus.getLen());
    try {
        FSDataInputStream in = dfs.open(parityPath);
        byte[] buff = new byte[TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
    } catch (BlockMissingException e) {
        LOG.error("Reading parity failed", e);
        fail("Parity could not be read.");
    }

    int blockToLoose = new Random(seed).nextInt((int) (parityStatus.getLen() / parityStatus.getBlockSize()));
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(parityPath.toUri().getPath(), 0, Long.MAX_VALUE)
            .get(blockToLoose);
    DataNodeUtil.loseBlock(getCluster(), lb);
    LOG.info("Loosing block " + lb.toString());

    try {
        FSDataInputStream in = dfs.open(parityPath);
        byte[] buff = new byte[TEST_STRIPE_COUNT * TEST_PARITY_LENGTH * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
        fail("Successfully read parity file which should have been broken.");
    } catch (BlockMissingException e) {
        // Expected: the parity block was deliberately lost, so this read must fail.
    }

    EncodingStatus lastStatus = null;
    while (true) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            LOG.warn("Was interrupted", e);
        }
        EncodingStatus status2 = dfs.getEncodingStatus(testFile.toUri().getPath());
        if (!status2.equals(lastStatus)) {
            LOG.info("New status is " + status2);
            lastStatus = status2;
        }
    }
}

From source file:io.hops.erasure_coding.TestMapReduceBlockRepairManager.java

License:Apache License

@Test
public void testBlockRepair() throws IOException, InterruptedException {
    DistributedFileSystem dfs = (DistributedFileSystem) getFileSystem();
    TestDfsClient testDfsClient = new TestDfsClient(getConfig());
    testDfsClient.injectIntoDfs(dfs);

    MapReduceEncodingManager encodingManager = new MapReduceEncodingManager(conf);

    Util.createRandomFile(dfs, testFile, seed, TEST_BLOCK_COUNT, DFS_TEST_BLOCK_SIZE);
    Codec.initializeCodecs(conf);
    FileStatus testFileStatus = dfs.getFileStatus(testFile);
    EncodingPolicy policy = new EncodingPolicy("src", (short) 1);
    encodingManager.encodeFile(policy, testFile, parityFile);

    // Busy waiting until the encoding is done
    while (!encodingManager.computeReports().isEmpty()) {
        // spin
    }

    String path = testFileStatus.getPath().toUri().getPath();
    int blockToLoose = new Random(seed)
            .nextInt((int) (testFileStatus.getLen() / testFileStatus.getBlockSize()));
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(path, 0, Long.MAX_VALUE).get(blockToLoose);
    DataNodeUtil.loseBlock(getCluster(), lb);
    List<LocatedBlock> lostBlocks = new ArrayList<LocatedBlock>();
    lostBlocks.add(lb);
    LocatedBlocks locatedBlocks = new LocatedBlocks(0, false, lostBlocks, null, true);
    testDfsClient.setMissingLocatedBlocks(locatedBlocks);
    LOG.info("Loosing block " + lb.toString());
    getCluster().triggerBlockReports();

    MapReduceBlockRepairManager repairManager = new MapReduceBlockRepairManager(conf);
    repairManager.repairSourceBlocks("src", testFile, parityFile);

    while (true) {
        List<Report> reports = repairManager.computeReports();
        if (reports.size() == 0) {
            break;
        }
        LOG.info(reports.get(0).getStatus());
        System.out.println("WAIT");
        Thread.sleep(1000);
    }

    try {
        FSDataInputStream in = dfs.open(testFile);
        byte[] buff = new byte[TEST_BLOCK_COUNT * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
    } catch (BlockMissingException e) {
        fail("Repair failed. Missing a block.");
    }
}

From source file:io.hops.erasure_coding.TestMapReduceBlockRepairManager.java

License:Apache License

@Test
public void testCorruptedRepair() throws IOException, InterruptedException {
    DistributedFileSystem dfs = (DistributedFileSystem) getFileSystem();
    TestDfsClient testDfsClient = new TestDfsClient(getConfig());
    testDfsClient.injectIntoDfs(dfs);

    MapReduceEncodingManager encodingManager = new MapReduceEncodingManager(conf);

    Util.createRandomFile(dfs, testFile, seed, TEST_BLOCK_COUNT, DFS_TEST_BLOCK_SIZE);
    Codec.initializeCodecs(conf);
    FileStatus testFileStatus = dfs.getFileStatus(testFile);
    EncodingPolicy policy = new EncodingPolicy("src", (short) 1);
    encodingManager.encodeFile(policy, testFile, parityFile);

    // Busy waiting until the encoding is done
    while (!encodingManager.computeReports().isEmpty()) {
        // spin
    }

    String path = testFileStatus.getPath().toUri().getPath();
    int blockToLoose = new Random(seed)
            .nextInt((int) (testFileStatus.getLen() / testFileStatus.getBlockSize()));
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(path, 0, Long.MAX_VALUE).get(blockToLoose);
    DataNodeUtil.loseBlock(getCluster(), lb);
    List<LocatedBlock> lostBlocks = new ArrayList<LocatedBlock>();
    lostBlocks.add(lb);
    LocatedBlocks locatedBlocks = new LocatedBlocks(0, false, lostBlocks, null, true);
    testDfsClient.setMissingLocatedBlocks(locatedBlocks);
    LOG.info("Loosing block " + lb.toString());
    getCluster().triggerBlockReports();

    dfs.getClient().addBlockChecksum(testFile.toUri().getPath(),
            (int) (lb.getStartOffset() / lb.getBlockSize()), 0);

    MapReduceBlockRepairManager repairManager = new MapReduceBlockRepairManager(conf);
    repairManager.repairSourceBlocks("src", testFile, parityFile);

    while (true) {
        List<Report> reports = repairManager.computeReports();
        if (reports.size() == 0) {
            break;
        }
        LOG.info(reports.get(0).getStatus());
        System.out.println("WAIT");
        Thread.sleep(1000);
    }

    try {
        FSDataInputStream in = dfs.open(testFile);
        byte[] buff = new byte[TEST_BLOCK_COUNT * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
        fail("Repair succeeded with bogus checksum.");
    } catch (BlockMissingException e) {
        // Expected: the repair must not succeed against the bogus checksum, so the block stays missing.
    }
}