Example usage for org.apache.hadoop.hdfs.protocol LocatedBlocks getLocatedBlocks

Introduction

This page collects example usages of org.apache.hadoop.hdfs.protocol.LocatedBlocks.getLocatedBlocks().

Prototype

public List<LocatedBlock> getLocatedBlocks() 

Document

Get located blocks.
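
Before the per-project examples below, here is a minimal, self-contained sketch of the typical pattern: fetch a LocatedBlocks for a file through the DFSClient and iterate the list returned by getLocatedBlocks(). The file path is a placeholder, and the configuration is assumed to resolve to a running HDFS cluster.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class ListBlocksExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Assumes fs.defaultFS points at an HDFS namenode.
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        // Placeholder path; offset 0 with length Long.MAX_VALUE covers the whole file.
        LocatedBlocks blocks = dfs.getClient().getLocatedBlocks("/tmp/example.txt", 0, Long.MAX_VALUE);
        for (LocatedBlock block : blocks.getLocatedBlocks()) {
            System.out.println(block.getBlock() + " hosted on "
                    + block.getLocations().length + " datanode(s)");
        }
    }
}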

Usage

From source file: fm.last.hadoop.tools.ReplicationPolicyFixer.java

License: Apache License

private void findMissReplicatedFiles(FileStatus file, Set<Path> missReplicatedFiles) throws IOException {
    Path path = file.getPath();

    if (file.isDir()) {
        FileStatus[] files = fs.listStatus(path);
        if (files == null) {
            return;
        }
        for (FileStatus subFile : files) {
            findMissReplicatedFiles(subFile, missReplicatedFiles);
        }
        return;
    }

    int pathNameLength = path.toUri().getPath().length();
    String padding = StringUtils.repeat(" ", Math.max(0, lastPathNameLength - pathNameLength));
    lastPathNameLength = pathNameLength;
    out.print(path.toUri().getPath() + padding + "\r");
    out.flush();

    LocatedBlocks blocks = nameNode.getBlockLocations(path.toUri().getPath(), 0, file.getLen());
    if (blocks == null) { // the file is deleted
        return;
    }
    if (blocks.isUnderConstruction()) {
        out.println("\nNot checking open file : " + path.toString());
        return;
    }

    for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
        if (lBlk.isCorrupt()) {
            out.println("\n" + lBlk.toString() + " is corrupt so skipping file : " + path.toString());
            return;
        }

        Block block = lBlk.getBlock();
        DatanodeInfo[] locs = lBlk.getLocations();
        short targetFileReplication = file.getReplication();
        // verify block placement policy
        int missingRacks = verifyBlockPlacement(lBlk, targetFileReplication, cluster);
        if (missingRacks > 0 && locs.length > 0) {
            out.println("\nReplica placement policy is violated for " + block.toString() + " of file "
                    + path.toString() + ". Block should be additionally replicated on " + missingRacks
                    + " more rack(s).");
            missReplicatedFiles.add(path);
        }
    }
}
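
This example walks the namespace recursively, asks the NameNode for each file's block locations, and iterates getLocatedBlocks() so block placement can be checked against the replication policy. Files that are under construction or contain a corrupt block are skipped; any file whose blocks would need replicas on additional racks is collected in missReplicatedFiles.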

From source file: io.hops.erasure_coding.ErasureCodingManager.java

License: Apache License

private boolean checkReplication(LocatedBlocks blocks, int replication) {
    for (LocatedBlock locatedBlock : blocks.getLocatedBlocks()) {
        if (locatedBlock.getLocations().length != replication) {
            return false;
        }
    }
    return true;
}
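
Here getLocatedBlocks() drives a simple replication check: the file counts as fully replicated only if every block has exactly the expected number of locations. A hypothetical call site (the dfs handle, path, and replication factor of 3 are assumptions, not taken from the HOPS source):

    LocatedBlocks blocks = dfs.getClient().getLocatedBlocks("/user/data/part-00000", 0, Long.MAX_VALUE);
    // true only if every block currently has exactly 3 replicas
    boolean fullyReplicated = checkReplication(blocks, 3);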

From source file: io.hops.erasure_coding.TestBlockReconstructor.java

License: Apache License

@Test
public void testSourceBlockRepair() throws IOException, InterruptedException {
    DistributedFileSystem dfs = (DistributedFileSystem) getFileSystem();
    TestDfsClient testDfsClient = new TestDfsClient(getConfig());
    testDfsClient.injectIntoDfs(dfs);
    FileStatus testFileStatus = dfs.getFileStatus(testFile);

    String path = testFileStatus.getPath().toUri().getPath();
    int blockToLose = new Random(seed)
            .nextInt((int) (testFileStatus.getLen() / testFileStatus.getBlockSize()));
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(path, 0, Long.MAX_VALUE).get(blockToLose);
    DataNodeUtil.loseBlock(getCluster(), lb);
    List<LocatedBlock> lostBlocks = new ArrayList<LocatedBlock>();
    lostBlocks.add(lb);
    LocatedBlocks locatedBlocks = new LocatedBlocks(0, false, lostBlocks, null, true);
    testDfsClient.setMissingLocatedBlocks(locatedBlocks);

    LocatedBlocks missingBlocks = new LocatedBlocks(testFileStatus.getLen(), false,
            new ArrayList<LocatedBlock>(), null, true);
    missingBlocks.getLocatedBlocks().add(lb);
    BlockReconstructor blockReconstructor = new BlockReconstructor(conf);
    Decoder decoder = new Decoder(conf, Util.getCodec(Util.Codecs.SRC));
    blockReconstructor.processFile(testFile, testParityFile, missingBlocks, decoder, null);

    // Block is recovered to the same data node so no need to wait for the block report
    try {
        FSDataInputStream in = dfs.open(testFile);
        byte[] buff = new byte[TEST_BLOCK_COUNT * DFS_TEST_BLOCK_SIZE];
        in.readFully(0, buff);
    } catch (BlockMissingException e) {
        LOG.error("Reading failed", e);
        fail("Repair failed. Missing a block.");
    }
}
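
This test deliberately loses one randomly chosen block of the source file and builds a LocatedBlocks describing the missing block. Note that the list returned by getLocatedBlocks() is mutated directly: the lost block is appended to it before the set of missing blocks is handed to the BlockReconstructor. Reading the file back afterwards verifies the repair.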

From source file: io.hops.erasure_coding.TestBlockReconstructor.java

License: Apache License

@Test
public void testParityBlockRepair() throws IOException, InterruptedException {
    DistributedFileSystem dfs = (DistributedFileSystem) getFileSystem();
    TestDfsClient testDfsClient = new TestDfsClient(getConfig());
    testDfsClient.injectIntoDfs(dfs);
    FileStatus parityFileStatus = dfs.getFileStatus(testParityFile);

    String path = parityFileStatus.getPath().toUri().getPath();
    int blockToLose = new Random(seed)
            .nextInt((int) (parityFileStatus.getLen() / parityFileStatus.getBlockSize()));
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(path, 0, Long.MAX_VALUE).get(blockToLose);
    DataNodeUtil.loseBlock(getCluster(), lb);
    List<LocatedBlock> lostBlocks = new ArrayList<LocatedBlock>();
    lostBlocks.add(lb);
    LocatedBlocks locatedBlocks = new LocatedBlocks(0, false, lostBlocks, null, true);
    testDfsClient.setMissingLocatedBlocks(locatedBlocks);

    LocatedBlocks missingBlocks = new LocatedBlocks(parityFileStatus.getLen(), false,
            new ArrayList<LocatedBlock>(), null, true);
    missingBlocks.getLocatedBlocks().add(lb);
    BlockReconstructor blockReconstructor = new BlockReconstructor(conf);
    Decoder decoder = new Decoder(conf, Util.getCodec(Util.Codecs.SRC));
    blockReconstructor.processParityFile(testFile, testParityFile, missingBlocks, decoder, null);

    // Block is recovered to the same data node so no need to wait for the block report
    try {
        FSDataInputStream in = dfs.open(testParityFile);
        byte[] buff = new byte[DFS_TEST_BLOCK_SIZE * codec.parityLength];
        in.readFully(0, buff);
    } catch (BlockMissingException e) {
        LOG.error("Reading failed", e);
        fail("Repair failed. Missing a block.");
    }
}
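
testParityBlockRepair follows the same pattern as testSourceBlockRepair above, but loses and reconstructs a block of the parity file (via processParityFile) rather than of the source file, then verifies the repair by reading the parity file back.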

From source file: org.apache.impala.testutil.BlockIdGenerator.java

License: Apache License

@SuppressWarnings("deprecation")
public static void main(String[] args) throws Exception {

    if (args.length != 1) {
        throw new Exception("Invalid args: BlockIdGenerator <output_file>");
    }

    HdfsConfiguration hdfsConfig = new HdfsConfiguration();
    File output = new File(args[0]);
    FileWriter writer = null;

    try {
        writer = new FileWriter(output);

        // Load all tables in the catalog
        Catalog catalog = CatalogServiceTestCatalog.create();
        for (Db database : catalog.getDbs(PatternMatcher.MATCHER_MATCH_ALL)) {
            for (String tableName : database.getAllTableNames()) {
                Table table = database.getTable(tableName);
                // Only do this for hdfs tables
                if (table == null || !(table instanceof HdfsTable)) {
                    continue;
                }
                HdfsTable hdfsTable = (HdfsTable) table;

                // Write the output as <tablename>: <blockid1> <blockid2> <etc>
                writer.write(tableName + ":");
                for (HdfsPartition partition : hdfsTable.getPartitions()) {
                    // Ignore the default partition.
                    if (partition.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
                        continue;
                    }
                    List<FileDescriptor> fileDescriptors = partition.getFileDescriptors();
                    for (FileDescriptor fd : fileDescriptors) {
                        Path p = new Path(partition.getLocation(), fd.getFileName());

                        // Use a deprecated API to get block ids
                        DistributedFileSystem dfs = (DistributedFileSystem) p.getFileSystem(hdfsConfig);
                        LocatedBlocks locations = dfs.getClient().getNamenode()
                                .getBlockLocations(p.toUri().getPath(), 0, fd.getFileLength());

                        for (LocatedBlock lb : locations.getLocatedBlocks()) {
                            long id = lb.getBlock().getBlockId();
                            writer.write(" " + id);
                        }
                    }
                }
                writer.write("\n");
            }
        }
    } finally {
        if (writer != null) {
            writer.close();
        }
    }
}
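
This tool iterates every HDFS-backed table in the Impala catalog, resolves each file's blocks through the NameNode, and uses getLocatedBlocks() to emit one line per table of the form <tablename>: <blockid1> <blockid2> ..., skipping the default partition.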