Usage example for org.apache.hadoop.hdfs.protocol.Block#toString()
@Override
public String toString()
From source file: fm.last.hadoop.tools.ReplicationPolicyFixer.java
License: Apache License
private void findMissReplicatedFiles(FileStatus file, Set<Path> missReplicatedFiles) throws IOException { Path path = file.getPath();//w w w. j a va 2 s. c o m if (file.isDir()) { FileStatus[] files = fs.listStatus(path); if (files == null) { return; } for (FileStatus subFile : files) { findMissReplicatedFiles(subFile, missReplicatedFiles); } return; } int pathNameLength = path.toUri().getPath().length(); String padding = StringUtils.repeat(" ", Math.max(0, lastPathNameLength - pathNameLength)); lastPathNameLength = pathNameLength; out.print(path.toUri().getPath() + padding + "\r"); out.flush(); LocatedBlocks blocks = nameNode.getBlockLocations(path.toUri().getPath(), 0, file.getLen()); if (blocks == null) { // the file is deleted return; } if (blocks.isUnderConstruction()) { out.println("\nNot checking open file : " + path.toString()); return; } for (LocatedBlock lBlk : blocks.getLocatedBlocks()) { if (lBlk.isCorrupt()) { out.println("\n" + lBlk.toString() + " is corrupt so skipping file : " + path.toString()); return; } Block block = lBlk.getBlock(); DatanodeInfo[] locs = lBlk.getLocations(); short targetFileReplication = file.getReplication(); // verify block placement policy int missingRacks = verifyBlockPlacement(lBlk, targetFileReplication, cluster); if (missingRacks > 0 && locs.length > 0) { out.println("\nReplica placement policy is violated for " + block.toString() + " of file " + path.toString() + ". Block should be additionally replicated on " + missingRacks + " more rack(s)."); missReplicatedFiles.add(path); } } }