List of usage examples for org.apache.hadoop.hdfs DFSClient datanodeReport
public DatanodeInfo[] datanodeReport(DatanodeReportType type) throws IOException
From source file: com.cloudera.llama.am.MiniLlama.java
License: Apache License
private Map<String, String> getDataNodeNodeManagerMapping(Configuration conf) throws Exception { Map<String, String> map = new HashMap<String, String>(); DFSClient dfsClient = new DFSClient(new URI(conf.get("fs.defaultFS")), conf); DatanodeInfo[] DNs = dfsClient.datanodeReport(HdfsConstants.DatanodeReportType.ALL); YarnClient yarnClient = YarnClient.createYarnClient(); yarnClient.init(conf);//from w w w .j a va 2 s. c o m yarnClient.start(); List<NodeId> nodeIds = getYarnNodeIds(conf); if (nodeIds.size() != DNs.length) { throw new RuntimeException("Number of DNs and NMs differ, MiniLlama " + "node mapping requires them to be equal at startup"); } LOG.info("HDFS/YARN mapping:"); for (int i = 0; i < DNs.length; i++) { String key = DNs[i].getXferAddr(); NodeId nodeId = nodeIds.get(i); String value = nodeId.getHost() + ":" + nodeId.getPort(); map.put(key, value); LOG.info(" DN/NM: " + key + "/" + value); } yarnClient.stop(); nodes = map.size(); verifySingleHost(map.keySet(), "DataNode"); verifySingleHost(map.values(), "NodeManager"); return map; }
From source file: com.mellanox.r4h.MiniDFSCluster.java
License: Apache License
/** Wait until the given namenode gets registration from all the datanodes */ public void waitActive(int nnIndex) throws IOException { if (nameNodes.length == 0 || nameNodes[nnIndex] == null || nameNodes[nnIndex].nameNode == null) { return;// www . j a va 2 s . co m } InetSocketAddress addr = nameNodes[nnIndex].nameNode.getServiceRpcAddress(); assert addr.getPort() != 0; DFSClient client = new DFSClient(addr, conf); // ensure all datanodes have registered and sent heartbeat to the namenode while (shouldWait(client.datanodeReport(DatanodeReportType.LIVE), addr)) { try { LOG.info("Waiting for cluster to become active"); Thread.sleep(100); } catch (InterruptedException e) { } } client.close(); }
From source file: com.mozilla.hadoop.ClusterHealth.java
License: Apache License
/**
 * Nagios-style cluster health check: reports live/dead HDFS DataNodes, live/
 * dead HBase RegionServers, and Thrift reachability for each host given in
 * {@code args}. Exits with 0 when healthy, 2 (CRITICAL) when any dead node,
 * dead server, unreachable master, I/O failure, or Thrift failure is seen.
 */
public static void main(String[] args) {
    int retCode = 0;
    Configuration conf = new Configuration();
    System.out.println("HDFS NameNode: " + conf.get("fs.default.name"));
    DFSClient dfsClient = null;
    try {
        dfsClient = new DFSClient(conf);
        // Live DataNodes: print per-node capacity usage and xceiver count.
        DatanodeInfo[] liveNodes = dfsClient.datanodeReport(DatanodeReportType.LIVE);
        for (DatanodeInfo dni : liveNodes) {
            long dfsUsed = dni.getDfsUsed();
            long nonDfsUsed = dni.getNonDfsUsed();
            long capacity = dni.getCapacity();
            // Percentage of total capacity consumed by DFS + non-DFS data.
            float capacityPercentage = ((float) (dfsUsed + nonDfsUsed) / (float) capacity) * 100.0f;
            System.out.println(
                    String.format("%s DataNode - [ ALIVE ] - DFS Capacity: (%d + %d / %d) %.2f%%; xceivers: %d",
                            new Object[] { dni.getHostName(), dfsUsed, nonDfsUsed, capacity, capacityPercentage,
                                    dni.getXceiverCount() }));
        }
        // Any dead DataNode makes the overall status CRITICAL.
        DatanodeInfo[] deadNodes = dfsClient.datanodeReport(DatanodeReportType.DEAD);
        if (deadNodes.length > 0) {
            retCode = 2;
            for (DatanodeInfo dni : deadNodes) {
                System.out.println(dni.getHostName() + " DataNode - [ DEAD ]");
            }
        }
    } catch (IOException e) {
        retCode = 2;
        System.out.println("IOException occurred while checking HDFS cluster status!");
        e.printStackTrace(System.err);
    } finally {
        if (dfsClient != null) {
            try {
                dfsClient.close();
            } catch (IOException e) {
                System.out.println("IOException occurred while closing DFS client!");
                e.printStackTrace(System.err);
            }
        }
    }
    // HBase side: per-RegionServer heap/region/request load, then dead servers.
    Configuration hbaseConf = HBaseConfiguration.create(conf);
    HBaseAdmin hbaseAdmin;
    // NOTE(review): hbaseAdmin is never closed; for this era of the HBase API
    // that appears intentional, but confirm against the HBaseAdmin version used.
    try {
        System.out.println("HBase Rootdir: " + hbaseConf.get("hbase.rootdir"));
        hbaseAdmin = new HBaseAdmin(hbaseConf);
        ClusterStatus hcs = hbaseAdmin.getClusterStatus();
        int regionsCount = hcs.getRegionsCount();
        int requestsCount = hcs.getRequestsCount();
        for (HServerInfo serverInfo : hcs.getServerInfo()) {
            HServerLoad hsl = serverInfo.getLoad();
            float heapPercentage = ((float) hsl.getUsedHeapMB() / (float) hsl.getMaxHeapMB()) * 100.0f;
            // Guard both ratios against division by zero on an idle/empty cluster.
            float regionsPercentage = regionsCount == 0 ? 0.0f
                    : ((float) hsl.getNumberOfRegions() / (float) regionsCount) * 100.0f;
            float requestsPercentage = requestsCount == 0 ? 0.0f
                    : ((float) hsl.getNumberOfRequests() / (float) requestsCount) * 100.0f;
            System.out.println(String.format(
                    "%s RegionServer - [ ALIVE ] - Memory Heap: (%d / %d MB) %.2f%%, Regions: (%d / %d) %.2f%%, Requests: (%d / %d) %.2f%%",
                    new Object[] { serverInfo.getHostname(), hsl.getUsedHeapMB(), hsl.getMaxHeapMB(),
                            heapPercentage, hsl.getNumberOfRegions(), regionsCount, regionsPercentage,
                            hsl.getNumberOfRequests(), requestsCount, requestsPercentage }));
        }
        // Any dead RegionServer also makes the overall status CRITICAL.
        if (hcs.getDeadServers() > 0) {
            retCode = 2;
            for (String server : hcs.getDeadServerNames()) {
                System.out.println(server + " RegionServer - [ DEAD ]");
            }
        }
    } catch (MasterNotRunningException e) {
        System.out.println("HBase Master is not running!");
        retCode = 2;
    } catch (IOException e) {
        System.out.println("IOException occurred while checking HBase cluster status!");
        retCode = 2;
    }
    // Thrift reachability: each positional argument is a host to probe.
    int failures = 0;
    for (String host : args) {
        if (!ClusterHealth.testThrift(host)) {
            failures++;
        }
    }
    if (failures > 0) {
        retCode = 2;
    }
    System.exit(retCode);
}
From source file: com.pinterest.terrapin.controller.ClusterStatusServlet.java
License: Apache License
/** * Get all data nodes/*from w w w. jav a2 s .c o m*/ * @param hdfsClient client instance for HDFS * @return live data nodes * @throws IOException if client goes wrong when communicating with server */ public static List<String> getAllNodeNames(DFSClient hdfsClient) throws IOException { DatanodeInfo[] allNodes = hdfsClient.datanodeReport(HdfsConstants.DatanodeReportType.LIVE); List<String> allNodeNames = new ArrayList<String>(allNodes.length); for (DatanodeInfo nodeInfo : allNodes) { allNodeNames.add(TerrapinUtil.getHelixInstanceFromHDFSHost(nodeInfo.getHostName())); } return allNodeNames; }
From source file: hsyndicate.hadoop.utils.DFSNodeInfoUtils.java
License: Apache License
public static String[] getDataNodes(Configuration conf) throws IOException { List<String> datanodes = new ArrayList<String>(); DFSClient client = new DFSClient(NameNode.getAddress(conf), conf); DatanodeInfo[] datanodeReport = client.datanodeReport(HdfsConstants.DatanodeReportType.LIVE); for (DatanodeInfo nodeinfo : datanodeReport) { datanodes.add(nodeinfo.getHostName().trim()); }// w ww . j a va 2 s. c o m return datanodes.toArray(new String[0]); }
From source file: hsyndicate.hadoop.utils.DFSNodeInfoUtils.java
License: Apache License
public static String getDataNodesCommaSeparated(Configuration conf) throws IOException { StringBuilder sb = new StringBuilder(); DFSClient client = new DFSClient(NameNode.getAddress(conf), conf); DatanodeInfo[] datanodeReport = client.datanodeReport(HdfsConstants.DatanodeReportType.LIVE); for (DatanodeInfo nodeinfo : datanodeReport) { if (sb.length() != 0) { sb.append(","); }//w ww. j av a2s . c om sb.append(nodeinfo.getHostName().trim()); } return sb.toString(); }