Example usage for org.apache.hadoop.hdfs.protocol DatanodeInfo getXceiverCount

Introduction

On this page you can find example usage of org.apache.hadoop.hdfs.protocol.DatanodeInfo.getXceiverCount().

Prototype

public int getXceiverCount() 

Document

Returns the number of active connections (data transceiver threads) on the datanode.
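
Because the snippets below are method-level excerpts, here is a minimal, self-contained sketch of reading the counter, assuming an HDFS cluster reachable through the default Configuration (the class name and output format are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class XceiverCountExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        try {
            if (fs instanceof DistributedFileSystem) {
                // getDataNodeStats() returns one DatanodeInfo per datanode
                for (DatanodeInfo dn : ((DistributedFileSystem) fs).getDataNodeStats()) {
                    System.out.println(dn.getHostName() + ": " + dn.getXceiverCount() + " active connections");
                }
            }
        } finally {
            fs.close();
        }
    }
}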

Usage

From source file: com.bigstep.datalake.JsonUtil.java

License: Apache License

/** Convert a DatanodeInfo to a Json map. */
static Map<String, Object> toJsonMap(final DatanodeInfo datanodeinfo) {
    if (datanodeinfo == null) {
        return null;
    }

    // TODO: Fix storageID
    final Map<String, Object> m = new TreeMap<String, Object>();
    m.put("ipAddr", datanodeinfo.getIpAddr());
    // 'name' is equivalent to ipAddr:xferPort. Older clients (1.x, 0.23.x)
    // expect this instead of the two separate fields.
    m.put("name", datanodeinfo.getXferAddr());
    m.put("hostName", datanodeinfo.getHostName());
    m.put("storageID", datanodeinfo.getDatanodeUuid());
    m.put("xferPort", datanodeinfo.getXferPort());
    m.put("infoPort", datanodeinfo.getInfoPort());
    m.put("infoSecurePort", datanodeinfo.getInfoSecurePort());
    m.put("ipcPort", datanodeinfo.getIpcPort());

    m.put("capacity", datanodeinfo.getCapacity());
    m.put("dfsUsed", datanodeinfo.getDfsUsed());
    m.put("remaining", datanodeinfo.getRemaining());
    m.put("blockPoolUsed", datanodeinfo.getBlockPoolUsed());
    m.put("cacheCapacity", datanodeinfo.getCacheCapacity());
    m.put("cacheUsed", datanodeinfo.getCacheUsed());
    m.put("lastUpdate", datanodeinfo.getLastUpdate());
    m.put("lastUpdateMonotonic", datanodeinfo.getLastUpdateMonotonic());
    m.put("xceiverCount", datanodeinfo.getXceiverCount());
    m.put("networkLocation", datanodeinfo.getNetworkLocation());
    m.put("adminState", datanodeinfo.getAdminState().name());
    return m;
}
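
This map appears to mirror the JSON shape WebHDFS uses for datanode entries; getXceiverCount() is written under the stable key "xceiverCount", so clients never depend on the Hadoop wire types. The result can then be serialized with any JSON library; for example (Gson here, purely as an illustration):

    Map<String, Object> m = JsonUtil.toJsonMap(datanodeinfo);
    String json = new com.google.gson.Gson().toJson(m);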

From source file: com.mozilla.hadoop.ClusterHealth.java

License: Apache License

public static void main(String[] args) {
    int retCode = 0;

    Configuration conf = new Configuration();
    System.out.println("HDFS NameNode: " + conf.get("fs.default.name"));
    DFSClient dfsClient = null;
    try {
        dfsClient = new DFSClient(conf);

        // Report capacity usage and active xceiver count for each live datanode
        DatanodeInfo[] liveNodes = dfsClient.datanodeReport(DatanodeReportType.LIVE);
        for (DatanodeInfo dni : liveNodes) {
            long dfsUsed = dni.getDfsUsed();
            long nonDfsUsed = dni.getNonDfsUsed();
            long capacity = dni.getCapacity();
            float capacityPercentage = ((float) (dfsUsed + nonDfsUsed) / (float) capacity) * 100.0f;
            System.out.println(
                    String.format("%s DataNode - [ ALIVE ] - DFS Capacity: (%d + %d / %d) %.2f%%; xceivers: %d",
                            new Object[] { dni.getHostName(), dfsUsed, nonDfsUsed, capacity, capacityPercentage,
                                    dni.getXceiverCount() }));
        }
        // Any dead datanode makes the check critical (exit code 2)
        DatanodeInfo[] deadNodes = dfsClient.datanodeReport(DatanodeReportType.DEAD);
        if (deadNodes.length > 0) {
            retCode = 2;
            for (DatanodeInfo dni : deadNodes) {
                System.out.println(dni.getHostName() + " DataNode - [ DEAD ]");
            }
        }
    } catch (IOException e) {
        retCode = 2;
        System.out.println("IOException occurred while checking HDFS cluster status!");
        e.printStackTrace(System.err);
    } finally {
        if (dfsClient != null) {
            try {
                dfsClient.close();
            } catch (IOException e) {
                System.out.println("IOException occurred while closing DFS client!");
                e.printStackTrace(System.err);
            }
        }
    }

    // Next, check HBase master and region server health
    Configuration hbaseConf = HBaseConfiguration.create(conf);
    HBaseAdmin hbaseAdmin;
    try {
        System.out.println("HBase Rootdir: " + hbaseConf.get("hbase.rootdir"));
        hbaseAdmin = new HBaseAdmin(hbaseConf);
        ClusterStatus hcs = hbaseAdmin.getClusterStatus();
        int regionsCount = hcs.getRegionsCount();
        int requestsCount = hcs.getRequestsCount();
        for (HServerInfo serverInfo : hcs.getServerInfo()) {
            HServerLoad hsl = serverInfo.getLoad();
            float heapPercentage = ((float) hsl.getUsedHeapMB() / (float) hsl.getMaxHeapMB()) * 100.0f;
            float regionsPercentage = regionsCount == 0 ? 0.0f
                    : ((float) hsl.getNumberOfRegions() / (float) regionsCount) * 100.0f;
            float requestsPercentage = requestsCount == 0 ? 0.0f
                    : ((float) hsl.getNumberOfRequests() / (float) requestsCount) * 100.0f;
            System.out.println(String.format(
                    "%s RegionServer - [ ALIVE ] - Memory Heap: (%d / %d MB) %.2f%%, Regions: (%d / %d) %.2f%%, Requests: (%d / %d) %.2f%%",
                    new Object[] { serverInfo.getHostname(), hsl.getUsedHeapMB(), hsl.getMaxHeapMB(),
                            heapPercentage, hsl.getNumberOfRegions(), regionsCount, regionsPercentage,
                            hsl.getNumberOfRequests(), requestsCount, requestsPercentage }));
        }
        if (hcs.getDeadServers() > 0) {
            retCode = 2;
            for (String server : hcs.getDeadServerNames()) {
                System.out.println(server + " RegionServer - [ DEAD ]");
            }
        }

    } catch (MasterNotRunningException e) {
        System.out.println("HBase Master is not running!");
        retCode = 2;
    } catch (IOException e) {
        System.out.println("IOException occurred while checking HBase cluster status!");
        retCode = 2;
    }

    int failures = 0;
    for (String host : args) {
        if (!ClusterHealth.testThrift(host)) {
            failures++;
        }
    }
    if (failures > 0) {
        retCode = 2;
    }

    System.exit(retCode);
}
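
The exit codes appear to follow the Nagios plugin convention, where 0 means OK and 2 means CRITICAL, so dead datanodes, dead region servers, an unreachable HBase master, or failed Thrift hosts all surface as a critical check result.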

From source file: org.openflamingo.remote.thrift.thriftfs.ThriftUtils.java

License: Apache License

/** Convert a Hadoop DatanodeInfo into its Thrift counterpart. */
public static DatanodeInfo toThrift(org.apache.hadoop.hdfs.protocol.DatanodeInfo node,
        Map<DatanodeID, Integer> thriftPorts) {
    if (node == null) {
        return new DatanodeInfo();
    }

    DatanodeInfo ret = new DatanodeInfo();
    ret.name = node.getName();
    ret.storageID = node.storageID;
    ret.host = node.getHost();
    Integer p = thriftPorts.get(node);
    if (p == null) {
        LOG.warn("Unknown Thrift port for datanode " + node.name);
        ret.thriftPort = Constants.UNKNOWN_THRIFT_PORT;
    } else {
        ret.thriftPort = p.intValue();
    }

    ret.capacity = node.getCapacity();
    ret.dfsUsed = node.getDfsUsed();
    ret.remaining = node.getRemaining();
    ret.xceiverCount = node.getXceiverCount();
    ret.state = node.isDecommissioned() ? DatanodeState.DECOMMISSIONED
            : node.isDecommissionInProgress() ? DatanodeState.DECOMMISSION_INPROGRESS
                    : DatanodeState.NORMAL_STATE;
    ret.httpPort = node.getInfoPort();

    long timestamp = node.getLastUpdate();
    long currentTime = System.currentTimeMillis();
    ret.millisSinceUpdate = currentTime - timestamp;

    return ret;
}
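
Note that millisSinceUpdate is computed by subtracting the datanode's last-heartbeat timestamp (getLastUpdate()) from the local wall clock, which assumes the two machines' clocks are roughly in sync; likewise, the datanode's admin state is collapsed into the three Thrift DatanodeState values.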