Example usage for org.apache.hadoop.hdfs.protocol.DatanodeInfo.getHostName

List of usage examples for org.apache.hadoop.hdfs.protocol.DatanodeInfo.getHostName

Introduction

On this page you can find example usage for org.apache.hadoop.hdfs.protocol.DatanodeInfo.getHostName.

Prototype

public String getHostName() 
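
A minimal, self-contained sketch of how this method is typically reached (assuming fs.defaultFS points at an HDFS cluster; the class name PrintDatanodeHostNames is illustrative): the DatanodeInfo objects come from DistributedFileSystem.getDataNodeStats(), as in the last example on this page, and each one is asked for its hostname.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class PrintDatanodeHostNames {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        try {
            if (fs instanceof DistributedFileSystem) {
                // getDataNodeStats() asks the NameNode for a datanode report
                for (DatanodeInfo info : ((DistributedFileSystem) fs).getDataNodeStats()) {
                    System.out.println(info.getHostName());
                }
            }
        } finally {
            fs.close();
        }
    }
}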

Usage

From source file:backup.datanode.ipc.DataNodeBackupRPC.java

License:Apache License

public static DataNodeBackupRPC getDataNodeBackupRPC(DatanodeInfo datanodeInfo, Configuration conf,
        UserGroupInformation ugi) throws IOException, InterruptedException {
    String ipcHostname = datanodeInfo.getHostName();
    int ipcPort = datanodeInfo.getIpcPort();
    InetSocketAddress dataNodeIPCAddress = new InetSocketAddress(ipcHostname, ipcPort);
    return getDataNodeBackupRPC(dataNodeIPCAddress, conf, ugi);
}

From source file:backup.namenode.NameNodeBackupServicePlugin.java

License:Apache License

private BackupWebService<Stats> getBackupWebService(UserGroupInformation ugi, BlockManager blockManager,
        NameNodeRestoreProcessor restoreProcessor) throws IOException {
    File reportPath = restoreProcessor.getReportPath();
    return new BackupWebService<Stats>() {
        @Override
        public StatsWritable getStats() throws IOException {
            StatsWritable stats = new StatsWritable();
            Set<DatanodeDescriptor> datanodes = blockManager.getDatanodeManager().getDatanodes();
            for (DatanodeInfo datanodeInfo : datanodes) {
                try {
                    DataNodeBackupRPC backup = DataNodeBackupRPC.getDataNodeBackupRPC(datanodeInfo, getConf(),
                            ugi);
                    stats.add(backup.getBackupStats());
                    stats.add(backup.getRestoreStats());
                } catch (Exception e) {
                    LOG.error("Error while trying to read hdfs backup stats from datanode {}",
                            datanodeInfo.getHostName());
                }
            }
            return stats;
        }

        @Override
        public void runReport(boolean debug) throws IOException {
            restoreProcessor.runReport(debug);
        }

        @Override
        public List<String> listReports() throws IOException {
            Builder<String> builder = ImmutableList.builder();
            if (!reportPath.exists()) {
                return builder.build();
            }
            File[] list = reportPath.listFiles((dir, name) -> name.startsWith("report."));
            if (list != null) {
                Arrays.sort(list, Collections.reverseOrder());
                for (File f : list) {
                    builder.add(f.getName());
                }
            }
            return builder.build();
        }

        @Override
        public InputStream getReport(String id) throws IOException {
            File file = new File(reportPath, id);
            if (file.exists()) {
                return new FileInputStream(file);
            }
            return null;
        }
    };
}

From source file:com.bigstep.datalake.JsonUtil.java

License:Apache License

/** Convert a DatanodeInfo to a Json map. */
static Map<String, Object> toJsonMap(final DatanodeInfo datanodeinfo) {
    if (datanodeinfo == null) {
        return null;
    }

    // TODO: Fix storageID
    final Map<String, Object> m = new TreeMap<String, Object>();
    m.put("ipAddr", datanodeinfo.getIpAddr());
    // 'name' is equivalent to ipAddr:xferPort. Older clients (1.x, 0.23.x)
    // expect this instead of the two fields.
    m.put("name", datanodeinfo.getXferAddr());
    m.put("hostName", datanodeinfo.getHostName());
    m.put("storageID", datanodeinfo.getDatanodeUuid());
    m.put("xferPort", datanodeinfo.getXferPort());
    m.put("infoPort", datanodeinfo.getInfoPort());
    m.put("infoSecurePort", datanodeinfo.getInfoSecurePort());
    m.put("ipcPort", datanodeinfo.getIpcPort());

    m.put("capacity", datanodeinfo.getCapacity());
    m.put("dfsUsed", datanodeinfo.getDfsUsed());
    m.put("remaining", datanodeinfo.getRemaining());
    m.put("blockPoolUsed", datanodeinfo.getBlockPoolUsed());
    m.put("cacheCapacity", datanodeinfo.getCacheCapacity());
    m.put("cacheUsed", datanodeinfo.getCacheUsed());
    m.put("lastUpdate", datanodeinfo.getLastUpdate());
    m.put("lastUpdateMonotonic", datanodeinfo.getLastUpdateMonotonic());
    m.put("xceiverCount", datanodeinfo.getXceiverCount());
    m.put("networkLocation", datanodeinfo.getNetworkLocation());
    m.put("adminState", datanodeinfo.getAdminState().name());
    return m;
}

From source file:com.mozilla.hadoop.ClusterHealth.java

License:Apache License

public static void main(String[] args) {
    int retCode = 0;

    Configuration conf = new Configuration();
    System.out.println("HDFS NameNode: " + conf.get("fs.default.name"));
    DFSClient dfsClient = null;
    try {
        dfsClient = new DFSClient(conf);

        DatanodeInfo[] liveNodes = dfsClient.datanodeReport(DatanodeReportType.LIVE);
        for (DatanodeInfo dni : liveNodes) {
            long dfsUsed = dni.getDfsUsed();
            long nonDfsUsed = dni.getNonDfsUsed();
            long capacity = dni.getCapacity();
            float capacityPercentage = ((float) (dfsUsed + nonDfsUsed) / (float) capacity) * 100.0f;
            System.out.println(
                    String.format("%s DataNode - [ ALIVE ] - DFS Capacity: (%d + %d / %d) %.2f%%; xceivers: %d",
                            new Object[] { dni.getHostName(), dfsUsed, nonDfsUsed, capacity, capacityPercentage,
                                    dni.getXceiverCount() }));
        }
        DatanodeInfo[] deadNodes = dfsClient.datanodeReport(DatanodeReportType.DEAD);
        if (deadNodes.length > 0) {
            retCode = 2;
            for (DatanodeInfo dni : deadNodes) {
                System.out.println(dni.getHostName() + " DataNode - [ DEAD ]");
            }
        }
    } catch (IOException e) {
        retCode = 2;
        System.out.println("IOException occurred while checking HDFS cluster status!");
        e.printStackTrace(System.err);
    } finally {
        if (dfsClient != null) {
            try {
                dfsClient.close();
            } catch (IOException e) {
                System.out.println("IOException occurred while closing DFS client!");
                e.printStackTrace(System.err);
            }
        }
    }

    Configuration hbaseConf = HBaseConfiguration.create(conf);
    HBaseAdmin hbaseAdmin;
    try {
        System.out.println("HBase Rootdir: " + hbaseConf.get("hbase.rootdir"));
        hbaseAdmin = new HBaseAdmin(hbaseConf);
        ClusterStatus hcs = hbaseAdmin.getClusterStatus();
        int regionsCount = hcs.getRegionsCount();
        int requestsCount = hcs.getRequestsCount();
        for (HServerInfo serverInfo : hcs.getServerInfo()) {
            HServerLoad hsl = serverInfo.getLoad();
            float heapPercentage = ((float) hsl.getUsedHeapMB() / (float) hsl.getMaxHeapMB()) * 100.0f;
            float regionsPercentage = regionsCount == 0 ? 0.0f
                    : ((float) hsl.getNumberOfRegions() / (float) regionsCount) * 100.0f;
            float requestsPercentage = requestsCount == 0 ? 0.0f
                    : ((float) hsl.getNumberOfRequests() / (float) requestsCount) * 100.0f;
            System.out.println(String.format(
                    "%s RegionServer - [ ALIVE ] - Memory Heap: (%d / %d MB) %.2f%%, Regions: (%d / %d) %.2f%%, Requests: (%d / %d) %.2f%%",
                    new Object[] { serverInfo.getHostname(), hsl.getUsedHeapMB(), hsl.getMaxHeapMB(),
                            heapPercentage, hsl.getNumberOfRegions(), regionsCount, regionsPercentage,
                            hsl.getNumberOfRequests(), requestsCount, requestsPercentage }));
        }
        if (hcs.getDeadServers() > 0) {
            retCode = 2;
            for (String server : hcs.getDeadServerNames()) {
                System.out.println(server + " RegionServer - [ DEAD ]");
            }
        }

    } catch (MasterNotRunningException e) {
        System.out.println("HBase Master is not running!");
        retCode = 2;
    } catch (IOException e) {
        System.out.println("IOException occurred while checking HBase cluster status!");
        retCode = 2;
    }

    int failures = 0;
    for (String host : args) {
        if (!ClusterHealth.testThrift(host)) {
            failures++;
        }
    }
    if (failures > 0) {
        retCode = 2;
    }

    System.exit(retCode);
}

From source file:com.pinterest.terrapin.controller.ClusterStatusServlet.java

License:Apache License

/**
 * Get all data nodes.
 * @param hdfsClient client instance for HDFS
 * @return live data nodes
 * @throws IOException if client goes wrong when communicating with server
 */
public static List<String> getAllNodeNames(DFSClient hdfsClient) throws IOException {
    DatanodeInfo[] allNodes = hdfsClient.datanodeReport(HdfsConstants.DatanodeReportType.LIVE);
    List<String> allNodeNames = new ArrayList<String>(allNodes.length);
    for (DatanodeInfo nodeInfo : allNodes) {
        allNodeNames.add(TerrapinUtil.getHelixInstanceFromHDFSHost(nodeInfo.getHostName()));
    }
    return allNodeNames;
}

From source file:com.sun.grid.herd.HerdLoadSensor.java

License:Open Source License

/**
 * Get the info object for this datanode.
 * @throws IOException Thrown if there is an error while communicating
 * with the namenode.
 */
private void findDatanode() throws IOException {
    DatanodeInfo[] datanodes = client.namenode.getDatanodeReport(DatanodeReportType.LIVE);

    for (DatanodeInfo info : datanodes) {
        if (hostName.equals(info.getHostName())) {
            node = info;
        }
    }
}

From source file:hsyndicate.hadoop.utils.DFSNodeInfoUtils.java

License:Apache License

public static String[] getDataNodes(Configuration conf) throws IOException {
    List<String> datanodes = new ArrayList<String>();
    DFSClient client = new DFSClient(NameNode.getAddress(conf), conf);
    DatanodeInfo[] datanodeReport = client.datanodeReport(HdfsConstants.DatanodeReportType.LIVE);
    for (DatanodeInfo nodeinfo : datanodeReport) {
        datanodes.add(nodeinfo.getHostName().trim());
    }

    return datanodes.toArray(new String[0]);
}

From source file:hsyndicate.hadoop.utils.DFSNodeInfoUtils.java

License:Apache License

public static String getDataNodesCommaSeparated(Configuration conf) throws IOException {
    StringBuilder sb = new StringBuilder();
    DFSClient client = new DFSClient(NameNode.getAddress(conf), conf);
    DatanodeInfo[] datanodeReport = client.datanodeReport(HdfsConstants.DatanodeReportType.LIVE);
    for (DatanodeInfo nodeinfo : datanodeReport) {
        if (sb.length() != 0) {
            sb.append(",");
        }
        sb.append(nodeinfo.getHostName().trim());
    }

    return sb.toString();
}

From source file:mzb.Balancer.java

License:Apache License

private void initNodes(DatanodeInfo[] datanodes) {

    shuffleArray(datanodes);
    for (DatanodeInfo datanode : datanodes) {
        if (datanode.isDecommissioned() || datanode.isDecommissionInProgress()) {
            continue; // ignore decommissioning or decommissioned nodes
        }
        cluster.add(datanode);
        BalancerDatanode datanodeS = new Source(datanode);
        //this.datanodes.put(datanode.getHostName(), datanodeS);
        this.hostDatanodes.put(datanode.getHostName(), datanodeS);
        this.datanodes.put(datanode.getStorageID(), datanodeS);
        this.sources.add((Source) datanodeS);
        BalancerDatanode target = new BalancerDatanode(datanode);
        this.targets.add(target);
        this.targetsNodes.put(datanode.getHostName(), target);
    }
}

From source file:wanggang1987.bigdataapi.hadoopapi.HadoopClientAPI.java

/**
 * List the hostnames of all DataNodes.
 *
 * @return hostname list
 */
public ArrayList<String> listDataNodes() {
    ArrayList<String> list = null;
    try {
        list = new ArrayList<>();
        for (DatanodeInfo dataNodeStat : hdfs.getDataNodeStats()) {
            list.add(dataNodeStat.getHostName());
        }
    } catch (Exception e) {
        logger.error("listDataNodes failed", e);
    }
    return list;
}