Example usage for org.apache.hadoop.hdfs DFSClient close

List of usage examples for org.apache.hadoop.hdfs DFSClient close

Introduction

In this page you can find the example usage for org.apache.hadoop.hdfs DFSClient close.

Prototype

@Override
public synchronized void close() throws IOException 

Source Link

Document

Close the file system, abandoning all of the leases and files being created and close connections to the namenode.

Usage

From source file:com.mellanox.r4h.MiniDFSCluster.java

License:Apache License

/**
 * Wait until the given namenode gets registration from all the datanodes.
 *
 * @param nnIndex index of the namenode to wait on; a missing/null entry is a no-op
 * @throws IOException if the datanode report cannot be fetched from the namenode
 */
public void waitActive(int nnIndex) throws IOException {
    if (nameNodes.length == 0 || nameNodes[nnIndex] == null || nameNodes[nnIndex].nameNode == null) {
        return;
    }
    InetSocketAddress addr = nameNodes[nnIndex].nameNode.getServiceRpcAddress();
    assert addr.getPort() != 0;
    DFSClient client = new DFSClient(addr, conf);
    try {
        // ensure all datanodes have registered and sent heartbeat to the namenode
        while (shouldWait(client.datanodeReport(DatanodeReportType.LIVE), addr)) {
            try {
                LOG.info("Waiting for cluster to become active");
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // BUG FIX: the original swallowed the interrupt silently.
                // Restore the interrupt status so callers can observe it;
                // keep polling so the wait contract is preserved.
                Thread.currentThread().interrupt();
            }
        }
    } finally {
        // BUG FIX: always release the client, even when datanodeReport() throws.
        client.close();
    }
}

From source file:com.mozilla.hadoop.ClusterHealth.java

License:Apache License

/**
 * Nagios-style cluster health probe: checks HDFS datanodes, HBase region
 * servers, and Thrift endpoints (one per command-line argument), printing a
 * status line per node and exiting with 0 (healthy) or 2 (any failure).
 *
 * @param args hostnames whose Thrift service should be probed
 */
public static void main(String[] args) {
    int retCode = 0; // 0 = all checks passed; 2 = at least one failure

    // --- HDFS check: connect directly to the namenode for datanode reports.
    Configuration conf = new Configuration();
    System.out.println("HDFS NameNode: " + conf.get("fs.default.name"));
    DFSClient dfsClient = null;
    try {
        dfsClient = new DFSClient(conf);

        // Print per-node capacity usage for every live datanode.
        DatanodeInfo[] liveNodes = dfsClient.datanodeReport(DatanodeReportType.LIVE);
        for (DatanodeInfo dni : liveNodes) {
            long dfsUsed = dni.getDfsUsed();
            long nonDfsUsed = dni.getNonDfsUsed();
            long capacity = dni.getCapacity();
            // NOTE(review): capacity can be 0 for a node that has not reported
            // yet; the percentage then prints as NaN/Infinity — confirm acceptable.
            float capacityPercentage = ((float) (dfsUsed + nonDfsUsed) / (float) capacity) * 100.0f;
            System.out.println(
                    String.format("%s DataNode - [ ALIVE ] - DFS Capacity: (%d + %d / %d) %.2f%%; xceivers: %d",
                            new Object[] { dni.getHostName(), dfsUsed, nonDfsUsed, capacity, capacityPercentage,
                                    dni.getXceiverCount() }));
        }
        // Any dead datanode fails the check.
        DatanodeInfo[] deadNodes = dfsClient.datanodeReport(DatanodeReportType.DEAD);
        if (deadNodes.length > 0) {
            retCode = 2;
            for (DatanodeInfo dni : deadNodes) {
                System.out.println(dni.getHostName() + " DataNode - [ DEAD ]");
            }
        }
    } catch (IOException e) {
        retCode = 2;
        System.out.println("IOException occurred while checking HDFS cluster status!");
        e.printStackTrace(System.err);
    } finally {
        if (dfsClient != null) {
            try {
                dfsClient.close();
            } catch (IOException e) {
                System.out.println("IOException occurred while closing DFS client!");
                e.printStackTrace(System.err);
            }
        }
    }

    // --- HBase check: report load per region server and any dead servers.
    Configuration hbaseConf = HBaseConfiguration.create(conf);
    HBaseAdmin hbaseAdmin;
    try {
        System.out.println("HBase Rootdir: " + hbaseConf.get("hbase.rootdir"));
        // NOTE(review): hbaseAdmin is never closed; this HBase version
        // (HServerInfo-era API) may not expose close() — verify.
        hbaseAdmin = new HBaseAdmin(hbaseConf);
        ClusterStatus hcs = hbaseAdmin.getClusterStatus();
        int regionsCount = hcs.getRegionsCount();
        int requestsCount = hcs.getRequestsCount();
        for (HServerInfo serverInfo : hcs.getServerInfo()) {
            HServerLoad hsl = serverInfo.getLoad();
            float heapPercentage = ((float) hsl.getUsedHeapMB() / (float) hsl.getMaxHeapMB()) * 100.0f;
            // Guard cluster-wide totals of 0 to avoid division by zero.
            float regionsPercentage = regionsCount == 0 ? 0.0f
                    : ((float) hsl.getNumberOfRegions() / (float) regionsCount) * 100.0f;
            float requestsPercentage = requestsCount == 0 ? 0.0f
                    : ((float) hsl.getNumberOfRequests() / (float) requestsCount) * 100.0f;
            System.out.println(String.format(
                    "%s RegionServer - [ ALIVE ] - Memory Heap: (%d / %d MB) %.2f%%, Regions: (%d / %d) %.2f%%, Requests: (%d / %d) %.2f%%",
                    new Object[] { serverInfo.getHostname(), hsl.getUsedHeapMB(), hsl.getMaxHeapMB(),
                            heapPercentage, hsl.getNumberOfRegions(), regionsCount, regionsPercentage,
                            hsl.getNumberOfRequests(), requestsCount, requestsPercentage }));
        }
        // Any dead region server fails the check.
        if (hcs.getDeadServers() > 0) {
            retCode = 2;
            for (String server : hcs.getDeadServerNames()) {
                System.out.println(server + " RegionServer - [ DEAD ]");
            }
        }

    } catch (MasterNotRunningException e) {
        System.out.println("HBase Master is not running!");
        retCode = 2;
    } catch (IOException e) {
        System.out.println("IOException occurred while checking HBase cluster status!");
        retCode = 2;
    }

    // --- Thrift check: probe each host passed on the command line.
    int failures = 0;
    for (String host : args) {
        if (!ClusterHealth.testThrift(host)) {
            failures++;
        }
    }
    if (failures > 0) {
        retCode = 2;
    }

    System.exit(retCode);
}

From source file:com.sun.grid.herd.HerdJsv.java

License:Open Source License

/**
 * Convert the given HDFS path into a list of HDFS data blocks.  If the
 * path is a directory, it will be recursively processed to include the data
 * blocks for all files contained under the directory path.
 * @param path an HDFS path
 * @param conf the Hadoop configuration
 * @param dfs the DFSClient to use; the caller retains ownership and is
 * responsible for closing it
 * @param blocks the list to populate with blocks
 * @return a list of HDFS data blocks
 * @throws IOException Thrown if there is an error while communicating
 * with the HDFS Namenode
 */
private static Set<LocatedBlock> getBlocks(String path, Configuration conf, DFSClient dfs,
        Set<LocatedBlock> blocks) throws IOException {
    FileStatus s = FileSystem.get(conf).getFileStatus(new Path(path));

    if (!s.isDir()) {
        // Regular file: collect the locations of every block in the file.
        blocks.addAll(dfs.namenode.getBlockLocations(path, 0, Long.MAX_VALUE).getLocatedBlocks());
    } else {
        // Directory: recurse into each child entry using the same client.
        for (FileStatus fs : dfs.listPaths(path)) {
            blocks.addAll(getBlocks(fs.getPath().toString(), conf, dfs, blocks));
        }
    }

    // BUG FIX: the original called dfs.close() here.  Because this method is
    // recursive, that shut down the shared client as soon as the first
    // recursive call returned, breaking every subsequent namenode call in the
    // directory loop.  The client is owned by the caller and must not be
    // closed by this helper.
    return blocks;
}

From source file:org.opencloudengine.flamingo.mapreduce.util.HdfsUtils.java

License:Apache License

/**
 *   ? ?  ??  HDFS? ./*from   w w  w  .  j a  v  a 2 s. c om*/
 *
 * @param hdfsUrl          HDFS URL
 * @param filename         HDFS? Path?  ?
 * @param hdfsPath         HDFS? Path
 * @param downloadFilePath  ? ?   ?
 * @throws java.io.IOException HDFS ?  
 */
public static void uploadToHdfs(String hdfsUrl, String filename, String hdfsPath, String downloadFilePath)
        throws IOException {
    String hdfsFullPath = hdfsPath + "/" + filename;
    File inputFile = new File(downloadFilePath);
    DFSClient dfsClient = HdfsUtils.createDFSClient(hdfsUrl);
    copyFromLocalFileToHdfsFile(inputFile, dfsClient, hdfsFullPath);
    dfsClient.close();
}

From source file:org.opencloudengine.flamingo.mapreduce.util.HdfsUtils.java

License:Apache License

/**
 * Returns the HDFS paths directly under {@code path} whose names end with
 * the given extension (e.g. <tt>.dat</tt>).
 *
 * @param hdfsUrl HDFS URL to connect to
 * @param ext     file extension to match (e.g. <tt>.dat</tt>)
 * @param path    HDFS directory to list (created if it does not exist)
 * @return the matching file paths; empty if none match
 * @throws java.io.IOException if HDFS cannot be reached
 */
public static String[] getHdfsFiles(String hdfsUrl, String ext, String path) throws IOException {
    ArrayList<String> files = new ArrayList<String>();
    DFSClient client = HdfsUtils.createDFSClient(hdfsUrl);
    try {
        makeDirectoryIfNotExists(path, hdfsUrl);
        // BUG FIX: the original never populated 'files' and ignored 'ext',
        // so it always returned an empty array.  List the directory and keep
        // the entries whose names match the requested extension.
        org.apache.hadoop.fs.FileStatus[] entries = client.listPaths(path);
        if (entries != null) {
            for (org.apache.hadoop.fs.FileStatus entry : entries) {
                String name = entry.getPath().toString();
                if (!entry.isDir() && name.endsWith(ext)) {
                    files.add(name);
                }
            }
        }
    } finally {
        // BUG FIX: close the client even when listing fails.
        client.close();
    }
    return StringUtils.toStringArray(files);
}

From source file:org.opencloudengine.flamingo.mapreduce.util.HdfsUtils.java

License:Apache License

/**
 *  ? ??  ?./*  w  w w  .  ja v a2 s .  co m*/
 *
 * @param hdfsUrl HDFS URL
 * @param path      ?  
 * @return  <tt>true</tt>
 * @throws java.io.IOException ?     , HDFS?    
 */
public static boolean isExist(String hdfsUrl, String path) throws IOException {
    DFSClient client = HdfsUtils.createDFSClient(hdfsUrl);
    HdfsFileStatus status = client.getFileInfo(path);
    if (status != null && !status.isDir()) {
        client.close();
        return true;
    }
    client.close();
    return false;
}

From source file:org.openflamingo.fs.hdfs.HdfsFileSystemProvider.java

License:Apache License

/**
 * Builds a snapshot of HDFS cluster status: block health counters, disk
 * capacity/usage (raw and human-readable), and live/dead datanode counts.
 *
 * @param type unused; kept for interface compatibility
 * @return a map of status keys to values
 * @throws FileSystemException if the namenode cannot be queried
 */
@Override
public Map<String, Object> getFileSystemStatus(String type) {
    Map<String, Object> map = new HashMap<String, Object>();
    DFSClient dfsClient = null;
    try {
        dfsClient = new DFSClient(fs.getConf());
        map.put("canonicalServiceName", fs.getCanonicalServiceName());
        map.put("defaultReplication", fs.getDefaultReplication());
        map.put("defaultBlockSize", fs.getDefaultBlockSize());
        map.put("workingDirectory", fs.getWorkingDirectory().toUri().getPath());
        map.put("homeDirectory", fs.getHomeDirectory().toUri().getPath());
        map.put("corruptBlocksCount", dfsClient.getCorruptBlocksCount());
        map.put("missingBlocksCount", dfsClient.getMissingBlocksCount());
        map.put("underReplicatedBlocksCount", dfsClient.getUnderReplicatedBlocksCount());
        // PERF FIX: the original issued a fresh getDiskStatus() namenode call
        // for every capacity/used/remaining lookup (8 calls); fetch it once.
        DFSClient.DiskStatus disk = dfsClient.getDiskStatus();
        map.put("capacity", disk.getCapacity());
        map.put("used", disk.getDfsUsed());
        map.put("remaining", disk.getRemaining());
        map.put("deadNodes", dfsClient.namenode.getDatanodeReport(FSConstants.DatanodeReportType.DEAD).length);
        map.put("liveNodes", dfsClient.namenode.getDatanodeReport(FSConstants.DatanodeReportType.LIVE).length);
        map.put("humanCapacity", byteDesc(disk.getCapacity()));
        map.put("humanUsed", byteDesc(disk.getDfsUsed()));
        map.put("humanProgressPercent", formatPercent((double) disk.getRemaining()
                / (double) disk.getCapacity(), 2));
        map.put("humanProgress", (float) disk.getRemaining()
                / (float) disk.getCapacity());
        map.put("humanRemaining", byteDesc(disk.getRemaining()));
        map.put("humanDefaultBlockSize", byteDesc(fs.getDefaultBlockSize()));
        return map;
    } catch (Exception ex) {
        throw new FileSystemException(bundle.message("S_FS", "CANNOT_ACCESS_FS_STATUS"), ex);
    } finally {
        // Single close path: the original closed explicitly on success and
        // then closed again via closeQuietly in the finally block.
        IOUtils.closeQuietly(dfsClient);
    }
}

From source file:org.sf.xrime.algorithms.pagerank.PageRankCorrectionMapper.java

License:Apache License

/**
 * Records that another iteration is needed by creating the continue-flag
 * directory, at most once per task (guarded by {@code changeFlag}).
 *
 * @throws IOException if the flag directory cannot be created
 */
private void recordContinue() throws IOException {
    if (changeFlag) {
        return; // already recorded for this task
    }

    changeFlag = true;

    if (continueFile != null) {
        DFSClient client = new DFSClient(job);
        try {
            client.mkdirs(continueFile);
        } finally {
            // BUG FIX: close the client even when mkdirs() throws; the
            // original leaked it on failure.
            client.close();
        }
    }
}