Example usage for org.apache.hadoop.hdfs DFSClient DFSClient

List of usage examples for org.apache.hadoop.hdfs DFSClient DFSClient

Introduction

In this page you can find the example usage for org.apache.hadoop.hdfs DFSClient DFSClient.

Prototype

@Deprecated
public DFSClient(Configuration conf) throws IOException 

Source Link

Document

Same as this(NameNode.getNNAddress(conf), conf);

Usage

From source file:com.mozilla.hadoop.ClusterHealth.java

License:Apache License

/**
 * Health check for an HDFS + HBase cluster, suitable for use as a Nagios-style
 * probe: prints one line per DataNode / RegionServer and exits with 0 when
 * everything is alive, or 2 when any dead node, dead server, exception, or
 * failed thrift host is detected.
 *
 * @param args hostnames to additionally probe via {@code ClusterHealth.testThrift}
 */
public static void main(String[] args) {
    // Exit code accumulator: 0 = healthy, 2 = critical.
    int retCode = 0;

    Configuration conf = new Configuration();
    System.out.println("HDFS NameNode: " + conf.get("fs.default.name"));
    DFSClient dfsClient = null;
    try {
        dfsClient = new DFSClient(conf);

        DatanodeInfo[] liveNodes = dfsClient.datanodeReport(DatanodeReportType.LIVE);
        for (DatanodeInfo dni : liveNodes) {
            long dfsUsed = dni.getDfsUsed();
            long nonDfsUsed = dni.getNonDfsUsed();
            long capacity = dni.getCapacity();
            // Guard against division by zero for a node reporting no capacity,
            // mirroring the zero guards used for the HBase counters below.
            float capacityPercentage = capacity == 0 ? 0.0f
                    : ((float) (dfsUsed + nonDfsUsed) / (float) capacity) * 100.0f;
            System.out.println(
                    String.format("%s DataNode - [ ALIVE ] - DFS Capacity: (%d + %d / %d) %.2f%%; xceivers: %d",
                            dni.getHostName(), dfsUsed, nonDfsUsed, capacity, capacityPercentage,
                            dni.getXceiverCount()));
        }
        DatanodeInfo[] deadNodes = dfsClient.datanodeReport(DatanodeReportType.DEAD);
        if (deadNodes.length > 0) {
            retCode = 2;
            for (DatanodeInfo dni : deadNodes) {
                System.out.println(dni.getHostName() + " DataNode - [ DEAD ]");
            }
        }
    } catch (IOException e) {
        retCode = 2;
        System.out.println("IOException occurred while checking HDFS cluster status!");
        e.printStackTrace(System.err);
    } finally {
        if (dfsClient != null) {
            try {
                dfsClient.close();
            } catch (IOException e) {
                System.out.println("IOException occurred while closing DFS client!");
                e.printStackTrace(System.err);
            }
        }
    }

    Configuration hbaseConf = HBaseConfiguration.create(conf);
    HBaseAdmin hbaseAdmin;
    try {
        System.out.println("HBase Rootdir: " + hbaseConf.get("hbase.rootdir"));
        hbaseAdmin = new HBaseAdmin(hbaseConf);
        ClusterStatus hcs = hbaseAdmin.getClusterStatus();
        int regionsCount = hcs.getRegionsCount();
        int requestsCount = hcs.getRequestsCount();
        for (HServerInfo serverInfo : hcs.getServerInfo()) {
            HServerLoad hsl = serverInfo.getLoad();
            // Per-server share of heap, regions, and requests; divisors may
            // legitimately be zero on an idle/empty cluster.
            float heapPercentage = ((float) hsl.getUsedHeapMB() / (float) hsl.getMaxHeapMB()) * 100.0f;
            float regionsPercentage = regionsCount == 0 ? 0.0f
                    : ((float) hsl.getNumberOfRegions() / (float) regionsCount) * 100.0f;
            float requestsPercentage = requestsCount == 0 ? 0.0f
                    : ((float) hsl.getNumberOfRequests() / (float) requestsCount) * 100.0f;
            System.out.println(String.format(
                    "%s RegionServer - [ ALIVE ] - Memory Heap: (%d / %d MB) %.2f%%, Regions: (%d / %d) %.2f%%, Requests: (%d / %d) %.2f%%",
                    serverInfo.getHostname(), hsl.getUsedHeapMB(), hsl.getMaxHeapMB(),
                    heapPercentage, hsl.getNumberOfRegions(), regionsCount, regionsPercentage,
                    hsl.getNumberOfRequests(), requestsCount, requestsPercentage));
        }
        if (hcs.getDeadServers() > 0) {
            retCode = 2;
            for (String server : hcs.getDeadServerNames()) {
                System.out.println(server + " RegionServer - [ DEAD ]");
            }
        }

    } catch (MasterNotRunningException e) {
        System.out.println("HBase Master is not running!");
        retCode = 2;
    } catch (IOException e) {
        System.out.println("IOException occurred while checking HBase cluster status!");
        retCode = 2;
    }

    // Finally probe each thrift host passed on the command line.
    int failures = 0;
    for (String host : args) {
        if (!ClusterHealth.testThrift(host)) {
            failures++;
        }
    }
    if (failures > 0) {
        retCode = 2;
    }

    System.exit(retCode);
}

From source file:com.pinterest.terrapin.controller.TerrapinControllerHandler.java

License:Apache License

/**
 * Boots the Terrapin controller: connects to Helix/ZooKeeper, registers
 * fileset watches, opens an HDFS client, and starts the thrift server plus
 * the Helix web app and status server.
 *
 * NOTE(review): startup order matters here (cluster paths must exist before
 * watches are registered; the routing table provider must be running before
 * it is added as an external-view listener) — do not reorder casually.
 *
 * @throws Exception if any of the ZooKeeper/Helix/HDFS/thrift components
 *         fails to initialize
 */
public void start() throws Exception {
    String zookeeperQuorum = TerrapinUtil.getZKQuorumFromConf(configuration);
    // Ensure that the cluster exists and connect through helix.
    this.helixAdmin = new ZKHelixAdmin(zookeeperQuorum);
    this.clusterName = configuration.getString(Constants.HELIX_CLUSTER, Constants.HELIX_CLUSTER_NAME_DEFAULT);
    setUpHelixCluster(zookeeperQuorum, this.clusterName);

    String namenode = configuration.getString(Constants.HDFS_NAMENODE);
    int hdfsReplicationFactor = configuration.getInt(Constants.HDFS_REPLICATION,
            Constants.DEFAULT_HDFS_REPLICATION);

    // Start the zookeeper manager and watch filesets/compressed views.
    ZooKeeperClient zkClient = TerrapinUtil.getZooKeeperClient(zookeeperQuorum, 30);
    this.zkManager = new ZooKeeperManager(zkClient, this.clusterName);
    this.zkManager.createClusterPaths();
    this.zkManager.registerWatchAllFileSets();
    ClusterInfo clusterInfo = new ClusterInfo(namenode, hdfsReplicationFactor);
    // Set the HDFS replication factor and namenode address.
    this.zkManager.setClusterInfo(clusterInfo);

    // Initialize a sample client instance
    this.sampleClient = new TerrapinClient(new FileSetViewManager(zkClient, clusterName), clusterName,
            configuration.getInt(Constants.THRIFT_PORT, Constants.DEFAULT_THRIFT_PORT), 1000, 1000);

    // Reconcile Helix resources against the fileset views before serving.
    List<String> resourceList = this.helixAdmin.getResourcesInCluster(this.clusterName);
    reconcileViews(resourceList);

    // Instantiate HDFS client.
    LOG.info("Connecting to HDFS: " + namenode);
    Configuration conf = new Configuration();
    conf.set("fs.default.name", namenode);
    this.hdfsClient = new DFSClient(conf);

    LOG.info("Starting Helix against " + zookeeperQuorum);
    int thriftPort = configuration.getInt(Constants.THRIFT_PORT, Constants.DEFAULT_THRIFT_PORT);
    // Instance name must be unique per host+port within the Helix cluster.
    String instanceName = InetAddress.getLocalHost().getHostName() + "_" + thriftPort;

    // Connect as spectator.
    this.spectatorManager = HelixManagerFactory.getZKHelixManager(this.clusterName, instanceName,
            InstanceType.SPECTATOR, zookeeperQuorum);
    this.spectatorManager.connect();
    this.routingTableProvider = new TerrapinRoutingTableProvider(zkManager, resourceList);
    this.routingTableProvider.start();
    this.gaugeManager = new GaugeManager(zkManager, Constants.GAUGE_MANAGER_EXEC_INTERVAL_SECONDS_DEFAULT);
    this.spectatorManager.addExternalViewChangeListener(this.routingTableProvider);

    this.hdfsManager = new HdfsManager(configuration, zkManager, this.clusterName, helixAdmin, this.hdfsClient,
            this.routingTableProvider);

    // Connect as controller.
    this.helixManager = HelixManagerFactory.getZKHelixManager(this.clusterName,
            InetAddress.getLocalHost().getHostName() + "_" + thriftPort, InstanceType.CONTROLLER,
            zookeeperQuorum);
    this.helixManager.connect();
    this.helixManager.addControllerListener(this.hdfsManager);

    LOG.info("Starting thrift server on " + thriftPort);
    // Start up the thrift server.
    startThriftServer(thriftPort);

    // Start the Helix Web App.
    this.webApp = new HelixAdminWebApp(zookeeperQuorum,
            configuration.getInt(Constants.HELIX_WEBAPP_PORT, 60000));
    this.webApp.start();

    // Start the status server to serve servlets
    this.statusServer = new StatusServer("status",
            configuration.getString(Constants.STATUS_SERVER_BINDING_ADDRESS, "0.0.0.0"),
            configuration.getInt(Constants.STATUS_SERVER_BINDING_PORT, 50030), false, this.clusterName,
            this.zkManager, this.hdfsClient, this.sampleClient);
    this.statusServer.start();
}

From source file:com.sun.grid.herd.HerdJsv.java

License:Open Source License

/**
 * Convert the given HDFS path into a list of HDFS data blocks.  If the
 * path is a directory, it will be recursively processed to include the data
 * blocks for all files contained under the directory path.
 * @param path an HDFS path/*from   w  w  w .j  a  v  a  2  s. c o  m*/
 * @param conf the Hadoop configuration
 * @return a list of HDFS data blocks
 * @throws IOException Thrown if there is an error while communcating
 * with the HDFS Namenode
 */
private static Collection<LocatedBlock> getBlocks(String path, Configuration conf) throws IOException {
    Set<LocatedBlock> blocks = new TreeSet<LocatedBlock>(new Comparator<LocatedBlock>() {
        public int compare(LocatedBlock o1, LocatedBlock o2) {
            return o1.getBlock().compareTo(o2.getBlock());
        }
    });

    DFSClient dfs = new DFSClient(conf);

    return getBlocks(path, conf, dfs, blocks);
}

From source file:com.sun.grid.herd.HerdLoadSensor.java

License:Open Source License

/**
 * Entry point of the Herd load sensor (Hadoop {@code Tool} contract).
 * Connects to HDFS, locates this host's DataNode, and enters the
 * load-sensor protocol loop.
 *
 * @param args expects the local hostname as the single argument
 * @return 0 on normal termination
 * @throws Exception if connecting to HDFS or the namenode fails
 */
public int run(String[] args) throws Exception {
    log.info("Started Herd load sensor");

    // Validate arguments BEFORE opening any HDFS connections so a bad
    // invocation fails fast without leaking a client/namenode connection.
    if (args.length > 0) {
        hostName = args[0];
    } else {
        throw new IllegalArgumentException("Usage: java com.sun.grid.herd.HerdLoadSensor hostname");
    }

    conf = getConf();
    client = new DFSClient(conf);
    namenode = createNamenode(conf);

    findDatanode();

    LoadSensorManager mgr = new LoadSensorManager(this);

    // Blocks, reading load-sensor commands until shutdown.
    mgr.parse();

    return 0;
}

From source file:com.sun.kohsuke.hadoop.importer.App.java

License:Open Source License

public static void main(String[] args) throws Exception {
    if (args.length != 3) {
        System.out.println("Usage: java -jar importer.jar [HDFS URL] [local directory] [HDFS directory]");
        System.exit(-1);/*from ww w.  j a  v a  2s.co  m*/
    }

    Configuration conf = new Configuration();
    conf.set("fs.default.name", args[0]);
    DFSClient dfs = new DFSClient(conf);

    File in = new File(args[1]);
    String out = args[2];

    File[] children = in.listFiles(new FileFilter() {
        public boolean accept(File child) {
            return child.isFile();
        }
    });
    if (children == null) {
        System.out.println("No such directory exists: " + in);
        System.exit(-1);
    }
    int cnt = 1;
    for (File f : children) {
        String dest = out + '/' + f.getName();
        FileStatus i = dfs.getFileInfo(dest);
        if (i == null || i.getModificationTime() != f.lastModified() || i.getLen() != f.length()) {
            System.out.printf("(%d/%d) Importing %s\n", cnt, children.length, f);
            try {
                IOUtils.copyBytes(new FileInputStream(f), dfs.create(dest, true), conf);
                dfs.setTimes(dest, f.lastModified(), f.lastModified());
            } catch (RemoteException e) {
                // failure to create
                e.printStackTrace();
            }
        } else {
            System.out.printf("(%d/%d) Skipping %s\n", cnt, children.length, f);
        }
        cnt++;
    }
}

From source file:org.openflamingo.fs.hdfs.HdfsFileSystemProvider.java

License:Apache License

/**
 * Collects a snapshot of HDFS health/capacity figures into a map for display.
 *
 * @param type unused selector (kept for interface compatibility)
 * @return map of status keys (capacity, used, node counts, human-readable forms)
 * @throws FileSystemException if the file system status cannot be read
 */
@Override
public Map<String, Object> getFileSystemStatus(String type) {
    Map<String, Object> map = new HashMap<String, Object>();
    DFSClient dfsClient = null;
    try {
        dfsClient = new DFSClient(fs.getConf());
        map.put("canonicalServiceName", fs.getCanonicalServiceName());
        map.put("defaultReplication", fs.getDefaultReplication());
        map.put("defaultBlockSize", fs.getDefaultBlockSize());
        map.put("workingDirectory", fs.getWorkingDirectory().toUri().getPath());
        map.put("homeDirectory", fs.getHomeDirectory().toUri().getPath());
        map.put("corruptBlocksCount", dfsClient.getCorruptBlocksCount());
        map.put("missingBlocksCount", dfsClient.getMissingBlocksCount());
        map.put("underReplicatedBlocksCount", dfsClient.getUnderReplicatedBlocksCount());
        // getDiskStatus() performs a namenode round trip per call; the original
        // code invoked it eight times. Fetch each figure exactly once.
        long capacity = dfsClient.getDiskStatus().getCapacity();
        long used = dfsClient.getDiskStatus().getDfsUsed();
        long remaining = dfsClient.getDiskStatus().getRemaining();
        map.put("capacity", capacity);
        map.put("used", used);
        map.put("remaining", remaining);
        map.put("deadNodes", dfsClient.namenode.getDatanodeReport(FSConstants.DatanodeReportType.DEAD).length);
        map.put("liveNodes", dfsClient.namenode.getDatanodeReport(FSConstants.DatanodeReportType.LIVE).length);
        map.put("humanCapacity", byteDesc(capacity));
        map.put("humanUsed", byteDesc(used));
        // Guard against a zero-capacity report to avoid NaN/Infinity percentages.
        map.put("humanProgressPercent",
                formatPercent(capacity == 0 ? 0.0 : (double) remaining / (double) capacity, 2));
        map.put("humanProgress", capacity == 0 ? 0.0f : (float) remaining / (float) capacity);
        map.put("humanRemaining", byteDesc(remaining));
        map.put("humanDefaultBlockSize", byteDesc(fs.getDefaultBlockSize()));
        return map;
    } catch (Exception ex) {
        throw new FileSystemException(bundle.message("S_FS", "CANNOT_ACCESS_FS_STATUS"), ex);
    } finally {
        // Single close path (the original also closed explicitly inside try).
        IOUtils.closeQuietly(dfsClient);
    }
}

From source file:org.sf.xrime.algorithms.pagerank.PageRankCorrectionMapper.java

License:Apache License

/**
 * Marks the PageRank iteration as "should continue" by creating the
 * continue-marker directory in HDFS. Idempotent: only the first call per
 * mapper instance does any work ({@code changeFlag} latches).
 *
 * @throws IOException if the marker directory cannot be created
 */
private void recordContinue() throws IOException {
    if (changeFlag) {
        return;
    }

    changeFlag = true;

    if (continueFile != null) {
        DFSClient client = new DFSClient(job);
        try {
            client.mkdirs(continueFile);
        } finally {
            // Previously the client leaked when mkdirs threw.
            client.close();
        }
    }
}

From source file:util.ResourceFile.java

License:Open Source License

/**
 * Opens the named resource for reading, resolving it in order as: an HDFS
 * file, a local file, and finally a classpath resource of the caller's class.
 *
 * @param fileURI the resource location (HDFS URI, local path, or resource name)
 * @return an open stream, or {@code null} if an HDFS path does not exist
 *         (and possibly {@code null} from getResourceAsStream for a missing resource)
 * @throws IOException if HDFS access fails
 */
public static InputStream getInputStream(String fileURI) throws IOException {
    if (isHDFSFile(fileURI)) {
        // Meta data file is a file within HDFS
        Configuration conf = new Configuration();
        // NOTE(review): dfsClient is intentionally not closed here — closing a
        // DFSClient closes the streams it opened, and the stream is returned
        // to the caller. The client's lifetime is tied to the stream.
        DFSClient dfsClient = new DFSClient(conf);
        String hdfsFileName = hdfsFileName(fileURI);

        if (!dfsClient.exists(hdfsFileName))
            return null;

        Log.info("Opening HDFS file (" + hdfsFileName + ")");
        return dfsClient.open(hdfsFileName);
    } else {
        File metaFile = new File(fileURI);
        if (metaFile.exists()) {
            // Meta data file is a file on local file system
            Log.info("Opening local file (" + metaFile + ")");
            return new FileInputStream(metaFile);
        } else {
            // Try meta data file as resource available in jar.
            // Trick: throw and catch an exception purely to capture the stack
            // trace; frame [1] is the direct caller of this method, whose class
            // loader is then used to resolve the resource. Fragile — depends on
            // the exact frame layout; do not refactor without re-verifying.
            try {
                throw new IOException("foo");
            } catch (IOException e) {
                String className = e.getStackTrace()[1].getClassName();
                try {
                    Class<?> callerClass = Class.forName(className);
                    Log.info("Opening resource file (" + fileURI + ")");
                    Log.info("using class (" + className + ")");
                    return callerClass.getResourceAsStream(fileURI);
                } catch (ClassNotFoundException e1) {
                    e1.printStackTrace();
                    throw new RuntimeException(e1);
                }
            }
        }
    }
}

From source file:util.ResourceFile.java

License:Open Source License

/**
 * Opens the named resource for writing: creates it in HDFS when the URI is
 * an HDFS location, otherwise on the local file system. When the URI denotes
 * a gzip file, the returned stream transparently compresses.
 *
 * @param fileURI the destination (HDFS URI or local path)
 * @return an open output stream (gzip-wrapped when applicable)
 * @throws IOException if the destination cannot be created
 */
public static OutputStream getOutputStream(String fileURI) throws IOException {
    OutputStream stream;

    if (isHDFSFile(fileURI)) {
        // Destination lives in HDFS: create (overwriting) via a DFS client.
        Configuration hadoopConf = new Configuration();
        DFSClient client = new DFSClient(hadoopConf);
        stream = client.create(hdfsFileName(fileURI), true);
    } else {
        // Destination is a plain local file.
        stream = new FileOutputStream(new File(fileURI));
    }

    // Layer gzip compression on top when the name calls for it.
    return isGzipFile(fileURI) ? new GZIPOutputStream(stream) : stream;
}