Example usage for org.apache.hadoop.hdfs DFSClient DFSClient

List of usage examples for org.apache.hadoop.hdfs DFSClient DFSClient

Introduction

On this page you can find an example of how to use the org.apache.hadoop.hdfs DFSClient constructor.

Prototype

public DFSClient(URI nameNodeUri, Configuration conf) throws IOException 

Source Link

Document

Same as this(nameNodeUri, conf, null);

Usage

From source file:MStress_Client.java

License:Open Source License

/**
 * Entry point for the MStress client: connects to the configured HDFS
 * namenode and dispatches to the test selected by {@code testName_}
 * ("create", "stat", "readdir" or "delete").
 * Exits with status -1 on option/plan/IO failure or a failing test.
 */
public static void main(String args[]) {
    parseOptions(args);
    int result = 0;

    try {
        Configuration conf = new Configuration(true);
        String confSet = "hdfs://" + dfsServer_ + ":" + dfsPort_;
        conf.set("fs.default.name", confSet);
        conf.set("fs.trash.interval", "0");
        InetSocketAddress inet = new InetSocketAddress(dfsServer_, dfsPort_);
        dfsClient_ = new DFSClient(inet, conf);
        try {
            if (parsePlanFile() < 0) {
                System.exit(-1);
            }

            if (testName_.equals("create")) {
                result = createDFSPaths();
            } else if (testName_.equals("stat")) {
                result = statDFSPaths();
            } else if (testName_.equals("readdir")) {
                result = listDFSPaths();
            } else if (testName_.equals("delete")) {
                result = removeDFSPaths();
            } else {
                System.out.printf("Error: unrecognized test \'%s\'\n", testName_);
                System.exit(-1);
            }
        } finally {
            // Fix: the original never closed the client, leaking its RPC
            // connection (the sibling copy of this main() does close it).
            dfsClient_.close();
        }
    } catch (IOException e) {
        e.printStackTrace();
        System.exit(-1);
    }

    if (result != 0) {
        System.exit(-1);
    }
}

From source file:INotifyUtil.java

License:Apache License

/**
 * Poll events and output the details.//from  w  w  w .ja  v  a 2 s .co  m
 * Ctrl + C to stop polling.
 * @param args the parameter is not used.
 * @throws IOException if configuration error or I/O error happens.
 */
/**
 * Polls HDFS inotify events and prints each event until interrupted
 * (Ctrl + C) or until the event stream reports missing events.
 *
 * @param args unused.
 * @throws IOException on configuration or I/O error.
 */
public static void main(String args[]) throws IOException {
    Configuration conf = new HdfsConfiguration();
    // Fix: try-with-resources — the original never closed the DFSClient.
    try (DFSClient client = new DFSClient(DFSUtilClient.getNNAddress(conf), conf)) {
        DFSInotifyEventInputStream iStream = client.getInotifyEventStream();
        while (true) {
            try {
                EventBatch eventBatch = iStream.take();
                for (Event event : eventBatch.getEvents()) {
                    System.out.println(event.toString());
                }
            } catch (InterruptedException e) {
                // Fix: restore the interrupt status so callers can observe it.
                Thread.currentThread().interrupt();
                System.out.println("Interrupted. Exiting...");
                return;
            } catch (MissingEventsException e) {
                e.printStackTrace();
                return;
            }
        }
    }
}

From source file:ch.cern.db.hdfs.DistributedFileSystemMetadata.java

License:GNU General Public License

/**
 * Collects, per datanode host, the number of configured storage
 * directories (disks), keyed by host name.
 * Requires elevated privileges; on failure an empty map is returned
 * and a warning is logged.
 *
 * @return host name -> number of storage directories (empty on failure).
 */
public HashMap<String, Integer> getNumberOfDataDirsPerHost() {
    HashMap<String, Integer> disksPerHost = new HashMap<>();

    // Fix: try-with-resources closes the client; the original leaked it and
    // suppressed the "resource" warning instead.
    try (DFSClient dfsClient = new DFSClient(NameNode.getAddress(getConf()), getConf())) {
        DatanodeStorageReport[] datanodeStorageReports = dfsClient
                .getDatanodeStorageReport(DatanodeReportType.ALL);

        for (DatanodeStorageReport datanodeStorageReport : datanodeStorageReports) {
            disksPerHost.put(datanodeStorageReport.getDatanodeInfo().getHostName(),
                    datanodeStorageReport.getStorageReports().length);
        }
    } catch (IOException e) {
        // Fix: corrected spelling of the warning message.
        LOG.warn(
                "number of data directories (disks) per node could not be collected (requires higher privileges).");
    }

    return disksPerHost;
}

From source file:com.cloudera.llama.am.MiniLlama.java

License:Apache License

/**
 * Builds a 1:1 mapping from each DataNode transfer address to a
 * NodeManager host:port, pairing them by index. MiniLlama requires the
 * DN and NM counts to be equal at startup.
 *
 * @param conf cluster configuration ("fs.defaultFS" must be set).
 * @return map of DN xfer address -> NM host:port.
 * @throws Exception on RPC failure or if DN/NM counts differ.
 */
private Map<String, String> getDataNodeNodeManagerMapping(Configuration conf) throws Exception {
    Map<String, String> map = new HashMap<String, String>();
    DatanodeInfo[] DNs;
    // Fix: close the DFSClient once the report is fetched (original leaked it).
    try (DFSClient dfsClient = new DFSClient(new URI(conf.get("fs.defaultFS")), conf)) {
        DNs = dfsClient.datanodeReport(HdfsConstants.DatanodeReportType.ALL);
    }
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    // Fix: stop the YARN client even when an exception is thrown below.
    try {
        List<NodeId> nodeIds = getYarnNodeIds(conf);
        if (nodeIds.size() != DNs.length) {
            throw new RuntimeException("Number of DNs and NMs differ, MiniLlama "
                    + "node mapping requires them to be equal at startup");
        }
        LOG.info("HDFS/YARN mapping:");
        for (int i = 0; i < DNs.length; i++) {
            String key = DNs[i].getXferAddr();
            NodeId nodeId = nodeIds.get(i);
            String value = nodeId.getHost() + ":" + nodeId.getPort();
            map.put(key, value);
            LOG.info("  DN/NM: " + key + "/" + value);
        }
    } finally {
        yarnClient.stop();
    }
    nodes = map.size();
    verifySingleHost(map.keySet(), "DataNode");
    verifySingleHost(map.values(), "NodeManager");
    return map;
}

From source file:com.mellanox.r4h.MiniDFSCluster.java

License:Apache License

/** Wait until the given namenode gets registration from all the datanodes */
public void waitActive(int nnIndex) throws IOException {
    if (nameNodes.length == 0 || nameNodes[nnIndex] == null || nameNodes[nnIndex].nameNode == null) {
        return;//from  w w w  .j  a  v a2s.  c  om
    }
    InetSocketAddress addr = nameNodes[nnIndex].nameNode.getServiceRpcAddress();
    assert addr.getPort() != 0;
    DFSClient client = new DFSClient(addr, conf);

    // ensure all datanodes have registered and sent heartbeat to the namenode
    while (shouldWait(client.datanodeReport(DatanodeReportType.LIVE), addr)) {
        try {
            LOG.info("Waiting for cluster to become active");
            Thread.sleep(100);
        } catch (InterruptedException e) {
        }
    }

    client.close();
}

From source file:com.pinterest.terrapin.hadoop.HdfsUploader.java

License:Apache License

/**
 * Creates an uploader bound to the namenode embedded in the given
 * absolute HDFS path; the path component is kept as the target directory.
 */
public HdfsUploader(TerrapinUploaderOptions uploaderOptions, String absoluteHdfsDir)
        throws IOException, URISyntaxException {
    super(uploaderOptions);
    // Split the absolute path into its namenode authority and directory parts.
    URI fullUri = new Path(absoluteHdfsDir).toUri();
    URI namenodeUri = new URI(fullUri.getScheme(), fullUri.getAuthority(), null, null);
    this.dfsClient = new DFSClient(namenodeUri, new Configuration());
    this.hdfsDir = new Path(fullUri.getPath());
}

From source file:com.qfs.mstress.MStress_Client.java

License:Apache License

/**
 * Entry point for the MStress client: connects to the configured HDFS
 * namenode and dispatches to the test selected by {@code testName_}
 * ("create", "stat", "readdir" or "delete").
 * Exits with status -1 on option/plan/IO failure or a failing test.
 */
public static void main(String args[]) {
    parseOptions(args);
    int result = 0;

    try {
        Configuration conf = new Configuration(true);
        String confSet = "hdfs://" + dfsServer_ + ":" + dfsPort_;
        conf.set("fs.default.name", confSet);
        conf.set("fs.trash.interval", "0");
        InetSocketAddress inet = new InetSocketAddress(dfsServer_, dfsPort_);
        dfsClient_ = new DFSClient(inet, conf);
        try {
            if (parsePlanFile() < 0) {
                System.exit(-1);
            }

            if (testName_.equals("create")) {
                result = createDFSPaths();
            } else if (testName_.equals("stat")) {
                result = statDFSPaths();
            } else if (testName_.equals("readdir")) {
                result = listDFSPaths();
            } else if (testName_.equals("delete")) {
                result = removeDFSPaths();
            } else {
                System.out.printf("Error: unrecognized test \'%s\'\n", testName_);
                System.exit(-1);
            }
        } finally {
            // Fix: close was only on the happy path; a thrown IOException
            // or a failed test skipped it. finally covers the normal paths.
            dfsClient_.close();
        }
    } catch (IOException e) {
        e.printStackTrace();
        System.exit(-1);
    }

    if (result != 0) {
        System.exit(-1);
    }
}

From source file:hsyndicate.hadoop.utils.DFSNodeInfoUtils.java

License:Apache License

/**
 * Returns the host names of all live datanodes known to the namenode.
 *
 * @param conf cluster configuration used to locate the namenode.
 * @return array of trimmed datanode host names (possibly empty).
 * @throws IOException on RPC failure.
 */
public static String[] getDataNodes(Configuration conf) throws IOException {
    List<String> datanodes = new ArrayList<String>();
    // Fix: try-with-resources closes the client; the original leaked it.
    try (DFSClient client = new DFSClient(NameNode.getAddress(conf), conf)) {
        DatanodeInfo[] datanodeReport = client.datanodeReport(HdfsConstants.DatanodeReportType.LIVE);
        for (DatanodeInfo nodeinfo : datanodeReport) {
            datanodes.add(nodeinfo.getHostName().trim());
        }
    }
    return datanodes.toArray(new String[0]);
}

From source file:hsyndicate.hadoop.utils.DFSNodeInfoUtils.java

License:Apache License

/**
 * Returns the host names of all live datanodes as a single
 * comma-separated string (no trailing separator; empty if none).
 *
 * @param conf cluster configuration used to locate the namenode.
 * @return comma-separated list of trimmed datanode host names.
 * @throws IOException on RPC failure.
 */
public static String getDataNodesCommaSeparated(Configuration conf) throws IOException {
    StringBuilder sb = new StringBuilder();
    // Fix: try-with-resources closes the client; the original leaked it.
    try (DFSClient client = new DFSClient(NameNode.getAddress(conf), conf)) {
        DatanodeInfo[] datanodeReport = client.datanodeReport(HdfsConstants.DatanodeReportType.LIVE);
        for (DatanodeInfo nodeinfo : datanodeReport) {
            if (sb.length() != 0) {
                sb.append(",");
            }
            sb.append(nodeinfo.getHostName().trim());
        }
    }
    return sb.toString();
}

From source file:hudson.gridmaven.gridlayer.PluginImpl.java

License:Open Source License

/**
 * Connects to this HDFS.
 *
 * @return a new {@link DFSClient} bound to this plugin's HDFS address;
 *         the caller owns it and is responsible for closing it.
 * @throws IOException if the client cannot be created.
 */
public DFSClient createDFSClient() throws IOException {
    // loadDefaults=false: the client is configured solely by the HDFS address.
    Configuration conf = new Configuration(false);
    return new DFSClient(getHdfsAddress(), conf);
}