Example usage for org.apache.hadoop.util StringUtils startupShutdownMessage

Introduction

This page collects example usages of org.apache.hadoop.util.StringUtils.startupShutdownMessage from open-source projects.

Prototype

static void startupShutdownMessage(Class<?> clazz, String[] args, final LogAdapter LOG) 
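
startupShutdownMessage logs a STARTUP_MSG banner (class name, host, arguments, version and build information) and registers a shutdown hook that logs a matching SHUTDOWN_MSG when the JVM exits. The LogAdapter overload above is the internal variant; callers normally pass a commons-logging Log or SLF4J Logger to the public overloads, as the examples below do. A minimal sketch of the typical calling pattern, using a hypothetical MyDaemon class:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.StringUtils;

public class MyDaemon { // hypothetical daemon class, for illustration only
    private static final Log LOG = LogFactory.getLog(MyDaemon.class);

    public static void main(String[] args) {
        // Logs the STARTUP_MSG banner and registers a shutdown hook
        // that logs a matching SHUTDOWN_MSG on JVM exit.
        StringUtils.startupShutdownMessage(MyDaemon.class, args, LOG);
        // ... start services, then block until shutdown ...
    }
}

Every example below follows this pattern: the call is made first thing in main (or run), before any services start, so the banner appears at the top of the daemon's log.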

Usage

From source file: common.DataNode.java

License: Apache License

public static void main(String args[]) {
    try {
        StringUtils.startupShutdownMessage(DataNode.class, args, LOG);
        DataNode datanode = createDataNode(args, null);
        if (datanode != null)
            datanode.join();
    } catch (Throwable e) {
        LOG.error(StringUtils.stringifyException(e));
        System.exit(-1);
    }
}

From source file: common.NameNode.java

License: Apache License

public static void main(String argv[]) throws Exception {
    try {
        StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
        NameNode namenode = createNameNode(argv, null);
        if (namenode != null)
            namenode.join();
    } catch (Throwable e) {
        LOG.error(StringUtils.stringifyException(e));
        System.exit(-1);
    }
}

From source file: org.apache.hama.BSPMasterRunner.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    StringUtils.startupShutdownMessage(BSPMaster.class, args, LOG);

    if (args.length != 0) {
        System.out.println("usage: BSPMasterRunner");
        System.exit(-1);
    }

    try {
        HamaConfiguration conf = new HamaConfiguration();
        BSPMaster master = BSPMaster.startMaster(conf);
        master.offerService();
    } catch (Throwable e) {
        LOG.fatal(StringUtils.stringifyException(e));
        return -1;
    }
    return 0;
}

From source file: org.apache.hama.GroomServerRunner.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    StringUtils.startupShutdownMessage(GroomServer.class, args, LOG);

    try {
        HamaConfiguration conf = new HamaConfiguration();

        if (args.length == 1) {
            conf.set("bsp.peer.hostname", args[0]);
        }

        GroomServer groom = GroomServer.constructGroomServer(GroomServer.class, conf);
        GroomServer.startGroomServer(groom).join();
    } catch (Throwable e) {
        LOG.fatal(StringUtils.stringifyException(e));
        return -1;
    }

    return 0;
}

From source file: org.commoncrawl.server.CommonCrawlServer.java

License: Open Source License

public static void main(String argv[]) throws Exception {

    try {
        Configuration conf = new Configuration();

        conf.addResource("nutch-default.xml");
        conf.addResource("nutch-site.xml");
        conf.addResource("core-site.xml");
        conf.addResource("mapred-site.xml");
        conf.addResource("hdfs-site.xml");
        // conf.addResource("hadoop-site.xml");
        // conf.addResource("commoncrawl-default.xml");
        // conf.addResource("commoncrawl-site.xml");

        /*
         * conf.setClassLoader(new ClassLoader() {
         *
         *     @Override
         *     protected Class<?> findClass(String name) throws ClassNotFoundException {
         *         if (name.startsWith("org.crawlcommons")) {
         *             // This is a hack to deal with serialized data in HDFS sequence
         *             // files that refers to protocol buffers with the old crawler
         *             // package name org.crawlcommons instead of the new package name
         *             // org.commoncrawl. We replace the crawlcommons prefix and call
         *             // back into the class loader to re-resolve the name.
         *             name = name.replaceFirst("org.crawlcommons", "org.commoncrawl");
         *             return loadClass(name);
         *         }
         *         return super.findClass(name);
         *     }
         * });
         */

        // set this config object as our global config
        CrawlEnvironment.setHadoopConfig(conf);

        _commonConfig = parseCommonConfig(argv);

        if (_commonConfig._className == null) {
            printCommonUsage();
            return;
        }

        if (_commonConfig._hostName != null) {
            conf.set("org.commoncrawl.hostname", _commonConfig._hostName);
        }

        if (_commonConfig._rpcInterface != null) {
            conf.set("org.commoncrawl.rpcInterface", _commonConfig._rpcInterface);
        }

        if (_commonConfig._webInterface != null) {
            conf.set("org.commoncrawl.httpInterface", _commonConfig._webInterface);
        }

        if (_commonConfig._rpcPort != -1) {
            conf.setInt("org.commoncrawl.rpcPort", _commonConfig._rpcPort);
        }
        if (_commonConfig._webPort != -1) {
            conf.setInt("org.commoncrawl.httpPort", _commonConfig._webPort);
        }
        if (_commonConfig._dataDir != null) {
            conf.set("org.commoncrawl.dataDir", _commonConfig._dataDir);
        }
        if (_commonConfig._dnsThreadPoolSize != -1) {
            conf.setInt("org.commoncrawl.dnsThreadPoolSize", _commonConfig._dnsThreadPoolSize);
        }
        LOG.info("Log File Is:" + System.getProperty("commoncrawl.log.file"));

        LOG.info("Instantiating Class:" + _commonConfig._className);
        Class<?> theClass = conf.getClassByName(_commonConfig._className);

        Object serverInstance = theClass.newInstance();
        CommonCrawlServer server = CommonCrawlServer.class.cast(serverInstance);

        StringUtils.startupShutdownMessage(theClass, argv, LOG);

        if (server.init(argv, conf)) {
            try {

                server.start();
                server.join();

            } catch (IOException e) {
                LOG.error(StringUtils.stringifyException(e));
                throw e;
            } finally {
                server.stopDaemons();
                server.stop();
            }
        }

    } catch (Throwable e) {
        LOG.error(StringUtils.stringifyException(e));
        e.printStackTrace();
        System.exit(-1);
    }
}

From source file: org.cstor.cproc.cloudComputingFramework.CProcFramework.java

License: Apache License

public static void main(String argv[]) throws Exception {

    LOG.info("CProcFramework  version for hadoop-0.20.2-patch : 32");

    // start the GC thread
    Thread th = new Thread(new GCThread());
    th.start();

    //LOG.info("CProcFramework [NameNode IP:Port]");
    StringUtils.startupShutdownMessage(CProcFramework.class, argv, LOG);
    // loads hdfs-site.xml and core-site.xml
    CProcConfiguration cprocConf = CProcConfiguration.getCProcConfiguration();
    RpcConfiguration rpcConf = RpcConfiguration.getRpcConfiguration();
    setParamToRpcConf(cprocConf, rpcConf);

    CProcFramework cpf = null;
    ClientProtocol nameNode = null;
    ClientDatanodeProtocol dataNode = null;
    InetSocketAddress dataNodeAddr = null;
    InetSocketAddress nameNodeAddr = null;
    try {

        boolean isNN = true;

        //LOG.info("conf.get(\"fs.default.name\") == " + conf.get("cProc.name.") );
        // wzt: expected args, e.g. net0 192.168.139.21 9000
        if (argv.length >= 3) {
            network = argv[0];
            namenodeAddress = argv[1];
            port = argv[2];
        } else {

            LOG.info("CProcFramework [network adapter] [namenode Address]");
            System.exit(0);

        }
        LOG.info("network = " + network);

        try {
            nameNodeAddr = NetUtils.createSocketAddr(namenodeAddress);
        } catch (IllegalArgumentException e) {
            //nameNodeAddr = NetUtils.createSocketAddr(argv[0]);
        }

        LOG.info("NameNodeAddr = " + nameNodeAddr);

        //-----------------------get all DN and get conf of NN----------------------------------

        ArrayList<String> DNIP = new ArrayList<String>();

        // Configuration confInFunc = new Configuration();

        nameNode = (ClientProtocol) RPC.getProxy(ClientProtocol.class, ClientProtocol.versionID, nameNodeAddr,
                rpcConf, NetUtils.getSocketFactory(rpcConf, ClientProtocol.class));

        DatanodeDescriptor[] dds = nameNode.getAllDatanode();

        // LOG.info("==========");
        for (DatanodeDescriptor dd : dds) {
            LOG.info(dd.getHost());
            //LOG.info(dd.getHostName());
            //LOG.info(dd.getDatanodeReport());
            DNIP.add(dd.getHost());
        }
        //LOG.info("==========");
        //conf = nameNode.getConfiguration();
        setParamToCprocConf(nameNode.getConfiguration(), rpcConf);

        LOG.info("getHostIP.getLocalIP() = " + getHostIPcProc.getLocalIP(network));

        //-----------------------if this node is a DN get conf----------------------------------

        if (DNIP.contains(getHostIPcProc.getLocalIP(network))) {
            dataNodeAddr = NetUtils
                    .createSocketAddr("127.0.0.1:" + rpcConf.get("dfs.datanode.ipc.address").split(":")[1]);

            dataNode = (ClientDatanodeProtocol) RPC.getProxy(ClientDatanodeProtocol.class,
                    ClientDatanodeProtocol.versionID, dataNodeAddr, rpcConf,
                    NetUtils.getSocketFactory(rpcConf, ClientDatanodeProtocol.class));

            //conf = dataNode.getConfiguration();
            LOG.info("This is DataNode!!");
            isNN = false;
        } else {
            LOG.info("This is NameNode!!");
            isNN = true;
        }

        cpf = createCProcFramework(nameNode, dataNode, cprocConf, rpcConf, isNN, nameNodeAddr);

        cpf.waitForStop();

    } catch (Throwable e) {
        LOG.error(StringUtils.stringifyException(e));
        System.exit(-1);
    } finally {
        // 2013-12-10: RPC proxy shutdown disabled
        //          if(nameNode != null){
        //             RPC.stopProxy(nameNode);
        //          }
        //          if(dataNode != null){
        //             RPC.stopProxy(dataNode);
        //          }
    }
}

From source file: skewtune.mapreduce.STJobTracker.java

License: Apache License

/**
 * Start the JobTracker process. This is used only for debugging. As a rule,
 * JobTracker should be run as part of the DFS Namenode process.
 */
public static void main(String argv[]) throws IOException, InterruptedException {
    StringUtils.startupShutdownMessage(STJobTracker.class, argv, LOG);

    try {
        if (argv.length == 0) {
            STJobTracker tracker = startTracker(new JobConf());
            tracker.offerService();
        } else {
            if ("-dumpConfiguration".equals(argv[0]) && argv.length == 1) {
                dumpConfiguration(new PrintWriter(System.out));
                System.out.println();
            } else {
                System.out.println("usage: JobTracker [-dumpConfiguration]");
                System.exit(-1);
            }
        }
    } catch (Throwable e) {
        LOG.fatal(StringUtils.stringifyException(e));
        System.exit(-1);
    }
}