Example usage for org.apache.hadoop.yarn.conf.YarnConfiguration#YarnConfiguration()

List of usage examples for the org.apache.hadoop.yarn.conf.YarnConfiguration no-argument constructor, YarnConfiguration()

Introduction

On this page you can find example usages of the org.apache.hadoop.yarn.conf.YarnConfiguration no-argument constructor, YarnConfiguration().

Prototype

public YarnConfiguration() 
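
The no-argument constructor creates a Configuration that, in addition to the core Hadoop resources, loads yarn-default.xml and yarn-site.xml from the classpath. Below is a minimal, self-contained sketch of the typical pattern (not taken from any of the projects listed under Usage; the class name YarnConfigurationExample is illustrative):

import java.util.List;

import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnConfigurationExample {
    public static void main(String[] args) throws Exception {
        // Loads yarn-default.xml and yarn-site.xml from the classpath,
        // on top of the core Hadoop configuration resources.
        YarnConfiguration conf = new YarnConfiguration();

        // Read a YARN property; falls back to the default from yarn-default.xml.
        String rmAddress = conf.get(YarnConfiguration.RM_ADDRESS);
        System.out.println("ResourceManager address: " + rmAddress);

        // The configuration is typically handed to a YARN client before use.
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(conf);
        yarnClient.start();
        try {
            List<ApplicationReport> apps = yarnClient.getApplications();
            System.out.println("Applications known to the RM: " + apps.size());
        } finally {
            yarnClient.stop();
        }
    }
}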

Usage

From source file:com.toy.TOYMaster.java

License:Apache License

public TOYMaster() throws Exception {
    this.configuration = new YarnConfiguration();
    final String rm_address = System.getenv(YarnConfiguration.RM_SCHEDULER_ADDRESS);
    LOG.info("RM is @ {}", rm_address);
    configuration.set(YarnConfiguration.RM_SCHEDULER_ADDRESS, rm_address);
    this.fs = FileSystem.get(configuration);
    init();
}

From source file:com.twitter.hraven.hadoopJobMonitor.ClusterStatusChecker.java

License:Apache License

/**
 * Get the app list from RM and check status of each
 */
@Override
public void run() {
    // 1. get the list of running apps
    LOG.info("Running " + ClusterStatusChecker.class.getName());
    try {
        YarnConfiguration yConf = new YarnConfiguration();
        LOG.info(yConf.get(YarnConfiguration.RM_ADDRESS));
        LOG.info("Getting appList ...");
        // TODO: in future hadoop API we will be able to filter the app list
        EnumSet<YarnApplicationState> states = EnumSet.of(YarnApplicationState.RUNNING);
        List<ApplicationReport> appList = rmDelegate.getApplications(states);
        LOG.info("appList received. size is: " + appList.size());
        for (ApplicationReport appReport : appList)
            checkAppStatus(appReport);
    } catch (YarnRuntimeException e) {
        LOG.error("Error in getting application list from RM", e);
    } catch (YarnException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    } catch (IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
}

From source file:com.twitter.hraven.hadoopJobMonitor.HadoopJobMonitorService.java

License:Apache License

public void init() {
    YarnConfiguration yConf = new YarnConfiguration();
    DefaultMetricsSystem.initialize("HadoopJobMonitor");
    String logDir = System.getProperty("hadoopJobMonitor.log.dir");
    if (logDir == null)
        logDir = "/tmp";
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    try {
        ObjectName name = new ObjectName(
                "com.twitter.hraven.hadoopJobMonitor.jmx:type=" + WhiteList.class.getSimpleName());
        WhiteList.init(logDir);
        WhiteList mbean = WhiteList.getInstance();
        mbs.registerMBean(mbean, name);
        LOG.error("Current whitelist is: \n" + mbean.getExpirations());
    } catch (Exception e) {
        LOG.fatal("Error in retriving white list from dir " + logDir, e);
    }

    metrics = HadoopJobMonitorMetrics.initSingleton(conf);

    rmDelegate = new ResourceMgrDelegate(yConf);
    clientCache = new ClientCache(conf, rmDelegate);
    AppConfCache.init(conf);
    ProgressCache.init(conf);
    Mail.init(conf);
    Notifier.init(conf);
    clusterCheckerExecutor = Executors
            .newSingleThreadScheduledExecutor(new ClusterStatusChecker.SimpleThreadFactory());
    int concurrentAppCheckers = conf.getInt(HadoopJobMonitorConfiguration.NEW_APP_CHECKER_CONCURRENCY,
            HadoopJobMonitorConfiguration.DEFAULT_NEW_APP_CHECKER_CONCURRENCY);
    appCheckerExecutor = new BlockingExecutor(concurrentAppCheckers,
            new AppStatusChecker.SimpleThreadFactory());
}

From source file:com.yahoo.storm.yarn.MasterServer.java

License:Open Source License

@SuppressWarnings("unchecked")
public static void main(String[] args) throws Exception {
    LOG.info("Starting the AM!!!!");

    Options opts = new Options();
    opts.addOption("app_attempt_id", true, "App Attempt ID. Not to be used " + "unless for testing purposes");

    CommandLine cl = new GnuParser().parse(opts, args);

    ApplicationAttemptId appAttemptID;
    Map<String, String> envs = System.getenv();
    if (cl.hasOption("app_attempt_id")) {
        String appIdStr = cl.getOptionValue("app_attempt_id", "");
        appAttemptID = ConverterUtils.toApplicationAttemptId(appIdStr);
    } else if (envs.containsKey(ApplicationConstants.Environment.CONTAINER_ID.name())) {
        ContainerId containerId = ConverterUtils
                .toContainerId(envs.get(ApplicationConstants.Environment.CONTAINER_ID.name()));
        appAttemptID = containerId.getApplicationAttemptId();
        LOG.info("appAttemptID from env:" + appAttemptID.toString());
    } else {
        LOG.error("appAttemptID is not specified for storm master");
        throw new Exception("appAttemptID is not specified for storm master");
    }

    @SuppressWarnings("rawtypes")
    Map storm_conf = Config.readStormConfig(null);
    Util.rmNulls(storm_conf);

    YarnConfiguration hadoopConf = new YarnConfiguration();

    final String host = InetAddress.getLocalHost().getHostName();
    storm_conf.put("nimbus.host", host);

    StormAMRMClient rmClient = new StormAMRMClient(appAttemptID, storm_conf, hadoopConf);
    rmClient.init(hadoopConf);
    rmClient.start();

    BlockingQueue<Container> launcherQueue = new LinkedBlockingQueue<Container>();

    MasterServer server = new MasterServer(storm_conf, rmClient);
    try {
        final int port = Utils.getInt(storm_conf.get(Config.MASTER_THRIFT_PORT));
        final String target = host + ":" + port;
        InetSocketAddress addr = NetUtils.createSocketAddr(target);
        RegisterApplicationMasterResponse resp = rmClient.registerApplicationMaster(addr.getHostName(), port,
                null);
        LOG.info("Got a registration response " + resp);
        LOG.info("Max Capability " + resp.getMaximumResourceCapability());
        rmClient.setMaxResource(resp.getMaximumResourceCapability());
        LOG.info("Starting HB thread");
        server.initAndStartHeartbeat(rmClient, launcherQueue,
                (Integer) storm_conf.get(Config.MASTER_HEARTBEAT_INTERVAL_MILLIS));
        LOG.info("Starting launcher");
        initAndStartLauncher(rmClient, launcherQueue);
        rmClient.startAllSupervisors();
        LOG.info("Starting Master Thrift Server");
        server.serve();
        LOG.info("StormAMRMClient::unregisterApplicationMaster");
        rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "AllDone", null);
    } finally {
        if (server.isServing()) {
            LOG.info("Stop Master Thrift Server");
            server.stop();
        }
        LOG.info("Stop RM client");
        rmClient.stop();
    }
    System.exit(0);
}

From source file:com.yahoo.storm.yarn.StormOnYarn.java

License:Open Source License

private StormOnYarn(ApplicationId appId, @SuppressWarnings("rawtypes") Map stormConf) {
    _hadoopConf = new YarnConfiguration();
    _yarn = YarnClient.createYarnClient();
    _stormConf = stormConf;
    _appId = appId;
    _yarn.init(_hadoopConf);
    _yarn.start();
}

From source file:com.yahoo.storm.yarn.TestIntegration.java

License:Open Source License

@SuppressWarnings({ "rawtypes", "unchecked" })
@BeforeClass
public static void setup() {
    try {
        zkServer = new EmbeddedZKServer();
        zkServer.start();

        LOG.info("Starting up MiniYARN cluster");
        if (yarnCluster == null) {
            yarnCluster = new MiniYARNCluster(TestIntegration.class.getName(), 2, 1, 1);
            Configuration conf = new YarnConfiguration();
            conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
            conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 2 * 1024);
            yarnCluster.init(conf);
            yarnCluster.start();
        }
        sleep(2000);

        Configuration miniyarn_conf = yarnCluster.getConfig();
        yarn_site_xml = testConf.createYarnSiteConfig(miniyarn_conf);

        storm_home = testConf.stormHomePath();
        LOG.info("Will be using storm found on PATH at " + storm_home);

        //create a storm configuration file with zkport 
        final Map storm_conf = Config.readStormConfig();
        storm_conf.put(backtype.storm.Config.STORM_ZOOKEEPER_PORT, zkServer.port());
        storm_conf_file = testConf.createConfigFile(storm_conf);

        List<String> cmd = java.util.Arrays.asList("bin/storm-yarn", "launch", storm_conf_file.toString(),
                "--stormZip", "lib/storm.zip", "--appname", "storm-on-yarn-test", "--output",
                "target/appId.txt");
        execute(cmd);

        // wait for Storm cluster to be fully launched
        sleep(15000);

        BufferedReader reader = new BufferedReader(new FileReader("target/appId.txt"));
        appId = reader.readLine();
        reader.close();
        if (appId != null)
            appId = appId.trim();
        LOG.info("application ID:" + appId);
    } catch (Exception ex) {
        LOG.error("setup failure", ex);
        Assert.assertEquals(null, ex);
    }
}

From source file:com.yahoo.storm.yarn.TestStormMaster.java

License:Open Source License

@SuppressWarnings({ "unchecked", "rawtypes" })
@BeforeClass
public static void setup() throws InterruptedException, IOException {
    //start embedded ZK server
    zkServer = new EmbeddedZKServer();
    zkServer.start();

    //simple configuration
    final Map storm_conf = Config.readStormConfig("src/main/resources/master_defaults.yaml");
    storm_conf.put(backtype.storm.Config.STORM_ZOOKEEPER_PORT, zkServer.port());

    String storm_home = testConf.stormHomePath();
    if (storm_home == null) {
        throw new RuntimeException("Storm home was not found." + "  Make sure to include storm in the PATH.");
    }
    LOG.info("Will be using storm found on PATH at " + storm_home);

    final YarnConfiguration hadoopConf = new YarnConfiguration();
    ApplicationAttemptId appAttemptId = Records.newRecord(ApplicationAttemptId.class);

    StormAMRMClient client = new StormAMRMClient(appAttemptId, storm_conf, hadoopConf);

    LOG.info("Storm server attaching to port: " + storm_conf.get(Config.MASTER_THRIFT_PORT));
    //launch server
    server = new MasterServer(storm_conf, client);
    new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                server.serve();
            } catch (Exception e) {
                LOG.error(e);
            }
        }
    }).start();
    while (!server.isServing())
        Thread.sleep(10);
    LOG.info("Storm server started at port: " + storm_conf.get(Config.MASTER_THRIFT_PORT));

    //launch client
    TestStormMaster.client = MasterClient.getConfiguredClient(storm_conf);
}

From source file:com.yss.yarn.launch.APPLaunch.java

License:Open Source License

private APPLaunch(ApplicationId appId, Map<String, String> yarnConf) {
    Configuration conf = null;

    if ((yarnConf != null) && !yarnConf.isEmpty()) {
        conf = new Configuration();

        for (String key : yarnConf.keySet()) {
            conf.set(key, yarnConf.get(key));
        }
    }

    _hadoopConf = (conf == null) ? new YarnConfiguration() : new YarnConfiguration(conf);
    _yarn = YarnClient.createYarnClient();
    _appId = appId;
    _yarn.init(_hadoopConf);
    _yarn.start();
}
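
The APPLaunch example above also uses the one-argument form, new YarnConfiguration(conf), which copies the caller's settings and then adds the YARN resources, whereas the no-argument form starts from the default resources alone. A small sketch of the difference (the property name my.custom.key and the class name are made up for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnConfigurationCopyExample {
    public static void main(String[] args) {
        // Plain Hadoop configuration carrying a caller-supplied override.
        Configuration base = new Configuration();
        base.set("my.custom.key", "my-value"); // hypothetical property, for illustration only

        // new YarnConfiguration(Configuration) keeps the caller's settings
        // and additionally loads yarn-default.xml and yarn-site.xml.
        YarnConfiguration withOverrides = new YarnConfiguration(base);
        System.out.println(withOverrides.get("my.custom.key")); // prints "my-value"

        // new YarnConfiguration() starts from the default resources only.
        YarnConfiguration defaults = new YarnConfiguration();
        System.out.println(defaults.get("my.custom.key")); // null unless set in a *-site.xml
    }
}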

From source file:com.zqh.hadoop.moya.core.yarn.ApplicationMaster.java

License:Apache License

public ApplicationMaster() throws Exception {
    // Set up the configuration and RPC
    conf = new YarnConfiguration();
}

From source file:edu.cmu.graphchi.toolkits.collaborative_filtering.yarn.ApplicationMaster.java

License:Apache License

/**
 * Parse command line options
 *
 * @param args Command line args
 * @return Whether init successful and run should be invoked
 * @throws ParseException
 * @throws IOException
 */
public boolean init(String[] args) throws ParseException, IOException {
    LOG.info("Inside Init");
    this.setup = new ProblemSetup(args);
    Configuration conf = new YarnConfiguration();
    this.yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);

    Map<String, String> envs = System.getenv();

    ContainerId containerId = ConverterUtils.toContainerId(envs.get(Environment.CONTAINER_ID.name()));
    appAttemptID = containerId.getApplicationAttemptId();

    if (!envs.containsKey(ApplicationConstants.APP_SUBMIT_TIME_ENV)) {
        throw new RuntimeException(ApplicationConstants.APP_SUBMIT_TIME_ENV + " not set in the environment");
    }
    if (!envs.containsKey(Environment.NM_HOST.name())) {
        throw new RuntimeException(Environment.NM_HOST.name() + " not set in the environment");
    }
    if (!envs.containsKey(Environment.NM_HTTP_PORT.name())) {
        throw new RuntimeException(Environment.NM_HTTP_PORT + " not set in the environment");
    }
    if (!envs.containsKey(Environment.NM_PORT.name())) {
        throw new RuntimeException(Environment.NM_PORT.name() + " not set in the environment");
    }
    LOG.info("Application master for app" + ", appId=" + appAttemptID.getApplicationId().getId()
            + ", clustertimestamp=" + appAttemptID.getApplicationId().getClusterTimestamp() + ", attemptId="
            + appAttemptID.getAttemptId());

    DataSetDescription dataDesc = new DataSetDescription();
    dataDesc.loadFromJsonFile(setup.dataMetadataFile);
    this.recommenders = RecommenderFactory.buildRecommenders(dataDesc, setup.paramFile, null, setup);

    return true;
}