Example usage for org.apache.hadoop.yarn.client.api YarnClient createYarnClient

Introduction

On this page you can find example usage for org.apache.hadoop.yarn.client.api YarnClient createYarnClient.

Prototype

@Public
public static YarnClient createYarnClient() 

Document

Create a new instance of YarnClient.
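Before the usage examples below, here is a minimal, self-contained sketch of the typical lifecycle around createYarnClient: create, init with a configuration, start, use, stop. The cluster-metrics call is only an illustrative use, not taken from any example on this page.

import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnClientLifecycle {
    public static void main(String[] args) throws Exception {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(new YarnConfiguration()); // reads yarn-site.xml from the classpath
        yarnClient.start(); // connects to the ResourceManager
        try {
            // one illustrative call: number of live NodeManagers
            System.out.println("NodeManagers: " + yarnClient.getYarnClusterMetrics().getNumNodeManagers());
        } finally {
            yarnClient.stop(); // always release the RM connection
        }
    }
}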

Usage

From source file:hadoop.yarn.distributedshell.DshellClient.java

License:Apache License

DshellClient(String appMasterMainClass, Configuration conf) {
    this.conf = conf;
    this.appMasterMainClass = appMasterMainClass;
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    opts = new Options();
    opts.addOption("appname", true, "Application Name. Default value - DistributedShell");
    opts.addOption("priority", true, "Application Priority. Default 0");
    opts.addOption("queue", true, "RM Queue in which this application is to be submitted");
    opts.addOption("timeout", true, "Application timeout in milliseconds");
    opts.addOption("master_memory", true,
            "Amount of memory in MB to be requested to run the application master");
    opts.addOption("master_vcores", true,
            "Amount of virtual cores to be requested to run the application master");
    opts.addOption("jar", true, "Jar file containing the application master");
    opts.addOption("container_files", true, "The files that containers will run. Comma separated");
    opts.addOption("container_archives", true, "The archives that containers will unzip. Comma separated");
    opts.addOption("shell_command", true, "Shell command to be executed by "
            + "the Application Master. Can only specify either --shell_command " + "or --shell_script");
    opts.addOption("shell_script", true, "Location of the shell script to be "
            + "executed. Can only specify either --shell_command or --shell_script");
    opts.addOption("shell_args", true,
            "Command line args for the shell script." + "Multiple args can be separated by empty space.");
    opts.getOption("shell_args").setArgs(Option.UNLIMITED_VALUES);
    opts.addOption("shell_env", true, "Environment for shell script. Specified as env_key=env_val pairs");
    opts.addOption("shell_cmd_priority", true, "Priority for the shell command containers");
    opts.addOption("container_memory", true, "Amount of memory in MB to be requested to run the shell command");
    opts.addOption("container_vcores", true,
            "Amount of virtual cores to be requested to run the shell command");
    // opts.addOption("num_containers", true,
    // "No. of containers on which the shell command needs to be executed");//container?1
    opts.addOption("log_properties", true, "log4j.properties file");
    opts.addOption("keep_containers_across_application_attempts", false,
            "Flag to indicate whether to keep containers across application attempts."
                    + " If the flag is true, running containers will not be killed when"
                    + " application attempt fails and these containers will be retrieved by"
                    + " the new application attempt ");
    opts.addOption("debug", false, "Dump out debug information");
    opts.addOption("help", false, "Print usage");
}
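The options built in this constructor are presumably parsed elsewhere in the class with commons-cli. As a hedged sketch of what that parsing step could look like (the init method, the GnuParser choice, and the default values here are assumptions, not taken from DshellClient):

// Hypothetical parsing step for the options built above; not from the original source
public boolean init(String[] args) throws ParseException {
    CommandLine cliParser = new GnuParser().parse(opts, args);
    if (cliParser.hasOption("help")) {
        new HelpFormatter().printHelp("DshellClient", opts);
        return false;
    }
    // pull a couple of values to show the pattern; defaults are illustrative
    String appName = cliParser.getOptionValue("appname", "DistributedShell");
    int amMemory = Integer.parseInt(cliParser.getOptionValue("master_memory", "10"));
    return true;
}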

From source file:husky.client.HuskyYarnClient.java

License:Apache License

public HuskyYarnClient() throws IOException {
    mYarnConf = new YarnConfiguration();
    mFileSystem = FileSystem.get(mYarnConf);
    mYarnClient = YarnClient.createYarnClient();
    mYarnClient.init(mYarnConf);
}
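Note that the constructor stops at init. Before mYarnClient can talk to the ResourceManager it still has to be started, and it should be stopped on shutdown; a small sketch of the assumed surrounding methods (not shown in the original file):

// Assumed companion methods; YarnClient.start()/stop() are the real service calls
public void start() {
    mYarnClient.start(); // required before RM calls such as createApplication()
}

public void stop() {
    mYarnClient.stop(); // releases the RM connection
}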

From source file:hws.core.JobClient.java

License:Apache License

public void run(String[] args) throws Exception {
    //final String command = args[0];
    //final int n = Integer.valueOf(args[1]);
    //final Path jarPath = new Path(args[2]);
    Options options = new Options();
    /*
    options.addOption(OptionBuilder.withLongOpt("jar")
            .withDescription("Jar path")
            .hasArg()
            .withArgName("JarPath")
            .create());
    options.addOption(OptionBuilder.withLongOpt("scheduler")
            .withDescription("Scheduler class name")
            .hasArg()
            .withArgName("ClassName")
            .create());
    */
    options.addOption(OptionBuilder.withLongOpt("zk-servers")
            .withDescription("List of the ZooKeeper servers").hasArgs().withArgName("zkAddrs").create("zks"));
    //options.addOption("l", "list", false, "list modules");
    options.addOption(OptionBuilder.withLongOpt("load").withDescription("load new modules").hasArgs()
            .withArgName("XMLFiles").create());
    /*
    options.addOption(OptionBuilder.withLongOpt("remove")
            .withDescription("remove modules")
            .hasArgs()
            .withArgName("ModuleNames")
            .create("rm"));
    */
    CommandLineParser parser = new BasicParser();
    CommandLine cmd = parser.parse(options, args);

    //Path jarPath = null;
    //String schedulerClassName = null;
    String[] xmlFileNames = null;
    //String []moduleNames = null;
    String zksArgs = "";
    String[] zkServers = null;
    if (cmd.hasOption("zks")) {
        zksArgs = "-zks";
        zkServers = cmd.getOptionValues("zks");
        for (String zks : zkServers) {
            zksArgs += " " + zks;
        }
    }

    //Logger setup
    //FSDataOutputStream writer = FileSystem.get(conf).create(new Path("hdfs:///hws/apps/"+appIdStr+"/logs/jobClient.log"));
    //Logger.addOutputStream(writer);

    /*if(cmd.hasOption("l")){
       LOG.warn("Argument --list (-l) is not supported yet.");
    }
    if(cmd.hasOption("jar")){
       jarPath = new Path(cmd.getOptionValue("jar")); 
    }
    if(cmd.hasOption("scheduler")){
       schedulerClassName = cmd.getOptionValue("scheduler");
    }*/
    if (cmd.hasOption("load")) {
        xmlFileNames = cmd.getOptionValues("load");
    } /*else if(cmd.hasOption("rm")){
        moduleNames = cmd.getOptionValues("rm");
      }*/

    //LOG.info("Jar-Path "+jarPath);
    if (xmlFileNames != null) {
        String paths = "";
        for (String path : xmlFileNames) {
            paths += path + "; ";
        }
        LOG.info("Load XMLs: " + paths);
    }
    /*if(moduleNames!=null){
       String modules = "";
       for(String module: moduleNames){
          modules += module+"; ";
       }
       LOG.info("remove: "+modules);
    }*/
    // Create yarnClient
    YarnConfiguration conf = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();

    System.out.println("LOG Path: " + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();

    ZkClient zk = new ZkClient(zkServers[0]); //TODO select a ZooKeeper server
    if (!zk.exists("/hadoop-watershed")) {
        zk.createPersistent("/hadoop-watershed", "");
    }
    zk.createPersistent("/hadoop-watershed/" + appId.toString(), "");

    FileSystem fs = FileSystem.get(conf);

    LOG.info("Collecting files to upload");
    fs.mkdirs(new Path("hdfs:///hws/apps/" + appId.toString()));
    fs.mkdirs(new Path("hdfs:///hws/apps/" + appId.toString() + "/logs"));

    ModulePipeline modulePipeline = ModulePipeline.fromXMLFiles(xmlFileNames);
    LOG.info("Uploading files to HDFS");
    for (String path : modulePipeline.files()) {
        uploadFile(fs, new File(path), appId);
    }
    LOG.info("Upload finished");

    String modulePipelineJson = Json.dumps(modulePipeline);
    String modulePipelineBase64 = Base64.encodeBase64String(StringUtils.getBytesUtf8(modulePipelineJson))
            .replaceAll("\\s", "");
    LOG.info("ModulePipeline: " + modulePipelineJson);
    //LOG.info("ModulePipeline: "+modulePipelineBase64);
    amContainer.setCommands(Collections.singletonList("$JAVA_HOME/bin/java" + " -Xmx256M"
            + " hws.core.JobMaster" + " -aid " + appId.toString() + " --load " + modulePipelineBase64 + " "
            + zksArgs + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout" + " 2>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));

    // Setup jar for ApplicationMaster
    //LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    //setupAppMasterJar(jarPath, appMasterJar);
    //amContainer.setLocalResources(Collections.singletonMap("hws.jar", appMasterJar));

    LOG.info("Listing files for YARN-Watershed");
    RemoteIterator<LocatedFileStatus> filesIterator = fs.listFiles(new Path("hdfs:///hws/bin/"), false);
    Map<String, LocalResource> resources = new HashMap<String, LocalResource>();
    LOG.info("Files setup as resource");
    while (filesIterator.hasNext()) {
        LocatedFileStatus fileStatus = filesIterator.next();
        // Setup jar for ApplicationMaster
        LocalResource containerJar = Records.newRecord(LocalResource.class);
        ContainerUtils.setupContainerJar(fs, fileStatus.getPath(), containerJar);
        resources.put(fileStatus.getPath().getName(), containerJar);
    }
    LOG.info("container resource setup");
    amContainer.setLocalResources(resources);

    fs.close(); //closing FileSystem interface

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    ContainerUtils.setupContainerEnv(appMasterEnv, conf);
    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(256);
    capability.setVirtualCores(1);

    // Finally, set-up ApplicationSubmissionContext for the application
    //ApplicationSubmissionContext appContext = 
    //app.getApplicationSubmissionContext();
    appContext.setApplicationName("Hadoop-Watershed"); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue("default"); // queue 

    // Submit application
    LOG.info("Submitting application " + appId);
    yarnClient.submitApplication(appContext);

    LOG.info("Waiting for containers to finish");
    zk.waitUntilExists("/hadoop-watershed/" + appId.toString() + "/done", TimeUnit.MILLISECONDS, 250);
    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();
    while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED
            && appState != YarnApplicationState.FAILED) {
        Thread.sleep(100);
        appReport = yarnClient.getApplicationReport(appId);
        appState = appReport.getYarnApplicationState();
    }

    System.out.println("Application " + appId + " finished with" + " state " + appState + " at "
            + appReport.getFinishTime());

    System.out.println("deleting " + appId.toString() + " znode");
    zk.deleteRecursive("/hadoop-watershed/" + appId.toString()); //TODO remove app folder from ZooKeeper
}
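One caveat about the polling loop above: YarnApplicationState.FINISHED only means the ApplicationMaster exited, not that the job succeeded. Callers usually also check the final application status, for example with a helper like this (an illustrative addition, not part of JobClient):

// Illustrative helper; FINISHED alone does not imply success
static boolean succeeded(ApplicationReport report) {
    return report.getYarnApplicationState() == YarnApplicationState.FINISHED
            && report.getFinalApplicationStatus() == FinalApplicationStatus.SUCCEEDED;
}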

From source file:io.hops.hopsworks.api.zeppelin.rest.InterpreterRestApi.java

License:Apache License

private List<JobAdministration.YarnApplicationReport> fetchJobs() {
    JobAdministration jobAdmin = new JobAdministration();
    List<JobAdministration.YarnApplicationReport> reports = new ArrayList<>();
    YarnClient client = YarnClient.createYarnClient();
    Configuration conf = settings.getConfiguration();
    client.init(conf);
    client.start();
    try {
        //Create our custom YarnApplicationReport Pojo
        for (ApplicationReport appReport : client.getApplications(PREDICATE)) {
            reports.add(jobAdmin.new YarnApplicationReport(appReport.getApplicationId().toString(),
                    appReport.getName(), appReport.getUser(), appReport.getStartTime(),
                    appReport.getFinishTime(), appReport.getApplicationId().getClusterTimestamp(),
                    appReport.getApplicationId().getId(), appReport.getYarnApplicationState().name()));
        }
    } catch (YarnException | IOException ex) {
        logger.error("", ex);
    }
    return reports;
}

From source file:io.hops.hopsworks.api.zeppelin.rest.InterpreterRestApi.java

License:Apache License

private List<JobAdministration.YarnApplicationReport> fetchJobs(String username) {
    JobAdministration jobAdmin = new JobAdministration();
    List<JobAdministration.YarnApplicationReport> reports = new ArrayList<>();
    YarnClient client = YarnClient.createYarnClient();
    Configuration conf = settings.getConfiguration();
    client.init(conf);
    client.start();
    try {
        //Create our custom YarnApplicationReport Pojo
        for (ApplicationReport appReport : client.getApplications(PREDICATE)) {
            if (username.equals(appReport.getUser())) {
                reports.add(jobAdmin.new YarnApplicationReport(appReport.getApplicationId().toString(),
                        appReport.getName(), appReport.getUser(), appReport.getStartTime(),
                        appReport.getFinishTime(), appReport.getApplicationId().getClusterTimestamp(),
                        appReport.getApplicationId().getId(), appReport.getYarnApplicationState().name()));
            }
        }
    } catch (YarnException | IOException ex) {
        logger.error("", ex);
    }
    return reports;
}
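The PREDICATE constant is defined elsewhere in this class. YarnClient.getApplications has overloads that filter by application type or by state; assuming a state filter is used here, its definition could look like this:

// Hypothetical definition of PREDICATE; getApplications(EnumSet<YarnApplicationState>) is a real overload
private static final EnumSet<YarnApplicationState> PREDICATE = EnumSet.of(YarnApplicationState.ACCEPTED,
        YarnApplicationState.RUNNING);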

From source file:io.hops.hopsworks.common.jobs.flink.AbstractYarnClusterDescriptor.java

License:Apache License

/**
 * Gets a Hadoop Yarn client.
 *
 * @return Returns a YarnClient which has to be shut down manually
 */
protected YarnClient getYarnClient() {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    return yarnClient;
}
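Because the returned client must be shut down manually, call sites conventionally wrap the work in try/finally so stop() runs even if the work throws:

// Typical call-site pattern for getYarnClient()
YarnClient yarnClient = getYarnClient();
try {
    // use the client, e.g. yarnClient.getApplications()
} finally {
    yarnClient.stop();
}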

From source file:io.hops.hopsworks.common.yarn.YarnClientWrapper.java

License:Open Source License

public YarnClientWrapper get() {
    if (yarnClient == null) {
        yarnClient = YarnClient.createYarnClient();
        yarnClient.init(conf);
        yarnClient.start();
    }

    return this;
}
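The lazy get() above implies a matching teardown. A hedged sketch of what the wrapper's close side could look like (this method is an assumption, not part of the original class):

// Hypothetical counterpart to get(); not from the original source
public void close() {
    if (yarnClient != null) {
        yarnClient.stop(); // stopping an already-stopped service is a no-op
        yarnClient = null;
    }
}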

From source file:io.hops.tensorflow.Client.java

License:Apache License

Client(String appMasterMainClass, Configuration conf) {
    this.conf = conf;
    this.appMasterMainClass = appMasterMainClass;
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    opts = createOptions();
}

From source file:MasteringYarn.DistributedShellClient.java

public void run(String[] args) throws YarnException, IOException, InterruptedException {

    YarnConfiguration yarnConfiguration = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(yarnConfiguration);
    yarnClient.start();

    YarnClientApplication yarnClientApplication = yarnClient.createApplication();

    //container launch context for application master
    ContainerLaunchContext applicationMasterContainer = Records.newRecord(ContainerLaunchContext.class);
    applicationMasterContainer.setCommands(
            Collections.singletonList("$JAVA_HOME/bin/java MasteringYarn.DistributedShellApplicationMaster "
                    + args[2] + " " + args[3] + " " + "1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
                    + "/stdout " + "2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));

    LocalResource applicationMasterJar = Records.newRecord(LocalResource.class);
    setupJarFileForApplicationMaster(new Path(args[1]), applicationMasterJar);
    applicationMasterContainer
            .setLocalResources(Collections.singletonMap("MasteringYarn.jar", applicationMasterJar));

    Map<String, String> appMasterEnv = new HashMap<>();
    setupEnvironmentForApplicationMaster(appMasterEnv);
    applicationMasterContainer.setEnvironment(appMasterEnv);

    Resource resources = Records.newRecord(Resource.class);
    resources.setVirtualCores(1);
    resources.setMemory(100);

    ApplicationSubmissionContext submissionContext = yarnClientApplication.getApplicationSubmissionContext();
    submissionContext.setAMContainerSpec(applicationMasterContainer);
    submissionContext.setQueue("default");
    submissionContext.setApplicationName("MasteringYarn");
    submissionContext.setResource(resources);

    ApplicationId applicationId = submissionContext.getApplicationId();
    System.out.println("Submitting " + applicationId);
    yarnClient.submitApplication(submissionContext);
    System.out.println("Post submission " + applicationId);

    ApplicationReport applicationReport;
    YarnApplicationState applicationState;

    do {
        Thread.sleep(1000);
        applicationReport = yarnClient.getApplicationReport(applicationId);
        applicationState = applicationReport.getYarnApplicationState();

        System.out.println("Diagnostics " + applicationReport.getDiagnostics());

    } while (applicationState != YarnApplicationState.FAILED
            && applicationState != YarnApplicationState.FINISHED
            && applicationState != YarnApplicationState.KILLED);

    System.out.println("Application finished with " + applicationState + " state and id " + applicationId);
}
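setupJarFileForApplicationMaster and setupEnvironmentForApplicationMaster are helpers defined elsewhere in this file. For orientation, here is a hedged sketch of the jar helper modeled on the stock YARN distributed-shell pattern (the body is an assumption, not the original implementation):

// Hypothetical body, modeled on the standard distributed-shell LocalResource setup
private void setupJarFileForApplicationMaster(Path jarPath, LocalResource jar) throws IOException {
    FileStatus stat = FileSystem.get(new YarnConfiguration()).getFileStatus(jarPath);
    jar.setResource(ConverterUtils.getYarnUrlFromPath(jarPath));
    jar.setSize(stat.getLen());
    jar.setTimestamp(stat.getModificationTime());
    jar.setType(LocalResourceType.FILE);
    jar.setVisibility(LocalResourceVisibility.PUBLIC);
}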

From source file:ml.shifu.guagua.yarn.GuaguaYarnClient.java

License:Apache License

public boolean init(String[] args) {
    try {
        this.yarnClient = YarnClient.createYarnClient();
        this.yarnClient.init(getConf());
    } catch (Throwable e) {
        LOG.error("Error in yarn client initiliazation.", e);
        return false;
    }
    return true;
}
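As with the other examples, init here only initializes the service; the client still has to be started before any ResourceManager call. An assumed follow-up inside the same class:

// Hypothetical follow-up; start() is still required before RM calls such as createApplication()
public void start() {
    this.yarnClient.start();
}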