Example usage for org.apache.hadoop.yarn.api.records ApplicationReport getFinishTime

List of usage examples for org.apache.hadoop.yarn.api.records ApplicationReport getFinishTime

Introduction

On this page you can find example usage for org.apache.hadoop.yarn.api.records ApplicationReport getFinishTime.

Prototype

@Public
@Stable
public abstract long getFinishTime();

Document

Get the finish time of the application.
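
A minimal sketch of typical usage (assuming an already-started YarnClient and a known ApplicationId; the helper name and polling interval are illustrative, not taken from the examples below). getFinishTime() returns an epoch-millisecond timestamp, so callers usually poll until the application reaches a terminal state before reading it:

import java.io.IOException;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.exceptions.YarnException;

// Illustrative helper: wait for a terminal state, then return the finish time.
static long waitForFinishTime(YarnClient yarnClient, ApplicationId appId)
        throws IOException, YarnException, InterruptedException {
    ApplicationReport report = yarnClient.getApplicationReport(appId);
    YarnApplicationState state = report.getYarnApplicationState();
    while (state != YarnApplicationState.FINISHED && state != YarnApplicationState.KILLED
            && state != YarnApplicationState.FAILED) {
        Thread.sleep(1000); // re-poll periodically, as the examples below do
        report = yarnClient.getApplicationReport(appId);
        state = report.getYarnApplicationState();
    }
    return report.getFinishTime(); // finish time in milliseconds since the epoch
}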

Usage

From source file:com.github.sakserv.minicluster.simpleyarnapp.Client.java

License:Apache License

public void run(String[] args) throws Exception {
    final String command = args[0];
    final int n = Integer.valueOf(args[1]);
    final Path jarPath = new Path(args[2]);
    final String resourceManagerAddress = args[3];
    final String resourceManagerHostname = args[4];
    final String resourceManagerSchedulerAddress = args[5];
    final String resourceManagerResourceTrackerAddress = args[6];

    // Create yarnClient
    YarnConfiguration conf = new YarnConfiguration();
    conf.set("yarn.resourcemanager.address", resourceManagerAddress);
    conf.set("yarn.resourcemanager.hostname", resourceManagerHostname);
    conf.set("yarn.resourcemanager.scheduler.address", resourceManagerSchedulerAddress);
    conf.set("yarn.resourcemanager.resource-tracker.address", resourceManagerResourceTrackerAddress);
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    amContainer.setCommands(Collections.singletonList("$JAVA_HOME/bin/java" + " -Xmx256M"
            + " com.hortonworks.simpleyarnapp.ApplicationMaster" + " " + command + " " + String.valueOf(n) + " "
            + resourceManagerAddress + " " + resourceManagerHostname + " " + resourceManagerSchedulerAddress
            + " " + resourceManagerResourceTrackerAddress + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
            + "/stdout" + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));

    // Setup jar for ApplicationMaster
    LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    setupAppMasterJar(jarPath, appMasterJar);
    amContainer.setLocalResources(Collections.singletonMap("simple-yarn-app-1.1.0.jar", appMasterJar));

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    setupAppMasterEnv(appMasterEnv);
    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(256);
    capability.setVirtualCores(1);

    // Finally, set-up ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    appContext.setApplicationName("simple-yarn-app"); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue("default"); // queue

    // Submit application
    ApplicationId appId = appContext.getApplicationId();
    System.out.println("Submitting application " + appId);
    yarnClient.submitApplication(appContext);

    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();
    while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED
            && appState != YarnApplicationState.FAILED) {
        Thread.sleep(100);
        appReport = yarnClient.getApplicationReport(appId);
        appState = appReport.getYarnApplicationState();
    }

    System.out.println("Application " + appId + " finished with" + " state " + appState + " at "
            + appReport.getFinishTime());

}
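
The finish time printed above is a raw long holding milliseconds since the epoch. A small optional sketch (using java.time; not part of the original example) for rendering it as a readable timestamp:

import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;

// Illustrative formatting helper for the epoch-millisecond value returned by
// ApplicationReport#getFinishTime().
static String formatFinishTime(long finishTimeMillis) {
    return DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")
            .withZone(ZoneId.systemDefault())
            .format(Instant.ofEpochMilli(finishTimeMillis));
}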

From source file:com.ibm.bi.dml.yarn.DMLYarnClient.java

License:Open Source License

/**
 * Method to launch the DML YARN app master and execute the given DML script
 * with the given configuration and jar file.
 * 
 * NOTE: on launching the YARN app master, we do not explicitly probe whether we
 *     are running on a YARN or MR1 cluster. On MR1, the class YarnConfiguration
 *     is not found and a ClassNotFoundException is raised. On any exception we
 *     fall back to running CP directly in the client process.
 * 
 * @return true if the DML program successfully executed as a YARN app master
 * @throws IOException 
 */
protected boolean launchDMLYarnAppmaster() throws IOException, DMLScriptException {
    boolean ret = false;
    String hdfsWD = null;

    try {
        Timing time = new Timing(true);

        // load yarn configuration
        YarnConfiguration yconf = new YarnConfiguration();

        // create yarn client
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(yconf);
        yarnClient.start();

        // create application and get the ApplicationID
        YarnClientApplication app = yarnClient.createApplication();
        ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
        ApplicationId appId = appContext.getApplicationId();
        LOG.debug("Created application (applicationID: " + appId + ")");

        // prepare hdfs working directory via ApplicationID
        // copy script, config, jar file to hdfs
        hdfsWD = DMLAppMasterUtils.constructHDFSWorkingDir(_dmlConfig, appId);
        copyResourcesToHdfsWorkingDir(yconf, hdfsWD);

        //construct command line argument
        String command = constructAMCommand(_args, _dmlConfig);
        LOG.debug("Constructed application master command: \n" + command);

        // set up the container launch context for the application master
        ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
        amContainer.setCommands(Collections.singletonList(command));
        amContainer.setLocalResources(constructLocalResourceMap(yconf));
        amContainer.setEnvironment(constructEnvionmentMap(yconf));

        // Set up resource type requirements for ApplicationMaster
        int memHeap = _dmlConfig.getIntValue(DMLConfig.YARN_APPMASTERMEM);
        int memAlloc = (int) computeMemoryAllocation(memHeap);
        Resource capability = Records.newRecord(Resource.class);
        capability.setMemory(memAlloc);
        capability.setVirtualCores(NUM_CORES);
        LOG.debug("Requested application resources: memory=" + memAlloc + ", vcores=" + NUM_CORES);

        // Finally, set-up ApplicationSubmissionContext for the application
        String qname = _dmlConfig.getTextValue(DMLConfig.YARN_APPQUEUE);
        appContext.setApplicationName(APPMASTER_NAME); // application name
        appContext.setAMContainerSpec(amContainer);
        appContext.setResource(capability);
        appContext.setQueue(qname); // queue
        LOG.debug("Configured application meta data: name=" + APPMASTER_NAME + ", queue=" + qname);

        // submit application (non-blocking)
        yarnClient.submitApplication(appContext);

        // Check application status periodically (and output web ui address)
        ApplicationReport appReport = yarnClient.getApplicationReport(appId);
        LOG.info("Application tracking-URL: " + appReport.getTrackingUrl());
        YarnApplicationState appState = appReport.getYarnApplicationState();
        YarnApplicationState oldState = appState;
        LOG.info("Application state: " + appState);
        while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED
                && appState != YarnApplicationState.FAILED) {
            Thread.sleep(APP_STATE_INTERVAL); //wait for 200ms
            appReport = yarnClient.getApplicationReport(appId);
            appState = appReport.getYarnApplicationState();
            if (appState != oldState) {
                oldState = appState;
                LOG.info("Application state: " + appState);
            }
        }
        //check final status (failed or succeeded)
        FinalApplicationStatus finalState = appReport.getFinalApplicationStatus();
        LOG.info("Application final status: " + finalState);

        //show application and total runtime
        double appRuntime = (double) (appReport.getFinishTime() - appReport.getStartTime()) / 1000;
        LOG.info("Application runtime: " + appRuntime + " sec.");
        LOG.info("Total runtime: " + String.format("%.3f", time.stop() / 1000) + " sec.");

        //raise a script-level error in case of failed final status
        if (finalState != FinalApplicationStatus.SUCCEEDED) {
            //propagate script-level stop call message
            String stop_msg = readMessageToHDFSWorkingDir(_dmlConfig, yconf, appId);
            if (stop_msg != null)
                throw new DMLScriptException(stop_msg);

            //generic failure message
            throw new DMLRuntimeException(
                    "DML yarn app master finished with final status: " + finalState + ".");
        }

        ret = true;
    } catch (DMLScriptException ex) {
        //rethrow DMLScriptException to propagate stop call
        throw ex;
    } catch (Exception ex) {
        LOG.error("Failed to run DML yarn app master.", ex);
        ret = false;
    } finally {
        //cleanup working directory
        if (hdfsWD != null)
            MapReduceTool.deleteFileIfExistOnHDFS(hdfsWD);
    }

    return ret;
}

From source file:eu.stratosphere.yarn.Client.java

License:Apache License

public void run(String[] args) throws Exception {

    if (UserGroupInformation.isSecurityEnabled()) {
        throw new RuntimeException("Stratosphere YARN client does not have security support right now."
                + "File a bug, we will fix it asap");
    }
    //Utils.logFilesInCurrentDirectory(LOG);
    //
    //   Command Line Options
    //
    Options options = new Options();
    options.addOption(VERBOSE);
    options.addOption(STRATOSPHERE_CONF_DIR);
    options.addOption(STRATOSPHERE_JAR);
    options.addOption(JM_MEMORY);
    options.addOption(TM_MEMORY);
    options.addOption(TM_CORES);
    options.addOption(CONTAINER);
    options.addOption(GEN_CONF);
    options.addOption(QUEUE);
    options.addOption(QUERY);
    options.addOption(SHIP_PATH);

    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;
    try {
        cmd = parser.parse(options, args);
    } catch (MissingOptionException moe) {
        System.out.println(moe.getMessage());
        printUsage();
        System.exit(1);
    }

    if (System.getProperty("log4j.configuration") == null) {
        Logger root = Logger.getRootLogger();
        root.removeAllAppenders();
        PatternLayout layout = new PatternLayout("%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n");
        ConsoleAppender appender = new ConsoleAppender(layout, "System.err");
        root.addAppender(appender);
        if (cmd.hasOption(VERBOSE.getOpt())) {
            root.setLevel(Level.DEBUG);
            LOG.debug("CLASSPATH: " + System.getProperty("java.class.path"));
        } else {
            root.setLevel(Level.INFO);
        }
    }

    // Jar Path
    Path localJarPath;
    if (cmd.hasOption(STRATOSPHERE_JAR.getOpt())) {
        String userPath = cmd.getOptionValue(STRATOSPHERE_JAR.getOpt());
        if (!userPath.startsWith("file://")) {
            userPath = "file://" + userPath;
        }
        localJarPath = new Path(userPath);
    } else {
        localJarPath = new Path(
                "file://" + Client.class.getProtectionDomain().getCodeSource().getLocation().getPath());
    }

    if (cmd.hasOption(GEN_CONF.getOpt())) {
        LOG.info("Placing default configuration in current directory");
        File outFile = generateDefaultConf(localJarPath);
        LOG.info("File written to " + outFile.getAbsolutePath());
        System.exit(0);
    }

    // Conf Path 
    Path confPath = null;
    String confDirPath = "";
    if (cmd.hasOption(STRATOSPHERE_CONF_DIR.getOpt())) {
        confDirPath = cmd.getOptionValue(STRATOSPHERE_CONF_DIR.getOpt()) + "/";
        File confFile = new File(confDirPath + CONFIG_FILE_NAME);
        if (!confFile.exists()) {
            LOG.fatal("Unable to locate configuration file in " + confFile);
            System.exit(1);
        }
        confPath = new Path(confFile.getAbsolutePath());
    } else {
        System.out.println("No configuration file has been specified");

        // no configuration path given.
        // -> see if there is one in the current directory
        File currDir = new File(".");
        File[] candidates = currDir.listFiles(new FilenameFilter() {
            @Override
            public boolean accept(final File dir, final String name) {
                return name != null && name.endsWith(".yaml");
            }
        });
        if (candidates == null || candidates.length == 0) {
            System.out.println(
                    "No configuration file has been found in current directory.\n" + "Copying default.");
            File outFile = generateDefaultConf(localJarPath);
            confPath = new Path(outFile.toURI());
        } else {
            if (candidates.length > 1) {
                System.out.println("Multiple .yaml configuration files were found in the current directory\n"
                        + "Please specify one explicitly");
                System.exit(1);
            } else if (candidates.length == 1) {
                confPath = new Path(candidates[0].toURI());
            }
        }
    }
    List<File> shipFiles = new ArrayList<File>();
    // path to directory to ship
    if (cmd.hasOption(SHIP_PATH.getOpt())) {
        String shipPath = cmd.getOptionValue(SHIP_PATH.getOpt());
        File shipDir = new File(shipPath);
        if (shipDir.isDirectory()) {
            shipFiles = new ArrayList<File>(Arrays.asList(shipDir.listFiles(new FilenameFilter() {
                @Override
                public boolean accept(File dir, String name) {
                    return !(name.equals(".") || name.equals(".."));
                }
            })));
        } else {
            LOG.warn("Ship directory is not a directory!");
        }
    }
    boolean hasLog4j = false;
    //check if there is a log4j file
    if (confDirPath.length() > 0) {
        File l4j = new File(confDirPath + "/log4j.properties");
        if (l4j.exists()) {
            shipFiles.add(l4j);
            hasLog4j = true;
        }
    }

    // queue
    String queue = "default";
    if (cmd.hasOption(QUEUE.getOpt())) {
        queue = cmd.getOptionValue(QUEUE.getOpt());
    }

    // JobManager Memory
    int jmMemory = 512;
    if (cmd.hasOption(JM_MEMORY.getOpt())) {
        jmMemory = Integer.valueOf(cmd.getOptionValue(JM_MEMORY.getOpt()));
    }

    // Task Managers memory
    int tmMemory = 1024;
    if (cmd.hasOption(TM_MEMORY.getOpt())) {
        tmMemory = Integer.valueOf(cmd.getOptionValue(TM_MEMORY.getOpt()));
    }

    // Task Managers vcores
    int tmCores = 1;
    if (cmd.hasOption(TM_CORES.getOpt())) {
        tmCores = Integer.valueOf(cmd.getOptionValue(TM_CORES.getOpt()));
    }
    Utils.getStratosphereConfiguration(confPath.toUri().getPath());
    int jmPort = GlobalConfiguration.getInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, 0);
    if (jmPort == 0) {
        LOG.warn("Unable to find job manager port in configuration!");
        jmPort = ConfigConstants.DEFAULT_JOB_MANAGER_IPC_PORT;
    }
    conf = Utils.initializeYarnConfiguration();

    // initialize HDFS
    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem 
    // Create a local resource to point to the destination jar path 
    final FileSystem fs = FileSystem.get(conf);

    if (fs.getScheme().startsWith("file")) {
        LOG.warn("The file system scheme is '" + fs.getScheme() + "'. This indicates that the "
                + "specified Hadoop configuration path is wrong and the sytem is using the default Hadoop configuration values."
                + "The Stratosphere YARN client needs to store its files in a distributed file system");
    }

    // Create yarnClient
    final YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Query cluster for metrics
    if (cmd.hasOption(QUERY.getOpt())) {
        showClusterMetrics(yarnClient);
    }
    if (!cmd.hasOption(CONTAINER.getOpt())) {
        LOG.fatal("Missing required argument " + CONTAINER.getOpt());
        printUsage();
        yarnClient.stop();
        System.exit(1);
    }

    // TM Count
    final int taskManagerCount = Integer.valueOf(cmd.getOptionValue(CONTAINER.getOpt()));

    System.out.println("Using values:");
    System.out.println("\tContainer Count = " + taskManagerCount);
    System.out.println("\tJar Path = " + localJarPath.toUri().getPath());
    System.out.println("\tConfiguration file = " + confPath.toUri().getPath());
    System.out.println("\tJobManager memory = " + jmMemory);
    System.out.println("\tTaskManager memory = " + tmMemory);
    System.out.println("\tTaskManager cores = " + tmCores);

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    Resource maxRes = appResponse.getMaximumResourceCapability();
    if (tmMemory > maxRes.getMemory() || tmCores > maxRes.getVirtualCores()) {
        LOG.fatal("The cluster does not have the requested resources for the TaskManagers available!\n"
                + "Maximum Memory: " + maxRes.getMemory() + ", Maximum Cores: " + tmCores);
        yarnClient.stop();
        System.exit(1);
    }
    if (jmMemory > maxRes.getMemory()) {
        LOG.fatal("The cluster does not have the requested resources for the JobManager available!\n"
                + "Maximum Memory: " + maxRes.getMemory());
        yarnClient.stop();
        System.exit(1);
    }
    int totalMemoryRequired = jmMemory + tmMemory * taskManagerCount;
    ClusterResourceDescription freeClusterMem = getCurrentFreeClusterResources(yarnClient);
    if (freeClusterMem.totalFreeMemory < totalMemoryRequired) {
        LOG.fatal("This YARN session requires " + totalMemoryRequired + "MB of memory in the cluster. "
                + "There are currently only " + freeClusterMem.totalFreeMemory + "MB available.");
        yarnClient.stop();
        System.exit(1);
    }
    if (tmMemory > freeClusterMem.containerLimit) {
        LOG.fatal("The requested amount of memory for the TaskManagers (" + tmMemory + "MB) is more than "
                + "the largest possible YARN container: " + freeClusterMem.containerLimit);
        yarnClient.stop();
        System.exit(1);
    }
    if (jmMemory > freeClusterMem.containerLimit) {
        LOG.fatal("The requested amount of memory for the JobManager (" + jmMemory + "MB) is more than "
                + "the largest possible YARN container: " + freeClusterMem.containerLimit);
        yarnClient.stop();
        System.exit(1);
    }

    // respect custom JVM options in the YAML file
    final String javaOpts = GlobalConfiguration.getString(ConfigConstants.STRATOSPHERE_JVM_OPTIONS, "");

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    String amCommand = "$JAVA_HOME/bin/java" + " -Xmx" + Utils.calculateHeapSize(jmMemory) + "M " + javaOpts;
    if (hasLog4j) {
        amCommand += " -Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
                + "/jobmanager-log4j.log\" -Dlog4j.configuration=file:log4j.properties";
    }
    amCommand += " eu.stratosphere.yarn.ApplicationMaster" + " " + " 1>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stdout.log" + " 2>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stderr.log";
    amContainer.setCommands(Collections.singletonList(amCommand));

    System.err.println("amCommand=" + amCommand);

    // Set-up ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    final ApplicationId appId = appContext.getApplicationId();

    // Setup jar for ApplicationMaster
    LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    LocalResource stratosphereConf = Records.newRecord(LocalResource.class);
    Path remotePathJar = Utils.setupLocalResource(conf, fs, appId.toString(), localJarPath, appMasterJar,
            fs.getHomeDirectory());
    Path remotePathConf = Utils.setupLocalResource(conf, fs, appId.toString(), confPath, stratosphereConf,
            fs.getHomeDirectory());
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(2);
    localResources.put("stratosphere.jar", appMasterJar);
    localResources.put("stratosphere-conf.yaml", stratosphereConf);

    // setup security tokens (code from apache storm)
    final Path[] paths = new Path[3 + shipFiles.size()];
    StringBuffer envShipFileList = new StringBuffer();
    // upload ship files
    for (int i = 0; i < shipFiles.size(); i++) {
        File shipFile = shipFiles.get(i);
        LocalResource shipResources = Records.newRecord(LocalResource.class);
        Path shipLocalPath = new Path("file://" + shipFile.getAbsolutePath());
        paths[3 + i] = Utils.setupLocalResource(conf, fs, appId.toString(), shipLocalPath, shipResources,
                fs.getHomeDirectory());
        localResources.put(shipFile.getName(), shipResources);

        envShipFileList.append(paths[3 + i]);
        if (i + 1 < shipFiles.size()) {
            envShipFileList.append(',');
        }
    }

    paths[0] = remotePathJar;
    paths[1] = remotePathConf;
    paths[2] = new Path(fs.getHomeDirectory(), ".stratosphere/" + appId.toString() + "/");
    FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
    fs.setPermission(paths[2], permission); // set permission for path.
    Utils.setTokensFor(amContainer, paths, this.conf);

    amContainer.setLocalResources(localResources);
    fs.close();

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    Utils.setupEnv(conf, appMasterEnv);
    // set configuration values
    appMasterEnv.put(Client.ENV_TM_COUNT, String.valueOf(taskManagerCount));
    appMasterEnv.put(Client.ENV_TM_CORES, String.valueOf(tmCores));
    appMasterEnv.put(Client.ENV_TM_MEMORY, String.valueOf(tmMemory));
    appMasterEnv.put(Client.STRATOSPHERE_JAR_PATH, remotePathJar.toString());
    appMasterEnv.put(Client.ENV_APP_ID, appId.toString());
    appMasterEnv.put(Client.ENV_CLIENT_HOME_DIR, fs.getHomeDirectory().toString());
    appMasterEnv.put(Client.ENV_CLIENT_SHIP_FILES, envShipFileList.toString());
    appMasterEnv.put(Client.ENV_CLIENT_USERNAME, UserGroupInformation.getCurrentUser().getShortUserName());

    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(jmMemory);
    capability.setVirtualCores(1);

    appContext.setApplicationName("Stratosphere"); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue(queue);

    // file that we write into the conf/ dir containing the jobManager address.
    final File addrFile = new File(confDirPath + CliFrontend.JOBMANAGER_ADDRESS_FILE);

    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            try {
                LOG.info("Killing the Stratosphere-YARN application.");
                yarnClient.killApplication(appId);
                LOG.info("Deleting files in " + paths[2]);
                FileSystem shutFS = FileSystem.get(conf);
                shutFS.delete(paths[2], true); // delete conf and jar file.
                shutFS.close();
            } catch (Exception e) {
                LOG.warn("Exception while killing the YARN application", e);
            }
            try {
                addrFile.delete();
            } catch (Exception e) {
                LOG.warn("Exception while deleting the jobmanager address file", e);
            }
            LOG.info("YARN Client is shutting down");
            yarnClient.stop();
        }
    });

    LOG.info("Submitting application master " + appId);
    yarnClient.submitApplication(appContext);
    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();
    boolean told = false;
    char[] el = { '/', '|', '\\', '-' };
    int i = 0;
    while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED
            && appState != YarnApplicationState.FAILED) {
        if (!told && appState == YarnApplicationState.RUNNING) {
            System.err
                    .println("Stratosphere JobManager is now running on " + appReport.getHost() + ":" + jmPort);
            System.err.println("JobManager Web Interface: " + appReport.getTrackingUrl());
            // write jobmanager connect information

            PrintWriter out = new PrintWriter(addrFile);
            out.println(appReport.getHost() + ":" + jmPort);
            out.close();
            addrFile.setReadable(true, false); // readable for all.
            told = true;
        }
        if (!told) {
            System.err.print(el[i++] + "\r");
            if (i == el.length) {
                i = 0;
            }
            Thread.sleep(500); // wait for the application to switch to RUNNING
        } else {
            Thread.sleep(5000);
        }

        appReport = yarnClient.getApplicationReport(appId);
        appState = appReport.getYarnApplicationState();
    }

    LOG.info("Application " + appId + " finished with" + " state " + appState + " at "
            + appReport.getFinishTime());
    if (appState == YarnApplicationState.FAILED || appState == YarnApplicationState.KILLED) {
        LOG.warn("Application failed. Diagnostics " + appReport.getDiagnostics());
    }

}

From source file:hws.core.JobClient.java

License:Apache License

public void run(String[] args) throws Exception {
    //final String command = args[0];
    //final int n = Integer.valueOf(args[1]);
    //final Path jarPath = new Path(args[2]);
    Options options = new Options();
    /*options.addOption(OptionBuilder.withLongOpt("jar")
                           .withDescription( "Jar path" )
                           .hasArg()
                           .withArgName("JarPath")
                           .create());
    options.addOption(OptionBuilder.withLongOpt("scheduler")
                           .withDescription( "Scheduler class name" )
                           .hasArg()
                           .withArgName("ClassName")
                           .create());
    */options.addOption(OptionBuilder.withLongOpt("zk-servers")
            .withDescription("List of the ZooKeeper servers").hasArgs().withArgName("zkAddrs").create("zks"));
    //options.addOption("l", "list", false, "list modules");
    options.addOption(OptionBuilder.withLongOpt("load").withDescription("load new modules").hasArgs()
            .withArgName("XMLFiles").create());
    /*options.addOption(OptionBuilder.withLongOpt( "remove" )
                           .withDescription( "remove modules" )
                           .hasArgs()
                           .withArgName("ModuleNames")
                           .create("rm"));
    */CommandLineParser parser = new BasicParser();
    CommandLine cmd = parser.parse(options, args);

    //Path jarPath = null;
    //String schedulerClassName = null;
    String[] xmlFileNames = null;
    //String []moduleNames = null;
    String zksArgs = "";
    String[] zkServers = null;
    if (cmd.hasOption("zks")) {
        zksArgs = "-zks";
        zkServers = cmd.getOptionValues("zks");
        for (String zks : zkServers) {
            zksArgs += " " + zks;
        }
    }

    //Logger setup
    //FSDataOutputStream writer = FileSystem.get(conf).create(new Path("hdfs:///hws/apps/"+appIdStr+"/logs/jobClient.log"));
    //Logger.addOutputStream(writer);

    /*if(cmd.hasOption("l")){
       LOG.warn("Argument --list (-l) is not supported yet.");
    }
    if(cmd.hasOption("jar")){
       jarPath = new Path(cmd.getOptionValue("jar")); 
    }
    if(cmd.hasOption("scheduler")){
       schedulerClassName = cmd.getOptionValue("scheduler");
    }*/
    if (cmd.hasOption("load")) {
        xmlFileNames = cmd.getOptionValues("load");
    } /*else if(cmd.hasOption("rm")){
        moduleNames = cmd.getOptionValues("rm");
      }*/

    //LOG.info("Jar-Path "+jarPath);
    if (xmlFileNames != null) {
        String paths = "";
        for (String path : xmlFileNames) {
            paths += path + "; ";
        }
        LOG.info("Load XMLs: " + paths);
    }
    /*if(moduleNames!=null){
       String modules = "";
       for(String module: moduleNames){
          modules += module+"; ";
       }
       LOG.info("remove: "+modules);
    }*/
    // Create yarnClient
    YarnConfiguration conf = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();

    System.out.println("LOG Path: " + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();

    ZkClient zk = new ZkClient(zkServers[0]); //TODO select a ZooKeeper server
    if (!zk.exists("/hadoop-watershed")) {
        zk.createPersistent("/hadoop-watershed", "");
    }
    zk.createPersistent("/hadoop-watershed/" + appId.toString(), "");

    FileSystem fs = FileSystem.get(conf);

    LOG.info("Collecting files to upload");
    fs.mkdirs(new Path("hdfs:///hws/apps/" + appId.toString()));
    fs.mkdirs(new Path("hdfs:///hws/apps/" + appId.toString() + "/logs"));

    ModulePipeline modulePipeline = ModulePipeline.fromXMLFiles(xmlFileNames);
    LOG.info("Uploading files to HDFS");
    for (String path : modulePipeline.files()) {
        uploadFile(fs, new File(path), appId);
    }
    LOG.info("Upload finished");

    String modulePipelineJson = Json.dumps(modulePipeline);
    String modulePipelineBase64 = Base64.encodeBase64String(StringUtils.getBytesUtf8(modulePipelineJson))
            .replaceAll("\\s", "");
    LOG.info("ModulePipeline: " + modulePipelineJson);
    //LOG.info("ModulePipeline: "+modulePipelineBase64);
    amContainer.setCommands(Collections.singletonList("$JAVA_HOME/bin/java" + " -Xmx256M"
            + " hws.core.JobMaster" + " -aid " + appId.toString() + " --load " + modulePipelineBase64 + " "
            + zksArgs + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout" + " 2>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));

    // Setup jar for ApplicationMaster
    //LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    //setupAppMasterJar(jarPath, appMasterJar);
    //amContainer.setLocalResources(Collections.singletonMap("hws.jar", appMasterJar));

    LOG.info("Listing files for YARN-Watershed");
    RemoteIterator<LocatedFileStatus> filesIterator = fs.listFiles(new Path("hdfs:///hws/bin/"), false);
    Map<String, LocalResource> resources = new HashMap<String, LocalResource>();
    LOG.info("Files setup as resource");
    while (filesIterator.hasNext()) {
        LocatedFileStatus fileStatus = filesIterator.next();
        // Setup jar for ApplicationMaster
        LocalResource containerJar = Records.newRecord(LocalResource.class);
        ContainerUtils.setupContainerJar(fs, fileStatus.getPath(), containerJar);
        resources.put(fileStatus.getPath().getName(), containerJar);
    }
    LOG.info("container resource setup");
    amContainer.setLocalResources(resources);

    fs.close(); //closing FileSystem interface

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    ContainerUtils.setupContainerEnv(appMasterEnv, conf);
    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(256);
    capability.setVirtualCores(1);

    // Finally, set-up ApplicationSubmissionContext for the application
    //ApplicationSubmissionContext appContext = 
    //app.getApplicationSubmissionContext();
    appContext.setApplicationName("Hadoop-Watershed"); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue("default"); // queue 

    // Submit application
    LOG.info("Submitting application " + appId);
    yarnClient.submitApplication(appContext);

    LOG.info("Waiting for containers to finish");
    zk.waitUntilExists("/hadoop-watershed/" + appId.toString() + "/done", TimeUnit.MILLISECONDS, 250);
    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();
    while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED
            && appState != YarnApplicationState.FAILED) {
        Thread.sleep(100);
        appReport = yarnClient.getApplicationReport(appId);
        appState = appReport.getYarnApplicationState();
    }

    System.out.println("Application " + appId + " finished with" + " state " + appState + " at "
            + appReport.getFinishTime());

    System.out.println("deleting " + appId.toString() + " znode");
    zk.deleteRecursive("/hadoop-watershed/" + appId.toString()); //TODO remove app folder from ZooKeeper
}

From source file:io.hops.hopsworks.api.zeppelin.rest.InterpreterRestApi.java

License:Apache License

private List<JobAdministration.YarnApplicationReport> fetchJobs() {
    JobAdministration jobAdmin = new JobAdministration();
    List<JobAdministration.YarnApplicationReport> reports = new ArrayList<>();
    YarnClient client = YarnClient.createYarnClient();
    Configuration conf = settings.getConfiguration();
    client.init(conf);
    client.start();
    try {
        //Create our custom YarnApplicationReport Pojo
        for (ApplicationReport appReport : client.getApplications(PREDICATE)) {
            reports.add(jobAdmin.new YarnApplicationReport(appReport.getApplicationId().toString(),
                    appReport.getName(), appReport.getUser(), appReport.getStartTime(),
                    appReport.getFinishTime(), appReport.getApplicationId().getClusterTimestamp(),
                    appReport.getApplicationId().getId(), appReport.getYarnApplicationState().name()));
        }
    } catch (YarnException | IOException ex) {
        logger.error("", ex);
    }
    return reports;
}

From source file:io.hops.hopsworks.api.zeppelin.rest.InterpreterRestApi.java

License:Apache License

private List<JobAdministration.YarnApplicationReport> fetchJobs(String username) {
    JobAdministration jobAdmin = new JobAdministration();
    List<JobAdministration.YarnApplicationReport> reports = new ArrayList<>();
    YarnClient client = YarnClient.createYarnClient();
    Configuration conf = settings.getConfiguration();
    client.init(conf);
    client.start();
    try {
        //Create our custom YarnApplicationReport Pojo
        for (ApplicationReport appReport : client.getApplications(PREDICATE)) {
            if (username.equals(appReport.getUser())) {
                reports.add(jobAdmin.new YarnApplicationReport(appReport.getApplicationId().toString(),
                        appReport.getName(), appReport.getUser(), appReport.getStartTime(),
                        appReport.getFinishTime(), appReport.getApplicationId().getClusterTimestamp(),
                        appReport.getApplicationId().getId(), appReport.getYarnApplicationState().name()));
            }
        }
    } catch (YarnException | IOException ex) {
        logger.error("", ex);
    }
    return reports;
}

From source file:io.hops.hopsworks.common.jobs.flink.YarnClusterClient.java

License:Apache License

/**
 * Shuts down the Yarn application
 */
public void shutdownCluster() {

    if (hasBeenShutDown.getAndSet(true)) {
        return;
    }

    if (!isConnected) {
        throw new IllegalStateException("The cluster has been not been connected to the ApplicationMaster.");
    }

    try {
        Runtime.getRuntime().removeShutdownHook(clientShutdownHook);
    } catch (IllegalStateException e) {
        // we are already in the shutdown hook
    }

    LOG.info("Sending shutdown request to the Application Master");
    try {
        Future<Object> response = Patterns.ask(applicationClient.get(), new YarnMessages.LocalStopYarnSession(
                getApplicationStatus(), "Flink YARN Client requested shutdown"), new Timeout(akkaDuration));
        Await.ready(response, akkaDuration);
    } catch (Exception e) {
        LOG.warn("Error while stopping YARN cluster.", e);
    }

    try {
        File propertiesFile = FlinkYarnSessionCli.getYarnPropertiesLocation(flinkConfig);
        if (propertiesFile.isFile()) {
            if (propertiesFile.delete()) {
                LOG.info("Deleted Yarn properties file at {}", propertiesFile.getAbsoluteFile().toString());
            } else {
                LOG.warn("Couldn't delete Yarn properties file at {}",
                        propertiesFile.getAbsoluteFile().toString());
            }
        }
    } catch (Exception e) {
        LOG.warn("Exception while deleting the JobManager address file", e);
    }

    if (sessionFilesDir != null) {
        LOG.info("Deleting files in " + sessionFilesDir);
        try {
            FileSystem shutFS = FileSystem.get(hadoopConfig);
            shutFS.delete(sessionFilesDir, true); // delete conf and jar file.
            shutFS.close();
        } catch (IOException e) {
            LOG.error("Could not delete the Flink jar and configuration files in HDFS..", e);
        }
    } else {
        LOG.warn("Session file directory not set. Not deleting session files");
    }

    try {
        pollingRunner.stopRunner();
        pollingRunner.join(1000);
    } catch (InterruptedException e) {
        LOG.warn("Shutdown of the polling runner was interrupted", e);
        Thread.currentThread().interrupt();
    }

    try {
        ApplicationReport appReport = yarnClient.getApplicationReport(appId);

        LOG.info("Application " + appId + " finished with state " + appReport.getYarnApplicationState()
                + " and final state " + appReport.getFinalApplicationStatus() + " at "
                + appReport.getFinishTime());

        if (appReport.getYarnApplicationState() == YarnApplicationState.FAILED
                || appReport.getYarnApplicationState() == YarnApplicationState.KILLED) {
            LOG.warn("Application failed. Diagnostics " + appReport.getDiagnostics());
            LOG.warn("If log aggregation is activated in the Hadoop cluster, we recommend to retrieve "
                    + "the full application log using this command:\n" + "\tyarn logs -appReport "
                    + appReport.getApplicationId() + "\n"
                    + "(It sometimes takes a few seconds until the logs are aggregated)");
        }
    } catch (Exception e) {
        LOG.warn("Couldn't get final report", e);
    }

    LOG.info("YARN Client is shutting down");
    yarnClient.stop(); // actorRunner is using the yarnClient.
    yarnClient = null; // set null to clearly see if somebody wants to access it afterwards.
}

From source file:ml.shifu.guagua.yarn.GuaguaYarnClient.java

License:Apache License

private int printFinalJobReport() throws YarnException, IOException {
    try {
        ApplicationReport report = this.yarnClient.getApplicationReport(getAppId());
        FinalApplicationStatus finalAppStatus = report.getFinalApplicationStatus();
        final long secs = (report.getFinishTime() - report.getStartTime()) / 1000L;
        final String time = String.format("%d minutes, %d seconds.", secs / 60L, secs % 60L);
        LOG.info("Completed {}: {}, total running time: {}", getAppName(), finalAppStatus.name(), time);
        return finalAppStatus == FinalApplicationStatus.SUCCEEDED ? 0 : -1;
    } catch (YarnException yre) {
        LOG.error(String.format("Exception encountered while attempting to request a final job report for %s.",
                getAppId()), yre);
        return -1;
    }
}

From source file:org.apache.ambari.view.slider.SliderAppsViewControllerImpl.java

License:Apache License

private SliderApp createSliderAppObject(ApplicationReport yarnApp, Set<String> properties,
        SliderClient sliderClient) {
    if (yarnApp == null) {
        return null;
    }

    SliderApp app = new SliderApp();
    app.setState(yarnApp.getYarnApplicationState().name());

    // Valid Slider App?
    // We want all Slider apps except the ones which properly finished.
    if (YarnApplicationState.FINISHED.equals(yarnApp.getYarnApplicationState())) {
        try {
            if (sliderClient.actionExists(yarnApp.getName(), false) == LauncherExitCodes.EXIT_SUCCESS) {
                app.setState(SliderApp.STATE_FROZEN);
            }
        } catch (UnknownApplicationInstanceException e) {
            return null; // Application not in HDFS - means it is not frozen
        } catch (YarnException e) {
            logger.warn("Unable to determine frozen state for " + yarnApp.getName(), e);
            return null;
        } catch (IOException e) {
            logger.warn("Unable to determine frozen state for " + yarnApp.getName(), e);
            return null;
        }
    }
    if (YarnApplicationState.KILLED.equals(yarnApp.getYarnApplicationState())
            || YarnApplicationState.FAILED.equals(yarnApp.getYarnApplicationState())) {
        try {
            if (sliderClient.actionExists(yarnApp.getName(), false) != LauncherExitCodes.EXIT_SUCCESS) {
                // YARN application is killed or failed, and no HDFS content - Application has been destroyed.
                return null;
            }
        } catch (UnknownApplicationInstanceException e) {
            return null; // Application not in HDFS - means it is not frozen
        } catch (YarnException e) {
            logger.warn("Unable to determine status of killed app " + yarnApp.getName(), e);
            return null;
        } catch (IOException e) {
            logger.warn("Unable to determine status of killed app " + yarnApp.getName(), e);
            return null;
        }
    }

    app.setId(getApplicationIdString(yarnApp.getApplicationId()));
    app.setName(yarnApp.getName());
    app.setUser(yarnApp.getUser());
    app.setDiagnostics(yarnApp.getDiagnostics());
    app.setYarnId(yarnApp.getApplicationId().toString());
    app.setStartTime(yarnApp.getStartTime());
    app.setEndTime(yarnApp.getFinishTime());
    Set<String> applicationTags = yarnApp.getApplicationTags();
    if (applicationTags != null && applicationTags.size() > 0) {
        for (String tag : applicationTags) {
            int index = tag.indexOf(':');
            if (index > 0 && index < tag.length() - 1) {
                String key = tag.substring(0, index).trim();
                String value = tag.substring(index + 1).trim();
                if ("name".equals(key)) {
                    app.setType(value);
                } else if ("version".equals(key)) {
                    app.setAppVersion(value);
                } else if ("description".equals(key)) {
                    app.setDescription(value);
                }
            }
        }
    }
    if (properties != null && !properties.isEmpty()) {
        SliderAppType matchedAppType = null;
        List<SliderAppType> matchingAppTypes = getSliderAppTypes(null);
        if (matchingAppTypes != null && matchingAppTypes.size() > 0) {
            for (SliderAppType appType : matchingAppTypes) {
                if ((appType.getTypeName() != null && appType.getTypeName().equalsIgnoreCase(app.getType()))
                        && (appType.getTypeVersion() != null
                                && appType.getTypeVersion().equalsIgnoreCase(app.getAppVersion()))) {
                    matchedAppType = appType;
                    break;
                }
            }
        }

        SliderAppMasterClient sliderAppClient = yarnApp.getTrackingUrl() == null ? null
                : new SliderAppMasterClient(yarnApp.getTrackingUrl());
        SliderAppMasterData appMasterData = null;
        Map<String, String> quickLinks = new HashMap<String, String>();
        Set<String> metrics = new HashSet<String>();
        for (String property : properties) {
            if ("RUNNING".equals(app.getState())) {
                if (sliderAppClient != null) {
                    if (appMasterData == null) {
                        appMasterData = sliderAppClient.getAppMasterData();
                    }
                    if (appMasterData != null && "urls".equals(property.toLowerCase())) {
                        if (quickLinks.isEmpty()) {
                            quickLinks = sliderAppClient.getQuickLinks(appMasterData.publisherUrl);
                        }
                        app.setUrls(quickLinks);
                    } else if (appMasterData != null && "configs".equals(property.toLowerCase())) {
                        Map<String, Map<String, String>> configs = sliderAppClient
                                .getConfigs(appMasterData.publisherUrl);
                        app.setConfigs(configs);
                    } else if (appMasterData != null && "jmx".equals(property.toLowerCase())) {
                        if (quickLinks.isEmpty()) {
                            quickLinks = sliderAppClient.getQuickLinks(appMasterData.publisherUrl);
                        }
                        if (quickLinks != null && quickLinks.containsKey("JMX")) {
                            String jmxUrl = quickLinks.get("JMX");
                            if (matchedAppType != null) {
                                MetricsHolder metricsHolder = appMetrics.get(matchedAppType.uniqueName());
                                app.setJmx(sliderAppClient.getJmx(jmxUrl, viewContext, matchedAppType,
                                        metricsHolder));
                            }
                        }
                        Map<String, Map<String, String>> configs = sliderAppClient
                                .getConfigs(appMasterData.publisherUrl);
                        app.setConfigs(configs);
                    } else if ("components".equals(property.toLowerCase())) {
                        try {
                            ClusterDescription description = sliderClient
                                    .getClusterDescription(yarnApp.getName());
                            if (description != null && description.status != null
                                    && !description.status.isEmpty()) {
                                Map<String, SliderAppComponent> componentTypeMap = new HashMap<String, SliderAppComponent>();
                                for (Entry<String, Object> e : description.status.entrySet()) {
                                    @SuppressWarnings("unchecked")
                                    Map<String, Map<String, Map<String, Object>>> componentsObj = (Map<String, Map<String, Map<String, Object>>>) e
                                            .getValue();
                                    boolean isLive = "live".equals(e.getKey());
                                    for (Entry<String, Map<String, Map<String, Object>>> componentEntry : componentsObj
                                            .entrySet()) {
                                        SliderAppComponent appComponent = componentTypeMap
                                                .get(componentEntry.getKey());
                                        if (appComponent == null) {
                                            appComponent = new SliderAppComponent();
                                            appComponent.setComponentName(componentEntry.getKey());
                                            appComponent.setActiveContainers(
                                                    new HashMap<String, Map<String, String>>());
                                            appComponent.setCompletedContainers(
                                                    new HashMap<String, Map<String, String>>());
                                            componentTypeMap.put(componentEntry.getKey(), appComponent);
                                        }
                                        for (Entry<String, Map<String, Object>> containerEntry : componentEntry
                                                .getValue().entrySet()) {
                                            Map<String, String> containerDataMap = new HashMap<String, String>();
                                            String containerId = containerEntry.getKey();
                                            Map<String, Object> containerValues = containerEntry.getValue();
                                            for (String containerProperty : containerValues.keySet()) {
                                                Object containerPropertyValue = containerValues
                                                        .get(containerProperty);
                                                containerDataMap.put(containerProperty,
                                                        containerPropertyValue.toString());
                                            }
                                            if (isLive) {
                                                appComponent.getActiveContainers().put(containerId,
                                                        containerDataMap);
                                            } else {
                                                appComponent.getCompletedContainers().put(containerId,
                                                        containerDataMap);
                                            }
                                        }
                                        // Set total instances count from statistics
                                        appComponent.setInstanceCount(appComponent.getActiveContainers().size()
                                                + appComponent.getCompletedContainers().size());
                                        if (description.statistics != null && description.statistics
                                                .containsKey(componentEntry.getKey())) {
                                            Map<String, Integer> statisticsMap = description.statistics
                                                    .get(componentEntry.getKey());
                                            if (statisticsMap.containsKey("containers.desired")) {
                                                appComponent.setInstanceCount(
                                                        statisticsMap.get("containers.desired"));
                                            }
                                        }
                                    }
                                }
                                app.setAlerts(
                                        sliderAlerts.generateComponentsAlerts(componentTypeMap, app.getType()));
                                app.setComponents(componentTypeMap);
                            }
                        } catch (UnknownApplicationInstanceException e) {
                            logger.warn("Unable to determine app components for " + yarnApp.getName(), e);
                        } catch (YarnException e) {
                            logger.warn("Unable to determine app components for " + yarnApp.getName(), e);
                            throw new RuntimeException(e.getMessage(), e);
                        } catch (IOException e) {
                            logger.warn("Unable to determine app components for " + yarnApp.getName(), e);
                            throw new RuntimeException(e.getMessage(), e);
                        }
                    } else if (property.startsWith(METRICS_PREFIX)) {
                        metrics.add(property.substring(METRICS_PREFIX.length()));
                    } else if ("supportedMetrics".equals(property)) {
                        if (matchedAppType != null) {
                            app.setSupportedMetrics(matchedAppType.getSupportedMetrics());
                        }
                    }
                }
            }
        }
        if (metrics.size() > 0) {
            if (quickLinks.isEmpty()) {
                quickLinks = sliderAppClient.getQuickLinks(appMasterData.publisherUrl);
            }
            if (quickLinks != null && quickLinks.containsKey(METRICS_API_NAME)) {
                String metricsUrl = quickLinks.get(METRICS_API_NAME);
                MetricsHolder metricsHolder = appMetrics.get(matchedAppType.uniqueName());
                app.setMetrics(sliderAppClient.getMetrics(yarnApp.getName(), metricsUrl, metrics, null,
                        viewContext, matchedAppType, metricsHolder));
            }
        }
    }
    return app;
}

From source file:org.apache.drill.yarn.core.YarnRMClient.java

License:Apache License

/**
 * Wait for the application to enter one of the completion states. This is an
 * informal implementation useful for testing.
 *
 * @throws YarnClientException
 */

public void waitForCompletion() throws YarnClientException {
    ApplicationReport appReport;
    YarnApplicationState appState;
    for (;;) {
        appReport = getAppReport();
        appState = appReport.getYarnApplicationState();
        if (appState == YarnApplicationState.FINISHED || appState == YarnApplicationState.KILLED
                || appState == YarnApplicationState.FAILED) {
            break;
        }
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            // Should never occur.
        }
    }

    System.out.println("Application " + appId + " finished with" + " state " + appState + " at "
            + appReport.getFinishTime());
}