Example usage for org.apache.hadoop.yarn.client.api YarnClient createApplication

Introduction

On this page you can find example usages of org.apache.hadoop.yarn.client.api YarnClient createApplication.

Prototype

public abstract YarnClientApplication createApplication() throws YarnException, IOException;

Document

Obtain a YarnClientApplication for a new application, which in turn contains the ApplicationSubmissionContext and org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse objects.
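
Every example below follows the same basic flow: create and start a YarnClient, call createApplication() to obtain a YarnClientApplication, populate its ApplicationSubmissionContext (application master container spec, resource ask, queue, name), and submit it. Here is a minimal sketch of that flow; the application name, AM command, and resource sizes are illustrative placeholders rather than values taken from any of the examples:

import java.util.Collections;

import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.Records;

public class CreateApplicationSketch {
    public static void main(String[] args) throws Exception {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(new YarnConfiguration());
        yarnClient.start();

        // createApplication() asks the ResourceManager for a new application id and
        // returns both the submission context and the GetNewApplicationResponse.
        YarnClientApplication app = yarnClient.createApplication();

        ApplicationSubmissionContext context = app.getApplicationSubmissionContext();
        context.setApplicationName("sketch"); // placeholder name
        context.setQueue("default");

        // The AM container spec and the resource ask must be set before submission.
        ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
        amContainer.setCommands(Collections.singletonList("sleep 60")); // placeholder command
        context.setAMContainerSpec(amContainer);

        Resource capability = Records.newRecord(Resource.class);
        capability.setMemory(256); // illustrative sizes
        capability.setVirtualCores(1);
        context.setResource(capability);

        yarnClient.submitApplication(context);
        yarnClient.stop();
    }
}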

Usage

From source file:MasteringYarn.DistributedShellClient.java

public void run(String[] args) throws YarnException, IOException, InterruptedException {

    YarnConfiguration yarnConfiguration = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(yarnConfiguration);
    yarnClient.start();

    YarnClientApplication yarnClientApplication = yarnClient.createApplication();

    //container launch context for application master
    ContainerLaunchContext applicationMasterContainer = Records.newRecord(ContainerLaunchContext.class);
    applicationMasterContainer.setCommands(
            Collections.singletonList("$JAVA_HOME/bin/java MasteringYarn.DistributedShellApplicationMaster "
                    + args[2] + " " + args[3] + " " + "1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
                    + "/stdout " + "2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));

    LocalResource applicationMasterJar = Records.newRecord(LocalResource.class);
    setupJarFileForApplicationMaster(new Path(args[1]), applicationMasterJar);
    applicationMasterContainer
            .setLocalResources(Collections.singletonMap("MasteringYarn.jar", applicationMasterJar));

    Map<String, String> appMasterEnv = new HashMap<>();
    setupEnvironmentForApplicationMaster(appMasterEnv);
    applicationMasterContainer.setEnvironment(appMasterEnv);

    Resource resources = Records.newRecord(Resource.class);
    resources.setVirtualCores(1);
    resources.setMemory(100);

    ApplicationSubmissionContext submissionContext = yarnClientApplication.getApplicationSubmissionContext();
    submissionContext.setAMContainerSpec(applicationMasterContainer);
    submissionContext.setQueue("default");
    submissionContext.setApplicationName("MasteringYarn");
    submissionContext.setResource(resources);

    ApplicationId applicationId = submissionContext.getApplicationId();
    System.out.println("Submitting " + applicationId);
    yarnClient.submitApplication(submissionContext);
    System.out.println("Post submission " + applicationId);

    ApplicationReport applicationReport;
    YarnApplicationState applicationState;

    do {
        Thread.sleep(1000);
        applicationReport = yarnClient.getApplicationReport(applicationId);
        applicationState = applicationReport.getYarnApplicationState();

        System.out.println("Diagnostics " + applicationReport.getDiagnostics());

    } while (applicationState != YarnApplicationState.FAILED
            && applicationState != YarnApplicationState.FINISHED
            && applicationState != YarnApplicationState.KILLED);

    System.out.println("Application finished with " + applicationState + " state and id " + applicationId);
}

From source file:org.apache.flink.yarn.AbstractYarnClusterDescriptor.java

License:Apache License

/**
 * This method will block until the ApplicationMaster/JobManager have been
 * deployed on YARN.
 */
protected YarnClusterClient deployInternal() throws Exception {
    isReadyForDeployment();
    LOG.info("Using values:");
    LOG.info("\tTaskManager count = {}", taskManagerCount);
    LOG.info("\tJobManager memory = {}", jobManagerMemoryMb);
    LOG.info("\tTaskManager memory = {}", taskManagerMemoryMb);

    final YarnClient yarnClient = getYarnClient();

    // ------------------ Check if the specified queue exists --------------------

    try {
        List<QueueInfo> queues = yarnClient.getAllQueues();
        if (queues.size() > 0 && this.yarnQueue != null) { // check only if there are queues configured in yarn and for this session.
            boolean queueFound = false;
            for (QueueInfo queue : queues) {
                if (queue.getQueueName().equals(this.yarnQueue)) {
                    queueFound = true;
                    break;
                }
            }
            if (!queueFound) {
                String queueNames = "";
                for (QueueInfo queue : queues) {
                    queueNames += queue.getQueueName() + ", ";
                }
                LOG.warn("The specified queue '" + this.yarnQueue + "' does not exist. " + "Available queues: "
                        + queueNames);
            }
        } else {
            LOG.debug("The YARN cluster does not have any queues configured");
        }
    } catch (Throwable e) {
        LOG.warn("Error while getting queue information from YARN: " + e.getMessage());
        if (LOG.isDebugEnabled()) {
            LOG.debug("Error details", e);
        }
    }

    // ------------------ Add dynamic properties to local flinkConfiguraton ------
    Map<String, String> dynProperties = getDynamicProperties(dynamicPropertiesEncoded);
    for (Map.Entry<String, String> dynProperty : dynProperties.entrySet()) {
        flinkConfiguration.setString(dynProperty.getKey(), dynProperty.getValue());
    }

    // ------------------ Check if the YARN ClusterClient has the requested resources --------------

    // the yarnMinAllocationMB specifies the smallest possible container allocation size.
    // all allocations below this value are automatically set to this value.
    final int yarnMinAllocationMB = conf.getInt("yarn.scheduler.minimum-allocation-mb", 0);
    if (jobManagerMemoryMb < yarnMinAllocationMB || taskManagerMemoryMb < yarnMinAllocationMB) {
        LOG.warn("The JobManager or TaskManager memory is below the smallest possible YARN Container size. "
                + "The value of 'yarn.scheduler.minimum-allocation-mb' is '" + yarnMinAllocationMB
                + "'. Please increase the memory size."
                + "YARN will allocate the smaller containers but the scheduler will account for the minimum-allocation-mb, maybe not all instances "
                + "you requested will start.");
    }

    // set the memory to minAllocationMB to do the next checks correctly
    if (jobManagerMemoryMb < yarnMinAllocationMB) {
        jobManagerMemoryMb = yarnMinAllocationMB;
    }
    if (taskManagerMemoryMb < yarnMinAllocationMB) {
        taskManagerMemoryMb = yarnMinAllocationMB;
    }

    // Create application via yarnClient
    final YarnClientApplication yarnApplication = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = yarnApplication.getNewApplicationResponse();

    Resource maxRes = appResponse.getMaximumResourceCapability();
    final String NOTE = "Please check the 'yarn.scheduler.maximum-allocation-mb' and the 'yarn.nodemanager.resource.memory-mb' configuration values\n";
    if (jobManagerMemoryMb > maxRes.getMemory()) {
        failSessionDuringDeployment(yarnClient, yarnApplication);
        throw new YarnDeploymentException(
                "The cluster does not have the requested resources for the JobManager available!\n"
                        + "Maximum Memory: " + maxRes.getMemory() + "MB Requested: " + jobManagerMemoryMb
                        + "MB. " + NOTE);
    }

    if (taskManagerMemoryMb > maxRes.getMemory()) {
        failSessionDuringDeployment(yarnClient, yarnApplication);
        throw new YarnDeploymentException(
                "The cluster does not have the requested resources for the TaskManagers available!\n"
                        + "Maximum Memory: " + maxRes.getMemory() + " Requested: " + taskManagerMemoryMb
                        + "MB. " + NOTE);
    }

    final String NOTE_RSC = "\nThe Flink YARN client will try to allocate the YARN session, but not all TaskManagers may "
            + "connect right away because the required resources are currently not available in the cluster. "
            + "The allocation might take more time than usual because the Flink YARN client needs to wait until "
            + "the resources become available.";
    int totalMemoryRequired = jobManagerMemoryMb + taskManagerMemoryMb * taskManagerCount;
    ClusterResourceDescription freeClusterMem = getCurrentFreeClusterResources(yarnClient);
    if (freeClusterMem.totalFreeMemory < totalMemoryRequired) {
        LOG.warn("This YARN session requires " + totalMemoryRequired + "MB of memory in the cluster. "
                + "There are currently only " + freeClusterMem.totalFreeMemory + "MB available." + NOTE_RSC);

    }
    if (taskManagerMemoryMb > freeClusterMem.containerLimit) {
        LOG.warn("The requested amount of memory for the TaskManagers (" + taskManagerMemoryMb
                + "MB) is more than " + "the largest possible YARN container: " + freeClusterMem.containerLimit
                + NOTE_RSC);
    }
    if (jobManagerMemoryMb > freeClusterMem.containerLimit) {
        LOG.warn(
                "The requested amount of memory for the JobManager (" + jobManagerMemoryMb + "MB) is more than "
                        + "the largest possible YARN container: " + freeClusterMem.containerLimit + NOTE_RSC);
    }

    // ----------------- check if the requested containers fit into the cluster.

    int[] nmFree = Arrays.copyOf(freeClusterMem.nodeManagersFree, freeClusterMem.nodeManagersFree.length);
    // first, allocate the jobManager somewhere.
    if (!allocateResource(nmFree, jobManagerMemoryMb)) {
        LOG.warn("Unable to find a NodeManager that can fit the JobManager/Application master. "
                + "The JobManager requires " + jobManagerMemoryMb + "MB. NodeManagers available: "
                + Arrays.toString(freeClusterMem.nodeManagersFree) + NOTE_RSC);
    }
    // allocate TaskManagers
    for (int i = 0; i < taskManagerCount; i++) {
        if (!allocateResource(nmFree, taskManagerMemoryMb)) {
            LOG.warn("There is not enough memory available in the YARN cluster. "
                    + "The TaskManager(s) require " + taskManagerMemoryMb + "MB each. "
                    + "NodeManagers available: " + Arrays.toString(freeClusterMem.nodeManagersFree) + "\n"
                    + "After allocating the JobManager (" + jobManagerMemoryMb + "MB) and (" + i + "/"
                    + taskManagerCount + ") TaskManagers, " + "the following NodeManagers are available: "
                    + Arrays.toString(nmFree) + NOTE_RSC);
        }
    }

    ApplicationReport report = startAppMaster(null, yarnClient, yarnApplication);

    String host = report.getHost();
    int port = report.getRpcPort();

    // Correctly initialize the Flink config
    flinkConfiguration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, host);
    flinkConfiguration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, port);

    // the Flink cluster is deployed in YARN. Represent cluster
    return createYarnClusterClient(this, yarnClient, report, flinkConfiguration, sessionFilesDir, true);
}

From source file:org.apache.flink.yarn.Client.java

License:Apache License

public void run(String[] args) throws Exception {

    if (UserGroupInformation.isSecurityEnabled()) {
        throw new RuntimeException("Flink YARN client does not have security support right now."
                + "File a bug, we will fix it asap");
    }/*from   w ww. jav a2  s  . c o  m*/
    //Utils.logFilesInCurrentDirectory(LOG);
    //
    //   Command Line Options
    //
    Options options = new Options();
    options.addOption(VERBOSE);
    options.addOption(FLINK_CONF_DIR);
    options.addOption(FLINK_JAR);
    options.addOption(JM_MEMORY);
    options.addOption(TM_MEMORY);
    options.addOption(TM_CORES);
    options.addOption(CONTAINER);
    options.addOption(GEN_CONF);
    options.addOption(QUEUE);
    options.addOption(QUERY);
    options.addOption(SHIP_PATH);

    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;
    try {
        cmd = parser.parse(options, args);
    } catch (MissingOptionException moe) {
        System.out.println(moe.getMessage());
        printUsage();
        System.exit(1);
    }

    if (System.getProperty("log4j.configuration") == null) {
        Logger root = Logger.getRootLogger();
        root.removeAllAppenders();
        PatternLayout layout = new PatternLayout("%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n");
        ConsoleAppender appender = new ConsoleAppender(layout, "System.err");
        root.addAppender(appender);
        if (cmd.hasOption(VERBOSE.getOpt())) {
            root.setLevel(Level.DEBUG);
            LOG.debug("CLASSPATH: " + System.getProperty("java.class.path"));
        } else {
            root.setLevel(Level.INFO);
        }
    }

    // Jar Path
    Path localJarPath;
    if (cmd.hasOption(FLINK_JAR.getOpt())) {
        String userPath = cmd.getOptionValue(FLINK_JAR.getOpt());
        if (!userPath.startsWith("file://")) {
            userPath = "file://" + userPath;
        }
        localJarPath = new Path(userPath);
    } else {
        localJarPath = new Path(
                "file://" + Client.class.getProtectionDomain().getCodeSource().getLocation().getPath());
    }

    if (cmd.hasOption(GEN_CONF.getOpt())) {
        LOG.info("Placing default configuration in current directory");
        File outFile = generateDefaultConf(localJarPath);
        LOG.info("File written to " + outFile.getAbsolutePath());
        System.exit(0);
    }

    // Conf Path 
    Path confPath = null;
    String confDirPath = "";
    if (cmd.hasOption(FLINK_CONF_DIR.getOpt())) {
        confDirPath = cmd.getOptionValue(FLINK_CONF_DIR.getOpt()) + "/";
        File confFile = new File(confDirPath + CONFIG_FILE_NAME);
        if (!confFile.exists()) {
            LOG.fatal("Unable to locate configuration file in " + confFile);
            System.exit(1);
        }
        confPath = new Path(confFile.getAbsolutePath());
    } else {
        System.out.println("No configuration file has been specified");

        // no configuration path given.
        // -> see if there is one in the current directory
        File currDir = new File(".");
        File[] candidates = currDir.listFiles(new FilenameFilter() {
            @Override
            public boolean accept(final File dir, final String name) {
                return name != null && name.endsWith(".yaml");
            }
        });
        if (candidates == null || candidates.length == 0) {
            System.out.println(
                    "No configuration file has been found in current directory.\n" + "Copying default.");
            File outFile = generateDefaultConf(localJarPath);
            confPath = new Path(outFile.toURI());
        } else {
            if (candidates.length > 1) {
                System.out.println("Multiple .yaml configuration files were found in the current directory\n"
                        + "Please specify one explicitly");
                System.exit(1);
            } else if (candidates.length == 1) {
                confPath = new Path(candidates[0].toURI());
            }
        }
    }
    List<File> shipFiles = new ArrayList<File>();
    // path to directory to ship
    if (cmd.hasOption(SHIP_PATH.getOpt())) {
        String shipPath = cmd.getOptionValue(SHIP_PATH.getOpt());
        File shipDir = new File(shipPath);
        if (shipDir.isDirectory()) {
            shipFiles = new ArrayList<File>(Arrays.asList(shipDir.listFiles(new FilenameFilter() {
                @Override
                public boolean accept(File dir, String name) {
                    return !(name.equals(".") || name.equals(".."));
                }
            })));
        } else {
            LOG.warn("Ship directory is not a directory!");
        }
    }
    boolean hasLog4j = false;
    //check if there is a log4j file
    if (confDirPath.length() > 0) {
        File l4j = new File(confDirPath + "/log4j.properties");
        if (l4j.exists()) {
            shipFiles.add(l4j);
            hasLog4j = true;
        }
    }

    // queue
    String queue = "default";
    if (cmd.hasOption(QUEUE.getOpt())) {
        queue = cmd.getOptionValue(QUEUE.getOpt());
    }

    // JobManager Memory
    int jmMemory = 512;
    if (cmd.hasOption(JM_MEMORY.getOpt())) {
        jmMemory = Integer.valueOf(cmd.getOptionValue(JM_MEMORY.getOpt()));
    }

    // Task Managers memory
    int tmMemory = 1024;
    if (cmd.hasOption(TM_MEMORY.getOpt())) {
        tmMemory = Integer.valueOf(cmd.getOptionValue(TM_MEMORY.getOpt()));
    }

    // Task Managers vcores
    int tmCores = 1;
    if (cmd.hasOption(TM_CORES.getOpt())) {
        tmCores = Integer.valueOf(cmd.getOptionValue(TM_CORES.getOpt()));
    }
    Utils.getFlinkConfiguration(confPath.toUri().getPath());
    int jmPort = GlobalConfiguration.getInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, 0);
    if (jmPort == 0) {
        LOG.warn("Unable to find job manager port in configuration!");
        jmPort = ConfigConstants.DEFAULT_JOB_MANAGER_IPC_PORT;
    }
    conf = Utils.initializeYarnConfiguration();

    // initialize HDFS
    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem 
    // Create a local resource to point to the destination jar path 
    final FileSystem fs = FileSystem.get(conf);

    if (fs.getScheme().startsWith("file")) {
        LOG.warn("The file system scheme is '" + fs.getScheme() + "'. This indicates that the "
                + "specified Hadoop configuration path is wrong and the sytem is using the default Hadoop configuration values."
                + "The Flink YARN client needs to store its files in a distributed file system");
    }

    // Create yarnClient
    final YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Query cluster for metrics
    if (cmd.hasOption(QUERY.getOpt())) {
        showClusterMetrics(yarnClient);
    }
    if (!cmd.hasOption(CONTAINER.getOpt())) {
        LOG.fatal("Missing required argument " + CONTAINER.getOpt());
        printUsage();
        yarnClient.stop();
        System.exit(1);
    }

    // TM Count
    final int taskManagerCount = Integer.valueOf(cmd.getOptionValue(CONTAINER.getOpt()));

    System.out.println("Using values:");
    System.out.println("\tContainer Count = " + taskManagerCount);
    System.out.println("\tJar Path = " + localJarPath.toUri().getPath());
    System.out.println("\tConfiguration file = " + confPath.toUri().getPath());
    System.out.println("\tJobManager memory = " + jmMemory);
    System.out.println("\tTaskManager memory = " + tmMemory);
    System.out.println("\tTaskManager cores = " + tmCores);

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    Resource maxRes = appResponse.getMaximumResourceCapability();
    if (tmMemory > maxRes.getMemory() || tmCores > maxRes.getVirtualCores()) {
        LOG.fatal("The cluster does not have the requested resources for the TaskManagers available!\n"
                + "Maximum Memory: " + maxRes.getMemory() + ", Maximum Cores: " + tmCores);
        yarnClient.stop();
        System.exit(1);
    }
    if (jmMemory > maxRes.getMemory()) {
        LOG.fatal("The cluster does not have the requested resources for the JobManager available!\n"
                + "Maximum Memory: " + maxRes.getMemory());
        yarnClient.stop();
        System.exit(1);
    }
    int totalMemoryRequired = jmMemory + tmMemory * taskManagerCount;
    ClusterResourceDescription freeClusterMem = getCurrentFreeClusterResources(yarnClient);
    if (freeClusterMem.totalFreeMemory < totalMemoryRequired) {
        LOG.fatal("This YARN session requires " + totalMemoryRequired + "MB of memory in the cluster. "
                + "There are currently only " + freeClusterMem.totalFreeMemory + "MB available.");
        yarnClient.stop();
        System.exit(1);
    }
    if (tmMemory > freeClusterMem.containerLimit) {
        LOG.fatal("The requested amount of memory for the TaskManagers (" + tmMemory + "MB) is more than "
                + "the largest possible YARN container: " + freeClusterMem.containerLimit);
        yarnClient.stop();
        System.exit(1);
    }
    if (jmMemory > freeClusterMem.containerLimit) {
        LOG.fatal("The requested amount of memory for the JobManager (" + jmMemory + "MB) is more than "
                + "the largest possible YARN container: " + freeClusterMem.containerLimit);
        yarnClient.stop();
        System.exit(1);
    }

    // respect custom JVM options in the YAML file
    final String javaOpts = GlobalConfiguration.getString(ConfigConstants.FLINK_JVM_OPTIONS, "");

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    String amCommand = "$JAVA_HOME/bin/java" + " -Xmx" + Utils.calculateHeapSize(jmMemory) + "M " + javaOpts;
    if (hasLog4j) {
        amCommand += " -Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
                + "/jobmanager-log4j.log\" -Dlog4j.configuration=file:log4j.properties";
    }
    amCommand += " org.apache.flink.yarn.ApplicationMaster" + " " + " 1>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stdout.log" + " 2>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stderr.log";
    amContainer.setCommands(Collections.singletonList(amCommand));

    System.err.println("amCommand=" + amCommand);

    // Set-up ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    final ApplicationId appId = appContext.getApplicationId();

    // Setup jar for ApplicationMaster
    LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    LocalResource flinkConf = Records.newRecord(LocalResource.class);
    Path remotePathJar = Utils.setupLocalResource(conf, fs, appId.toString(), localJarPath, appMasterJar,
            fs.getHomeDirectory());
    Path remotePathConf = Utils.setupLocalResource(conf, fs, appId.toString(), confPath, flinkConf,
            fs.getHomeDirectory());
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(2);
    localResources.put("flink.jar", appMasterJar);
    localResources.put("flink-conf.yaml", flinkConf);

    // setup security tokens (code from apache storm)
    final Path[] paths = new Path[3 + shipFiles.size()];
    StringBuffer envShipFileList = new StringBuffer();
    // upload ship files
    for (int i = 0; i < shipFiles.size(); i++) {
        File shipFile = shipFiles.get(i);
        LocalResource shipResources = Records.newRecord(LocalResource.class);
        Path shipLocalPath = new Path("file://" + shipFile.getAbsolutePath());
        paths[3 + i] = Utils.setupLocalResource(conf, fs, appId.toString(), shipLocalPath, shipResources,
                fs.getHomeDirectory());
        localResources.put(shipFile.getName(), shipResources);

        envShipFileList.append(paths[3 + i]);
        if (i + 1 < shipFiles.size()) {
            envShipFileList.append(',');
        }
    }

    paths[0] = remotePathJar;
    paths[1] = remotePathConf;
    paths[2] = new Path(fs.getHomeDirectory(), ".flink/" + appId.toString() + "/");
    FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
    fs.setPermission(paths[2], permission); // set permission for path.
    Utils.setTokensFor(amContainer, paths, this.conf);

    amContainer.setLocalResources(localResources);
    fs.close();

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    Utils.setupEnv(conf, appMasterEnv);
    // set configuration values
    appMasterEnv.put(Client.ENV_TM_COUNT, String.valueOf(taskManagerCount));
    appMasterEnv.put(Client.ENV_TM_CORES, String.valueOf(tmCores));
    appMasterEnv.put(Client.ENV_TM_MEMORY, String.valueOf(tmMemory));
    appMasterEnv.put(Client.FLINK_JAR_PATH, remotePathJar.toString());
    appMasterEnv.put(Client.ENV_APP_ID, appId.toString());
    appMasterEnv.put(Client.ENV_CLIENT_HOME_DIR, fs.getHomeDirectory().toString());
    appMasterEnv.put(Client.ENV_CLIENT_SHIP_FILES, envShipFileList.toString());
    appMasterEnv.put(Client.ENV_CLIENT_USERNAME, UserGroupInformation.getCurrentUser().getShortUserName());

    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(jmMemory);
    capability.setVirtualCores(1);

    appContext.setApplicationName("Flink"); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue(queue);

    // file that we write into the conf/ dir containing the jobManager address.
    final File addrFile = new File(confDirPath + CliFrontend.JOBMANAGER_ADDRESS_FILE);

    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            try {
                LOG.info("Killing the Flink-YARN application.");
                yarnClient.killApplication(appId);
                LOG.info("Deleting files in " + paths[2]);
                FileSystem shutFS = FileSystem.get(conf);
                shutFS.delete(paths[2], true); // delete conf and jar file.
                shutFS.close();
            } catch (Exception e) {
                LOG.warn("Exception while killing the YARN application", e);
            }
            try {
                addrFile.delete();
            } catch (Exception e) {
                LOG.warn("Exception while deleting the jobmanager address file", e);
            }
            LOG.info("YARN Client is shutting down");
            yarnClient.stop();
        }
    });

    LOG.info("Submitting application master " + appId);
    yarnClient.submitApplication(appContext);
    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();
    boolean told = false;
    char[] el = { '/', '|', '\\', '-' };
    int i = 0;
    while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED
            && appState != YarnApplicationState.FAILED) {
        if (!told && appState == YarnApplicationState.RUNNING) {
            System.err.println("Flink JobManager is now running on " + appReport.getHost() + ":" + jmPort);
            System.err.println("JobManager Web Interface: " + appReport.getTrackingUrl());
            // write jobmanager connect information

            PrintWriter out = new PrintWriter(addrFile);
            out.println(appReport.getHost() + ":" + jmPort);
            out.close();
            addrFile.setReadable(true, false); // readable for all.
            told = true;
        }
        if (!told) {
            System.err.print(el[i++] + "\r");
            if (i == el.length) {
                i = 0;
            }
            Thread.sleep(500); // wait for the application to switch to RUNNING
        } else {
            Thread.sleep(5000);
        }

        appReport = yarnClient.getApplicationReport(appId);
        appState = appReport.getYarnApplicationState();
    }

    LOG.info("Application " + appId + " finished with" + " state " + appState + " at "
            + appReport.getFinishTime());
    if (appState == YarnApplicationState.FAILED || appState == YarnApplicationState.KILLED) {
        LOG.warn("Application failed. Diagnostics " + appReport.getDiagnostics());
    }

}

From source file:org.apache.ignite.yarn.IgniteYarnClient.java

License:Apache License

/**
 * Main method has one mandatory parameter and one optional parameter.
 *
 * @param args Path to the application master jar (mandatory) and a path to a property file (optional).
 */
public static void main(String[] args) throws Exception {
    checkArguments(args);

    // Set path to app master jar.
    String pathAppMasterJar = args[0];

    ClusterProperties props = ClusterProperties.from(args.length == 2 ? args[1] : null);

    YarnConfiguration conf = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();

    FileSystem fs = FileSystem.get(conf);

    Path ignite;

    // Load ignite and jar
    if (props.ignitePath() == null)
        ignite = getIgnite(props, fs);
    else
        ignite = new Path(props.ignitePath());

    // Upload the jar file to HDFS.
    Path appJar = IgniteYarnUtils.copyLocalToHdfs(fs, pathAppMasterJar,
            props.igniteWorkDir() + File.separator + IgniteYarnUtils.JAR_NAME);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    amContainer.setCommands(Collections
            .singletonList(Environment.JAVA_HOME.$() + "/bin/java -Xmx512m " + ApplicationMaster.class.getName()
                    + IgniteYarnUtils.SPACE + ignite.toUri() + IgniteYarnUtils.YARN_LOG_OUT));

    // Setup jar for ApplicationMaster
    LocalResource appMasterJar = IgniteYarnUtils.setupFile(appJar, fs, LocalResourceType.FILE);

    amContainer.setLocalResources(Collections.singletonMap(IgniteYarnUtils.JAR_NAME, appMasterJar));

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = props.toEnvs();

    setupAppMasterEnv(appMasterEnv, conf);

    amContainer.setEnvironment(appMasterEnv);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials creds = new Credentials();

        String tokRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);

        if (tokRenewer == null || tokRenewer.length() == 0)
            throw new IOException("Master Kerberos principal for the RM is not set.");

        log.info("Found RM principal: " + tokRenewer);

        final Token<?> tokens[] = fs.addDelegationTokens(tokRenewer, creds);

        if (tokens != null)
            log.info("File system delegation tokens: " + Arrays.toString(tokens));

        amContainer.setTokens(IgniteYarnUtils.createTokenBuffer(creds));
    }

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(512);
    capability.setVirtualCores(1);

    // Finally, set-up ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    appContext.setApplicationName("ignition"); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue("default"); // queue

    // Submit application
    ApplicationId appId = appContext.getApplicationId();

    yarnClient.submitApplication(appContext);

    log.log(Level.INFO, "Submitted application. Application id: {0}", appId);

    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();

    while (appState == YarnApplicationState.NEW || appState == YarnApplicationState.NEW_SAVING
            || appState == YarnApplicationState.SUBMITTED || appState == YarnApplicationState.ACCEPTED) {
        TimeUnit.SECONDS.sleep(1L);

        appReport = yarnClient.getApplicationReport(appId);

        if (appState != YarnApplicationState.ACCEPTED
                && appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED)
            log.log(Level.INFO, "Application {0} is ACCEPTED.", appId);

        appState = appReport.getYarnApplicationState();
    }

    log.log(Level.INFO, "Application {0} is {1}.", new Object[] { appId, appState });
}

From source file:org.apache.reef.runtime.yarn.driver.unmanaged.UnmanagedAmTest.java

License:Apache License

@Test
public void testAmShutdown() throws IOException, YarnException {

    Assume.assumeTrue("This test requires a YARN Resource Manager to connect to",
            Boolean.parseBoolean(System.getenv("REEF_TEST_YARN")));

    final YarnConfiguration yarnConfig = new YarnConfiguration();

    // Start YARN client and register the application

    final YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(yarnConfig);
    yarnClient.start();

    final ContainerLaunchContext containerContext = Records.newRecord(ContainerLaunchContext.class);
    containerContext.setCommands(Collections.<String>emptyList());
    containerContext.setLocalResources(Collections.<String, LocalResource>emptyMap());
    containerContext.setEnvironment(Collections.<String, String>emptyMap());
    containerContext.setTokens(getTokens());

    final ApplicationSubmissionContext appContext = yarnClient.createApplication()
            .getApplicationSubmissionContext();
    appContext.setApplicationName("REEF_Unmanaged_AM_Test");
    appContext.setAMContainerSpec(containerContext);
    appContext.setUnmanagedAM(true);
    appContext.setQueue("default");

    final ApplicationId applicationId = appContext.getApplicationId();
    LOG.log(Level.INFO, "Registered YARN application: {0}", applicationId);

    yarnClient.submitApplication(appContext);

    LOG.log(Level.INFO, "YARN application submitted: {0}", applicationId);

    addToken(yarnClient.getAMRMToken(applicationId));

    // Start the AM

    final AMRMClientAsync<AMRMClient.ContainerRequest> rmClient = AMRMClientAsync.createAMRMClientAsync(1000,
            this);
    rmClient.init(yarnConfig);
    rmClient.start();

    final NMClientAsync nmClient = new NMClientAsyncImpl(this);
    nmClient.init(yarnConfig);
    nmClient.start();

    final RegisterApplicationMasterResponse registration = rmClient
            .registerApplicationMaster(NetUtils.getHostname(), -1, null);

    LOG.log(Level.INFO, "Unmanaged AM is running: {0}", registration);

    rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "Success!", null);

    LOG.log(Level.INFO, "Unregistering AM: state {0}", rmClient.getServiceState());

    // Shutdown the AM

    rmClient.stop();
    nmClient.stop();

    // Get the final application report

    final ApplicationReport appReport = yarnClient.getApplicationReport(applicationId);
    final YarnApplicationState appState = appReport.getYarnApplicationState();
    final FinalApplicationStatus finalAttemptStatus = appReport.getFinalApplicationStatus();

    LOG.log(Level.INFO, "Application {0} final attempt {1} status: {2}/{3}", new Object[] { applicationId,
            appReport.getCurrentApplicationAttemptId(), appState, finalAttemptStatus });

    Assert.assertEquals("Application must be in FINISHED state", YarnApplicationState.FINISHED, appState);
    Assert.assertEquals("Final status must be SUCCEEDED", FinalApplicationStatus.SUCCEEDED, finalAttemptStatus);

    // Shutdown YARN client

    yarnClient.stop();
}

From source file:org.apache.tez.client.TestTezClient.java

License:Apache License

TezClientForTest configure(Map<String, LocalResource> lrs, boolean isSession)
        throws YarnException, IOException {
    TezConfiguration conf = new TezConfiguration();
    conf.setBoolean(TezConfiguration.TEZ_IGNORE_LIB_URIS, true);
    conf.setBoolean(TezConfiguration.TEZ_AM_SESSION_MODE, isSession);
    TezClientForTest client = new TezClientForTest("test", conf, lrs, null);

    ApplicationId appId1 = ApplicationId.newInstance(0, 1);
    YarnClient yarnClient = mock(YarnClient.class, RETURNS_DEEP_STUBS);
    when(yarnClient.createApplication().getNewApplicationResponse().getApplicationId()).thenReturn(appId1);

    DAGClientAMProtocolBlockingPB sessionAmProxy = mock(DAGClientAMProtocolBlockingPB.class,
            RETURNS_DEEP_STUBS);

    client.sessionAmProxy = sessionAmProxy;
    client.mockTezYarnClient = new TezYarnClient(yarnClient);
    client.mockYarnClient = yarnClient;
    client.mockAppId = appId1;

    return client;
}

From source file:proxyyarn.ProxyYarn.java

License:Apache License

public boolean run() throws Exception {
    Configuration conf = new YarnConfiguration(new Configuration());

    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    log.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    log.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        log.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo("default");
    log.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            log.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    FileSystem fs = FileSystem.get(conf);
    if (!fs.getClass().equals(DistributedFileSystem.class)) {
        log.error("Expected DistributedFileSystem, but was {}", fs.getClass().getSimpleName());
        System.exit(1);
    }

    //    ApplicationClientProtocol applicationsManager;
    //    InetSocketAddress rmAddress = NetUtils.createSocketAddr(yarnConf.get(YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS));

    //    log.info("Connecting to ResourceManager at {}", rmAddress);
    //    Configuration appManagerServerConf = new Configuration(conf);
    //    YarnRPC rpc = YarnRPC.create(appManagerServerConf);
    //    ApplicationClientProtocol applicationManager = (ApplicationClientProtocol) rpc.getProxy(ApplicationClientProtocol.class, rmAddress, appManagerServerConf);

    String appName = "AccumuloProxyYarn";
    YarnClientApplication app = yarnClient.createApplication();

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setApplicationName(appName);

    //    GetNewApplicationRequest request = Records.newRecord(GetNewApplicationRequest.class);
    //    GetNewApplicationResponse response = applicationManager.getNewApplication(request);
    //    log.info("Got new ApplicationId=" + response.getApplicationId());

    //    ApplicationId appId = response.getApplicationId();

    // Create a new ApplicationSubmissionContext
    //    ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);
    // set the ApplicationId
    //    appContext.setApplicationId(appId);
    // set the application name
    //    appContext.setApplicationName(appName);

    // Create a new container launch context for the AM's container
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // Define the local resources required
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    // Lets assume the jar we need for our ApplicationMaster is available in
    // HDFS at a certain known path to us and we want to make it available to
    // the ApplicationMaster in the launched container
    Path localJarPath = new Path(
            "file:///Users/jelser/projects/accumulo-proxy-yarn/target/accumulo-proxy-yarn-0.0.1-SNAPSHOT.jar");
    Path jarPath = new Path("hdfs:///accumulo-proxy-yarn-0.0.1-SNAPSHOT.jar");
    fs.copyFromLocalFile(false, true, localJarPath, jarPath);
    FileStatus jarStatus = fs.getFileStatus(jarPath);
    LocalResource amJarRsrc = Records.newRecord(LocalResource.class);

    // Set the type of resource - file or archive
    // archives are untarred at the destination by the framework
    amJarRsrc.setType(LocalResourceType.FILE);

    // Set visibility of the resource
    // Setting to most private option i.e. this file will only
    // be visible to this instance of the running application
    amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);

    // Set the location of resource to be copied over into the
    // working directory
    amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(jarPath));

    // Set timestamp and length of file so that the framework
    // can do basic sanity checks for the local resource
    // after it has been copied over to ensure it is the same
    // resource the client intended to use with the application
    amJarRsrc.setTimestamp(jarStatus.getModificationTime());
    amJarRsrc.setSize(jarStatus.getLen());

    // The framework will create a symlink called AppMaster.jar in the
    // working directory that will be linked back to the actual file.
    // The ApplicationMaster, if needs to reference the jar file, would
    // need to use the symlink filename.
    localResources.put("AppMaster.jar", amJarRsrc);

    // Set the local resources into the launch context
    amContainer.setLocalResources(localResources);

    // Set up the environment needed for the launch context
    Map<String, String> env = new HashMap<String, String>();

    // For example, we could setup the classpath needed.
    // Assuming our classes or jars are available as local resources in the
    // working directory from which the command will be run, we need to append
    // "." to the path.
    // By default, all the hadoop specific classpaths will already be available
    // in $CLASSPATH, so we should be careful not to overwrite it.
    String classPathEnv = "$CLASSPATH:./*:/Users/jelser/projects/accumulo-proxy-yarn/target/lib/*";
    env.put("CLASSPATH", classPathEnv);
    amContainer.setEnvironment(env);

    // Construct the command to be executed on the launched container
    String command = "${JAVA_HOME}" + "/bin/java" + " proxyyarn.ProxyYarnAppMaster 1>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout" + " 2>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";

    List<String> commands = new ArrayList<String>();
    commands.add(command);
    // add additional commands if needed

    // Set the command array into the container spec
    amContainer.setCommands(commands);

    // Define the resource requirements for the container
    // For now, YARN only supports memory so we set the memory
    // requirements.
    // If the process takes more than its allocated memory, it will
    // be killed by the framework.
    // Memory being requested for should be less than max capability
    // of the cluster and all asks should be a multiple of the min capability.
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(256);
    appContext.setResource(capability);

    // Create the request to send to the ApplicationsManager
    //    SubmitApplicationRequest appRequest = Records.newRecord(SubmitApplicationRequest.class);
    //    appRequest.setApplicationSubmissionContext(appContext);

    // Submit the application to the ApplicationsManager
    // Ignore the response as either a valid response object is returned on
    // success or an exception thrown to denote the failure
    //    applicationManager.submitApplication(appRequest);

    // Set the container launch content into the ApplicationSubmissionContext
    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide? 
    pri.setPriority(0);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue("default");

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on success 
    // or an exception thrown to denote some form of a failure
    log.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    return monitorApplication(yarnClient, appId);
    /*    Thread.sleep(200);
                
        boolean running = false;
        while(true) {
          GetApplicationReportRequest reportRequest = Records.newRecord(GetApplicationReportRequest.class);
          reportRequest.setApplicationId(appId);
          GetApplicationReportResponse reportResponse = applicationManager.getApplicationReport(reportRequest);
          ApplicationReport report = reportResponse.getApplicationReport();
                  
          log.info(report.toString());
                  
          YarnApplicationState state = report.getYarnApplicationState();
          switch (state) {
            case NEW:
            case NEW_SAVING:
            case SUBMITTED:
            case ACCEPTED:
              log.info("State: {}", state);
              break;
            case RUNNING:
              log.info("Running application");
              running = true;
              break;
            case FINISHED:
            case FAILED:
            case KILLED:
              log.info("State: {}", state);
              return;
            default:
              log.info("Unknown state: {}", state);
              return;
          }
                  
          if (!running) {
            Thread.sleep(1000);
          }
        }*/

}

From source file:runtime.starter.MPJYarnClient.java

License:Open Source License

public void run() throws Exception {

    Map<String, String> map = System.getenv();

    try {
        mpjHomeDir = map.get("MPJ_HOME");

        if (mpjHomeDir == null) {
            throw new Exception("[MPJRun.java]:MPJ_HOME environment found..");
        }
    } catch (Exception exc) {
        System.out.println("[MPJRun.java]:" + exc.getMessage());
        exc.printStackTrace();
        return;
    }

    // Copy the application master jar to HDFS
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(conf);
    /*
          Path dataset = new Path(fs.getHomeDirectory(),"/dataset");
          FileStatus datasetFile = fs.getFileStatus(dataset);
                 
          BlockLocation myBlocks [] = fs.getFileBlockLocations(datasetFile,0,datasetFile.getLen());
          for(BlockLocation b : myBlocks){
            System.out.println("\n--------------------");
            System.out.println("Length "+b.getLength());
            for(String host : b.getHosts()){
              System.out.println("host "+host);
            }
          }
    */
    Path source = new Path(mpjHomeDir + "/lib/mpj-app-master.jar");
    String pathSuffix = hdfsFolder + "mpj-app-master.jar";
    Path dest = new Path(fs.getHomeDirectory(), pathSuffix);

    if (debugYarn) {
        logger.info("Uploading mpj-app-master.jar to: " + dest.toString());
    }

    fs.copyFromLocalFile(false, true, source, dest);
    FileStatus destStatus = fs.getFileStatus(dest);

    Path wrapperSource = new Path(mpjHomeDir + "/lib/mpj-yarn-wrapper.jar");
    String wrapperSuffix = hdfsFolder + "mpj-yarn-wrapper.jar";
    Path wrapperDest = new Path(fs.getHomeDirectory(), wrapperSuffix);

    if (debugYarn) {
        logger.info("Uploading mpj-yarn-wrapper.jar to: " + wrapperDest.toString());
    }

    fs.copyFromLocalFile(false, true, wrapperSource, wrapperDest);

    Path userJar = new Path(jarPath);
    String userJarSuffix = hdfsFolder + "user-code.jar";
    Path userJarDest = new Path(fs.getHomeDirectory(), userJarSuffix);

    if (debugYarn) {
        logger.info("Uploading user-code.jar to: " + userJarDest.toString());
    }

    fs.copyFromLocalFile(false, true, userJar, userJarDest);

    YarnConfiguration conf = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    if (debugYarn) {
        YarnClusterMetrics metrics = yarnClient.getYarnClusterMetrics();
        logger.info("\nNodes Information");
        logger.info("Number of NM: " + metrics.getNumNodeManagers() + "\n");

        List<NodeReport> nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
        for (NodeReport n : nodeReports) {
            logger.info("NodeId: " + n.getNodeId());
            logger.info("RackName: " + n.getRackName());
            logger.info("Total Memory: " + n.getCapability().getMemory());
            logger.info("Used Memory: " + n.getUsed().getMemory());
            logger.info("Total vCores: " + n.getCapability().getVirtualCores());
            logger.info("Used vCores: " + n.getUsed().getVirtualCores() + "\n");
        }
    }

    logger.info("Creating server socket at HOST " + serverName + " PORT " + serverPort + " \nWaiting for " + np
            + " processes to connect...");

    // Creating a server socket for incoming connections
    try {
        servSock = new ServerSocket(serverPort);
        infoSock = new ServerSocket();
        TEMP_PORT = findPort(infoSock);
    } catch (Exception e) {
        e.printStackTrace();
    }

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();

    int maxMem = appResponse.getMaximumResourceCapability().getMemory();

    if (debugYarn) {
        logger.info("Max memory capability resources in cluster: " + maxMem);
    }

    if (amMem > maxMem) {
        amMem = maxMem;
        logger.info("AM memory specified above threshold of cluster "
                + "Using maximum memory for AM container: " + amMem);
    }
    int maxVcores = appResponse.getMaximumResourceCapability().getVirtualCores();

    if (debugYarn) {
        logger.info("Max vCores capability resources in cluster: " + maxVcores);
    }

    if (amCores > maxVcores) {
        amCores = maxVcores;
        logger.info("AM virtual cores specified above threshold of cluster "
                + "Using maximum virtual cores for AM container: " + amCores);
    }

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    List<String> commands = new ArrayList<String>();
    commands.add("$JAVA_HOME/bin/java");
    commands.add("-Xmx" + amMem + "m");
    commands.add("runtime.starter.MPJAppMaster");
    commands.add("--np");
    commands.add(String.valueOf(np));
    commands.add("--serverName");
    commands.add(serverName); //server name
    commands.add("--ioServerPort");
    commands.add(Integer.toString(serverPort)); //server port
    commands.add("--deviceName");
    commands.add(deviceName); //device name
    commands.add("--className");
    commands.add(className); //class name
    commands.add("--wdir");
    commands.add(workingDirectory); //wdir
    commands.add("--psl");
    commands.add(Integer.toString(psl)); //protocol switch limit
    commands.add("--wireUpPort");
    commands.add(String.valueOf(TEMP_PORT)); //for sharing ports & rank
    commands.add("--wrapperPath");
    commands.add(wrapperDest.toString());//MPJYarnWrapper.jar HDFS path
    commands.add("--userJarPath");
    commands.add(userJarDest.toString());//User Jar File HDFS path
    commands.add("--mpjContainerPriority");
    commands.add(mpjContainerPriority);// priority for mpj containers 
    commands.add("--containerMem");
    commands.add(containerMem);
    commands.add("--containerCores");
    commands.add(containerCores);

    if (debugYarn) {
        commands.add("--debugYarn");
    }

    if (appArgs != null) {

        commands.add("--appArgs");

        for (int i = 0; i < appArgs.length; i++) {
            commands.add(appArgs[i]);
        }
    }

    amContainer.setCommands(commands); //set commands

    // Setup local Resource for ApplicationMaster
    LocalResource appMasterJar = Records.newRecord(LocalResource.class);

    appMasterJar.setResource(ConverterUtils.getYarnUrlFromPath(dest));
    appMasterJar.setSize(destStatus.getLen());
    appMasterJar.setTimestamp(destStatus.getModificationTime());
    appMasterJar.setType(LocalResourceType.ARCHIVE);
    appMasterJar.setVisibility(LocalResourceVisibility.APPLICATION);

    amContainer.setLocalResources(Collections.singletonMap("mpj-app-master.jar", appMasterJar));

    // Setup CLASSPATH for ApplicationMaster
    // Setting up the environment
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    setupAppMasterEnv(appMasterEnv);
    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMem);
    capability.setVirtualCores(amCores);

    // Finally, set-up ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();

    appContext.setApplicationName(appName);
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue(yarnQueue); // queue

    Priority priority = Priority.newInstance(amPriority);
    appContext.setPriority(priority);

    ApplicationId appId = appContext.getApplicationId();

    //Adding ShutDown Hook
    Runtime.getRuntime().addShutdownHook(new KillYarnApp(appId, yarnClient));

    // Submit application
    System.out.println("Submitting Application: " + appContext.getApplicationName() + "\n");

    try {
        isRunning = true;
        yarnClient.submitApplication(appContext);
    } catch (Exception exp) {
        System.err.println("Error Submitting Application");
        exp.printStackTrace();
    }

    // np = number of processes, +1 for the Application Master container
    IOMessagesThread[] ioThreads = new IOMessagesThread[np + 1];

    peers = new String[np];
    socketList = new Vector<Socket>();
    int wport = 0;
    int rport = 0;
    int rank = 0;

    // np + 1 IOThreads
    for (int i = 0; i < (np + 1); i++) {
        try {
            sock = servSock.accept();

            //start IO thread to read STDOUT and STDERR from wrappers
            IOMessagesThread io = new IOMessagesThread(sock);
            ioThreads[i] = io;
            ioThreads[i].start();
        } catch (Exception e) {
            System.err.println("Error accepting connection from peer socket..");
            e.printStackTrace();
        }
    }

    // Loop to read port numbers from Wrapper.java processes
    // and to create WRAPPER_INFO (containing all IPs and ports)
    String WRAPPER_INFO = "#Peer Information";
    for (int i = np; i > 0; i--) {
        try {
            sock = infoSock.accept();

            DataOutputStream out = new DataOutputStream(sock.getOutputStream());
            DataInputStream in = new DataInputStream(sock.getInputStream());
            if (in.readUTF().startsWith("Sending Info")) {
                wport = in.readInt();
                rport = in.readInt();
                rank = in.readInt();
                peers[rank] = ";" + sock.getInetAddress().getHostAddress() + "@" + rport + "@" + wport + "@"
                        + rank;
                socketList.add(sock);
            }
        } catch (Exception e) {
            System.err.println("[MPJYarnClient.java]: Error accepting" + " connection from peer socket!");
            e.printStackTrace();
        }
    }

    for (int i = 0; i < np; i++) {
        WRAPPER_INFO += peers[i];
    }
    // Loop to broadcast WRAPPER_INFO to all Wrappers
    for (int i = np; i > 0; i--) {
        try {
            sock = socketList.get(np - i);
            DataOutputStream out = new DataOutputStream(sock.getOutputStream());

            out.writeUTF(WRAPPER_INFO);
            out.flush();

            sock.close();
        } catch (Exception e) {
            System.err.println("[MPJYarnClient.java]: Error closing" + " connection from peer socket..");
            e.printStackTrace();
        }
    }

    try {
        infoSock.close();
    } catch (IOException exp) {
        exp.printStackTrace();
    }

    // wait for all IO Threads to complete 
    for (int i = 0; i < (np + 1); i++) {
        ioThreads[i].join();
    }
    isRunning = true;

    System.out.println("\nApplication Statistics!");
    while (true) {
        appReport = yarnClient.getApplicationReport(appId);
        appState = appReport.getYarnApplicationState();
        fStatus = appReport.getFinalApplicationStatus();
        if (appState == YarnApplicationState.FINISHED) {
            isRunning = false;
            System.out.println("State: " + fStatus);
            break;
        } else if (appState == YarnApplicationState.KILLED) {
            isRunning = false;
            System.out.println("State: " + appState);
            break;
        } else if (appState == YarnApplicationState.FAILED) {
            isRunning = false;
            System.out.println("State: " + appState);
            break;
        }
        Thread.sleep(100);
    }

    try {

        if (debugYarn) {
            logger.info("Cleaning the files from hdfs: ");
            logger.info("1) " + dest.toString());
            logger.info("2) " + wrapperDest.toString());
            logger.info("3) " + userJarDest.toString());
        }

        fs.delete(dest);
        fs.delete(wrapperDest);
        fs.delete(userJarDest);
    } catch (IOException exp) {
        exp.printStackTrace();
    }
    System.out.println("Application ID: " + appId + "\n" + "Application User: " + appReport.getUser() + "\n"
            + "RM Queue: " + appReport.getQueue() + "\n" + "Start Time: " + appReport.getStartTime() + "\n"
            + "Finish Time: " + appReport.getFinishTime());
}

From source file:uk.gov.gchq.gaffer.slider.util.AppConfigGenerator.java

License:Apache License

private AvailableResources getYarnResources() throws IOException, YarnException {
    final Configuration config = new Configuration();
    final YarnClient yarn = YarnClient.createYarnClient();
    yarn.init(config);
    yarn.start();

    // Query YARN to find out the largest container it is capable of scheduling
    final YarnClientApplication app = yarn.createApplication();
    final Resource resources = app.getNewApplicationResponse().getMaximumResourceCapability();

    // Also find out how many nodes there are in the cluster by asking for the number of registered Node Managers
    final YarnClusterMetrics metrics = yarn.getYarnClusterMetrics();

    yarn.close();

    return new AvailableResources(resources.getVirtualCores(), resources.getMemory(),
            metrics.getNumNodeManagers());
}

From source file:yarnkit.client.YarnClientService.java

License:Apache License

@Nonnull
private ApplicationSubmissionContext makeApplicationContext(@Nonnull YarnClient yarnClient)
        throws YarnkitException {
    final YarnClientApplication clientApp;
    try {
        clientApp = yarnClient.createApplication();
    } catch (Exception e) {
        throw new YarnkitException("Failed to initialize YarnClient", e);
    }
    GetNewApplicationResponse appResp = clientApp.getNewApplicationResponse();

    ApplicationSubmissionContext appContext = clientApp.getApplicationSubmissionContext();
    appContext.setApplicationName(parameters.getApplicationName());
    appContext.setQueue(parameters.getQueue());

    final ByteBuffer serializedTokens;
    try {
        serializedTokens = getSecurityToken(conf);
    } catch (IOException e) {
        throw new YarnkitException("Failed to get a security token", e);
    }
    ContainerLaunchContextFactory clcFactory = new ContainerLaunchContextFactory(
            appResp.getMaximumResourceCapability(), serializedTokens);

    ApplicationId applicationId = appContext.getApplicationId();
    ContainerLaunchParameters appMasterParams = parameters.getApplicationMasterParameters(applicationId);
    appContext.setResource(clcFactory.createResource(appMasterParams));
    appContext.setPriority(clcFactory.createPriority(appMasterParams));
    ContainerLaunchContext clc = clcFactory.create(appMasterParams);
    appContext.setAMContainerSpec(clc);

    return appContext;
}