Example usage for org.apache.hadoop.yarn.api ApplicationConstants LOG_DIR_EXPANSION_VAR

Introduction

On this page you can find example usage for org.apache.hadoop.yarn.api.ApplicationConstants.LOG_DIR_EXPANSION_VAR.

Prototype

public static final String LOG_DIR_EXPANSION_VAR = "&lt;LOG_DIR&gt;";

Document

The temporary environment variable for the container log directory. It is a placeholder that the NodeManager replaces with the path of the actual container log directory at container launch, so it can be used in launch commands before that path is known.
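
Every example below follows the same pattern: the placeholder is spliced into a container launch command, typically as shell redirections for stdout and stderr, and the NodeManager expands it when the container starts. Here is a minimal, self-contained sketch of that pattern (the class name LogDirExample is hypothetical; it assumes only the hadoop-yarn-api and hadoop-yarn-common jars on the classpath):

import java.util.Collections;

import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.util.Records;

public class LogDirExample {
    public static ContainerLaunchContext buildContext() {
        ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
        // LOG_DIR_EXPANSION_VAR is a placeholder, not a real path; the
        // NodeManager substitutes the container's log directory (one of
        // yarn.nodemanager.log-dirs) before running the command.
        ctx.setCommands(Collections.singletonList(
                "/bin/date"
                        + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
                        + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));
        return ctx;
    }
}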

Usage

From source file:gobblin.yarn.YarnService.java

License:Apache License

private String buildContainerCommand(Container container, String helixInstanceName) {
    String containerProcessName = GobblinYarnTaskRunner.class.getSimpleName();
    return new StringBuilder().append(ApplicationConstants.Environment.JAVA_HOME.$()).append("/bin/java")
            .append(" -Xmx").append(container.getResource().getMemory()).append("M").append(" ")
            .append(JvmUtils.formatJvmArguments(this.containerJvmArgs)).append(" ")
            .append(GobblinYarnTaskRunner.class.getName()).append(" --")
            .append(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME).append(" ")
            .append(this.applicationName).append(" --")
            .append(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME).append(" ")
            .append(helixInstanceName).append(" 1>").append(ApplicationConstants.LOG_DIR_EXPANSION_VAR)
            .append(File.separator).append(containerProcessName).append(".").append(ApplicationConstants.STDOUT)
            .append(" 2>").append(ApplicationConstants.LOG_DIR_EXPANSION_VAR).append(File.separator)
            .append(containerProcessName).append(".").append(ApplicationConstants.STDERR).toString();
}

From source file:hadoop.yarn.distributedshell.DshellClient.java

License:Apache License

/**
 * Main run function for the client.
 * 
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {

    LOG.info("Running Client");
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask
    // if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource
    // manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capabililty of resources in this cluster " + maxVCores);

    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value."
                + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();

    appContext.setKeepContainersAcrossApplicationAttempts(keepContainers);
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of
    // the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(conf);
    addToLocalResources(fs, appMasterJar, appMasterJarPath, appId.toString(), localResources, null);

    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        addToLocalResources(fs, log4jPropFile, log4jPath, appId.toString(), localResources, null);
    }

    // The shell script has to be made available on the final container(s)
    // where it will be executed.
    // To do this, we need to first copy into the filesystem that is visible
    // to the yarn framework.
    // We do not need to set this as a local resource for the application
    // master as the application master does not need it.
    String hdfsShellScriptLocation = "";
    long hdfsShellScriptLen = 0;
    long hdfsShellScriptTimestamp = 0;
    if (!shellScriptPath.isEmpty()) {
        Path shellSrc = new Path(shellScriptPath);
        String shellPathSuffix = appName + "/" + appId.toString() + "/" + SCRIPT_PATH;
        Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix);
        fs.copyFromLocalFile(false, true, shellSrc, shellDst);
        hdfsShellScriptLocation = shellDst.toUri().toString();
        FileStatus shellFileStatus = fs.getFileStatus(shellDst);
        hdfsShellScriptLen = shellFileStatus.getLen();
        hdfsShellScriptTimestamp = shellFileStatus.getModificationTime();
    }

    if (!shellCommand.isEmpty()) {
        addToLocalResources(fs, null, shellCommandPath, appId.toString(), localResources, shellCommand);
    }

    if (shellArgs.length > 0) {
        addToLocalResources(fs, null, shellArgsPath, appId.toString(), localResources,
                StringUtils.join(shellArgs, " "));
    }
    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    // amContainer.setContainerTokens(containerToken);

    // Set the env variables to be setup in the env where the application
    // master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // put location of shell script into env
    // using the env info, the application master will create the correct
    // local resource for the
    // eventual containers that will be launched to execute the shell
    // scripts
    env.put(DshellDSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION, hdfsShellScriptLocation);
    env.put(DshellDSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP, Long.toString(hdfsShellScriptTimestamp));
    env.put(DshellDSConstants.DISTRIBUTEDSHELLSCRIPTLEN, Long.toString(hdfsShellScriptLen));

    // ======================================== container jars
    if (containerJarPaths.length != 0) {
        for (int i = 0; i < containerJarPaths.length; i++) {
            String hdfsJarLocation = "";
            String[] jarNameSplit = containerJarPaths[i].split("/");
            String jarName = jarNameSplit[jarNameSplit.length - 1];

            long hdfsJarLen = 0;
            long hdfsJarTimestamp = 0;
            if (!containerJarPaths[i].isEmpty()) {
                Path jarSrc = new Path(containerJarPaths[i]);
                String jarPathSuffix = appName + "/" + appId.toString() + "/" + jarName;
                Path jarDst = new Path(fs.getHomeDirectory(), jarPathSuffix);
                fs.copyFromLocalFile(false, true, jarSrc, jarDst);
                hdfsJarLocation = jarDst.toUri().toString();
                FileStatus jarFileStatus = fs.getFileStatus(jarDst);
                hdfsJarLen = jarFileStatus.getLen();
                hdfsJarTimestamp = jarFileStatus.getModificationTime();
                env.put(DshellDSConstants.DISTRIBUTEDJARLOCATION + i, hdfsJarLocation);
                env.put(DshellDSConstants.DISTRIBUTEDJARTIMESTAMP + i, Long.toString(hdfsJarTimestamp));
                env.put(DshellDSConstants.DISTRIBUTEDJARLEN + i, Long.toString(hdfsJarLen));
            }
        }
    }
    // ======================================== container jars

    // ======================================== archives
    if (containerArchivePaths.length != 0) {
        for (int i = 0; i < containerArchivePaths.length; i++) {
            String hdfsArchiveLocation = "";
            String[] archiveNameSplit = containerArchivePaths[i].split("/");
            String archiveName = archiveNameSplit[archiveNameSplit.length - 1];

            long hdfsArchiveLen = 0;
            long hdfsArchiveTimestamp = 0;
            if (!containerArchivePaths[i].isEmpty()) {
                Path archiveSrc = new Path(containerArchivePaths[i]);
                String archivePathSuffix = appName + "/" + appId.toString() + "/" + archiveName;
                Path archiveDst = new Path(fs.getHomeDirectory(), archivePathSuffix);
                fs.copyFromLocalFile(false, true, archiveSrc, archiveDst);
                hdfsArchiveLocation = archiveDst.toUri().toString();
                FileStatus archiveFileStatus = fs.getFileStatus(archiveDst);
                hdfsArchiveLen = archiveFileStatus.getLen();
                hdfsArchiveTimestamp = archiveFileStatus.getModificationTime();
                env.put(DshellDSConstants.DISTRIBUTEDARCHIVELOCATION + i, hdfsArchiveLocation);
                env.put(DshellDSConstants.DISTRIBUTEDARCHIVETIMESTAMP + i, Long.toString(hdfsArchiveTimestamp));
                env.put(DshellDSConstants.DISTRIBUTEDARCHIVELEN + i, Long.toString(hdfsArchiveLen));
            }
        }
    }
    // ======================================== archives

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$$())
            .append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) {
        classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--container_vcores " + String.valueOf(containerVirtualCores));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    vargs.add("--priority " + String.valueOf(shellCmdPriority));

    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }
    if (debugFlag) {
        vargs.add("--debug");
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements
    // For now, both memory and vcores are supported, so we set memory and
    // vcores requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    capability.setVirtualCores(amVCores);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp =
    // applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on
    // success
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    // TODO
    // Try submitting the same request again
    // app submission failure?

    // Monitor the application
    return monitorApplication(appId);

}

From source file:hws.core.JobClient.java

License:Apache License

public void run(String[] args) throws Exception {
    //final String command = args[0];
    //final int n = Integer.valueOf(args[1]);
    //final Path jarPath = new Path(args[2]);
    Options options = new Options();
    /*options.addOption(OptionBuilder.withLongOpt("jar")
                           .withDescription( "Jar path" )
                           .hasArg()/*from  w  ww  .ja v a 2s. com*/
                           .withArgName("JarPath")
                           .create());
    options.addOption(OptionBuilder.withLongOpt("scheduler")
                           .withDescription( "Scheduler class name" )
                           .hasArg()
                           .withArgName("ClassName")
                           .create());
    */options.addOption(OptionBuilder.withLongOpt("zk-servers")
            .withDescription("List of the ZooKeeper servers").hasArgs().withArgName("zkAddrs").create("zks"));
    //options.addOption("l", "list", false, "list modules");
    options.addOption(OptionBuilder.withLongOpt("load").withDescription("load new modules").hasArgs()
            .withArgName("XMLFiles").create());
    /*options.addOption(OptionBuilder.withLongOpt( "remove" )
                           .withDescription( "remove modules" )
                           .hasArgs()
                           .withArgName("ModuleNames")
                           .create("rm"));
    */CommandLineParser parser = new BasicParser();
    CommandLine cmd = parser.parse(options, args);

    //Path jarPath = null;
    //String schedulerClassName = null;
    String[] xmlFileNames = null;
    //String []moduleNames = null;
    String zksArgs = "";
    String[] zkServers = null;
    if (cmd.hasOption("zks")) {
        zksArgs = "-zks";
        zkServers = cmd.getOptionValues("zks");
        for (String zks : zkServers) {
            zksArgs += " " + zks;
        }
    }

    //Logger setup
    //FSDataOutputStream writer = FileSystem.get(conf).create(new Path("hdfs:///hws/apps/"+appIdStr+"/logs/jobClient.log"));
    //Logger.addOutputStream(writer);

    /*if(cmd.hasOption("l")){
       LOG.warn("Argument --list (-l) is not supported yet.");
    }
    if(cmd.hasOption("jar")){
       jarPath = new Path(cmd.getOptionValue("jar")); 
    }
    if(cmd.hasOption("scheduler")){
       schedulerClassName = cmd.getOptionValue("scheduler");
    }*/
    if (cmd.hasOption("load")) {
        xmlFileNames = cmd.getOptionValues("load");
    } /*else if(cmd.hasOption("rm")){
        moduleNames = cmd.getOptionValues("rm");
      }*/

    //LOG.info("Jar-Path "+jarPath);
    if (xmlFileNames != null) {
        String paths = "";
        for (String path : xmlFileNames) {
            paths += path + "; ";
        }
        LOG.info("Load XMLs: " + paths);
    }
    /*if(moduleNames!=null){
       String modules = "";
       for(String module: moduleNames){
          modules += module+"; ";
       }
       LOG.info("remove: "+modules);
    }*/
    // Create yarnClient
    YarnConfiguration conf = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();

    System.out.println("LOG Path: " + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();

    ZkClient zk = new ZkClient(zkServers[0]); //TODO select a ZooKeeper server
    if (!zk.exists("/hadoop-watershed")) {
        zk.createPersistent("/hadoop-watershed", "");
    }
    zk.createPersistent("/hadoop-watershed/" + appId.toString(), "");

    FileSystem fs = FileSystem.get(conf);

    LOG.info("Collecting files to upload");
    fs.mkdirs(new Path("hdfs:///hws/apps/" + appId.toString()));
    fs.mkdirs(new Path("hdfs:///hws/apps/" + appId.toString() + "/logs"));

    ModulePipeline modulePipeline = ModulePipeline.fromXMLFiles(xmlFileNames);
    LOG.info("Uploading files to HDFS");
    for (String path : modulePipeline.files()) {
        uploadFile(fs, new File(path), appId);
    }
    LOG.info("Upload finished");

    String modulePipelineJson = Json.dumps(modulePipeline);
    String modulePipelineBase64 = Base64.encodeBase64String(StringUtils.getBytesUtf8(modulePipelineJson))
            .replaceAll("\\s", "");
    LOG.info("ModulePipeline: " + modulePipelineJson);
    //LOG.info("ModulePipeline: "+modulePipelineBase64);
    amContainer.setCommands(Collections.singletonList("$JAVA_HOME/bin/java" + " -Xmx256M"
            + " hws.core.JobMaster" + " -aid " + appId.toString() + " --load " + modulePipelineBase64 + " "
            + zksArgs + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout" + " 2>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));

    // Setup jar for ApplicationMaster
    //LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    //setupAppMasterJar(jarPath, appMasterJar);
    //amContainer.setLocalResources(Collections.singletonMap("hws.jar", appMasterJar));

    LOG.info("Listing files for YARN-Watershed");
    RemoteIterator<LocatedFileStatus> filesIterator = fs.listFiles(new Path("hdfs:///hws/bin/"), false);
    Map<String, LocalResource> resources = new HashMap<String, LocalResource>();
    LOG.info("Files setup as resource");
    while (filesIterator.hasNext()) {
        LocatedFileStatus fileStatus = filesIterator.next();
        // Setup jar for ApplicationMaster
        LocalResource containerJar = Records.newRecord(LocalResource.class);
        ContainerUtils.setupContainerJar(fs, fileStatus.getPath(), containerJar);
        resources.put(fileStatus.getPath().getName(), containerJar);
    }
    LOG.info("container resource setup");
    amContainer.setLocalResources(resources);

    fs.close(); //closing FileSystem interface

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    ContainerUtils.setupContainerEnv(appMasterEnv, conf);
    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(256);
    capability.setVirtualCores(1);

    // Finally, set-up ApplicationSubmissionContext for the application
    //ApplicationSubmissionContext appContext = 
    //app.getApplicationSubmissionContext();
    appContext.setApplicationName("Hadoop-Watershed"); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue("default"); // queue 

    // Submit application
    LOG.info("Submitting application " + appId);
    yarnClient.submitApplication(appContext);

    LOG.info("Waiting for containers to finish");
    zk.waitUntilExists("/hadoop-watershed/" + appId.toString() + "/done", TimeUnit.MILLISECONDS, 250);
    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();
    while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED
            && appState != YarnApplicationState.FAILED) {
        Thread.sleep(100);
        appReport = yarnClient.getApplicationReport(appId);
        appState = appReport.getYarnApplicationState();
    }

    System.out.println("Application " + appId + " finished with" + " state " + appState + " at "
            + appReport.getFinishTime());

    System.out.println("deleting " + appId.toString() + " znode");
    zk.deleteRecursive("/hadoop-watershed/" + appId.toString()); //TODO remove app folder from ZooKeeper
}

From source file:hws.core.JobMaster.java

License:Apache License

public void onContainersAllocated(List<Container> containers) {
    FileSystem fs = null;
    try {
        fs = FileSystem.get(getConfiguration());
    } catch (IOException e) {
        Logger.severe(e.toString());
    }
    for (Container container : containers) {
        try {
            //PrintWriter out = new PrintWriter(new BufferedWriter(new FileWriter("/home/yarn/rcor/yarn/app-master-log.out")));
            Logger.info("Selecting instance to container: " + container.getId().toString());
            // given the container, choose the instance whose input data is closest to that container
            InstanceInfo instanceInfo = null;
            if (instances.get(modulePipeline.get(currentModuleIndex).filterInfo().name())
                    .instancesBuilt() >= modulePipeline.get(currentModuleIndex).numFilterInstances()) {
                currentModuleIndex++;
            }
            if (currentModuleIndex < modulePipeline.size()) {
                instanceInfo = instances.get(modulePipeline.get(currentModuleIndex).filterInfo().name())
                        .build();
            } else {
                break;
            }

            String instanceInfoBase64 = Base64
                    .encodeBase64String(StringUtils.getBytesUtf8(Json.dumps(instanceInfo)))
                    .replaceAll("\\s", "");
            // Launch container by create ContainerLaunchContext
            ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
            ctx.setCommands(Collections.singletonList(
                    "$JAVA_HOME/bin/java -Xmx256M hws.core.InstanceDriver --load " + instanceInfoBase64
                            + " -aid " + this.appIdStr + " -cid " + container.getId().toString() + " "
                            + this.zksArgs + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
                            + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));

            Logger.info("Listing YARN-Watershed files for app-id: " + this.appIdStr);
            RemoteIterator<LocatedFileStatus> files = fs.listFiles(new Path("hdfs:///hws/bin/"), false);
            Map<String, LocalResource> resources = new HashMap<String, LocalResource>();
            Logger.info("Setup YARN-Watershed files as resources");
            while (files.hasNext()) {
                LocatedFileStatus fileStatus = files.next();
                // Setup jar for ApplicationMaster
                LocalResource containerJar = Records.newRecord(LocalResource.class);
                ContainerUtils.setupContainerJar(fs, fileStatus.getPath(), containerJar);
                resources.put(fileStatus.getPath().getName(), containerJar);
            }

            Logger.info("Listing application files for app-id: " + this.appIdStr);
            files = fs.listFiles(new Path("hdfs:///hws/apps/" + this.appIdStr + "/"), false);
            Logger.info("Setup application files as resources");
            while (files.hasNext()) {
                LocatedFileStatus fileStatus = files.next();
                // Setup jar for ApplicationMaster
                LocalResource containerJar = Records.newRecord(LocalResource.class);
                ContainerUtils.setupContainerJar(fs, fileStatus.getPath(), containerJar);
                resources.put(fileStatus.getPath().getName(), containerJar);
            }
            Logger.info("container resource setup");
            ctx.setLocalResources(resources);

            Logger.info("Environment setup");
            // Setup CLASSPATH for ApplicationMaster
            Map<String, String> containerEnv = new HashMap<String, String>();
            ContainerUtils.setupContainerEnv(containerEnv, getConfiguration());
            ctx.setEnvironment(containerEnv);
            Logger.info("Starting containers");

            Logger.info("[AM] Launching container " + container.getId());
            nmClient.startContainer(container, ctx);
            Logger.info("Container started!");
            /*String znode = "/hadoop-watershed/"+this.appIdStr+"/"+instanceInfo.filterInfo().name()+"/"+instanceInfo.instanceId();
            out.println("Saving instance znode: "+znode);
            out.flush();
            zk.createPersistent(znode, "");
            zk.createPersistent(znode+"/host", container.getNodeId().getHost());
            out.println("saved location: "+container.getNodeId().getHost());
            out.flush();
            */
            if (instances.get(modulePipeline.get(currentModuleIndex).filterInfo().name())
                    .instancesBuilt() >= modulePipeline.get(currentModuleIndex).numFilterInstances()) {
                Logger.info("Starting via ZooKeeper filter: " + instanceInfo.filterInfo().name());
                zk.createPersistent("/hadoop-watershed/" + this.appIdStr + "/"
                        + instanceInfo.filterInfo().name() + "/start", "");
            }
            //out.close();
        } catch (Exception e) {
            Logger.severe("[AM] Error launching container " + container.getId() + " " + e);
        }
    }
    try {
        fs.close();
    } catch (IOException e) {
        Logger.severe(e.toString());
    }
}

From source file:io.amient.yarn1.YarnContainerContext.java

License:Open Source License

private List<String> prepareCommands() {
    String command = "java " + jvmArgs + " -cp $CLASSPATH:./" + jarName + " " + mainClassName + " "
            + StringUtils.join(" ", args);
    command += " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout";
    command += " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";
    log.info("$COMMAND = " + command);
    return Arrays.asList(command);
}

From source file:io.hops.hopsworks.common.jobs.flink.AbstractYarnClusterDescriptor.java

License:Apache License

protected ContainerLaunchContext setupApplicationMasterContainer(boolean hasLogback, boolean hasLog4j) {
    // ------------------ Prepare Application Master Container  ------------------------------

    // respect custom JVM options in the YAML file
    final String javaOpts = flinkConfiguration.getString(ConfigConstants.FLINK_JVM_OPTIONS, "");

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    String amCommand = "$JAVA_HOME/bin/java" + " -Xmx"
            + Utils.calculateHeapSize(jobManagerMemoryMb, flinkConfiguration) + "M " + javaOpts;

    if (hasLogback || hasLog4j) {
        amCommand += " -Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.log\"";

        if (hasLogback) {
            amCommand += " -Dlogback.configurationFile=file:" + CONFIG_FILE_LOGBACK_NAME;
        }

        if (hasLog4j) {
            amCommand += " -Dlog4j.configuration=file:" + CONFIG_FILE_LOG4J_NAME;
        }
    }
    //Loop through Hopsworks properties and add them to env
    for (String envProperty : hopsworksParams) {
        amCommand += " " + envProperty.replace("\'", "");
    }

    amCommand += " " + getApplicationMasterClass().getName() + " " + " 1>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.out" + " 2>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.err";
    amContainer.setCommands(Collections.singletonList(amCommand));

    LOG.debug("Application Master start command: " + amCommand);

    return amContainer;
}

From source file:io.hops.tensorflow.Client.java

License:Apache License

private ContainerLaunchContext createContainerLaunchContext(GetNewApplicationResponse appResponse)
        throws IOException {
    FileSystem fs = FileSystem.get(conf);
    ApplicationId appId = appResponse.getApplicationId();

    DistributedCacheList dcl = populateDistributedCache(fs, appId);
    Map<String, LocalResource> localResources = prepareLocalResources(fs, appId, dcl);
    Map<String, String> launchEnv = setupLaunchEnv();

    // Set the executable command for the application master
    Vector<CharSequence> vargs = new Vector<>(30);
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$$() + "/bin/java");
    vargs.add("-Xmx" + amMemory + "m");
    vargs.add(appMasterMainClass);

    if (python != null) {
        vargs.add(newArg(PYTHON, python));
    }
    vargs.add(newArg(MEMORY, String.valueOf(memory)));
    vargs.add(newArg(VCORES, String.valueOf(vcores)));
    vargs.add(newArg(GPUS, String.valueOf(gpus)));
    if (protocol != null) {
        vargs.add(newArg(PROTOCOL, protocol));
    }
    // vargs.add(newArg(PRIORITY, String.valueOf(priority)));
    vargs.add(newArg(ALLOCATION_TIMEOUT, String.valueOf(allocationTimeout / 1000)));

    vargs.add(newArg(ApplicationMasterArguments.MAIN_RELATIVE, mainRelativePath));
    if (arguments != null) {
        vargs.add(newArg(ARGS, StringUtils.join(arguments, " ")));
    }
    vargs.add(newArg(WORKERS, Integer.toString(numWorkers)));
    vargs.add(newArg(PSES, Integer.toString(numPses)));

    for (Map.Entry<String, String> entry : environment.entrySet()) {
        vargs.add(newArg(ENV, entry.getKey() + "=" + entry.getValue()));
    }
    if (tensorboard) {
        vargs.add("--" + TENSORBOARD);
    }
    if (debugFlag) {
        vargs.add("--" + DEBUG);
    }

    // Add log redirect params
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<>();
    commands.add(command.toString());

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(localResources, launchEnv, commands,
            null, null, null);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }
        // For now: only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    return amContainer;
}

From source file:MasteringYarn.DistributedShellClient.java

public void run(String[] args) throws YarnException, IOException, InterruptedException {

    YarnConfiguration yarnConfiguration = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(yarnConfiguration);
    yarnClient.start();

    YarnClientApplication yarnClientApplication = yarnClient.createApplication();

    //container launch context for application master
    ContainerLaunchContext applicationMasterContainer = Records.newRecord(ContainerLaunchContext.class);
    applicationMasterContainer.setCommands(
            Collections.singletonList("$JAVA_HOME/bin/java MasteringYarn.DistributedShellApplicationMaster "
                    + args[2] + " " + args[3] + " " + "1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
                    + "/stdout " + "2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));

    LocalResource applicationMasterJar = Records.newRecord(LocalResource.class);
    setupJarFileForApplicationMaster(new Path(args[1]), applicationMasterJar);
    applicationMasterContainer
            .setLocalResources(Collections.singletonMap("MasteringYarn.jar", applicationMasterJar));

    Map<String, String> appMasterEnv = new HashMap<>();
    setupEnvironmentForApplicationMaster(appMasterEnv);
    applicationMasterContainer.setEnvironment(appMasterEnv);

    Resource resources = Records.newRecord(Resource.class);
    resources.setVirtualCores(1);
    resources.setMemory(100);

    ApplicationSubmissionContext submissionContext = yarnClientApplication.getApplicationSubmissionContext();
    submissionContext.setAMContainerSpec(applicationMasterContainer);
    submissionContext.setQueue("default");
    submissionContext.setApplicationName("MasteringYarn");
    submissionContext.setResource(resources);

    ApplicationId applicationId = submissionContext.getApplicationId();
    System.out.println("Submitting " + applicationId);
    yarnClient.submitApplication(submissionContext);
    System.out.println("Post submission " + applicationId);

    ApplicationReport applicationReport;
    YarnApplicationState applicationState;

    do {
        Thread.sleep(1000);
        applicationReport = yarnClient.getApplicationReport(applicationId);
        applicationState = applicationReport.getYarnApplicationState();

        System.out.println("Diagnostics " + applicationReport.getDiagnostics());

    } while (applicationState != YarnApplicationState.FAILED
            && applicationState != YarnApplicationState.FINISHED
            && applicationState != YarnApplicationState.KILLED);

    System.out.println("Application finished with " + applicationState + " state and id " + applicationId);
}

From source file:ml.shifu.guagua.yarn.util.YarnUtils.java

License:Apache License

private static StringBuilder getCommandBase(String mainClass, String vmArgs, String programArgs,
        String memory) {
    List<String> commands = new ArrayList<String>(8);
    commands.add("exec");
    commands.add(Environment.JAVA_HOME.$() + File.separator + "bin" + File.separator + "java");
    commands.add("-Xms" + memory + "m");
    commands.add("-Xmx" + memory + "m");
    if (vmArgs != null) {
        commands.add(vmArgs);
    }
    commands.add("-cp .:${CLASSPATH}");
    commands.add(mainClass);
    if (programArgs != null) {
        commands.add(programArgs);
    }
    commands.add(
            "1> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + File.separator + ApplicationConstants.STDOUT);
    commands.add(
            "2> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + File.separator + ApplicationConstants.STDERR);

    StringBuilder sb = new StringBuilder(200);
    for (String cmd : commands) {
        sb.append(cmd).append(" ");
    }
    return sb;
}

From source file:org.apache.drill.yarn.appMaster.DrillControllerFactory.java

License:Apache License

/**
 * Constructs the Drill launch command. The launch uses the YARN-specific
 * yarn-drillbit.sh script, setting up the required input environment
 * variables.
 * <p>
 * This is an exercise in getting many details just right. The code here sets
 * the environment variables required by (and documented in) yarn-drillbit.sh.
 * The easiest way to understand this code is to insert an "echo" statement in
 * drill-bit.sh to echo the launch command there. Then, look in YARN's NM
 * private container directory for the launch_container.sh script to see the
 * command generated by the following code. Compare the two to validate that
 * the code does the right thing.
 * <p>
 * This class is very Linux-specific. The usual adjustments must be made to
 * adapt it to Windows.
 *
 * @param config
 * @return
 * @throws DoyConfigException
 */

private TaskSpec buildDrillTaskSpec(Map<String, LocalResource> resources) throws DoyConfigException {
    DrillOnYarnConfig doyConfig = DrillOnYarnConfig.instance();

    // Drillbit launch description

    ContainerRequestSpec containerSpec = new ContainerRequestSpec();
    containerSpec.memoryMb = config.getInt(DrillOnYarnConfig.DRILLBIT_MEMORY);
    containerSpec.vCores = config.getInt(DrillOnYarnConfig.DRILLBIT_VCORES);
    containerSpec.disks = config.getDouble(DrillOnYarnConfig.DRILLBIT_DISKS);

    LaunchSpec drillbitSpec = new LaunchSpec();

    // The drill home location is either a non-localized location,
    // or, more typically, the expanded Drill directory under the
    // container's working directory. When the localized directory,
    // we rely on the fact that the current working directory is
    // set to the container directory, so we just need the name
    // of the Drill folder under the cwd.

    String drillHome = doyConfig.getRemoteDrillHome();
    drillbitSpec.env.put("DRILL_HOME", drillHome);
    LOG.trace("Drillbit DRILL_HOME: " + drillHome);

    // Heap memory

    addIfSet(drillbitSpec, DrillOnYarnConfig.DRILLBIT_HEAP, "DRILL_HEAP");

    // Direct memory

    addIfSet(drillbitSpec, DrillOnYarnConfig.DRILLBIT_DIRECT_MEM, "DRILL_MAX_DIRECT_MEMORY");

    // Code cache

    addIfSet(drillbitSpec, DrillOnYarnConfig.DRILLBIT_CODE_CACHE, "DRILLBIT_CODE_CACHE_SIZE");

    // Any additional VM arguments from the config file.

    addIfSet(drillbitSpec, DrillOnYarnConfig.DRILLBIT_VM_ARGS, "DRILL_JVM_OPTS");

    // Any user-specified library path

    addIfSet(drillbitSpec, DrillOnYarnConfig.JAVA_LIB_PATH, DrillOnYarnConfig.DOY_LIBPATH_ENV_VAR);

    // Drill logs.
    // Relies on the LOG_DIR_EXPANSION_VAR marker which is replaced by
    // the container log directory.

    if (!config.getBoolean(DrillOnYarnConfig.DISABLE_YARN_LOGS)) {
        drillbitSpec.env.put("DRILL_YARN_LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    }

    // Debug option.

    if (config.getBoolean(DrillOnYarnConfig.DRILLBIT_DEBUG_LAUNCH)) {
        drillbitSpec.env.put(DrillOnYarnConfig.DRILL_DEBUG_ENV_VAR, "1");
    }

    // Hadoop home should be set in drill-env.sh since it is needed
    // for client launch as well as the AM.

    // addIfSet( drillbitSpec, DrillOnYarnConfig.HADOOP_HOME, "HADOOP_HOME" );

    // Garbage collection (gc) logging. In drillbit.sh logging can be
    // configured to go anywhere. In YARN, all logs go to the YARN log
    // directory; the gc log file is always called "gc.log".

    if (config.getBoolean(DrillOnYarnConfig.DRILLBIT_LOG_GC)) {
        drillbitSpec.env.put("ENABLE_GC_LOG", "1");
    }

    // Class path additions.

    addIfSet(drillbitSpec, DrillOnYarnConfig.DRILLBIT_PREFIX_CLASSPATH,
            DrillOnYarnConfig.DRILL_CLASSPATH_PREFIX_ENV_VAR);
    addIfSet(drillbitSpec, DrillOnYarnConfig.DRILLBIT_CLASSPATH, DrillOnYarnConfig.DRILL_CLASSPATH_ENV_VAR);

    // Drill-config.sh has specific entries for Hadoop and Hbase. To prevent
    // an endless number of such one-off cases, we add a general extension
    // class path. But, we retain Hadoop and Hbase for backward compatibility.

    addIfSet(drillbitSpec, DrillOnYarnConfig.DRILLBIT_EXTN_CLASSPATH, "EXTN_CLASSPATH");
    addIfSet(drillbitSpec, DrillOnYarnConfig.HADOOP_CLASSPATH, "DRILL_HADOOP_CLASSPATH");
    addIfSet(drillbitSpec, DrillOnYarnConfig.HBASE_CLASSPATH, "DRILL_HBASE_CLASSPATH");

    // Note that there is no equivalent of niceness for YARN: YARN controls
    // the niceness of its child processes.

    // Drillbit launch script under YARN
    // Here we can use DRILL_HOME because all env vars are set before
    // issuing this command.

    drillbitSpec.command = "$DRILL_HOME/bin/yarn-drillbit.sh";

    // Configuration (site directory), if given.

    String siteDirPath = doyConfig.getRemoteSiteDir();
    if (siteDirPath != null) {
        drillbitSpec.cmdArgs.add("--site");
        drillbitSpec.cmdArgs.add(siteDirPath);
    }

    // Localized resources

    if (resources != null) {
        drillbitSpec.resources.putAll(resources);
    }

    // Container definition.

    TaskSpec taskSpec = new TaskSpec();
    taskSpec.name = "Drillbit";
    taskSpec.containerSpec = containerSpec;
    taskSpec.launchSpec = drillbitSpec;
    taskSpec.maxRetries = config.getInt(DrillOnYarnConfig.DRILLBIT_MAX_RETRIES);
    return taskSpec;
}