Example usage for org.apache.hadoop.yarn.api ApplicationConstants LOG_DIR_EXPANSION_VAR

Introduction

On this page you can find example usage of org.apache.hadoop.yarn.api ApplicationConstants LOG_DIR_EXPANSION_VAR.

Prototype

String LOG_DIR_EXPANSION_VAR

Document

The temporary environment variable for the container log directory. Its literal value is the placeholder <LOG_DIR>, which the NodeManager replaces with the real container log directory when the container is launched.
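
For orientation, here is a minimal sketch of how the placeholder is typically embedded in a container launch command so that stdout and stderr land in the container log directory. The class name LogDirExample and the command com.example.MyAppMaster are illustrative assumptions, not taken from the examples below.

import java.util.Collections;

import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.util.Records;

public class LogDirExample {
    // Builds a launch context whose command redirects stdout/stderr into the
    // container log directory. The NodeManager expands the <LOG_DIR>
    // placeholder to the real path at launch time.
    public static ContainerLaunchContext buildContext() {
        ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
        String command = "java com.example.MyAppMaster"
                + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
                + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";
        ctx.setCommands(Collections.singletonList(command));
        return ctx;
    }
}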

Usage

From source file:com.yahoo.storm.yarn.StormOnYarn.java

License:Open Source License

private void launchApp(String appName, String queue, int amMB, String storm_zip_location) throws Exception {
    LOG.debug("StormOnYarn:launchApp() ...");
    YarnClientApplication client_app = _yarn.createApplication();
    GetNewApplicationResponse app = client_app.getNewApplicationResponse();
    _appId = app.getApplicationId();
    LOG.debug("_appId:" + _appId);

    if (amMB > app.getMaximumResourceCapability().getMemory()) {
        //TODO need some sanity checks
        amMB = app.getMaximumResourceCapability().getMemory();
    }
    ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);
    appContext.setApplicationId(app.getApplicationId());
    appContext.setApplicationName(appName);
    appContext.setQueue(queue);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the
    // local resources
    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    String appMasterJar = findContainingJar(MasterServer.class);
    FileSystem fs = FileSystem.get(_hadoopConf);
    Path src = new Path(appMasterJar);
    String appHome = Util.getApplicationHomeForId(_appId.toString());
    Path dst = new Path(fs.getHomeDirectory(), appHome + Path.SEPARATOR + "AppMaster.jar");
    fs.copyFromLocalFile(false, true, src, dst);
    localResources.put("AppMaster.jar", Util.newYarnAppResource(fs, dst));

    String stormVersion = Util.getStormVersion();
    Path zip;
    if (storm_zip_location != null) {
        zip = new Path(storm_zip_location);
    } else {
        zip = new Path("/lib/storm/" + stormVersion + "/storm.zip");
    }
    _stormConf.put("storm.zip.path", zip.makeQualified(fs).toUri().getPath());
    LocalResourceVisibility visibility = LocalResourceVisibility.PUBLIC;
    _stormConf.put("storm.zip.visibility", "PUBLIC");
    if (!Util.isPublic(fs, zip)) {
        visibility = LocalResourceVisibility.APPLICATION;
        _stormConf.put("storm.zip.visibility", "APPLICATION");
    }
    localResources.put("storm", Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE, visibility));

    Path confDst = Util.createConfigurationFileInFs(fs, appHome, _stormConf, _hadoopConf);
    // establish a symbolic link to conf directory
    localResources.put("conf", Util.newYarnAppResource(fs, confDst));

    // Setup security tokens
    Path[] paths = new Path[3];
    paths[0] = dst;
    paths[1] = zip;
    paths[2] = confDst;
    Credentials credentials = new Credentials();
    TokenCache.obtainTokensForNamenodes(credentials, paths, _hadoopConf);
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    //security tokens for HDFS distributed cache
    amContainer.setTokens(securityTokens);

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the env variables to be setup in the env where the application master
    // will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();
    // add the runtime classpath needed for tests to work
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./conf");
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./AppMaster.jar");

    //Make sure that AppMaster has access to all YARN JARs
    List<String> yarn_classpath_cmd = java.util.Arrays.asList("yarn", "classpath");
    ProcessBuilder pb = new ProcessBuilder(yarn_classpath_cmd).redirectError(Redirect.INHERIT);
    LOG.info("YARN CLASSPATH COMMAND = [" + yarn_classpath_cmd + "]");
    pb.environment().putAll(System.getenv());
    Process proc = pb.start();
    BufferedReader reader = new BufferedReader(new InputStreamReader(proc.getInputStream(), "UTF-8"));
    String line = "";
    String yarn_class_path = (String) _stormConf.get("storm.yarn.yarn_classpath");
    if (yarn_class_path == null) {
        StringBuilder yarn_class_path_builder = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            yarn_class_path_builder.append(line);
        }
        yarn_class_path = yarn_class_path_builder.toString();
    }
    LOG.info("YARN CLASSPATH = [" + yarn_class_path + "]");
    proc.waitFor();
    reader.close();
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), yarn_class_path);

    String stormHomeInZip = Util.getStormHomeInZip(fs, zip, stormVersion);
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./storm/" + stormHomeInZip + "/*");
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./storm/" + stormHomeInZip + "/lib/*");

    String java_home = (String) _stormConf.get("storm.yarn.java_home");
    if (java_home == null)
        java_home = System.getenv("JAVA_HOME");

    if (java_home != null && !java_home.isEmpty())
        env.put("JAVA_HOME", java_home);
    LOG.info("Using JAVA_HOME = [" + env.get("JAVA_HOME") + "]");

    env.put("appJar", appMasterJar);
    env.put("appName", appName);
    env.put("appId", new Integer(_appId.getId()).toString());
    env.put("STORM_LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<String> vargs = new Vector<String>();
    if (java_home != null && !java_home.isEmpty())
        vargs.add(env.get("JAVA_HOME") + "/bin/java");
    else
        vargs.add("java");
    vargs.add("-Dstorm.home=./storm/" + stormHomeInZip + "/");
    vargs.add("-Dlogfile.name=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/master.log");
    //vargs.add("-verbose:class");
    vargs.add("com.yahoo.storm.yarn.MasterServer");
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout");
    // Set java executable command
    LOG.info("Setting up app master command:" + vargs);

    amContainer.setCommands(vargs);

    // Set up resource type requirements
    // For now, only memory is supported so we set memory requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMB);
    appContext.setResource(capability);
    appContext.setAMContainerSpec(amContainer);

    _yarn.submitApplication(appContext);
}

From source file:com.yahoo.storm.yarn.Util.java

License:Open Source License

@SuppressWarnings("rawtypes")
static List<String> buildSupervisorCommands(Map conf) throws IOException {
    List<String> toRet = buildCommandPrefix(conf, backtype.storm.Config.NIMBUS_CHILDOPTS);

    toRet.add("-Dworker.logdir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    toRet.add("-Dlogfile.name=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/supervisor.log");
    toRet.add("backtype.storm.daemon.supervisor");

    return toRet;
}
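
For context, a minimal, hypothetical sketch of the consumer side: once the NodeManager has expanded the placeholder, a process launched with the settings shown above can read the resulting path back, either from the STORM_LOG_DIR environment variable set in StormOnYarn or from the worker.logdir system property set in buildSupervisorCommands. The class name is an assumption, and whether each value is present depends on how the container was launched.

public class ContainerLogDirReader {
    public static void main(String[] args) {
        // STORM_LOG_DIR was placed in the container environment by the client
        // (env.put("STORM_LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR)),
        // so by the time this process runs it holds the expanded path.
        String fromEnv = System.getenv("STORM_LOG_DIR");
        // worker.logdir was passed on the command line as -Dworker.logdir=<LOG_DIR>.
        String fromProp = System.getProperty("worker.logdir");
        System.out.println("log dir from environment:     " + fromEnv);
        System.out.println("log dir from system property: " + fromProp);
    }
}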

From source file:com.yss.yarn.launch.APPLaunch.java

License:Open Source License

private void launchApp(String appName, String queue, int amMB, String appMasterJar, String classPath,
        String lanchMainClass, Map<String, String> runenv) throws Exception {

    YarnClientApplication client_app = _yarn.createApplication();
    GetNewApplicationResponse app = client_app.getNewApplicationResponse();

    _appId = app.getApplicationId();
    LOG.debug("_appId:" + _appId);

    ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);

    appContext.setApplicationId(app.getApplicationId());
    appContext.setApplicationName(appName);
    appContext.setQueue(queue);

    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    LOG.info("load  " + appMasterJar);

    FileSystem fs = FileSystem.get(_hadoopConf);
    Path src = new Path(appMasterJar);
    String appHome = YarnUtil.getApplicationHomeForId(_appId.toString());
    Path dst = new Path(fs.getHomeDirectory(), appHome + Path.SEPARATOR + "AppMaster.jar");

    fs.copyFromLocalFile(false, true, src, dst);
    localResources.put("AppMaster.jar", YarnUtil.newYarnAppResource(fs, dst));

    amContainer.setLocalResources(localResources);

    LOG.info("Set the environment for the application master");

    Map<String, String> env = new HashMap<String, String>();

    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./AppMaster.jar");

    List<String> yarn_classpath_cmd = java.util.Arrays.asList("yarn", "classpath");

    LOG.info("YARN CLASSPATH COMMAND = [" + yarn_classpath_cmd + "]");

    String yarn_class_path = classPath;

    LOG.info("YARN CLASSPATH = [" + yarn_class_path + "]");
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), yarn_class_path);

    String java_home = System.getenv("JAVA_HOME");

    if ((java_home != null) && !java_home.isEmpty()) {
        env.put("JAVA_HOME", java_home);
    }

    LOG.info("Using JAVA_HOME = [" + env.get("JAVA_HOME") + "]");
    env.put("appJar", appMasterJar);
    env.put("appName", appName);
    env.put("appId", _appId.toString());
    env.put("STORM_LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    env.putAll(runenv);
    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<String> vargs = new Vector<String>();

    if ((java_home != null) && !java_home.isEmpty()) {
        vargs.add(env.get("JAVA_HOME") + "/bin/java");
    } else {
        vargs.add("java");
    }

    vargs.add("-Dlogfile.name=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/master.log");

    vargs.add(lanchMainClass);
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr");

    // Set java executable command
    LOG.info("Setting up app master command:" + vargs);
    amContainer.setCommands(vargs);

    Resource capability = Records.newRecord(Resource.class);
    // request the AM memory that was passed in
    capability.setMemory(amMB);

    appContext.setResource(capability);
    appContext.setAMContainerSpec(amContainer);
    appContext.setApplicationName(appName);
    _yarn.submitApplication(appContext);
}

From source file:com.zqh.hadoop.moya.core.yarn.Client.java

License:Apache License

/**
 * Main run function for the client
 *
 * @return true if application completed successfully
 * @throws java.io.IOException
 * @throws org.apache.hadoop.yarn.exceptions.YarnException
 */
public boolean run() throws IOException, YarnException {

    LOG.info("Running Client");
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports();
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask
    // if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource
    // manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of
    // the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(conf);
    Path src = new Path(appMasterJar);
    String pathSuffix = appName + "/" + appId.getId() + "/AppMaster.jar";
    Path dst = new Path(fs.getHomeDirectory(), pathSuffix);
    fs.copyFromLocalFile(false, true, src, dst);
    FileStatus destStatus = fs.getFileStatus(dst);
    LocalResource amJarRsrc = Records.newRecord(LocalResource.class);

    // Set the type of resource - file or archive
    // archives are untarred at destination
    // we don't need the jar file to be untarred
    amJarRsrc.setType(LocalResourceType.FILE);
    // Set visibility of the resource
    // Setting to most private option
    amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    // Set the resource to be copied over
    amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(dst));
    // Set timestamp and length of file so that the framework
    // can do basic sanity checks for the local resource
    // after it has been copied over to ensure it is the same
    // resource the client intended to use with the application
    amJarRsrc.setTimestamp(destStatus.getModificationTime());
    amJarRsrc.setSize(destStatus.getLen());
    localResources.put("AppMaster.jar", amJarRsrc);

    // Setup App Master Constants
    String amJarLocation = "";
    long amJarLen = 0;
    long amJarTimestamp = 0;

    // adding info so we can add the jar to the App master container path
    amJarLocation = dst.toUri().toString();
    FileStatus shellFileStatus = fs.getFileStatus(dst);
    amJarLen = shellFileStatus.getLen();
    amJarTimestamp = shellFileStatus.getModificationTime();

    // ADD libs needed that will be untared
    // Keep it all archived for now so add it as a file...
    src = new Path(localLibJar);
    pathSuffix = appName + "/" + appId.getId() + "/Runnable.jar";
    dst = new Path(fs.getHomeDirectory(), pathSuffix);
    fs.copyFromLocalFile(false, true, src, dst);
    destStatus = fs.getFileStatus(dst);
    LocalResource libsJarRsrc = Records.newRecord(LocalResource.class);
    libsJarRsrc.setType(LocalResourceType.FILE);
    libsJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    libsJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(dst));
    libsJarRsrc.setTimestamp(destStatus.getModificationTime());
    libsJarRsrc.setSize(destStatus.getLen());
    localResources.put("Runnable.jar", libsJarRsrc);

    // Setup Libs Constants
    String libsLocation = "";
    long libsLen = 0;
    long libsTimestamp = 0;

    // adding info so we can add the jar to the App master container path
    libsLocation = dst.toUri().toString();
    FileStatus libsFileStatus = fs.getFileStatus(dst);
    libsLen = libsFileStatus.getLen();
    libsTimestamp = libsFileStatus.getModificationTime();

    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        Path log4jSrc = new Path(log4jPropFile);
        Path log4jDst = new Path(fs.getHomeDirectory(), "log4j.props");
        fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
        FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
        LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
        log4jRsrc.setType(LocalResourceType.FILE);
        log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
        log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
        log4jRsrc.setSize(log4jFileStatus.getLen());
        localResources.put("log4j.properties", log4jRsrc);
    }

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the env variables to be setup in the env where the application
    // master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // put the AM jar into env and MOYA Runnable
    // using the env info, the application master will create the correct
    // local resource for the
    // eventual containers that will be launched to execute the shell
    // scripts
    env.put(MConstants.APPLICATIONMASTERJARLOCATION, amJarLocation);
    env.put(MConstants.APPLICATIONMASTERJARTIMESTAMP, Long.toString(amJarTimestamp));
    env.put(MConstants.APPLICATIONMASTERJARLEN, Long.toString(amJarLen));

    env.put(MConstants.LIBSLOCATION, libsLocation);
    env.put(MConstants.LIBSTIMESTAMP, Long.toString(libsTimestamp));
    env.put(MConstants.LIBSLEN, Long.toString(libsLen));

    env.put(MConstants.ZOOKEEPERHOSTS, ZKHosts);

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar)
            .append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    vargs.add("--priority " + String.valueOf(moyaPriority));
    if (!localLibJar.isEmpty()) {
        vargs.add("--lib " + localLibJar + "");
    }
    if (debugFlag) {
        vargs.add("--debug");
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements
    // For now, only memory is supported so we set memory requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // The following are not required for launching an application master
    // amContainer.setContainerId(containerId);

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp =
    // applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on
    // success
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    // TODO
    // Try submitting the same request again
    // app submission failure?

    // Monitor the application
    return monitorApplication(appId);

}

From source file:de.huberlin.wbi.hiway.common.Client.java

License:Apache License

/**
 * Main run function for the client.
 * 
 * @return true if application completed successfully.
 */
private boolean run() throws IOException, YarnException {

    /* log */ System.out.println("Running Client");

    yarnClient.start();
    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();

    /* log */ System.out.println(
            "Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    /* log */ System.out.println("Got Cluster node info from ASM");
    /* log */ for (NodeReport node : clusterNodeReports)
        System.out.println("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    /* log */ System.out.println("Queue info" + ", queueName=" + queueInfo.getQueueName()
            + ", queueCurrentCapacity=" + queueInfo.getCurrentCapacity() + ", queueMaxCapacity="
            + queueInfo.getMaximumCapacity() + ", queueApplicationCount=" + queueInfo.getApplications().size()
            + ", queueChildQueueCount=" + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    /* log */ for (QueueUserACLInfo aclInfo : listAclInfo)
        for (QueueACL userAcl : aclInfo.getUserAcls())
            System.out.println("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName()
                    + ", userAcl=" + userAcl.name());

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();

    // Get min/max resource capabilities from RM and change memory ask if needed
    int maxVC = appResponse.getMaximumResourceCapability().getVirtualCores();
    /* log */ System.out.println("Max vCores capability of resources in this cluster " + maxVC);
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    /* log */ System.out.println("Max mem capability of resources in this cluster " + maxMem);
    // A resource ask cannot exceed the max.
    if (amVCores > maxVC) {
        /* log */ System.out.println("AM vCores specified above max threshold of cluster. Using max value."
                + ", specified=" + amVCores + ", max=" + maxVC);
        amVCores = maxVC;
    }
    if (amMemory > maxMem) {
        /* log */ System.out.println("AM memory specified above max threshold of cluster. Using max value."
                + ", specified=" + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    appContext.setApplicationType(conf.get(HiWayConfiguration.HIWAY_AM_APPLICATION_TYPE,
            HiWayConfiguration.HIWAY_AM_APPLICATION_TYPE_DEFAULT));
    appContext.setApplicationName("run " + workflowParam + " (type: " + workflowType.toString() + ")");
    ApplicationId appId = appContext.getApplicationId();
    String hdfsBaseDirectoryName = conf.get(HiWayConfiguration.HIWAY_AM_DIRECTORY_BASE,
            HiWayConfiguration.HIWAY_AM_DIRECTORY_BASE_DEFAULT);
    String hdfsSandboxDirectoryName = conf.get(HiWayConfiguration.HIWAY_AM_DIRECTORY_CACHE,
            HiWayConfiguration.HIWAY_AM_DIRECTORY_CACHE_DEFAULT);
    Path hdfsBaseDirectory = new Path(new Path(hdfs.getUri()), hdfsBaseDirectoryName);
    Data.setHdfsBaseDirectory(hdfsBaseDirectory);
    Path hdfsSandboxDirectory = new Path(hdfsBaseDirectory, hdfsSandboxDirectoryName);
    Path hdfsApplicationDirectory = new Path(hdfsSandboxDirectory, appId.toString());
    Data.setHdfsApplicationDirectory(hdfsApplicationDirectory);
    Data.setHdfs(hdfs);

    Path wfSource, wfDest, wfTemp = null;
    try {
        wfSource = new Path(new URI(workflowParam).getPath());
    } catch (URISyntaxException e) {
        wfSource = new Path(workflowParam);
    }
    wfDest = new Path(hdfsApplicationDirectory + "/" + wfSource.getName());

    // (1) if workflow file in hdfs, then transfer to temp file in local fs
    if (hdfs.exists(wfSource)) {
        wfTemp = new Path("./." + wfSource.getName());
        System.out.println("Workflow found in HDFS at location " + wfSource);
        hdfs.copyToLocalFile(false, wfSource, wfTemp);
    }

    // (2) if galaxy workflow, then copy and replace input ports
    if (workflowType.equals(HiWayConfiguration.HIWAY_WORKFLOW_LANGUAGE_OPTS.galaxy)) {
        wfTemp = preProcessGalaxyWorkflow(wfSource, wfTemp);
    }

    if (wfTemp != null) {
        hdfs.copyFromLocalFile(wfTemp, wfDest);
        new File(wfTemp.toString()).delete();
    } else {
        hdfs.copyFromLocalFile(wfSource, wfDest);
    }

    if (summaryPath != null)
        summary = new Data(summaryPath);
    if (customMemPath != null)
        (new Data(customMemPath)).stageOut();

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    /* set the env variables to be setup in the env where the application master will be run */
    System.out.println("Set the environment for the application master");
    Map<String, String> env = new HashMap<>();

    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar)
            .append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }

    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<>(30);

    // Set java executable command
    System.out.println("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    if (HiWayConfiguration.debug)
        vargs.add(
                "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=9010 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    vargs.add("-Xss" + "16m");
    // Set class name

    switch (workflowType) {
    case dax:
        vargs.add(HiWayConfiguration.HIWAY_WORKFLOW_LANGUAGE_DAX_AM_CLASS);
        break;
    case log:
        vargs.add(HiWayConfiguration.HIWAY_WORKFLOW_LANGUAGE_LOG_AM_CLASS);
        break;
    case galaxy:
        vargs.add(HiWayConfiguration.HIWAY_WORKFLOW_LANGUAGE_GALAXY_AM_CLASS);
        break;
    case cuneiformE:
        vargs.add(HiWayConfiguration.HIWAY_WORKFLOW_LANGUAGE_CUNEIFORME_AM_CLASS);
        break;
    default:
        vargs.add(HiWayConfiguration.HIWAY_WORKFLOW_LANGUAGE_CUNEIFORMJ_AM_CLASS);
    }

    vargs.add("--scheduler " + schedulerName.toString());
    if (memory != null)
        vargs.add("--memory " + memory);
    if (summary != null)
        vargs.add("--summary " + summary.getName());
    if (customMemPath != null)
        vargs.add("--custom " + customMemPath);
    vargs.add("--appid " + appId.toString());
    if (HiWayConfiguration.debug)
        vargs.add("--debug");
    if (HiWayConfiguration.verbose)
        vargs.add("--verbose");
    vargs.add(workflowParam);
    vargs.add("> >(tee AppMaster.stdout " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout)");
    vargs.add("2> >(tee AppMaster.stderr " + ApplicationConstants.LOG_DIR_EXPANSION_VAR
            + "/AppMaster.stderr >&2)");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    System.out.println("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setVirtualCores(amVCores);
    capability.setMemory(amMemory);
    appContext.setResource(capability);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = hdfs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                System.out.println("Got dt for " + hdfs.getUri() + "; " + token);
            }
        }
        try (DataOutputBuffer dob = new DataOutputBuffer()) {
            credentials.writeTokenStorageToStream(dob);
            ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
            amContainer.setTokens(fsTokens);
        }
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    /* log */ System.out.println("Submitting application to ASM");
    yarnClient.submitApplication(appContext);

    // Monitor the application
    boolean success = monitorApplication(appId);

    if (success && summary != null) {
        summary.stageIn();
    }

    return success;

}

From source file:edu.uci.ics.asterix.aoya.AsterixYARNClient.java

License:Apache License

/**
 * Submits the request to start the AsterixApplicationMaster to the YARN ResourceManager.
 *
 * @param app
 *            The application attempt handle.
 * @param resources
 *            Resources to be distributed as part of the container launch
 * @param mode
 *            The mode of the ApplicationMaster
 * @return The application ID of the new Asterix instance.
 * @throws IOException
 * @throws YarnException
 */

public ApplicationId deployAM(YarnClientApplication app, List<DFSResourceCoordinate> resources, Mode mode)
        throws IOException, YarnException {

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();

    // Set local resource info into app master container launch context
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    for (DFSResourceCoordinate res : resources) {
        localResources.put(res.name, res.res);
    }
    amContainer.setLocalResources(localResources);
    // Set the env variables to be setup in the env where the application
    // master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // using the env info, the application master will create the correct
    // local resource for the
    // eventual containers that will be launched to execute the shell
    // scripts
    for (DFSResourceCoordinate res : resources) {
        if (res.envs == null) { //some entries may not have environment variables.
            continue;
        }
        for (Map.Entry<String, String> e : res.envs.entrySet()) {
            env.put(e.getValue(), e.getKey());
        }
    }
    //this is needed for when the RM address isn't known from the environment of the AM
    env.put(AConstants.RMADDRESS, conf.get("yarn.resourcemanager.address"));
    env.put(AConstants.RMSCHEDULERADDRESS, conf.get("yarn.resourcemanager.scheduler.address"));
    ///add miscellaneous environment variables.
    env.put(AConstants.INSTANCESTORE, CONF_DIR_REL + instanceFolder);
    env.put(AConstants.DFS_BASE, FileSystem.get(conf).getHomeDirectory().toUri().toString());
    env.put(AConstants.CC_JAVA_OPTS, ccJavaOpts);
    env.put(AConstants.NC_JAVA_OPTS, ncJavaOpts);

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder("").append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("." + File.separator + "log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        LOG.info("In YARN MiniCluster");
        classPathEnv.append(System.getProperty("path.separator"));
        classPathEnv.append(System.getProperty("java.class.path"));
        env.put("HADOOP_CONF_DIR", System.getProperty("user.dir") + File.separator + "target" + File.separator);
    }
    LOG.info("AM Classpath:" + classPathEnv.toString());
    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(JAVA_HOME + File.separator + "bin" + File.separator + "java");
    // Set class name
    vargs.add(appMasterMainClass);
    //Set params for Application Master
    if (debugFlag) {
        vargs.add("-debug");
    }
    if (mode == Mode.DESTROY) {
        vargs.add("-obliterate");
    } else if (mode == Mode.BACKUP) {
        vargs.add("-backup");
    } else if (mode == Mode.RESTORE) {
        vargs.add("-restore " + snapName);
    } else if (mode == Mode.INSTALL) {
        vargs.add("-initial ");
    }
    if (refresh) {
        vargs.add("-refresh");
    }
    //vargs.add("/bin/ls -alh asterix-server.zip/repo");
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + File.separator + "AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + File.separator + "AppMaster.stderr");
    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements
    // For now, only memory is supported so we set memory requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // The following are not required for launching an application master
    // amContainer.setContainerId(containerId);

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp =
    // applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on
    // success
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    //now write the instance lock
    if (mode == Mode.INSTALL || mode == Mode.START) {
        FileSystem fs = FileSystem.get(conf);
        Path lock = new Path(fs.getHomeDirectory(), CONF_DIR_REL + instanceFolder + instanceLock);
        if (fs.exists(lock)) {
            throw new IllegalStateException("Somehow, this instance has been launched twice. ");
        }
        BufferedWriter out = new BufferedWriter(new OutputStreamWriter(fs.create(lock, true)));
        try {
            out.write(app.getApplicationSubmissionContext().getApplicationId().toString());
            out.close();
        } finally {
            out.close();
        }
    }
    return app.getApplicationSubmissionContext().getApplicationId();

}

From source file:edu.uci.ics.hyracks.yarn.client.LaunchHyracksApplication.java

License:Apache License

private void run() throws Exception {
    Configuration conf = new Configuration();
    YarnConfiguration yconf = new YarnConfiguration(conf);
    YarnClientRMConnection crmc = new YarnClientRMConnection(yconf);

    YarnApplication app = crmc.createApplication(options.appName);

    ContainerLaunchContext clCtx = app.getContainerLaunchContext();

    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    File amZipFile = new File(System.getProperty("basedir") + "/hyracks-yarn-am/hyracks-yarn-am.zip");
    localResources.put("archive", LocalResourceHelper.createArchiveResource(conf, amZipFile));
    clCtx.setLocalResources(localResources);

    String command = "./archive/bin/hyracks-yarn-am 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
            + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";

    List<String> commands = new ArrayList<String>();
    commands.add(command);
    clCtx.setCommands(commands);

    clCtx.setResource(ResourceHelper.createMemoryCapability(options.amMemory));

    app.submit();
}

From source file:eu.stratosphere.yarn.ApplicationMaster.java

License:Apache License

private void run() throws Exception {
    //Utils.logFilesInCurrentDirectory(LOG);
    // Initialize clients to ResourceManager and NodeManagers
    Configuration conf = Utils.initializeYarnConfiguration();
    FileSystem fs = FileSystem.get(conf);
    Map<String, String> envs = System.getenv();
    final String currDir = envs.get(Environment.PWD.key());
    final String logDirs = envs.get(Environment.LOG_DIRS.key());
    final String ownHostname = envs.get(Environment.NM_HOST.key());
    final String appId = envs.get(Client.ENV_APP_ID);
    final String clientHomeDir = envs.get(Client.ENV_CLIENT_HOME_DIR);
    final String applicationMasterHost = envs.get(Environment.NM_HOST.key());
    final String remoteStratosphereJarPath = envs.get(Client.STRATOSPHERE_JAR_PATH);
    final String shipListString = envs.get(Client.ENV_CLIENT_SHIP_FILES);
    final String yarnClientUsername = envs.get(Client.ENV_CLIENT_USERNAME);
    final int taskManagerCount = Integer.valueOf(envs.get(Client.ENV_TM_COUNT));
    final int memoryPerTaskManager = Integer.valueOf(envs.get(Client.ENV_TM_MEMORY));
    final int coresPerTaskManager = Integer.valueOf(envs.get(Client.ENV_TM_CORES));

    int heapLimit = Utils.calculateHeapSize(memoryPerTaskManager);

    if (currDir == null) {
        throw new RuntimeException("Current directory unknown");
    }
    if (ownHostname == null) {
        throw new RuntimeException("Own hostname (" + Environment.NM_HOST + ") not set.");
    }
    LOG.info("Working directory " + currDir);

    // load Stratosphere configuration.
    Utils.getStratosphereConfiguration(currDir);

    final String localWebInterfaceDir = currDir + "/resources/"
            + ConfigConstants.DEFAULT_JOB_MANAGER_WEB_PATH_NAME;

    // Update yaml conf -> set jobManager address to this machine's address.
    FileInputStream fis = new FileInputStream(currDir + "/stratosphere-conf.yaml");
    BufferedReader br = new BufferedReader(new InputStreamReader(fis));
    Writer output = new BufferedWriter(new FileWriter(currDir + "/stratosphere-conf-modified.yaml"));
    String line;
    while ((line = br.readLine()) != null) {
        if (line.contains(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY)) {
            output.append(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY + ": " + ownHostname + "\n");
        } else if (line.contains(ConfigConstants.JOB_MANAGER_WEB_ROOT_PATH_KEY)) {
            output.append(ConfigConstants.JOB_MANAGER_WEB_ROOT_PATH_KEY + ": " + "\n");
        } else {
            output.append(line + "\n");
        }
    }
    // just to make sure.
    output.append(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY + ": " + ownHostname + "\n");
    output.append(ConfigConstants.JOB_MANAGER_WEB_ROOT_PATH_KEY + ": " + localWebInterfaceDir + "\n");
    output.append(ConfigConstants.JOB_MANAGER_WEB_LOG_PATH_KEY + ": " + logDirs + "\n");
    output.close();
    br.close();
    File newConf = new File(currDir + "/stratosphere-conf-modified.yaml");
    if (!newConf.exists()) {
        LOG.warn("modified yaml does not exist!");
    }

    Utils.copyJarContents("resources/" + ConfigConstants.DEFAULT_JOB_MANAGER_WEB_PATH_NAME,
            ApplicationMaster.class.getProtectionDomain().getCodeSource().getLocation().getPath());

    JobManager jm;
    {
        String pathToNepheleConfig = currDir + "/stratosphere-conf-modified.yaml";
        String[] args = { "-executionMode", "cluster", "-configDir", pathToNepheleConfig };

        // start the job manager
        jm = JobManager.initialize(args);

        // Start info server for jobmanager
        jm.startInfoServer();
    }

    AMRMClient<ContainerRequest> rmClient = AMRMClient.createAMRMClient();
    rmClient.init(conf);
    rmClient.start();

    NMClient nmClient = NMClient.createNMClient();
    nmClient.init(conf);
    nmClient.start();

    // Register with ResourceManager
    LOG.info("registering ApplicationMaster");
    rmClient.registerApplicationMaster(applicationMasterHost, 0, "http://" + applicationMasterHost + ":"
            + GlobalConfiguration.getString(ConfigConstants.JOB_MANAGER_WEB_PORT_KEY, "undefined"));

    // Priority for worker containers - priorities are intra-application
    Priority priority = Records.newRecord(Priority.class);
    priority.setPriority(0);

    // Resource requirements for worker containers
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(memoryPerTaskManager);
    capability.setVirtualCores(coresPerTaskManager);

    // Make container requests to ResourceManager
    for (int i = 0; i < taskManagerCount; ++i) {
        ContainerRequest containerAsk = new ContainerRequest(capability, null, null, priority);
        LOG.info("Requesting TaskManager container " + i);
        rmClient.addContainerRequest(containerAsk);
    }

    LocalResource stratosphereJar = Records.newRecord(LocalResource.class);
    LocalResource stratosphereConf = Records.newRecord(LocalResource.class);

    // register Stratosphere Jar with remote HDFS
    final Path remoteJarPath = new Path(remoteStratosphereJarPath);
    Utils.registerLocalResource(fs, remoteJarPath, stratosphereJar);

    // register conf with local fs.
    Path remoteConfPath = Utils.setupLocalResource(conf, fs, appId,
            new Path("file://" + currDir + "/stratosphere-conf-modified.yaml"), stratosphereConf,
            new Path(clientHomeDir));
    LOG.info("Prepared localresource for modified yaml: " + stratosphereConf);

    boolean hasLog4j = new File(currDir + "/log4j.properties").exists();
    // prepare the files to ship
    LocalResource[] remoteShipRsc = null;
    String[] remoteShipPaths = shipListString.split(",");
    if (!shipListString.isEmpty()) {
        remoteShipRsc = new LocalResource[remoteShipPaths.length];
        { // scope for i
            int i = 0;
            for (String remoteShipPathStr : remoteShipPaths) {
                if (remoteShipPathStr == null || remoteShipPathStr.isEmpty()) {
                    continue;
                }
                remoteShipRsc[i] = Records.newRecord(LocalResource.class);
                Path remoteShipPath = new Path(remoteShipPathStr);
                Utils.registerLocalResource(fs, remoteShipPath, remoteShipRsc[i]);
                i++;
            }
        }
    }

    // respect custom JVM options in the YAML file
    final String javaOpts = GlobalConfiguration.getString(ConfigConstants.STRATOSPHERE_JVM_OPTIONS, "");

    // Obtain allocated containers and launch
    int allocatedContainers = 0;
    int completedContainers = 0;
    while (allocatedContainers < taskManagerCount) {
        AllocateResponse response = rmClient.allocate(0);
        for (Container container : response.getAllocatedContainers()) {
            LOG.info("Got new Container for TM " + container.getId() + " on host "
                    + container.getNodeId().getHost());
            ++allocatedContainers;

            // Launch container by create ContainerLaunchContext
            ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);

            String tmCommand = "$JAVA_HOME/bin/java -Xmx" + heapLimit + "m " + javaOpts;
            if (hasLog4j) {
                tmCommand += " -Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
                        + "/taskmanager-log4j.log\" -Dlog4j.configuration=file:log4j.properties";
            }
            tmCommand += " eu.stratosphere.yarn.YarnTaskManagerRunner -configDir . " + " 1>"
                    + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/taskmanager-stdout.log" + " 2>"
                    + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/taskmanager-stderr.log";
            ctx.setCommands(Collections.singletonList(tmCommand));

            LOG.info("Starting TM with command=" + tmCommand);

            // copy resources to the TaskManagers.
            Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(2);
            localResources.put("stratosphere.jar", stratosphereJar);
            localResources.put("stratosphere-conf.yaml", stratosphereConf);

            // add ship resources
            if (!shipListString.isEmpty()) {
                Preconditions.checkNotNull(remoteShipRsc);
                for (int i = 0; i < remoteShipPaths.length; i++) {
                    localResources.put(new Path(remoteShipPaths[i]).getName(), remoteShipRsc[i]);
                }
            }

            ctx.setLocalResources(localResources);

            // Setup CLASSPATH for Container (=TaskTracker)
            Map<String, String> containerEnv = new HashMap<String, String>();
            Utils.setupEnv(conf, containerEnv); //add stratosphere.jar to class path.
            containerEnv.put(Client.ENV_CLIENT_USERNAME, yarnClientUsername);

            ctx.setEnvironment(containerEnv);

            UserGroupInformation user = UserGroupInformation.getCurrentUser();
            try {
                Credentials credentials = user.getCredentials();
                DataOutputBuffer dob = new DataOutputBuffer();
                credentials.writeTokenStorageToStream(dob);
                ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
                ctx.setTokens(securityTokens);
            } catch (IOException e) {
                LOG.warn("Getting current user info failed when trying to launch the container"
                        + e.getMessage());
            }

            LOG.info("Launching container " + allocatedContainers);
            nmClient.startContainer(container, ctx);
        }
        for (ContainerStatus status : response.getCompletedContainersStatuses()) {
            ++completedContainers;
            LOG.info("Completed container (while allocating) " + status.getContainerId() + ". Total Completed:"
                    + completedContainers);
            LOG.info("Diagnostics " + status.getDiagnostics());
        }
        Thread.sleep(100);
    }

    // Now wait for containers to complete

    while (completedContainers < taskManagerCount) {
        AllocateResponse response = rmClient.allocate(completedContainers / taskManagerCount);
        for (ContainerStatus status : response.getCompletedContainersStatuses()) {
            ++completedContainers;
            LOG.info("Completed container " + status.getContainerId() + ". Total Completed:"
                    + completedContainers);
            LOG.info("Diagnostics " + status.getDiagnostics());
        }
        Thread.sleep(5000);
    }
    LOG.info("Shutting down JobManager");
    jm.shutdown();

    // Un-register with ResourceManager
    rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "", "");

}

From source file:eu.stratosphere.yarn.Client.java

License:Apache License

public void run(String[] args) throws Exception {

    if (UserGroupInformation.isSecurityEnabled()) {
        throw new RuntimeException("Stratosphere YARN client does not have security support right now."
                + "File a bug, we will fix it asap");
    }
    //Utils.logFilesInCurrentDirectory(LOG);
    //
    //   Command Line Options
    //
    Options options = new Options();
    options.addOption(VERBOSE);
    options.addOption(STRATOSPHERE_CONF_DIR);
    options.addOption(STRATOSPHERE_JAR);
    options.addOption(JM_MEMORY);
    options.addOption(TM_MEMORY);
    options.addOption(TM_CORES);
    options.addOption(CONTAINER);
    options.addOption(GEN_CONF);
    options.addOption(QUEUE);
    options.addOption(QUERY);
    options.addOption(SHIP_PATH);

    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;
    try {
        cmd = parser.parse(options, args);
    } catch (MissingOptionException moe) {
        System.out.println(moe.getMessage());
        printUsage();
        System.exit(1);
    }

    if (System.getProperty("log4j.configuration") == null) {
        Logger root = Logger.getRootLogger();
        root.removeAllAppenders();
        PatternLayout layout = new PatternLayout("%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n");
        ConsoleAppender appender = new ConsoleAppender(layout, "System.err");
        root.addAppender(appender);
        if (cmd.hasOption(VERBOSE.getOpt())) {
            root.setLevel(Level.DEBUG);
            LOG.debug("CLASSPATH: " + System.getProperty("java.class.path"));
        } else {
            root.setLevel(Level.INFO);
        }
    }

    // Jar Path
    Path localJarPath;
    if (cmd.hasOption(STRATOSPHERE_JAR.getOpt())) {
        String userPath = cmd.getOptionValue(STRATOSPHERE_JAR.getOpt());
        if (!userPath.startsWith("file://")) {
            userPath = "file://" + userPath;
        }
        localJarPath = new Path(userPath);
    } else {
        localJarPath = new Path(
                "file://" + Client.class.getProtectionDomain().getCodeSource().getLocation().getPath());
    }

    if (cmd.hasOption(GEN_CONF.getOpt())) {
        LOG.info("Placing default configuration in current directory");
        File outFile = generateDefaultConf(localJarPath);
        LOG.info("File written to " + outFile.getAbsolutePath());
        System.exit(0);
    }

    // Conf Path 
    Path confPath = null;
    String confDirPath = "";
    if (cmd.hasOption(STRATOSPHERE_CONF_DIR.getOpt())) {
        confDirPath = cmd.getOptionValue(STRATOSPHERE_CONF_DIR.getOpt()) + "/";
        File confFile = new File(confDirPath + CONFIG_FILE_NAME);
        if (!confFile.exists()) {
            LOG.fatal("Unable to locate configuration file in " + confFile);
            System.exit(1);
        }
        confPath = new Path(confFile.getAbsolutePath());
    } else {
        System.out.println("No configuration file has been specified");

        // no configuration path given.
        // -> see if there is one in the current directory
        File currDir = new File(".");
        File[] candidates = currDir.listFiles(new FilenameFilter() {
            @Override
            public boolean accept(final File dir, final String name) {
                return name != null && name.endsWith(".yaml");
            }
        });
        if (candidates == null || candidates.length == 0) {
            System.out.println(
                    "No configuration file has been found in the current directory.\n" + "Copying default.");
            File outFile = generateDefaultConf(localJarPath);
            confPath = new Path(outFile.toURI());
        } else {
            if (candidates.length > 1) {
                System.out.println("Multiple .yaml configuration files were found in the current directory\n"
                        + "Please specify one explicitly");
                System.exit(1);
            } else if (candidates.length == 1) {
                confPath = new Path(candidates[0].toURI());
            }
        }
    }
    List<File> shipFiles = new ArrayList<File>();
    // path to directory to ship
    if (cmd.hasOption(SHIP_PATH.getOpt())) {
        String shipPath = cmd.getOptionValue(SHIP_PATH.getOpt());
        File shipDir = new File(shipPath);
        if (shipDir.isDirectory()) {
            shipFiles = new ArrayList<File>(Arrays.asList(shipDir.listFiles(new FilenameFilter() {
                @Override
                public boolean accept(File dir, String name) {
                    return !(name.equals(".") || name.equals(".."));
                }
            })));
        } else {
            LOG.warn("Ship directory is not a directory!");
        }
    }
    boolean hasLog4j = false;
    //check if there is a log4j file
    if (confDirPath.length() > 0) {
        File l4j = new File(confDirPath + "/log4j.properties");
        if (l4j.exists()) {
            shipFiles.add(l4j);
            hasLog4j = true;
        }
    }

    // queue
    String queue = "default";
    if (cmd.hasOption(QUEUE.getOpt())) {
        queue = cmd.getOptionValue(QUEUE.getOpt());
    }

    // JobManager Memory
    int jmMemory = 512;
    if (cmd.hasOption(JM_MEMORY.getOpt())) {
        jmMemory = Integer.valueOf(cmd.getOptionValue(JM_MEMORY.getOpt()));
    }

    // Task Managers memory
    int tmMemory = 1024;
    if (cmd.hasOption(TM_MEMORY.getOpt())) {
        tmMemory = Integer.valueOf(cmd.getOptionValue(TM_MEMORY.getOpt()));
    }

    // Task Managers vcores
    int tmCores = 1;
    if (cmd.hasOption(TM_CORES.getOpt())) {
        tmCores = Integer.valueOf(cmd.getOptionValue(TM_CORES.getOpt()));
    }
    Utils.getStratosphereConfiguration(confPath.toUri().getPath());
    int jmPort = GlobalConfiguration.getInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, 0);
    if (jmPort == 0) {
        LOG.warn("Unable to find job manager port in configuration!");
        jmPort = ConfigConstants.DEFAULT_JOB_MANAGER_IPC_PORT;
    }
    conf = Utils.initializeYarnConfiguration();

    // initialize HDFS
    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem 
    // Create a local resource to point to the destination jar path 
    final FileSystem fs = FileSystem.get(conf);

    if (fs.getScheme().startsWith("file")) {
        LOG.warn("The file system scheme is '" + fs.getScheme() + "'. This indicates that the "
                + "specified Hadoop configuration path is wrong and the sytem is using the default Hadoop configuration values."
                + "The Stratosphere YARN client needs to store its files in a distributed file system");
    }

    // Create yarnClient
    final YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Query cluster for metrics
    if (cmd.hasOption(QUERY.getOpt())) {
        showClusterMetrics(yarnClient);
    }
    if (!cmd.hasOption(CONTAINER.getOpt())) {
        LOG.fatal("Missing required argument " + CONTAINER.getOpt());
        printUsage();
        yarnClient.stop();
        System.exit(1);
    }

    // TM Count
    final int taskManagerCount = Integer.valueOf(cmd.getOptionValue(CONTAINER.getOpt()));

    System.out.println("Using values:");
    System.out.println("\tContainer Count = " + taskManagerCount);
    System.out.println("\tJar Path = " + localJarPath.toUri().getPath());
    System.out.println("\tConfiguration file = " + confPath.toUri().getPath());
    System.out.println("\tJobManager memory = " + jmMemory);
    System.out.println("\tTaskManager memory = " + tmMemory);
    System.out.println("\tTaskManager cores = " + tmCores);

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    Resource maxRes = appResponse.getMaximumResourceCapability();
    if (tmMemory > maxRes.getMemory() || tmCores > maxRes.getVirtualCores()) {
        LOG.fatal("The cluster does not have the requested resources for the TaskManagers available!\n"
                + "Maximum Memory: " + maxRes.getMemory() + ", Maximum Cores: " + tmCores);
        yarnClient.stop();
        System.exit(1);
    }
    if (jmMemory > maxRes.getMemory()) {
        LOG.fatal("The cluster does not have the requested resources for the JobManager available!\n"
                + "Maximum Memory: " + maxRes.getMemory());
        yarnClient.stop();
        System.exit(1);
    }
    int totalMemoryRequired = jmMemory + tmMemory * taskManagerCount;
    ClusterResourceDescription freeClusterMem = getCurrentFreeClusterResources(yarnClient);
    if (freeClusterMem.totalFreeMemory < totalMemoryRequired) {
        LOG.fatal("This YARN session requires " + totalMemoryRequired + "MB of memory in the cluster. "
                + "There are currently only " + freeClusterMem.totalFreeMemory + "MB available.");
        yarnClient.stop();
        System.exit(1);
    }
    if (tmMemory > freeClusterMem.containerLimit) {
        LOG.fatal("The requested amount of memory for the TaskManagers (" + tmMemory + "MB) is more than "
                + "the largest possible YARN container: " + freeClusterMem.containerLimit);
        yarnClient.stop();
        System.exit(1);
    }
    if (jmMemory > freeClusterMem.containerLimit) {
        LOG.fatal("The requested amount of memory for the JobManager (" + jmMemory + "MB) is more than "
                + "the largest possible YARN container: " + freeClusterMem.containerLimit);
        yarnClient.stop();
        System.exit(1);
    }

    // respect custom JVM options in the YAML file
    final String javaOpts = GlobalConfiguration.getString(ConfigConstants.STRATOSPHERE_JVM_OPTIONS, "");

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

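    // ApplicationConstants.LOG_DIR_EXPANSION_VAR is the literal "<LOG_DIR>" token; the NodeManager
    // expands it when the container is launched, so the log4j file and the stdout/stderr redirects
    // below end up in the container's log directory.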
    String amCommand = "$JAVA_HOME/bin/java" + " -Xmx" + Utils.calculateHeapSize(jmMemory) + "M " + javaOpts;
    if (hasLog4j) {
        amCommand += " -Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
                + "/jobmanager-log4j.log\" -Dlog4j.configuration=file:log4j.properties";
    }
    amCommand += " eu.stratosphere.yarn.ApplicationMaster" + " " + " 1>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stdout.log" + " 2>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stderr.log";
    amContainer.setCommands(Collections.singletonList(amCommand));

    System.err.println("amCommand=" + amCommand);

    // Set-up ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    final ApplicationId appId = appContext.getApplicationId();

    // Setup jar for ApplicationMaster
    LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    LocalResource stratosphereConf = Records.newRecord(LocalResource.class);
    Path remotePathJar = Utils.setupLocalResource(conf, fs, appId.toString(), localJarPath, appMasterJar,
            fs.getHomeDirectory());
    Path remotePathConf = Utils.setupLocalResource(conf, fs, appId.toString(), confPath, stratosphereConf,
            fs.getHomeDirectory());
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(2);
    localResources.put("stratosphere.jar", appMasterJar);
    localResources.put("stratosphere-conf.yaml", stratosphereConf);

    // setup security tokens (code from apache storm)
    final Path[] paths = new Path[3 + shipFiles.size()];
    StringBuilder envShipFileList = new StringBuilder();
    // upload ship files
    for (int i = 0; i < shipFiles.size(); i++) {
        File shipFile = shipFiles.get(i);
        LocalResource shipResources = Records.newRecord(LocalResource.class);
        Path shipLocalPath = new Path("file://" + shipFile.getAbsolutePath());
        paths[3 + i] = Utils.setupLocalResource(conf, fs, appId.toString(), shipLocalPath, shipResources,
                fs.getHomeDirectory());
        localResources.put(shipFile.getName(), shipResources);

        envShipFileList.append(paths[3 + i]);
        if (i + 1 < shipFiles.size()) {
            envShipFileList.append(',');
        }
    }

    paths[0] = remotePathJar;
    paths[1] = remotePathConf;
    paths[2] = new Path(fs.getHomeDirectory(), ".stratosphere/" + appId.toString() + "/");
    FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
    fs.setPermission(paths[2], permission); // set permission for path.
    Utils.setTokensFor(amContainer, paths, this.conf);

    amContainer.setLocalResources(localResources);
    fs.close();

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    Utils.setupEnv(conf, appMasterEnv);
    // set configuration values
    appMasterEnv.put(Client.ENV_TM_COUNT, String.valueOf(taskManagerCount));
    appMasterEnv.put(Client.ENV_TM_CORES, String.valueOf(tmCores));
    appMasterEnv.put(Client.ENV_TM_MEMORY, String.valueOf(tmMemory));
    appMasterEnv.put(Client.STRATOSPHERE_JAR_PATH, remotePathJar.toString());
    appMasterEnv.put(Client.ENV_APP_ID, appId.toString());
    appMasterEnv.put(Client.ENV_CLIENT_HOME_DIR, fs.getHomeDirectory().toString());
    appMasterEnv.put(Client.ENV_CLIENT_SHIP_FILES, envShipFileList.toString());
    appMasterEnv.put(Client.ENV_CLIENT_USERNAME, UserGroupInformation.getCurrentUser().getShortUserName());

    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(jmMemory);
    capability.setVirtualCores(1);

    appContext.setApplicationName("Stratosphere"); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue(queue);

    // file that we write into the conf/ dir containing the jobManager address.
    final File addrFile = new File(confDirPath + CliFrontend.JOBMANAGER_ADDRESS_FILE);

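    // Shutdown hook: if the client JVM exits (e.g. via Ctrl-C), kill the YARN application,
    // remove the staged jar/conf directory from HDFS and delete the jobmanager address file.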
    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            try {
                LOG.info("Killing the Stratosphere-YARN application.");
                yarnClient.killApplication(appId);
                LOG.info("Deleting files in " + paths[2]);
                FileSystem shutFS = FileSystem.get(conf);
                shutFS.delete(paths[2], true); // delete conf and jar file.
                shutFS.close();
            } catch (Exception e) {
                LOG.warn("Exception while killing the YARN application", e);
            }
            try {
                addrFile.delete();
            } catch (Exception e) {
                LOG.warn("Exception while deleting the jobmanager address file", e);
            }
            LOG.info("YARN Client is shutting down");
            yarnClient.stop();
        }
    });

    LOG.info("Submitting application master " + appId);
    yarnClient.submitApplication(appContext);
    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();
    boolean told = false;
    char[] el = { '/', '|', '\\', '-' };
    int i = 0;
    while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED
            && appState != YarnApplicationState.FAILED) {
        if (!told && appState == YarnApplicationState.RUNNING) {
            System.err
                    .println("Stratosphere JobManager is now running on " + appReport.getHost() + ":" + jmPort);
            System.err.println("JobManager Web Interface: " + appReport.getTrackingUrl());
            // write jobmanager connect information

            PrintWriter out = new PrintWriter(addrFile);
            out.println(appReport.getHost() + ":" + jmPort);
            out.close();
            addrFile.setReadable(true, false); // readable for all.
            told = true;
        }
        if (!told) {
            System.err.print(el[i++] + "\r");
            if (i == el.length) {
                i = 0;
            }
            Thread.sleep(500); // wait for the application to switch to RUNNING
        } else {
            Thread.sleep(5000);
        }

        appReport = yarnClient.getApplicationReport(appId);
        appState = appReport.getYarnApplicationState();
    }

    LOG.info("Application " + appId + " finished with" + " state " + appState + " at "
            + appReport.getFinishTime());
    if (appState == YarnApplicationState.FAILED || appState == YarnApplicationState.KILLED) {
        LOG.warn("Application failed. Diagnostics " + appReport.getDiagnostics());
    }

}

From source file:gobblin.yarn.GobblinYarnAppLauncher.java

License:Apache License

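/**
 * Builds the shell command that launches the {@link GobblinApplicationMaster}, sizing the JVM heap
 * to the given amount of memory and redirecting stdout/stderr into the container log directory via
 * {@link ApplicationConstants#LOG_DIR_EXPANSION_VAR}.
 */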
private String buildApplicationMasterCommand(int memoryMbs) {
    String appMasterClassName = GobblinApplicationMaster.class.getSimpleName();
    return new StringBuilder().append(ApplicationConstants.Environment.JAVA_HOME.$()).append("/bin/java")
            .append(" -Xmx").append(memoryMbs).append("M").append(" ")
            .append(JvmUtils.formatJvmArguments(this.appMasterJvmArgs)).append(" ")
            .append(GobblinApplicationMaster.class.getName()).append(" --")
            .append(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME).append(" ")
            .append(this.applicationName).append(" 1>").append(ApplicationConstants.LOG_DIR_EXPANSION_VAR)
            .append(File.separator).append(appMasterClassName).append(".").append(ApplicationConstants.STDOUT)
            .append(" 2>").append(ApplicationConstants.LOG_DIR_EXPANSION_VAR).append(File.separator)
            .append(appMasterClassName).append(".").append(ApplicationConstants.STDERR).toString();
}
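
Both examples above embed ApplicationConstants.LOG_DIR_EXPANSION_VAR (the literal "<LOG_DIR>" token) in the launch command so that the NodeManager redirects the process output into the container's log directory. The following is a minimal sketch of that pattern, not taken from either project: the class LogDirExpansionSketch and the application master class com.example.MyApplicationMaster are hypothetical, and it assumes hadoop-yarn-api (plus hadoop-yarn-common, which backs Records.newRecord) on the classpath.

import java.util.Collections;

import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.util.Records;

public class LogDirExpansionSketch {

    public static void main(String[] args) {
        // Build a launch command whose stdout/stderr land in the container log directory.
        // "<LOG_DIR>" (LOG_DIR_EXPANSION_VAR) is substituted by the NodeManager at launch time.
        String command = ApplicationConstants.Environment.JAVA_HOME.$() + "/bin/java"
                + " -Xmx512M"
                + " com.example.MyApplicationMaster" // hypothetical application master class
                + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/" + ApplicationConstants.STDOUT
                + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/" + ApplicationConstants.STDERR;

        // Hand the command to the container launch context, as the examples above do.
        ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
        amContainer.setCommands(Collections.singletonList(command));

        System.out.println(command);
    }
}

Because the redirects are written with the "<LOG_DIR>" token rather than a concrete path, the same command works on any node: YARN rewrites the token to the actual container log directory when the container starts.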