Example usage for org.apache.hadoop.security Credentials writeTokenStorageToStream

Introduction

On this page you can find usage examples for org.apache.hadoop.security Credentials writeTokenStorageToStream, collected from open-source projects.

Prototype

public void writeTokenStorageToStream(DataOutputStream os) throws IOException 
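
writeTokenStorageToStream serializes every token and secret key held by the Credentials object to the given DataOutputStream. The examples below all follow the same pattern: write the current user's credentials into a DataOutputBuffer (which extends DataOutputStream), then wrap the bytes in a ByteBuffer for use in a container launch context. A minimal, self-contained sketch of that pattern (the sketch's class and method names are illustrative):

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

public class TokenSerializationSketch {
    public static ByteBuffer serializeCurrentUserTokens() throws IOException {
        // Collect the delegation tokens and secret keys of the current user.
        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        // DataOutputBuffer extends DataOutputStream, so it can be passed directly.
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        // Wrap only the valid prefix of the backing array; getData() may be longer than getLength().
        return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    }
}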

Usage

From source file:com.sogou.dockeronyarn.service.DockerApplicationMaster_24.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");
    try {
        publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_START);
    } catch (Exception e) {
        LOG.error("App Attempt start event coud not be pulished for " + appAttemptID.toString(), e);
    }

    // Note: Credentials, Token, UserGroupInformation, DataOutputBuffer class
    // are marked as LimitedPrivate
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Create appSubmitterUgi and add original tokens to it
    String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    // Setup local RPC Server to accept status requests directly from clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capabililty of resources in this cluster " + maxVCores);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    if (containerVirtualCores > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerVirtualCores + ", max=" + maxVCores);
        containerVirtualCores = maxVCores;
    }

    List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
    LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
            + " previous attempts' running containers on AM registration.");
    numAllocatedContainers.addAndGet(previousAMRunningContainers.size());

    int numTotalContainersToRequest = numTotalContainers - previousAMRunningContainers.size();
    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and shell script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainersToRequest; ++i) {
        ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);
    try {
        publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_END);
    } catch (Exception e) {
        LOG.error("App Attempt start event coud not be pulished for " + appAttemptID.toString(), e);
    }
    numRetryCount.set(0);
}
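
For context: a launched container typically recovers these credentials with Credentials#readTokenStorageStream, the read-side counterpart of writeTokenStorageToStream. A minimal sketch of that round trip, assuming a ByteBuffer produced as in the examples above (the sketch's class and method names are illustrative):

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.security.Credentials;

public class TokenDeserializationSketch {
    public static Credentials readTokens(ByteBuffer tokenBytes) throws IOException {
        // Copy out of a duplicate so the caller's buffer position is left untouched.
        ByteBuffer copy = tokenBytes.duplicate();
        byte[] bytes = new byte[copy.remaining()];
        copy.get(bytes);
        // DataInputBuffer extends DataInputStream, which readTokenStorageStream expects.
        DataInputBuffer dib = new DataInputBuffer();
        dib.reset(bytes, bytes.length);
        Credentials credentials = new Credentials();
        credentials.readTokenStorageStream(dib);
        return credentials;
    }
}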

From source file:com.srini.hadoopYarn.ApplicationMaster.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public boolean run() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");

    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    // Setup local RPC Server to accept status requests directly from clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and shell script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainers; ++i) {
        ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);

    while (!done && (numCompletedContainers.get() != numTotalContainers)) {
        try {
            Thread.sleep(200);
        } catch (InterruptedException ex) {
            // interrupted while waiting; loop and re-check the done flag
        }
    }
    finish();

    return success;
}

From source file:com.srini.hadoopYarn.Client.java

License:Apache License

/**
 * Main run function for the client
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {

    LOG.info("Running Client");
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request 
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max. 
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max. 
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources         
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem 
    // Create a local resource to point to the destination jar path 
    FileSystem fs = FileSystem.get(conf);
    Path src = new Path(appMasterJar);
    String pathSuffix = appName + "/" + appId.getId() + "/AppMaster.jar";
    Path dst = new Path(fs.getHomeDirectory(), pathSuffix);
    fs.copyFromLocalFile(false, true, src, dst);
    FileStatus destStatus = fs.getFileStatus(dst);
    LocalResource amJarRsrc = Records.newRecord(LocalResource.class);

    // Set the type of resource - file or archive
    // archives are untarred at destination
    // we don't need the jar file to be untarred for now
    amJarRsrc.setType(LocalResourceType.FILE);
    // Set visibility of the resource 
    // Setting to most private option
    amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    // Set the resource to be copied over
    amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(dst));
    // Set timestamp and length of file so that the framework 
    // can do basic sanity checks for the local resource 
    // after it has been copied over to ensure it is the same 
    // resource the client intended to use with the application
    amJarRsrc.setTimestamp(destStatus.getModificationTime());
    amJarRsrc.setSize(destStatus.getLen());
    localResources.put("AppMaster.jar", amJarRsrc);

    // Set the log4j properties if needed 
    if (!log4jPropFile.isEmpty()) {
        Path log4jSrc = new Path(log4jPropFile);
        Path log4jDst = new Path(fs.getHomeDirectory(), "log4j.props");
        fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
        FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
        LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
        log4jRsrc.setType(LocalResourceType.FILE);
        log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
        log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
        log4jRsrc.setSize(log4jFileStatus.getLen());
        localResources.put("log4j.properties", log4jRsrc);
    }

    // The shell script has to be made available on the final container(s)
    // where it will be executed. 
    // To do this, we need to first copy into the filesystem that is visible 
    // to the yarn framework. 
    // We do not need to set this as a local resource for the application 
    // master as the application master does not need it.       
    String hdfsShellScriptLocation = "";
    long hdfsShellScriptLen = 0;
    long hdfsShellScriptTimestamp = 0;
    if (!shellScriptPath.isEmpty()) {
        Path shellSrc = new Path(shellScriptPath);
        String shellPathSuffix = appName + "/" + appId.getId() + "/ExecShellScript.sh";
        Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix);
        fs.copyFromLocalFile(false, true, shellSrc, shellDst);
        hdfsShellScriptLocation = shellDst.toUri().toString();
        FileStatus shellFileStatus = fs.getFileStatus(shellDst);
        hdfsShellScriptLen = shellFileStatus.getLen();
        hdfsShellScriptTimestamp = shellFileStatus.getModificationTime();
    }

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    //amContainer.setContainerTokens(containerToken);

    // Set the env variables to be setup in the env where the application master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // put location of shell script into env
    // using the env info, the application master will create the correct local resource for the 
    // eventual containers that will be launched to execute the shell scripts
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION, hdfsShellScriptLocation);
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP, Long.toString(hdfsShellScriptTimestamp));
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN, Long.toString(hdfsShellScriptLen));

    // Add AppMaster.jar location to classpath       
    // At some point we should not be required to add 
    // the hadoop specific classpaths to the env. 
    // It should be provided out of the box. 
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar)
            .append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master 
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command 
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name 
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    vargs.add("--priority " + String.valueOf(shellCmdPriority));
    if (!shellCommand.isEmpty()) {
        vargs.add("--shell_command " + shellCommand + "");
    }
    if (!shellArgs.isEmpty()) {
        vargs.add("--shell_args " + shellArgs + "");
    }
    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }
    if (debugFlag) {
        vargs.add("--debug");
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements
    // For now, only memory is supported so we set memory requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide? 
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on success 
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    // TODO
    // Try submitting the same request again
    // app submission failure?

    // Monitor the application
    return monitorApplication(appId);

}

From source file:com.tito.easyyarn.appmaster.ApplicationMaster.java

License:Apache License

private void extractTokens() {
    // Credentials, Token, UserGroupInformation, DataOutputBuffer
    Credentials credentials;
    try {
        credentials = UserGroupInformation.getCurrentUser().getCredentials();
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        // Now remove the AM->RM token so that containers cannot access it.
        Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
        LOG.info("Executing with tokens:");
        while (iter.hasNext()) {
            Token<?> token = iter.next();
            LOG.info(token);
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                iter.remove();
            }
        }
        allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    } catch (IOException e) {
        LOG.error("extractTokens error={}", e);

    }

}

From source file:com.toy.Client.java

License:Apache License

/**
 * Start a new Application Master and deploy the web application on 2 Tomcat containers
 *
 * @throws Exception
 */
void start() throws Exception {

    //Check tomcat dir
    final File tomcatHomeDir = new File(toyConfig.tomcat);
    final File tomcatLibraries = new File(tomcatHomeDir, "lib");
    final File tomcatBinaries = new File(tomcatHomeDir, "bin");
    Preconditions.checkState(tomcatLibraries.isDirectory(),
            tomcatLibraries.getAbsolutePath() + " does not exist");

    //Check war file
    final File warFile = new File(toyConfig.war);
    Preconditions.checkState(warFile.isFile(), warFile.getAbsolutePath() + " does not exist");

    yarn = YarnClient.createYarnClient();
    yarn.init(configuration);
    yarn.start();

    YarnClientApplication yarnApplication = yarn.createApplication();
    GetNewApplicationResponse newApplication = yarnApplication.getNewApplicationResponse();
    appId = newApplication.getApplicationId();
    ApplicationSubmissionContext appContext = yarnApplication.getApplicationSubmissionContext();
    appContext.setApplicationName("Tomcat : " + tomcatHomeDir.getName() + "\n War : " + warFile.getName());
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // Register required libraries
    Map<String, LocalResource> localResources = new HashMap<>();
    FileSystem fs = FileSystem.get(configuration);
    uploadDepAndRegister(localResources, appId, fs, "lib-ext/curator-client-2.3.0.jar");
    uploadDepAndRegister(localResources, appId, fs, "lib-ext/curator-framework-2.3.0.jar");
    uploadDepAndRegister(localResources, appId, fs, "lib-ext/curator-recipes-2.3.0.jar");

    // Register application master jar
    registerLocalResource(localResources, appId, fs, new Path(appMasterJar));

    // Register the WAR that will be deployed on Tomcat
    registerLocalResource(localResources, appId, fs, new Path(warFile.getAbsolutePath()));

    // Register Tomcat libraries
    for (File lib : tomcatLibraries.listFiles()) {
        registerLocalResource(localResources, appId, fs, new Path(lib.getAbsolutePath()));
    }

    File juli = new File(tomcatBinaries, "tomcat-juli.jar");
    if (juli.exists()) {
        registerLocalResource(localResources, appId, fs, new Path(juli.getAbsolutePath()));
    }

    amContainer.setLocalResources(localResources);

    // Setup master environment
    Map<String, String> env = new HashMap<>();
    final String TOMCAT_LIBS = fs.getHomeDirectory() + "/" + Constants.TOY_PREFIX + appId.toString();
    env.put(Constants.TOMCAT_LIBS, TOMCAT_LIBS);

    if (toyConfig.zookeeper != null) {
        env.put(Constants.ZOOKEEPER_QUORUM, toyConfig.zookeeper);
    } else {
        env.put(Constants.ZOOKEEPER_QUORUM, NetUtils.getHostname());
    }

    // 1. Compute classpath
    StringBuilder classPathEnv = new StringBuilder(ApplicationConstants.Environment.CLASSPATH.$())
            .append(File.pathSeparatorChar).append("./*");
    for (String c : configuration.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (configuration.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }
    env.put("CLASSPATH", classPathEnv.toString());
    env.put(Constants.WAR, warFile.getName());
    // For unit test with YarnMiniCluster
    env.put(YarnConfiguration.RM_SCHEDULER_ADDRESS, configuration.get(YarnConfiguration.RM_SCHEDULER_ADDRESS));
    amContainer.setEnvironment(env);

    // 1.2 Set constraint for the app master
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(32);
    appContext.setResource(capability);

    // 2. Compute app master cmd line
    Vector<CharSequence> vargs = new Vector<>(10);
    // Set java executable command
    vargs.add(ApplicationConstants.Environment.JAVA_HOME.$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx32m");
    // Set class name
    vargs.add(TOYMaster.class.getCanonicalName());
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<>();
    commands.add(command.toString());
    amContainer.setCommands(commands);
    appContext.setAMContainerSpec(amContainer);

    // 3. Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = configuration.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new Exception("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final org.apache.hadoop.security.token.Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer,
                credentials);
        if (tokens != null) {
            for (org.apache.hadoop.security.token.Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setQueue("default");
    LOG.info("Submitting TOY application {} to ASM", appId.toString());
    yarn.submitApplication(appContext);

    // Monitor the application and exit if it is RUNNING
    monitorApplication(appId);
}

From source file:com.yahoo.storm.yarn.StormAMRMClient.java

License:Open Source License

public void launchSupervisorOnContainer(Container container) throws IOException {
    // create a container launch context
    ContainerLaunchContext launchContext = Records.newRecord(ContainerLaunchContext.class);
    UserGroupInformation user = UserGroupInformation.getCurrentUser();
    try {
        Credentials credentials = user.getCredentials();
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        launchContext.setTokens(securityTokens);
    } catch (IOException e) {
        LOG.warn("Getting current user info failed when trying to launch the container" + e.getMessage());
    }

    // CLC: env
    Map<String, String> env = new HashMap<String, String>();
    env.put("STORM_LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    launchContext.setEnvironment(env);

    // CLC: local resources includes storm, conf
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    String storm_zip_path = (String) storm_conf.get("storm.zip.path");
    Path zip = new Path(storm_zip_path);
    FileSystem fs = FileSystem.get(hadoopConf);
    String vis = (String) storm_conf.get("storm.zip.visibility");
    if (vis.equals("PUBLIC"))
        localResources.put("storm",
                Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE, LocalResourceVisibility.PUBLIC));
    else if (vis.equals("PRIVATE"))
        localResources.put("storm",
                Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE, LocalResourceVisibility.PRIVATE));
    else if (vis.equals("APPLICATION"))
        localResources.put("storm", Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE,
                LocalResourceVisibility.APPLICATION));

    String appHome = Util.getApplicationHomeForId(appAttemptId.toString());
    Path confDst = Util.createConfigurationFileInFs(fs, appHome, this.storm_conf, this.hadoopConf);
    localResources.put("conf", Util.newYarnAppResource(fs, confDst));

    launchContext.setLocalResources(localResources);

    // CLC: command
    List<String> supervisorArgs = Util.buildSupervisorCommands(this.storm_conf);
    launchContext.setCommands(supervisorArgs);

    try {
        LOG.info("Use NMClient to launch supervisors in container. ");
        nmClient.startContainer(container, launchContext);

        String userShortName = user.getShortUserName();
        if (userShortName != null)
            LOG.info("Supervisor log: http://" + container.getNodeHttpAddress() + "/node/containerlogs/"
                    + container.getId().toString() + "/" + userShortName + "/supervisor.log");
    } catch (Exception e) {
        LOG.error("Caught an exception while trying to start a container", e);
        System.exit(-1);
    }
}

From source file:com.yahoo.storm.yarn.StormOnYarn.java

License:Open Source License

private void launchApp(String appName, String queue, int amMB, String storm_zip_location) throws Exception {
    LOG.debug("StormOnYarn:launchApp() ...");
    YarnClientApplication client_app = _yarn.createApplication();
    GetNewApplicationResponse app = client_app.getNewApplicationResponse();
    _appId = app.getApplicationId();
    LOG.debug("_appId:" + _appId);

    if (amMB > app.getMaximumResourceCapability().getMemory()) {
        //TODO need some sanity checks
        amMB = app.getMaximumResourceCapability().getMemory();
    }
    ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);
    appContext.setApplicationId(app.getApplicationId());
    appContext.setApplicationName(appName);
    appContext.setQueue(queue);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the
    // local resources
    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    String appMasterJar = findContainingJar(MasterServer.class);
    FileSystem fs = FileSystem.get(_hadoopConf);
    Path src = new Path(appMasterJar);
    String appHome = Util.getApplicationHomeForId(_appId.toString());
    Path dst = new Path(fs.getHomeDirectory(), appHome + Path.SEPARATOR + "AppMaster.jar");
    fs.copyFromLocalFile(false, true, src, dst);
    localResources.put("AppMaster.jar", Util.newYarnAppResource(fs, dst));

    String stormVersion = Util.getStormVersion();
    Path zip;
    if (storm_zip_location != null) {
        zip = new Path(storm_zip_location);
    } else {
        zip = new Path("/lib/storm/" + stormVersion + "/storm.zip");
    }
    _stormConf.put("storm.zip.path", zip.makeQualified(fs).toUri().getPath());
    LocalResourceVisibility visibility = LocalResourceVisibility.PUBLIC;
    _stormConf.put("storm.zip.visibility", "PUBLIC");
    if (!Util.isPublic(fs, zip)) {
        visibility = LocalResourceVisibility.APPLICATION;
        _stormConf.put("storm.zip.visibility", "APPLICATION");
    }
    localResources.put("storm", Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE, visibility));

    Path confDst = Util.createConfigurationFileInFs(fs, appHome, _stormConf, _hadoopConf);
    // establish a symbolic link to conf directory
    localResources.put("conf", Util.newYarnAppResource(fs, confDst));

    // Setup security tokens
    Path[] paths = new Path[3];
    paths[0] = dst;
    paths[1] = zip;
    paths[2] = confDst;
    Credentials credentials = new Credentials();
    TokenCache.obtainTokensForNamenodes(credentials, paths, _hadoopConf);
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    //security tokens for HDFS distributed cache
    amContainer.setTokens(securityTokens);

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the env variables to be setup in the env where the application master
    // will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();
    // add the runtime classpath needed for tests to work
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./conf");
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./AppMaster.jar");

    //Make sure that AppMaster has access to all YARN JARs
    List<String> yarn_classpath_cmd = java.util.Arrays.asList("yarn", "classpath");
    ProcessBuilder pb = new ProcessBuilder(yarn_classpath_cmd).redirectError(Redirect.INHERIT);
    LOG.info("YARN CLASSPATH COMMAND = [" + yarn_classpath_cmd + "]");
    pb.environment().putAll(System.getenv());
    Process proc = pb.start();
    BufferedReader reader = new BufferedReader(new InputStreamReader(proc.getInputStream(), "UTF-8"));
    String line = "";
    String yarn_class_path = (String) _stormConf.get("storm.yarn.yarn_classpath");
    if (yarn_class_path == null) {
        StringBuilder yarn_class_path_builder = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            yarn_class_path_builder.append(line);
        }
        yarn_class_path = yarn_class_path_builder.toString();
    }
    LOG.info("YARN CLASSPATH = [" + yarn_class_path + "]");
    proc.waitFor();
    reader.close();
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), yarn_class_path);

    String stormHomeInZip = Util.getStormHomeInZip(fs, zip, stormVersion);
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./storm/" + stormHomeInZip + "/*");
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./storm/" + stormHomeInZip + "/lib/*");

    String java_home = (String) _stormConf.get("storm.yarn.java_home");
    if (java_home == null)
        java_home = System.getenv("JAVA_HOME");

    if (java_home != null && !java_home.isEmpty())
        env.put("JAVA_HOME", java_home);
    LOG.info("Using JAVA_HOME = [" + env.get("JAVA_HOME") + "]");

    env.put("appJar", appMasterJar);
    env.put("appName", appName);
    env.put("appId", new Integer(_appId.getId()).toString());
    env.put("STORM_LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<String> vargs = new Vector<String>();
    if (java_home != null && !java_home.isEmpty())
        vargs.add(env.get("JAVA_HOME") + "/bin/java");
    else
        vargs.add("java");
    vargs.add("-Dstorm.home=./storm/" + stormHomeInZip + "/");
    vargs.add("-Dlogfile.name=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/master.log");
    //vargs.add("-verbose:class");
    vargs.add("com.yahoo.storm.yarn.MasterServer");
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout");
    // Set java executable command
    LOG.info("Setting up app master command:" + vargs);

    amContainer.setCommands(vargs);

    // Set up resource type requirements
    // For now, only memory is supported so we set memory requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMB);
    appContext.setResource(capability);
    appContext.setAMContainerSpec(amContainer);

    _yarn.submitApplication(appContext);
}

From source file:de.huberlin.wbi.hiway.am.HiWay.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @return True if there were no errors
 * @throws YarnException
 *             YarnException
 * @throws IOException
 *             IOException
 */
@SuppressWarnings("unchecked")
public boolean run() throws YarnException, IOException {
    System.out.println("Starting ApplicationMaster");

    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    try (DataOutputBuffer dob = new DataOutputBuffer()) {
        credentials.writeTokenStorageToStream(dob);
        // Now remove the AM->RM token so that containers cannot access it.
        Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
        while (iter.hasNext()) {
            Token<?> token = iter.next();
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                iter.remove();
            }
        }
        allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

        allocListener = new RMCallbackHandler(this);
        amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
        amRMClient.init(conf);
        amRMClient.start();

        containerListener = new NMCallbackHandler(this);
        nmClientAsync = new NMClientAsyncImpl(containerListener);
        nmClientAsync.init(conf);
        nmClientAsync.start();

        Data workflowData = new Data(workflowPath);
        workflowData.stageIn();

        // Register self with ResourceManager. This will start heartbeating to the RM.
        appMasterHostname = NetUtils.getHostname();
        RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
                appMasterRpcPort, appMasterTrackingUrl);

        switch (schedulerName) {
        case staticRoundRobin:
        case heft:
            scheduler = schedulerName.equals(HiWayConfiguration.HIWAY_SCHEDULER_OPTS.staticRoundRobin)
                    ? new RoundRobin(getWorkflowName(), hdfs, conf)
                    : new HEFT(getWorkflowName(), hdfs, conf);
            break;
        case greedyQueue:
            scheduler = new GreedyQueue(getWorkflowName(), conf, hdfs);
            break;
        default:
            C3PO c3po = new C3PO(getWorkflowName(), hdfs, conf);
            switch (schedulerName) {
            case conservative:
                c3po.setConservatismWeight(12d);
                c3po.setnClones(0);
                c3po.setPlacementAwarenessWeight(0.01d);
                c3po.setOutlookWeight(0.01d);
                break;
            case cloning:
                c3po.setConservatismWeight(0.01d);
                c3po.setnClones(1);
                c3po.setPlacementAwarenessWeight(0.01d);
                c3po.setOutlookWeight(0.01d);
                break;
            case placementAware:
                c3po.setConservatismWeight(0.01d);
                c3po.setnClones(0);
                c3po.setPlacementAwarenessWeight(12d);
                c3po.setOutlookWeight(0.01d);
                break;
            case outlooking:
                c3po.setConservatismWeight(0.01d);
                c3po.setnClones(0);
                c3po.setPlacementAwarenessWeight(0.01d);
                c3po.setOutlookWeight(12d);
                break;
            default:
                c3po.setConservatismWeight(3d);
                c3po.setnClones(2);
                c3po.setPlacementAwarenessWeight(1d);
                c3po.setOutlookWeight(2d);
            }
            scheduler = c3po;
        }

        scheduler.initialize();
        writeEntryToLog(new JsonReportEntry(getRunId(), null, null, null, null, null, HiwayDBI.KEY_WF_NAME,
                getWorkflowName()));
        parseWorkflow();
        scheduler.updateRuntimeEstimates(getRunId().toString());
        federatedReport = new Data(appId + ".log");

        // Dump out information about cluster capability as seen by the resource manager
        int maxMem = response.getMaximumResourceCapability().getMemory();
        int maxCores = response.getMaximumResourceCapability().getVirtualCores();
        System.out.println("Max mem capabililty of resources in this cluster " + maxMem);

        // A resource ask cannot exceed the max.
        if (containerMemory > maxMem) {
            System.out.println("Container memory specified above max threshold of cluster."
                    + " Using max value." + ", specified=" + containerMemory + ", max=" + maxMem);
            containerMemory = maxMem;
        }
        if (containerCores > maxCores) {
            System.out.println("Container vcores specified above max threshold of cluster."
                    + " Using max value." + ", specified=" + containerCores + ", max=" + maxCores);
            containerCores = maxCores;
        }

        while (!done) {
            try {
                while (scheduler.hasNextNodeRequest()) {
                    ContainerRequest containerAsk = setupContainerAskForRM(scheduler.getNextNodeRequest());
                    amRMClient.addContainerRequest(containerAsk);
                }
                Thread.sleep(1000);
                System.out.println("Current application state: requested=" + numRequestedContainers
                        + ", completed=" + numCompletedContainers + ", failed=" + numFailedContainers
                        + ", killed=" + numKilledContainers + ", allocated=" + numAllocatedContainers);
            } catch (InterruptedException e) {
                e.printStackTrace();
                System.exit(-1);
            }
        }
        finish();
    } catch (Exception e) {
        e.printStackTrace();
        System.exit(-1);
    }
    return success;
}

From source file:de.huberlin.wbi.hiway.am.WorkflowDriver.java

License:Apache License

/**
 * Main run function for the application master. Does more initialization (sic!).
 * Calls the abstract {@link #parseWorkflow()}, then {@link #executeWorkflow()} and finally {@link #finish()}.
 * @return True if there were no errors
 */
protected boolean run() throws IOException {
    /* log */ Logger.writeToStdout("Starting ApplicationMaster");

    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();

    try (DataOutputBuffer dob = new DataOutputBuffer()) {

        credentials.writeTokenStorageToStream(dob);
        // remove the AM->RM token so that containers cannot access it.
        Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
        while (iter.hasNext()) {
            Token<?> token = iter.next();
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                iter.remove();
            }
        }
        allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

        // Resource Manager communications setup
        RMCallbackHandler allocListener = new RMCallbackHandler(this);
        amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
        amRMClient.init(conf);
        amRMClient.start();

        // Node Managers communications setup
        containerListener = new NMCallbackHandler(this);
        nmClientAsync = new NMClientAsyncImpl(containerListener);
        nmClientAsync.init(conf);
        nmClientAsync.start();

        // get workflow file
        if (hdfs.exists(workflowPath)) {
            Path localPath = new Path(workflowPath.getName());
            hdfs.copyToLocalFile(false, workflowPath, localPath);
            workflowPath = localPath;
            workflowFile = new Data(workflowPath);
            workflowFile.stageOut();
        } else {
            // TODO this doesn't work; the path is triggered when running the application e.g., as hiway workflows/test.dax
            // but stageIn then fails, because in the HDFS, there is only test.dax and not workflows/test.dax
            workflowFile = new Data(workflowPath);
            workflowFile.stageIn();
        }

        // Register self with ResourceManager. This will start heartbeating to the RM.
        /* the hostname of the container running the Hi-WAY ApplicationMaster */
        String appMasterHostname = NetUtils.getHostname();
        /* the port on which the ApplicationMaster listens for status updates from clients */
        int appMasterRpcPort = -1;
        /* the tracking URL to which the ApplicationMaster publishes info for clients to monitor */
        String appMasterTrackingUrl = "";
        RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
                appMasterRpcPort, appMasterTrackingUrl);

        // initialize scheduler
        switch (schedulerEnumValue) {
        case roundRobin:
        case heft:
            int workerMemory = conf.getInt(YarnConfiguration.NM_PMEM_MB, YarnConfiguration.DEFAULT_NM_PMEM_MB);
            scheduler = schedulerEnumValue.equals(HiWayConfiguration.HIWAY_SCHEDULERS.roundRobin)
                    ? new RoundRobin(getWorkflowName())
                    : new HEFT(getWorkflowName(), workerMemory / containerMemory);
            break;
        case greedy:
            scheduler = new GreedyQueue(getWorkflowName());
            break;
        case memoryAware:
            scheduler = new MemoryAware(getWorkflowName(), amRMClient);
            break;
        case perfectDaxGQ:
            scheduler = new PerfectDaxGreedyQueue(getWorkflowName());
            break;
        default:
            C3PO c3po = new C3PO(getWorkflowName());
            switch (schedulerEnumValue) {
            case dataAware:
                c3po.setConservatismWeight(0.01d);
                c3po.setnClones(0);
                c3po.setPlacementAwarenessWeight(12d);
                c3po.setOutlookWeight(0.01d);
                break;
            default:
                c3po.setConservatismWeight(3d);
                c3po.setnClones(2);
                c3po.setPlacementAwarenessWeight(1d);
                c3po.setOutlookWeight(2d);
            }
            scheduler = c3po;
        }
        scheduler.init(conf, hdfs, containerMemory, customMemoryMap, containerCores, requestPriority);
        scheduler.initializeProvenanceManager();

        /* log */ logger.writeEntryToLog(new JsonReportEntry(getRunId(), null, null, null, null, null,
                HiwayDBI.KEY_WF_NAME, getWorkflowName()));
        logger.federatedReport = new Data(appId + ".log");

        // parse workflow, obtain ready tasks
        Collection<TaskInstance> readyTasks = parseWorkflow();

        // scheduler updates runtime estimates for all tasks comprising the workflow
        scheduler.updateRuntimeEstimates(getRunId().toString());

        scheduler.addTasks(readyTasks);

        // Dump out information about cluster capability as seen by the resource manager
        maxMem = response.getMaximumResourceCapability().getMemory();
        maxCores = response.getMaximumResourceCapability().getVirtualCores();
        /* log */ Logger.writeToStdout("Max mem capabililty of resources in this cluster " + maxMem);

        // A resource ask cannot exceed the max.
        if (containerMemory > maxMem) {
            /* log */ Logger.writeToStdout("Container memory specified above max threshold of cluster."
                    + " Using max value." + ", specified=" + containerMemory + ", max=" + maxMem);
            containerMemory = maxMem;
        }
        if (containerCores > maxCores) {
            /* log */ Logger.writeToStdout("Container vcores specified above max threshold of cluster."
                    + " Using max value." + ", specified=" + containerCores + ", max=" + maxCores);
            containerCores = maxCores;
        }

        // this is the actual work loop:
        // ask for resources until the workflow is done.
        executeWorkflow();

        finish();

    } catch (Exception e) {
        e.printStackTrace(System.out);
        System.exit(-1);
    }
    return success;
}

From source file:de.huberlin.wbi.hiway.common.Client.java

License:Apache License

/**
 * Main run function for the client.
 * 
 * @return true if application completed successfully.
 */
private boolean run() throws IOException, YarnException {

    /* log */ System.out.println("Running Client");

    yarnClient.start();
    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();

    /* log */ System.out.println(
            "Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    /* log */ System.out.println("Got Cluster node info from ASM");
    /* log */ for (NodeReport node : clusterNodeReports)
        System.out.println("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    /* log */ System.out.println("Queue info" + ", queueName=" + queueInfo.getQueueName()
            + ", queueCurrentCapacity=" + queueInfo.getCurrentCapacity() + ", queueMaxCapacity="
            + queueInfo.getMaximumCapacity() + ", queueApplicationCount=" + queueInfo.getApplications().size()
            + ", queueChildQueueCount=" + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    /* log */ for (QueueUserACLInfo aclInfo : listAclInfo)
        for (QueueACL userAcl : aclInfo.getUserAcls())
            System.out.println("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName()
                    + ", userAcl=" + userAcl.name());

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();

    // Get min/max resource capabilities from RM and change memory ask if needed
    int maxVC = appResponse.getMaximumResourceCapability().getVirtualCores();
    /* log */ System.out.println("Max vCores capabililty of resources in this cluster " + maxVC);
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    /* log */ System.out.println("Max mem capabililty of resources in this cluster " + maxMem);
    // A resource ask cannot exceed the max.
    if (amVCores > maxVC) {
        /* log */ System.out.println("AM vCores specified above max threshold of cluster. Using max value."
                + ", specified=" + amVCores + ", max=" + maxVC);
        amVCores = maxVC;
    }
    if (amMemory > maxMem) {
        /* log */ System.out.println("AM memory specified above max threshold of cluster. Using max value."
                + ", specified=" + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    appContext.setApplicationType(conf.get(HiWayConfiguration.HIWAY_AM_APPLICATION_TYPE,
            HiWayConfiguration.HIWAY_AM_APPLICATION_TYPE_DEFAULT));
    appContext.setApplicationName("run " + workflowParam + " (type: " + workflowType.toString() + ")");
    ApplicationId appId = appContext.getApplicationId();
    String hdfsBaseDirectoryName = conf.get(HiWayConfiguration.HIWAY_AM_DIRECTORY_BASE,
            HiWayConfiguration.HIWAY_AM_DIRECTORY_BASE_DEFAULT);
    String hdfsSandboxDirectoryName = conf.get(HiWayConfiguration.HIWAY_AM_DIRECTORY_CACHE,
            HiWayConfiguration.HIWAY_AM_DIRECTORY_CACHE_DEFAULT);
    Path hdfsBaseDirectory = new Path(new Path(hdfs.getUri()), hdfsBaseDirectoryName);
    Data.setHdfsBaseDirectory(hdfsBaseDirectory);
    Path hdfsSandboxDirectory = new Path(hdfsBaseDirectory, hdfsSandboxDirectoryName);
    Path hdfsApplicationDirectory = new Path(hdfsSandboxDirectory, appId.toString());
    Data.setHdfsApplicationDirectory(hdfsApplicationDirectory);
    Data.setHdfs(hdfs);

    Path wfSource, wfDest, wfTemp = null;
    try {
        wfSource = new Path(new URI(workflowParam).getPath());
    } catch (URISyntaxException e) {
        wfSource = new Path(workflowParam);
    }
    wfDest = new Path(hdfsApplicationDirectory + "/" + wfSource.getName());

    // (1) if workflow file in hdfs, then transfer to temp file in local fs
    if (hdfs.exists(wfSource)) {
        wfTemp = new Path("./." + wfSource.getName());
        System.out.println("Workflow found in HDFS at location " + wfSource);
        hdfs.copyToLocalFile(false, wfSource, wfTemp);
    }

    // (2) if galaxy workflow, then copy and replace input ports
    if (workflowType.equals(HiWayConfiguration.HIWAY_WORKFLOW_LANGUAGE_OPTS.galaxy)) {
        wfTemp = preProcessGalaxyWorkflow(wfSource, wfTemp);
    }

    if (wfTemp != null) {
        hdfs.copyFromLocalFile(wfTemp, wfDest);
        new File(wfTemp.toString()).delete();
    } else {
        hdfs.copyFromLocalFile(wfSource, wfDest);
    }

    if (summaryPath != null)
        summary = new Data(summaryPath);
    if (customMemPath != null)
        (new Data(customMemPath)).stageOut();

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    /* set the env variables to be setup in the env where the application master will be run */
    System.out.println("Set the environment for the application master");
    Map<String, String> env = new HashMap<>();

    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar)
            .append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }

    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<>(30);

    // Set java executable command
    System.out.println("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    if (HiWayConfiguration.debug)
        vargs.add(
                "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=9010 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    vargs.add("-Xss" + "16m");
    // Set class name

    switch (workflowType) {
    case dax:
        vargs.add(HiWayConfiguration.HIWAY_WORKFLOW_LANGUAGE_DAX_AM_CLASS);
        break;
    case log:
        vargs.add(HiWayConfiguration.HIWAY_WORKFLOW_LANGUAGE_LOG_AM_CLASS);
        break;
    case galaxy:
        vargs.add(HiWayConfiguration.HIWAY_WORKFLOW_LANGUAGE_GALAXY_AM_CLASS);
        break;
    case cuneiformE:
        vargs.add(HiWayConfiguration.HIWAY_WORKFLOW_LANGUAGE_CUNEIFORME_AM_CLASS);
        break;
    default:
        vargs.add(HiWayConfiguration.HIWAY_WORKFLOW_LANGUAGE_CUNEIFORMJ_AM_CLASS);
    }

    vargs.add("--scheduler " + schedulerName.toString());
    if (memory != null)
        vargs.add("--memory " + memory);
    if (summary != null)
        vargs.add("--summary " + summary.getName());
    if (customMemPath != null)
        vargs.add("--custom " + customMemPath);
    vargs.add("--appid " + appId.toString());
    if (HiWayConfiguration.debug)
        vargs.add("--debug");
    if (HiWayConfiguration.verbose)
        vargs.add("--verbose");
    vargs.add(workflowParam);
    vargs.add("> >(tee AppMaster.stdout " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout)");
    vargs.add("2> >(tee AppMaster.stderr " + ApplicationConstants.LOG_DIR_EXPANSION_VAR
            + "/AppMaster.stderr >&2)");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    System.out.println("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setVirtualCores(amVCores);
    capability.setMemory(amMemory);
    appContext.setResource(capability);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = hdfs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                System.out.println("Got dt for " + hdfs.getUri() + "; " + token);
            }
        }
        try (DataOutputBuffer dob = new DataOutputBuffer()) {
            credentials.writeTokenStorageToStream(dob);
            ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
            amContainer.setTokens(fsTokens);
        }
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    /* log */ System.out.println("Submitting application to ASM");
    yarnClient.submitApplication(appContext);

    // Monitor the application
    boolean success = monitorApplication(appId);

    if (success && summary != null) {
        summary.stageIn();
    }

    return success;

}