Example usage for org.apache.hadoop.yarn.api.records ApplicationSubmissionContext setApplicationType

Introduction

On this page you can find example usage for org.apache.hadoop.yarn.api.records ApplicationSubmissionContext.setApplicationType.

Prototype

@Public
@Stable
public abstract void setApplicationType(String applicationType);

Document

Set the application type.
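
Before the full examples below, here is a minimal, self-contained sketch of the typical call site. It assumes a reachable ResourceManager; the type string "MYAPP" is an arbitrary illustrative label (the ResourceManager truncates types longer than 20 characters), and it is the value that later appears in the ResourceManager UI and in "yarn application -list -appTypes MYAPP".

import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

YarnClient yarnClient = YarnClient.createYarnClient();
yarnClient.init(new YarnConfiguration());
yarnClient.start();

YarnClientApplication app = yarnClient.createApplication();
ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
// Tag the application with a caller-chosen type so it can be told apart
// from other frameworks (e.g. "MAPREDUCE", "TEZ") when listing applications.
appContext.setApplicationType("MYAPP");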

Usage

From source file:org.apache.flink.yarn.FlinkYarnClientBase.java

License:Apache License

/**
 * This method will block until the ApplicationMaster/JobManager have been
 * deployed on YARN.
 */
protected AbstractFlinkYarnCluster deployInternal() throws Exception {
    isReadyForDeployment();

    LOG.info("Using values:");
    LOG.info("\tTaskManager count = {}", taskManagerCount);
    LOG.info("\tJobManager memory = {}", jobManagerMemoryMb);
    LOG.info("\tTaskManager memory = {}", taskManagerMemoryMb);

    // Create application via yarnClient
    yarnApplication = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = yarnApplication.getNewApplicationResponse();

    // ------------------ Add dynamic properties to local flinkConfiguraton ------

    Map<String, String> dynProperties = CliFrontend.getDynamicProperties(dynamicPropertiesEncoded);
    for (Map.Entry<String, String> dynProperty : dynProperties.entrySet()) {
        flinkConfiguration.setString(dynProperty.getKey(), dynProperty.getValue());
    }

    try {
        org.apache.flink.core.fs.FileSystem.setDefaultScheme(flinkConfiguration);
    } catch (IOException e) {
        throw new IOException("Error while setting the default " + "filesystem scheme from configuration.", e);
    }
    // ------------------ Check if the specified queue exists --------------

    try {
        List<QueueInfo> queues = yarnClient.getAllQueues();
        if (queues.size() > 0 && this.yarnQueue != null) { // check only if there are queues configured in yarn and for this session.
            boolean queueFound = false;
            for (QueueInfo queue : queues) {
                if (queue.getQueueName().equals(this.yarnQueue)) {
                    queueFound = true;
                    break;
                }
            }
            if (!queueFound) {
                String queueNames = "";
                for (QueueInfo queue : queues) {
                    queueNames += queue.getQueueName() + ", ";
                }
                LOG.warn("The specified queue '" + this.yarnQueue + "' does not exist. " + "Available queues: "
                        + queueNames);
            }
        } else {
            LOG.debug("The YARN cluster does not have any queues configured");
        }
    } catch (Throwable e) {
        LOG.warn("Error while getting queue information from YARN: " + e.getMessage());
        if (LOG.isDebugEnabled()) {
            LOG.debug("Error details", e);
        }
    }

    // ------------------ Check if the YARN Cluster has the requested resources --------------

    // the yarnMinAllocationMB specifies the smallest possible container allocation size.
    // all allocations below this value are automatically set to this value.
    final int yarnMinAllocationMB = conf.getInt("yarn.scheduler.minimum-allocation-mb", 0);
    if (jobManagerMemoryMb < yarnMinAllocationMB || taskManagerMemoryMb < yarnMinAllocationMB) {
        LOG.warn("The JobManager or TaskManager memory is below the smallest possible YARN Container size. "
                + "The value of 'yarn.scheduler.minimum-allocation-mb' is '" + yarnMinAllocationMB
                + "'. Please increase the memory size."
                + "YARN will allocate the smaller containers but the scheduler will account for the minimum-allocation-mb, maybe not all instances "
                + "you requested will start.");
    }

    // set the memory to minAllocationMB to do the next checks correctly
    if (jobManagerMemoryMb < yarnMinAllocationMB) {
        jobManagerMemoryMb = yarnMinAllocationMB;
    }
    if (taskManagerMemoryMb < yarnMinAllocationMB) {
        taskManagerMemoryMb = yarnMinAllocationMB;
    }

    Resource maxRes = appResponse.getMaximumResourceCapability();
    final String NOTE = "Please check the 'yarn.scheduler.maximum-allocation-mb' and the 'yarn.nodemanager.resource.memory-mb' configuration values\n";
    if (jobManagerMemoryMb > maxRes.getMemory()) {
        failSessionDuringDeployment();
        throw new YarnDeploymentException(
                "The cluster does not have the requested resources for the JobManager available!\n"
                        + "Maximum Memory: " + maxRes.getMemory() + "MB Requested: " + jobManagerMemoryMb
                        + "MB. " + NOTE);
    }

    if (taskManagerMemoryMb > maxRes.getMemory()) {
        failSessionDuringDeployment();
        throw new YarnDeploymentException(
                "The cluster does not have the requested resources for the TaskManagers available!\n"
                        + "Maximum Memory: " + maxRes.getMemory() + " Requested: " + taskManagerMemoryMb
                        + "MB. " + NOTE);
    }

    final String NOTE_RSC = "\nThe Flink YARN client will try to allocate the YARN session, but maybe not all TaskManagers are "
            + "connecting from the beginning because the resources are currently not available in the cluster. "
            + "The allocation might take more time than usual because the Flink YARN client needs to wait until "
            + "the resources become available.";
    int totalMemoryRequired = jobManagerMemoryMb + taskManagerMemoryMb * taskManagerCount;
    ClusterResourceDescription freeClusterMem = getCurrentFreeClusterResources(yarnClient);
    if (freeClusterMem.totalFreeMemory < totalMemoryRequired) {
        LOG.warn("This YARN session requires " + totalMemoryRequired + "MB of memory in the cluster. "
                + "There are currently only " + freeClusterMem.totalFreeMemory + "MB available." + NOTE_RSC);

    }
    if (taskManagerMemoryMb > freeClusterMem.containerLimit) {
        LOG.warn("The requested amount of memory for the TaskManagers (" + taskManagerMemoryMb
                + "MB) is more than " + "the largest possible YARN container: " + freeClusterMem.containerLimit
                + NOTE_RSC);
    }
    if (jobManagerMemoryMb > freeClusterMem.containerLimit) {
        LOG.warn(
                "The requested amount of memory for the JobManager (" + jobManagerMemoryMb + "MB) is more than "
                        + "the largest possible YARN container: " + freeClusterMem.containerLimit + NOTE_RSC);
    }

    // ----------------- check if the requested containers fit into the cluster.

    int[] nmFree = Arrays.copyOf(freeClusterMem.nodeManagersFree, freeClusterMem.nodeManagersFree.length);
    // first, allocate the jobManager somewhere.
    if (!allocateResource(nmFree, jobManagerMemoryMb)) {
        LOG.warn("Unable to find a NodeManager that can fit the JobManager/Application master. "
                + "The JobManager requires " + jobManagerMemoryMb + "MB. NodeManagers available: "
                + Arrays.toString(freeClusterMem.nodeManagersFree) + NOTE_RSC);
    }
    // allocate TaskManagers
    for (int i = 0; i < taskManagerCount; i++) {
        if (!allocateResource(nmFree, taskManagerMemoryMb)) {
            LOG.warn("There is not enough memory available in the YARN cluster. "
                    + "The TaskManager(s) require " + taskManagerMemoryMb + "MB each. "
                    + "NodeManagers available: " + Arrays.toString(freeClusterMem.nodeManagersFree) + "\n"
                    + "After allocating the JobManager (" + jobManagerMemoryMb + "MB) and (" + i + "/"
                    + taskManagerCount + ") TaskManagers, " + "the following NodeManagers are available: "
                    + Arrays.toString(nmFree) + NOTE_RSC);
        }
    }

    // ------------------ Prepare Application Master Container  ------------------------------

    // respect custom JVM options in the YAML file
    final String javaOpts = flinkConfiguration.getString(ConfigConstants.FLINK_JVM_OPTIONS, "");

    String logbackFile = configurationDirectory + File.separator + FlinkYarnSessionCli.CONFIG_FILE_LOGBACK_NAME;
    boolean hasLogback = new File(logbackFile).exists();
    String log4jFile = configurationDirectory + File.separator + FlinkYarnSessionCli.CONFIG_FILE_LOG4J_NAME;

    boolean hasLog4j = new File(log4jFile).exists();
    if (hasLogback) {
        shipFiles.add(new File(logbackFile));
    }
    if (hasLog4j) {
        shipFiles.add(new File(log4jFile));
    }

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    String amCommand = "$JAVA_HOME/bin/java" + " -Xmx"
            + Utils.calculateHeapSize(jobManagerMemoryMb, flinkConfiguration) + "M " + javaOpts;

    if (hasLogback || hasLog4j) {
        amCommand += " -Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.log\"";

        if (hasLogback) {
            amCommand += " -Dlogback.configurationFile=file:" + FlinkYarnSessionCli.CONFIG_FILE_LOGBACK_NAME;
        }

        if (hasLog4j) {
            amCommand += " -Dlog4j.configuration=file:" + FlinkYarnSessionCli.CONFIG_FILE_LOG4J_NAME;
        }
    }

    amCommand += " " + getApplicationMasterClass().getName() + " " + " 1>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.out" + " 2>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.err";
    amContainer.setCommands(Collections.singletonList(amCommand));

    LOG.debug("Application Master start command: " + amCommand);

    // initialize HDFS
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    final FileSystem fs = FileSystem.get(conf);

    // hard-coded check for the GoogleHDFS client because it's not overriding the getScheme() method.
    if (!fs.getClass().getSimpleName().equals("GoogleHadoopFileSystem") && fs.getScheme().startsWith("file")) {
        LOG.warn("The file system scheme is '" + fs.getScheme() + "'. This indicates that the "
                + "specified Hadoop configuration path is wrong and the system is using the default Hadoop configuration values. "
                + "The Flink YARN client needs to store its files in a distributed file system.");
    }

    // Set-up ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext = yarnApplication.getApplicationSubmissionContext();

    if (RecoveryMode.isHighAvailabilityModeActivated(flinkConfiguration)) {
        // activate re-execution of failed applications
        appContext.setMaxAppAttempts(flinkConfiguration.getInteger(ConfigConstants.YARN_APPLICATION_ATTEMPTS,
                YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS));

        activateHighAvailabilitySupport(appContext);
    } else {
        // set number of application retries to 1 in the default case
        appContext
                .setMaxAppAttempts(flinkConfiguration.getInteger(ConfigConstants.YARN_APPLICATION_ATTEMPTS, 1));
    }

    final ApplicationId appId = appContext.getApplicationId();

    // Setup jar for ApplicationMaster
    LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    LocalResource flinkConf = Records.newRecord(LocalResource.class);
    Path remotePathJar = Utils.setupLocalResource(fs, appId.toString(), flinkJarPath, appMasterJar,
            fs.getHomeDirectory());
    Path remotePathConf = Utils.setupLocalResource(fs, appId.toString(), flinkConfigurationPath, flinkConf,
            fs.getHomeDirectory());
    Map<String, LocalResource> localResources = new HashMap<>(2);
    localResources.put("flink.jar", appMasterJar);
    localResources.put("flink-conf.yaml", flinkConf);

    // setup security tokens (code from apache storm)
    final Path[] paths = new Path[2 + shipFiles.size()];
    StringBuilder envShipFileList = new StringBuilder();
    // upload ship files
    for (int i = 0; i < shipFiles.size(); i++) {
        File shipFile = shipFiles.get(i);
        LocalResource shipResources = Records.newRecord(LocalResource.class);
        Path shipLocalPath = new Path("file://" + shipFile.getAbsolutePath());
        paths[2 + i] = Utils.setupLocalResource(fs, appId.toString(), shipLocalPath, shipResources,
                fs.getHomeDirectory());
        localResources.put(shipFile.getName(), shipResources);

        envShipFileList.append(paths[2 + i]);
        if (i + 1 < shipFiles.size()) {
            envShipFileList.append(',');
        }
    }

    paths[0] = remotePathJar;
    paths[1] = remotePathConf;
    sessionFilesDir = new Path(fs.getHomeDirectory(), ".flink/" + appId.toString() + "/");

    FsPermission permission = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
    fs.setPermission(sessionFilesDir, permission); // set permission for path.

    Utils.setTokensFor(amContainer, paths, conf);

    amContainer.setLocalResources(localResources);
    fs.close();

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<>();
    // set user specified app master environment variables
    appMasterEnv.putAll(Utils.getEnvironmentVariables(ConfigConstants.YARN_APPLICATION_MASTER_ENV_PREFIX,
            flinkConfiguration));
    // set classpath from YARN configuration
    Utils.setupEnv(conf, appMasterEnv);
    // set Flink on YARN internal configuration values
    appMasterEnv.put(YarnConfigKeys.ENV_TM_COUNT, String.valueOf(taskManagerCount));
    appMasterEnv.put(YarnConfigKeys.ENV_TM_MEMORY, String.valueOf(taskManagerMemoryMb));
    appMasterEnv.put(YarnConfigKeys.FLINK_JAR_PATH, remotePathJar.toString());
    appMasterEnv.put(YarnConfigKeys.ENV_APP_ID, appId.toString());
    appMasterEnv.put(YarnConfigKeys.ENV_CLIENT_HOME_DIR, fs.getHomeDirectory().toString());
    appMasterEnv.put(YarnConfigKeys.ENV_CLIENT_SHIP_FILES, envShipFileList.toString());
    appMasterEnv.put(YarnConfigKeys.ENV_CLIENT_USERNAME,
            UserGroupInformation.getCurrentUser().getShortUserName());
    appMasterEnv.put(YarnConfigKeys.ENV_SLOTS, String.valueOf(slots));
    appMasterEnv.put(YarnConfigKeys.ENV_DETACHED, String.valueOf(detached));

    if (dynamicPropertiesEncoded != null) {
        appMasterEnv.put(YarnConfigKeys.ENV_DYNAMIC_PROPERTIES, dynamicPropertiesEncoded);
    }

    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(jobManagerMemoryMb);
    capability.setVirtualCores(1);

    String name;
    if (customName == null) {
        name = "Flink session with " + taskManagerCount + " TaskManagers";
        if (detached) {
            name += " (detached)";
        }
    } else {
        name = customName;
    }

    appContext.setApplicationName(name); // application name
    appContext.setApplicationType("Apache Flink");
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    if (yarnQueue != null) {
        appContext.setQueue(yarnQueue);
    }

    // add a hook to clean up in case deployment fails
    Runtime.getRuntime().addShutdownHook(deploymentFailureHook);
    LOG.info("Submitting application master " + appId);
    yarnClient.submitApplication(appContext);

    LOG.info("Waiting for the cluster to be allocated");
    int waittime = 0;
    loop: while (true) {
        ApplicationReport report;
        try {
            report = yarnClient.getApplicationReport(appId);
        } catch (IOException e) {
            throw new YarnDeploymentException("Failed to deploy the cluster: " + e.getMessage());
        }
        YarnApplicationState appState = report.getYarnApplicationState();
        switch (appState) {
        case FAILED:
        case FINISHED:
        case KILLED:
            throw new YarnDeploymentException("The YARN application unexpectedly switched to state " + appState
                    + " during deployment. \n" + "Diagnostics from YARN: " + report.getDiagnostics() + "\n"
                    + "If log aggregation is enabled on your cluster, use this command to further investigate the issue:\n"
                    + "yarn logs -applicationId " + appId);
            // unreachable: the exception above leaves the switch
        case RUNNING:
            LOG.info("YARN application has been deployed successfully.");
            break loop;
        default:
            LOG.info("Deploying cluster, current state " + appState);
            if (waittime > 60000) {
                LOG.info(
                        "Deployment took more than 60 seconds. Please check if the requested resources are available in the YARN cluster");
            }

        }
        waittime += 1000;
        Thread.sleep(1000);
    }
    // print the application id for user to cancel themselves.
    if (isDetached()) {
        LOG.info("The Flink YARN client has been started in detached mode. In order to stop "
                + "Flink on YARN, use the following command or a YARN web interface to stop "
                + "it:\nyarn application -kill " + appId + "\nPlease also note that the "
                + "temporary files of the YARN session in the home directoy will not be removed.");
    }
    // since deployment was successful, remove the hook
    try {
        Runtime.getRuntime().removeShutdownHook(deploymentFailureHook);
    } catch (IllegalStateException e) {
        // we're already in the shut down hook.
    }
    // the Flink cluster is deployed in YARN. Represent cluster
    return new FlinkYarnCluster(yarnClient, appId, conf, flinkConfiguration, sessionFilesDir, detached);
}

From source file:org.apache.gobblin.yarn.GobblinYarnAppLauncher.java

License:Apache License

/**
 * Setup and submit the Gobblin Yarn application.
 *
 * @throws IOException if there's anything wrong setting up and submitting the Yarn application
 * @throws YarnException if there's anything wrong setting up and submitting the Yarn application
 */
@VisibleForTesting
ApplicationId setupAndSubmitApplication() throws IOException, YarnException {
    YarnClientApplication gobblinYarnApp = this.yarnClient.createApplication();
    ApplicationSubmissionContext appSubmissionContext = gobblinYarnApp.getApplicationSubmissionContext();
    appSubmissionContext.setApplicationType(GOBBLIN_YARN_APPLICATION_TYPE);
    ApplicationId applicationId = appSubmissionContext.getApplicationId();

    GetNewApplicationResponse newApplicationResponse = gobblinYarnApp.getNewApplicationResponse();
    // Set up resource type requirements for ApplicationMaster
    Resource resource = prepareContainerResource(newApplicationResponse);

    // Add lib jars, and jars and files that the ApplicationMaster need as LocalResources
    Map<String, LocalResource> appMasterLocalResources = addAppMasterLocalResources(applicationId);

    ContainerLaunchContext amContainerLaunchContext = Records.newRecord(ContainerLaunchContext.class);
    amContainerLaunchContext.setLocalResources(appMasterLocalResources);
    amContainerLaunchContext.setEnvironment(YarnHelixUtils.getEnvironmentVariables(this.yarnConfiguration));
    amContainerLaunchContext
            .setCommands(Lists.newArrayList(buildApplicationMasterCommand(resource.getMemory())));

    Map<ApplicationAccessType, String> acls = new HashMap<>(1);
    acls.put(ApplicationAccessType.VIEW_APP, this.appViewAcl);
    amContainerLaunchContext.setApplicationACLs(acls);

    if (UserGroupInformation.isSecurityEnabled()) {
        setupSecurityTokens(amContainerLaunchContext);
    }

    // Setup the application submission context
    appSubmissionContext.setApplicationName(this.applicationName);
    appSubmissionContext.setResource(resource);
    appSubmissionContext.setQueue(this.appQueueName);
    appSubmissionContext.setPriority(Priority.newInstance(0));
    appSubmissionContext.setAMContainerSpec(amContainerLaunchContext);
    // Also setup container local resources by copying local jars and files the container need to HDFS
    addContainerLocalResources(applicationId);

    // Submit the application
    LOGGER.info("Submitting application " + applicationId);
    this.yarnClient.submitApplication(appSubmissionContext);

    LOGGER.info("Application successfully submitted and accepted");
    ApplicationReport applicationReport = this.yarnClient.getApplicationReport(applicationId);
    LOGGER.info("Application Name: " + applicationReport.getName());
    LOGGER.info("Application Tracking URL: " + applicationReport.getTrackingUrl());
    LOGGER.info("Application User: " + applicationReport.getUser() + " Queue: " + applicationReport.getQueue());

    return applicationId;
}

From source file:org.apache.tez.client.TezClientUtils.java

License:Apache License

/**
 * Create an ApplicationSubmissionContext to launch a Tez AM
 * @param appId Application Id
 * @param dag DAG to be submitted
 * @param amName Name for the application
 * @param amConfig AM Configuration
 * @param tezJarResources Resources to be used by the AM
 * @param sessionCreds the credential object which will be populated with session specific credentials
 * @param historyACLPolicyManager
 * @return an ApplicationSubmissionContext to launch a Tez AM
 * @throws IOException
 * @throws YarnException
 */
@Private
@VisibleForTesting
public static ApplicationSubmissionContext createApplicationSubmissionContext(ApplicationId appId, DAG dag,
        String amName, AMConfiguration amConfig, Map<String, LocalResource> tezJarResources,
        Credentials sessionCreds, boolean tezLrsAsArchive, TezApiVersionInfo apiVersionInfo,
        HistoryACLPolicyManager historyACLPolicyManager) throws IOException, YarnException {

    Preconditions.checkNotNull(sessionCreds);
    TezConfiguration conf = amConfig.getTezConfiguration();

    FileSystem fs = TezClientUtils.ensureStagingDirExists(conf, TezCommonUtils.getTezBaseStagingPath(conf));
    String strAppId = appId.toString();
    Path tezSysStagingPath = TezCommonUtils.createTezSystemStagingPath(conf, strAppId);
    Path binaryConfPath = TezCommonUtils.getTezConfStagingPath(tezSysStagingPath);
    binaryConfPath = fs.makeQualified(binaryConfPath);

    // Setup resource requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amConfig.getTezConfiguration().getInt(TezConfiguration.TEZ_AM_RESOURCE_MEMORY_MB,
            TezConfiguration.TEZ_AM_RESOURCE_MEMORY_MB_DEFAULT));
    capability.setVirtualCores(amConfig.getTezConfiguration().getInt(
            TezConfiguration.TEZ_AM_RESOURCE_CPU_VCORES, TezConfiguration.TEZ_AM_RESOURCE_CPU_VCORES_DEFAULT));
    if (LOG.isDebugEnabled()) {
        LOG.debug("AppMaster capability = " + capability);
    }

    // Setup required Credentials for the AM launch. DAG specific credentials
    // are handled separately.
    ByteBuffer securityTokens = null;
    // Setup security tokens
    Credentials amLaunchCredentials = new Credentials();
    if (amConfig.getCredentials() != null) {
        amLaunchCredentials.addAll(amConfig.getCredentials());
    }

    // Add Staging dir creds to the list of session credentials.
    TokenCache.obtainTokensForFileSystems(sessionCreds, new Path[] { binaryConfPath }, conf);

    // Add session specific credentials to the AM credentials.
    amLaunchCredentials.mergeAll(sessionCreds);

    DataOutputBuffer dob = new DataOutputBuffer();
    amLaunchCredentials.writeTokenStorageToStream(dob);
    securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Setup the command to run the AM
    List<String> vargs = new ArrayList<String>(8);
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");

    String amOpts = constructAMLaunchOpts(amConfig.getTezConfiguration(), capability);
    vargs.add(amOpts);

    String amLogLevelString = amConfig.getTezConfiguration().get(TezConfiguration.TEZ_AM_LOG_LEVEL,
            TezConfiguration.TEZ_AM_LOG_LEVEL_DEFAULT);
    String[] amLogParams = parseLogParams(amLogLevelString);

    String amLogLevel = amLogParams[0];
    maybeAddDefaultLoggingJavaOpts(amLogLevel, vargs);

    // FIX sun bug mentioned in TEZ-327
    vargs.add("-Dsun.nio.ch.bugLevel=''");

    vargs.add(TezConstants.TEZ_APPLICATION_MASTER_CLASS);
    if (dag == null) {
        vargs.add("--" + TezConstants.TEZ_SESSION_MODE_CLI_OPTION);
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + File.separator + ApplicationConstants.STDOUT);
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + File.separator + ApplicationConstants.STDERR);

    Vector<String> vargsFinal = new Vector<String>(8);
    // Final command
    StringBuilder mergedCommand = new StringBuilder();
    for (CharSequence str : vargs) {
        mergedCommand.append(str).append(" ");
    }
    vargsFinal.add(mergedCommand.toString());

    if (LOG.isDebugEnabled()) {
        LOG.debug("Command to launch container for ApplicationMaster is : " + mergedCommand);
    }

    Map<String, String> environment = new TreeMap<String, String>();
    TezYARNUtils.setupDefaultEnv(environment, conf, TezConfiguration.TEZ_AM_LAUNCH_ENV,
            TezConfiguration.TEZ_AM_LAUNCH_ENV_DEFAULT, tezLrsAsArchive);

    addVersionInfoToEnv(environment, apiVersionInfo);
    addLogParamsToEnv(environment, amLogParams);

    Map<String, LocalResource> amLocalResources = new TreeMap<String, LocalResource>();

    // Not fetching credentials for AMLocalResources. Expect this to be provided via AMCredentials.
    if (amConfig.getAMLocalResources() != null) {
        amLocalResources.putAll(amConfig.getAMLocalResources());
    }
    amLocalResources.putAll(tezJarResources);

    // Setup Session ACLs and update conf as needed
    Map<String, String> aclConfigs = null;
    if (historyACLPolicyManager != null) {
        if (dag == null) {
            aclConfigs = historyACLPolicyManager.setupSessionACLs(amConfig.getTezConfiguration(), appId);
        } else {
            // Non-session mode
            // As only a single DAG is supported, we should combine AM and DAG ACLs under the same
            // ACL management layer
            aclConfigs = historyACLPolicyManager.setupNonSessionACLs(amConfig.getTezConfiguration(), appId,
                    dag.getDagAccessControls());
        }
    }

    // emit conf as PB file
    ConfigurationProto finalConfProto = createFinalConfProtoForApp(amConfig.getTezConfiguration(), aclConfigs);

    FSDataOutputStream amConfPBOutBinaryStream = null;
    try {
        amConfPBOutBinaryStream = TezCommonUtils.createFileForAM(fs, binaryConfPath);
        finalConfProto.writeTo(amConfPBOutBinaryStream);
    } finally {
        if (amConfPBOutBinaryStream != null) {
            amConfPBOutBinaryStream.close();
        }
    }

    LocalResource binaryConfLRsrc = TezClientUtils.createLocalResource(fs, binaryConfPath,
            LocalResourceType.FILE, LocalResourceVisibility.APPLICATION);
    amConfig.setBinaryConfLR(binaryConfLRsrc);
    amLocalResources.put(TezConstants.TEZ_PB_BINARY_CONF_NAME, binaryConfLRsrc);

    // Create Session Jars definition to be sent to AM as a local resource
    Path sessionJarsPath = TezCommonUtils.getTezAMJarStagingPath(tezSysStagingPath);
    FSDataOutputStream sessionJarsPBOutStream = null;
    try {
        sessionJarsPBOutStream = TezCommonUtils.createFileForAM(fs, sessionJarsPath);
        // Write out the initial list of resources which will be available in the AM
        DAGProtos.PlanLocalResourcesProto amResourceProto;
        if (amLocalResources != null && !amLocalResources.isEmpty()) {
            amResourceProto = DagTypeConverters.convertFromLocalResources(amLocalResources);
        } else {
            amResourceProto = DAGProtos.PlanLocalResourcesProto.getDefaultInstance();
        }
        amResourceProto.writeDelimitedTo(sessionJarsPBOutStream);
    } finally {
        if (sessionJarsPBOutStream != null) {
            sessionJarsPBOutStream.close();
        }
    }

    LocalResource sessionJarsPBLRsrc = TezClientUtils.createLocalResource(fs, sessionJarsPath,
            LocalResourceType.FILE, LocalResourceVisibility.APPLICATION);
    amLocalResources.put(TezConstants.TEZ_AM_LOCAL_RESOURCES_PB_FILE_NAME, sessionJarsPBLRsrc);

    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    ACLManager aclManager = new ACLManager(user, amConfig.getTezConfiguration());
    Map<ApplicationAccessType, String> acls = aclManager.toYARNACls();

    if (dag != null) {

        DAGPlan dagPB = prepareAndCreateDAGPlan(dag, amConfig, tezJarResources, tezLrsAsArchive, sessionCreds);

        // emit protobuf DAG file style
        Path binaryPath = TezCommonUtils.getTezBinPlanStagingPath(tezSysStagingPath);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Stage directory information for AppId :" + appId + " tezSysStagingPath :"
                    + tezSysStagingPath + " binaryConfPath :" + binaryConfPath + " sessionJarsPath :"
                    + sessionJarsPath + " binaryPlanPath :" + binaryPath);
        }

        FSDataOutputStream dagPBOutBinaryStream = null;

        try {
            //binary output
            dagPBOutBinaryStream = TezCommonUtils.createFileForAM(fs, binaryPath);
            dagPB.writeTo(dagPBOutBinaryStream);
        } finally {
            if (dagPBOutBinaryStream != null) {
                dagPBOutBinaryStream.close();
            }
        }

        amLocalResources.put(TezConstants.TEZ_PB_PLAN_BINARY_NAME, TezClientUtils.createLocalResource(fs,
                binaryPath, LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));

        if (Level.DEBUG.isGreaterOrEqual(Level.toLevel(amLogLevel))) {
            Path textPath = localizeDagPlanAsText(dagPB, fs, amConfig, strAppId, tezSysStagingPath);
            amLocalResources.put(TezConstants.TEZ_PB_PLAN_TEXT_NAME, TezClientUtils.createLocalResource(fs,
                    textPath, LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
        }
    }

    // Setup ContainerLaunchContext for AM container
    ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(amLocalResources, environment,
            vargsFinal, null, securityTokens, acls);

    // Set up the ApplicationSubmissionContext
    ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);

    appContext.setApplicationType(TezConstants.TEZ_APPLICATION_TYPE);
    appContext.setApplicationId(appId);
    appContext.setResource(capability);
    if (amConfig.getQueueName() != null) {
        appContext.setQueue(amConfig.getQueueName());
    }
    appContext.setApplicationName(amName);
    appContext.setCancelTokensWhenComplete(amConfig.getTezConfiguration().getBoolean(
            TezConfiguration.TEZ_CANCEL_DELEGATION_TOKENS_ON_COMPLETION,
            TezConfiguration.TEZ_CANCEL_DELEGATION_TOKENS_ON_COMPLETION_DEFAULT));
    appContext.setAMContainerSpec(amContainer);

    appContext.setMaxAppAttempts(amConfig.getTezConfiguration().getInt(TezConfiguration.TEZ_AM_MAX_APP_ATTEMPTS,
            TezConfiguration.TEZ_AM_MAX_APP_ATTEMPTS_DEFAULT));

    return appContext;

}

From source file:org.elasticsearch.hadoop.yarn.client.YarnLauncher.java

License:Apache License

private ApplicationSubmissionContext setupAM(YarnClientApplication clientApp) {
    ApplicationSubmissionContext appContext = clientApp.getApplicationSubmissionContext();
    // already happens inside Hadoop but to be consistent
    appContext.setApplicationId(clientApp.getNewApplicationResponse().getApplicationId());
    appContext.setApplicationName(clientCfg.appName());
    appContext.setAMContainerSpec(createContainerContext());
    appContext.setResource(
            YarnCompat.resource(client.getConfiguration(), clientCfg.amMem(), clientCfg.amVCores()));
    appContext.setPriority(Priority.newInstance(clientCfg.amPriority()));
    appContext.setQueue(clientCfg.amQueue());
    appContext.setApplicationType(clientCfg.appType());
    YarnCompat.setApplicationTags(appContext, clientCfg.appTags());

    return appContext;
}

From source file:yrun.YarnRunner.java

License:Apache License

public void execute() throws IOException, YarnException, InterruptedException {
    LOG.info("Using application path [" + _installPath + "]");
    Path jarPath = installThisJar(_installPath, _appJarFile);
    LOG.info("Driver installed [" + jarPath + "]");
    List<Path> installedArchivePathList = install(_installPath, _archivePathList);
    for (Path p : installedArchivePathList) {
        LOG.info("Archive installed [" + p + "]");
    }

    YarnRunnerArgs yarnRunnerArgs = new YarnRunnerArgs();
    yarnRunnerArgs.setCommand(_command);

    Path argsPath = installThisArgs(_installPath, yarnRunnerArgs);

    final YarnClient client = YarnClient.createYarnClient();
    _configuration.setInt("yarn.nodemanager.delete.debug-delay-sec", (int) TimeUnit.HOURS.toSeconds(1));
    client.init(_configuration);
    client.start();

    YarnClientApplication app = client.createApplication();
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    Map<String, String> appMasterEnv = new HashMap<String, String>();
    setupAppMasterEnv(appMasterEnv, _appJarFile);

    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    {
        LocalResource appMasterJar = Records.newRecord(LocalResource.class);
        setupAppMasterJar(jarPath, appMasterJar);
        localResources.put(jarPath.getName(), appMasterJar);
    }
    {
        LocalResource appMasterArgs = Records.newRecord(LocalResource.class);
        setupAppMasterArgs(argsPath, appMasterArgs);
        localResources.put(MASTER_JSON, appMasterArgs);
    }

    List<String> vargs = new ArrayList<String>();
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    vargs.add("-Xmx256m");
    vargs.add("-Djava.net.preferIPv4Stack=true");
    vargs.add(YarnRunnerApplicationMaster.class.getName());

    String strCommand = "(echo ENV && set && echo CURRENT_DIR_LISTING && ls -la && echo PWD && pwd && ("
            + StringUtils.join(" ", vargs) + "))";
    strCommand += " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout";
    strCommand += " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";
    LOG.debug("Application Master command [" + strCommand + "]");

    amContainer.setCommands(Collections.singletonList(strCommand));
    amContainer.setLocalResources(localResources);
    amContainer.setEnvironment(appMasterEnv);

    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(256);
    capability.setVirtualCores(1);

    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    appContext.setApplicationName(_yarnName);
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    if (_queue != null) {
        appContext.setQueue(_queue);
    }
    appContext.setApplicationType("yrun");

    ApplicationId appId = appContext.getApplicationId();
    AtomicBoolean shutdown = new AtomicBoolean();
    if (!_isDaemon) {
        addShutdownHook(client, appId, shutdown);
    }

    LOG.info("Submitting application with id [" + appId + "]");
    client.submitApplication(appContext);
    ApplicationReport report;
    YarnApplicationState state;
    do {
        report = client.getApplicationReport(appId);
        state = report.getYarnApplicationState();
        if (state == YarnApplicationState.RUNNING) {
            if (_isDaemon) {
                LOG.info("Application is running.  This is a daemon application driver program exiting.");
                return;
            }
        }
        Thread.sleep(100);
    } while (isNoLongerRunning(state)); // despite its name, presumably true while the app has not yet reached a terminal state
    shutdown.set(true);
    LOG.info("Application has finished with state [" + state + "]");
}