Example usage for org.apache.hadoop.security UserGroupInformation getCurrentUser

List of usage examples for org.apache.hadoop.security UserGroupInformation getCurrentUser

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.getCurrentUser().

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation getCurrentUser() throws IOException 

Document

Return the current user, including any doAs in the current stack.
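
Note that getCurrentUser() is doAs-aware: called inside a doAs block, it returns the proxied user rather than the login user. The following minimal sketch illustrates this; the proxy user name "alice" is made up for illustration.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class GetCurrentUserSketch {
    public static void main(String[] args) throws Exception {
        // outside of doAs, the current user is the login user
        UserGroupInformation login = UserGroupInformation.getCurrentUser();
        System.out.println("login user: " + login.getShortUserName());

        // inside doAs, getCurrentUser() returns the proxied user ("alice")
        UserGroupInformation proxy = UserGroupInformation.createProxyUser("alice", login);
        proxy.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                System.out.println("doAs user: " + UserGroupInformation.getCurrentUser().getShortUserName());
                return null;
            }
        });
    }
}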

Usage

From source file: org.apache.flink.api.java.hadoop.mapreduce.HadoopOutputFormatBase.java

License: Apache License

@Override
public void finalizeGlobal(int parallelism) throws IOException {

    JobContext jobContext;
    TaskAttemptContext taskContext;
    try {
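        // builds the attempt id "attempt__0000_r_000001_0" by zero-padding the task number to six digits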
        TaskAttemptID taskAttemptID = TaskAttemptID.forName("attempt__0000_r_"
                + String.format("%" + (6 - Integer.toString(1).length()) + "s", " ").replace(" ", "0")
                + Integer.toString(1) + "_0");

        jobContext = HadoopUtils.instantiateJobContext(this.configuration, new JobID());
        taskContext = HadoopUtils.instantiateTaskAttemptContext(this.configuration, taskAttemptID);
        this.outputCommitter = this.mapreduceOutputFormat.getOutputCommitter(taskContext);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    jobContext.getCredentials().addAll(this.credentials);
    Credentials currentUserCreds = getCredentialsFromUGI(UserGroupInformation.getCurrentUser());
    if (currentUserCreds != null) {
        jobContext.getCredentials().addAll(currentUserCreds);
    }

    // finalize HDFS output format
    if (this.outputCommitter != null) {
        this.outputCommitter.commitJob(jobContext);
    }
}

From source file: org.apache.flink.batch.connectors.hive.HiveTableInputFormat.java

License: Apache License

@SuppressWarnings("unchecked")
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    super.read(in);
    if (jobConf == null) {
        jobConf = new JobConf();
    }
    jobConf.readFields(in);
    jobConf.getCredentials().addAll(this.credentials);
    Credentials currentUserCreds = getCredentialsFromUGI(UserGroupInformation.getCurrentUser());
    if (currentUserCreds != null) {
        jobConf.getCredentials().addAll(currentUserCreds);
    }
    isPartitioned = (boolean) in.readObject();
    rowTypeInfo = (RowTypeInfo) in.readObject();
    partitionColNames = (String[]) in.readObject();
    partitions = (List<HiveTablePartition>) in.readObject();
}

From source file: org.apache.flink.batch.connectors.hive.HiveTableOutputFormat.java

License: Apache License

@SuppressWarnings("unchecked")
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    super.read(in);
    if (jobConf == null) {
        jobConf = new JobConf();
    }
    jobConf.readFields(in);
    jobConf.getCredentials().addAll(this.credentials);
    Credentials currentUserCreds = HadoopInputFormatCommonBase
            .getCredentialsFromUGI(UserGroupInformation.getCurrentUser());
    if (currentUserCreds != null) {
        jobConf.getCredentials().addAll(currentUserCreds);
    }
    isPartitioned = (boolean) in.readObject();
    isDynamicPartition = (boolean) in.readObject();
    overwrite = (boolean) in.readObject();
    rowTypeInfo = (RowTypeInfo) in.readObject();
    hiveTablePartition = (HiveTablePartition) in.readObject();
    partitionCols = (List<String>) in.readObject();
    dbName = (String) in.readObject();
    tableName = (String) in.readObject();
    partitionToWriter = new HashMap<>();
    tblProperties = (Properties) in.readObject();
}

From source file: org.apache.flink.mesos.runtime.clusterframework.MesosApplicationMasterRunner.java

License: Apache License

/**
 * The instance entry point for the Mesos AppMaster. Obtains user group
 * information and calls the main work method {@link #runPrivileged()} as a
 * privileged action.
 *
 * @param args The command line arguments.
 * @return The process exit code.
 */
protected int run(String[] args) {
    try {
        LOG.debug("All environment variables: {}", ENV);

        final UserGroupInformation currentUser;
        try {
            currentUser = UserGroupInformation.getCurrentUser();
        } catch (Throwable t) {
            throw new Exception("Cannot access UserGroupInformation information for current user", t);
        }

        LOG.info("Running Flink as user {}", currentUser.getShortUserName());

        // run the actual work in a secured privileged action
        return currentUser.doAs(new PrivilegedAction<Integer>() {
            @Override
            public Integer run() {
                return runPrivileged();
            }
        });
    } catch (Throwable t) {
        // make sure that everything ends up in the log, whatever happens
        LOG.error("Mesos AppMaster initialization failed", t);
        return INIT_ERROR_EXIT_CODE;
    }
}

From source file: org.apache.flink.mesos.runtime.clusterframework.MesosTaskManagerRunner.java

License: Apache License

public static void runTaskManager(String[] args, final Class<? extends TaskManager> taskManager)
        throws IOException {
    EnvironmentInformation.logEnvironmentInfo(LOG, taskManager.getSimpleName(), args);
    org.apache.flink.runtime.util.SignalHandler.register(LOG);

    // try to parse the command line arguments
    final Configuration configuration;
    try {
        configuration = TaskManager.parseArgsAndLoadConfig(args);

        // add dynamic properties to TaskManager configuration.
        final Configuration dynamicProperties = FlinkMesosSessionCli
                .decodeDynamicProperties(ENV.get(MesosConfigKeys.ENV_DYNAMIC_PROPERTIES));
        LOG.debug("Mesos dynamic properties: {}", dynamicProperties);
        configuration.addAll(dynamicProperties);
    } catch (Throwable t) {
        LOG.error("Failed to load the TaskManager configuration and dynamic properties.", t);
        System.exit(TaskManager.STARTUP_FAILURE_RETURN_CODE());
        return;
    }

    // read the environment variables
    final Map<String, String> envs = System.getenv();
    final String effectiveUsername = envs.get(MesosConfigKeys.ENV_CLIENT_USERNAME);
    final String tmpDirs = envs.get(MesosConfigKeys.ENV_FLINK_TMP_DIR);

    // configure local directory
    String flinkTempDirs = configuration.getString(ConfigConstants.TASK_MANAGER_TMP_DIR_KEY, null);
    if (flinkTempDirs != null) {
        LOG.info(
                "Overriding Mesos temporary file directories with those " + "specified in the Flink config: {}",
                flinkTempDirs);
    } else if (tmpDirs != null) {
        LOG.info("Setting directories for temporary files to: {}", tmpDirs);
        configuration.setString(ConfigConstants.TASK_MANAGER_TMP_DIR_KEY, tmpDirs);
    }

    LOG.info("Mesos task runs as '{}', setting user to execute Flink TaskManager to '{}'",
            UserGroupInformation.getCurrentUser().getShortUserName(), effectiveUsername);

    // tell akka to die in case of an error
    configuration.setBoolean(ConfigConstants.AKKA_JVM_EXIT_ON_FATAL_ERROR, true);

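    // create a UGI for the configured effective user and hand over the delegation tokens of the current user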
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(effectiveUsername);
    for (Token<? extends TokenIdentifier> toks : UserGroupInformation.getCurrentUser().getTokens()) {
        ugi.addToken(toks);
    }

    // Infer the resource identifier from the environment variable
    String containerID = Preconditions.checkNotNull(envs.get(MesosConfigKeys.ENV_FLINK_CONTAINER_ID));
    final ResourceID resourceId = new ResourceID(containerID);
    LOG.info("ResourceID assigned for this container: {}", resourceId);

    ugi.doAs(new PrivilegedAction<Object>() {
        @Override
        public Object run() {
            try {
                TaskManager.selectNetworkInterfaceAndRunTaskManager(configuration, resourceId, taskManager);
            } catch (Throwable t) {
                LOG.error("Error while starting the TaskManager", t);
                System.exit(TaskManager.STARTUP_FAILURE_RETURN_CODE());
            }
            return null;
        }
    });
}

From source file: org.apache.flink.runtime.security.SecurityUtils.java

License: Apache License

public static <T> T runSecured(final FlinkSecuredRunner<T> runner) throws Exception {
    UserGroupInformation.setConfiguration(hdConf);
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    if (!ugi.hasKerberosCredentials()) {
        LOG.error("Security is enabled but no Kerberos credentials have been found. "
                + "You may authenticate using the kinit command.");
    }
    return ugi.doAs(new PrivilegedExceptionAction<T>() {
        @Override
        public T run() throws Exception {
            return runner.run();
        }
    });
}
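
A hypothetical caller of the runSecured helper above, assuming the FlinkSecuredRunner<T> interface exposes a single T run() method as used in the implementation; doSomethingWithHadoop() is a made-up placeholder.

Integer exitCode = SecurityUtils.runSecured(new SecurityUtils.FlinkSecuredRunner<Integer>() {
    @Override
    public Integer run() throws Exception {
        // executed inside ugi.doAs(...), so Hadoop calls here act as the secured user
        return doSomethingWithHadoop();
    }
});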

From source file: org.apache.flink.runtime.util.EnvironmentInformation.java

License: Apache License

public static String getUserRunning() {
    try {
        return UserGroupInformation.getCurrentUser().getShortUserName();
    } catch (Throwable t) {
        if (LOG.isDebugEnabled() && !(t instanceof ClassNotFoundException)) {
            LOG.debug("Cannot determine user/group information using Hadoop utils.", t);
        }
    }

    String user = System.getProperty("user.name");
    if (user == null) {
        user = UNKNOWN;
        if (LOG.isDebugEnabled()) {
            LOG.debug("Cannot determine user/group information for the current user.");
        }
    }
    return user;
}

From source file: org.apache.flink.runtime.util.HadoopUtils.java

License: Apache License

/**
 * Indicates whether the current user has an HDFS delegation token.
 */
public static boolean hasHDFSDelegationToken() throws Exception {
    UserGroupInformation loginUser = UserGroupInformation.getCurrentUser();
    Collection<Token<? extends TokenIdentifier>> usrTok = loginUser.getTokens();
    for (Token<? extends TokenIdentifier> token : usrTok) {
        if (token.getKind().equals(HDFS_DELEGATION_TOKEN_KIND)) {
            return true;
        }
    }
    return false;
}

From source file: org.apache.flink.yarn.AbstractYarnClusterDescriptor.java

License: Apache License

@Override
public YarnClusterClient deploy() {
    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            // note: UGI::hasKerberosCredentials inaccurately reports false
            // for logins based on a keytab (fixed in Hadoop 2.6.1, see HADOOP-10786),
            // so we check only in ticket cache scenario.
            boolean useTicketCache = flinkConfiguration
                    .getBoolean(SecurityOptions.KERBEROS_LOGIN_USETICKETCACHE);

            UserGroupInformation loginUser = UserGroupInformation.getCurrentUser();
            if (loginUser.getAuthenticationMethod() == UserGroupInformation.AuthenticationMethod.KERBEROS
                    && useTicketCache && !loginUser.hasKerberosCredentials()) {
                LOG.error(
                        "Hadoop security with Kerberos is enabled but the login user does not have Kerberos credentials");
                throw new RuntimeException("Hadoop security with Kerberos is enabled but the login user "
                        + "does not have Kerberos credentials");
            }
        }
        return deployInternal();
    } catch (Exception e) {
        throw new RuntimeException("Couldn't deploy Yarn cluster", e);
    }
}

From source file: org.apache.flink.yarn.AbstractYarnClusterDescriptor.java

License: Apache License

public ApplicationReport startAppMaster(JobGraph jobGraph, YarnClient yarnClient,
        YarnClientApplication yarnApplication) throws Exception {

    // ------------------ Set default file system scheme -------------------------

    try {
        org.apache.flink.core.fs.FileSystem.setDefaultScheme(flinkConfiguration);
    } catch (IOException e) {
        throw new IOException("Error while setting the default " + "filesystem scheme from configuration.", e);
    }

    // initialize file system
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    final FileSystem fs = FileSystem.get(conf);

    // hard-coded check for the Google HDFS client because it does not override the getScheme() method.
    if (!fs.getClass().getSimpleName().equals("GoogleHadoopFileSystem") && fs.getScheme().startsWith("file")) {
        LOG.warn("The file system scheme is '" + fs.getScheme() + "'. This indicates that the "
                + "specified Hadoop configuration path is wrong and the system is using the default Hadoop configuration values."
                + "The Flink YARN client needs to store its files in a distributed file system");
    }

    ApplicationSubmissionContext appContext = yarnApplication.getApplicationSubmissionContext();
    Set<File> effectiveShipFiles = new HashSet<>(shipFiles.size());
    for (File file : shipFiles) {
        effectiveShipFiles.add(file.getAbsoluteFile());
    }

    //check if there is a logback or log4j file
    File logbackFile = new File(configurationDirectory + File.separator + CONFIG_FILE_LOGBACK_NAME);
    final boolean hasLogback = logbackFile.exists();
    if (hasLogback) {
        effectiveShipFiles.add(logbackFile);
    }

    File log4jFile = new File(configurationDirectory + File.separator + CONFIG_FILE_LOG4J_NAME);
    final boolean hasLog4j = log4jFile.exists();
    if (hasLog4j) {
        effectiveShipFiles.add(log4jFile);
        if (hasLogback) {
            // this means there is already a logback configuration file --> warn the user
            LOG.warn("The configuration directory ('" + configurationDirectory + "') contains both LOG4J and "
                    + "Logback configuration files. Please delete or rename one of them.");
        }
    }

    addLibFolderToShipFiles(effectiveShipFiles);

    // add the user jar to the classpath of the to-be-created cluster
    if (userJarFiles != null) {
        effectiveShipFiles.addAll(userJarFiles);
    }

    // Set-up ApplicationSubmissionContext for the application

    final ApplicationId appId = appContext.getApplicationId();

    // ------------------ Add Zookeeper namespace to local flinkConfiguraton ------
    String zkNamespace = getZookeeperNamespace();
    // no user specified cli argument for namespace?
    if (zkNamespace == null || zkNamespace.isEmpty()) {
        // namespace defined in config? else use applicationId as default.
        zkNamespace = flinkConfiguration.getString(HighAvailabilityOptions.HA_CLUSTER_ID,
                String.valueOf(appId));
        setZookeeperNamespace(zkNamespace);
    }

    flinkConfiguration.setString(HighAvailabilityOptions.HA_CLUSTER_ID, zkNamespace);

    if (HighAvailabilityMode.isHighAvailabilityModeActivated(flinkConfiguration)) {
        // activate re-execution of failed applications
        appContext.setMaxAppAttempts(flinkConfiguration.getInteger(ConfigConstants.YARN_APPLICATION_ATTEMPTS,
                YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS));

        activateHighAvailabilitySupport(appContext);
    } else {
        // set number of application retries to 1 in the default case
        appContext
                .setMaxAppAttempts(flinkConfiguration.getInteger(ConfigConstants.YARN_APPLICATION_ATTEMPTS, 1));
    }

    // local resource map for Yarn
    final Map<String, LocalResource> localResources = new HashMap<>(2 + effectiveShipFiles.size());
    // list of remote paths (after upload)
    final List<Path> paths = new ArrayList<>(2 + effectiveShipFiles.size());
    // classpath assembler
    final StringBuilder classPathBuilder = new StringBuilder();
    // ship list that enables reuse of resources for task manager containers
    StringBuilder envShipFileList = new StringBuilder();

    // upload and register ship files
    for (File shipFile : effectiveShipFiles) {
        LocalResource shipResources = Records.newRecord(LocalResource.class);

        Path shipLocalPath = new Path("file://" + shipFile.getAbsolutePath());
        Path remotePath = Utils.setupLocalResource(fs, appId.toString(), shipLocalPath, shipResources,
                fs.getHomeDirectory());

        paths.add(remotePath);

        localResources.put(shipFile.getName(), shipResources);

        if (shipFile.isDirectory()) {
            // add directories to the classpath
            java.nio.file.Path shipPath = shipFile.toPath();
            final java.nio.file.Path parentPath = shipPath.getParent();

            Files.walkFileTree(shipPath, new SimpleFileVisitor<java.nio.file.Path>() {
                @Override
                public FileVisitResult preVisitDirectory(java.nio.file.Path dir, BasicFileAttributes attrs)
                        throws IOException {
                    super.preVisitDirectory(dir, attrs);

                    java.nio.file.Path relativePath = parentPath.relativize(dir);

                    classPathBuilder.append(relativePath).append(File.separator).append("*")
                            .append(File.pathSeparator);

                    return FileVisitResult.CONTINUE;
                }
            });
        } else {
            // add files to the classpath
            classPathBuilder.append(shipFile.getName()).append(File.pathSeparator);
        }

        envShipFileList.append(remotePath).append(",");
    }

    // Setup jar for ApplicationMaster
    LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    LocalResource flinkConf = Records.newRecord(LocalResource.class);
    Path remotePathJar = Utils.setupLocalResource(fs, appId.toString(), flinkJarPath, appMasterJar,
            fs.getHomeDirectory());
    Path remotePathConf = Utils.setupLocalResource(fs, appId.toString(), flinkConfigurationPath, flinkConf,
            fs.getHomeDirectory());
    localResources.put("flink.jar", appMasterJar);
    localResources.put("flink-conf.yaml", flinkConf);

    paths.add(remotePathJar);
    classPathBuilder.append("flink.jar").append(File.pathSeparator);
    paths.add(remotePathConf);
    classPathBuilder.append("flink-conf.yaml").append(File.pathSeparator);

    // write job graph to tmp file and add it to local resource
    // TODO: the server should use the user's main method to generate the job graph
    if (jobGraph != null) {
        try {
            File fp = File.createTempFile(appId.toString(), null);
            fp.deleteOnExit();
            try (FileOutputStream output = new FileOutputStream(fp);
                    ObjectOutputStream obOutput = new ObjectOutputStream(output);) {
                obOutput.writeObject(jobGraph);
            }
            LocalResource jobgraph = Records.newRecord(LocalResource.class);
            Path remoteJobGraph = Utils.setupLocalResource(fs, appId.toString(), new Path(fp.toURI()), jobgraph,
                    fs.getHomeDirectory());
            localResources.put("job.graph", jobgraph);
            paths.add(remoteJobGraph);
            classPathBuilder.append("job.graph").append(File.pathSeparator);
        } catch (Exception e) {
            LOG.warn("Add job graph to local resource fail");
            throw e;
        }
    }

    sessionFilesDir = new Path(fs.getHomeDirectory(), ".flink/" + appId.toString() + "/");

    FsPermission permission = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
    fs.setPermission(sessionFilesDir, permission); // set permission for path.

    //To support the Yarn secure integration test scenario:
    //in the integration test setup, the Yarn containers created by YarnMiniCluster do not have the Yarn site XML
    //and KRB5 configuration files. We add these files as container local resources so that the container
    //applications (JM/TMs) have a proper secure cluster setup.
    Path remoteKrb5Path = null;
    Path remoteYarnSiteXmlPath = null;
    boolean hasKrb5 = false;
    if (System.getenv("IN_TESTS") != null) {
        String krb5Config = System.getProperty("java.security.krb5.conf");
        if (krb5Config != null && krb5Config.length() != 0) {
            File krb5 = new File(krb5Config);
            LOG.info("Adding KRB5 configuration {} to the AM container local resource bucket",
                    krb5.getAbsolutePath());
            LocalResource krb5ConfResource = Records.newRecord(LocalResource.class);
            Path krb5ConfPath = new Path(krb5.getAbsolutePath());
            remoteKrb5Path = Utils.setupLocalResource(fs, appId.toString(), krb5ConfPath, krb5ConfResource,
                    fs.getHomeDirectory());
            localResources.put(Utils.KRB5_FILE_NAME, krb5ConfResource);

            File f = new File(System.getenv("YARN_CONF_DIR"), Utils.YARN_SITE_FILE_NAME);
            LOG.info("Adding Yarn configuration {} to the AM container local resource bucket",
                    f.getAbsolutePath());
            LocalResource yarnConfResource = Records.newRecord(LocalResource.class);
            Path yarnSitePath = new Path(f.getAbsolutePath());
            remoteYarnSiteXmlPath = Utils.setupLocalResource(fs, appId.toString(), yarnSitePath,
                    yarnConfResource, fs.getHomeDirectory());
            localResources.put(Utils.YARN_SITE_FILE_NAME, yarnConfResource);

            hasKrb5 = true;
        }
    }

    // setup security tokens
    LocalResource keytabResource = null;
    Path remotePathKeytab = null;
    String keytab = flinkConfiguration.getString(SecurityOptions.KERBEROS_LOGIN_KEYTAB);
    if (keytab != null) {
        LOG.info("Adding keytab {} to the AM container local resource bucket", keytab);
        keytabResource = Records.newRecord(LocalResource.class);
        Path keytabPath = new Path(keytab);
        remotePathKeytab = Utils.setupLocalResource(fs, appId.toString(), keytabPath, keytabResource,
                fs.getHomeDirectory());
        localResources.put(Utils.KEYTAB_FILE_NAME, keytabResource);
    }

    final ContainerLaunchContext amContainer = setupApplicationMasterContainer(hasLogback, hasLog4j, hasKrb5);

    if (UserGroupInformation.isSecurityEnabled() && keytab == null) {
        //set tokens only when keytab is not provided
        LOG.info("Adding delegation token to the AM container..");
        Utils.setTokensFor(amContainer, paths, conf);
    }

    amContainer.setLocalResources(localResources);
    fs.close();

    // Setup CLASSPATH and environment variables for ApplicationMaster
    final Map<String, String> appMasterEnv = new HashMap<>();
    // set user specified app master environment variables
    appMasterEnv.putAll(Utils.getEnvironmentVariables(ConfigConstants.YARN_APPLICATION_MASTER_ENV_PREFIX,
            flinkConfiguration));
    // set Flink app class path
    appMasterEnv.put(YarnConfigKeys.ENV_FLINK_CLASSPATH, classPathBuilder.toString());

    // set Flink on YARN internal configuration values
    appMasterEnv.put(YarnConfigKeys.ENV_TM_COUNT, String.valueOf(taskManagerCount));
    appMasterEnv.put(YarnConfigKeys.ENV_TM_MEMORY, String.valueOf(taskManagerMemoryMb));
    appMasterEnv.put(YarnConfigKeys.FLINK_JAR_PATH, remotePathJar.toString());
    appMasterEnv.put(YarnConfigKeys.ENV_APP_ID, appId.toString());
    appMasterEnv.put(YarnConfigKeys.ENV_CLIENT_HOME_DIR, fs.getHomeDirectory().toString());
    appMasterEnv.put(YarnConfigKeys.ENV_CLIENT_SHIP_FILES, envShipFileList.toString());
    appMasterEnv.put(YarnConfigKeys.ENV_SLOTS, String.valueOf(slots));
    appMasterEnv.put(YarnConfigKeys.ENV_DETACHED, String.valueOf(detached));
    appMasterEnv.put(YarnConfigKeys.ENV_ZOOKEEPER_NAMESPACE, getZookeeperNamespace());

    // https://github.com/apache/hadoop/blob/trunk/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnApplicationSecurity.md#identity-on-an-insecure-cluster-hadoop_user_name
    appMasterEnv.put(YarnConfigKeys.ENV_HADOOP_USER_NAME, UserGroupInformation.getCurrentUser().getUserName());

    if (keytabResource != null) {
        appMasterEnv.put(YarnConfigKeys.KEYTAB_PATH, remotePathKeytab.toString());
        String principal = flinkConfiguration.getString(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL);
        appMasterEnv.put(YarnConfigKeys.KEYTAB_PRINCIPAL, principal);
    }

    //To support Yarn Secure Integration Test Scenario
    if (remoteYarnSiteXmlPath != null && remoteKrb5Path != null) {
        appMasterEnv.put(YarnConfigKeys.ENV_YARN_SITE_XML_PATH, remoteYarnSiteXmlPath.toString());
        appMasterEnv.put(YarnConfigKeys.ENV_KRB5_PATH, remoteKrb5Path.toString());
    }

    if (dynamicPropertiesEncoded != null) {
        appMasterEnv.put(YarnConfigKeys.ENV_DYNAMIC_PROPERTIES, dynamicPropertiesEncoded);
    }

    // set classpath from YARN configuration
    Utils.setupYarnClassPath(conf, appMasterEnv);

    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(jobManagerMemoryMb);
    capability.setVirtualCores(1);

    String name;
    if (customName == null) {
        name = "Flink session with " + taskManagerCount + " TaskManagers";
        if (detached) {
            name += " (detached)";
        }
    } else {
        name = customName;
    }

    appContext.setApplicationName(name);
    appContext.setApplicationType("Apache Flink");
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    if (yarnQueue != null) {
        appContext.setQueue(yarnQueue);
    }

    setApplicationTags(appContext);

    // add a hook to clean up in case deployment fails
    Thread deploymentFailureHook = new DeploymentFailureHook(yarnClient, yarnApplication);
    Runtime.getRuntime().addShutdownHook(deploymentFailureHook);
    LOG.info("Submitting application master " + appId);
    yarnClient.submitApplication(appContext);

    LOG.info("Waiting for the cluster to be allocated");
    final long startTime = System.currentTimeMillis();
    ApplicationReport report;
    YarnApplicationState lastAppState = YarnApplicationState.NEW;
    loop: while (true) {
        try {
            report = yarnClient.getApplicationReport(appId);
        } catch (IOException e) {
            throw new YarnDeploymentException("Failed to deploy the cluster.", e);
        }
        YarnApplicationState appState = report.getYarnApplicationState();
        LOG.debug("Application State: {}", appState);
        switch (appState) {
        case FAILED:
        case FINISHED: //TODO: the finished state may be valid in flip-6
        case KILLED:
            throw new YarnDeploymentException("The YARN application unexpectedly switched to state " + appState
                    + " during deployment. \n" + "Diagnostics from YARN: " + report.getDiagnostics() + "\n"
                    + "If log aggregation is enabled on your cluster, use this command to further investigate the issue:\n"
                    + "yarn logs -applicationId " + appId);
            // no break needed: the exception above exits the switch
        case RUNNING:
            LOG.info("YARN application has been deployed successfully.");
            break loop;
        default:
            if (appState != lastAppState) {
                LOG.info("Deploying cluster, current state " + appState);
            }
            if (System.currentTimeMillis() - startTime > 60000) {
                LOG.info(
                        "Deployment took more than 60 seconds. Please check if the requested resources are available in the YARN cluster");
            }

        }
        lastAppState = appState;
        Thread.sleep(250);
    }
    // print the application id so that users can cancel the application themselves.
    if (isDetachedMode()) {
        LOG.info("The Flink YARN client has been started in detached mode. In order to stop "
                + "Flink on YARN, use the following command or a YARN web interface to stop "
                + "it:\nyarn application -kill " + appId + "\nPlease also note that the "
                + "temporary files of the YARN session in the home directoy will not be removed.");
    }
    // since deployment was successful, remove the hook
    try {
        Runtime.getRuntime().removeShutdownHook(deploymentFailureHook);
    } catch (IllegalStateException e) {
        // we're already in the shutdown hook.
    }
    return report;
}