Example usage for org.apache.hadoop.security UserGroupInformation getCredentials

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.getCredentials.

Prototype

public Credentials getCredentials() 

Document

Obtain the tokens in credentials form associated with this user.
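
A minimal, self-contained sketch of the call itself (the class name is illustrative; a Hadoop client classpath is assumed):

import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class ListCurrentUserTokens {
    public static void main(String[] args) throws Exception {
        // The UGI for the current process; tokens typically arrive via
        // HADOOP_TOKEN_FILE_LOCATION or an explicit addCredentials() call.
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

        // getCredentials() returns the user's tokens in Credentials form.
        Credentials credentials = ugi.getCredentials();
        for (Token<? extends TokenIdentifier> token : credentials.getAllTokens()) {
            System.out.println(token.getKind() + " @ " + token.getService());
        }
        System.out.println(credentials.numberOfTokens() + " token(s) held");
    }
}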

Usage

From source file: org.apache.flink.yarn.YarnApplicationMasterRunner.java

License: Apache License

/**
 * Creates the launch context, which describes how to bring up a TaskManager process in
 * an allocated YARN container.
 * 
 * <p>This code is extremely YARN specific and registers all the resources that the TaskManager
 * needs (such as JAR file, config file, ...) and all environment variables in a YARN
 * container launch context. The launch context then ensures that those resources will be
 * copied into the container's transient working directory. 
 * 
 * <p>We do this work before we start the ResourceManager actor in order to fail early if
 * any of the operations here fail.
 * 
 * @param flinkConfig
 *         The Flink configuration object.
 * @param yarnConfig
 *         The YARN configuration object.
 * @param env
 *         The environment variables.
 * @param tmParams
 *         The TaskManager container memory parameters. 
 * @param taskManagerConfig
 *         The configuration for the TaskManagers.
 * @param workingDirectory
 *         The current application master container's working directory. 
 * @param taskManagerMainClass
 *         The class with the main method.
 * @param log
 *         The logger.
 * 
 * @return The launch context for the TaskManager processes.
 * 
 * @throws Exception Thrown if the launch context could not be created, for example if
 *                   the resources could not be copied.
 */
public static ContainerLaunchContext createTaskManagerContext(Configuration flinkConfig,
        YarnConfiguration yarnConfig, Map<String, String> env, ContaineredTaskManagerParameters tmParams,
        Configuration taskManagerConfig, String workingDirectory, Class<?> taskManagerMainClass, Logger log)
        throws Exception {

    log.info("Setting up resources for TaskManagers");

    // get and validate all relevant variables

    String remoteFlinkJarPath = env.get(YarnConfigKeys.FLINK_JAR_PATH);
    require(remoteFlinkJarPath != null, "Environment variable %s not set", YarnConfigKeys.FLINK_JAR_PATH);

    String appId = env.get(YarnConfigKeys.ENV_APP_ID);
    require(appId != null, "Environment variable %s not set", YarnConfigKeys.ENV_APP_ID);

    String clientHomeDir = env.get(YarnConfigKeys.ENV_CLIENT_HOME_DIR);
    require(clientHomeDir != null, "Environment variable %s not set", YarnConfigKeys.ENV_CLIENT_HOME_DIR);

    String shipListString = env.get(YarnConfigKeys.ENV_CLIENT_SHIP_FILES);
    require(shipListString != null, "Environment variable %s not set", YarnConfigKeys.ENV_CLIENT_SHIP_FILES);

    String yarnClientUsername = env.get(YarnConfigKeys.ENV_CLIENT_USERNAME);
    require(yarnClientUsername != null, "Environment variable %s not set", YarnConfigKeys.ENV_CLIENT_USERNAME);

    // obtain a handle to the file system used by YARN
    final org.apache.hadoop.fs.FileSystem yarnFileSystem;
    try {
        yarnFileSystem = org.apache.hadoop.fs.FileSystem.get(yarnConfig);
    } catch (IOException e) {
        throw new Exception("Could not access YARN's default file system", e);
    }

    // register Flink Jar with remote HDFS
    LocalResource flinkJar = Records.newRecord(LocalResource.class);
    {
        Path remoteJarPath = new Path(remoteFlinkJarPath);
        Utils.registerLocalResource(yarnFileSystem, remoteJarPath, flinkJar);
    }

    // register conf with local fs
    LocalResource flinkConf = Records.newRecord(LocalResource.class);
    {
        // write the TaskManager configuration to a local file
        final File taskManagerConfigFile = new File(workingDirectory,
                UUID.randomUUID() + "-taskmanager-conf.yaml");
        LOG.debug("Writing TaskManager configuration to {}", taskManagerConfigFile.getAbsolutePath());
        BootstrapTools.writeConfiguration(taskManagerConfig, taskManagerConfigFile);

        Utils.setupLocalResource(yarnFileSystem, appId, new Path(taskManagerConfigFile.toURI()), flinkConf,
                new Path(clientHomeDir));

        log.info("Prepared local resource for modified yaml: {}", flinkConf);
    }

    Map<String, LocalResource> taskManagerLocalResources = new HashMap<>();
    taskManagerLocalResources.put("flink.jar", flinkJar);
    taskManagerLocalResources.put("flink-conf.yaml", flinkConf);

    // prepare additional files to be shipped
    for (String pathStr : shipListString.split(",")) {
        if (!pathStr.isEmpty()) {
            LocalResource resource = Records.newRecord(LocalResource.class);
            Path path = new Path(pathStr);
            Utils.registerLocalResource(yarnFileSystem, path, resource);
            taskManagerLocalResources.put(path.getName(), resource);
        }
    }

    // now that all resources are prepared, we can create the launch context

    log.info("Creating container launch context for TaskManagers");

    boolean hasLogback = new File(workingDirectory, "logback.xml").exists();
    boolean hasLog4j = new File(workingDirectory, "log4j.properties").exists();

    String launchCommand = BootstrapTools.getTaskManagerShellCommand(flinkConfig, tmParams, ".",
            ApplicationConstants.LOG_DIR_EXPANSION_VAR, hasLogback, hasLog4j, taskManagerMainClass);

    log.info("Starting TaskManagers with command: " + launchCommand);

    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
    ctx.setCommands(Collections.singletonList(launchCommand));
    ctx.setLocalResources(taskManagerLocalResources);

    Map<String, String> containerEnv = new HashMap<>();
    containerEnv.putAll(tmParams.taskManagerEnv());

    // add YARN classpath, etc to the container environment
    Utils.setupEnv(yarnConfig, containerEnv);
    containerEnv.put(YarnConfigKeys.ENV_CLIENT_USERNAME, yarnClientUsername);

    ctx.setEnvironment(containerEnv);

    try {
        UserGroupInformation user = UserGroupInformation.getCurrentUser();
        Credentials credentials = user.getCredentials();
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        ctx.setTokens(securityTokens);
    } catch (Throwable t) {
        log.error("Getting current user info failed when trying to launch the container", t);
    }

    return ctx;
}
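
The token-handling block at the end of this method (getCurrentUser, getCredentials, writeTokenStorageToStream, ByteBuffer.wrap) is a pattern that recurs in several of the examples below. Distilled into a standalone helper it looks roughly like this (the helper class is hypothetical, not part of Flink):

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

final class TokenBuffers {
    /**
     * Serialize the current user's tokens into the ByteBuffer format
     * expected by ContainerLaunchContext#setTokens.
     */
    static ByteBuffer currentUserTokens() throws IOException {
        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    }
}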

From source file: org.apache.gobblin.util.hadoop.TokenUtils.java

License: Apache License

/**
 * Get Hadoop tokens (tokens for the job history server, job tracker, Hive and HDFS) using a Kerberos keytab
 * on behalf of a proxy user, embed the tokens into a {@link UserGroupInformation} that is returned, and
 * persist the in-memory credentials if a tokenFile is specified.
 *
 * Note that when a super-user is fetching tokens for other users, the getDelegationToken call inside
 * {@link #fetchHcatToken(String, HiveConf, String, IMetaStoreClient)} explicitly takes a string parameter
 * indicating the proxy user, while other Hadoop services require impersonation first.
 *
 * @param state A {@link State} object that should contain properties.
 * @param tokenFile If present, the file will store materialized credentials.
 * @param ugi The {@link UserGroupInformation} used to impersonate the proxy user via a "doAs" block.
 * @param targetUser The user to impersonate when fetching Hadoop tokens.
 * @return A {@link UserGroupInformation} containing negotiated credentials.
 */
public static UserGroupInformation getHadoopAndHiveTokensForProxyUser(final State state,
        Optional<File> tokenFile, UserGroupInformation ugi, IMetaStoreClient client, String targetUser)
        throws IOException, InterruptedException {
    final Credentials cred = new Credentials();
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            getHadoopTokens(state, Optional.absent(), cred);
            return null;
        }
    });

    ugi.getCredentials().addAll(cred);
    // Will add hive tokens into ugi in this method.
    getHiveToken(state, client, cred, targetUser, ugi);

    if (tokenFile.isPresent()) {
        persistTokens(cred, tokenFile.get());
    }
    // At this point, the ugi may hold more tokens than the Credentials object,
    // since the Hive token is not put into the Credentials object.
    return ugi;
}
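
The doAs construct used above is the standard way to fetch tokens on behalf of another user. Stripped to a skeleton, with the actual token-fetching left as a placeholder, it looks roughly like this (class and method names are illustrative; the login user needs proxy-user privileges configured via the hadoop.proxyuser.* settings in core-site.xml):

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

public class ProxyTokenSketch {
    public static Credentials fetchAsProxy(String targetUser) throws Exception {
        // Build a UGI that impersonates the target user on top of the login user.
        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser(targetUser,
                UserGroupInformation.getLoginUser());
        final Credentials cred = new Credentials();
        proxyUgi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                // ...fetch delegation tokens here and add them to cred...
                return null;
            }
        });
        return cred;
    }
}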

From source file: org.apache.hoya.yarn.appmaster.HoyaAppMaster.java

License: Apache License

/**
 * Create and run the cluster.
 * @return exit code
 * @throws Throwable on a failure
 */
private int createAndRunCluster(String clustername) throws Throwable {
    HoyaVersionInfo.loadAndPrintVersionInfo(log);

    //load the cluster description from the cd argument
    String hoyaClusterDir = serviceArgs.getHoyaClusterURI();
    URI hoyaClusterURI = new URI(hoyaClusterDir);
    Path clusterDirPath = new Path(hoyaClusterURI);
    HoyaFileSystem fs = getClusterFS();

    // build up information about the running application -this
    // will be passed down to the cluster status
    MapOperations appInformation = new MapOperations();

    AggregateConf instanceDefinition = InstanceIO.loadInstanceDefinitionUnresolved(fs, clusterDirPath);

    log.info("Deploying cluster {}:", instanceDefinition);

    //REVISIT: why is this done?
    appState.updateInstanceDefinition(instanceDefinition);
    File confDir = getLocalConfDir();
    if (!confDir.exists() || !confDir.isDirectory()) {
        log.error("Bad conf dir {}", confDir);
        File parentFile = confDir.getParentFile();
        log.error("Parent dir {}:\n{}", parentFile, HoyaUtils.listDir(parentFile));
        throw new BadCommandArgumentsException("Configuration directory %s doesn't exist", confDir);
    }

    Configuration serviceConf = getConfig();
    // Try to get the proper filtering of static resources through the yarn proxy working
    serviceConf.set("hadoop.http.filter.initializers",
            "org.apache.hadoop.yarn.server.webproxy.amfilter.AmFilterInitializer");

    conf = new YarnConfiguration(serviceConf);
    //get our provider
    MapOperations globalOptions = instanceDefinition.getInternalOperations().getGlobalOptions();
    String providerType = globalOptions.getMandatoryOption(OptionKeys.INTERNAL_PROVIDER_NAME);
    log.info("Cluster provider type is {}", providerType);
    HoyaProviderFactory factory = HoyaProviderFactory.createHoyaProviderFactory(providerType);
    providerService = factory.createServerProvider();
    // init the provider BUT DO NOT START IT YET
    providerService.init(getConfig());
    addService(providerService);

    InetSocketAddress address = HoyaUtils.getRmSchedulerAddress(conf);
    log.info("RM is at {}", address);
    yarnRPC = YarnRPC.create(conf);

    /*
     * Extract the container ID. This is then
     * turned into an (incomplete) container
     */
    appMasterContainerID = ConverterUtils.toContainerId(
            HoyaUtils.mandatoryEnvVariable(ApplicationConstants.Environment.CONTAINER_ID.name()));
    appAttemptID = appMasterContainerID.getApplicationAttemptId();

    ApplicationId appid = appAttemptID.getApplicationId();
    log.info("Hoya AM for ID {}", appid.getId());

    appInformation.put(StatusKeys.INFO_AM_CONTAINER_ID, appMasterContainerID.toString());
    appInformation.put(StatusKeys.INFO_AM_APP_ID, appid.toString());
    appInformation.put(StatusKeys.INFO_AM_ATTEMPT_ID, appAttemptID.toString());

    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    Credentials credentials = currentUser.getCredentials();
    // Remove the AM->RM token so that containers cannot access it; this must
    // happen before the credentials are serialized.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        log.info("Token {}", token.getKind());
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    dob.close();
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // set up secret manager
    secretManager = new ClientToAMTokenSecretManager(appAttemptID, null);

    // if not a secure cluster, extract the username -it will be
    // propagated to workers
    if (!UserGroupInformation.isSecurityEnabled()) {
        hoyaUsername = System.getenv(HADOOP_USER_NAME);
        log.info(HADOOP_USER_NAME + "='{}'", hoyaUsername);
    }

    Map<String, String> envVars;

    /**
     * It is critical this section is synchronized, to stop async AM events
     * arriving while registering a restarting AM.
     */
    synchronized (appState) {
        int heartbeatInterval = HEARTBEAT_INTERVAL;

        //add the RM client -this brings the callbacks in
        asyncRMClient = AMRMClientAsync.createAMRMClientAsync(heartbeatInterval, this);
        addService(asyncRMClient);
        //wrap it for the app state model
        rmOperationHandler = new AsyncRMOperationHandler(asyncRMClient);
        //now bring it up
        runChildService(asyncRMClient);

        //nmclient relays callbacks back to this class
        nmClientAsync = new NMClientAsyncImpl("nmclient", this);
        runChildService(nmClientAsync);

        //bring up the Hoya RPC service
        startHoyaRPCServer();

        InetSocketAddress rpcServiceAddr = rpcService.getConnectAddress();
        appMasterHostname = rpcServiceAddr.getHostName();
        appMasterRpcPort = rpcServiceAddr.getPort();
        appMasterTrackingUrl = null;
        log.info("AM Server is listening at {}:{}", appMasterHostname, appMasterRpcPort);
        appInformation.put(StatusKeys.INFO_AM_HOSTNAME, appMasterHostname);
        appInformation.set(StatusKeys.INFO_AM_RPC_PORT, appMasterRpcPort);

        //build the role map
        List<ProviderRole> providerRoles = new ArrayList<ProviderRole>(providerService.getRoles());
        providerRoles.addAll(HoyaAMClientProvider.ROLES);

        // Start up the WebApp and track the URL for it
        webApp = new HoyaAMWebApp();
        WebApps.$for("hoyaam", WebAppApi.class, new WebAppApiImpl(this, appState, providerService), "ws")
                .with(serviceConf).start(webApp);
        appMasterTrackingUrl = "http://" + appMasterHostname + ":" + webApp.port();
        WebAppService<HoyaAMWebApp> webAppService = new WebAppService<HoyaAMWebApp>("hoya", webApp);

        webAppService.init(conf);
        webAppService.start();
        addService(webAppService);

        appInformation.put(StatusKeys.INFO_AM_WEB_URL, appMasterTrackingUrl + "/");
        appInformation.set(StatusKeys.INFO_AM_WEB_PORT, webApp.port());

        // Register self with ResourceManager
        // This will start heartbeating to the RM
        // address = HoyaUtils.getRmSchedulerAddress(asyncRMClient.getConfig());
        log.info("Connecting to RM at {},address tracking URL={}", appMasterRpcPort, appMasterTrackingUrl);
        RegisterApplicationMasterResponse response = asyncRMClient.registerApplicationMaster(appMasterHostname,
                appMasterRpcPort, appMasterTrackingUrl);
        Resource maxResources = response.getMaximumResourceCapability();
        containerMaxMemory = maxResources.getMemory();
        containerMaxCores = maxResources.getVirtualCores();
        appState.setContainerLimits(maxResources.getMemory(), maxResources.getVirtualCores());
        // set the RM-defined maximum cluster values
        appInformation.put(ResourceKeys.YARN_CORES, Integer.toString(containerMaxCores));
        appInformation.put(ResourceKeys.YARN_MEMORY, Integer.toString(containerMaxMemory));

        boolean securityEnabled = UserGroupInformation.isSecurityEnabled();
        if (securityEnabled) {
            secretManager.setMasterKey(response.getClientToAMTokenMasterKey().array());
            applicationACLs = response.getApplicationACLs();

            //tell the server what the ACLs are 
            rpcService.getServer().refreshServiceAcl(conf, new HoyaAMPolicyProvider());
        }

        // extract container list
        List<Container> liveContainers = AMRestartSupport.retrieveContainersFromPreviousAttempt(response);
        String amRestartSupported = Boolean.toString(liveContainers != null);
        appInformation.put(StatusKeys.INFO_AM_RESTART_SUPPORTED, amRestartSupported);

        //now validate the installation
        Configuration providerConf = providerService.loadProviderConfigurationInformation(confDir);

        providerService.validateApplicationConfiguration(instanceDefinition, confDir, securityEnabled);

        //determine the location for the role history data
        Path historyDir = new Path(clusterDirPath, HISTORY_DIR_NAME);

        //build the instance
        appState.buildInstance(instanceDefinition, providerConf, providerRoles, fs.getFileSystem(), historyDir,
                liveContainers, appInformation);

        // add the AM to the list of nodes in the cluster

        appState.buildAppMasterNode(appMasterContainerID, appMasterHostname, webApp.port(),
                appMasterHostname + ":" + webApp.port());

        // build up environment variables that the AM wants set in every container
        // irrespective of provider and role.
        envVars = new HashMap<String, String>();
        if (hoyaUsername != null) {
            envVars.put(HADOOP_USER_NAME, hoyaUsername);
        }
    }
    String rolesTmpSubdir = appMasterContainerID.toString() + "/roles";

    String amTmpDir = globalOptions.getMandatoryOption(OptionKeys.INTERNAL_AM_TMP_DIR);

    Path tmpDirPath = new Path(amTmpDir);
    Path launcherTmpDirPath = new Path(tmpDirPath, rolesTmpSubdir);
    fs.getFileSystem().mkdirs(launcherTmpDirPath);

    //launcher service
    launchService = new RoleLaunchService(this, providerService, fs, new Path(getGeneratedConfDir()), envVars,
            launcherTmpDirPath);

    runChildService(launchService);

    appState.noteAMLaunched();

    //Give the provider restricted access to the state
    providerService.bind(appState);

    // launch the provider; this is expected to trigger a callback that
    // brings up the service
    launchProviderService(instanceDefinition, confDir);

    try {
        //now block waiting to be told to exit the process
        waitForAMCompletionSignal();
        //shutdown time
    } finally {
        finish();
    }

    return amExitCode;
}

From source file: org.apache.reef.runtime.yarn.client.unmanaged.YarnProxyUser.java

License: Apache License

/**
 * Set YARN user. This method can be called only once per class instance.
 * @param name Name of the new proxy user.
 * @param hostUser User credentials to copy. Must be an instance of YarnProxyUser.
 */
@Override
public void set(final String name, final UserCredentials hostUser) throws IOException {

    assert this.proxyUGI == null;
    assert hostUser instanceof YarnProxyUser;

    LOG.log(Level.FINE, "UGI: user {0} copy from: {1}", new Object[] { name, hostUser });

    final UserGroupInformation hostUGI = ((YarnProxyUser) hostUser).get();
    final Collection<Token<? extends TokenIdentifier>> tokens = hostUGI.getCredentials().getAllTokens();

    this.set(name, hostUGI, tokens.toArray(new Token[tokens.size()]));
}

From source file: org.apache.reef.runtime.yarn.client.unmanaged.YarnProxyUser.java

License: Apache License

private static String ugiToString(final String prefix, final UserGroupInformation ugi) {
    return String.format("UGI: { %s user: %s tokens: %s }", prefix, ugi, ugi.getCredentials().getAllTokens());
}

From source file: org.apache.reef.runtime.yarn.client.UserCredentialSecurityTokenProvider.java

License: Apache License

@Override
public byte[] getTokens() {
    try {
        final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        final Credentials credentials = ugi.getCredentials();
        if (credentials.numberOfTokens() > 0) {
            try (final DataOutputBuffer dob = new DataOutputBuffer()) {
                credentials.writeTokenStorageToStream(dob);
                return dob.getData();
            }
        }
    } catch (IOException e) {
        LOG.log(Level.WARNING, "Could not access tokens in user credentials.", e);
    }

    LOG.log(Level.FINE, "No security token found.");
    return null;
}
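
A blob produced this way can be turned back into tokens on the receiving side with the matching read API; a sketch (class and method names are illustrative):

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

public class TokenReaderSketch {
    /**
     * Deserialize a blob produced by writeTokenStorageToStream and attach
     * the tokens to the given UGI.
     */
    public static void addSerializedTokens(byte[] tokenBytes, UserGroupInformation ugi) throws IOException {
        Credentials credentials = new Credentials();
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(tokenBytes))) {
            credentials.readTokenStorageStream(in);
        }
        ugi.addCredentials(credentials);
    }
}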

From source file: org.apache.reef.runtime.yarn.driver.unmanaged.UnmanagedAmTest.java

License: Apache License

private static ByteBuffer getTokens() throws IOException {

    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    final Credentials credentials = ugi.getCredentials();

    try (final DataOutputBuffer dob = new DataOutputBuffer()) {
        credentials.writeTokenStorageToStream(dob);
        return ByteBuffer.wrap(dob.getData());
    }
}

From source file: org.apache.slider.client.TokensOperation.java

License: Apache License

public int actionTokens(ActionTokensArgs args, FileSystem fs, Configuration conf, YarnClientImpl yarnClient)
        throws IOException, YarnException {
    Credentials credentials;
    String footnote = "";
    UserGroupInformation user = UserGroupInformation.getCurrentUser();
    boolean isSecure = UserGroupInformation.isSecurityEnabled();
    if (args.keytab != null) {
        File keytab = args.keytab;
        if (!keytab.isFile()) {
            throw new NotFoundException(E_NO_KEYTAB + keytab.getAbsolutePath());
        }
        String principal = args.principal;
        log.info("Logging in as {} from keytab {}", principal, keytab);
        user = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab.getCanonicalPath());
    }
    Credentials userCredentials = user.getCredentials();
    File output = args.output;
    if (output != null) {
        if (!isSecure) {
            throw new BadClusterStateException(E_INSECURE);
        }
        credentials = new Credentials(userCredentials);
        // filesystem
        addRMRenewableFSDelegationTokens(conf, fs, credentials);
        addRMDelegationToken(yarnClient, credentials);
        if (maybeAddTimelineToken(conf, credentials) != null) {
            log.debug("Added timeline token");
        }
        saveTokens(output, credentials);
        String filename = output.getCanonicalPath();
        footnote = String.format(
                "%d tokens saved to %s\n" + "To use these in the environment:\n" + "export %s=%s",
                credentials.numberOfTokens(), filename, UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION,
                filename);
    } else if (args.source != null) {
        File source = args.source;
        log.info("Reading credentials from file {}", source);
        if (!source.isFile()) {
            throw new NotFoundException(E_MISSING_SOURCE_FILE + source.getAbsolutePath());
        }
        credentials = Credentials.readTokenStorageFile(args.source, conf);
    } else {
        StringBuffer origin = new StringBuffer();
        File file = locateEnvCredentials(System.getenv(), conf, origin);
        if (file != null) {
            log.info("Credential Source {}", origin);
        } else {
            log.info("Credential source: logged in user");
        }
        credentials = userCredentials;
    }
    // list the tokens
    log.info("\n{}", dumpTokens(credentials, "\n"));
    if (!footnote.isEmpty()) {
        log.info(footnote);
    }
    return 0;
}
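
The footnote above points at HADOOP_TOKEN_FILE_LOCATION: a process started with that variable set picks the token file up automatically at login, and the file can also be inspected explicitly, roughly as follows (class name illustrative):

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

public class TokenFileCheck {
    public static void main(String[] args) throws Exception {
        String location = System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
        if (location != null) {
            // Same storage format as the saveTokens call in the example above.
            Credentials creds = Credentials.readTokenStorageFile(new File(location), new Configuration());
            System.out.println(creds.numberOfTokens() + " token(s) in " + location);
        }
    }
}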

From source file: org.apache.slider.server.appmaster.SliderAppMaster.java

License: Apache License

/**
 * Create and run the cluster.
 * @return exit code
 * @throws Throwable on a failure
 */
private int createAndRunCluster(String clustername) throws Throwable {

    //load the cluster description from the cd argument
    String sliderClusterDir = serviceArgs.getSliderClusterURI();
    URI sliderClusterURI = new URI(sliderClusterDir);
    Path clusterDirPath = new Path(sliderClusterURI);
    log.info("Application defined at {}", sliderClusterURI);
    SliderFileSystem fs = getClusterFS();

    // build up information about the running application -this
    // will be passed down to the cluster status
    MapOperations appInformation = new MapOperations();

    AggregateConf instanceDefinition = InstanceIO.loadInstanceDefinitionUnresolved(fs, clusterDirPath);
    instanceDefinition.setName(clustername);

    log.info("Deploying cluster {}:", instanceDefinition);

    stateForProviders.setApplicationName(clustername);

    Configuration serviceConf = getConfig();

    SecurityConfiguration securityConfiguration = new SecurityConfiguration(serviceConf, instanceDefinition,
            clustername);
    // obtain security state
    boolean securityEnabled = securityConfiguration.isSecurityEnabled();
    // set the global security flag for the instance definition
    instanceDefinition.getAppConfOperations().set(KEY_SECURITY_ENABLED, securityEnabled);

    // triggers resolution and snapshotting in agent
    appState.updateInstanceDefinition(instanceDefinition);

    File confDir = getLocalConfDir();
    if (!confDir.exists() || !confDir.isDirectory()) {
        log.info("Conf dir {} does not exist.", confDir);
        File parentFile = confDir.getParentFile();
        log.info("Parent dir {}:\n{}", parentFile, SliderUtils.listDir(parentFile));
    }

    // IP filtering
    serviceConf.set(HADOOP_HTTP_FILTER_INITIALIZERS, AM_FILTER_NAME);

    //get our provider
    MapOperations globalInternalOptions = getGlobalInternalOptions();
    String providerType = globalInternalOptions.getMandatoryOption(InternalKeys.INTERNAL_PROVIDER_NAME);
    log.info("Cluster provider type is {}", providerType);
    SliderProviderFactory factory = SliderProviderFactory.createSliderProviderFactory(providerType);
    providerService = factory.createServerProvider();
    // init the provider BUT DO NOT START IT YET
    initAndAddService(providerService);
    providerRMOperationHandler = new ProviderNotifyingOperationHandler(providerService);

    // create a slider AM provider
    sliderAMProvider = new SliderAMProviderService();
    initAndAddService(sliderAMProvider);

    InetSocketAddress address = SliderUtils.getRmSchedulerAddress(serviceConf);
    log.info("RM is at {}", address);
    yarnRPC = YarnRPC.create(serviceConf);

    /*
     * Extract the container ID. This is then
     * turned into an (incomplete) container
     */
    appMasterContainerID = ConverterUtils.toContainerId(
            SliderUtils.mandatoryEnvVariable(ApplicationConstants.Environment.CONTAINER_ID.name()));
    appAttemptID = appMasterContainerID.getApplicationAttemptId();

    ApplicationId appid = appAttemptID.getApplicationId();
    log.info("AM for ID {}", appid.getId());

    appInformation.put(StatusKeys.INFO_AM_CONTAINER_ID, appMasterContainerID.toString());
    appInformation.put(StatusKeys.INFO_AM_APP_ID, appid.toString());
    appInformation.put(StatusKeys.INFO_AM_ATTEMPT_ID, appAttemptID.toString());

    Map<String, String> envVars;
    List<Container> liveContainers;
    /**
     * It is critical this section is synchronized, to stop async AM events
     * arriving while registering a restarting AM.
     */
    synchronized (appState) {
        int heartbeatInterval = HEARTBEAT_INTERVAL;

        //add the RM client -this brings the callbacks in
        asyncRMClient = AMRMClientAsync.createAMRMClientAsync(heartbeatInterval, this);
        addService(asyncRMClient);
        //now bring it up
        deployChildService(asyncRMClient);

        //nmclient relays callbacks back to this class
        nmClientAsync = new NMClientAsyncImpl("nmclient", this);
        deployChildService(nmClientAsync);

        // set up secret manager
        secretManager = new ClientToAMTokenSecretManager(appAttemptID, null);

        if (securityEnabled) {
            // fix up the ACLs if they are not set
            String acls = getConfig().get(SliderXmlConfKeys.KEY_PROTOCOL_ACL);
            if (acls == null) {
                getConfig().set(SliderXmlConfKeys.KEY_PROTOCOL_ACL, "*");
            }
        }
        //bring up the Slider RPC service
        startSliderRPCServer(instanceDefinition);

        rpcServiceAddress = rpcService.getConnectAddress();
        appMasterHostname = rpcServiceAddress.getHostName();
        appMasterRpcPort = rpcServiceAddress.getPort();
        appMasterTrackingUrl = null;
        log.info("AM Server is listening at {}:{}", appMasterHostname, appMasterRpcPort);
        appInformation.put(StatusKeys.INFO_AM_HOSTNAME, appMasterHostname);
        appInformation.set(StatusKeys.INFO_AM_RPC_PORT, appMasterRpcPort);

        log.info("Starting Yarn registry");
        registryOperations = startRegistryOperationsService();
        log.info(registryOperations.toString());

        //build the role map
        List<ProviderRole> providerRoles = new ArrayList<ProviderRole>(providerService.getRoles());
        providerRoles.addAll(SliderAMClientProvider.ROLES);

        // Start up the WebApp and track the URL for it
        certificateManager = new CertificateManager();
        MapOperations component = instanceDefinition.getAppConfOperations()
                .getComponent(SliderKeys.COMPONENT_AM);
        certificateManager.initialize(component);
        certificateManager.setPassphrase(instanceDefinition.getPassphrase());

        if (component.getOptionBool(AgentKeys.KEY_AGENT_TWO_WAY_SSL_ENABLED, false)) {
            uploadServerCertForLocalization(clustername, fs);
        }

        startAgentWebApp(appInformation, serviceConf);

        int port = getPortToRequest(instanceDefinition);

        webApp = new SliderAMWebApp(registryOperations);
        WebApps.$for(SliderAMWebApp.BASE_PATH, WebAppApi.class,
                new WebAppApiImpl(this, stateForProviders, providerService, certificateManager,
                        registryOperations),
                RestPaths.WS_CONTEXT).withHttpPolicy(serviceConf, HttpConfig.Policy.HTTP_ONLY).at(port)
                .start(webApp);
        String scheme = WebAppUtils.HTTP_PREFIX;
        appMasterTrackingUrl = scheme + appMasterHostname + ":" + webApp.port();
        WebAppService<SliderAMWebApp> webAppService = new WebAppService<SliderAMWebApp>("slider", webApp);

        webAppService.init(serviceConf);
        webAppService.start();
        addService(webAppService);

        appInformation.put(StatusKeys.INFO_AM_WEB_URL, appMasterTrackingUrl + "/");
        appInformation.set(StatusKeys.INFO_AM_WEB_PORT, webApp.port());

        // Register self with ResourceManager
        // This will start heartbeating to the RM
        // address = SliderUtils.getRmSchedulerAddress(asyncRMClient.getConfig());
        log.info("Connecting to RM at {},address tracking URL={}", appMasterRpcPort, appMasterTrackingUrl);
        amRegistrationData = asyncRMClient.registerApplicationMaster(appMasterHostname, appMasterRpcPort,
                appMasterTrackingUrl);
        Resource maxResources = amRegistrationData.getMaximumResourceCapability();
        containerMaxMemory = maxResources.getMemory();
        containerMaxCores = maxResources.getVirtualCores();
        appState.setContainerLimits(maxResources.getMemory(), maxResources.getVirtualCores());

        // build the handler for RM request/release operations; this uses
        // the max value as part of its lookup
        rmOperationHandler = new AsyncRMOperationHandler(asyncRMClient, maxResources);

        // set the RM-defined maximum cluster values
        appInformation.put(ResourceKeys.YARN_CORES, Integer.toString(containerMaxCores));
        appInformation.put(ResourceKeys.YARN_MEMORY, Integer.toString(containerMaxMemory));

        // process the initial user to obtain the set of user
        // supplied credentials (tokens were passed in by client). Remove AMRM
        // token and HDFS delegation token, the latter because we will provide an
        // up to date token for container launches (getContainerCredentials()).
        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
        Credentials credentials = currentUser.getCredentials();
        Iterator<Token<? extends TokenIdentifier>> iter = credentials.getAllTokens().iterator();
        while (iter.hasNext()) {
            Token<? extends TokenIdentifier> token = iter.next();
            log.info("Token {}", token.getKind());
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)
                    || token.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)) {
                iter.remove();
            }
        }
        // at this point this credentials map is probably clear, but leaving this
        // code to allow for future tokens...
        containerCredentials = credentials;

        if (securityEnabled) {
            secretManager.setMasterKey(amRegistrationData.getClientToAMTokenMasterKey().array());
            applicationACLs = amRegistrationData.getApplicationACLs();

            //tell the server what the ACLs are
            rpcService.getServer().refreshServiceAcl(serviceConf, new SliderAMPolicyProvider());
            // perform keytab based login to establish kerberos authenticated
            // principal.  Can do so now since AM registration with RM above required
            // tokens associated to principal
            String principal = securityConfiguration.getPrincipal();
            File localKeytabFile = securityConfiguration.getKeytabFile(instanceDefinition);
            // Now log in...
            login(principal, localKeytabFile);
            // obtain new FS reference that should be kerberos based and different
            // than the previously cached reference
            fs = getClusterFS();
        }

        // extract container list

        liveContainers = amRegistrationData.getContainersFromPreviousAttempts();

        //now validate the installation
        Configuration providerConf = providerService.loadProviderConfigurationInformation(confDir);

        providerService.initializeApplicationConfiguration(instanceDefinition, fs);

        providerService.validateApplicationConfiguration(instanceDefinition, confDir, securityEnabled);

        //determine the location for the role history data
        Path historyDir = new Path(clusterDirPath, HISTORY_DIR_NAME);

        //build the instance
        appState.buildInstance(instanceDefinition, serviceConf, providerConf, providerRoles, fs.getFileSystem(),
                historyDir, liveContainers, appInformation, new SimpleReleaseSelector());

        providerService.rebuildContainerDetails(liveContainers, instanceDefinition.getName(),
                appState.getRolePriorityMap());

        // add the AM to the list of nodes in the cluster

        appState.buildAppMasterNode(appMasterContainerID, appMasterHostname, webApp.port(),
                appMasterHostname + ":" + webApp.port());

        // build up environment variables that the AM wants set in every container
        // irrespective of provider and role.
        envVars = new HashMap<String, String>();
        if (hadoop_user_name != null) {
            envVars.put(HADOOP_USER_NAME, hadoop_user_name);
        }
    }
    String rolesTmpSubdir = appMasterContainerID.toString() + "/roles";

    String amTmpDir = globalInternalOptions.getMandatoryOption(InternalKeys.INTERNAL_AM_TMP_DIR);

    Path tmpDirPath = new Path(amTmpDir);
    Path launcherTmpDirPath = new Path(tmpDirPath, rolesTmpSubdir);
    fs.getFileSystem().mkdirs(launcherTmpDirPath);

    //launcher service
    launchService = new RoleLaunchService(actionQueues, providerService, fs, new Path(getGeneratedConfDir()),
            envVars, launcherTmpDirPath);

    deployChildService(launchService);

    appState.noteAMLaunched();

    //Give the provider access to the state, and AM
    providerService.bind(stateForProviders, actionQueues, liveContainers);
    sliderAMProvider.bind(stateForProviders, actionQueues, liveContainers);

    // chaos monkey
    maybeStartMonkey();

    // setup token renewal and expiry handling for long lived apps
    //    if (SliderUtils.isHadoopClusterSecure(getConfig())) {
    //      fsDelegationTokenManager = new FsDelegationTokenManager(actionQueues);
    //      fsDelegationTokenManager.acquireDelegationToken(getConfig());
    //    }

    // if not a secure cluster, extract the username -it will be
    // propagated to workers
    if (!UserGroupInformation.isSecurityEnabled()) {
        hadoop_user_name = System.getenv(HADOOP_USER_NAME);
        log.info(HADOOP_USER_NAME + "='{}'", hadoop_user_name);
    }
    service_user_name = RegistryUtils.currentUser();
    log.info("Registry service username ={}", service_user_name);

    // now do the registration
    registerServiceInstance(clustername, appid);

    // log the YARN and web UIs
    log.info("RM Webapp address {}", serviceConf.get(YarnConfiguration.RM_WEBAPP_ADDRESS));
    log.info("slider Webapp address {}", appMasterTrackingUrl);

    // declare the cluster initialized
    log.info("Application Master Initialization Completed");
    initCompleted.set(true);

    try {
        // start handling any scheduled events

        startQueueProcessing();

        // Start the Slider AM provider
        sliderAMProvider.start();

        // launch the real provider; this is expected to trigger a callback that
        // starts the node review process
        launchProviderService(instanceDefinition, confDir);

        //now block waiting to be told to exit the process
        waitForAMCompletionSignal();
    } catch (Exception e) {
        log.error("Exception : {}", e, e);
        onAMStop(new ActionStopSlider(e));
    }
    //shutdown time
    return finish();
}

From source file: org.apache.slider.server.appmaster.SliderAppMaster.java

License: Apache License

/**
 * Ensure that the user is generated from a keytab and has no HDFS delegation
 * tokens.
 *
 * @param user user to validate
 * @throws SliderException
 */
protected void validateLoginUser(UserGroupInformation user) throws SliderException {
    if (!user.isFromKeytab()) {
        throw new SliderException(SliderExitCodes.EXIT_BAD_STATE,
                "User is not based on a keytab in a secure deployment.");
    }
    Credentials credentials = user.getCredentials();
    Iterator<Token<? extends TokenIdentifier>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<? extends TokenIdentifier> token = iter.next();
        log.info("Token {}", token.getKind());
        if (token.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)) {
            log.info("HDFS delegation token {}.  Removing...", token);
            iter.remove();
        }
    }
}
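
A plausible call site, from within the application master after a keytab login (principal and keytab path are placeholders):

UserGroupInformation.loginUserFromKeytab("appmaster/host@EXAMPLE.COM",
        "/etc/security/keytabs/appmaster.keytab");
validateLoginUser(UserGroupInformation.getLoginUser());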