Example usage for org.apache.hadoop.fs CommonConfigurationKeysPublic HADOOP_SECURITY_AUTHORIZATION

Introduction

This page lists example usages of org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, drawn from the open source projects below.

Prototype

public static final String HADOOP_SECURITY_AUTHORIZATION = "hadoop.security.authorization";

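Before the project listings, a minimal sketch of the read-and-guard pattern that nearly every example below follows; the key and its conventional default of false come from CommonConfigurationKeysPublic, while the wrapper class and method names are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

public class ServiceAuthorizationCheck {
    // Read hadoop.security.authorization, defaulting to false as the
    // RPC servers in the examples below do before refreshing ACLs.
    public static boolean isEnabled(Configuration conf) {
        return conf.getBoolean(
                CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false);
    }
}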

Usage

From source file: com.datatorrent.stram.StreamingContainerParent.java

License: Apache License

protected void startRpcServer() {
    Configuration conf = getConfig();
    LOG.info("Config: " + conf);
    LOG.info("Listener thread count " + listenerThreadCount);
    try {
        server = new RPC.Builder(conf).setProtocol(StreamingContainerUmbilicalProtocol.class).setInstance(this)
                .setBindAddress("0.0.0.0").setPort(0).setNumHandlers(listenerThreadCount)
                .setSecretManager(tokenSecretManager).setVerbose(false).build();

        // Enable service authorization?
        if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
            //refreshServiceAcls(conf, new MRAMPolicyProvider());
            server.refreshServiceAcl(conf, new PolicyProvider() {

                @Override
                public Service[] getServices() {
                    return (new Service[] { new Service(StreamingContainerUmbilicalProtocol.class.getName(),
                            StreamingContainerUmbilicalProtocol.class) });
                }

            });
        }

        server.start();
        this.address = NetUtils.getConnectAddress(server);
        LOG.info("Container callback server listening at " + this.address);
    } catch (IOException e) {
        throw new YarnRuntimeException(e);
    }
}
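The anonymous PolicyProvider above can also be factored into a small reusable class; a sketch under the assumption that a single protocol is being protected (the class name is hypothetical, while Service's first constructor argument is the key under which the protocol's ACL is resolved):

import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;

public class SingleProtocolPolicyProvider extends PolicyProvider {
    private final Class<?> protocol;

    public SingleProtocolPolicyProvider(Class<?> protocol) {
        this.protocol = protocol;
    }

    @Override
    public Service[] getServices() {
        // One Service entry: the ACL lookup key and the protected protocol.
        return new Service[] { new Service(protocol.getName(), protocol) };
    }
}

With this in place, the call above becomes server.refreshServiceAcl(conf, new SingleProtocolPolicyProvider(StreamingContainerUmbilicalProtocol.class)).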

From source file: com.github.sakserv.minicluster.impl.KdcLocalCluster.java

License: Apache License

protected void prepareSecureConfiguration(String username) throws Exception {
    baseConf = new Configuration(false);
    SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, baseConf);
    baseConf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
    //baseConf.set(CommonConfigurationKeys.HADOOP_RPC_PROTECTION, "authentication");

    String sslConfigDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
    KeyStoreTestUtil.setupSSLConfig(baseDir, sslConfigDir, baseConf, false);

    // User
    baseConf.set("hadoop.proxyuser." + username + ".hosts", "*");
    baseConf.set("hadoop.proxyuser." + username + ".groups", "*");

    // HTTP
    String spnegoPrincipal = getKrbPrincipalWithRealm(SPNEGO_USER_NAME);
    baseConf.set("hadoop.proxyuser." + SPNEGO_USER_NAME + ".groups", "*");
    baseConf.set("hadoop.proxyuser." + SPNEGO_USER_NAME + ".hosts", "*");

    // Oozie
    String ooziePrincipal = getKrbPrincipalWithRealm(OOZIE_USER_NAME);
    baseConf.set("hadoop.proxyuser." + OOZIE_USER_NAME + ".hosts", "*");
    baseConf.set("hadoop.proxyuser." + OOZIE_USER_NAME + ".groups", "*");
    baseConf.set("hadoop.user.group.static.mapping.overrides", OOZIE_PROXIED_USER_NAME + "=oozie");
    baseConf.set("oozie.service.HadoopAccessorService.keytab.file", getKeytabForPrincipal(OOZIE_USER_NAME));
    baseConf.set("oozie.service.HadoopAccessorService.kerberos.principal", ooziePrincipal);
    baseConf.setBoolean("oozie.service.HadoopAccessorService.kerberos.enabled", true);

    // HDFS
    String hdfsPrincipal = getKrbPrincipalWithRealm(HDFS_USER_NAME);
    baseConf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
    baseConf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, getKeytabForPrincipal(HDFS_USER_NAME));
    baseConf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
    baseConf.set(DFS_DATANODE_KEYTAB_FILE_KEY, getKeytabForPrincipal(HDFS_USER_NAME));
    baseConf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
    baseConf.set(DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, getKeytabForPrincipal(SPNEGO_USER_NAME));
    baseConf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
    baseConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
    baseConf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
    baseConf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
    baseConf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
    baseConf.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, "localhost:0");
    baseConf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);

    // HBase
    String hbasePrincipal = getKrbPrincipalWithRealm(HBASE_USER_NAME);
    baseConf.set("hbase.security.authentication", "kerberos");
    baseConf.setBoolean("hbase.security.authorization", true);
    baseConf.set("hbase.regionserver.kerberos.principal", hbasePrincipal);
    baseConf.set("hbase.regionserver.keytab.file", getKeytabForPrincipal(HBASE_USER_NAME));
    baseConf.set("hbase.master.kerberos.principal", hbasePrincipal);
    baseConf.set("hbase.master.keytab.file", getKeytabForPrincipal(HBASE_USER_NAME));
    baseConf.set("hbase.coprocessor.region.classes", "org.apache.hadoop.hbase.security.token.TokenProvider");
    baseConf.set("hbase.rest.authentication.kerberos.keytab", getKeytabForPrincipal(SPNEGO_USER_NAME));
    baseConf.set("hbase.rest.authentication.kerberos.principal", spnegoPrincipal);
    baseConf.set("hbase.rest.kerberos.principal", hbasePrincipal);
    baseConf.set("hadoop.proxyuser." + HBASE_USER_NAME + ".groups", "*");
    baseConf.set("hadoop.proxyuser." + HBASE_USER_NAME + ".hosts", "*");

    //hbase.coprocessor.master.classes -> org.apache.hadoop.hbase.security.access.AccessController
    //hbase.coprocessor.region.classes -> org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,org.apache.hadoop.hbase.security.access.AccessController

    // Storm
    //String stormPrincipal = getKrbPrincipalWithRealm(STORM_USER_NAME);

    // Yarn
    String yarnPrincipal = getKrbPrincipalWithRealm(YARN_USER_NAME);
    baseConf.set("yarn.resourcemanager.keytab", getKeytabForPrincipal(YARN_USER_NAME));
    baseConf.set("yarn.resourcemanager.principal", yarnPrincipal);
    baseConf.set("yarn.nodemanager.keytab", getKeytabForPrincipal(YARN_USER_NAME));
    baseConf.set("yarn.nodemanager.principal", yarnPrincipal);

    // Mapreduce
    String mrv2Principal = getKrbPrincipalWithRealm(MRV2_USER_NAME);
    baseConf.set("mapreduce.jobhistory.keytab", getKeytabForPrincipal(MRV2_USER_NAME));
    baseConf.set("mapreduce.jobhistory.principal", mrv2Principal);
}
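After prepareSecureConfiguration has populated baseConf, a caller would typically hand the configuration to UserGroupInformation before logging in from a keytab. A hedged sketch; the principal and keytab arguments stand in for values resolved by getKrbPrincipalWithRealm and getKeytabForPrincipal above:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class SecureClusterLogin {
    // Apply the prepared secure configuration, then log in from the keytab.
    static void login(Configuration baseConf, String principal, String keytabPath)
            throws IOException {
        UserGroupInformation.setConfiguration(baseConf);
        UserGroupInformation.loginUserFromKeytab(principal, keytabPath);
    }
}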

From source file: org.apache.atlas.web.listeners.LoginProcessorIT.java

License: Apache License

@Test
public void testKerberosLogin() throws Exception {
    final File keytab = setupKDCAndPrincipals();

    LoginProcessor processor = new LoginProcessor() {
        @Override
        protected org.apache.commons.configuration.Configuration getApplicationConfiguration() {
            PropertiesConfiguration config = new PropertiesConfiguration();
            config.setProperty("atlas.authentication.method", "kerberos");
            config.setProperty("atlas.authentication.principal", "dgi@EXAMPLE.COM");
            config.setProperty("atlas.authentication.keytab", keytab.getAbsolutePath());
            return config;
        }

        @Override
        protected Configuration getHadoopConfiguration() {
            Configuration config = new Configuration(false);
            config.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
            config.setBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
            config.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL, kerberosRule);

            return config;
        }

        @Override
        protected boolean isHadoopCluster() {
            return true;
        }
    };
    processor.login();

    Assert.assertTrue(UserGroupInformation.getLoginUser().getShortUserName().endsWith("dgi"));
    Assert.assertNotNull(UserGroupInformation.getCurrentUser());
    Assert.assertTrue(UserGroupInformation.isLoginKeytabBased());
    Assert.assertTrue(UserGroupInformation.isSecurityEnabled());

    kdc.stop();

}
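The kerberosRule passed to HADOOP_SECURITY_AUTH_TO_LOCAL above follows Hadoop's auth_to_local syntax. For reference, a rule of the shape below would map dgi@EXAMPLE.COM to the short name dgi that the first assertion checks; the test's actual rule may differ:

    // Illustrative auth_to_local value (not necessarily the test's kerberosRule):
    // strip the @EXAMPLE.COM realm from single-component principals.
    String kerberosRule = "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nDEFAULT";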

From source file: org.apache.flink.yarn.AbstractYarnFlinkApplicationMasterRunner.java

License: Apache License

/**
 * The instance entry point for the YARN application master. Obtains user group
 * information and calls the main work method {@link #runApplicationMaster(org.apache.flink.configuration.Configuration)} as a
 * privileged action.
 *
 * @param args The command line arguments.
 * @return The process exit code.
 */
protected int run(String[] args) {
    try {
        LOG.debug("All environment variables: {}", ENV);

        final String yarnClientUsername = ENV.get(YarnConfigKeys.ENV_HADOOP_USER_NAME);
        Preconditions.checkArgument(yarnClientUsername != null,
                "YARN client user name environment variable {} not set", YarnConfigKeys.ENV_HADOOP_USER_NAME);

        final String currDir = ENV.get(Environment.PWD.key());
        Preconditions.checkArgument(currDir != null, "Current working directory variable (%s) not set",
                Environment.PWD.key());
        LOG.debug("Current working directory: {}", currDir);

        final String remoteKeytabPath = ENV.get(YarnConfigKeys.KEYTAB_PATH);
        LOG.debug("Remote keytab path obtained {}", remoteKeytabPath);

        final String remoteKeytabPrincipal = ENV.get(YarnConfigKeys.KEYTAB_PRINCIPAL);
        LOG.info("Remote keytab principal obtained {}", remoteKeytabPrincipal);

        String keytabPath = null;
        if (remoteKeytabPath != null) {
            File f = new File(currDir, Utils.KEYTAB_FILE_NAME);
            keytabPath = f.getAbsolutePath();
            LOG.debug("Keytab path: {}", keytabPath);
        }

        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();

        LOG.info("YARN daemon is running as: {} Yarn client user obtainer: {}", currentUser.getShortUserName(),
                yarnClientUsername);

        // Flink configuration
        final Map<String, String> dynamicProperties = FlinkYarnSessionCli
                .getDynamicProperties(ENV.get(YarnConfigKeys.ENV_DYNAMIC_PROPERTIES));
        LOG.debug("YARN dynamic properties: {}", dynamicProperties);

        final Configuration flinkConfig = createConfiguration(currDir, dynamicProperties);
        if (keytabPath != null && remoteKeytabPrincipal != null) {
            flinkConfig.setString(SecurityOptions.KERBEROS_LOGIN_KEYTAB, keytabPath);
            flinkConfig.setString(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL, remoteKeytabPrincipal);
        }

        org.apache.hadoop.conf.Configuration hadoopConfiguration = null;

        //To support Yarn Secure Integration Test Scenario
        File krb5Conf = new File(currDir, Utils.KRB5_FILE_NAME);
        if (krb5Conf.exists() && krb5Conf.canRead()) {
            String krb5Path = krb5Conf.getAbsolutePath();
            LOG.info("KRB5 Conf: {}", krb5Path);
            hadoopConfiguration = new org.apache.hadoop.conf.Configuration();
            hadoopConfiguration.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
            hadoopConfiguration.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, "true");
        }

        SecurityUtils.SecurityConfiguration sc;
        if (hadoopConfiguration != null) {
            sc = new SecurityUtils.SecurityConfiguration(flinkConfig, hadoopConfiguration);
        } else {
            sc = new SecurityUtils.SecurityConfiguration(flinkConfig);
        }

        SecurityUtils.install(sc);

        // Note that we use the "appMasterHostname" given by YARN here, to make sure
        // we use the hostnames given by YARN consistently throughout akka.
        // for akka "localhost" and "localhost.localdomain" are different actors.
        this.appMasterHostname = ENV.get(Environment.NM_HOST.key());
        Preconditions.checkArgument(appMasterHostname != null, "ApplicationMaster hostname variable %s not set",
                Environment.NM_HOST.key());
        LOG.info("YARN assigned hostname for application master: {}", appMasterHostname);

        return SecurityUtils.getInstalledContext().runSecured(new Callable<Integer>() {
            @Override
            public Integer call() throws Exception {
                return runApplicationMaster(flinkConfig);
            }
        });

    } catch (Throwable t) {
        // make sure that whatever happens ends up in the log
        LOG.error("YARN Application Master initialization failed", t);
        return INIT_ERROR_EXIT_CODE;
    }
}
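Setting hadoop.security.authentication and hadoop.security.authorization as raw strings works, but Hadoop also exposes a typed setter for the authentication method. A sketch of an equivalent setup (the helper class is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;

public class KerberosHadoopConf {
    // Equivalent to the two string-based set(...) calls above.
    static Configuration create() {
        Configuration conf = new Configuration();
        SecurityUtil.setAuthenticationMethod(
                UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
        conf.setBoolean(
                CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
        return conf;
    }
}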

From source file: org.apache.flink.yarn.YarnTaskExecutorRunner.java

License: Apache License

/**
 * The instance entry point for the YARN task executor. Obtains user group
 * information and calls the main work method {@link #runTaskExecutor(org.apache.flink.configuration.Configuration)} as a
 * privileged action.
 *
 * @param args The command line arguments.
 * @return The process exit code.
 */
protected int run(String[] args) {
    try {
        LOG.debug("All environment variables: {}", ENV);

        final String yarnClientUsername = ENV.get(YarnConfigKeys.ENV_HADOOP_USER_NAME);
        final String localDirs = ENV.get(Environment.LOCAL_DIRS.key());
        LOG.info("Current working/local Directory: {}", localDirs);

        final String currDir = ENV.get(Environment.PWD.key());
        LOG.info("Current working Directory: {}", currDir);

        final String remoteKeytabPath = ENV.get(YarnConfigKeys.KEYTAB_PATH);
        LOG.info("TM: remote keytab path obtained {}", remoteKeytabPath);

        final String remoteKeytabPrincipal = ENV.get(YarnConfigKeys.KEYTAB_PRINCIPAL);
        LOG.info("TM: remote keytab principal obtained {}", remoteKeytabPrincipal);

        final Configuration configuration = GlobalConfiguration.loadConfiguration(currDir);
        FileSystem.setDefaultScheme(configuration);

        // configure local directory
        String flinkTempDirs = configuration.getString(ConfigConstants.TASK_MANAGER_TMP_DIR_KEY, null);
        if (flinkTempDirs == null) {
            LOG.info("Setting directories for temporary file " + localDirs);
            configuration.setString(ConfigConstants.TASK_MANAGER_TMP_DIR_KEY, localDirs);
        } else {
            LOG.info("Overriding YARN's temporary file directories with those "
                    + "specified in the Flink config: " + flinkTempDirs);
        }

        // tell akka to die in case of an error
        configuration.setBoolean(ConfigConstants.AKKA_JVM_EXIT_ON_FATAL_ERROR, true);

        String keytabPath = null;
        if (remoteKeytabPath != null) {
            File f = new File(currDir, Utils.KEYTAB_FILE_NAME);
            keytabPath = f.getAbsolutePath();
            LOG.info("keytab path: {}", keytabPath);
        }

        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();

        LOG.info("YARN daemon is running as: {} Yarn client user obtainer: {}", currentUser.getShortUserName(),
                yarnClientUsername);

        org.apache.hadoop.conf.Configuration hadoopConfiguration = null;

        //To support Yarn Secure Integration Test Scenario
        File krb5Conf = new File(currDir, Utils.KRB5_FILE_NAME);
        if (krb5Conf.exists() && krb5Conf.canRead()) {
            String krb5Path = krb5Conf.getAbsolutePath();
            LOG.info("KRB5 Conf: {}", krb5Path);
            hadoopConfiguration = new org.apache.hadoop.conf.Configuration();
            hadoopConfiguration.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
            hadoopConfiguration.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, "true");
        }

        SecurityUtils.SecurityConfiguration sc;
        if (hadoopConfiguration != null) {
            sc = new SecurityUtils.SecurityConfiguration(configuration, hadoopConfiguration);
        } else {
            sc = new SecurityUtils.SecurityConfiguration(configuration);
        }

        if (keytabPath != null && remoteKeytabPrincipal != null) {
            configuration.setString(SecurityOptions.KERBEROS_LOGIN_KEYTAB, keytabPath);
            configuration.setString(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL, remoteKeytabPrincipal);
        }

        SecurityUtils.install(sc);

        return SecurityUtils.getInstalledContext().runSecured(new Callable<Integer>() {
            @Override
            public Integer call() throws Exception {
                return runTaskExecutor(configuration);
            }
        });

    } catch (Throwable t) {
        // make sure that whatever happens ends up in the log
        LOG.error("YARN TaskExecutor initialization failed", t);
        return INIT_ERROR_EXIT_CODE;
    }
}

From source file: org.apache.slider.common.tools.SliderUtils.java

License: Apache License

/**
 * Turn on security. This is setup to only run once.
 * @param conf configuration to build up security
 * @return true if security was initialized in this call
 * @throws IOException IO/Net problems
 * @throws BadConfigException if the configuration and system state are inconsistent
 */
public static boolean initProcessSecurity(Configuration conf) throws IOException, BadConfigException {

    if (processSecurityAlreadyInitialized.compareAndSet(true, true)) {
        //security is already inited
        return false;
    }

    log.info("JVM initialized into secure mode with kerberos realm {}", SliderUtils.getKerberosRealm());
    // this gets UGI to reset its previous world view (i.e. simple auth)
    //security
    log.debug("java.security.krb5.realm={}", System.getProperty(JAVA_SECURITY_KRB5_REALM, ""));
    log.debug("java.security.krb5.kdc={}", System.getProperty(JAVA_SECURITY_KRB5_KDC, ""));
    log.debug("hadoop.security.authentication={}",
            conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION));
    log.debug("hadoop.security.authorization={}",
            conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION));
    /*    SecurityUtil.setAuthenticationMethod(
            UserGroupInformation.AuthenticationMethod.KERBEROS, conf);*/
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation authUser = UserGroupInformation.getCurrentUser();
    log.debug("Authenticating as " + authUser.toString());
    log.debug("Login user is {}", UserGroupInformation.getLoginUser());
    if (!UserGroupInformation.isSecurityEnabled()) {
        throw new BadConfigException("Although secure mode is enabled,"
                + "the application has already set up its user as an insecure entity %s", authUser);
    }
    if (authUser.getAuthenticationMethod() == UserGroupInformation.AuthenticationMethod.SIMPLE) {
        throw new BadConfigException("Auth User is not Kerberized %s"
                + " -security has already been set up with the wrong authentication method. "
                + "This can occur if a file system has already been created prior to the loading of "
                + "the security configuration.", authUser);

    }

    SliderUtils.verifyPrincipalSet(conf, YarnConfiguration.RM_PRINCIPAL);
    SliderUtils.verifyPrincipalSet(conf, DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY);
    return true;
}
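Note the guard at the top: compareAndSet(true, true) only reports whether the flag was already true and never flips it from false, so nothing in this method actually latches the "already initialized" state. A run-once guard is conventionally written as in this sketch (not the Slider code):

import java.util.concurrent.atomic.AtomicBoolean;

public class InitOnce {
    private static final AtomicBoolean initialized = new AtomicBoolean(false);

    // Returns true only for the single caller that flips the flag first.
    static boolean tryInit() {
        return initialized.compareAndSet(false, true);
    }
}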

From source file: org.apache.slider.server.appmaster.SliderAppMaster.java

License: Apache License

/**
 * Verify that if the cluster has authorization enabled, the ACLs are set.
 * @throws BadConfigException if authorization is set without any ACL
 */
private void verifyIPCAccess() throws BadConfigException {
    boolean authorization = getConfig().getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
            false);
    String acls = getConfig().get(SliderXmlConfKeys.KEY_PROTOCOL_ACL);
    if (authorization && SliderUtils.isUnset(acls)) {
        throw new BadConfigException("Application has IPC authorization enabled in "
                + CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION + " but no ACLs in "
                + SliderXmlConfKeys.KEY_PROTOCOL_ACL);
    }
}
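verifyIPCAccess enforces that the authorization switch and the protocol ACL travel together. A sketch of a configuration that would pass the check, assuming CommonConfigurationKeysPublic and SliderXmlConfKeys are in scope; "user1,user2 group1" follows Hadoop's "users groups" ACL format:

// Hypothetical setup that satisfies verifyIPCAccess: the flag and a
// non-empty ACL are set together.
void enableAuthorizedIpc(Configuration conf) {
    conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
    conf.set(SliderXmlConfKeys.KEY_PROTOCOL_ACL, "user1,user2 group1");
}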

From source file: org.apache.tez.dag.api.client.DAGClientServer.java

License: Apache License

@Override
public void serviceStart() {
    try {
        Configuration conf = getConfig();
        InetSocketAddress addr = new InetSocketAddress(0);

        DAGClientAMProtocolBlockingPBServerImpl service = new DAGClientAMProtocolBlockingPBServerImpl(
                realInstance);

        BlockingService blockingService = DAGClientAMProtocol.newReflectiveBlockingService(service);

        int numHandlers = conf.getInt(TezConfiguration.TEZ_AM_CLIENT_THREAD_COUNT,
                TezConfiguration.TEZ_AM_CLIENT_THREAD_COUNT_DEFAULT);

        server = createServer(DAGClientAMProtocolBlockingPB.class, addr, conf, numHandlers, blockingService,
                TezConfiguration.TEZ_AM_CLIENT_AM_PORT_RANGE);

        // Enable service authorization?
        if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
            refreshServiceAcls(conf, new TezAMPolicyProvider());
        }

        server.start();
        InetSocketAddress serverBindAddress = NetUtils.getConnectAddress(server);
        this.bindAddress = NetUtils.createSocketAddrForHost(
                serverBindAddress.getAddress().getCanonicalHostName(), serverBindAddress.getPort());
        LOG.info("Instantiated DAGClientRPCServer at " + bindAddress);
    } catch (Exception e) {
        LOG.error("Failed to start DAGClientServer: ", e);
        throw new TezUncheckedException(e);
    }
}
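refreshServiceAcls here is a private helper of the Tez service; on a raw org.apache.hadoop.ipc.Server the equivalent public call is server.refreshServiceAcl(conf, provider), as the StreamingContainerParent example at the top of this page shows.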

From source file: org.apache.tez.dag.app.TaskAttemptListenerImpTezDag.java

License: Apache License

protected void startRpcServer() {
    Configuration conf = getConfig();
    if (!conf.getBoolean(TezConfiguration.TEZ_LOCAL_MODE, TezConfiguration.TEZ_LOCAL_MODE_DEFAULT)) {
        try {
            server = new RPC.Builder(conf).setProtocol(TezTaskUmbilicalProtocol.class).setBindAddress("0.0.0.0")
                    .setPort(0).setInstance(this)
                    .setNumHandlers(conf.getInt(TezConfiguration.TEZ_AM_TASK_LISTENER_THREAD_COUNT,
                            TezConfiguration.TEZ_AM_TASK_LISTENER_THREAD_COUNT_DEFAULT))
                    .setSecretManager(jobTokenSecretManager).build();

            // Enable service authorization?
            if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
                refreshServiceAcls(conf, new TezAMPolicyProvider());
            }

            server.start();
            this.address = NetUtils.getConnectAddress(server);
        } catch (IOException e) {
            throw new TezUncheckedException(e);
        }
    } else {
        try {
            this.address = new InetSocketAddress(InetAddress.getLocalHost(), 0);
        } catch (UnknownHostException e) {
            throw new TezUncheckedException(e);
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Not starting TaskAttemptListener RPC in LocalMode");
        }
    }
}

From source file: org.apache.tez.dag.app.TezTaskCommunicatorImpl.java

License: Apache License

protected void startRpcServer() {
    try {
        JobTokenSecretManager jobTokenSecretManager = new JobTokenSecretManager();
        jobTokenSecretManager.addTokenForJob(tokenIdentifier, sessionToken);

        server = new RPC.Builder(conf).setProtocol(TezTaskUmbilicalProtocol.class).setBindAddress("0.0.0.0")
                .setPort(0).setInstance(taskUmbilical)
                .setNumHandlers(conf.getInt(TezConfiguration.TEZ_AM_TASK_LISTENER_THREAD_COUNT,
                        TezConfiguration.TEZ_AM_TASK_LISTENER_THREAD_COUNT_DEFAULT))
                .setPortRangeConfig(TezConfiguration.TEZ_AM_TASK_AM_PORT_RANGE)
                .setSecretManager(jobTokenSecretManager).build();

        // Enable service authorization?
        if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
            refreshServiceAcls(conf, new TezAMPolicyProvider());
        }

        server.start();
        InetSocketAddress serverBindAddress = NetUtils.getConnectAddress(server);
        this.address = NetUtils.createSocketAddrForHost(serverBindAddress.getAddress().getCanonicalHostName(),
                serverBindAddress.getPort());
        LOG.info("Instantiated TezTaskCommunicator RPC at " + this.address);
    } catch (IOException e) {
        throw new TezUncheckedException(e);
    }
}