Example usage for org.apache.hadoop.security UserGroupInformation isSecurityEnabled

List of usage examples for org.apache.hadoop.security UserGroupInformation isSecurityEnabled

Introduction

On this page you can find example usage for org.apache.hadoop.security.UserGroupInformation.isSecurityEnabled().

Prototype

public static boolean isSecurityEnabled() 

Document

Determines whether UserGroupInformation is using Kerberos to establish user identities or is relying on simple authentication.
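
For reference, a minimal, self-contained sketch of how the check is typically used: point UserGroupInformation at a Hadoop Configuration, then branch on isSecurityEnabled(). The class name is illustrative, and the Configuration is assumed to pick up core-site.xml from the classpath.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class SecurityCheckExample {
    public static void main(String[] args) throws IOException {
        // hadoop.security.authentication (simple or kerberos) from core-site.xml
        // is what isSecurityEnabled() ultimately reflects.
        Configuration conf = new Configuration();
        UserGroupInformation.setConfiguration(conf);

        if (UserGroupInformation.isSecurityEnabled()) {
            System.out.println("Kerberos security is enabled; a keytab or ticket login is required "
                    + "before accessing HDFS/YARN.");
        } else {
            System.out.println("Simple authentication; running as "
                    + UserGroupInformation.getCurrentUser().getUserName());
        }
    }
}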

Usage

From source file: azkaban.security.HadoopSecurityManager_H_1_0.java

License: Apache License

private HadoopSecurityManager_H_1_0(Props props) throws HadoopSecurityManagerException, IOException {

    // for now, assume the same/compatible native library, the same/compatible
    // hadoop-core jar
    String hadoopHome = props.getString("hadoop.home", null);
    String hadoopConfDir = props.getString("hadoop.conf.dir", null);

    if (hadoopHome == null) {
        hadoopHome = System.getenv("HADOOP_HOME");
    }
    if (hadoopConfDir == null) {
        hadoopConfDir = System.getenv("HADOOP_CONF_DIR");
    }

    List<URL> resources = new ArrayList<URL>();
    if (hadoopConfDir != null) {
        logger.info("Using hadoop config found in " + new File(hadoopConfDir).toURI().toURL());
        resources.add(new File(hadoopConfDir).toURI().toURL());
    } else if (hadoopHome != null) {
        logger.info("Using hadoop config found in " + new File(hadoopHome, "conf").toURI().toURL());
        resources.add(new File(hadoopHome, "conf").toURI().toURL());
    } else {
        logger.info("HADOOP_HOME not set, using default hadoop config.");
    }

    ucl = new URLClassLoader(resources.toArray(new URL[resources.size()]));

    conf = new Configuration();
    conf.setClassLoader(ucl);

    if (props.containsKey("fs.hdfs.impl.disable.cache")) {
        logger.info("Setting fs.hdfs.impl.disable.cache to " + props.get("fs.hdfs.impl.disable.cache"));
        conf.setBoolean("fs.hdfs.impl.disable.cache", Boolean.valueOf(props.get("fs.hdfs.impl.disable.cache")));
    }

    logger.info("hadoop.security.authentication set to " + conf.get("hadoop.security.authentication"));
    logger.info("hadoop.security.authorization set to " + conf.get("hadoop.security.authorization"));
    logger.info("DFS name " + conf.get("fs.default.name"));

    UserGroupInformation.setConfiguration(conf);

    securityEnabled = UserGroupInformation.isSecurityEnabled();
    if (securityEnabled) {
        logger.info("The Hadoop cluster has enabled security");
        shouldProxy = true;
        try {
            keytabLocation = props.getString(PROXY_KEYTAB_LOCATION);
            keytabPrincipal = props.getString(PROXY_USER);
        } catch (UndefinedPropertyException e) {
            throw new HadoopSecurityManagerException(e.getMessage());
        }

        // try login
        try {
            if (loginUser == null) {
                logger.info("No login user. Creating login user");
                logger.info("Logging with " + keytabPrincipal + " and " + keytabLocation);
                UserGroupInformation.loginUserFromKeytab(keytabPrincipal, keytabLocation);
                loginUser = UserGroupInformation.getLoginUser();
                logger.info("Logged in with user " + loginUser);
            } else {
                logger.info("loginUser (" + loginUser + ") already created, refreshing tgt.");
                loginUser.checkTGTAndReloginFromKeytab();
            }
        } catch (IOException e) {
            throw new HadoopSecurityManagerException("Failed to login with kerberos ", e);
        }

    }

    userUgiMap = new ConcurrentHashMap<String, UserGroupInformation>();

    logger.info("Hadoop Security Manager Initiated");
}

From source file: azkaban.security.HadoopSecurityManager_H_2_0.java

License: Apache License

private HadoopSecurityManager_H_2_0(Props props) throws HadoopSecurityManagerException, IOException {

    // for now, assume the same/compatible native library, the same/compatible
    // hadoop-core jar
    String hadoopHome = props.getString("hadoop.home", null);
    String hadoopConfDir = props.getString("hadoop.conf.dir", null);

    if (hadoopHome == null) {
        hadoopHome = System.getenv("HADOOP_HOME");
    }
    if (hadoopConfDir == null) {
        hadoopConfDir = System.getenv("HADOOP_CONF_DIR");
    }

    List<URL> resources = new ArrayList<URL>();
    URL urlToHadoop = null;
    if (hadoopConfDir != null) {
        urlToHadoop = new File(hadoopConfDir).toURI().toURL();
        logger.info("Using hadoop config found in " + urlToHadoop);
        resources.add(urlToHadoop);
    } else if (hadoopHome != null) {
        urlToHadoop = new File(hadoopHome, "conf").toURI().toURL();
        logger.info("Using hadoop config found in " + urlToHadoop);
        resources.add(urlToHadoop);
    } else {
        logger.info("HADOOP_HOME not set, using default hadoop config.");
    }

    ucl = new URLClassLoader(resources.toArray(new URL[resources.size()]));

    conf = new Configuration();
    conf.setClassLoader(ucl);

    if (props.containsKey(FS_HDFS_IMPL_DISABLE_CACHE)) {
        logger.info("Setting " + FS_HDFS_IMPL_DISABLE_CACHE + " to " + props.get(FS_HDFS_IMPL_DISABLE_CACHE));
        conf.setBoolean(FS_HDFS_IMPL_DISABLE_CACHE, Boolean.valueOf(props.get(FS_HDFS_IMPL_DISABLE_CACHE)));
    }

    logger.info(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION + ": "
            + conf.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION));
    logger.info(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION + ":  "
            + conf.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION));
    logger.info(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY + ": "
            + conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY));

    UserGroupInformation.setConfiguration(conf);

    securityEnabled = UserGroupInformation.isSecurityEnabled();
    if (securityEnabled) {
        logger.info("The Hadoop cluster has enabled security");
        shouldProxy = true;
        try {

            keytabLocation = props.getString(AZKABAN_KEYTAB_LOCATION);
            keytabPrincipal = props.getString(AZKABAN_PRINCIPAL);
        } catch (UndefinedPropertyException e) {
            throw new HadoopSecurityManagerException(e.getMessage());
        }

        // try login
        try {
            if (loginUser == null) {
                logger.info("No login user. Creating login user");
                logger.info("Using principal from " + keytabPrincipal + " and " + keytabLocation);
                UserGroupInformation.loginUserFromKeytab(keytabPrincipal, keytabLocation);
                loginUser = UserGroupInformation.getLoginUser();
                logger.info("Logged in with user " + loginUser);
            } else {
                logger.info("loginUser (" + loginUser + ") already created, refreshing tgt.");
                loginUser.checkTGTAndReloginFromKeytab();
            }
        } catch (IOException e) {
            throw new HadoopSecurityManagerException("Failed to login with kerberos ", e);
        }

    }

    userUgiMap = new ConcurrentHashMap<String, UserGroupInformation>();

    logger.info("Hadoop Security Manager initialized");
}

From source file: azkaban.storage.HdfsAuth.java

License: Apache License

@Inject
public HdfsAuth(final Props props, final Configuration conf) {
    UserGroupInformation.setConfiguration(conf);
    this.isSecurityEnabled = UserGroupInformation.isSecurityEnabled();
    if (this.isSecurityEnabled) {
        log.info("The Hadoop cluster has enabled security");
        this.keytabPath = requireNonNull(props.getString(AZKABAN_KEYTAB_PATH));
        this.keytabPrincipal = requireNonNull(props.getString(AZKABAN_KERBEROS_PRINCIPAL));
    }
}
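
The constructor above only records the keytab principal and path when security is enabled; the login itself happens later. A hypothetical helper along these lines (not part of the quoted class; names are illustrative) shows the login/relogin step that typically follows:

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

public class KeytabLoginSketch {
    /**
     * Hypothetical helper: log in from a keytab the first time, and refresh the
     * TGT on later calls. Only meaningful when isSecurityEnabled() returns true.
     */
    static void loginOrRefresh(String keytabPrincipal, String keytabPath) throws IOException {
        if (!UserGroupInformation.isSecurityEnabled()) {
            return; // simple authentication: nothing to do
        }
        UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
        if (loginUser.hasKerberosCredentials()) {
            // Already logged in; relogin from the keytab only if the TGT is old enough.
            loginUser.checkTGTAndReloginFromKeytab();
        } else {
            UserGroupInformation.loginUserFromKeytab(keytabPrincipal, keytabPath);
        }
    }
}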

From source file: cn.edu.buaa.act.petuumOnYarn.Client.java

License: Apache License

/**
 * Main run function for the client
 * 
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {

    LOG.info("Running Client");
    yarnClient.start();
    String[] s = conf.getStrings(YarnConfiguration.RM_ADDRESS);
    for (String ss : s) {
        LOG.info("RM address: " + ss);
    }
    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers() + ", nodeIdHost" + node.getNodeId().getHost());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capabililty of resources in this cluster " + maxVCores);

    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value."
                + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();

    appContext.setKeepContainersAcrossApplicationAttempts(keepContainers);
    appContext.setApplicationName(appName);

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of
    // the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(conf);
    YarnUtil.copyAndAddToLocalResources(fs, appMasterJar, petuumHDFSPathPrefix, appMasterJarPath,
            localResources, null);
    scriptHDFSPath = YarnUtil.copyToHDFS(fs, scriptPath, petuumHDFSPathPrefix, launchPath, null);
    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        YarnUtil.copyAndAddToLocalResources(fs, log4jPropFile, petuumHDFSPathPrefix, log4jPath, localResources,
                null);
    }

    // Set the env variables to be setup in the env where the application
    // master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$$())
            .append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) {
        classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--container_vcores " + String.valueOf(containerVirtualCores));
    vargs.add("--num_nodes " + String.valueOf(numNodes));
    vargs.add("--start_port " + String.valueOf(startPort));
    vargs.add("--priority " + String.valueOf(workerPriority));
    vargs.add("--script_hdfs_path " + scriptHDFSPath);

    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }
    if (debugFlag) {
        vargs.add("--debug");
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(localResources, env, commands, null,
            null, null);

    // Set up resource type requirements
    // For now, both memory and vcores are supported, so we set memory and
    // vcores requirements
    Resource capability = Resource.newInstance(amMemory, amVCores);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        // Note: Credentials class is marked as LimitedPrivate for HDFS and
        // MapReduce
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Priority.newInstance(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp =
    // applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on
    // success
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    // Monitor the application
    currentTime = System.currentTimeMillis();
    LOG.info("submit AM in " + (currentTime - startTime) + "ms");
    return monitorApplication(appId);
}

From source file: co.cask.cdap.app.runtime.spark.SparkProgramRunner.java

License: Apache License

@Override
public ProgramController run(Program program, ProgramOptions options) {
    // Get the RunId first. It is used for the creation of the ClassLoader closing thread.
    Arguments arguments = options.getArguments();
    RunId runId = RunIds.fromString(arguments.getOption(ProgramOptionConstants.RUN_ID));

    Deque<Closeable> closeables = new LinkedList<>();

    try {
        // Extract and verify parameters
        ApplicationSpecification appSpec = program.getApplicationSpecification();
        Preconditions.checkNotNull(appSpec, "Missing application specification.");

        ProgramType processorType = program.getType();
        Preconditions.checkNotNull(processorType, "Missing processor type.");
        Preconditions.checkArgument(processorType == ProgramType.SPARK,
                "Only Spark process type is supported.");

        SparkSpecification spec = appSpec.getSpark().get(program.getName());
        Preconditions.checkNotNull(spec, "Missing SparkSpecification for %s", program.getName());

        String host = options.getArguments().getOption(ProgramOptionConstants.HOST);
        Preconditions.checkArgument(host != null, "No hostname is provided");

        // Get the WorkflowProgramInfo if it is started by Workflow
        WorkflowProgramInfo workflowInfo = WorkflowProgramInfo.create(arguments);
        DatasetFramework programDatasetFramework = workflowInfo == null ? datasetFramework
                : NameMappedDatasetFramework.createFromWorkflowProgramInfo(datasetFramework, workflowInfo,
                        appSpec);

        // Setup dataset framework context, if required
        if (programDatasetFramework instanceof ProgramContextAware) {
            Id.Program programId = program.getId();
            ((ProgramContextAware) programDatasetFramework).initContext(new Id.Run(programId, runId.getId()));
        }

        PluginInstantiator pluginInstantiator = createPluginInstantiator(options, program.getClassLoader());
        if (pluginInstantiator != null) {
            closeables.addFirst(pluginInstantiator);
        }

        SparkRuntimeContext runtimeContext = new SparkRuntimeContext(new Configuration(hConf), program, runId,
                options.getUserArguments().asMap(), txClient, programDatasetFramework, discoveryServiceClient,
                metricsCollectionService, streamAdmin, workflowInfo, pluginInstantiator);
        closeables.addFirst(runtimeContext);

        Spark spark;
        try {
            spark = new InstantiatorFactory(false).get(TypeToken.of(program.<Spark>getMainClass())).create();

            // Fields injection
            Reflections.visit(spark, spark.getClass(), new PropertyFieldSetter(spec.getProperties()),
                    new DataSetFieldSetter(runtimeContext.getDatasetCache()),
                    new MetricsFieldSetter(runtimeContext));
        } catch (Exception e) {
            LOG.error("Failed to instantiate Spark class for {}", spec.getClassName(), e);
            throw Throwables.propagate(e);
        }

        SparkSubmitter submitter = SparkRuntimeContextConfig.isLocal(hConf) ? new LocalSparkSubmitter()
                : new DistributedSparkSubmitter(hConf, host, runtimeContext,
                        options.getArguments().getOption(Constants.AppFabric.APP_SCHEDULER_QUEUE));

        Service sparkRuntimeService = new SparkRuntimeService(cConf, spark, getPluginArchive(options),
                runtimeContext, submitter, host);

        sparkRuntimeService.addListener(createRuntimeServiceListener(program.getId(), runId, arguments,
                options.getUserArguments(), closeables, store), Threads.SAME_THREAD_EXECUTOR);
        ProgramController controller = new SparkProgramController(sparkRuntimeService, runtimeContext);

        LOG.info("Starting Spark Job: {}", runtimeContext);
        if (SparkRuntimeContextConfig.isLocal(hConf) || UserGroupInformation.isSecurityEnabled()) {
            sparkRuntimeService.start();
        } else {
            ProgramRunners.startAsUser(cConf.get(Constants.CFG_HDFS_USER), sparkRuntimeService);
        }
        return controller;
    } catch (Throwable t) {
        closeAll(closeables);
        throw Throwables.propagate(t);
    }
}

From source file: co.cask.cdap.common.guice.FileContextProvider.java

License: Apache License

private UserGroupInformation createUGI() {
    String hdfsUser = cConf.get(Constants.CFG_HDFS_USER);
    try {
        if (hdfsUser == null || UserGroupInformation.isSecurityEnabled()) {
            if (hdfsUser != null) {
                LOG.debug("Ignoring configuration {}={}, running on secure Hadoop", Constants.CFG_HDFS_USER,
                        hdfsUser);
            }
            LOG.debug("Getting filesystem for current user");
            return UserGroupInformation.getCurrentUser();
        } else {
            LOG.debug("Getting filesystem for user {}", hdfsUser);
            return UserGroupInformation.createRemoteUser(hdfsUser);
        }
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
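
A UGI obtained this way is typically consumed with doAs so that filesystem calls run as that user. Below is a sketch of such a call site (class and method names are illustrative, not part of the quoted provider):

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.security.UserGroupInformation;

public class FileContextSketch {
    /**
     * Illustrative: build a FileContext as either the current (Kerberos) user or a
     * named remote user, mirroring the createUGI() logic above.
     */
    static FileContext fileContextFor(final Configuration conf, String hdfsUser)
            throws IOException, InterruptedException {
        UserGroupInformation ugi = (hdfsUser == null || UserGroupInformation.isSecurityEnabled())
                ? UserGroupInformation.getCurrentUser()
                : UserGroupInformation.createRemoteUser(hdfsUser);
        return ugi.doAs(new PrivilegedExceptionAction<FileContext>() {
            @Override
            public FileContext run() throws IOException {
                return FileContext.getFileContext(conf);
            }
        });
    }
}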

From source file: co.cask.cdap.common.kerberos.SecurityUtil.java

License: Apache License

public static void loginForMasterService(CConfiguration cConf) throws IOException, LoginException {
    String principal = SecurityUtil
            .expandPrincipal(cConf.get(Constants.Security.CFG_CDAP_MASTER_KRB_PRINCIPAL));
    String keytabPath = cConf.get(Constants.Security.CFG_CDAP_MASTER_KRB_KEYTAB_PATH);

    if (UserGroupInformation.isSecurityEnabled()) {
        LOG.info("Logging in as: principal={}, keytab={}", principal, keytabPath);
        UserGroupInformation.loginUserFromKeytab(principal, keytabPath);

        long delaySec = cConf.getLong(Constants.Security.KERBEROS_KEYTAB_RELOGIN_INTERVAL);
        Executors.newSingleThreadScheduledExecutor(Threads.createDaemonThreadFactory("Kerberos keytab renewal"))
                .scheduleWithFixedDelay(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
                        } catch (IOException e) {
                            LOG.error("Failed to relogin from keytab", e);
                        }
                    }
                }, delaySec, delaySec, TimeUnit.SECONDS);
    }
}

From source file: co.cask.cdap.common.security.YarnTokenUtils.java

License: Apache License

/**
 * Gets a Yarn delegation token and stores it in the given Credentials.
 *
 * @return the same Credentials instance as the one given in parameter.
 */
public static Credentials obtainToken(YarnConfiguration configuration, Credentials credentials) {
    if (!UserGroupInformation.isSecurityEnabled()) {
        return credentials;
    }

    try {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(configuration);
        yarnClient.start();

        try {
            Text renewer = new Text(UserGroupInformation.getCurrentUser().getShortUserName());
            org.apache.hadoop.yarn.api.records.Token rmDelegationToken = yarnClient
                    .getRMDelegationToken(renewer);

            // TODO: The following logic should be replaced with call to ClientRMProxy.getRMDelegationTokenService after
            // CDAP-4825 is resolved
            List<String> services = new ArrayList<>();
            if (HAUtil.isHAEnabled(configuration)) {
                // If HA is enabled, we need to enumerate all RM hosts
                // and add the corresponding service name to the token service
                // Copy the yarn conf since we need to modify it to get the RM addresses
                YarnConfiguration yarnConf = new YarnConfiguration(configuration);
                for (String rmId : HAUtil.getRMHAIds(configuration)) {
                    yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
                    InetSocketAddress address = yarnConf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
                            YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);
                    services.add(SecurityUtil.buildTokenService(address).toString());
                }
            } else {
                services.add(SecurityUtil.buildTokenService(YarnUtils.getRMAddress(configuration)).toString());
            }

            Token<TokenIdentifier> token = ConverterUtils.convertFromYarn(rmDelegationToken,
                    (InetSocketAddress) null);
            token.setService(new Text(Joiner.on(',').join(services)));
            credentials.addToken(new Text(token.getService()), token);

            // OK to log, it won't log the credential, only information about the token.
            LOG.info("Added RM delegation token: {}", token);

        } finally {
            yarnClient.stop();
        }

        return credentials;
    } catch (Exception e) {
        LOG.error("Failed to get secure token for Yarn.", e);
        throw Throwables.propagate(e);
    }
}

From source file: co.cask.cdap.data.runtime.main.MasterServiceMain.java

License: Apache License

/**
 * Creates an unstarted {@link LeaderElection} for the master service.
 */
private LeaderElection createLeaderElection() {
    String electionPath = "/election/" + Constants.Service.MASTER_SERVICES;
    return new LeaderElection(zkClient, electionPath, new ElectionHandler() {

        private final AtomicReference<TwillController> controller = new AtomicReference<>();
        private final List<Service> services = new ArrayList<>();
        private Cancellable secureStoreUpdateCancellable;
        // Executor for re-running master twill app if it gets terminated.
        private ScheduledExecutorService executor;
        private TwillRunnerService twillRunner;

        @Override
        public void leader() {
            LOG.info("Became leader for master services");

            final Injector injector = baseInjector.createChildInjector(new TwillModule(),
                    new AppFabricServiceRuntimeModule().getDistributedModules(),
                    new ProgramRunnerRuntimeModule().getDistributedModules());

            twillRunner = injector.getInstance(TwillRunnerService.class);
            twillRunner.start();

            // Schedule secure store update.
            if (User.isHBaseSecurityEnabled(hConf) || UserGroupInformation.isSecurityEnabled()) {
                secureStoreUpdateCancellable = twillRunner.scheduleSecureStoreUpdate(secureStoreUpdater, 30000L,
                        secureStoreUpdater.getUpdateInterval(), TimeUnit.MILLISECONDS);
            }

            // Create app-fabric and dataset services
            services.add(new RetryOnStartFailureService(new Supplier<Service>() {
                @Override
                public Service get() {
                    return injector.getInstance(DatasetService.class);
                }
            }, RetryStrategies.exponentialDelay(200, 5000, TimeUnit.MILLISECONDS)));
            services.add(injector.getInstance(AppFabricServer.class));

            executor = Executors
                    .newSingleThreadScheduledExecutor(Threads.createDaemonThreadFactory("master-runner"));

            // Start monitoring twill application
            monitorTwillApplication(executor, 0, controller, twillRunner);

            // Start app-fabric and dataset services
            for (Service service : services) {
                LOG.info("Starting service in master: {}", service);
                try {
                    service.startAndWait();
                } catch (Throwable t) {
                    // shut down the executor and stop the twill app,
                    // then throw an exception to cause the leader election service to stop
                    // the leader election's listener will then shut down the master
                    stop(true);
                    throw new RuntimeException(
                            String.format("Unable to start service %s: %s", service, t.getMessage()));
                }
            }
            LOG.info("CDAP Master started successfully.");
        }

        @Override
        public void follower() {
            LOG.info("Became follower for master services");
            stop(stopped);
        }

        private void stop(boolean shouldTerminateApp) {
            // Shutdown the retry executor so that no re-run of the twill app will be attempted
            if (executor != null) {
                executor.shutdownNow();
            }
            // Stop secure store update
            if (secureStoreUpdateCancellable != null) {
                secureStoreUpdateCancellable.cancel();
            }
            // If the master process has been explicitly stopped, stop the twill application as well.
            if (shouldTerminateApp) {
                LOG.info("Stopping master twill application");
                TwillController twillController = controller.get();
                if (twillController != null) {
                    Futures.getUnchecked(twillController.terminate());
                }
            }
            // Stop local services last since DatasetService is running locally
            // and remote services need it to preserve states.
            for (Service service : Lists.reverse(services)) {
                // service may not be running if there was an error in startup
                if (service.isRunning()) {
                    LOG.info("Stopping service in master: {}", service);
                    stopQuietly(service);
                }
            }
            services.clear();

            if (twillRunner != null) {
                stopQuietly(twillRunner);
            }
        }
    });
}

From source file: co.cask.cdap.data.runtime.main.MasterServiceMain.java

License: Apache License

/**
 * Starts the {@link TwillApplication} for the master services.
 *
 * @return The {@link TwillController} for the application.
 */
private TwillController startTwillApplication(TwillRunnerService twillRunner) {
    try {
        // Create a temp dir for the run to hold temporary files created to run the application
        Path tempPath = Files.createDirectories(
                new File(cConf.get(Constants.CFG_LOCAL_DATA_DIR), cConf.get(Constants.AppFabric.TEMP_DIR))
                        .toPath());
        final Path runDir = Files.createTempDirectory(tempPath, "master");
        try {
            Path cConfFile = saveCConf(cConf, runDir.resolve("cConf.xml"));
            Path hConfFile = saveHConf(hConf, runDir.resolve("hConf.xml"));
            Path logbackFile = saveLogbackConf(runDir.resolve("logback.xml"));

            TwillPreparer preparer = twillRunner.prepare(new MasterTwillApplication(cConf, cConfFile.toFile(),
                    hConfFile.toFile(), getSystemServiceInstances()));

            if (cConf.getBoolean(Constants.COLLECT_CONTAINER_LOGS)) {
                if (LOG instanceof ch.qos.logback.classic.Logger) {
                    preparer.addLogHandler(new LogHandler() {
                        @Override
                        public void onLog(LogEntry entry) {
                            ch.qos.logback.classic.Logger logger = (ch.qos.logback.classic.Logger) LOG;
                            logger.callAppenders(new TwillLogEntryAdapter(entry));
                        }
                    });
                } else {
                    LOG.warn(
                            "Unsupported logger binding ({}) for container log collection. Falling back to System.out.",
                            LOG.getClass().getName());
                    preparer.addLogHandler(new PrinterLogHandler(new PrintWriter(System.out)));
                }
            } else {
                preparer.addJVMOptions("-Dtwill.disable.kafka=true");
            }

            // Add logback xml
            if (Files.exists(logbackFile)) {
                preparer.withResources().withResources(logbackFile.toUri());
            }

            // Add yarn queue name if defined
            String queueName = cConf.get(Constants.Service.SCHEDULER_QUEUE);
            if (queueName != null) {
                LOG.info("Setting scheduler queue to {} for master services", queueName);
                preparer.setSchedulerQueue(queueName);
            }

            // Add HBase dependencies
            preparer.withDependencies(baseInjector.getInstance(HBaseTableUtil.class).getClass());

            // Add secure tokens
            if (User.isHBaseSecurityEnabled(hConf) || UserGroupInformation.isSecurityEnabled()) {
                // TokenSecureStoreUpdater.update() ignores parameters
                preparer.addSecureStore(secureStoreUpdater.update(null, null));
            }

            // add hadoop classpath to application classpath and exclude hadoop classes from bundle jar.
            String yarnAppClassPath = hConf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
                    Joiner.on(",").join(YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH));

            preparer.withApplicationClassPaths(Splitter.on(",").trimResults().split(yarnAppClassPath))
                    .withBundlerClassAcceptor(new HadoopClassExcluder());

            // Add explore dependencies
            if (cConf.getBoolean(Constants.Explore.EXPLORE_ENABLED)) {
                prepareExploreContainer(preparer);
            }

            // Add a listener to delete temp files when application started/terminated.
            TwillController controller = preparer.start();
            Runnable cleanup = new Runnable() {
                @Override
                public void run() {
                    try {
                        File dir = runDir.toFile();
                        if (dir.isDirectory()) {
                            DirUtils.deleteDirectoryContents(dir);
                        }
                    } catch (IOException e) {
                        LOG.warn("Failed to cleanup directory {}", runDir, e);
                    }
                }
            };
            controller.onRunning(cleanup, Threads.SAME_THREAD_EXECUTOR);
            controller.onTerminated(cleanup, Threads.SAME_THREAD_EXECUTOR);
            return controller;
        } catch (Exception e) {
            try {
                DirUtils.deleteDirectoryContents(runDir.toFile());
            } catch (IOException ex) {
                LOG.warn("Failed to cleanup directory {}", runDir, ex);
                e.addSuppressed(ex);
            }
            throw e;
        }
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
}