Example usage for org.apache.hadoop.security UserGroupInformation isSecurityEnabled

Introduction

On this page you can find example usages for org.apache.hadoop.security.UserGroupInformation#isSecurityEnabled().

Prototype

public static boolean isSecurityEnabled() 

Document

Determines whether UserGroupInformation is using Kerberos to determine user identities or is relying on simple authentication.
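
For orientation, here is a minimal, self-contained sketch (not taken from any of the sources below) of the typical pattern: load a Configuration, hand it to UserGroupInformation, and branch on isSecurityEnabled() to decide whether Kerberos-specific steps such as obtaining delegation tokens are needed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class SecurityModeCheck {
    public static void main(String[] args) throws Exception {
        // UGI decides based on "hadoop.security.authentication" in the configuration it was
        // initialized with: "kerberos" enables security, "simple" (the default) does not.
        Configuration conf = new Configuration();
        UserGroupInformation.setConfiguration(conf);

        if (UserGroupInformation.isSecurityEnabled()) {
            // Kerberos is in use; callers typically obtain delegation tokens or log in
            // from a keytab here before talking to secured services.
            System.out.println("Kerberos security enabled for user "
                    + UserGroupInformation.getCurrentUser().getUserName());
        } else {
            System.out.println("Simple authentication in use; no delegation tokens required");
        }
    }
}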

Usage

From source file:co.cask.cdap.data.runtime.main.TokenSecureStoreUpdater.java

License:Apache License

@Inject
public TokenSecureStoreUpdater(Configuration hConf, CConfiguration cConf, LocationFactory locationFactory) {
    this.hConf = hConf;
    this.locationFactory = locationFactory;
    secureExplore = cConf.getBoolean(Constants.Explore.EXPLORE_ENABLED)
            && UserGroupInformation.isSecurityEnabled();
    credentials = new Credentials();
    updateInterval = calculateUpdateInterval();
}

From source file:co.cask.cdap.explore.security.JobHistoryServerTokenUtils.java

License:Apache License

/**
 * Gets a JHS delegation token and stores it in the given Credentials.
 *
 * @return the same Credentials instance that was passed in.
 */
public static Credentials obtainToken(Configuration configuration, Credentials credentials) {
    if (!UserGroupInformation.isSecurityEnabled()) {
        return credentials;
    }

    String historyServerAddress = configuration.get("mapreduce.jobhistory.address");
    HostAndPort hostAndPort = HostAndPort.fromString(historyServerAddress);
    try {
        LOG.info("Obtaining delegation token for JHS");

        ResourceMgrDelegate resourceMgrDelegate = new ResourceMgrDelegate(new YarnConfiguration(configuration));
        MRClientCache clientCache = new MRClientCache(configuration, resourceMgrDelegate);
        MRClientProtocol hsProxy = clientCache.getInitializedHSProxy();
        GetDelegationTokenRequest request = new GetDelegationTokenRequestPBImpl();
        request.setRenewer(YarnUtils.getYarnTokenRenewer(configuration));

        InetSocketAddress address = new InetSocketAddress(hostAndPort.getHostText(), hostAndPort.getPort());
        Token<TokenIdentifier> token = ConverterUtils
                .convertFromYarn(hsProxy.getDelegationToken(request).getDelegationToken(), address);

        credentials.addToken(new Text(token.getService()), token);
        return credentials;
    } catch (Exception e) {
        LOG.error("Failed to get secure token for JHS at {}.", hostAndPort, e);
        throw Throwables.propagate(e);
    }
}
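
As a usage note (illustrative, not taken from the CDAP sources): a caller would typically pass a Credentials instance to obtainToken and then make the acquired token visible to the current user. The configuration key and the Kerberos setup are assumptions.

// Hypothetical call site; assumes "mapreduce.jobhistory.address" is set in the
// configuration and that Kerberos security is enabled on the cluster.
Configuration conf = new Configuration();
Credentials credentials = new Credentials();
JobHistoryServerTokenUtils.obtainToken(conf, credentials);
// Attach the JHS delegation token to the current user so later job submissions can use it.
UserGroupInformation.getCurrentUser().addCredentials(credentials);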

From source file:co.cask.cdap.explore.service.hive.BaseHiveExploreService.java

License:Apache License

protected HiveConf getHiveConf() {
    HiveConf conf = new HiveConf();
    // Read delegation token if security is enabled.
    if (UserGroupInformation.isSecurityEnabled()) {
        conf.set(HIVE_METASTORE_TOKEN_KEY, HiveAuthFactory.HS2_CLIENT_TOKEN);

        // mapreduce.job.credentials.binary is added by Hive only if Kerberos credentials are present and
        // impersonation is enabled. However, in our case we don't have Kerberos credentials for the Explore
        // service, so it will not be added automatically by Hive; instead, we have to add it ourselves.
        // TODO: When Explore does secure impersonation this has to be the tokens of the user,
        // TODO: ... and not the tokens of the service itself.
        String hadoopAuthToken = System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
        if (hadoopAuthToken != null) {
            conf.set("mapreduce.job.credentials.binary", hadoopAuthToken);
            if ("tez".equals(conf.get("hive.execution.engine"))) {
                // Add token file location property for tez if engine is tez
                conf.set("tez.credentials.path", hadoopAuthToken);
            }
        }
    }

    // Since we use delegation token in HIVE, unset the SPNEGO authentication if it is
    // enabled. Please see CDAP-3452 for details.
    conf.unset("hive.server2.authentication.spnego.keytab");
    conf.unset("hive.server2.authentication.spnego.principal");
    return conf;
}

From source file:co.cask.cdap.explore.service.hive.BaseHiveExploreService.java

License:Apache License

private void setupSparkConf() {
    // Copy over hadoop configuration as spark properties since we don't localize hadoop conf dirs due to CDAP-5019
    for (Map.Entry<String, String> entry : hConf) {
        sparkConf.put("spark.hadoop." + entry.getKey(), hConf.get(entry.getKey()));
    }

    // don't localize config, we pass all hadoop configuration in spark properties
    sparkConf.put("spark.yarn.localizeConfig", "false");

    // Setup files to be copied over to spark containers
    sparkConf.put(BaseHiveExploreService.SPARK_YARN_DIST_FILES,
            System.getProperty(BaseHiveExploreService.SPARK_YARN_DIST_FILES));

    if (UserGroupInformation.isSecurityEnabled()) {
        // define metastore token key name
        sparkConf.put("spark.hadoop." + HIVE_METASTORE_TOKEN_KEY, HiveAuthFactory.HS2_CLIENT_TOKEN);

        // tokens are already provided for spark client
        sparkConf.put("spark.yarn.security.tokens.hive.enabled", "false");
        sparkConf.put("spark.yarn.security.tokens.hbase.enabled", "false");

        // Hive needs to ignore security settings while running spark job
        sparkConf.put(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION.toString(), "NONE");
        sparkConf.put(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.toString(), "false");
    }
}

From source file:co.cask.cdap.explore.service.hive.BaseHiveExploreService.java

License:Apache License

protected Map<String, String> startSession(Id.Namespace namespace) throws IOException, ExploreException {
    Map<String, String> sessionConf = Maps.newHashMap();

    QueryHandle queryHandle = QueryHandle.generate();
    sessionConf.put(Constants.Explore.QUERY_ID, queryHandle.getHandle());

    String schedulerQueue = namespace != null ? schedulerQueueResolver.getQueue(namespace)
            : schedulerQueueResolver.getDefaultQueue();

    if (schedulerQueue != null && !schedulerQueue.isEmpty()) {
        sessionConf.put(JobContext.QUEUE_NAME, schedulerQueue);
    }

    Transaction tx = startTransaction();
    ConfigurationUtil.set(sessionConf, Constants.Explore.TX_QUERY_KEY, TxnCodec.INSTANCE, tx);
    ConfigurationUtil.set(sessionConf, Constants.Explore.CCONF_KEY, CConfCodec.INSTANCE, cConf);
    ConfigurationUtil.set(sessionConf, Constants.Explore.HCONF_KEY, HConfCodec.INSTANCE, hConf);

    if (ExploreServiceUtils.isSparkEngine(getHiveConf())) {
        sessionConf.putAll(sparkConf);
    }

    if (UserGroupInformation.isSecurityEnabled()) {
        // make sure RM does not cancel delegation tokens after the query is run
        sessionConf.put("mapreduce.job.complete.cancel.delegation.tokens", "false");
        // refresh delegations for the job - TWILL-170
        updateTokenStore();
    }

    return sessionConf;
}

From source file:co.cask.cdap.internal.app.runtime.batch.MapReduceProgramRunner.java

License:Apache License

@Override
public ProgramController run(final Program program, ProgramOptions options) {
    // Extract and verify parameters
    ApplicationSpecification appSpec = program.getApplicationSpecification();
    Preconditions.checkNotNull(appSpec, "Missing application specification.");

    ProgramType processorType = program.getType();
    Preconditions.checkNotNull(processorType, "Missing processor type.");
    Preconditions.checkArgument(processorType == ProgramType.MAPREDUCE,
            "Only MAPREDUCE process type is supported.");

    MapReduceSpecification spec = appSpec.getMapReduce().get(program.getName());
    Preconditions.checkNotNull(spec, "Missing MapReduceSpecification for %s", program.getName());

    // Optionally get the runId. If the MapReduce program was started by another program (e.g. a Workflow), it inherits that runId.
    Arguments arguments = options.getArguments();

    final RunId runId = RunIds.fromString(arguments.getOption(ProgramOptionConstants.RUN_ID));

    WorkflowProgramInfo workflowInfo = WorkflowProgramInfo.create(arguments);
    DatasetFramework programDatasetFramework = workflowInfo == null ? datasetFramework
            : NameMappedDatasetFramework.createFromWorkflowProgramInfo(datasetFramework, workflowInfo, appSpec);

    // Setup dataset framework context, if required
    if (programDatasetFramework instanceof ProgramContextAware) {
        Id.Program programId = program.getId();
        ((ProgramContextAware) programDatasetFramework).initContext(new Id.Run(programId, runId.getId()));
    }

    MapReduce mapReduce;
    try {
        mapReduce = new InstantiatorFactory(false).get(TypeToken.of(program.<MapReduce>getMainClass()))
                .create();
    } catch (Exception e) {
        LOG.error("Failed to instantiate MapReduce class for {}", spec.getClassName(), e);
        throw Throwables.propagate(e);
    }

    // List of all Closeable resources that need to be cleaned up
    List<Closeable> closeables = new ArrayList<>();
    try {
        PluginInstantiator pluginInstantiator = createPluginInstantiator(options, program.getClassLoader());
        if (pluginInstantiator != null) {
            closeables.add(pluginInstantiator);
        }

        final BasicMapReduceContext context = new BasicMapReduceContext(program, runId,
                options.getUserArguments(), spec, workflowInfo, discoveryServiceClient,
                metricsCollectionService, txSystemClient, programDatasetFramework, streamAdmin,
                getPluginArchive(options), pluginInstantiator);

        Reflections.visit(mapReduce, mapReduce.getClass(),
                new PropertyFieldSetter(context.getSpecification().getProperties()),
                new MetricsFieldSetter(context.getMetrics()), new DataSetFieldSetter(context));

        // note: this sets logging context on the thread level
        LoggingContextAccessor.setLoggingContext(context.getLoggingContext());

        final Service mapReduceRuntimeService = new MapReduceRuntimeService(injector, cConf, hConf, mapReduce,
                spec, context, program.getJarLocation(), locationFactory, streamAdmin, txSystemClient,
                usageRegistry);
        mapReduceRuntimeService.addListener(
                createRuntimeServiceListener(program, runId, closeables, arguments, options.getUserArguments()),
                Threads.SAME_THREAD_EXECUTOR);

        final ProgramController controller = new MapReduceProgramController(mapReduceRuntimeService, context);

        LOG.info("Starting MapReduce Job: {}", context.toString());
        // If security is not enabled, start the job as the user we use to access HDFS.
        // Otherwise the MapReduce job would be launched as the user that runs the program
        // runner, which is probably the yarn user. That may cause permission issues if the program
        // tries to access CDAP data. For example, writing to a FileSet would fail, since the yarn user
        // would be running the job while the data directory is owned by cdap.
        if (MapReduceTaskContextProvider.isLocal(hConf) || UserGroupInformation.isSecurityEnabled()) {
            mapReduceRuntimeService.start();
        } else {
            ProgramRunners.startAsUser(cConf.get(Constants.CFG_HDFS_USER), mapReduceRuntimeService);
        }
        return controller;
    } catch (Exception e) {
        closeAllQuietly(closeables);
        throw Throwables.propagate(e);
    }
}

From source file:co.cask.cdap.internal.app.runtime.batch.MapReduceRuntimeService.java

License:Apache License

/**
 * Creates a MapReduce {@link Job} instance.
 *
 * @param hadoopTmpDir directory for the "hadoop.tmp.dir" configuration
 */
private Job createJob(File hadoopTmpDir) throws IOException {
    Job job = Job.getInstance(new Configuration(hConf));
    Configuration jobConf = job.getConfiguration();

    if (MapReduceTaskContextProvider.isLocal(jobConf)) {
        // Set the MR framework local directories inside the given tmp directory.
        // Setting "hadoop.tmp.dir" here has no effect due to Explore Service need to set "hadoop.tmp.dir"
        // as system property for Hive to work in local mode. The variable substitution of hadoop conf
        // gives system property the highest precedence.
        jobConf.set("mapreduce.cluster.local.dir", new File(hadoopTmpDir, "local").getAbsolutePath());
        jobConf.set("mapreduce.jobtracker.system.dir", new File(hadoopTmpDir, "system").getAbsolutePath());
        jobConf.set("mapreduce.jobtracker.staging.root.dir",
                new File(hadoopTmpDir, "staging").getAbsolutePath());
        jobConf.set("mapreduce.cluster.temp.dir", new File(hadoopTmpDir, "temp").getAbsolutePath());
    }

    if (UserGroupInformation.isSecurityEnabled()) {
        // When running in a secure cluster, this program runner runs inside a YARN container and hence
        // cannot authenticate with the history server.
        jobConf.unset("mapreduce.jobhistory.address");
        jobConf.setBoolean(Job.JOB_AM_ACCESS_DISABLED, false);

        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        LOG.info("Running in secure mode; adding all user credentials: {}", credentials.getAllTokens());
        job.getCredentials().addAll(credentials);
    }
    return job;
}

From source file:co.cask.cdap.internal.app.runtime.distributed.AbstractDistributedProgramRunner.java

License:Apache License

@Override
public final ProgramController run(final Program program, final ProgramOptions oldOptions) {
    final String schedulerQueueName = oldOptions.getArguments()
            .getOption(Constants.AppFabric.APP_SCHEDULER_QUEUE);
    final File tempDir = DirUtils.createTempDir(
            new File(cConf.get(Constants.CFG_LOCAL_DATA_DIR), cConf.get(Constants.AppFabric.TEMP_DIR))
                    .getAbsoluteFile());
    try {
        if (schedulerQueueName != null && !schedulerQueueName.isEmpty()) {
            hConf.set(JobContext.QUEUE_NAME, schedulerQueueName);
            LOG.info("Setting scheduler queue to {}", schedulerQueueName);
        }

        Map<String, LocalizeResource> localizeResources = new HashMap<>();
        final ProgramOptions options = addArtifactPluginFiles(oldOptions, localizeResources,
                DirUtils.createTempDir(tempDir));

        // Copy config files and the program jar to the local temp directory, and ask Twill to localize them to the container.
        // Twill saves those files in HDFS and keeps using them during the lifetime of the application.
        // Twill will manage the cleanup of those files in HDFS.
        localizeResources.put("hConf.xml",
                new LocalizeResource(saveHConf(hConf, File.createTempFile("hConf", ".xml", tempDir))));
        localizeResources.put("cConf.xml",
                new LocalizeResource(saveCConf(cConf, File.createTempFile("cConf", ".xml", tempDir))));

        final URI logbackURI = getLogBackURI(program, tempDir);
        final String programOptions = GSON.toJson(options);

        // Obtain and add the HBase delegation token as well (in non-secure mode this is a no-op;
        // Twill would also ignore it if it is not running in secure mode).
        // The HDFS token should already have been obtained by Twill.
        return launch(program, options, localizeResources, tempDir, new ApplicationLauncher() {
            @Override
            public TwillController launch(TwillApplication twillApplication, Iterable<String> extraClassPaths,
                    Iterable<? extends Class<?>> extraDependencies) {
                TwillPreparer twillPreparer = twillRunner.prepare(twillApplication);
                // TODO: CDAP-5506. It's a bit hacky to set a Spark environment variable here. However, we always
                // launch Spark using YARN and it is needed for both the Workflow and Spark runners. We need to set
                // it because Spark code sets and unsets the SPARK_YARN_MODE system property, which breaks forking
                // in distributed mode. Setting it in the environment, which Spark uses for its defaults, means it
                // cannot be unset by Spark.
                twillPreparer.withEnv(Collections.singletonMap("SPARK_YARN_MODE", "true"));
                if (options.isDebug()) {
                    LOG.info("Starting {} with debugging enabled, programOptions: {}, and logback: {}",
                            program.getId(), programOptions, logbackURI);
                    twillPreparer.enableDebugging();
                }
                // Add scheduler queue name if defined
                if (schedulerQueueName != null && !schedulerQueueName.isEmpty()) {
                    LOG.info("Setting scheduler queue for app {} as {}", program.getId(), schedulerQueueName);
                    twillPreparer.setSchedulerQueue(schedulerQueueName);
                }
                if (logbackURI != null) {
                    twillPreparer.withResources(logbackURI);
                }

                String logLevelConf = cConf.get(Constants.COLLECT_APP_CONTAINER_LOG_LEVEL).toUpperCase();
                if ("OFF".equals(logLevelConf)) {
                    twillPreparer.addJVMOptions("-Dtwill.disable.kafka=true");
                } else {
                    LogEntry.Level logLevel = LogEntry.Level.ERROR;
                    if ("ALL".equals(logLevelConf)) {
                        logLevel = LogEntry.Level.TRACE;
                    } else {
                        try {
                            logLevel = LogEntry.Level.valueOf(logLevelConf.toUpperCase());
                        } catch (Exception e) {
                            LOG.warn("Invalid application container log level {}. Defaulting to ERROR.",
                                    logLevelConf);
                        }
                    }
                    twillPreparer.addLogHandler(new ApplicationLogHandler(
                            new PrinterLogHandler(new PrintWriter(System.out)), logLevel));
                }

                String yarnAppClassPath = hConf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
                        Joiner.on(",").join(YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH));
                // Add secure tokens
                if (User.isHBaseSecurityEnabled(hConf) || UserGroupInformation.isSecurityEnabled()) {
                    // TokenSecureStoreUpdater.update() ignores parameters
                    twillPreparer.addSecureStore(secureStoreUpdater.update(null, null));
                }

                Iterable<Class<?>> dependencies = Iterables.concat(
                        Collections.singletonList(HBaseTableUtilFactory.getHBaseTableUtilClass()),
                        extraDependencies);
                twillPreparer.withDependencies(dependencies)
                        .withClassPaths(Iterables.concat(extraClassPaths,
                                Splitter.on(',').trimResults()
                                        .split(hConf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH, ""))))
                        .withApplicationClassPaths(Splitter.on(",").trimResults().split(yarnAppClassPath))
                        .withBundlerClassAcceptor(new HadoopClassExcluder() {
                            @Override
                            public boolean accept(String className, URL classUrl, URL classPathUrl) {
                                // Exclude both hadoop and spark classes.
                                return super.accept(className, classUrl, classPathUrl)
                                        && !className.startsWith("org.apache.spark.");
                            }
                        }).withApplicationArguments(String.format("--%s", RunnableOptions.JAR),
                                program.getJarLocation().getName(),
                                String.format("--%s", RunnableOptions.PROGRAM_OPTIONS), programOptions);

                TwillController twillController;
                // Change the context classloader to the combined classloader of this ProgramRunner and
                // the classloaders of all dependency classes so that Twill can trace classes.
                ClassLoader oldClassLoader = ClassLoaders.setContextClassLoader(new CombineClassLoader(
                        AbstractDistributedProgramRunner.this.getClass().getClassLoader(),
                        Iterables.transform(dependencies, new Function<Class<?>, ClassLoader>() {
                            @Override
                            public ClassLoader apply(Class<?> input) {
                                return input.getClassLoader();
                            }
                        })));
                try {
                    twillController = twillPreparer.start();
                } finally {
                    ClassLoaders.setContextClassLoader(oldClassLoader);
                }
                return addCleanupListener(twillController, program, tempDir);
            }
        });
    } catch (Exception e) {
        deleteDirectory(tempDir);
        throw Throwables.propagate(e);
    }
}

From source file:co.cask.cdap.security.TokenSecureStoreUpdater.java

License:Apache License

@Inject
public TokenSecureStoreUpdater(YarnConfiguration hConf, CConfiguration cConf, LocationFactory locationFactory) {
    this.hConf = hConf;
    this.locationFactory = locationFactory;
    secureExplore = cConf.getBoolean(Constants.Explore.EXPLORE_ENABLED)
            && UserGroupInformation.isSecurityEnabled();
    updateInterval = calculateUpdateInterval();
}

From source file:co.cask.cdap.security.TokenSecureStoreUpdater.java

License:Apache License

/**
 * Helper method to get delegation tokens for the given LocationFactory.
 * @param config The hadoop configuration.
 * @param locationFactory The LocationFactory for generating tokens.
 * @param credentials Credentials for storing tokens acquired.
 * @return List of delegation Tokens acquired.
 * TODO: copied from Twill 0.6 YarnUtils for CDAP-5350. Remove after this fix is moved to Twill.
 */
private static List<Token<?>> addDelegationTokens(Configuration config, LocationFactory locationFactory,
        Credentials credentials) throws IOException {
    if (!UserGroupInformation.isSecurityEnabled()) {
        LOG.debug("Security is not enabled");
        return ImmutableList.of();
    }

    FileSystem fileSystem = getFileSystem(locationFactory, config);

    if (fileSystem == null) {
        LOG.warn("Unexpected: LocationFactory is not HDFS. Not getting delegation tokens.");
        return ImmutableList.of();
    }

    String renewer = YarnUtils.getYarnTokenRenewer(config);

    Token<?>[] tokens = fileSystem.addDelegationTokens(renewer, credentials);
    LOG.info("Added HDFS DelegationTokens: {}", Arrays.toString(tokens));

    return tokens == null ? ImmutableList.<Token<?>>of() : ImmutableList.copyOf(tokens);
}