Example usage for org.apache.hadoop.security UserGroupInformation isSecurityEnabled

List of usage examples for org.apache.hadoop.security UserGroupInformation isSecurityEnabled

Introduction

On this page you can find example usage for org.apache.hadoop.security.UserGroupInformation.isSecurityEnabled().

Prototype

public static boolean isSecurityEnabled() 

Document

Determines whether UserGroupInformation is using Kerberos to establish user identities or is relying on simple authentication.
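
For orientation, the sketch below shows the pattern the examples on this page share: apply the Hadoop configuration, call isSecurityEnabled(), and only perform Kerberos-specific work (keytab login, delegation-token handling) when it returns true. The principal and keytab path are hypothetical placeholders, not values taken from the projects quoted below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class SecurityCheckExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // isSecurityEnabled() reflects hadoop.security.authentication from the
        // configuration last passed to setConfiguration (default is "simple").
        UserGroupInformation.setConfiguration(conf);

        if (UserGroupInformation.isSecurityEnabled()) {
            // Kerberos in use: log in from a keytab before touching HDFS/YARN.
            // Principal and keytab path are hypothetical placeholders.
            UserGroupInformation.loginUserFromKeytab(
                    "app/host@EXAMPLE.COM", "/etc/security/keytabs/app.keytab");
        } else {
            // Simple authentication: the current OS user is taken as-is.
            System.out.println("Running with simple authentication as "
                    + UserGroupInformation.getCurrentUser().getUserName());
        }
    }
}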

Usage

From source file:org.apache.slider.providers.slideram.SliderAMClientProvider.java

License:Apache License

/**
 * If the cluster is secure, and an HDFS installed keytab is available for AM
 * authentication, add this keytab as a local resource for the AM launch.
 *
 * @param fileSystem
 * @param launcher
 * @param instanceDescription
 * @param providerResources
 * @throws IOException
 */
protected void addKeytabResourceIfNecessary(SliderFileSystem fileSystem, AbstractLauncher launcher,
        AggregateConf instanceDescription, Map<String, LocalResource> providerResources) throws IOException {
    if (UserGroupInformation.isSecurityEnabled()) {
        String keytabPathOnHost = instanceDescription.getAppConfOperations()
                .getComponent(SliderKeys.COMPONENT_AM).get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
        if (SliderUtils.isUnset(keytabPathOnHost)) {
            String amKeytabName = instanceDescription.getAppConfOperations()
                    .getComponent(SliderKeys.COMPONENT_AM).get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
            String keytabDir = instanceDescription.getAppConfOperations().getComponent(SliderKeys.COMPONENT_AM)
                    .get(SliderXmlConfKeys.KEY_HDFS_KEYTAB_DIR);
            Path keytabPath = fileSystem.buildKeytabPath(keytabDir, amKeytabName,
                    instanceDescription.getName());
            LocalResource keytabRes = fileSystem.createAmResource(keytabPath, LocalResourceType.FILE);

            providerResources.put(SliderKeys.KEYTAB_DIR + "/" + amKeytabName, keytabRes);
        }
    }
    launcher.addLocalResources(providerResources);
}

From source file:org.apache.slider.server.appmaster.rpc.RpcBinder.java

License:Apache License

public static SliderClusterProtocol getProxy(final Configuration conf, ApplicationReport application,
        final int rpcTimeout) throws IOException, SliderException, InterruptedException {

    String host = application.getHost();
    int port = application.getRpcPort();
    String address = host + ":" + port;
    if (host == null || 0 == port) {
        throw new SliderException(SliderExitCodes.EXIT_CONNECTIVITY_PROBLEM,
                "Slider instance " + application.getName() + " isn't providing a valid address for the"
                        + " Slider RPC protocol: " + address);
    }

    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    final UserGroupInformation newUgi = UserGroupInformation.createRemoteUser(currentUser.getUserName());
    final InetSocketAddress serviceAddr = NetUtils.createSocketAddrForHost(application.getHost(),
            application.getRpcPort());
    SliderClusterProtocol realProxy;

    log.debug("Connecting to {}", serviceAddr);
    if (UserGroupInformation.isSecurityEnabled()) {
        org.apache.hadoop.yarn.api.records.Token clientToAMToken = application.getClientToAMToken();
        Token<ClientToAMTokenIdentifier> token = ConverterUtils.convertFromYarn(clientToAMToken, serviceAddr);
        newUgi.addToken(token);
        realProxy = newUgi.doAs(new PrivilegedExceptionAction<SliderClusterProtocol>() {
            @Override
            public SliderClusterProtocol run() throws IOException {
                return connectToServer(serviceAddr, newUgi, conf, rpcTimeout);
            }
        });
    } else {
        return connectToServer(serviceAddr, newUgi, conf, rpcTimeout);
    }
    return realProxy;
}

From source file:org.apache.slider.server.appmaster.SliderAppMaster.java

License:Apache License

/**
 * Create and run the cluster.
 * @return exit code
 * @throws Throwable on a failure
 */
private int createAndRunCluster(String clustername) throws Throwable {

    //load the cluster description from the cd argument
    String sliderClusterDir = serviceArgs.getSliderClusterURI();
    URI sliderClusterURI = new URI(sliderClusterDir);
    Path clusterDirPath = new Path(sliderClusterURI);
    log.info("Application defined at {}", sliderClusterURI);
    SliderFileSystem fs = getClusterFS();

    // build up information about the running application -this
    // will be passed down to the cluster status
    MapOperations appInformation = new MapOperations();

    AggregateConf instanceDefinition = InstanceIO.loadInstanceDefinitionUnresolved(fs, clusterDirPath);
    instanceDefinition.setName(clustername);

    log.info("Deploying cluster {}:", instanceDefinition);

    stateForProviders.setApplicationName(clustername);

    Configuration serviceConf = getConfig();

    SecurityConfiguration securityConfiguration = new SecurityConfiguration(serviceConf, instanceDefinition,
            clustername);
    // obtain security state
    boolean securityEnabled = securityConfiguration.isSecurityEnabled();
    // set the global security flag for the instance definition
    instanceDefinition.getAppConfOperations().set(KEY_SECURITY_ENABLED, securityEnabled);

    // triggers resolution and snapshotting in agent
    appState.updateInstanceDefinition(instanceDefinition);

    File confDir = getLocalConfDir();
    if (!confDir.exists() || !confDir.isDirectory()) {
        log.info("Conf dir {} does not exist.", confDir);
        File parentFile = confDir.getParentFile();
        log.info("Parent dir {}:\n{}", parentFile, SliderUtils.listDir(parentFile));
    }

    // IP filtering
    serviceConf.set(HADOOP_HTTP_FILTER_INITIALIZERS, AM_FILTER_NAME);

    //get our provider
    MapOperations globalInternalOptions = getGlobalInternalOptions();
    String providerType = globalInternalOptions.getMandatoryOption(InternalKeys.INTERNAL_PROVIDER_NAME);
    log.info("Cluster provider type is {}", providerType);
    SliderProviderFactory factory = SliderProviderFactory.createSliderProviderFactory(providerType);
    providerService = factory.createServerProvider();
    // init the provider BUT DO NOT START IT YET
    initAndAddService(providerService);
    providerRMOperationHandler = new ProviderNotifyingOperationHandler(providerService);

    // create a slider AM provider
    sliderAMProvider = new SliderAMProviderService();
    initAndAddService(sliderAMProvider);

    InetSocketAddress address = SliderUtils.getRmSchedulerAddress(serviceConf);
    log.info("RM is at {}", address);
    yarnRPC = YarnRPC.create(serviceConf);

    /*
     * Extract the container ID. This is then
     * turned into an (incomplete) container
     */
    appMasterContainerID = ConverterUtils.toContainerId(
            SliderUtils.mandatoryEnvVariable(ApplicationConstants.Environment.CONTAINER_ID.name()));
    appAttemptID = appMasterContainerID.getApplicationAttemptId();

    ApplicationId appid = appAttemptID.getApplicationId();
    log.info("AM for ID {}", appid.getId());

    appInformation.put(StatusKeys.INFO_AM_CONTAINER_ID, appMasterContainerID.toString());
    appInformation.put(StatusKeys.INFO_AM_APP_ID, appid.toString());
    appInformation.put(StatusKeys.INFO_AM_ATTEMPT_ID, appAttemptID.toString());

    Map<String, String> envVars;
    List<Container> liveContainers;
    /**
     * It is critical this section is synchronized, to stop async AM events
     * arriving while registering a restarting AM.
     */
    synchronized (appState) {
        int heartbeatInterval = HEARTBEAT_INTERVAL;

        //add the RM client -this brings the callbacks in
        asyncRMClient = AMRMClientAsync.createAMRMClientAsync(heartbeatInterval, this);
        addService(asyncRMClient);
        //now bring it up
        deployChildService(asyncRMClient);

        //nmclient relays callbacks back to this class
        nmClientAsync = new NMClientAsyncImpl("nmclient", this);
        deployChildService(nmClientAsync);

        // set up secret manager
        secretManager = new ClientToAMTokenSecretManager(appAttemptID, null);

        if (securityEnabled) {
            // fix up the ACLs if they are not set
            String acls = getConfig().get(SliderXmlConfKeys.KEY_PROTOCOL_ACL);
            if (acls == null) {
                getConfig().set(SliderXmlConfKeys.KEY_PROTOCOL_ACL, "*");
            }
        }
        //bring up the Slider RPC service
        startSliderRPCServer(instanceDefinition);

        rpcServiceAddress = rpcService.getConnectAddress();
        appMasterHostname = rpcServiceAddress.getHostName();
        appMasterRpcPort = rpcServiceAddress.getPort();
        appMasterTrackingUrl = null;
        log.info("AM Server is listening at {}:{}", appMasterHostname, appMasterRpcPort);
        appInformation.put(StatusKeys.INFO_AM_HOSTNAME, appMasterHostname);
        appInformation.set(StatusKeys.INFO_AM_RPC_PORT, appMasterRpcPort);

        log.info("Starting Yarn registry");
        registryOperations = startRegistryOperationsService();
        log.info(registryOperations.toString());

        //build the role map
        List<ProviderRole> providerRoles = new ArrayList<ProviderRole>(providerService.getRoles());
        providerRoles.addAll(SliderAMClientProvider.ROLES);

        // Start up the WebApp and track the URL for it
        certificateManager = new CertificateManager();
        MapOperations component = instanceDefinition.getAppConfOperations()
                .getComponent(SliderKeys.COMPONENT_AM);
        certificateManager.initialize(component);
        certificateManager.setPassphrase(instanceDefinition.getPassphrase());

        if (component.getOptionBool(AgentKeys.KEY_AGENT_TWO_WAY_SSL_ENABLED, false)) {
            uploadServerCertForLocalization(clustername, fs);
        }

        startAgentWebApp(appInformation, serviceConf);

        int port = getPortToRequest(instanceDefinition);

        webApp = new SliderAMWebApp(registryOperations);
        WebApps.$for(SliderAMWebApp.BASE_PATH, WebAppApi.class,
                new WebAppApiImpl(this, stateForProviders, providerService, certificateManager,
                        registryOperations),
                RestPaths.WS_CONTEXT).withHttpPolicy(serviceConf, HttpConfig.Policy.HTTP_ONLY).at(port)
                .start(webApp);
        String scheme = WebAppUtils.HTTP_PREFIX;
        appMasterTrackingUrl = scheme + appMasterHostname + ":" + webApp.port();
        WebAppService<SliderAMWebApp> webAppService = new WebAppService<SliderAMWebApp>("slider", webApp);

        webAppService.init(serviceConf);
        webAppService.start();
        addService(webAppService);

        appInformation.put(StatusKeys.INFO_AM_WEB_URL, appMasterTrackingUrl + "/");
        appInformation.set(StatusKeys.INFO_AM_WEB_PORT, webApp.port());

        // Register self with ResourceManager
        // This will start heartbeating to the RM
        // address = SliderUtils.getRmSchedulerAddress(asyncRMClient.getConfig());
        log.info("Connecting to RM at {},address tracking URL={}", appMasterRpcPort, appMasterTrackingUrl);
        amRegistrationData = asyncRMClient.registerApplicationMaster(appMasterHostname, appMasterRpcPort,
                appMasterTrackingUrl);
        Resource maxResources = amRegistrationData.getMaximumResourceCapability();
        containerMaxMemory = maxResources.getMemory();
        containerMaxCores = maxResources.getVirtualCores();
        appState.setContainerLimits(maxResources.getMemory(), maxResources.getVirtualCores());

        // build the handler for RM request/release operations; this uses
        // the max value as part of its lookup
        rmOperationHandler = new AsyncRMOperationHandler(asyncRMClient, maxResources);

        // set the RM-defined maximum cluster values
        appInformation.put(ResourceKeys.YARN_CORES, Integer.toString(containerMaxCores));
        appInformation.put(ResourceKeys.YARN_MEMORY, Integer.toString(containerMaxMemory));

        // process the initial user to obtain the set of user
        // supplied credentials (tokens were passed in by client). Remove AMRM
        // token and HDFS delegation token, the latter because we will provide an
        // up to date token for container launches (getContainerCredentials()).
        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
        Credentials credentials = currentUser.getCredentials();
        Iterator<Token<? extends TokenIdentifier>> iter = credentials.getAllTokens().iterator();
        while (iter.hasNext()) {
            Token<? extends TokenIdentifier> token = iter.next();
            log.info("Token {}", token.getKind());
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)
                    || token.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)) {
                iter.remove();
            }
        }
        // at this point this credentials map is probably clear, but leaving this
        // code to allow for future tokens...
        containerCredentials = credentials;

        if (securityEnabled) {
            secretManager.setMasterKey(amRegistrationData.getClientToAMTokenMasterKey().array());
            applicationACLs = amRegistrationData.getApplicationACLs();

            //tell the server what the ACLs are
            rpcService.getServer().refreshServiceAcl(serviceConf, new SliderAMPolicyProvider());
            // perform keytab based login to establish kerberos authenticated
            // principal.  Can do so now since AM registration with RM above required
            // tokens associated to principal
            String principal = securityConfiguration.getPrincipal();
            File localKeytabFile = securityConfiguration.getKeytabFile(instanceDefinition);
            // Now log in...
            login(principal, localKeytabFile);
            // obtain new FS reference that should be kerberos based and different
            // than the previously cached reference
            fs = getClusterFS();
        }

        // extract container list

        liveContainers = amRegistrationData.getContainersFromPreviousAttempts();

        //now validate the installation
        Configuration providerConf = providerService.loadProviderConfigurationInformation(confDir);

        providerService.initializeApplicationConfiguration(instanceDefinition, fs);

        providerService.validateApplicationConfiguration(instanceDefinition, confDir, securityEnabled);

        //determine the location for the role history data
        Path historyDir = new Path(clusterDirPath, HISTORY_DIR_NAME);

        //build the instance
        appState.buildInstance(instanceDefinition, serviceConf, providerConf, providerRoles, fs.getFileSystem(),
                historyDir, liveContainers, appInformation, new SimpleReleaseSelector());

        providerService.rebuildContainerDetails(liveContainers, instanceDefinition.getName(),
                appState.getRolePriorityMap());

        // add the AM to the list of nodes in the cluster

        appState.buildAppMasterNode(appMasterContainerID, appMasterHostname, webApp.port(),
                appMasterHostname + ":" + webApp.port());

        // build up environment variables that the AM wants set in every container
        // irrespective of provider and role.
        envVars = new HashMap<String, String>();
        if (hadoop_user_name != null) {
            envVars.put(HADOOP_USER_NAME, hadoop_user_name);
        }
    }
    String rolesTmpSubdir = appMasterContainerID.toString() + "/roles";

    String amTmpDir = globalInternalOptions.getMandatoryOption(InternalKeys.INTERNAL_AM_TMP_DIR);

    Path tmpDirPath = new Path(amTmpDir);
    Path launcherTmpDirPath = new Path(tmpDirPath, rolesTmpSubdir);
    fs.getFileSystem().mkdirs(launcherTmpDirPath);

    //launcher service
    launchService = new RoleLaunchService(actionQueues, providerService, fs, new Path(getGeneratedConfDir()),
            envVars, launcherTmpDirPath);

    deployChildService(launchService);

    appState.noteAMLaunched();

    //Give the provider access to the state, and AM
    providerService.bind(stateForProviders, actionQueues, liveContainers);
    sliderAMProvider.bind(stateForProviders, actionQueues, liveContainers);

    // chaos monkey
    maybeStartMonkey();

    // setup token renewal and expiry handling for long lived apps
    //    if (SliderUtils.isHadoopClusterSecure(getConfig())) {
    //      fsDelegationTokenManager = new FsDelegationTokenManager(actionQueues);
    //      fsDelegationTokenManager.acquireDelegationToken(getConfig());
    //    }

    // if not a secure cluster, extract the username -it will be
    // propagated to workers
    if (!UserGroupInformation.isSecurityEnabled()) {
        hadoop_user_name = System.getenv(HADOOP_USER_NAME);
        log.info(HADOOP_USER_NAME + "='{}'", hadoop_user_name);
    }
    service_user_name = RegistryUtils.currentUser();
    log.info("Registry service username ={}", service_user_name);

    // now do the registration
    registerServiceInstance(clustername, appid);

    // log the YARN and web UIs
    log.info("RM Webapp address {}", serviceConf.get(YarnConfiguration.RM_WEBAPP_ADDRESS));
    log.info("slider Webapp address {}", appMasterTrackingUrl);

    // declare the cluster initialized
    log.info("Application Master Initialization Completed");
    initCompleted.set(true);

    try {
        // start handling any scheduled events

        startQueueProcessing();

        // Start the Slider AM provider
        sliderAMProvider.start();

        // launch the real provider; this is expected to trigger a callback that
        // starts the node review process
        launchProviderService(instanceDefinition, confDir);

        //now block waiting to be told to exit the process
        waitForAMCompletionSignal();
    } catch (Exception e) {
        log.error("Exception : {}", e, e);
        onAMStop(new ActionStopSlider(e));
    }
    //shutdown time
    return finish();
}

From source file:org.apache.sqoop.connector.hdfs.security.SecurityUtils.java

License:Apache License

/**
 * Generate delegation tokens for the current user (this code is supposed to run inside doAs) and store them
 * serialized in the given mutable context.
 */
static public void generateDelegationTokens(MutableContext context, Path path, Configuration configuration)
        throws IOException {
    if (!UserGroupInformation.isSecurityEnabled()) {
        LOG.info("Running on unsecured cluster, skipping delegation token generation.");
        return;
    }

    // String representation of all tokens that we will create (most likely single one)
    List<String> tokens = new LinkedList<>();

    Credentials credentials = new Credentials();
    TokenCache.obtainTokensForNamenodes(credentials, new Path[] { path }, configuration);
    for (Token token : credentials.getAllTokens()) {
        LOG.info("Generated token: " + token.toString());
        tokens.add(serializeToken(token));
    }

    // The context classes are transferred via "Credentials" rather than with jobconf, so we're not leaking the DT out here
    if (tokens.size() > 0) {
        context.setString(HdfsConstants.DELEGATION_TOKENS, StringUtils.join(tokens, " "));
    }
}

From source file:org.apache.storm.hbase.security.AutoHBase.java

License:Apache License

@SuppressWarnings("unchecked")
protected byte[] getHadoopCredentials(Map conf) {
    try {
        final Configuration hbaseConf = HBaseConfiguration.create();
        if (UserGroupInformation.isSecurityEnabled()) {
            final String topologySubmitterUser = (String) conf.get(Config.TOPOLOGY_SUBMITTER_PRINCIPAL);

            UserProvider provider = UserProvider.instantiate(hbaseConf);

            hbaseConf.set(HBASE_KEYTAB_FILE_KEY, hbaseKeytab);
            hbaseConf.set(HBASE_PRINCIPAL_KEY, hbasePrincipal);
            provider.login(HBASE_KEYTAB_FILE_KEY, HBASE_PRINCIPAL_KEY,
                    InetAddress.getLocalHost().getCanonicalHostName());

            LOG.info("Logged into Hbase as principal = " + conf.get(HBASE_PRINCIPAL_KEY));
            UserGroupInformation.setConfiguration(hbaseConf);

            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

            final UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(topologySubmitterUser,
                    ugi);

            User user = User.create(ugi);

            if (user.isHBaseSecurityEnabled(hbaseConf)) {
                TokenUtil.obtainAndCacheToken(hbaseConf, proxyUser);

                LOG.info("Obtained HBase tokens, adding to user credentials.");

                Credentials credential = proxyUser.getCredentials();
                ByteArrayOutputStream bao = new ByteArrayOutputStream();
                ObjectOutputStream out = new ObjectOutputStream(bao);
                credential.write(out);
                out.flush();
                out.close();
                return bao.toByteArray();
            } else {
                throw new RuntimeException("Security is not enabled for HBase.");
            }
        } else {
            throw new RuntimeException("Security is not enabled for Hadoop");
        }
    } catch (Exception ex) {
        throw new RuntimeException("Failed to get delegation tokens.", ex);
    }
}

From source file:org.apache.storm.hbase.security.AutoHBaseNimbus.java

License:Apache License

@SuppressWarnings("unchecked")
protected byte[] getHadoopCredentials(Map<String, Object> conf, Configuration hbaseConf,
        final String topologySubmitterUser) {
    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            UserProvider provider = UserProvider.instantiate(hbaseConf);
            provider.login(HBASE_KEYTAB_FILE_KEY, HBASE_PRINCIPAL_KEY,
                    InetAddress.getLocalHost().getCanonicalHostName());

            LOG.info("Logged into Hbase as principal = " + hbaseConf.get(HBASE_PRINCIPAL_KEY));

            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

            final UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(topologySubmitterUser,
                    ugi);

            User user = User.create(proxyUser);

            if (user.isHBaseSecurityEnabled(hbaseConf)) {
                final Connection connection = ConnectionFactory.createConnection(hbaseConf, user);
                TokenUtil.obtainAndCacheToken(connection, user);

                LOG.info("Obtained HBase tokens, adding to user credentials.");

                Credentials credential = proxyUser.getCredentials();

                for (Token<? extends TokenIdentifier> tokenForLog : credential.getAllTokens()) {
                    LOG.debug("Obtained token info in credential: {} / {}", tokenForLog.toString(),
                            tokenForLog.decodeIdentifier().getUser());
                }

                ByteArrayOutputStream bao = new ByteArrayOutputStream();
                ObjectOutputStream out = new ObjectOutputStream(bao);
                credential.write(out);
                out.flush();
                out.close();
                return bao.toByteArray();
            } else {
                throw new RuntimeException("Security is not enabled for HBase.");
            }
        } else {
            throw new RuntimeException("Security is not enabled for Hadoop");
        }
    } catch (Exception ex) {
        throw new RuntimeException("Failed to get delegation tokens.", ex);
    }
}

From source file:org.apache.storm.hbase.security.HBaseSecurityUtil.java

License:Apache License

public static UserProvider login(Map conf, Configuration hbaseConfig) throws IOException {
    //Allowing keytab based login for backward compatibility.
    if (UserGroupInformation.isSecurityEnabled() && (conf.get(TOPOLOGY_AUTO_CREDENTIALS) == null
            || !(((List) conf.get(TOPOLOGY_AUTO_CREDENTIALS)).contains(AutoHBase.class.getName())))) {
        LOG.info("Logging in using keytab as AutoHBase is not specified for " + TOPOLOGY_AUTO_CREDENTIALS);
        //ensure that if a keytab is used, only one login per process is executed
        if (legacyProvider == null) {
            synchronized (HBaseSecurityUtil.class) {
                if (legacyProvider == null) {
                    legacyProvider = UserProvider.instantiate(hbaseConfig);
                    String keytab = (String) conf.get(STORM_KEYTAB_FILE_KEY);
                    if (keytab != null) {
                        hbaseConfig.set(STORM_KEYTAB_FILE_KEY, keytab);
                    }
                    String userName = (String) conf.get(STORM_USER_NAME_KEY);
                    if (userName != null) {
                        hbaseConfig.set(STORM_USER_NAME_KEY, userName);
                    }
                    legacyProvider.login(STORM_KEYTAB_FILE_KEY, STORM_USER_NAME_KEY,
                            InetAddress.getLocalHost().getCanonicalHostName());
                }
            }
        }
        return legacyProvider;
    } else {
        return UserProvider.instantiate(hbaseConfig);
    }
}

From source file:org.apache.storm.hdfs.common.security.HdfsSecurityUtil.java

License:Apache License

public static void login(Map conf, Configuration hdfsConfig) throws IOException {
    //If AutoHDFS is specified, do not attempt to login using keytabs, only kept for backward compatibility.
    if (conf.get(TOPOLOGY_AUTO_CREDENTIALS) == null
            || (!(((List) conf.get(TOPOLOGY_AUTO_CREDENTIALS)).contains(AutoHDFS.class.getName()))
                    && !(((List) conf.get(TOPOLOGY_AUTO_CREDENTIALS)).contains(AutoTGT.class.getName())))) {
        if (UserGroupInformation.isSecurityEnabled()) {
            LOG.info("Logging in using keytab as AutoHDFS is not specified for " + TOPOLOGY_AUTO_CREDENTIALS);
            String keytab = (String) conf.get(STORM_KEYTAB_FILE_KEY);
            if (keytab != null) {
                hdfsConfig.set(STORM_KEYTAB_FILE_KEY, keytab);
            }
            String userName = (String) conf.get(STORM_USER_NAME_KEY);
            if (userName != null) {
                hdfsConfig.set(STORM_USER_NAME_KEY, userName);
            }
            SecurityUtil.login(hdfsConfig, STORM_KEYTAB_FILE_KEY, STORM_USER_NAME_KEY);
        }
    }
}

From source file:org.apache.storm.hdfs.security.AutoHDFS.java

License:Apache License

@SuppressWarnings("unchecked")
private byte[] getHadoopCredentials(Map<String, Object> conf, final Configuration configuration) {
    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            login(configuration);

            final String topologySubmitterUser = (String) conf.get(Config.TOPOLOGY_SUBMITTER_PRINCIPAL);

            final URI nameNodeURI = conf.containsKey(TOPOLOGY_HDFS_URI)
                    ? new URI(conf.get(TOPOLOGY_HDFS_URI).toString())
                    : FileSystem.getDefaultUri(configuration);

            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

            final UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(topologySubmitterUser,
                    ugi);

            Credentials creds = (Credentials) proxyUser.doAs(new PrivilegedAction<Object>() {
                @Override
                public Object run() {
                    try {
                        FileSystem fileSystem = FileSystem.get(nameNodeURI, configuration);
                        Credentials credential = proxyUser.getCredentials();

                        if (configuration.get(STORM_USER_NAME_KEY) == null) {
                            configuration.set(STORM_USER_NAME_KEY, hdfsPrincipal);
                        }

                        fileSystem.addDelegationTokens(configuration.get(STORM_USER_NAME_KEY), credential);
                        LOG.info("Delegation tokens acquired for user {}", topologySubmitterUser);
                        return credential;
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                }
            });

            ByteArrayOutputStream bao = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bao);

            creds.write(out);
            out.flush();
            out.close();

            return bao.toByteArray();
        } else {
            throw new RuntimeException("Security is not enabled for HDFS");
        }
    } catch (Exception ex) {
        throw new RuntimeException("Failed to get delegation tokens.", ex);
    }
}

From source file:org.apache.storm.hdfs.security.AutoHDFSNimbus.java

License:Apache License

@SuppressWarnings("unchecked")
private byte[] getHadoopCredentials(Map<String, Object> conf, final Configuration configuration,
        final String topologySubmitterUser) {
    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            login(configuration);

            final URI nameNodeURI = conf.containsKey(TOPOLOGY_HDFS_URI)
                    ? new URI(conf.get(TOPOLOGY_HDFS_URI).toString())
                    : FileSystem.getDefaultUri(configuration);

            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

            final UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(topologySubmitterUser,
                    ugi);

            Credentials creds = (Credentials) proxyUser.doAs(new PrivilegedAction<Object>() {
                @Override
                public Object run() {
                    try {
                        FileSystem fileSystem = FileSystem.get(nameNodeURI, configuration);
                        Credentials credential = proxyUser.getCredentials();

                        if (configuration.get(STORM_USER_NAME_KEY) == null) {
                            configuration.set(STORM_USER_NAME_KEY, hdfsPrincipal);
                        }

                        fileSystem.addDelegationTokens(configuration.get(STORM_USER_NAME_KEY), credential);
                        LOG.info("Delegation tokens acquired for user {}", topologySubmitterUser);
                        return credential;
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                }
            });

            ByteArrayOutputStream bao = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bao);

            creds.write(out);
            out.flush();
            out.close();

            return bao.toByteArray();
        } else {
            throw new RuntimeException("Security is not enabled for HDFS");
        }
    } catch (Exception ex) {
        throw new RuntimeException("Failed to get delegation tokens.", ex);
    }
}