Example usage for org.apache.hadoop.security Credentials getAllTokens

List of usage examples for org.apache.hadoop.security Credentials getAllTokens

Introduction

On this page you can find example usages of org.apache.hadoop.security.Credentials.getAllTokens().

Prototype

public Collection<Token<? extends TokenIdentifier>> getAllTokens() 

Document

Return all the tokens in the in-memory map.
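
Before diving into the examples, here is a minimal, self-contained sketch of calling getAllTokens() on the current user's credentials. It is an illustration only, not drawn from any of the projects below; on an unsecured cluster the returned collection is typically empty.

import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class ListTokens {
    public static void main(String[] args) throws Exception {
        // Tokens attached to the current UGI (populated by the client/launcher)
        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        for (Token<? extends TokenIdentifier> token : credentials.getAllTokens()) {
            System.out.println(token.getKind() + " -> " + token.getService());
        }
    }
}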

Usage

From source file: org.apache.slider.core.launch.CredentialUtils.java

License: Apache License

/**
 * Filter a list of tokens from a set of credentials
 * @param credentials credential source (a new credential set is created)
 * @param filter list of tokens to strip out
 * @return a new, filtered, set of credentials
 */
public static Credentials filterTokens(Credentials credentials, List<Text> filter) {
    Credentials result = new Credentials(credentials);
    Iterator<Token<? extends TokenIdentifier>> iter = result.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<? extends TokenIdentifier> token = iter.next();
        LOG.debug("Token {}", token.getKind());
        if (filter.contains(token.getKind())) {
            LOG.debug("Filtering token {}", token.getKind());
            iter.remove();
        }
    }
    return result;
}
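
A hedged usage sketch for filterTokens: the call site below is hypothetical, but DelegationTokenIdentifier.HDFS_DELEGATION_KIND (from org.apache.hadoop.hdfs.security.token.delegation) is a real Text constant naming the HDFS delegation token kind, so it type-checks against the List<Text> filter parameter.

// Hypothetical call site: strip HDFS delegation tokens from the current user's credentials
Credentials filtered = CredentialUtils.filterTokens(
        UserGroupInformation.getCurrentUser().getCredentials(),
        Collections.singletonList(DelegationTokenIdentifier.HDFS_DELEGATION_KIND));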

From source file: org.apache.slider.core.launch.CredentialUtils.java

License: Apache License

public static String dumpTokens(Credentials credentials, String separator) {
    ArrayList<Token<? extends TokenIdentifier>> sorted = new ArrayList<>(credentials.getAllTokens());
    Collections.sort(sorted, new TokenComparator());
    StringBuilder buffer = new StringBuilder(sorted.size() * 128);
    for (Token<? extends TokenIdentifier> token : sorted) {
        buffer.append(tokenToString(token)).append(separator);
    }
    return buffer.toString();
}
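
The TokenComparator referenced above is not reproduced on this page. A plausible implementation, assumed here rather than copied from Slider, orders tokens by kind and then by service so the dump is stable (uses java.util.Comparator and java.io.Serializable):

// Assumed implementation of the comparator used by dumpTokens()
private static class TokenComparator
        implements Comparator<Token<? extends TokenIdentifier>>, Serializable {
    @Override
    public int compare(Token<? extends TokenIdentifier> left,
            Token<? extends TokenIdentifier> right) {
        int byKind = left.getKind().toString().compareTo(right.getKind().toString());
        // Break ties on the service name so equal kinds still sort deterministically
        return byKind != 0 ? byKind
                : left.getService().toString().compareTo(right.getService().toString());
    }
}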

From source file: org.apache.slider.server.appmaster.SliderAppMaster.java

License: Apache License

/**
 * Create and run the cluster.
 * @return exit code
 * @throws Throwable on a failure
 */
private int createAndRunCluster(String clustername) throws Throwable {

    //load the cluster description from the cd argument
    String sliderClusterDir = serviceArgs.getSliderClusterURI();
    URI sliderClusterURI = new URI(sliderClusterDir);
    Path clusterDirPath = new Path(sliderClusterURI);
    log.info("Application defined at {}", sliderClusterURI);
    SliderFileSystem fs = getClusterFS();

    // build up information about the running application -this
    // will be passed down to the cluster status
    MapOperations appInformation = new MapOperations();

    AggregateConf instanceDefinition = InstanceIO.loadInstanceDefinitionUnresolved(fs, clusterDirPath);
    instanceDefinition.setName(clustername);

    log.info("Deploying cluster {}:", instanceDefinition);

    stateForProviders.setApplicationName(clustername);

    Configuration serviceConf = getConfig();

    SecurityConfiguration securityConfiguration = new SecurityConfiguration(serviceConf, instanceDefinition,
            clustername);
    // obtain security state
    boolean securityEnabled = securityConfiguration.isSecurityEnabled();
    // set the global security flag for the instance definition
    instanceDefinition.getAppConfOperations().set(KEY_SECURITY_ENABLED, securityEnabled);

    // triggers resolution and snapshotting in agent
    appState.updateInstanceDefinition(instanceDefinition);

    File confDir = getLocalConfDir();
    if (!confDir.exists() || !confDir.isDirectory()) {
        log.info("Conf dir {} does not exist.", confDir);
        File parentFile = confDir.getParentFile();
        log.info("Parent dir {}:\n{}", parentFile, SliderUtils.listDir(parentFile));
    }

    // IP filtering
    serviceConf.set(HADOOP_HTTP_FILTER_INITIALIZERS, AM_FILTER_NAME);

    //get our provider
    MapOperations globalInternalOptions = getGlobalInternalOptions();
    String providerType = globalInternalOptions.getMandatoryOption(InternalKeys.INTERNAL_PROVIDER_NAME);
    log.info("Cluster provider type is {}", providerType);
    SliderProviderFactory factory = SliderProviderFactory.createSliderProviderFactory(providerType);
    providerService = factory.createServerProvider();
    // init the provider BUT DO NOT START IT YET
    initAndAddService(providerService);
    providerRMOperationHandler = new ProviderNotifyingOperationHandler(providerService);

    // create a slider AM provider
    sliderAMProvider = new SliderAMProviderService();
    initAndAddService(sliderAMProvider);

    InetSocketAddress address = SliderUtils.getRmSchedulerAddress(serviceConf);
    log.info("RM is at {}", address);
    yarnRPC = YarnRPC.create(serviceConf);

    /*
     * Extract the container ID. This is then
     * turned into an (incomplete) container
     */
    appMasterContainerID = ConverterUtils.toContainerId(
            SliderUtils.mandatoryEnvVariable(ApplicationConstants.Environment.CONTAINER_ID.name()));
    appAttemptID = appMasterContainerID.getApplicationAttemptId();

    ApplicationId appid = appAttemptID.getApplicationId();
    log.info("AM for ID {}", appid.getId());

    appInformation.put(StatusKeys.INFO_AM_CONTAINER_ID, appMasterContainerID.toString());
    appInformation.put(StatusKeys.INFO_AM_APP_ID, appid.toString());
    appInformation.put(StatusKeys.INFO_AM_ATTEMPT_ID, appAttemptID.toString());

    Map<String, String> envVars;
    List<Container> liveContainers;
    /**
     * It is critical this section is synchronized, to stop async AM events
     * arriving while registering a restarting AM.
     */
    synchronized (appState) {
        int heartbeatInterval = HEARTBEAT_INTERVAL;

        //add the RM client -this brings the callbacks in
        asyncRMClient = AMRMClientAsync.createAMRMClientAsync(heartbeatInterval, this);
        addService(asyncRMClient);
        //now bring it up
        deployChildService(asyncRMClient);

        //nmclient relays callbacks back to this class
        nmClientAsync = new NMClientAsyncImpl("nmclient", this);
        deployChildService(nmClientAsync);

        // set up secret manager
        secretManager = new ClientToAMTokenSecretManager(appAttemptID, null);

        if (securityEnabled) {
            // fix up the ACLs if they are not set
            String acls = getConfig().get(SliderXmlConfKeys.KEY_PROTOCOL_ACL);
            if (acls == null) {
                getConfig().set(SliderXmlConfKeys.KEY_PROTOCOL_ACL, "*");
            }
        }
        //bring up the Slider RPC service
        startSliderRPCServer(instanceDefinition);

        rpcServiceAddress = rpcService.getConnectAddress();
        appMasterHostname = rpcServiceAddress.getHostName();
        appMasterRpcPort = rpcServiceAddress.getPort();
        appMasterTrackingUrl = null;
        log.info("AM Server is listening at {}:{}", appMasterHostname, appMasterRpcPort);
        appInformation.put(StatusKeys.INFO_AM_HOSTNAME, appMasterHostname);
        appInformation.set(StatusKeys.INFO_AM_RPC_PORT, appMasterRpcPort);

        log.info("Starting Yarn registry");
        registryOperations = startRegistryOperationsService();
        log.info(registryOperations.toString());

        //build the role map
        List<ProviderRole> providerRoles = new ArrayList<ProviderRole>(providerService.getRoles());
        providerRoles.addAll(SliderAMClientProvider.ROLES);

        // Start up the WebApp and track the URL for it
        certificateManager = new CertificateManager();
        MapOperations component = instanceDefinition.getAppConfOperations()
                .getComponent(SliderKeys.COMPONENT_AM);
        certificateManager.initialize(component);
        certificateManager.setPassphrase(instanceDefinition.getPassphrase());

        if (component.getOptionBool(AgentKeys.KEY_AGENT_TWO_WAY_SSL_ENABLED, false)) {
            uploadServerCertForLocalization(clustername, fs);
        }

        startAgentWebApp(appInformation, serviceConf);

        int port = getPortToRequest(instanceDefinition);

        webApp = new SliderAMWebApp(registryOperations);
        WebApps.$for(SliderAMWebApp.BASE_PATH, WebAppApi.class,
                new WebAppApiImpl(this, stateForProviders, providerService, certificateManager,
                        registryOperations),
                RestPaths.WS_CONTEXT).withHttpPolicy(serviceConf, HttpConfig.Policy.HTTP_ONLY).at(port)
                .start(webApp);
        String scheme = WebAppUtils.HTTP_PREFIX;
        appMasterTrackingUrl = scheme + appMasterHostname + ":" + webApp.port();
        WebAppService<SliderAMWebApp> webAppService = new WebAppService<SliderAMWebApp>("slider", webApp);

        webAppService.init(serviceConf);
        webAppService.start();
        addService(webAppService);

        appInformation.put(StatusKeys.INFO_AM_WEB_URL, appMasterTrackingUrl + "/");
        appInformation.set(StatusKeys.INFO_AM_WEB_PORT, webApp.port());

        // Register self with ResourceManager
        // This will start heartbeating to the RM
        // address = SliderUtils.getRmSchedulerAddress(asyncRMClient.getConfig());
        log.info("Connecting to RM at {},address tracking URL={}", appMasterRpcPort, appMasterTrackingUrl);
        amRegistrationData = asyncRMClient.registerApplicationMaster(appMasterHostname, appMasterRpcPort,
                appMasterTrackingUrl);
        Resource maxResources = amRegistrationData.getMaximumResourceCapability();
        containerMaxMemory = maxResources.getMemory();
        containerMaxCores = maxResources.getVirtualCores();
        appState.setContainerLimits(maxResources.getMemory(), maxResources.getVirtualCores());

        // build the handler for RM request/release operations; this uses
        // the max value as part of its lookup
        rmOperationHandler = new AsyncRMOperationHandler(asyncRMClient, maxResources);

        // set the RM-defined maximum cluster values
        appInformation.put(ResourceKeys.YARN_CORES, Integer.toString(containerMaxCores));
        appInformation.put(ResourceKeys.YARN_MEMORY, Integer.toString(containerMaxMemory));

        // process the initial user to obtain the set of user
        // supplied credentials (tokens were passed in by client). Remove AMRM
        // token and HDFS delegation token, the latter because we will provide an
        // up to date token for container launches (getContainerCredentials()).
        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
        Credentials credentials = currentUser.getCredentials();
        Iterator<Token<? extends TokenIdentifier>> iter = credentials.getAllTokens().iterator();
        while (iter.hasNext()) {
            Token<? extends TokenIdentifier> token = iter.next();
            log.info("Token {}", token.getKind());
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)
                    || token.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)) {
                iter.remove();
            }
        }
        // at this point this credentials map is probably clear, but leaving this
        // code to allow for future tokens...
        containerCredentials = credentials;

        if (securityEnabled) {
            secretManager.setMasterKey(amRegistrationData.getClientToAMTokenMasterKey().array());
            applicationACLs = amRegistrationData.getApplicationACLs();

            //tell the server what the ACLs are
            rpcService.getServer().refreshServiceAcl(serviceConf, new SliderAMPolicyProvider());
            // perform keytab based login to establish kerberos authenticated
            // principal.  Can do so now since AM registration with RM above required
            // tokens associated to principal
            String principal = securityConfiguration.getPrincipal();
            File localKeytabFile = securityConfiguration.getKeytabFile(instanceDefinition);
            // Now log in...
            login(principal, localKeytabFile);
            // obtain new FS reference that should be kerberos based and different
            // than the previously cached reference
            fs = getClusterFS();
        }

        // extract container list

        liveContainers = amRegistrationData.getContainersFromPreviousAttempts();

        //now validate the installation
        Configuration providerConf = providerService.loadProviderConfigurationInformation(confDir);

        providerService.initializeApplicationConfiguration(instanceDefinition, fs);

        providerService.validateApplicationConfiguration(instanceDefinition, confDir, securityEnabled);

        //determine the location for the role history data
        Path historyDir = new Path(clusterDirPath, HISTORY_DIR_NAME);

        //build the instance
        appState.buildInstance(instanceDefinition, serviceConf, providerConf, providerRoles, fs.getFileSystem(),
                historyDir, liveContainers, appInformation, new SimpleReleaseSelector());

        providerService.rebuildContainerDetails(liveContainers, instanceDefinition.getName(),
                appState.getRolePriorityMap());

        // add the AM to the list of nodes in the cluster

        appState.buildAppMasterNode(appMasterContainerID, appMasterHostname, webApp.port(),
                appMasterHostname + ":" + webApp.port());

        // build up environment variables that the AM wants set in every container
        // irrespective of provider and role.
        envVars = new HashMap<String, String>();
        if (hadoop_user_name != null) {
            envVars.put(HADOOP_USER_NAME, hadoop_user_name);
        }
    }
    String rolesTmpSubdir = appMasterContainerID.toString() + "/roles";

    String amTmpDir = globalInternalOptions.getMandatoryOption(InternalKeys.INTERNAL_AM_TMP_DIR);

    Path tmpDirPath = new Path(amTmpDir);
    Path launcherTmpDirPath = new Path(tmpDirPath, rolesTmpSubdir);
    fs.getFileSystem().mkdirs(launcherTmpDirPath);

    //launcher service
    launchService = new RoleLaunchService(actionQueues, providerService, fs, new Path(getGeneratedConfDir()),
            envVars, launcherTmpDirPath);

    deployChildService(launchService);

    appState.noteAMLaunched();

    //Give the provider access to the state, and AM
    providerService.bind(stateForProviders, actionQueues, liveContainers);
    sliderAMProvider.bind(stateForProviders, actionQueues, liveContainers);

    // chaos monkey
    maybeStartMonkey();

    // setup token renewal and expiry handling for long lived apps
    //    if (SliderUtils.isHadoopClusterSecure(getConfig())) {
    //      fsDelegationTokenManager = new FsDelegationTokenManager(actionQueues);
    //      fsDelegationTokenManager.acquireDelegationToken(getConfig());
    //    }

    // if not a secure cluster, extract the username -it will be
    // propagated to workers
    if (!UserGroupInformation.isSecurityEnabled()) {
        hadoop_user_name = System.getenv(HADOOP_USER_NAME);
        log.info(HADOOP_USER_NAME + "='{}'", hadoop_user_name);
    }
    service_user_name = RegistryUtils.currentUser();
    log.info("Registry service username ={}", service_user_name);

    // now do the registration
    registerServiceInstance(clustername, appid);

    // log the YARN and web UIs
    log.info("RM Webapp address {}", serviceConf.get(YarnConfiguration.RM_WEBAPP_ADDRESS));
    log.info("slider Webapp address {}", appMasterTrackingUrl);

    // declare the cluster initialized
    log.info("Application Master Initialization Completed");
    initCompleted.set(true);

    try {
        // start handling any scheduled events

        startQueueProcessing();

        // Start the Slider AM provider
        sliderAMProvider.start();

        // launch the real provider; this is expected to trigger a callback that
        // starts the node review process
        launchProviderService(instanceDefinition, confDir);

        //now block waiting to be told to exit the process
        waitForAMCompletionSignal();
    } catch (Exception e) {
        log.error("Exception : {}", e, e);
        onAMStop(new ActionStopSlider(e));
    }
    //shutdown time
    return finish();
}

From source file: org.apache.slider.server.appmaster.SliderAppMaster.java

License: Apache License

/**
 * Ensure that the user is generated from a keytab and has no HDFS delegation
 * tokens.
 *
 * @param user user to validate
 * @throws SliderException
 */
protected void validateLoginUser(UserGroupInformation user) throws SliderException {
    if (!user.isFromKeytab()) {
        throw new SliderException(SliderExitCodes.EXIT_BAD_STATE,
                "User is not based on a keytab in a secure deployment.");
    }
    Credentials credentials = user.getCredentials();
    Iterator<Token<? extends TokenIdentifier>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<? extends TokenIdentifier> token = iter.next();
        log.info("Token {}", token.getKind());
        if (token.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)) {
            log.info("HDFS delegation token {}.  Removing...", token);
            iter.remove();
        }
    }
}

From source file: org.apache.sqoop.connector.hdfs.security.SecurityUtils.java

License: Apache License

/**
 * Generate delegation tokens for the current user (this code is supposed to run inside doAs)
 * and store them serialized in the given mutable context.
 */
 */
static public void generateDelegationTokens(MutableContext context, Path path, Configuration configuration)
        throws IOException {
    if (!UserGroupInformation.isSecurityEnabled()) {
        LOG.info("Running on unsecured cluster, skipping delegation token generation.");
        return;
    }

    // String representation of all tokens that we will create (most likely a single one)
    List<String> tokens = new LinkedList<>();

    Credentials credentials = new Credentials();
    TokenCache.obtainTokensForNamenodes(credentials, new Path[] { path }, configuration);
    for (Token token : credentials.getAllTokens()) {
        LOG.info("Generated token: " + token.toString());
        tokens.add(serializeToken(token));
    }

    // The context classes are transferred via "Credentials" rather than with the jobconf, so we're not leaking the DT out here
    if (tokens.size() > 0) {
        context.setString(HdfsConstants.DELEGATION_TOKENS, StringUtils.join(tokens, " "));
    }
}
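
The serializeToken helper used above is not shown on this page. Hadoop's Token class ships a URL-safe base64 encoding, so a reasonable stand-in (an assumption, not necessarily Sqoop's actual implementation) pairs encodeToUrlString with decodeFromUrlString on the consuming side:

// Hypothetical helper built on Hadoop's own token encoding
static String serializeToken(Token<?> token) throws IOException {
    return token.encodeToUrlString();
}

// Consumer side: rebuild a token from the string stored in the context
static Token<?> deserializeToken(String encoded) throws IOException {
    Token<TokenIdentifier> token = new Token<TokenIdentifier>();
    token.decodeFromUrlString(encoded);
    return token;
}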

From source file: org.apache.storm.common.AbstractAutoCreds.java

License: Apache License

private void addTokensToUGI(Subject subject) {
    if (subject != null) {
        Set<Credentials> privateCredentials = subject.getPrivateCredentials(Credentials.class);
        if (privateCredentials != null) {
            for (Credentials cred : privateCredentials) {
                Collection<Token<? extends TokenIdentifier>> allTokens = cred.getAllTokens();
                if (allTokens != null) {
                    for (Token<? extends TokenIdentifier> token : allTokens) {
                        try {
                            UserGroupInformation.getCurrentUser().addToken(token);
                            LOG.info("Added delegation tokens to UGI.");
                        } catch (IOException e) {
                            LOG.error("Exception while trying to add tokens to ugi", e);
                        }
                    }
                }
            }
        }
    }
}

From source file: org.apache.storm.common.AbstractHadoopAutoCreds.java

License: Apache License

private void addTokensToUGI(Subject subject) {
    if (subject != null) {
        Set<Credentials> privateCredentials = subject.getPrivateCredentials(Credentials.class);
        if (privateCredentials != null) {
            for (Credentials cred : privateCredentials) {
                Collection<Token<? extends TokenIdentifier>> allTokens = cred.getAllTokens();
                if (allTokens != null) {
                    for (Token<? extends TokenIdentifier> token : allTokens) {
                        try {
                            if (token == null) {
                                LOG.debug("Ignoring null token");
                                continue;
                            }

                            LOG.debug("Current user: {}", UserGroupInformation.getCurrentUser());
                            LOG.debug("Token from Credentials : {}", token);

                            TokenIdentifier tokenId = token.decodeIdentifier();
                            if (tokenId != null) {
                                LOG.debug("Token identifier : {}", tokenId);
                                LOG.debug("Username in token identifier : {}", tokenId.getUser());
                            }

                            UserGroupInformation.getCurrentUser().addToken(token);
                            LOG.info("Added delegation tokens to UGI.");
                        } catch (IOException e) {
                            LOG.error("Exception while trying to add tokens to ugi", e);
                        }
                    }
                }
            }
        }
    }
}

From source file: org.apache.storm.hbase.security.AutoHBaseNimbus.java

License: Apache License

@SuppressWarnings("unchecked")
protected byte[] getHadoopCredentials(Map<String, Object> conf, Configuration hbaseConf,
        final String topologySubmitterUser) {
    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            UserProvider provider = UserProvider.instantiate(hbaseConf);
            provider.login(HBASE_KEYTAB_FILE_KEY, HBASE_PRINCIPAL_KEY,
                    InetAddress.getLocalHost().getCanonicalHostName());

            LOG.info("Logged into Hbase as principal = " + hbaseConf.get(HBASE_PRINCIPAL_KEY));

            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

            final UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(topologySubmitterUser,
                    ugi);

            User user = User.create(proxyUser);

            if (user.isHBaseSecurityEnabled(hbaseConf)) {
                final Connection connection = ConnectionFactory.createConnection(hbaseConf, user);
                TokenUtil.obtainAndCacheToken(connection, user);

                LOG.info("Obtained HBase tokens, adding to user credentials.");

                Credentials credential = proxyUser.getCredentials();

                for (Token<? extends TokenIdentifier> tokenForLog : credential.getAllTokens()) {
                    LOG.debug("Obtained token info in credential: {} / {}", tokenForLog.toString(),
                            tokenForLog.decodeIdentifier().getUser());
                }

                ByteArrayOutputStream bao = new ByteArrayOutputStream();
                ObjectOutputStream out = new ObjectOutputStream(bao);
                credential.write(out);
                out.flush();
                out.close();
                return bao.toByteArray();
            } else {
                throw new RuntimeException("Security is not enabled for HBase.");
            }
        } else {
            throw new RuntimeException("Security is not enabled for Hadoop");
        }
    } catch (Exception ex) {
        throw new RuntimeException("Failed to get delegation tokens.", ex);
    }
}
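
The serialized byte[] returned above is later shipped to the topology's workers. A sketch of the matching read path, assuming the same ObjectOutputStream framing (Credentials implements Writable, and ObjectInputStream implements DataInput), would be:

// Hypothetical worker-side counterpart: rebuild the Credentials from the bytes
static Credentials readCredentials(byte[] bytes) throws IOException {
    try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
        Credentials credentials = new Credentials();
        credentials.readFields(in);
        return credentials;
    }
}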

From source file: org.apache.tez.common.impl.LogUtils.java

License: Apache License

public static void logCredentials(Log log, Credentials credentials, String identifier) {
    if (log.isDebugEnabled()) {
        StringBuilder sb = new StringBuilder();
        sb.append("#" + identifier + "Tokens=").append(credentials.numberOfTokens());
        if (credentials.numberOfTokens() > 0) {
            sb.append(", Services: ");
            for (Token<?> t : credentials.getAllTokens()) {
                sb.append(t.getService()).append(",");
            }
        }
        log.debug(sb.toString());
    }
}
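
A hypothetical call site for logCredentials; the LOG field and the choice of credentials source are placeholders, not taken from Tez:

// Dump the services of all attached tokens at debug level before submitting work
LogUtils.logCredentials(LOG, UserGroupInformation.getCurrentUser().getCredentials(), "am");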

From source file: org.apache.tez.common.security.TestTokenCache.java

License: Apache License

@Test(timeout = 5000)
@SuppressWarnings("deprecation")
public void testBinaryCredentials() throws Exception {
    String binaryTokenFile = null;
    try {
        Path TEST_ROOT_DIR = new Path("target");
        binaryTokenFile = FileSystem.getLocal(conf).makeQualified(new Path(TEST_ROOT_DIR, "tokenFile")).toUri()
                .getPath();

        MockFileSystem fs1 = createFileSystemForServiceName("service1");
        MockFileSystem fs2 = createFileSystemForServiceName("service2");
        MockFileSystem fs3 = createFileSystemForServiceName("service3");

        // get the tokens for fs1 & fs2 and write out to binary creds file
        Credentials creds = new Credentials();
        Token<?> token1 = fs1.getDelegationToken(renewer);
        Token<?> token2 = fs2.getDelegationToken(renewer);
        creds.addToken(token1.getService(), token1);
        creds.addToken(token2.getService(), token2);
        creds.writeTokenStorageFile(new Path(binaryTokenFile), conf);

        Credentials newCreds = new Credentials();
        TokenCache.mergeBinaryTokens(newCreds, conf, binaryTokenFile);

        Assert.assertTrue(newCreds.getAllTokens().size() > 0);
        checkTokens(creds, newCreds);
    } finally {
        if (binaryTokenFile != null) {
            try {
                FileSystem.getLocal(conf).delete(new Path(binaryTokenFile));
            } catch (IOException e) {
                // Ignore
            }
        }
    }
}
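
The checkTokens helper is not shown on this page. A plausible version, written as an assumption about its intent, asserts that every token written to the binary file survives the merge:

// Assumed helper: verify the merged credentials contain the original tokens
private static void checkTokens(Credentials creds, Credentials newCreds) {
    Assert.assertEquals(creds.getAllTokens().size(), newCreds.getAllTokens().size());
    for (Token<?> token : creds.getAllTokens()) {
        Assert.assertNotNull("missing token for " + token.getService(),
                newCreds.getToken(token.getService()));
    }
}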