Example usage for org.apache.hadoop.security SecurityUtil buildTokenService

Introduction

On this page you can find example usages of org.apache.hadoop.security.SecurityUtil#buildTokenService.

Prototype

public static Text buildTokenService(URI uri) 

Document

Construct the service key for a token.
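
The method is overloaded; an InetSocketAddress variant appears throughout the examples below. A minimal sketch of both forms (the host name and port are placeholders, not part of the Hadoop API):

import java.net.InetSocketAddress;
import java.net.URI;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.SecurityUtil;

public class BuildTokenServiceExample {
    public static void main(String[] args) {
        // From a URI: the authority (host:port) becomes the service key.
        Text fromUri = SecurityUtil.buildTokenService(URI.create("hdfs://namenode.example.com:8020"));

        // From a socket address; host and port here are placeholders.
        Text fromAddress = SecurityUtil.buildTokenService(new InetSocketAddress("namenode.example.com", 8020));

        // Both yield a host:port (or ip:port) service key, depending on the
        // hadoop.security.token.service.use_ip setting.
        System.out.println(fromUri + " " + fromAddress);
    }
}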

Usage

From source file: co.cask.cdap.common.security.YarnTokenUtils.java

License: Apache License

/**
 * Gets a Yarn delegation token and stores it in the given Credentials.
 *
 * @return the same Credentials instance that was passed in.
 */
public static Credentials obtainToken(YarnConfiguration configuration, Credentials credentials) {
    if (!UserGroupInformation.isSecurityEnabled()) {
        return credentials;
    }

    try {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(configuration);
        yarnClient.start();

        try {
            Text renewer = new Text(UserGroupInformation.getCurrentUser().getShortUserName());
            org.apache.hadoop.yarn.api.records.Token rmDelegationToken = yarnClient
                    .getRMDelegationToken(renewer);

            // TODO: The following logic should be replaced with call to ClientRMProxy.getRMDelegationTokenService after
            // CDAP-4825 is resolved
            List<String> services = new ArrayList<>();
            if (HAUtil.isHAEnabled(configuration)) {
                // If HA is enabled, we need to enumerate all RM hosts
                // and add the corresponding service name to the token service
                // Copy the yarn conf since we need to modify it to get the RM addresses
                YarnConfiguration yarnConf = new YarnConfiguration(configuration);
                for (String rmId : HAUtil.getRMHAIds(configuration)) {
                    yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
                    InetSocketAddress address = yarnConf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
                            YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);
                    services.add(SecurityUtil.buildTokenService(address).toString());
                }
            } else {
                services.add(SecurityUtil.buildTokenService(YarnUtils.getRMAddress(configuration)).toString());
            }

            Token<TokenIdentifier> token = ConverterUtils.convertFromYarn(rmDelegationToken,
                    (InetSocketAddress) null);
            token.setService(new Text(Joiner.on(',').join(services)));
            credentials.addToken(new Text(token.getService()), token);

            // OK to log, it won't log the credential, only information about the token.
            LOG.info("Added RM delegation token: {}", token);

        } finally {
            yarnClient.stop();
        }

        return credentials;
    } catch (Exception e) {
        LOG.error("Failed to get secure token for Yarn.", e);
        throw Throwables.propagate(e);
    }
}
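
A hypothetical call site for the helper above (a sketch only; on an insecure cluster obtainToken is a no-op and returns the credentials unchanged):

import co.cask.cdap.common.security.YarnTokenUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ObtainTokenExample {
    public static void main(String[] args) {
        // Sketch: fill a Credentials object with the RM delegation token,
        // then serialize it into a container launch context as needed.
        Credentials credentials = YarnTokenUtils.obtainToken(new YarnConfiguration(), new Credentials());
    }
}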

From source file: com.bigstep.datalake.DLFileSystem.java

License: Apache License

@Override
public synchronized void initialize(URI uri, Configuration conf) throws IOException {
    super.initialize(uri, conf);

    uri = selectDatalakeEndpointURI(uri, conf);

    /* set user pattern based on configuration file */
    UserParam.setUserPattern(conf.get(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
            DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

    kerberosIdentity = initialiseKerberosIdentity(conf);

    this.shouldUseEncryption = conf.getBoolean(FS_DL_IMPL_SHOULD_USE_ENCRYPTION_CONFIG_NAME, false);
    if (this.shouldUseEncryption) {
        initialiseAesEncryption(conf);
    }

    this.homeDirectory = conf.get(FS_DL_IMPL_HOME_DIRECTORY);

    if (homeDirectory == null)
        throw new IOException(
                "The Datalake requires a home directory to be configured in the fs.dl.impl.homeDirectory configuration variable. This is in the form /data_lake/dlxxxx");

    this.defaultEndpoint = conf.get(FS_DL_IMPL_DEFAULT_ENDPOINT);

    if (defaultEndpoint == null)
        throw new IOException(
                "The Datalake requires a default endpoint to be configured the fs.dl.impl.defaultEndpoint configuration variable. This is in the form /data_lake/dlxxxx");

    URI defaultEndpointURI = URI.create(defaultEndpoint);

    String authority = uri.getAuthority() == null ? defaultEndpointURI.getAuthority() : uri.getAuthority();

    this.baseUri = URI.create(uri.getScheme() + "://" + authority + this.homeDirectory);
    this.nnAddrs = resolveNNAddr();

    LOG.debug("Created kerberosIdentity " + kerberosIdentity + " for " + this.baseUri);

    boolean isHA = HAUtil.isClientFailoverConfigured(conf, this.baseUri);
    boolean isLogicalUri = isHA && HAUtil.isLogicalUri(conf, this.baseUri);
    // In non-HA or non-logical URI case, the code needs to call
    // getCanonicalUri() in order to handle the case where no port is
    // specified in the URI
    this.tokenServiceName = isLogicalUri ? HAUtil.buildTokenServiceForLogicalUri(this.baseUri, getScheme())
            : SecurityUtil.buildTokenService(getCanonicalUri());

    if (!isHA) {
        this.retryPolicy = RetryUtils.getDefaultRetryPolicy(conf,
                DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY,
                DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
                DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY,
                DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT, SafeModeException.class);
    } else {

        int maxFailoverAttempts = conf.getInt(DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
                DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
        int maxRetryAttempts = conf.getInt(DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
                DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
        int failoverSleepBaseMillis = conf.getInt(DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
                DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
        int failoverSleepMaxMillis = conf.getInt(DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
                DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);

        this.retryPolicy = RetryPolicies.failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
                maxFailoverAttempts, maxRetryAttempts, failoverSleepBaseMillis, failoverSleepMaxMillis);
    }

    this.workingDir = getHomeDirectory();
    //Delegation tokens don't work with httpfs
    this.canRefreshDelegationToken = false;
    this.disallowFallbackToInsecureCluster = !conf.getBoolean(
            CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
            CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
    this.delegationToken = null;

    this.defaultFilePermissions = Short
            .decode(conf.get(FS_DL_IMPL_DEFAULT_FILE_PERMISSIONS, this.DEFAULT_FILE_PERMISSIONS));
    this.defaultUMask = Short.decode(conf.get(FS_DL_IMPL_DEFAULT_UMASK, this.DEFAULT_UMASK));

    this.transportScheme = conf.get(FS_DL_IMPL_TRANSPORT_SCHEME_CONFIG_NAME,
            FS_DL_IMPL_DEFAULT_TRANSPORT_SCHEME);

    if (!checkJCE())
        throw new IOException(JCE_ERROR);

}
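
Note the two branches above: with an HA logical URI the token service is the logical cluster name (via HAUtil.buildTokenServiceForLogicalUri), while in the non-HA case SecurityUtil.buildTokenService pins the service to a concrete host:port, which is why getCanonicalUri() is used to fill in a default port when the URI omits one.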

From source file: com.datatorrent.stram.StramClient.java

License: Apache License

private Token<RMDelegationTokenIdentifier> getRMHAToken(
        org.apache.hadoop.yarn.api.records.Token rmDelegationToken) {
    // Build a list of service addresses to form the service name
    ArrayList<String> services = new ArrayList<String>();
    for (String rmId : conf.getStringCollection(RM_HA_IDS)) {
        LOG.info("Yarn Resource Manager id: {}", rmId);
        // Set RM_ID to get the corresponding RM_ADDRESS
        services.add(
                SecurityUtil.buildTokenService(NetUtils.createSocketAddr(conf.get(RM_HOSTNAME_PREFIX + rmId),
                        YarnConfiguration.DEFAULT_RM_PORT, RM_HOSTNAME_PREFIX + rmId)).toString());
    }
    Text rmTokenService = new Text(Joiner.on(',').join(services));

    return new Token<RMDelegationTokenIdentifier>(rmDelegationToken.getIdentifier().array(),
            rmDelegationToken.getPassword().array(), new Text(rmDelegationToken.getKind()), rmTokenService);
}
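
The comma-joined service string mirrors what newer Hadoop releases build in ClientRMProxy.getRMDelegationTokenService: listing every RM address in a single token's service key lets that one token be selected regardless of which ResourceManager the client ends up talking to.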

From source file: com.mellanox.r4h.MiniDFSCluster.java

License: Apache License

/**
 * Modify the config and start up additional DataNodes. The info port for
 * DataNodes is guaranteed to use a free port.
 *
 * Data nodes can run with the name node in the mini cluster or
 * a real name node. For example, running with a real name node is useful
 * when running simulated data nodes with a real name node.
 * If minicluster's name node is null assume that the conf has been
 * set with the right address:port of the name node.
 *
 * @param conf
 *            the base configuration to use in starting the DataNodes. This
 *            will be modified as necessary.
 * @param numDataNodes
 *            Number of DataNodes to start; may be zero
 * @param manageDfsDirs
 *            if true, the data directories for DataNodes will be
 *            created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be
 *            set in the conf
 * @param operation
 *            the operation with which to start the DataNodes. If null
 *            or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
 * @param racks
 *            array of strings indicating the rack that each DataNode is on
 * @param hosts
 *            array of strings indicating the hostnames for each DataNode
 * @param simulatedCapacities
 *            array of capacities of the simulated data nodes
 * @param setupHostsFile
 *            add new nodes to dfs hosts files
 * @param checkDataNodeAddrConfig
 *            if true, only set DataNode port addresses if not already set in config
 * @param checkDataNodeHostConfig
 *            if true, only set DataNode hostname key if not already set in config
 * @param dnConfOverlays
 *            An array of {@link Configuration} objects that will overlay the
 *            global MiniDFSCluster Configuration for the corresponding DataNode.
 * @throws IllegalStateException
 *             if NameNode has been shutdown
 */
public synchronized void startDataNodes(Configuration conf, int numDataNodes, StorageType storageType,
        boolean manageDfsDirs, StartupOption operation, String[] racks, String[] hosts,
        long[] simulatedCapacities, boolean setupHostsFile, boolean checkDataNodeAddrConfig,
        boolean checkDataNodeHostConfig, Configuration[] dnConfOverlays) throws IOException {
    if (operation == StartupOption.RECOVER) {
        return;
    }
    if (checkDataNodeHostConfig) {
        conf.setIfUnset(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    } else {
        conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    }

    int curDatanodesNum = dataNodes.size();
    // for the minicluster, the default initial delay for block reports is 0
    if (conf.get(DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
        conf.setLong(DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
    }
    // If minicluster's name node is null assume that the conf has been
    // set with the right address:port of the name node.
    //
    if (racks != null && numDataNodes > racks.length) {
        throw new IllegalArgumentException("The length of racks [" + racks.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    if (hosts != null && numDataNodes > hosts.length) {
        throw new IllegalArgumentException("The length of hosts [" + hosts.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    // Generate some hostnames if required
    if (racks != null && hosts == null) {
        hosts = new String[numDataNodes];
        for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
            hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
        }
    }

    if (simulatedCapacities != null && numDataNodes > simulatedCapacities.length) {
        throw new IllegalArgumentException("The length of simulatedCapacities [" + simulatedCapacities.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }

    if (dnConfOverlays != null && numDataNodes > dnConfOverlays.length) {
        throw new IllegalArgumentException("The length of dnConfOverlays [" + dnConfOverlays.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }

    String[] dnArgs = (operation == null || operation != StartupOption.ROLLBACK) ? null
            : new String[] { operation.getName() };

    for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
        Configuration dnConf = new HdfsConfiguration(conf);
        if (dnConfOverlays != null) {
            dnConf.addResource(dnConfOverlays[i]);
        }
        // Set up datanode address
        setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
        if (manageDfsDirs) {
            String dirs = makeDataNodeDirs(i, storageType);
            dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
            conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
        }
        if (simulatedCapacities != null) {
            SimulatedFSDataset.setFactory(dnConf);
            dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
                    simulatedCapacities[i - curDatanodesNum]);
        }
        LOG.info("Starting DataNode " + i + " with " + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": "
                + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
        if (hosts != null) {
            dnConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hosts[i - curDatanodesNum]);
            LOG.info("Starting DataNode " + i + " with hostname set to: "
                    + dnConf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
        }
        if (racks != null) {
            String name = hosts[i - curDatanodesNum];
            LOG.info("Adding node with hostname : " + name + " to rack " + racks[i - curDatanodesNum]);
            StaticMapping.addNodeToRack(name, racks[i - curDatanodesNum]);
        }
        Configuration newconf = new HdfsConfiguration(dnConf); // save config
        if (hosts != null) {
            NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
        }

        SecureResources secureResources = null;
        if (UserGroupInformation.isSecurityEnabled() && conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY) == null) {
            try {
                secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
            } catch (Exception ex) {
                ex.printStackTrace();
            }
        }
        final int maxRetriesOnSasl = conf.getInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY,
                IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT);
        int numRetries = 0;
        DataNode dn = null;
        while (true) {
            try {
                dn = DataNode.instantiateDataNode(dnArgs, dnConf, secureResources);
                break;
            } catch (IOException e) {
                // Work around issue testing security where rapidly starting multiple
                // DataNodes using the same principal gets rejected by the KDC as a
                // replay attack.
                if (UserGroupInformation.isSecurityEnabled() && numRetries < maxRetriesOnSasl) {
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        break;
                    }
                    ++numRetries;
                    continue;
                }
                throw e;
            }
        }
        if (dn == null)
            throw new IOException("Cannot start DataNode in " + dnConf.get(DFS_DATANODE_DATA_DIR_KEY));
        // since the HDFS does things based on host|ip:port, we need to add the
        // mapping for the service to rackId
        String service = SecurityUtil.buildTokenService(dn.getXferAddress()).toString();
        if (racks != null) {
            LOG.info("Adding node with service : " + service + " to rack " + racks[i - curDatanodesNum]);
            StaticMapping.addNodeToRack(service, racks[i - curDatanodesNum]);
        }
        dn.runDatanodeDaemon();
        dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs, secureResources, dn.getIpcPort()));
    }
    curDatanodesNum += numDataNodes;
    this.numDataNodes += numDataNodes;
    waitActive();
}
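
A hypothetical invocation of the method above from a test (a sketch only; it assumes cluster is an already-started MiniDFSCluster instance and that this fork keeps the stock Hadoop signature):

// Sketch: add two DataNodes on explicit racks to a running mini cluster.
Configuration conf = new HdfsConfiguration();
cluster.startDataNodes(conf, 2, StorageType.DEFAULT,
        true,                // manageDfsDirs: create and configure data dirs
        null,                // operation: null defaults to StartupOption.REGULAR
        new String[] { "/rack1", "/rack2" },
        null,                // hosts: generated automatically when racks are set
        null,                // simulatedCapacities: use real storage
        false, false, false, // hosts file / addr config / host config checks
        null);               // dnConfOverlays: no per-DataNode overrides
cluster.waitActive();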

From source file: org.apache.oozie.action.hadoop.JHSCredentials.java

License: Apache License

/**
 * Add an MR_DELEGATION_TOKEN to the {@link Credentials} provided.
 * @param credentials the credentials object which is updated
 * @param config launcher AM configuration
 * @param props properties for getting credential token or certificate
 * @param context workflow context
 * @throws Exception thrown if failed
 */
@Override
public void updateCredentials(Credentials credentials, Configuration config, CredentialsProperties props,
        ActionExecutor.Context context) throws Exception {
    try {
        LOG.debug("Instantiating JHS Proxy");
        MRClientProtocol hsProxy = instantiateHistoryProxy(config, context);
        Text hsService = SecurityUtil.buildTokenService(hsProxy.getConnectAddress());
        LOG.debug("Getting delegation token for {0}", hsService.toString());
        Token<?> jhsToken = getDelegationTokenFromJHS(hsProxy,
                new HadoopTokenHelper().getServerPrincipal(config));
        LOG.debug("Acquired token {0}", jhsToken);
        credentials.addToken(hsService, jhsToken);
    } catch (IOException | InterruptedException ex) {
        LOG.debug("exception in updateCredentials", ex);
        throw new CredentialException(ErrorCode.E0512, ex.getMessage(), ex);
    }
}
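
Here buildTokenService derives the service key from the JobHistoryServer's connect address, so any client that later computes the same host:port key can look the token up in the credentials.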

From source file: org.apache.sqoop.client.request.ResourceRequest.java

License: Apache License

private Text getDelegationTokenService(String strURL) throws IOException {
    URL url = new URL(strURL);
    InetSocketAddress addr = new InetSocketAddress(url.getHost(), url.getPort());
    Text dtService = SecurityUtil.buildTokenService(addr);
    return dtService;
}
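
One caveat with a helper like this: java.net.URL#getPort() returns -1 when the URL carries no explicit port, so callers are expected to pass a URL that spells the port out.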

From source file: org.apache.twill.internal.yarn.Hadoop23YarnAppClient.java

License: Apache License

/**
 * Overrides parent method to add the RM delegation token to the given context. If YARN is running with HA RM,
 * delegation tokens for each RM service will be added.
 */
protected void addRMToken(ContainerLaunchContext context, YarnClient yarnClient, ApplicationId appId) {
    if (!UserGroupInformation.isSecurityEnabled()) {
        return;
    }

    try {
        Text renewer = new Text(UserGroupInformation.getCurrentUser().getShortUserName());
        org.apache.hadoop.yarn.api.records.Token rmDelegationToken = yarnClient.getRMDelegationToken(renewer);

        // The following logic is copied from ClientRMProxy.getRMDelegationTokenService, which is not available in
        // YARN older than 2.4
        List<String> services = new ArrayList<>();
        if (HAUtil.isHAEnabled(configuration)) {
            // If HA is enabled, we need to enumerate all RM hosts
            // and add the corresponding service name to the token service
            // Copy the yarn conf since we need to modify it to get the RM addresses
            YarnConfiguration yarnConf = new YarnConfiguration(configuration);
            for (String rmId : HAUtil.getRMHAIds(configuration)) {
                yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
                InetSocketAddress address = yarnConf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
                        YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);
                services.add(SecurityUtil.buildTokenService(address).toString());
            }
        } else {
            services.add(SecurityUtil.buildTokenService(YarnUtils.getRMAddress(configuration)).toString());
        }

        Credentials credentials = YarnUtils.decodeCredentials(context.getTokens());

        // casting needed for later Hadoop version
        @SuppressWarnings("RedundantCast")
        Token<TokenIdentifier> token = (Token<TokenIdentifier>) ConverterUtils.convertFromYarn(rmDelegationToken,
                (InetSocketAddress) null);

        token.setService(new Text(Joiner.on(',').join(services)));
        credentials.addToken(new Text(token.getService()), token);

        LOG.debug("Added RM delegation token {} for application {}", token, appId);

        context.setTokens(YarnUtils.encodeCredentials(credentials));

    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
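
Unlike the first example, the token here is injected into an existing ContainerLaunchContext: the current tokens are decoded into a Credentials object, the RM token is added with the combined service name, and the result is re-encoded onto the context.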