Example usage for org.apache.hadoop.security Credentials addToken

List of usage examples for org.apache.hadoop.security Credentials addToken

Introduction

On this page you can find example usages of org.apache.hadoop.security.Credentials.addToken.

Prototype

public void addToken(Text alias, Token<? extends TokenIdentifier> t) 

Document

Add a token in the storage (in memory).
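Before the real-world examples below, here is a minimal, self-contained sketch of the call. The service string and the empty token are illustrative placeholders, not values prescribed by the Hadoop API:

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class AddTokenSketch {
    public static void main(String[] args) {
        Credentials credentials = new Credentials();

        // In practice a token comes from a service (see the examples below);
        // an empty Token is constructed here only to show the call shape.
        Token<?> token = new Token<>();
        token.setService(new Text("hdfs://namenode:8020"));

        // Store the token in memory under an alias; adding another token
        // under the same alias replaces the earlier one.
        credentials.addToken(token.getService(), token);

        // The alias is also the lookup key.
        Token<?> retrieved = credentials.getToken(new Text("hdfs://namenode:8020"));
        System.out.println("retrieved service = " + retrieved.getService());
        System.out.println("numberOfTokens = " + credentials.numberOfTokens());
    }
}

As the examples show, the service of the token itself is the most common choice of alias, but any Text key works.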

Usage

From source file: gobblin.hadoop.token.TokenUtils.java

License: Open Source License

private static void getHdfsToken(Configuration conf, Credentials cred) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    LOG.info("Getting DFS token from " + fs.getUri());
    Token<?> fsToken = fs.getDelegationToken(getMRTokenRenewerInternal(new JobConf()).toString());
    if (fsToken == null) {
        LOG.error("Failed to fetch DFS token for ");
        throw new IOException("Failed to fetch DFS token.");
    }
    LOG.info("Created DFS token: " + fsToken.toString());
    LOG.info("Token kind: " + fsToken.getKind());
    LOG.info("Token id: " + fsToken.getIdentifier());
    LOG.info("Token service: " + fsToken.getService());

    cred.addToken(fsToken.getService(), fsToken);
}

From source file: gobblin.hadoop.token.TokenUtils.java

License: Open Source License

private static void getJtToken(Credentials cred) throws IOException {
    try {
        JobConf jobConf = new JobConf();
        JobClient jobClient = new JobClient(jobConf);
        LOG.info("Pre-fetching JT token from JobTracker");

        Token<DelegationTokenIdentifier> mrdt = jobClient
                .getDelegationToken(getMRTokenRenewerInternal(jobConf));
        if (mrdt == null) {
            LOG.error("Failed to fetch JT token");
            throw new IOException("Failed to fetch JT token.");
        }
        LOG.info("Created JT token: " + mrdt.toString());
        LOG.info("Token kind: " + mrdt.getKind());
        LOG.info("Token id: " + mrdt.getIdentifier());
        LOG.info("Token service: " + mrdt.getService());
        cred.addToken(mrdt.getService(), mrdt);
    } catch (InterruptedException ie) {
        throw new IOException(ie);
    }
}

From source file: gobblin.util.hadoop.TokenUtils.java

License: Apache License

private static void getJhToken(Configuration conf, Credentials cred) throws IOException {
    YarnRPC rpc = YarnRPC.create(conf);
    final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);

    LOG.debug("Connecting to HistoryServer at: " + serviceAddr);
    HSClientProtocol hsProxy = (HSClientProtocol) rpc.getProxy(HSClientProtocol.class,
            NetUtils.createSocketAddr(serviceAddr), conf);
    LOG.info("Pre-fetching JH token from job history server");

    Token<?> jhToken = null;
    try {
        jhToken = getDelegationTokenFromHS(hsProxy, conf);
    } catch (Exception exc) {
        throw new IOException("Failed to fetch JH token.", exc);
    }

    if (jhToken == null) {
        LOG.error("getDelegationTokenFromHS() returned null");
        throw new IOException("Unable to fetch JH token.");
    }

    LOG.info("Created JH token: " + jhToken.toString());
    LOG.info("Token kind: " + jhToken.getKind());
    LOG.info("Token id: " + Arrays.toString(jhToken.getIdentifier()));
    LOG.info("Token service: " + jhToken.getService());

    cred.addToken(jhToken.getService(), jhToken);
}

From source file: gobblin.util.hadoop.TokenUtils.java

License: Apache License

private static void getHdfsToken(Configuration conf, Credentials cred) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    LOG.info("Getting DFS token from " + fs.getUri());
    Token<?> fsToken = fs.getDelegationToken(getMRTokenRenewerInternal(new JobConf()).toString());
    if (fsToken == null) {
        LOG.error("Failed to fetch DFS token for ");
        throw new IOException("Failed to fetch DFS token.");
    }
    LOG.info("Created DFS token: " + fsToken.toString());
    LOG.info("Token kind: " + fsToken.getKind());
    LOG.info("Token id: " + Arrays.toString(fsToken.getIdentifier()));
    LOG.info("Token service: " + fsToken.getService());

    cred.addToken(fsToken.getService(), fsToken);
}

From source file: gobblin.util.hadoop.TokenUtils.java

License: Apache License

private static void getJtToken(Credentials cred) throws IOException {
    try {
        JobConf jobConf = new JobConf();
        JobClient jobClient = new JobClient(jobConf);
        LOG.info("Pre-fetching JT token from JobTracker");

        Token<DelegationTokenIdentifier> mrdt = jobClient
                .getDelegationToken(getMRTokenRenewerInternal(jobConf));
        if (mrdt == null) {
            LOG.error("Failed to fetch JT token");
            throw new IOException("Failed to fetch JT token.");
        }
        LOG.info("Created JT token: " + mrdt.toString());
        LOG.info("Token kind: " + mrdt.getKind());
        LOG.info("Token id: " + Arrays.toString(mrdt.getIdentifier()));
        LOG.info("Token service: " + mrdt.getService());
        cred.addToken(mrdt.getService(), mrdt);
    } catch (InterruptedException ie) {
        throw new IOException(ie);
    }
}

From source file: gobblin.yarn.YarnHelixUtils.java

License: Apache License

/**
 * Write a {@link Token} to a given file.
 *
 * @param token the token to write
 * @param tokenFilePath the token file path
 * @param configuration a {@link Configuration} object carrying Hadoop configuration properties
 * @throws IOException
 */
public static void writeTokenToFile(Token<? extends TokenIdentifier> token, Path tokenFilePath,
        Configuration configuration) throws IOException {
    Credentials credentials = new Credentials();
    credentials.addToken(token.getService(), token);
    credentials.writeTokenStorageFile(tokenFilePath, configuration);
}
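A file written this way can be read back with the matching static method on Credentials. A minimal sketch, assuming only that the file was produced by writeTokenToFile above (the class and method names are illustrative):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class ReadTokenFileSketch {
    public static void readBack(Path tokenFilePath, Configuration conf) throws IOException {
        // Counterpart of Credentials.writeTokenStorageFile(): load the serialized tokens.
        Credentials credentials = Credentials.readTokenStorageFile(tokenFilePath, conf);

        // Make the loaded tokens available to the current user, e.g. for secure RPC.
        UserGroupInformation.getCurrentUser().addCredentials(credentials);

        for (Token<? extends TokenIdentifier> token : credentials.getAllTokens()) {
            System.out.println("Loaded token kind=" + token.getKind() + " service=" + token.getService());
        }
    }
}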

From source file: org.apache.gobblin.util.hadoop.TokenUtils.java

License: Apache License

/**
 *
 * @param userToProxy The user that hiveClient impersonates when fetching the delegation tokens.
 * @param ugi The {@link UserGroupInformation} to which the negotiated credentials are added.
 */
public static void getHiveToken(final State state, IMetaStoreClient hiveClient, Credentials cred,
        final String userToProxy, UserGroupInformation ugi) {
    try {
        // Fetch the delegation token with "service" field overwritten with the metastore.uri configuration.
        // org.apache.gobblin.hive.HiveMetaStoreClientFactory.getHiveConf(com.google.common.base.Optional<java.lang.String>)
        // sets the signature field to the same value to retrieve the token correctly.
        HiveConf hiveConf = new HiveConf();
        Token<DelegationTokenIdentifier> hcatToken = fetchHcatToken(userToProxy, hiveConf,
                hiveConf.get(HiveConf.ConfVars.METASTOREURIS.varname), hiveClient);
        cred.addToken(hcatToken.getService(), hcatToken);
        ugi.addToken(hcatToken);

        // Fetch extra Hcat location user specified.
        final List<String> extraHcatLocations = state.contains(USER_DEFINED_HIVE_LOCATIONS)
                ? state.getPropAsList(USER_DEFINED_HIVE_LOCATIONS)
                : Collections.<String>emptyList();
        if (!extraHcatLocations.isEmpty()) {
            LOG.info("Need to fetch extra metaStore tokens from hive.");

            // start to process the user inputs.
            for (final String thriftUrl : extraHcatLocations) {
                LOG.info("Fetching metaStore token from : " + thriftUrl);

                hiveConf = new HiveConf();
                hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, thriftUrl);
                hcatToken = fetchHcatToken(userToProxy, hiveConf, thriftUrl, hiveClient);
                cred.addToken(hcatToken.getService(), hcatToken);
                ugi.addToken(hcatToken);

                LOG.info("Successfully fetched token for:" + thriftUrl);
            }
        }
    } catch (final Throwable t) {
        final String message = "Failed to get hive metastore token: " + t.getMessage();
        LOG.error(message, t);
        throw new RuntimeException(message, t);
    }
}

From source file: org.apache.hcatalog.mapreduce.Security.java

License: Apache License

void handleSecurity(Credentials credentials, OutputJobInfo outputJobInfo, HiveMetaStoreClient client,
        Configuration conf, boolean harRequested) throws IOException, MetaException, TException, Exception {
    if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // check if oozie has set up a hcat deleg. token - if so use it
        TokenSelector<? extends TokenIdentifier> hiveTokenSelector = new DelegationTokenSelector();
        //Oozie does not change the service field of the token
        //hence by default token generation will have a value of "new Text("")"
        //HiveClient will use TokenSelector.selectToken() with the service
        //set to empty "Text" if the hive.metastore.token.signature property is unset
        Token<? extends TokenIdentifier> hiveToken = hiveTokenSelector.selectToken(new Text(), ugi.getTokens());
        if (hiveToken == null) {
            // we did not get token set up by oozie, let's get them ourselves here.
            // we essentially get a token per unique Output HCatTableInfo - this is
            // done because through Pig, setOutput() method is called multiple times
            // We want to only get the token once per unique output HCatTableInfo -
            // we cannot just get one token since in multi-query case (> 1 store in 1 job)
            // or the case when a single pig script results in > 1 jobs, the single
            // token will get cancelled by the output committer and the subsequent
            // stores will fail - by tying the token with the concatenation of
            // dbname, tablename and partition keyvalues of the output
            // TableInfo, we can have as many tokens as there are stores and the TokenSelector
            // will correctly pick the right tokens which the committer will use and
            // cancel.
            String tokenSignature = getTokenSignature(outputJobInfo);
            // get delegation tokens from hcat server and store them into the "job"
            // These will be used to publish partitions to
            // hcat normally in OutputCommitter.commitJob()
            // when the JobTracker in Hadoop MapReduce starts supporting renewal of
            // arbitrary tokens, the renewer should be the principal of the JobTracker
            hiveToken = HCatUtil.extractThriftToken(client.getDelegationToken(ugi.getUserName()),
                    tokenSignature);

            if (harRequested) {
                TokenSelector<? extends TokenIdentifier> jtTokenSelector = new org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenSelector();
                Token jtToken = jtTokenSelector.selectToken(
                        org.apache.hadoop.security.SecurityUtil.buildTokenService(
                                HCatHadoopShims.Instance.get().getResourceManagerAddress(conf)),
                        ugi.getTokens());
                if (jtToken == null) {
                    //we don't need to cancel this token as the TokenRenewer for JT tokens
                    //takes care of cancelling them
                    credentials.addToken(new Text("hcat jt token"),
                            HCatUtil.getJobTrackerDelegationToken(conf, ugi.getUserName()));
                }
            }

            credentials.addToken(new Text(ugi.getUserName() + "_" + tokenSignature), hiveToken);
            // this will be used by the outputcommitter to pass on to the metastore client
            // which in turn will pass on to the TokenSelector so that it can select
            // the right token.
            conf.set(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE, tokenSignature);
        }
    }
}
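The long comment in the method above relies on each token being stored under a predictable alias so that it can be selected again later. A minimal sketch of that round trip, using the userName + "_" + tokenSignature alias scheme from the example (the class and method names are hypothetical):

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class AliasRoundTripSketch {
    // Store a token under its signature-derived alias, then select it back.
    // Distinct aliases keep distinct tokens, which is why one job with several
    // stores can carry several metastore tokens side by side.
    public static Token<?> roundTrip(Credentials credentials, String userName, String tokenSignature,
            Token<? extends TokenIdentifier> hiveToken) {
        Text alias = new Text(userName + "_" + tokenSignature);
        credentials.addToken(alias, hiveToken);
        return credentials.getToken(alias);
    }
}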

From source file: org.apache.hcatalog.templeton.SecureProxySupport.java

License: Apache License

private void writeProxyDelegationTokens(final Token<?> fsToken, final Token<?> msToken,
        final Configuration conf, String user, final Path tokenPath) throws IOException, InterruptedException {

    LOG.info("user: " + user + " loginUser: " + UserGroupInformation.getLoginUser().getUserName());
    final UserGroupInformation ugi = UgiFactory.getUgi(user);

    ugi.doAs(new PrivilegedExceptionAction<Object>() {
        public Object run() throws IOException {
            Credentials cred = new Credentials();
            cred.addToken(fsToken.getService(), fsToken);
            cred.addToken(msToken.getService(), msToken);
            cred.writeTokenStorageFile(tokenPath, conf);
            return null;
        }
    });

}

From source file: org.apache.hive.hcatalog.mapreduce.Security.java

License: Apache License

void handleSecurity(Credentials credentials, OutputJobInfo outputJobInfo, IMetaStoreClient client,
        Configuration conf, boolean harRequested) throws IOException, MetaException, TException, Exception {
    if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // check if oozie has set up a hcat deleg. token - if so use it
        TokenSelector<? extends TokenIdentifier> hiveTokenSelector = new DelegationTokenSelector();
        //Oozie does not change the service field of the token
        //hence by default token generation will have a value of "new Text("")"
        //HiveClient will use TokenSelector.selectToken() with the service
        //set to empty "Text" if the hive.metastore.token.signature property is unset
        Token<? extends TokenIdentifier> hiveToken = hiveTokenSelector.selectToken(new Text(), ugi.getTokens());
        if (hiveToken == null) {
            // we did not get token set up by oozie, let's get them ourselves here.
            // we essentially get a token per unique Output HCatTableInfo - this is
            // done because through Pig, setOutput() method is called multiple times
            // We want to only get the token once per unique output HCatTableInfo -
            // we cannot just get one token since in multi-query case (> 1 store in 1 job)
            // or the case when a single pig script results in > 1 jobs, the single
            // token will get cancelled by the output committer and the subsequent
            // stores will fail - by tying the token with the concatenation of
            // dbname, tablename and partition keyvalues of the output
            // TableInfo, we can have as many tokens as there are stores and the TokenSelector
            // will correctly pick the right tokens which the committer will use and
            // cancel.
            String tokenSignature = getTokenSignature(outputJobInfo);
            // get delegation tokens from hcat server and store them into the "job"
            // These will be used to publish partitions to
            // hcat normally in OutputCommitter.commitJob()
            // when the JobTracker in Hadoop MapReduce starts supporting renewal of
            // arbitrary tokens, the renewer should be the principal of the JobTracker
            hiveToken = HCatUtil.extractThriftToken(
                    client.getDelegationToken(ugi.getUserName(), ugi.getUserName()), tokenSignature);

            if (harRequested) {
                TokenSelector<? extends TokenIdentifier> jtTokenSelector = new org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenSelector();
                Token jtToken = jtTokenSelector.selectToken(
                        org.apache.hadoop.security.SecurityUtil.buildTokenService(
                                ShimLoader.getHadoopShims().getHCatShim().getResourceManagerAddress(conf)),
                        ugi.getTokens());
                if (jtToken == null) {
                    //we don't need to cancel this token as the TokenRenewer for JT tokens
                    //takes care of cancelling them
                    credentials.addToken(new Text("hcat jt token"),
                            HCatUtil.getJobTrackerDelegationToken(conf, ugi.getUserName()));
                }
            }

            credentials.addToken(new Text(ugi.getUserName() + "_" + tokenSignature), hiveToken);
            // this will be used by the outputcommitter to pass on to the metastore client
            // which in turn will pass on to the TokenSelector so that it can select
            // the right token.
            conf.set(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE, tokenSignature);
        }
    }
}