Example usage for org.apache.hadoop.security.token Token getService

Introduction

On this page you can find example usages of org.apache.hadoop.security.token.Token.getService().

Prototype

public Text getService() 

Document

Get the service on which the token is supposed to be used.
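
Before the project examples below, here is a minimal sketch of the typical pattern: obtain a delegation token, then use getService() as the key under which the token is registered in a Credentials object. The renewer name and the tokens.bin output file are illustrative placeholders, not values taken from the examples below.

import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class GetServiceSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // "renewer" is a placeholder for the principal allowed to renew the token.
        Token<?> fsToken = fs.getDelegationToken("renewer");
        if (fsToken == null) {
            throw new IOException("File system issued no delegation token");
        }

        // getService() identifies the service the token is meant for; it is
        // the conventional alias under which the token is stored.
        Text service = fsToken.getService();
        Credentials credentials = new Credentials();
        credentials.addToken(service, fsToken);

        // Persist the credentials so a launched process can load them later.
        try (DataOutputStream dos =
                new DataOutputStream(new FileOutputStream("tokens.bin"))) {
            credentials.writeTokenStorageToStream(dos);
        }
    }
}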

Usage

From source file:UnmanagedAMLauncher.java

License:Apache License

public void launchAM(ApplicationAttemptId attemptId) throws IOException, YarnException {
    Credentials credentials = new Credentials();
    Token<AMRMTokenIdentifier> token = rmClient.getAMRMToken(attemptId.getApplicationId());
    // Service will be empty, but that's okay; we are just passing the
    // AMRMToken down to the real AM, which eventually sets the correct
    // service address.
    credentials.addToken(token.getService(), token);
    File tokenFile = File.createTempFile("unmanagedAMRMToken", "", new File(System.getProperty("user.dir")));
    try {
        FileUtil.chmod(tokenFile.getAbsolutePath(), "600");
    } catch (InterruptedException ex) {
        throw new RuntimeException(ex);
    }
    tokenFile.deleteOnExit();
    DataOutputStream os = new DataOutputStream(new FileOutputStream(tokenFile, true));
    credentials.writeTokenStorageToStream(os);
    os.close();

    Map<String, String> env = System.getenv();
    ArrayList<String> envAMList = new ArrayList<String>();
    boolean setClasspath = false;
    for (Map.Entry<String, String> entry : env.entrySet()) {
        String key = entry.getKey();
        String value = entry.getValue();
        if (key.equals("CLASSPATH")) {
            setClasspath = true;
            if (classpath != null) {
                value = value + File.pathSeparator + classpath;
            }
        }
        envAMList.add(key + "=" + value);
    }

    if (!setClasspath && classpath != null) {
        envAMList.add("CLASSPATH=" + classpath);
    }
    ContainerId containerId = ContainerId.newContainerId(attemptId, 0);

    String hostname = InetAddress.getLocalHost().getHostName();
    envAMList.add(Environment.CONTAINER_ID.name() + "=" + containerId);
    envAMList.add(Environment.NM_HOST.name() + "=" + hostname);
    envAMList.add(Environment.NM_HTTP_PORT.name() + "=0");
    envAMList.add(Environment.NM_PORT.name() + "=0");
    envAMList.add(Environment.LOCAL_DIRS.name() + "=/tmp");
    envAMList.add(ApplicationConstants.APP_SUBMIT_TIME_ENV + "=" + System.currentTimeMillis());

    envAMList.add(ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME + "=" + tokenFile.getAbsolutePath());

    String[] envAM = new String[envAMList.size()];
    Process amProc = Runtime.getRuntime().exec(amCmd, envAMList.toArray(envAM));

    final BufferedReader errReader = new BufferedReader(new InputStreamReader(amProc.getErrorStream()));
    final BufferedReader inReader = new BufferedReader(new InputStreamReader(amProc.getInputStream()));

    // read error and input streams as this would free up the buffers
    // free the error stream buffer
    Thread errThread = new Thread() {
        @Override
        public void run() {
            try {
                String line = errReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    System.err.println(line);
                    line = errReader.readLine();
                }
            } catch (IOException ioe) {
                LOG.warn("Error reading the error stream", ioe);
            }
        }
    };
    Thread outThread = new Thread() {
        @Override
        public void run() {
            try {
                String line = inReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    System.out.println(line);
                    line = inReader.readLine();
                }
            } catch (IOException ioe) {
                LOG.warn("Error reading the out stream", ioe);
            }
        }
    };
    try {
        errThread.start();
        outThread.start();
    } catch (IllegalStateException ise) {
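        // nothing to do if the stream-reader threads cannot be started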
    }

    // wait for the process to finish and check the exit code
    try {
        int exitCode = amProc.waitFor();
        LOG.info("AM process exited with value: " + exitCode);
    } catch (InterruptedException e) {
        e.printStackTrace();
    } finally {
        amCompleted = true;
    }

    try {
        // make sure that the error thread exits
        // on Windows these threads sometimes get stuck and hang the execution
        // timeout and join later after destroying the process.
        errThread.join();
        outThread.join();
        errReader.close();
        inReader.close();
    } catch (InterruptedException ie) {
        LOG.info("ShellExecutor: Interrupted while reading the error/out stream", ie);
    } catch (IOException ioe) {
        LOG.warn("Error while closing the error/out stream", ioe);
    }
    amProc.destroy();
}

From source file:alluxio.yarn.Client.java

License:Apache License

private void setupContainerLaunchContext() throws IOException, YarnException {
    Map<String, String> applicationMasterArgs = ImmutableMap.<String, String>of("-num_workers",
            Integer.toString(mNumWorkers), "-master_address", mMasterAddress, "-resource_path", mResourcePath);

    final String amCommand = YarnUtils.buildCommand(YarnContainerType.APPLICATION_MASTER,
            applicationMasterArgs);

    System.out.println("ApplicationMaster command: " + amCommand);
    mAmContainer.setCommands(Collections.singletonList(amCommand));

    // Setup local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put("alluxio.tar.gz",
            YarnUtils.createLocalResourceOfFile(mYarnConf, mResourcePath + "/alluxio.tar.gz"));
    localResources.put("alluxio-yarn-setup.sh",
            YarnUtils.createLocalResourceOfFile(mYarnConf, mResourcePath + "/alluxio-yarn-setup.sh"));
    localResources.put("alluxio.jar",
            YarnUtils.createLocalResourceOfFile(mYarnConf, mResourcePath + "/alluxio.jar"));
    mAmContainer.setLocalResources(localResources);

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    setupAppMasterEnv(appMasterEnv);
    mAmContainer.setEnvironment(appMasterEnv);

    // Set up security tokens for launching our ApplicationMaster container.
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = mYarnConf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }
        org.apache.hadoop.fs.FileSystem fs = org.apache.hadoop.fs.FileSystem.get(mYarnConf);
        // getting tokens for the default file-system.
        final Token<?>[] tokens = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        // getting yarn resource manager token
        org.apache.hadoop.conf.Configuration config = mYarnClient.getConfig();
        Token<TokenIdentifier> token = ConverterUtils.convertFromYarn(
                mYarnClient.getRMDelegationToken(new org.apache.hadoop.io.Text(tokenRenewer)),
                ClientRMProxy.getRMDelegationTokenService(config));
        LOG.info("Added RM delegation token: " + token);
        credentials.addToken(token.getService(), token);

        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer buffer = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        mAmContainer.setTokens(buffer);
    }
}

From source file:azkaban.security.commons.SecurityUtils.java

License:Apache License

public static synchronized void prefetchToken(final File tokenFile, final Props p, final Logger logger)
        throws InterruptedException, IOException {

    final Configuration conf = new Configuration();
    logger.info("Getting proxy user for " + p.getString(TO_PROXY));
    logger.info("Getting proxy user for " + p.toString());

    getProxiedUser(p.toProperties(), logger, conf).doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            getToken(p);
            return null;
        }

        private void getToken(Props p) throws InterruptedException, IOException {
            String shouldPrefetch = p.getString(OBTAIN_BINARY_TOKEN);
            if (shouldPrefetch != null && shouldPrefetch.equals("true")) {
                logger.info("Pre-fetching token");

                logger.info("Pre-fetching fs token");
                FileSystem fs = FileSystem.get(conf);
                Token<?> fsToken = fs.getDelegationToken(p.getString("user.to.proxy"));
                logger.info("Created token: " + fsToken.toString());

                Job job = new Job(conf, "totally phony, extremely fake, not real job");
                JobConf jc = new JobConf(conf);
                JobClient jobClient = new JobClient(jc);
                logger.info("Pre-fetching job token: Got new JobClient: " + jc);
                Token<DelegationTokenIdentifier> mrdt = jobClient.getDelegationToken(new Text("hi"));
                logger.info("Created token: " + mrdt.toString());

                job.getCredentials().addToken(new Text("howdy"), mrdt);
                job.getCredentials().addToken(fsToken.getService(), fsToken);

                FileOutputStream fos = null;
                DataOutputStream dos = null;
                try {
                    fos = new FileOutputStream(tokenFile);
                    dos = new DataOutputStream(fos);
                    job.getCredentials().writeTokenStorageToStream(dos);
                } finally {
                    if (dos != null) {
                        dos.close();
                    }
                    if (fos != null) {
                        fos.close();
                    }
                }
                logger.info("Loading hadoop tokens into " + tokenFile.getAbsolutePath());
                p.put("HadoopTokenFileLoc", tokenFile.getAbsolutePath());
            } else {
                logger.info("Not pre-fetching token");
            }
        }
    });
}

From source file:azkaban.security.HadoopSecurityManager_H_1_0.java

License:Apache License

@Override
public synchronized void prefetchToken(final File tokenFile, final String userToProxy, final Logger logger)
        throws HadoopSecurityManagerException {

    logger.info("Getting hadoop tokens for " + userToProxy);

    try {
        getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                getToken(userToProxy);
                return null;
            }

            private void getToken(String userToProxy)
                    throws InterruptedException, IOException, HadoopSecurityManagerException {

                FileSystem fs = FileSystem.get(conf);
                // check if we get the correct FS, and most importantly, the conf
                logger.info("Getting DFS token from " + fs.getCanonicalServiceName() + fs.getUri());
                Token<?> fsToken = fs.getDelegationToken(userToProxy);
                if (fsToken == null) {
                    logger.error("Failed to fetch DFS token for ");
                    throw new HadoopSecurityManagerException("Failed to fetch DFS token for " + userToProxy);
                }
                logger.info("Created DFS token: " + fsToken.toString());
                logger.info("Token kind: " + fsToken.getKind());
                logger.info("Token id: " + fsToken.getIdentifier());
                logger.info("Token service: " + fsToken.getService());

                JobConf jc = new JobConf(conf);
                JobClient jobClient = new JobClient(jc);
                logger.info("Pre-fetching JT token: Got new JobClient: " + jc);

                Token<DelegationTokenIdentifier> mrdt = jobClient.getDelegationToken(new Text("mr token"));
                if (mrdt == null) {
                    logger.error("Failed to fetch JT token for ");
                    throw new HadoopSecurityManagerException("Failed to fetch JT token for " + userToProxy);
                }
                logger.info("Created JT token: " + mrdt.toString());
                logger.info("Token kind: " + mrdt.getKind());
                logger.info("Token id: " + mrdt.getIdentifier());
                logger.info("Token service: " + mrdt.getService());

                jc.getCredentials().addToken(mrdt.getService(), mrdt);
                jc.getCredentials().addToken(fsToken.getService(), fsToken);

                FileOutputStream fos = null;
                DataOutputStream dos = null;
                try {
                    fos = new FileOutputStream(tokenFile);
                    dos = new DataOutputStream(fos);
                    jc.getCredentials().writeTokenStorageToStream(dos);
                } finally {
                    if (dos != null) {
                        dos.close();
                    }
                    if (fos != null) {
                        fos.close();
                    }
                }
                // stash them to cancel after use.
                logger.info("Tokens loaded in " + tokenFile.getAbsolutePath());
            }
        });
    } catch (Exception e) {
        e.printStackTrace();
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + e.getMessage() + e.getCause());

    }
}

From source file:azkaban.security.HadoopSecurityManager_H_1_0.java

License:Apache License

@Override
public void cancelTokens(File tokenFile, String userToProxy, Logger logger)
        throws HadoopSecurityManagerException {
    // nntoken
    Credentials cred = null;
    try {
        cred = Credentials.readTokenStorageFile(new Path(tokenFile.toURI()), new Configuration());
        for (Token<? extends TokenIdentifier> t : cred.getAllTokens()) {
            logger.info("Got token: " + t.toString());
            logger.info("Token kind: " + t.getKind());
            logger.info("Token id: " + new String(t.getIdentifier()));
            logger.info("Token service: " + t.getService());
            if (t.getKind().equals(new Text("HIVE_DELEGATION_TOKEN"))) {
                logger.info("Cancelling hive token " + new String(t.getIdentifier()));
                cancelHiveToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("MAPREDUCE_DELEGATION_TOKEN"))) {
                logger.info("Cancelling mr job tracker token " + new String(t.getIdentifier()));
                cancelMRJobTrackerToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
                logger.info("Cancelling namenode token " + new String(t.getIdentifier()));
                cancelNameNodeToken(t, userToProxy);
            } else {
                logger.info("unknown token type " + t.getKind());
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }

}

From source file:azkaban.security.HadoopSecurityManager_H_1_0.java

License:Apache License

@Override
public synchronized void prefetchToken(final File tokenFile, final Props props, final Logger logger)
        throws HadoopSecurityManagerException {

    final String userToProxy = props.getString(USER_TO_PROXY);

    logger.info("Getting hadoop tokens for " + userToProxy);

    final Credentials cred = new Credentials();

    if (props.getBoolean(OBTAIN_HCAT_TOKEN, false)) {
        try {
            logger.info("Pre-fetching Hive MetaStore token from hive");

            HiveConf hiveConf = new HiveConf();
            logger.info("HiveConf.ConfVars.METASTOREURIS.varname "
                    + hiveConf.get(HiveConf.ConfVars.METASTOREURIS.varname));
            logger.info("HIVE_METASTORE_SASL_ENABLED " + hiveConf.get(HIVE_METASTORE_SASL_ENABLED));
            logger.info("HIVE_METASTORE_KERBEROS_PRINCIPAL " + hiveConf.get(HIVE_METASTORE_KERBEROS_PRINCIPAL));
            logger.info("HIVE_METASTORE_LOCAL " + hiveConf.get(HIVE_METASTORE_LOCAL));

            HiveMetaStoreClient hiveClient = new HiveMetaStoreClient(hiveConf);
            String hcatTokenStr = hiveClient.getDelegationToken(userToProxy,
                    UserGroupInformation.getLoginUser().getShortUserName());
            Token<DelegationTokenIdentifier> hcatToken = new Token<DelegationTokenIdentifier>();
            hcatToken.decodeFromUrlString(hcatTokenStr);
            logger.info("Created hive metastore token: " + hcatTokenStr);
            logger.info("Token kind: " + hcatToken.getKind());
            logger.info("Token id: " + hcatToken.getIdentifier());
            logger.info("Token service: " + hcatToken.getService());
            cred.addToken(hcatToken.getService(), hcatToken);
        } catch (Exception e) {
            e.printStackTrace();
            logger.error("Failed to get hive metastore token." + e.getMessage() + e.getCause());
        } catch (Throwable t) {
            t.printStackTrace();
            logger.error("Failed to get hive metastore token." + t.getMessage() + t.getCause());
        }
    }

    try {
        getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                getToken(userToProxy);
                return null;
            }

            private void getToken(String userToProxy)
                    throws InterruptedException, IOException, HadoopSecurityManagerException {
                logger.info("Here is the props for " + OBTAIN_NAMENODE_TOKEN + ": "
                        + props.getBoolean(OBTAIN_NAMENODE_TOKEN));
                if (props.getBoolean(OBTAIN_NAMENODE_TOKEN, false)) {
                    FileSystem fs = FileSystem.get(conf);
                    // check if we get the correct FS, and most importantly, the
                    // conf
                    logger.info("Getting DFS token from " + fs.getUri());
                    Token<?> fsToken = fs.getDelegationToken(userToProxy);
                    if (fsToken == null) {
                        logger.error("Failed to fetch DFS token for ");
                        throw new HadoopSecurityManagerException(
                                "Failed to fetch DFS token for " + userToProxy);
                    }
                    logger.info("Created DFS token: " + fsToken.toString());
                    logger.info("Token kind: " + fsToken.getKind());
                    logger.info("Token id: " + fsToken.getIdentifier());
                    logger.info("Token service: " + fsToken.getService());
                    cred.addToken(fsToken.getService(), fsToken);
                }

                if (props.getBoolean(OBTAIN_JOBTRACKER_TOKEN, false)) {
                    JobClient jobClient = new JobClient(new JobConf());
                    logger.info("Pre-fetching JT token from JobTracker");

                    Token<DelegationTokenIdentifier> mrdt = jobClient.getDelegationToken(new Text("mr token"));
                    if (mrdt == null) {
                        logger.error("Failed to fetch JT token");
                        throw new HadoopSecurityManagerException("Failed to fetch JT token for " + userToProxy);
                    }
                    logger.info("Created JT token: " + mrdt.toString());
                    logger.info("Token kind: " + mrdt.getKind());
                    logger.info("Token id: " + mrdt.getIdentifier());
                    logger.info("Token service: " + mrdt.getService());
                    cred.addToken(mrdt.getService(), mrdt);
                }
            }
        });

        FileOutputStream fos = null;
        DataOutputStream dos = null;
        try {
            fos = new FileOutputStream(tokenFile);
            dos = new DataOutputStream(fos);
            cred.writeTokenStorageToStream(dos);
        } finally {
            if (dos != null) {
                dos.close();
            }
            if (fos != null) {
                fos.close();
            }
        }

        // stash them to cancel after use.
        logger.info("Tokens loaded in " + tokenFile.getAbsolutePath());

    } catch (Exception e) {
        e.printStackTrace();
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + e.getMessage() + e.getCause());
    } catch (Throwable t) {
        t.printStackTrace();
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + t.getMessage() + t.getCause());
    }
}

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

@Override
public synchronized void prefetchToken(final File tokenFile, final String userToProxy, final Logger logger)
        throws HadoopSecurityManagerException {

    logger.info("Getting hadoop tokens for " + userToProxy);

    try {
        getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                getToken(userToProxy);
                return null;
            }

            private void getToken(String userToProxy)
                    throws InterruptedException, IOException, HadoopSecurityManagerException {

                FileSystem fs = FileSystem.get(conf);
                // check if we get the correct FS, and most importantly, the conf
                logger.info("Getting DFS token from " + fs.getCanonicalServiceName() + fs.getUri());
                Token<?> fsToken = fs.getDelegationToken(userToProxy);
                if (fsToken == null) {
                    logger.error("Failed to fetch DFS token for ");
                    throw new HadoopSecurityManagerException("Failed to fetch DFS token for " + userToProxy);
                }
                logger.info("Created DFS token: " + fsToken.toString());
                logger.info("Token kind: " + fsToken.getKind());
                logger.info("Token id: " + fsToken.getIdentifier());
                logger.info("Token service: " + fsToken.getService());

                JobConf jc = new JobConf(conf);
                JobClient jobClient = new JobClient(jc);
                logger.info("Pre-fetching JT token: Got new JobClient: " + jc);

                Token<DelegationTokenIdentifier> mrdt = jobClient.getDelegationToken(new Text("mr token"));
                if (mrdt == null) {
                    logger.error("Failed to fetch JT token for ");
                    throw new HadoopSecurityManagerException("Failed to fetch JT token for " + userToProxy);
                }
                logger.info("Created JT token: " + mrdt.toString());
                logger.info("Token kind: " + mrdt.getKind());
                logger.info("Token id: " + mrdt.getIdentifier());
                logger.info("Token service: " + mrdt.getService());

                jc.getCredentials().addToken(mrdt.getService(), mrdt);
                jc.getCredentials().addToken(fsToken.getService(), fsToken);

                FileOutputStream fos = null;
                DataOutputStream dos = null;
                try {
                    fos = new FileOutputStream(tokenFile);
                    dos = new DataOutputStream(fos);
                    jc.getCredentials().writeTokenStorageToStream(dos);
                } finally {
                    if (dos != null) {
                        try {
                            dos.close();
                        } catch (Throwable t) {
                            // best effort
                            logger.error(
                                    "encountered exception while closing DataOutputStream of the tokenFile", t);
                        }
                    }
                    if (fos != null) {
                        fos.close();
                    }
                }
                // stash them to cancel after use.
                logger.info("Tokens loaded in " + tokenFile.getAbsolutePath());
            }
        });
    } catch (Exception e) {
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + e.getMessage() + e.getCause());

    }
}

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

private void cancelJhsToken(final Token<? extends TokenIdentifier> t, String userToProxy)
        throws HadoopSecurityManagerException {
    // it appears yarn would clean up this token after app finish, after a long
    // while though.
    org.apache.hadoop.yarn.api.records.Token token = org.apache.hadoop.yarn.api.records.Token
            .newInstance(t.getIdentifier(), t.getKind().toString(), t.getPassword(), t.getService().toString());
    final YarnRPC rpc = YarnRPC.create(conf);
    final InetSocketAddress jhsAddress = SecurityUtil.getTokenServiceAddr(t);
    MRClientProtocol jhsProxy = null;
    try {
        jhsProxy = UserGroupInformation.getCurrentUser().doAs(new PrivilegedAction<MRClientProtocol>() {
            @Override
            public MRClientProtocol run() {
                return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class, jhsAddress, conf);
            }
        });
        CancelDelegationTokenRequest request = Records.newRecord(CancelDelegationTokenRequest.class);
        request.setDelegationToken(token);
        jhsProxy.cancelDelegationToken(request);
    } catch (Exception e) {
        throw new HadoopSecurityManagerException("Failed to cancel token. " + e.getMessage() + e.getCause(), e);
    } finally {
        RPC.stopProxy(jhsProxy);
    }

}

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

@Override
public void cancelTokens(File tokenFile, String userToProxy, Logger logger)
        throws HadoopSecurityManagerException {
    // nntoken
    Credentials cred = null;
    try {
        cred = Credentials.readTokenStorageFile(new Path(tokenFile.toURI()), new Configuration());
        for (Token<? extends TokenIdentifier> t : cred.getAllTokens()) {

            logger.info("Got token: " + t.toString());
            logger.info("Token kind: " + t.getKind());
            logger.info("Token id: " + new String(t.getIdentifier()));
            logger.info("Token service: " + t.getService());

            if (t.getKind().equals(new Text("HIVE_DELEGATION_TOKEN"))) {
                logger.info("Cancelling hive token " + new String(t.getIdentifier()));
                cancelHiveToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("RM_DELEGATION_TOKEN"))) {
                logger.info("Cancelling mr job tracker token " + new String(t.getIdentifier()));
                // cancelMRJobTrackerToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
                logger.info("Cancelling namenode token " + new String(t.getIdentifier()));
                // cancelNameNodeToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("MR_DELEGATION_TOKEN"))) {
                logger.info("Cancelling jobhistoryserver mr token " + new String(t.getIdentifier()));
                // cancelJhsToken(t, userToProxy);
            } else {
                logger.info("unknown token type " + t.getKind());
            }
        }
    } catch (Exception e) {
        throw new HadoopSecurityManagerException("Failed to cancel tokens " + e.getMessage() + e.getCause(), e);
    }

}

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

/**
 * Fetches an hcat token using the specified hive configuration and returns
 * it, optionally overwriting the token's service with the given signature.
 *
 * @param userToProxy the name of the user the token will be fetched for.
 * @param hiveConf the configuration from which the hive client will be
 *          initialized.
 * @param tokenSignatureOverwrite if non-empty, the value to set as the
 *          token's service.
 * @param logger the logger instance which writes to the job logs.
 *
 * @throws IOException
 * @throws TException
 * @throws MetaException
 */
private Token<DelegationTokenIdentifier> fetchHcatToken(String userToProxy, HiveConf hiveConf,
        String tokenSignatureOverwrite, final Logger logger) throws IOException, MetaException, TException {

    logger.info(HiveConf.ConfVars.METASTOREURIS.varname + ": "
            + hiveConf.get(HiveConf.ConfVars.METASTOREURIS.varname));

    logger.info(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname + ": "
            + hiveConf.get(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname));

    logger.info(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname + ": "
            + hiveConf.get(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname));

    HiveMetaStoreClient hiveClient = new HiveMetaStoreClient(hiveConf);
    String hcatTokenStr = hiveClient.getDelegationToken(userToProxy,
            UserGroupInformation.getLoginUser().getShortUserName());
    Token<DelegationTokenIdentifier> hcatToken = new Token<DelegationTokenIdentifier>();
    hcatToken.decodeFromUrlString(hcatTokenStr);

    // overwrite the value of the service property of the token if the signature
    // override is specified.
    if (tokenSignatureOverwrite != null && tokenSignatureOverwrite.trim().length() > 0) {
        hcatToken.setService(new Text(tokenSignatureOverwrite.trim().toLowerCase()));

        logger.info(HIVE_TOKEN_SIGNATURE_KEY + ":"
                + (tokenSignatureOverwrite == null ? "" : tokenSignatureOverwrite));
    }

    logger.info("Created hive metastore token: " + hcatTokenStr);
    logger.info("Token kind: " + hcatToken.getKind());
    logger.info("Token id: " + hcatToken.getIdentifier());
    logger.info("Token service: " + hcatToken.getService());
    return hcatToken;
}