Example usage for org.apache.hadoop.mapreduce.v2.jobhistory JHAdminConfig MR_HISTORY_ADDRESS

Introduction

This page shows example usages of org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig.MR_HISTORY_ADDRESS, drawn from the open source projects listed below.

Prototype

public static final String MR_HISTORY_ADDRESS

To view the source code for org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig.MR_HISTORY_ADDRESS, click the Source Link.

Document

host:port address for History Server API.
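
MR_HISTORY_ADDRESS holds the configuration key for the JobHistory Server RPC address (mapreduce.jobhistory.address). Before the usage examples below, here is a minimal sketch of the common read-and-parse pattern, assuming a populated Configuration; the fallback to JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS is optional:

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.net.NetUtils;

public class JhsAddressExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Read the JobHistory Server address, falling back to the shipped
        // default ("0.0.0.0:10020") when the key is unset.
        String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS,
                JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS);
        // Parse host:port into a socket address, as the examples below do
        // before creating an HSClientProtocol proxy.
        InetSocketAddress jhsAddr = NetUtils.createSocketAddr(serviceAddr);
        System.out.println("JobHistory Server at " + jhsAddr);
    }
}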

Usage

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

@Override
public synchronized void prefetchToken(final File tokenFile, final Props props, final Logger logger)
        throws HadoopSecurityManagerException {

    final String userToProxy = props.getString(USER_TO_PROXY);

    logger.info("Getting hadoop tokens based on props for " + userToProxy);

    final Credentials cred = new Credentials();

    if (props.getBoolean(OBTAIN_HCAT_TOKEN, false)) {
        try {

            // first we fetch and save the default hcat token.
            logger.info("Pre-fetching default Hive MetaStore token from hive");

            HiveConf hiveConf = new HiveConf();
            Token<DelegationTokenIdentifier> hcatToken = fetchHcatToken(userToProxy, hiveConf, null, logger);

            cred.addToken(hcatToken.getService(), hcatToken);

            // check and see if user specified the extra hcat locations we need to
            // look at and fetch token.
            final List<String> extraHcatLocations = props.getStringList(EXTRA_HCAT_LOCATION);
            if (extraHcatLocations != null && !extraHcatLocations.isEmpty()) {
                logger.info("Need to pre-fetch extra metaStore tokens from hive.");

                // start to process the user inputs.
                for (String thriftUrl : extraHcatLocations) {
                    logger.info("Pre-fetching metaStore token from : " + thriftUrl);

                    hiveConf = new HiveConf();
                    hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, thriftUrl);
                    hcatToken = fetchHcatToken(userToProxy, hiveConf, thriftUrl, logger);
                    cred.addToken(hcatToken.getService(), hcatToken);
                }

            }

        } catch (Throwable t) {
            String message = "Failed to get hive metastore token: " + t.getMessage();
            logger.error(message, t);
            throw new HadoopSecurityManagerException(message);
        }
    }

    if (props.getBoolean(OBTAIN_JOBHISTORYSERVER_TOKEN, false)) {
        YarnRPC rpc = YarnRPC.create(conf);
        final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);

        logger.debug("Connecting to HistoryServer at: " + serviceAddr);
        HSClientProtocol hsProxy = (HSClientProtocol) rpc.getProxy(HSClientProtocol.class,
                NetUtils.createSocketAddr(serviceAddr), conf);
        logger.info("Pre-fetching JH token from job history server");

        Token<?> jhsdt = null;
        try {
            jhsdt = getDelegationTokenFromHS(hsProxy);
        } catch (Exception e) {
            logger.error("Failed to fetch JH token", e);
            throw new HadoopSecurityManagerException("Failed to fetch JH token for " + userToProxy);
        }

        if (jhsdt == null) {
            logger.error("getDelegationTokenFromHS() returned null");
            throw new HadoopSecurityManagerException("Unable to fetch JH token for " + userToProxy);
        }

        logger.info("Created JH token: " + jhsdt.toString());
        logger.info("Token kind: " + jhsdt.getKind());
        logger.info("Token id: " + jhsdt.getIdentifier());
        logger.info("Token service: " + jhsdt.getService());

        cred.addToken(jhsdt.getService(), jhsdt);
    }

    try {
        getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                getToken(userToProxy);
                return null;
            }

            private void getToken(String userToProxy)
                    throws InterruptedException, IOException, HadoopSecurityManagerException {
                logger.info("Here is the props for " + OBTAIN_NAMENODE_TOKEN + ": "
                        + props.getBoolean(OBTAIN_NAMENODE_TOKEN));
                if (props.getBoolean(OBTAIN_NAMENODE_TOKEN, false)) {
                    FileSystem fs = FileSystem.get(conf);
                    // check if we get the correct FS, and most importantly, the
                    // conf
                    logger.info("Getting DFS token from " + fs.getUri());
                    Token<?> fsToken = fs
                            .getDelegationToken(getMRTokenRenewerInternal(new JobConf()).toString());
                    if (fsToken == null) {
                        logger.error("Failed to fetch DFS token for ");
                        throw new HadoopSecurityManagerException(
                                "Failed to fetch DFS token for " + userToProxy);
                    }
                    logger.info("Created DFS token: " + fsToken.toString());
                    logger.info("Token kind: " + fsToken.getKind());
                    logger.info("Token id: " + fsToken.getIdentifier());
                    logger.info("Token service: " + fsToken.getService());

                    cred.addToken(fsToken.getService(), fsToken);

                    // getting additional name nodes tokens
                    String otherNamenodes = props.get(OTHER_NAMENODES_TO_GET_TOKEN);
                    if ((otherNamenodes != null) && (otherNamenodes.length() > 0)) {
                        logger.info(OTHER_NAMENODES_TO_GET_TOKEN + ": '" + otherNamenodes + "'");
                        String[] nameNodeArr = otherNamenodes.split(",");
                        Path[] ps = new Path[nameNodeArr.length];
                        for (int i = 0; i < ps.length; i++) {
                            ps[i] = new Path(nameNodeArr[i].trim());
                        }
                        TokenCache.obtainTokensForNamenodes(cred, ps, conf);
                        logger.info("Successfully fetched tokens for: " + otherNamenodes);
                    } else {
                        logger.info(OTHER_NAMENODES_TO_GET_TOKEN + " was not configured");
                    }
                }

                if (props.getBoolean(OBTAIN_JOBTRACKER_TOKEN, false)) {
                    JobConf jobConf = new JobConf();
                    JobClient jobClient = new JobClient(jobConf);
                    logger.info("Pre-fetching JT token from JobTracker");

                    Token<DelegationTokenIdentifier> mrdt = jobClient
                            .getDelegationToken(getMRTokenRenewerInternal(jobConf));
                    if (mrdt == null) {
                        logger.error("Failed to fetch JT token");
                        throw new HadoopSecurityManagerException("Failed to fetch JT token for " + userToProxy);
                    }
                    logger.info("Created JT token: " + mrdt.toString());
                    logger.info("Token kind: " + mrdt.getKind());
                    logger.info("Token id: " + mrdt.getIdentifier());
                    logger.info("Token service: " + mrdt.getService());
                    cred.addToken(mrdt.getService(), mrdt);
                }

            }
        });

        FileOutputStream fos = null;
        DataOutputStream dos = null;
        try {
            fos = new FileOutputStream(tokenFile);
            dos = new DataOutputStream(fos);
            cred.writeTokenStorageToStream(dos);
        } finally {
            if (dos != null) {
                try {
                    dos.close();
                } catch (Throwable t) {
                    // best effort
                    logger.error("encountered exception while closing DataOutputStream of the tokenFile", t);
                }
            }
            if (fos != null) {
                fos.close();
            }
        }
        // stash them to cancel after use.

        logger.info("Tokens loaded in " + tokenFile.getAbsolutePath());

    } catch (Throwable t) {
        throw new HadoopSecurityManagerException("Failed to get hadoop tokens! " + t.getMessage(), t);
    }

}
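
The token file written above is typically handed to the spawned job, which reads it back with the standard Hadoop Credentials API. A minimal counterpart sketch, assuming an illustrative caller; Credentials.readTokenStorageFile mirrors the writeTokenStorageToStream call in the finally block:

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class TokenFileReader {
    public static void loadTokens(File tokenFile) throws IOException {
        // Deserialize the credentials written by prefetchToken() and attach
        // them to the current user so subsequent RPCs can authenticate.
        Credentials cred = Credentials.readTokenStorageFile(tokenFile, new Configuration());
        UserGroupInformation.getCurrentUser().addCredentials(cred);
        for (Token<?> token : cred.getAllTokens()) {
            System.out.println("Loaded token kind: " + token.getKind());
        }
    }
}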

From source file:com.github.sakserv.minicluster.impl.MRLocalCluster.java

License:Apache License

@Override
public void configure() throws Exception {

    // Handle Windows
    WindowsLibsUtils.setHadoopHome();

    configuration.set(YarnConfiguration.RM_ADDRESS, resourceManagerAddress);
    configuration.set(YarnConfiguration.RM_HOSTNAME, resourceManagerHostname);
    configuration.set(YarnConfiguration.RM_SCHEDULER_ADDRESS, resourceManagerSchedulerAddress);
    configuration.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, resourceManagerResourceTrackerAddress);
    configuration.set(YarnConfiguration.RM_WEBAPP_ADDRESS, resourceManagerWebappAddress);
    configuration.set(JHAdminConfig.MR_HISTORY_ADDRESS, jobHistoryAddress);
    configuration.set(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, "true");
    configuration.set(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, "true");
    if (getUseInJvmContainerExecutor()) {
        configuration.set(YarnConfiguration.NM_CONTAINER_EXECUTOR, inJvmContainerExecutorClass);
        configuration.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
        configuration.set("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class.getName());
    }

    if (null != hdfsDefaultFs) {
        configuration.set("fs.defaultFS", hdfsDefaultFs);
        configuration.set("dfs.replication", "1");
    }
}

From source file:com.google.mr4c.hadoop.yarn.YarnTestBinding.java

License:Open Source License

private void startMrCluster() throws IOException {
    Configuration conf = new JobConf();
    FileSystem.setDefaultUri(conf, HadoopTestUtils.getTestDFS().getUri());
    conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
    conf.setBoolean(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, true);
    String addr = MiniYARNCluster.getHostname() + ":0";
    conf.set(YarnConfiguration.RM_ADDRESS, addr);
    conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, addr);
    m_mrCluster = MiniMRClientClusterFactory.create(HadoopTestUtils.class, "MR4CTests", 1, // num node managers
            conf);

    // make sure startup is finished
    for (int i = 0; i < 60; i++) {
        String newAddr = m_mrCluster.getConfig().get(YarnConfiguration.RM_ADDRESS);
        if (newAddr.equals(addr)) {
            s_log.warn("MiniYARNCluster startup not complete");
            try {
                Thread.sleep(1000);
            } catch (InterruptedException ie) {
                throw new IOException(ie);
            }
        } else {
            s_log.info("MiniYARNCluster now available at {}", newAddr);
            return;
        }
    }
    throw new IOException("MiniYARNCluster taking too long to startup");

}

From source file:com.twitter.hraven.hadoopJobMonitor.rpc.ClientCache.java

License:Apache License

protected MRClientProtocol instantiateHistoryProxy() throws IOException {
    final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);
    if (StringUtils.isEmpty(serviceAddr)) {
        return null;
    }
    LOG.debug("Connecting to HistoryServer at: " + serviceAddr);
    final YarnRPC rpc = YarnRPC.create(conf);
    LOG.debug("Connected to HistoryServer at: " + serviceAddr);
    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    return currentUser.doAs(new PrivilegedAction<MRClientProtocol>() {
        @Override
        public MRClientProtocol run() {
            return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class,
                    NetUtils.createSocketAddr(serviceAddr), conf);
        }
    });
}
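
instantiateHistoryProxy() returns null when MR_HISTORY_ADDRESS is unset, so callers should guard for that and release the proxy when finished. A hedged sketch of a hypothetical caller; using YarnRPC.stopProxy for cleanup is an assumption, not part of the listing above:

MRClientProtocol hsProxy = instantiateHistoryProxy();
if (hsProxy == null) {
    // No JobHistory Server address configured; nothing to talk to.
    return;
}
try {
    // ... issue requests against the JobHistory Server, e.g. fetch a
    // delegation token (see the helper sketch further below) ...
} finally {
    // Release the underlying RPC resources for a proxy created by
    // YarnRPC.getProxy().
    YarnRPC.create(conf).stopProxy(hsProxy, conf);
}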

From source file:gobblin.hadoop.token.TokenUtils.java

License:Open Source License

private static void getJhToken(Configuration conf, Credentials cred) throws IOException {
    YarnRPC rpc = YarnRPC.create(conf);
    final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);

    LOG.debug("Connecting to HistoryServer at: " + serviceAddr);
    HSClientProtocol hsProxy = (HSClientProtocol) rpc.getProxy(HSClientProtocol.class,
            NetUtils.createSocketAddr(serviceAddr), conf);
    LOG.info("Pre-fetching JH token from job history server");

    Token<?> jhToken = null;
    try {
        jhToken = getDelegationTokenFromHS(hsProxy, conf);
    } catch (Exception exc) {
        throw new IOException("Failed to fetch JH token.", exc);
    }

    if (jhToken == null) {
        LOG.error("getDelegationTokenFromHS() returned null");
        throw new IOException("Unable to fetch JH token.");
    }

    LOG.info("Created JH token: " + jhToken.toString());
    LOG.info("Token kind: " + jhToken.getKind());
    LOG.info("Token id: " + jhToken.getIdentifier());
    LOG.info("Token service: " + jhToken.getService());

    cred.addToken(jhToken.getService(), jhToken);
}
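
The getDelegationTokenFromHS helper is not shown in this listing. A plausible sketch, modeled on the equivalent method in Hadoop's own YARNRunner; the choice of Master.getMasterPrincipal(conf) as the renewer is carried over from that version and is an assumption here:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.Master;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.util.ConverterUtils;

private static Token<?> getDelegationTokenFromHS(MRClientProtocol hsProxy, Configuration conf)
        throws IOException {
    // Build the RPC request and name the principal allowed to renew the token.
    GetDelegationTokenRequest request = RecordFactoryProvider.getRecordFactory(null)
            .newRecordInstance(GetDelegationTokenRequest.class);
    request.setRenewer(Master.getMasterPrincipal(conf));
    // Ask the JobHistory Server for a delegation token, then convert the
    // YARN token record into a security token bound to the server address.
    org.apache.hadoop.yarn.api.records.Token mrDelegationToken =
            hsProxy.getDelegationToken(request).getDelegationToken();
    return ConverterUtils.convertFromYarn(mrDelegationToken, hsProxy.getConnectAddress());
}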

From source file:gobblin.util.hadoop.TokenUtils.java

License:Apache License

private static void getJhToken(Configuration conf, Credentials cred) throws IOException {
    YarnRPC rpc = YarnRPC.create(conf);
    final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);

    LOG.debug("Connecting to HistoryServer at: " + serviceAddr);
    HSClientProtocol hsProxy = (HSClientProtocol) rpc.getProxy(HSClientProtocol.class,
            NetUtils.createSocketAddr(serviceAddr), conf);
    LOG.info("Pre-fetching JH token from job history server");

    Token<?> jhToken = null;
    try {
        jhToken = getDelegationTokenFromHS(hsProxy, conf);
    } catch (Exception exc) {
        throw new IOException("Failed to fetch JH token.", exc);
    }

    if (jhToken == null) {
        LOG.error("getDelegationTokenFromHS() returned null");
        throw new IOException("Unable to fetch JH token.");
    }

    LOG.info("Created JH token: " + jhToken.toString());
    LOG.info("Token kind: " + jhToken.getKind());
    LOG.info("Token id: " + Arrays.toString(jhToken.getIdentifier()));
    LOG.info("Token service: " + jhToken.getService());

    cred.addToken(jhToken.getService(), jhToken);
}

From source file:org.apache.oozie.action.hadoop.JHSCredentials.java

License:Apache License

/**
 * Create an MRClientProtocol to the JHS
 * Copied over from ClientCache in Hadoop.
 * @return the protocol that can be used to get a token
 * @throws IOException
 */
private MRClientProtocol instantiateHistoryProxy(final Configuration configuration,
        final ActionExecutor.Context context) throws IOException {
    final String serviceAddr = configuration.get(JHAdminConfig.MR_HISTORY_ADDRESS);
    if (StringUtils.isEmpty(serviceAddr)) {
        return null;
    }
    LOG.debug("Connecting to JHS at: " + serviceAddr);
    final YarnRPC rpc = YarnRPC.create(configuration);
    LOG.debug("Connected to JHS at: " + serviceAddr);
    UserGroupInformation currentUser = Services.get().get(UserGroupInformationService.class)
            .getProxyUser(context.getWorkflow().getUser());
    return currentUser.doAs(new PrivilegedAction<MRClientProtocol>() {
        @Override
        public MRClientProtocol run() {
            return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class,
                    NetUtils.createSocketAddr(serviceAddr), configuration);
        }
    });
}