Example usage for org.apache.hadoop.yarn.ipc YarnRPC create

Introduction

This page collects example usages of org.apache.hadoop.yarn.ipc.YarnRPC.create from open-source projects.

Prototype

public static YarnRPC create(Configuration conf) 
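
Before the individual usages, a minimal, self-contained sketch of the call may help. YarnRPC.create reads the yarn.ipc.rpc.class property from the supplied Configuration (falling back to the default protobuf-based Hadoop implementation) and returns a YarnRPC factory whose getProxy and getServer methods build protocol clients and servers. The ResourceManager client protocol used below is one illustrative choice (its name in Hadoop 2.2+); any YARN protocol interface follows the same pattern.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.ipc.YarnRPC;

public class YarnRpcCreateExample {
    public static void main(String[] args) {
        // create() instantiates the RPC engine named by yarn.ipc.rpc.class,
        // defaulting to the protobuf-based Hadoop implementation.
        Configuration conf = new YarnConfiguration();
        YarnRPC rpc = YarnRPC.create(conf);

        // Client side: build a proxy for the ResourceManager's client protocol.
        InetSocketAddress rmAddress = conf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
                YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);
        ApplicationClientProtocol client = (ApplicationClientProtocol) rpc.getProxy(
                ApplicationClientProtocol.class, rmAddress, conf);

        // ... issue protocol calls through the proxy ...

        // Release the proxy when finished.
        rpc.stopProxy(client, conf);
    }
}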

Usage

From source file: azkaban.security.HadoopSecurityManager_H_2_0.java

License: Apache License

private void cancelJhsToken(final Token<? extends TokenIdentifier> t, String userToProxy)
        throws HadoopSecurityManagerException {
    // YARN appears to clean up this token after the application finishes,
    // though only after a long while.
    org.apache.hadoop.yarn.api.records.Token token = org.apache.hadoop.yarn.api.records.Token
            .newInstance(t.getIdentifier(), t.getKind().toString(), t.getPassword(), t.getService().toString());
    final YarnRPC rpc = YarnRPC.create(conf);
    final InetSocketAddress jhsAddress = SecurityUtil.getTokenServiceAddr(t);
    MRClientProtocol jhsProxy = null;
    try {
        jhsProxy = UserGroupInformation.getCurrentUser().doAs(new PrivilegedAction<MRClientProtocol>() {
            @Override
            public MRClientProtocol run() {
                return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class, jhsAddress, conf);
            }
        });
        CancelDelegationTokenRequest request = Records.newRecord(CancelDelegationTokenRequest.class);
        request.setDelegationToken(token);
        jhsProxy.cancelDelegationToken(request);
    } catch (Exception e) {
        throw new HadoopSecurityManagerException("Failed to cancel token. " + e.getMessage() + e.getCause(), e);
    } finally {
        RPC.stopProxy(jhsProxy);
    }

}

From source file: azkaban.security.HadoopSecurityManager_H_2_0.java

License: Apache License

@Override
public synchronized void prefetchToken(final File tokenFile, final Props props, final Logger logger)
        throws HadoopSecurityManagerException {

    final String userToProxy = props.getString(USER_TO_PROXY);

    logger.info("Getting hadoop tokens based on props for " + userToProxy);

    final Credentials cred = new Credentials();

    if (props.getBoolean(OBTAIN_HCAT_TOKEN, false)) {
        try {

            // first we fetch and save the default hcat token.
            logger.info("Pre-fetching default Hive MetaStore token from hive");

            HiveConf hiveConf = new HiveConf();
            Token<DelegationTokenIdentifier> hcatToken = fetchHcatToken(userToProxy, hiveConf, null, logger);

            cred.addToken(hcatToken.getService(), hcatToken);

            // check whether the user specified extra hcat locations that we
            // need to fetch tokens from.
            final List<String> extraHcatLocations = props.getStringList(EXTRA_HCAT_LOCATION);
            if (Collections.EMPTY_LIST != extraHcatLocations) {
                logger.info("Need to pre-fetch extra metaStore tokens from hive.");

                // start to process the user inputs.
                for (String thriftUrl : extraHcatLocations) {
                    logger.info("Pre-fetching metaStore token from : " + thriftUrl);

                    hiveConf = new HiveConf();
                    hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, thriftUrl);
                    hcatToken = fetchHcatToken(userToProxy, hiveConf, thriftUrl, logger);
                    cred.addToken(hcatToken.getService(), hcatToken);
                }

            }

        } catch (Throwable t) {
            String message = "Failed to get hive metastore token. " + t.getMessage() + t.getCause();
            logger.error(message, t);
            throw new HadoopSecurityManagerException(message);
        }
    }

    if (props.getBoolean(OBTAIN_JOBHISTORYSERVER_TOKEN, false)) {
        YarnRPC rpc = YarnRPC.create(conf);
        final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);

        logger.debug("Connecting to HistoryServer at: " + serviceAddr);
        HSClientProtocol hsProxy = (HSClientProtocol) rpc.getProxy(HSClientProtocol.class,
                NetUtils.createSocketAddr(serviceAddr), conf);
        logger.info("Pre-fetching JH token from job history server");

        Token<?> jhsdt = null;
        try {
            jhsdt = getDelegationTokenFromHS(hsProxy);
        } catch (Exception e) {
            logger.error("Failed to fetch JH token", e);
            throw new HadoopSecurityManagerException("Failed to fetch JH token for " + userToProxy);
        }

        if (jhsdt == null) {
            logger.error("getDelegationTokenFromHS() returned null");
            throw new HadoopSecurityManagerException("Unable to fetch JH token for " + userToProxy);
        }

        logger.info("Created JH token: " + jhsdt.toString());
        logger.info("Token kind: " + jhsdt.getKind());
        logger.info("Token id: " + jhsdt.getIdentifier());
        logger.info("Token service: " + jhsdt.getService());

        cred.addToken(jhsdt.getService(), jhsdt);
    }

    try {
        getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                getToken(userToProxy);
                return null;
            }

            private void getToken(String userToProxy)
                    throws InterruptedException, IOException, HadoopSecurityManagerException {
                logger.info("Here is the props for " + OBTAIN_NAMENODE_TOKEN + ": "
                        + props.getBoolean(OBTAIN_NAMENODE_TOKEN));
                if (props.getBoolean(OBTAIN_NAMENODE_TOKEN, false)) {
                    FileSystem fs = FileSystem.get(conf);
                    // check if we get the correct FS, and most importantly, the
                    // conf
                    logger.info("Getting DFS token from " + fs.getUri());
                    Token<?> fsToken = fs
                            .getDelegationToken(getMRTokenRenewerInternal(new JobConf()).toString());
                    if (fsToken == null) {
                        logger.error("Failed to fetch DFS token for ");
                        throw new HadoopSecurityManagerException(
                                "Failed to fetch DFS token for " + userToProxy);
                    }
                    logger.info("Created DFS token: " + fsToken.toString());
                    logger.info("Token kind: " + fsToken.getKind());
                    logger.info("Token id: " + fsToken.getIdentifier());
                    logger.info("Token service: " + fsToken.getService());

                    cred.addToken(fsToken.getService(), fsToken);

                    // getting additional name nodes tokens
                    String otherNamenodes = props.get(OTHER_NAMENODES_TO_GET_TOKEN);
                    if ((otherNamenodes != null) && (otherNamenodes.length() > 0)) {
                        logger.info(OTHER_NAMENODES_TO_GET_TOKEN + ": '" + otherNamenodes + "'");
                        String[] nameNodeArr = otherNamenodes.split(",");
                        Path[] ps = new Path[nameNodeArr.length];
                        for (int i = 0; i < ps.length; i++) {
                            ps[i] = new Path(nameNodeArr[i].trim());
                        }
                        TokenCache.obtainTokensForNamenodes(cred, ps, conf);
                        logger.info("Successfully fetched tokens for: " + otherNamenodes);
                    } else {
                        logger.info(OTHER_NAMENODES_TO_GET_TOKEN + " was not configured");
                    }
                }

                if (props.getBoolean(OBTAIN_JOBTRACKER_TOKEN, false)) {
                    JobConf jobConf = new JobConf();
                    JobClient jobClient = new JobClient(jobConf);
                    logger.info("Pre-fetching JT token from JobTracker");

                    Token<DelegationTokenIdentifier> mrdt = jobClient
                            .getDelegationToken(getMRTokenRenewerInternal(jobConf));
                    if (mrdt == null) {
                        logger.error("Failed to fetch JT token");
                        throw new HadoopSecurityManagerException("Failed to fetch JT token for " + userToProxy);
                    }
                    logger.info("Created JT token: " + mrdt.toString());
                    logger.info("Token kind: " + mrdt.getKind());
                    logger.info("Token id: " + mrdt.getIdentifier());
                    logger.info("Token service: " + mrdt.getService());
                    cred.addToken(mrdt.getService(), mrdt);
                }

            }
        });

        FileOutputStream fos = null;
        DataOutputStream dos = null;
        try {
            fos = new FileOutputStream(tokenFile);
            dos = new DataOutputStream(fos);
            cred.writeTokenStorageToStream(dos);
        } finally {
            if (dos != null) {
                try {
                    dos.close();
                } catch (Throwable t) {
                    // best effort
                    logger.error("encountered exception while closing DataOutputStream of the tokenFile", t);
                }
            }
            if (fos != null) {
                fos.close();
            }
        }
        // stash them to cancel after use.

        logger.info("Tokens loaded in " + tokenFile.getAbsolutePath());

    } catch (Exception e) {
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + e.getMessage() + e.getCause(), e);
    } catch (Throwable t) {
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + t.getMessage() + t.getCause(), t);
    }

}

From source file: com.cloudera.kitten.appmaster.service.ContainerManagerConnectionFactoryImpl.java

License: Open Source License

public ContainerManagerConnectionFactoryImpl(Configuration conf) {
    this.conf = conf;
    this.rpc = YarnRPC.create(conf);
    this.containerManagers = Maps.newHashMap();
}
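
The factory caches a single YarnRPC instance and, judging from the containerManagers map, hands out one ContainerManager proxy per node. A hedged sketch of what such a lookup might look like follows; the connect method name, its signature, and the caching key are assumptions rather than the project's verbatim code (ContainerManager is the container-launch protocol of the Hadoop 2.0-alpha API that Kitten targeted, and containerManagers is assumed to be a Map<String, ContainerManager>).

// Hypothetical sketch: reuse the cached YarnRPC instance to build and
// memoize one ContainerManager proxy per "host:port" node address.
public synchronized ContainerManager connect(String containerManagerAddress) {
    ContainerManager cm = containerManagers.get(containerManagerAddress);
    if (cm == null) {
        // NetUtils.createSocketAddr parses the "host:port" string.
        InetSocketAddress addr = NetUtils.createSocketAddr(containerManagerAddress);
        cm = (ContainerManager) rpc.getProxy(ContainerManager.class, addr, conf);
        containerManagers.put(containerManagerAddress, cm);
    }
    return cm;
}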

From source file: com.cloudera.kitten.appmaster.service.ResourceManagerConnectionFactory.java

License: Open Source License

public ResourceManagerConnectionFactory(Configuration conf) {
    this.conf = conf;
    this.rpc = YarnRPC.create(conf);
}

From source file: com.cloudera.kitten.client.service.ApplicationsManagerConnectionFactory.java

License: Open Source License

public ApplicationsManagerConnectionFactory(Configuration conf) {
    this.conf = Preconditions.checkNotNull(conf);
    this.rpc = YarnRPC.create(conf);
}

From source file: com.continuuity.weave.internal.appmaster.ApplicationMasterService.java

License: Open Source License

private void doStart() throws Exception {
    LOG.info("Start application master with spec: " + WeaveSpecificationAdapter.create().toJson(weaveSpec));

    instanceChangeExecutor = Executors
            .newSingleThreadExecutor(Threads.createDaemonThreadFactory("instanceChanger"));
    yarnRPC = YarnRPC.create(yarnConf);

    amrmClient.init(yarnConf);
    amrmClient.start();
    // TODO: Have RPC host and port
    RegisterApplicationMasterResponse response = amrmClient.registerApplicationMaster("", 0, null);
    maxCapability = response.getMaximumResourceCapability();
    minCapability = response.getMinimumResourceCapability();

    LOG.info("Maximum resource capability: " + maxCapability);
    LOG.info("Minimum resource capability: " + minCapability);

    // Creates ZK path for runnable and kafka logging service
    Futures.allAsList(
            ImmutableList.of(zkClient.create("/" + runId.getId() + "/runnables", null, CreateMode.PERSISTENT),
                    zkClient.create("/" + runId.getId() + "/kafka", null, CreateMode.PERSISTENT)))
            .get();

    // Starts kafka server
    LOG.info("Starting kafka server");
    kafkaServer = new EmbeddedKafkaServer(new File("kafka.tgz"), generateKafkaConfig());
    kafkaServer.startAndWait();
    LOG.info("Kafka server started");

    runnableContainerRequests = initContainerRequests();
}

From source file: com.continuuity.weave.internal.yarn.Hadoop20YarnAMClient.java

License: Apache License

public Hadoop20YarnAMClient(Configuration conf) {
    String masterContainerId = System.getenv().get(ApplicationConstants.AM_CONTAINER_ID_ENV);
    Preconditions.checkArgument(masterContainerId != null, "Missing %s from environment",
            ApplicationConstants.AM_CONTAINER_ID_ENV);
    this.containerId = ConverterUtils.toContainerId(masterContainerId);
    this.containerRequests = ArrayListMultimap.create();

    this.amrmClient = new AMRMClientImpl(containerId.getApplicationAttemptId());
    this.amrmClient.init(conf);
    this.nmClient = new Hadoop20YarnNMClient(YarnRPC.create(conf), conf);
}

From source file: com.continuuity.weave.internal.yarn.ports.AMRMClientImpl.java

License: Apache License

@Override
public synchronized void start() {
    final YarnConfiguration conf = new YarnConfiguration(getConfig());
    final YarnRPC rpc = YarnRPC.create(conf);
    final InetSocketAddress rmAddress = conf.getSocketAddr(YarnConfiguration.RM_SCHEDULER_ADDRESS,
            YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS, YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);

    UserGroupInformation currentUser;
    try {
        currentUser = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {
        throw new YarnException(e);
    }

    if (UserGroupInformation.isSecurityEnabled()) {
        String tokenURLEncodedStr = System.getenv().get(ApplicationConstants.APPLICATION_MASTER_TOKEN_ENV_NAME);
        Token<? extends TokenIdentifier> token = new Token<TokenIdentifier>();

        try {
            token.decodeFromUrlString(tokenURLEncodedStr);
        } catch (IOException e) {
            throw new YarnException(e);
        }

        SecurityUtil.setTokenService(token, rmAddress);
        if (LOG.isDebugEnabled()) {
            LOG.debug("AppMasterToken is " + token);
        }
        currentUser.addToken(token);
    }

    rmClient = currentUser.doAs(new PrivilegedAction<AMRMProtocol>() {
        @Override
        public AMRMProtocol run() {
            return (AMRMProtocol) rpc.getProxy(AMRMProtocol.class, rmAddress, conf);
        }
    });
    LOG.debug("Connecting to ResourceManager at " + rmAddress);
    super.start();
}

From source file: com.continuuity.weave.yarn.ApplicationMasterService.java

License: Open Source License

private void doStart() throws Exception {
    LOG.info("Start application master with spec: " + WeaveSpecificationAdapter.create().toJson(weaveSpec));

    yarnRPC = YarnRPC.create(yarnConf);

    amrmClient.init(yarnConf);
    amrmClient.start();
    // TODO: Have RPC host and port
    RegisterApplicationMasterResponse response = amrmClient.registerApplicationMaster("", 0, null);
    maxCapability = response.getMaximumResourceCapability();
    minCapability = response.getMinimumResourceCapability();

    LOG.info("Maximum resource capability: " + maxCapability);
    LOG.info("Minimum resource capability: " + minCapability);

    // Creates ZK path for runnable and kafka logging service
    Futures.allAsList(
            ImmutableList.of(zkClientService.create("/" + runId + "/runnables", null, CreateMode.PERSISTENT),
                    zkClientService.create("/" + runId + "/kafka", null, CreateMode.PERSISTENT)))
            .get();
}

From source file: com.github.hdl.tensorflow.yarn.app.TFApplicationRpcServer.java

License: Apache License

@Override
public void run() {
    Configuration conf = new Configuration();
    YarnRPC rpc = YarnRPC.create(conf);
    InetSocketAddress address = new InetSocketAddress(rpcAddress, rpcPort);
    this.server = rpc.getServer(TensorFlowCluster.class, this, address, conf, null,
            conf.getInt(YarnConfiguration.RM_RESOURCE_TRACKER_CLIENT_THREAD_COUNT,
                    YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_CLIENT_THREAD_COUNT));

    this.server.start();
}
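
The getServer call above is the server-side half of the same factory; a client reaches this endpoint through the matching getProxy call. A hedged sketch of such a caller follows; the host and port are illustrative placeholders, not values from the original project.

// Hypothetical client-side counterpart to the server above.
Configuration conf = new Configuration();
YarnRPC rpc = YarnRPC.create(conf);
TensorFlowCluster proxy = (TensorFlowCluster) rpc.getProxy(TensorFlowCluster.class,
        new InetSocketAddress("localhost", 10000), conf);
// ... invoke TensorFlowCluster methods on the proxy ...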