Example usage for org.apache.hadoop.yarn.client.api YarnClient init

List of usage examples for org.apache.hadoop.yarn.client.api YarnClient init

Introduction

On this page you can find example usage for org.apache.hadoop.yarn.client.api YarnClient init.

Prototype

@Override
public void init(Configuration conf) 

Document

Initializes the client with the given Configuration; this invokes #serviceInit.
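
Before the individual examples, here is a minimal lifecycle sketch (an illustration for this page, not taken from any of the source files below): init must be called with a Configuration before start(), all ResourceManager calls happen between start() and stop(), and the method name printClusterNodeCount is a hypothetical placeholder.

import java.io.IOException;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;

// Hypothetical helper showing the create/init/start/stop lifecycle.
public static void printClusterNodeCount() throws IOException, YarnException {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration()); // invokes serviceInit; reads yarn-site.xml from the classpath
    yarnClient.start(); // must precede any ResourceManager calls
    try {
        System.out.println("cluster nodes: " + yarnClient.getNodeReports().size());
    } finally {
        yarnClient.stop(); // releases the ResourceManager connection
    }
}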

Usage

From source file: azkaban.jobtype.HadoopJobUtils.java

License: Apache License

/**
 * <pre>
 * Uses YarnClient to kill the job on the cluster.
 * Using JobClient only works partially:
 *   If the YARN container has started but the Spark job hasn't, it will kill it
 *   If the Spark job has started, the cancel will hang until the Spark job is complete
 *   If the Spark job is complete, it will return immediately, with a job not found on the job tracker
 * </pre>
 * 
 * @param applicationId the id of the YARN application to kill
 * @param log the job logger
 * @throws IOException
 * @throws YarnException
 */
public static void killJobOnCluster(String applicationId, Logger log) throws YarnException, IOException {

    YarnConfiguration yarnConf = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(yarnConf);
    yarnClient.start();

    String[] split = applicationId.split("_");
    ApplicationId aid = ApplicationId.newInstance(Long.parseLong(split[1]), Integer.parseInt(split[2]));

    log.info("start klling application: " + aid);
    yarnClient.killApplication(aid);
    log.info("successfully killed application: " + aid);
}
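
The manual split above relies on the standard application_&lt;clusterTimestamp&gt;_&lt;sequenceNumber&gt; id format, and the method never stops the client it starts. Below is a minimal alternative sketch, under the same assumptions, that parses the id with ConverterUtils (the same utility the StramAgent example further down this page uses) and releases the client; the method name killApplication is hypothetical.

import java.io.IOException;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.util.ConverterUtils;

// Hypothetical variant of killJobOnCluster; ConverterUtils parses ids
// such as "application_1234567890123_0001" without splitting by hand.
public static void killApplication(String applicationId) throws YarnException, IOException {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();
    try {
        ApplicationId aid = ConverterUtils.toApplicationId(applicationId);
        yarnClient.killApplication(aid);
    } finally {
        yarnClient.stop(); // not done in the original example
    }
}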

From source file: cn.edu.buaa.act.petuumOnYarn.ApplicationMaster.java

License: Apache License

private boolean getAvaliableNodes() {
    List<NodeReport> clusterNodeReports;
    try {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(conf);
        yarnClient.start();

        clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
        for (NodeReport node : clusterNodeReports) {
            LOG.info("node infos:" + node.getHttpAddress());
        }

        avaliableNodeList = new ArrayList<NodeReport>();
        if (numNodes <= clusterNodeReports.size()) {
            for (NodeReport node : clusterNodeReports) {
                if (node.getCapability().getMemory() >= containerMemory
                        && node.getCapability().getVirtualCores() >= containerVirtualCores) {
                    avaliableNodeList.add(node);
                }
            }
            if (avaliableNodeList.size() >= numNodes)
                numTotalContainers = numNodes;
            else {
                LOG.error("Resource isn't enough");
                return false;
            }
        } else {
            LOG.error("cluster nodes isn't enough");
            return false;
        }
    } catch (Exception e) {
        // Log the exception with its stack trace; e.getStackTrace() would only log the array reference.
        LOG.error("Failed to get available nodes", e);
        return false;
    }
    return true;
}
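
Note that, unlike most of the other examples on this page, this method never stops the YarnClient it creates. The MiniLlama examples below call stop() as soon as the node reports are retrieved, while AbstractYarnStats and YarnClientFactory hand the started client back and leave stopping to the caller.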

From source file: co.cask.cdap.common.security.YarnTokenUtils.java

License: Apache License

/**
 * Gets a Yarn delegation token and stores it in the given Credentials.
 *
 * @return the same Credentials instance as the one given in parameter.
 */
public static Credentials obtainToken(YarnConfiguration configuration, Credentials credentials) {
    if (!UserGroupInformation.isSecurityEnabled()) {
        return credentials;
    }

    try {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(configuration);
        yarnClient.start();

        try {
            Text renewer = new Text(UserGroupInformation.getCurrentUser().getShortUserName());
            org.apache.hadoop.yarn.api.records.Token rmDelegationToken = yarnClient
                    .getRMDelegationToken(renewer);

            // TODO: The following logic should be replaced with call to ClientRMProxy.getRMDelegationTokenService after
            // CDAP-4825 is resolved
            List<String> services = new ArrayList<>();
            if (HAUtil.isHAEnabled(configuration)) {
                // If HA is enabled, we need to enumerate all RM hosts
                // and add the corresponding service name to the token service
                // Copy the yarn conf since we need to modify it to get the RM addresses
                YarnConfiguration yarnConf = new YarnConfiguration(configuration);
                for (String rmId : HAUtil.getRMHAIds(configuration)) {
                    yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
                    InetSocketAddress address = yarnConf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
                            YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);
                    services.add(SecurityUtil.buildTokenService(address).toString());
                }
            } else {
                services.add(SecurityUtil.buildTokenService(YarnUtils.getRMAddress(configuration)).toString());
            }

            Token<TokenIdentifier> token = ConverterUtils.convertFromYarn(rmDelegationToken,
                    (InetSocketAddress) null);
            token.setService(new Text(Joiner.on(',').join(services)));
            credentials.addToken(new Text(token.getService()), token);

            // OK to log, it won't log the credential, only information about the token.
            LOG.info("Added RM delegation token: {}", token);

        } finally {
            yarnClient.stop();
        }

        return credentials;
    } catch (Exception e) {
        LOG.error("Failed to get secure token for Yarn.", e);
        throw Throwables.propagate(e);
    }
}
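
The TODO above refers to Hadoop's org.apache.hadoop.yarn.client.ClientRMProxy. As a rough sketch, assuming a Hadoop version that ships ClientRMProxy.getRMDelegationTokenService, the whole HA/non-HA branching could collapse into a single call; the helper name rmDelegationTokenService is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.yarn.client.ClientRMProxy;

// Hypothetical helper: builds the delegation token service string
// for both HA and non-HA ResourceManagers in one call.
public static Text rmDelegationTokenService(Configuration configuration) {
    return ClientRMProxy.getRMDelegationTokenService(configuration);
}

With that helper, token.setService(rmDelegationTokenService(configuration)) would replace the hand-built services list above.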

From source file: co.cask.cdap.master.startup.YarnCheck.java

License: Apache License

@Override
public void run() {
    int yarnConnectTimeout = cConf.getInt(Constants.Startup.YARN_CONNECT_TIMEOUT_SECONDS, 60);
    LOG.info("Checking YARN availability -- may take up to {} seconds.", yarnConnectTimeout);

    final YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(hConf);

    List<NodeReport> nodeReports;
    // if yarn is not up, yarnClient.start() will hang.
    ExecutorService executorService = Executors
            .newSingleThreadExecutor(new ThreadFactoryBuilder().setNameFormat("startup-checker").build());
    try {
        Future<List<NodeReport>> result = executorService.submit(new Callable<List<NodeReport>>() {
            @Override
            public List<NodeReport> call() throws Exception {
                yarnClient.start();
                return yarnClient.getNodeReports();
            }
        });
        nodeReports = result.get(yarnConnectTimeout, TimeUnit.SECONDS);
        LOG.info("  YARN availability successfully verified.");
    } catch (Exception e) {
        throw new RuntimeException("Unable to get status of YARN nodemanagers. "
                + "Please check that YARN is running "
                + "and that the correct Hadoop configuration (core-site.xml, yarn-site.xml) and libraries "
                + "are included in the CDAP master classpath.", e);
    } finally {
        try {
            yarnClient.stop();
        } catch (Exception e) {
            LOG.warn("Error stopping yarn client.", e);
        } finally {
            executorService.shutdown();
        }
    }

    checkResources(nodeReports);
}
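
The design point in this example is that yarnClient.start() can block indefinitely when the ResourceManager is down, so instead of calling it inline, the check submits it to a single-thread executor and bounds the wait with Future.get(yarnConnectTimeout, TimeUnit.SECONDS).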

From source file: co.cask.cdap.operations.yarn.AbstractYarnStats.java

License: Apache License

protected YarnClient createYARNClient() {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    return yarnClient;
}

From source file: com.cloudera.kitten.client.service.YarnClientFactory.java

License: Open Source License

@Override
public YarnClient connect() {
    YarnClient client = YarnClient.createYarnClient();
    client.init(conf);
    client.start();
    return client;
}

From source file: com.cloudera.llama.am.MiniLlama.java

License: Apache License

private Map<String, String> getDataNodeNodeManagerMapping(Configuration conf) throws Exception {
    Map<String, String> map = new HashMap<String, String>();
    DFSClient dfsClient = new DFSClient(new URI(conf.get("fs.defaultFS")), conf);
    DatanodeInfo[] DNs = dfsClient.datanodeReport(HdfsConstants.DatanodeReportType.ALL);
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    List<NodeId> nodeIds = getYarnNodeIds(conf);
    if (nodeIds.size() != DNs.length) {
        throw new RuntimeException("Number of DNs and NMs differ, MiniLlama "
                + "node mapping requires them to be equal at startup");
    }
    LOG.info("HDFS/YARN mapping:");
    for (int i = 0; i < DNs.length; i++) {
        String key = DNs[i].getXferAddr();
        NodeId nodeId = nodeIds.get(i);
        String value = nodeId.getHost() + ":" + nodeId.getPort();
        map.put(key, value);
        LOG.info("  DN/NM: " + key + "/" + value);
    }
    yarnClient.stop();
    nodes = map.size();
    verifySingleHost(map.keySet(), "DataNode");
    verifySingleHost(map.values(), "NodeManager");
    return map;
}

From source file: com.cloudera.llama.am.MiniLlama.java

License: Apache License

private List<NodeId> getYarnNodeIds(Configuration conf) throws Exception {
    List<NodeId> list = new ArrayList<NodeId>();
    if (miniYarn != null) {
        int clusterNodes = getConf().getInt(MINI_CLUSTER_NODES_KEY, 1);
        for (int i = 0; i < clusterNodes; i++) {
            list.add(miniYarn.getNodeManager(i).getNMContext().getNodeId());
        }
    } else {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(conf);
        yarnClient.start();
        List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);
        for (int i = 0; i < nodes.size(); i++) {
            list.add(nodes.get(i).getNodeId());
        }
        yarnClient.stop();
    }
    return list;
}

From source file: com.cloudera.llama.am.yarn.TestLlamaAMWithYarn.java

License: Apache License

/**
 * Test to verify Llama deletes old reservations on startup.
 */
@Test(timeout = 60000)
public void testLlamaDeletesOldReservationsOnStartup() throws Exception {
    YarnClient client = null;
    LlamaAM llamaAM1 = null, llamaAM2 = null, llamaAM3 = null;
    EnumSet<YarnApplicationState> running = EnumSet.of(YarnApplicationState.RUNNING);
    try {
        startYarn(createMiniYarnConfig(false));

        client = YarnClient.createYarnClient();
        client.init(miniYarn.getConfig());
        client.start();
        Assert.assertEquals("Non-zero YARN apps even before any reservations", 0,
                client.getApplications().size());

        llamaAM1 = LlamaAM.create(getLlamaConfiguration());
        llamaAM1.start();
        Assert.assertEquals("Mismatch between #YARN apps and #Queues", 2,
                client.getApplications(running).size());

        // Start another Llama of the same cluster-id to see if old YARN apps
        // are deleted.
        llamaAM2 = LlamaAM.create(getLlamaConfiguration());
        llamaAM2.start();
        Assert.assertEquals(
                "Mismatch between #YARN apps and #Queues. Only apps"
                        + " from the latest started Llama should be running.",
                2, client.getApplications(running).size());

        // Start Llama of different cluster-id to see old YARN apps are not
        // deleted.
        Configuration confWithDifferentClusterId = getLlamaConfiguration();
        confWithDifferentClusterId.set(LlamaAM.CLUSTER_ID, "new-cluster");
        llamaAM3 = LlamaAM.create(confWithDifferentClusterId);
        llamaAM3.start();
        Assert.assertEquals("Mismatch between #YARN apps and #Queues for " + "multiple clusters", 4,
                client.getApplications(running).size());

    } finally {
        // Guard against a failed startYarn() leaving these null.
        if (client != null) {
            client.stop();
        }
        if (llamaAM1 != null) {
            llamaAM1.stop();
        }
        if (llamaAM2 != null) {
            llamaAM2.stop();
        }
        if (llamaAM3 != null) {
            llamaAM3.stop();
        }
        stopYarn();
    }
}

From source file: com.datatorrent.stram.client.StramAgent.java

License: Apache License

private StramWebServicesInfo retrieveWebServicesInfo(String appId) {
    YarnClient yarnClient = YarnClient.createYarnClient();
    String url;
    try {
        yarnClient.init(conf);
        yarnClient.start();
        ApplicationReport ar = yarnClient.getApplicationReport(ConverterUtils.toApplicationId(appId));
        String trackingUrl = ar.getTrackingUrl();
        if (!trackingUrl.startsWith("http://") && !trackingUrl.startsWith("https://")) {
            url = "http://" + trackingUrl;
        } else {
            url = trackingUrl;
        }
        if (StringUtils.isBlank(url)) {
            LOG.error("Cannot get tracking url from YARN");
            return null;
        }
        if (url.endsWith("/")) {
            url = url.substring(0, url.length() - 1);
        }
        url += WebServices.PATH;
    } catch (Exception ex) {
        //LOG.error("Caught exception when retrieving web services info", ex);
        return null;
    } finally {
        yarnClient.stop();
    }

    WebServicesClient webServicesClient = new WebServicesClient();
    try {
        JSONObject response;
        String secToken = null;
        ClientResponse clientResponse;
        int i = 0;
        while (true) {
            LOG.debug("Accessing url {}", url);
            clientResponse = webServicesClient.process(url, ClientResponse.class,
                    new WebServicesClient.GetWebServicesHandler<ClientResponse>());
            String val = clientResponse.getHeaders().getFirst("Refresh");
            if (val == null) {
                break;
            }
            int index = val.indexOf("url=");
            if (index < 0) {
                break;
            }
            url = val.substring(index + 4);
            if (i++ > MAX_REDIRECTS) {
                LOG.error("Cannot get web service info -- exceeded the max number of redirects");
                return null;
            }
        }

        // Only a secured cluster sets the client cookie; the response body is parsed the same way in both cases.
        if (UserGroupInformation.isSecurityEnabled()) {
            for (NewCookie nc : clientResponse.getCookies()) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Cookie " + nc.getName() + " " + nc.getValue());
                }
                if (nc.getName().equals(StramWSFilter.CLIENT_COOKIE)) {
                    secToken = nc.getValue();
                }
            }
        }
        response = new JSONObject(clientResponse.getEntity(String.class));
        String version = response.getString("version");
        response = webServicesClient.process(url + "/" + version + "/stram/info", JSONObject.class,
                new WebServicesClient.GetWebServicesHandler<JSONObject>());
        String appMasterUrl = response.getString("appMasterTrackingUrl");
        String appPath = response.getString("appPath");
        String user = response.getString("user");
        JSONObject permissionsInfo = null;
        FSDataInputStream is = null;
        try {
            is = fileSystem.open(new Path(appPath, "permissions.json"));
            permissionsInfo = new JSONObject(IOUtils.toString(is));
        } catch (JSONException ex) {
            LOG.error("Error reading from the permissions info. Ignoring", ex);
        } catch (IOException ex) {
            // ignore
        } finally {
            IOUtils.closeQuietly(is);
        }
        return new StramWebServicesInfo(appMasterUrl, version, appPath, user, secToken, permissionsInfo);
    } catch (Exception ex) {
        LOG.debug("Caught exception when retrieving web service info for app " + appId, ex);
        return null;
    }
}