Example usage for org.apache.hadoop.yarn.util ConverterUtils toApplicationId

List of usage examples for org.apache.hadoop.yarn.util ConverterUtils toApplicationId

Introduction

In this page you can find the example usage for org.apache.hadoop.yarn.util ConverterUtils toApplicationId.

Prototype

@Public
    @Deprecated
    public static ApplicationId toApplicationId(String appIdStr) 

Source Link

Usage

From source file:com.datatorrent.stram.client.StramAgent.java

License:Apache License

/**
 * Resolves the STRAM web-services endpoint for a running YARN application and
 * fetches its basic info (version, app path, user, optional permissions).
 *
 * @param appId textual YARN application id, e.g. "application_1234_0001"
 * @return the populated info object, or {@code null} if the application or its
 *         web services cannot be reached
 */
private StramWebServicesInfo retrieveWebServicesInfo(String appId) {
    YarnClient yarnClient = YarnClient.createYarnClient();
    String url;
    try {
        yarnClient.init(conf);
        yarnClient.start();
        ApplicationReport ar = yarnClient.getApplicationReport(ConverterUtils.toApplicationId(appId));
        String trackingUrl = ar.getTrackingUrl();
        // Reject a missing tracking URL *before* prepending a scheme; checking
        // afterwards is dead code because "http://" + "" is never blank.
        if (StringUtils.isBlank(trackingUrl)) {
            LOG.error("Cannot get tracking url from YARN");
            return null;
        }
        if (!trackingUrl.startsWith("http://") && !trackingUrl.startsWith("https://")) {
            url = "http://" + trackingUrl;
        } else {
            url = trackingUrl;
        }
        if (url.endsWith("/")) {
            url = url.substring(0, url.length() - 1);
        }
        url += WebServices.PATH;
    } catch (Exception ex) {
        // Log instead of silently swallowing; debug level keeps transient lookup
        // failures from spamming the log.
        LOG.debug("Caught exception when retrieving web services info for app " + appId, ex);
        return null;
    } finally {
        yarnClient.stop();
    }

    WebServicesClient webServicesClient = new WebServicesClient();
    try {
        JSONObject response;
        String secToken = null;
        ClientResponse clientResponse;
        int i = 0;
        // Follow "Refresh: ...url=..." header redirects (e.g. from the YARN web
        // proxy), bounded by MAX_REDIRECTS to avoid looping forever.
        while (true) {
            LOG.debug("Accessing url {}", url);
            clientResponse = webServicesClient.process(url, ClientResponse.class,
                    new WebServicesClient.GetWebServicesHandler<ClientResponse>());
            String val = clientResponse.getHeaders().getFirst("Refresh");
            if (val == null) {
                break;
            }
            int index = val.indexOf("url=");
            if (index < 0) {
                break;
            }
            url = val.substring(index + 4);
            if (i++ > MAX_REDIRECTS) {
                LOG.error("Cannot get web service info -- exceeded the max number of redirects");
                return null;
            }
        }

        // In secure mode, capture the STRAM client cookie so later calls can
        // authenticate. The response body is parsed the same way either way, so
        // the former duplicated parse in both branches of a redundant
        // isSecurityEnabled() check is collapsed to a single statement.
        if (UserGroupInformation.isSecurityEnabled()) {
            for (NewCookie nc : clientResponse.getCookies()) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Cookie " + nc.getName() + " " + nc.getValue());
                }
                if (nc.getName().equals(StramWSFilter.CLIENT_COOKIE)) {
                    secToken = nc.getValue();
                }
            }
        }
        response = new JSONObject(clientResponse.getEntity(String.class));
        String version = response.getString("version");
        response = webServicesClient.process(url + "/" + version + "/stram/info", JSONObject.class,
                new WebServicesClient.GetWebServicesHandler<JSONObject>());
        String appMasterUrl = response.getString("appMasterTrackingUrl");
        String appPath = response.getString("appPath");
        String user = response.getString("user");
        JSONObject permissionsInfo = null;
        FSDataInputStream is = null;
        try {
            is = fileSystem.open(new Path(appPath, "permissions.json"));
            permissionsInfo = new JSONObject(IOUtils.toString(is));
        } catch (JSONException ex) {
            LOG.error("Error reading from the permissions info. Ignoring", ex);
        } catch (IOException ex) {
            // Permissions file is optional; its absence is not an error.
        } finally {
            IOUtils.closeQuietly(is);
        }
        return new StramWebServicesInfo(appMasterUrl, version, appPath, user, secToken, permissionsInfo);
    } catch (Exception ex) {
        LOG.debug("Caught exception when retrieving web service info for app " + appId, ex);
        return null;
    }
}

From source file:com.netflix.bdp.inviso.log.LogService.java

License:Apache License

/**
 * Streams the aggregated YARN log of one container back to the HTTP client as
 * plain text.
 *
 * The aggregated log file is located from the remote app-log dir (or the
 * {@code root} override), then scanned sequentially until the entry whose key
 * matches {@code containerId} is found.
 *
 * @param owner       application owner (user) the logs were aggregated for
 * @param appId       textual YARN application id
 * @param containerId container whose log entry is streamed
 * @param nodeId      node manager id the container ran on
 * @param fs          optional filesystem URI override ("fs.default.name")
 * @param root        optional log-root path override
 * @return a streaming 200 response with the container's log types
 * @throws IOException on filesystem/reader errors
 */
@javax.ws.rs.Path("load/{owner}/{appId}/{containerId}/{nodeId}")
@GET
@Produces("text/plain")
public Response log(@PathParam("owner") String owner, @PathParam("appId") String appId,
        @PathParam("containerId") String containerId, @PathParam("nodeId") String nodeId,
        @QueryParam("fs") String fs, @QueryParam("root") String root) throws IOException {

    Configuration conf = new Configuration();

    if (fs != null) {
        conf.set("fs.default.name", fs);
    }

    Path logRoot = new Path(
            conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));

    if (root != null) {
        logRoot = new Path(root);
    }

    Path logPath = LogAggregationUtils.getRemoteNodeLogFileForApp(logRoot,
            ConverterUtils.toApplicationId(appId), owner, ConverterUtils.toNodeId(nodeId),
            LogAggregationUtils.getRemoteNodeLogDirSuffix(conf));

    // NOTE(review): this reader (and the stream obtained from it) stays open
    // past the method return because the StreamingOutput below reads from it
    // lazily; neither is ever explicitly closed here — confirm whether the
    // container/JAX-RS runtime closes the underlying stream after writing.
    AggregatedLogFormat.LogReader reader = new AggregatedLogFormat.LogReader(conf, logPath);

    LogKey key = new LogKey();

    DataInputStream in = reader.next(key);

    // Advance through the aggregated file until the requested container's entry.
    while (in != null && !key.toString().equals(containerId)) {
        key = new LogKey();
        in = reader.next(key);
    }

    if (in == null) {
        throw new WebApplicationException(404);
    }

    final DataInputStream fin = in;

    StreamingOutput stream = new StreamingOutput() {
        @Override
        public void write(OutputStream os) throws IOException, WebApplicationException {
            PrintStream out = new PrintStream(os);

            // Each entry holds several log types; EOF marks the end of this
            // container's data.
            while (true) {
                try {
                    LogReader.readAContainerLogsForALogType(fin, out);
                    out.flush();
                } catch (EOFException e) {
                    break;
                }
            }
        }
    };

    return Response.ok(stream).build();
}

From source file:com.yahoo.storm.yarn.StormOnYarn.java

License:Open Source License

/**
 * Attaches to an already-running Storm-on-YARN application.
 *
 * @param appId     textual YARN application id, e.g. "application_1234_0001"
 * @param stormConf storm configuration map, passed through unchanged
 * @return a client bound to the existing application
 */
public static StormOnYarn attachToApp(String appId, @SuppressWarnings("rawtypes") Map stormConf) {
    final ApplicationId applicationId = ConverterUtils.toApplicationId(appId);
    return new StormOnYarn(applicationId, stormConf);
}

From source file:edu.uci.ics.asterix.aoya.AsterixYARNClient.java

License:Apache License

/**
 * Reads the application id of the running AsterixDB instance from its lock
 * file in the user's home directory.
 *
 * @return the application id parsed from the first line of the lock file
 * @throws IllegalStateException if no instance name is configured
 * @throws YarnException         if the lock file does not exist (instance not running)
 * @throws IOException           on filesystem errors
 */
private ApplicationId getLockFile() throws IOException, YarnException {
    // Compare content, not identity: == only works for interned strings.
    if (instanceFolder == null || instanceFolder.isEmpty()) {
        throw new IllegalStateException("Instance name not given.");
    }
    FileSystem fs = FileSystem.get(conf);
    Path lockPath = new Path(fs.getHomeDirectory(), CONF_DIR_REL + instanceFolder + instanceLock);
    if (!fs.exists(lockPath)) {
        throw new YarnException("Instance appears to not be running. If you know it is, try using kill");
    }
    // try-with-resources closes the reader even if readLine() throws,
    // fixing the leak in the close-after-read version.
    try (BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(lockPath)))) {
        String lockAppId = br.readLine();
        return ConverterUtils.toApplicationId(lockAppId);
    }
}

From source file:edu.uci.ics.asterix.aoya.AsterixYARNClient.java

License:Apache License

/**
 * Reads the application id of a named AsterixDB instance from its lock file,
 * if present.
 *
 * @param instanceName name of the instance whose lock file is checked
 * @param conf         Hadoop configuration used to obtain the filesystem
 * @return the application id from the lock file, or {@code null} if no lock
 *         file exists (instance not running)
 * @throws IllegalStateException if the instance name is empty
 * @throws IOException           on filesystem errors
 */
public static ApplicationId getLockFile(String instanceName, Configuration conf) throws IOException {
    // Compare content, not identity: == only works for interned strings.
    if (instanceName == null || instanceName.isEmpty()) {
        throw new IllegalStateException("Instance name not given.");
    }
    FileSystem fs = FileSystem.get(conf);
    Path lockPath = new Path(fs.getHomeDirectory(), CONF_DIR_REL + instanceName + '/' + instanceLock);
    if (!fs.exists(lockPath)) {
        return null;
    }
    // try-with-resources closes the reader even if readLine() throws,
    // fixing the leak in the close-after-read version.
    try (BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(lockPath)))) {
        String lockAppId = br.readLine();
        return ConverterUtils.toApplicationId(lockAppId);
    }
}

From source file:io.hops.hopsworks.common.jobs.flink.AbstractYarnClusterDescriptor.java

License:Apache License

/**
 * Connects to an existing Flink-on-YARN cluster identified by its application
 * id and returns a client for it.
 *
 * @param applicationID textual YARN application id of the running Flink cluster
 * @return a cluster client wired to the application's JobManager
 * @throws RuntimeException if the application has already finished or cannot
 *                          be looked up
 */
@Override
public YarnClusterClient retrieve(String applicationID) {

    try {
        // Warn early when neither Hadoop configuration env var is present; the
        // lookup below may then use the wrong (default) configuration.
        final boolean hasHadoopConfDir = System.getenv("HADOOP_CONF_DIR") != null;
        final boolean hasYarnConfDir = System.getenv("YARN_CONF_DIR") != null;
        if (!hasHadoopConfDir && !hasYarnConfDir) {
            LOG.warn("Neither the HADOOP_CONF_DIR nor the YARN_CONF_DIR environment variable is set."
                    + "The Flink YARN Client needs one of these to be set to properly load the Hadoop "
                    + "configuration for accessing YARN.");
        }

        final ApplicationId yarnAppId = ConverterUtils.toApplicationId(applicationID);
        final YarnClient yarnClient = getYarnClient();
        final ApplicationReport appReport = yarnClient.getApplicationReport(yarnAppId);

        // UNDEFINED is the final status of a still-running application; anything
        // else means the cluster already terminated.
        final FinalApplicationStatus finalStatus = appReport.getFinalApplicationStatus();
        if (finalStatus != FinalApplicationStatus.UNDEFINED) {
            LOG.error(
                    "The application {} doesn't run anymore. It has previously completed with final status: {}",
                    applicationID, finalStatus);
            throw new RuntimeException("The Yarn application " + applicationID + " doesn't run anymore.");
        }

        LOG.info("Found application JobManager host name '{}' and port '{}' from supplied application id '{}'",
                appReport.getHost(), appReport.getRpcPort(), applicationID);

        // Point the Flink configuration at the discovered JobManager endpoint.
        flinkConfiguration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, appReport.getHost());
        flinkConfiguration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, appReport.getRpcPort());

        return createYarnClusterClient(this, yarnClient, appReport, flinkConfiguration, sessionFilesDir, false);
    } catch (Exception e) {
        throw new RuntimeException("Couldn't retrieve Yarn cluster", e);
    }
}

From source file:io.hops.metadata.util.RMUtilities.java

License:Apache License

/**
 * Retrieve applications (RMApp, RMAppAttempt) from NDB. MUST be used only
 * by ResourceTrackerService as some fields of the objects are not set.
 *
 * @param rmContext     RM context the reconstructed app is bound to
 * @param conf          configuration used when rebuilding app/attempt objects
 * @param applicationId textual application id to load from the state store
 * @return the reconstructed RMApp with its latest attempt attached, or
 *         {@code null} if no state (or no attempt state) is stored for the id
 * @throws java.io.IOException on state-store access errors
 */
public static RMApp getRMApp(RMContext rmContext, Configuration conf, String applicationId) throws IOException {
    //Retrieve all applicationIds from NDB
    ApplicationState hopAppState = RMUtilities.getApplicationState(applicationId);

    if (hopAppState != null) {
        //Create ApplicationState for every application

        ApplicationId appId = ConverterUtils.toApplicationId(hopAppState.getApplicationid());
        // Deserialize the protobuf-encoded application state blob.
        ApplicationStateDataPBImpl appStateData = new ApplicationStateDataPBImpl(
                ApplicationStateDataProto.parseFrom(hopAppState.getAppstate()));
        RMStateStore.ApplicationState appState = new RMStateStore.ApplicationState(appStateData.getSubmitTime(),
                appStateData.getStartTime(), appStateData.getApplicationSubmissionContext(),
                appStateData.getUser(), appStateData.getState(), appStateData.getDiagnostics(),
                appStateData.getFinishTime(), appStateData.getStateBeforeKilling(),
                appStateData.getUpdatedNodesId());
        LOG.debug("loadRMAppState for app " + appState.getAppId() + " state " + appState.getState());

        //Create RMApp
        //Null fields are not required by ResourceTrackerService
        RMAppImpl application = new RMAppImpl(appId, rmContext, conf,
                appState.getApplicationSubmissionContext().getApplicationName(), appState.getUser(),
                appState.getApplicationSubmissionContext().getQueue(),
                appState.getApplicationSubmissionContext(), null, null, appState.getSubmitTime(),
                appState.getApplicationSubmissionContext().getApplicationType(),
                appState.getApplicationSubmissionContext().getApplicationTags(), null);
        // NOTE(review): attemptCount + 1 presumably addresses the latest
        // attempt; confirm this matches how attempt ids are numbered on write.
        ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId,
                appState.getAttemptCount() + 1);
        ApplicationAttemptState hopAppAttemptState = RMUtilities.getApplicationAttemptState(applicationId,
                appAttemptId.toString());
        if (hopAppAttemptState != null) {
            // Deserialize the protobuf-encoded attempt state blob.
            ApplicationAttemptStateDataPBImpl attemptStateData = new ApplicationAttemptStateDataPBImpl(
                    YarnServerResourceManagerServiceProtos.ApplicationAttemptStateDataProto
                            .parseFrom(hopAppAttemptState.getApplicationattemptstate()));

            RMAppAttempt attempt = new RMAppAttemptImpl(appAttemptId, rmContext, null, null,
                    appState.getApplicationSubmissionContext(), conf,
                    application.getMaxAppAttempts() == application.getAppAttempts().size());

            ((RMAppAttemptImpl) attempt).setMasterContainer(attemptStateData.getMasterContainer());

            application.addRMAppAttempt(appAttemptId, attempt);
            return application;
        }
    }
    return null;
}

From source file:io.hops.metadata.util.RMUtilities.java

License:Apache License

/**
 * Reconstructs a full RMNode (including its recovered maps/lists of container
 * and application state) from the distributed state store, inside a single
 * read-locked transaction.
 *
 * @param id      node id of the RMNode to load
 * @param context RM context the rebuilt node is bound to
 * @param conf    configuration consulted for distributed-RT settings
 * @return the reconstructed RMNode, or {@code null} if no node with this id
 *         is stored
 * @throws IOException on state-store access errors
 */
public static org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode getRMNode(final String id,
        final RMContext context, final Configuration conf) throws IOException {
    LightWeightRequestHandler getRMNodeHandler = new LightWeightRequestHandler(YARNOperationType.TEST) {
        @Override
        public Object performTask() throws IOException {
            connector.beginTransaction();
            connector.readLock();
            org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode rmNode = null;
            RMNodeDataAccess rmnodeDA = (RMNodeDataAccess) RMStorageFactory
                    .getDataAccess(RMNodeDataAccess.class);
            RMNode hopRMNode = (RMNode) rmnodeDA.findByNodeId(id);
            if (hopRMNode != null) {
                ResourceDataAccess resDA = (ResourceDataAccess) RMStorageFactory
                        .getDataAccess(ResourceDataAccess.class);
                NodeDataAccess nodeDA = (NodeDataAccess) RMStorageFactory.getDataAccess(NodeDataAccess.class);
                //Retrieve resource of RMNode
                Resource res = (Resource) resDA.findEntry(hopRMNode.getNodeId(), Resource.TOTAL_CAPABILITY,
                        Resource.RMNODE);

                NodeId nodeId = ConverterUtils.toNodeId(id);
                //Retrieve and Initialize NodeBase for RMNode
                org.apache.hadoop.net.Node node = null;
                if (hopRMNode.getNodeId() != null) {
                    Node hopNode = (Node) nodeDA.findById(hopRMNode.getNodeId());
                    node = new NodeBase(hopNode.getName(), hopNode.getLocation());
                    if (hopNode.getParent() != null) {
                        node.setParent(new NodeBase(hopNode.getParent()));
                    }
                    node.setLevel(hopNode.getLevel());
                }
                //Retrieve nextHeartbeat
                NextHeartbeatDataAccess nextHBDA = (NextHeartbeatDataAccess) RMStorageFactory
                        .getDataAccess(NextHeartbeatDataAccess.class);
                boolean nextHeartbeat = nextHBDA.findEntry(id);
                //Create Resource
                ResourceOption resourceOption = null;
                if (res != null) {
                    resourceOption = ResourceOption.newInstance(org.apache.hadoop.yarn.api.records.Resource
                            .newInstance(res.getMemory(), res.getVirtualCores()),
                            hopRMNode.getOvercommittimeout());
                }
                rmNode = new RMNodeImpl(nodeId, context, hopRMNode.getHostName(), hopRMNode.getCommandPort(),
                        hopRMNode.getHttpPort(), node, resourceOption, hopRMNode.getNodemanagerVersion(),
                        hopRMNode.getHealthReport(), hopRMNode.getLastHealthReportTime(), nextHeartbeat,
                        conf.getBoolean(YarnConfiguration.HOPS_DISTRIBUTED_RT_ENABLED,
                                YarnConfiguration.DEFAULT_HOPS_DISTRIBUTED_RT_ENABLED));

                ((RMNodeImpl) rmNode).setState(hopRMNode.getCurrentState());
                // *** Recover maps/lists of RMNode ***
                //Use a cache for retrieved ContainerStatus to avoid re-fetching
                //the same container's status from the store more than once.
                Map<String, ContainerStatus> hopContainerStatuses = new HashMap<String, ContainerStatus>();
                //1. Recover JustLaunchedContainers
                JustLaunchedContainersDataAccess jlcDA = (JustLaunchedContainersDataAccess) RMStorageFactory
                        .getDataAccess(JustLaunchedContainersDataAccess.class);
                ContainerStatusDataAccess containerStatusDA = (ContainerStatusDataAccess) RMStorageFactory
                        .getDataAccess(ContainerStatusDataAccess.class);
                List<JustLaunchedContainers> hopJlcList = jlcDA.findByRMNode(id);
                if (hopJlcList != null && !hopJlcList.isEmpty()) {
                    Map<org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.ContainerStatus> justLaunchedContainers = new HashMap<org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.ContainerStatus>();
                    for (JustLaunchedContainers hop : hopJlcList) {
                        //Create ContainerId
                        org.apache.hadoop.yarn.api.records.ContainerId cid = ConverterUtils
                                .toContainerId(hop.getContainerId());
                        //Find and create ContainerStatus
                        if (!hopContainerStatuses.containsKey(hop.getContainerId())) {
                            hopContainerStatuses.put(hop.getContainerId(),
                                    (ContainerStatus) containerStatusDA.findEntry(hop.getContainerId(), id));
                        }
                        org.apache.hadoop.yarn.api.records.ContainerStatus conStatus = org.apache.hadoop.yarn.api.records.ContainerStatus
                                .newInstance(cid,
                                        ContainerState.valueOf(
                                                hopContainerStatuses.get(hop.getContainerId()).getState()),
                                        hopContainerStatuses.get(hop.getContainerId()).getDiagnostics(),
                                        hopContainerStatuses.get(hop.getContainerId()).getExitstatus());
                        justLaunchedContainers.put(cid, conStatus);
                    }
                    ((RMNodeImpl) rmNode).setJustLaunchedContainers(justLaunchedContainers);
                }
                //2. Return ContainerIdToClean
                ContainerIdToCleanDataAccess cidToCleanDA = (ContainerIdToCleanDataAccess) RMStorageFactory
                        .getDataAccess(ContainerIdToCleanDataAccess.class);
                List<ContainerId> cidToCleanList = cidToCleanDA.findByRMNode(id);
                if (cidToCleanList != null && !cidToCleanList.isEmpty()) {
                    Set<org.apache.hadoop.yarn.api.records.ContainerId> containersToClean = new TreeSet<org.apache.hadoop.yarn.api.records.ContainerId>();
                    for (ContainerId hop : cidToCleanList) {
                        //Create ContainerId
                        containersToClean.add(ConverterUtils.toContainerId(hop.getContainerId()));
                    }
                    ((RMNodeImpl) rmNode).setContainersToClean(containersToClean);
                }
                //3. Finished Applications
                FinishedApplicationsDataAccess finishedAppsDA = (FinishedApplicationsDataAccess) RMStorageFactory
                        .getDataAccess(FinishedApplicationsDataAccess.class);
                List<FinishedApplications> hopFinishedAppsList = finishedAppsDA.findByRMNode(id);
                if (hopFinishedAppsList != null && !hopFinishedAppsList.isEmpty()) {
                    List<ApplicationId> finishedApps = new ArrayList<ApplicationId>();
                    for (FinishedApplications hop : hopFinishedAppsList) {
                        finishedApps.add(ConverterUtils.toApplicationId(hop.getApplicationId()));
                    }
                    ((RMNodeImpl) rmNode).setFinishedApplications(finishedApps);
                }

                //4. UpdadedContainerInfo
                UpdatedContainerInfoDataAccess uciDA = (UpdatedContainerInfoDataAccess) RMStorageFactory
                        .getDataAccess(UpdatedContainerInfoDataAccess.class);
                //Retrieve all UpdatedContainerInfo entries for this particular RMNode
                Map<Integer, List<UpdatedContainerInfo>> hopUpdatedContainerInfoMap = uciDA.findByRMNode(id);
                if (hopUpdatedContainerInfoMap != null && !hopUpdatedContainerInfoMap.isEmpty()) {
                    ConcurrentLinkedQueue<org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo> updatedContainerInfoQueue = new ConcurrentLinkedQueue<org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo>();
                    for (int uciId : hopUpdatedContainerInfoMap.keySet()) {
                        for (UpdatedContainerInfo hopUCI : hopUpdatedContainerInfoMap.get(uciId)) {
                            List<org.apache.hadoop.yarn.api.records.ContainerStatus> newlyAllocated = new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>();
                            List<org.apache.hadoop.yarn.api.records.ContainerStatus> completed = new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>();
                            //Retrieve containerstatus entries for the particular updatedcontainerinfo
                            org.apache.hadoop.yarn.api.records.ContainerId cid = ConverterUtils
                                    .toContainerId(hopUCI.getContainerId());
                            if (!hopContainerStatuses.containsKey(hopUCI.getContainerId())) {
                                hopContainerStatuses.put(hopUCI.getContainerId(),
                                        (ContainerStatus) containerStatusDA.findEntry(hopUCI.getContainerId(),
                                                id));
                            }
                            org.apache.hadoop.yarn.api.records.ContainerStatus conStatus = org.apache.hadoop.yarn.api.records.ContainerStatus
                                    .newInstance(cid,
                                            ContainerState.valueOf(hopContainerStatuses
                                                    .get(hopUCI.getContainerId()).getState()),
                                            hopContainerStatuses.get(hopUCI.getContainerId()).getDiagnostics(),
                                            hopContainerStatuses.get(hopUCI.getContainerId()).getExitstatus());
                            //Check ContainerStatus state to add it to appropriate list
                            if (conStatus != null) {
                                if (conStatus.getState().toString()
                                        .equals(TablesDef.ContainerStatusTableDef.STATE_RUNNING)) {
                                    newlyAllocated.add(conStatus);
                                } else if (conStatus.getState().toString()
                                        .equals(TablesDef.ContainerStatusTableDef.STATE_COMPLETED)) {
                                    completed.add(conStatus);
                                }
                            }
                            org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo uci = new org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo(
                                    newlyAllocated, completed, hopUCI.getUpdatedContainerInfoId());
                            updatedContainerInfoQueue.add(uci);
                            //NOTE(review): queue/counter are re-applied on every
                            //iteration; looks like only the last call matters —
                            //confirm before hoisting out of the loop.
                            ((RMNodeImpl) rmNode).setUpdatedContainerInfo(updatedContainerInfoQueue);
                            //Update uci counter
                            ((RMNodeImpl) rmNode).setUpdatedContainerInfoId(hopRMNode.getUciId());
                        }
                    }
                }

                //5. Retrieve latestNodeHeartBeatResponse
                NodeHBResponseDataAccess hbDA = (NodeHBResponseDataAccess) RMStorageFactory
                        .getDataAccess(NodeHBResponseDataAccess.class);
                NodeHBResponse hopHB = (NodeHBResponse) hbDA.findById(id);
                if (hopHB != null) {
                    //Deserialize the protobuf-encoded heartbeat response blob.
                    NodeHeartbeatResponse hb = new NodeHeartbeatResponsePBImpl(
                            YarnServerCommonServiceProtos.NodeHeartbeatResponseProto
                                    .parseFrom(hopHB.getResponse()));
                    ((RMNodeImpl) rmNode).setLatestNodeHBResponse(hb);
                }
            }
            connector.commit();
            return rmNode;
        }
    };
    return (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode) getRMNodeHandler.handle();
}

From source file:io.hops.metadata.util.RMUtilities.java

License:Apache License

/**
 * Retrieves and sets RMNode containersToClean and FinishedApplications.
 *
 * @param rmnodeId/*from   w  w w .  ja  v a 2  s.  c om*/
 * @param containersToClean
 * @param finishedApplications
 * @throws IOException
 */
public static void setContainersToCleanAndFinishedApplications(final String rmnodeId,
        final Set<org.apache.hadoop.yarn.api.records.ContainerId> containersToClean,
        final List<ApplicationId> finishedApplications) throws IOException {
    LightWeightRequestHandler setContainersToCleanAndFinishedAppsHandler = new LightWeightRequestHandler(
            YARNOperationType.TEST) {
        @Override
        public Object performTask() throws StorageException {
            connector.beginTransaction();
            connector.readLock();
            //1. Retrieve ContainerIdToClean
            ContainerIdToCleanDataAccess tocleanDA = (ContainerIdToCleanDataAccess) YarnAPIStorageFactory
                    .getDataAccess(ContainerIdToCleanDataAccess.class);
            List<ContainerId> hopContainersToClean = tocleanDA.findByRMNode(rmnodeId);
            if (hopContainersToClean != null && !hopContainersToClean.isEmpty()) {
                Set<org.apache.hadoop.yarn.api.records.ContainerId> containersToCleanNDB = new TreeSet<org.apache.hadoop.yarn.api.records.ContainerId>();
                for (ContainerId hop : hopContainersToClean) {
                    containersToCleanNDB.add(ConverterUtils.toContainerId(hop.getContainerId()));
                }
                containersToClean.clear();
                containersToClean.addAll(containersToCleanNDB);
            }

            //2. Retrieve finishedApplications
            FinishedApplicationsDataAccess finishedAppsDA = (FinishedApplicationsDataAccess) YarnAPIStorageFactory
                    .getDataAccess(FinishedApplicationsDataAccess.class);
            List<FinishedApplications> hopFinishedApps = finishedAppsDA.findByRMNode(rmnodeId);
            if (hopFinishedApps != null && !hopFinishedApps.isEmpty()) {
                List<ApplicationId> finishedApplicationsNDB = new ArrayList<ApplicationId>();
                for (FinishedApplications hopFinishedApp : hopFinishedApps) {
                    finishedApplicationsNDB
                            .add(ConverterUtils.toApplicationId(hopFinishedApp.getApplicationId()));
                }
                finishedApplications.clear();
                finishedApplications.addAll(finishedApplicationsNDB);
            }

            connector.commit();
            return null;
        }
    };
    setContainersToCleanAndFinishedAppsHandler.handle();
}

From source file:org.apache.drill.yarn.appMaster.AMYarnFacadeImpl.java

License:Apache License

/**
 * Initializes and starts the YARN clients used by the application master: the
 * async RM client, the async NM client, and the plain YARN client. If the
 * application id is available in the environment, also fetches the app report.
 *
 * @param resourceCallback callback for async resource-manager events
 * @param nodeCallback     callback for async node-manager events
 */
@Override
public void start(CallbackHandler resourceCallback,
        org.apache.hadoop.yarn.client.api.async.NMClientAsync.CallbackHandler nodeCallback) {

    conf = new YarnConfiguration();

    // Asynchronous resource-manager client.
    resourceMgr = AMRMClientAsync.createAMRMClientAsync(pollPeriodMs, resourceCallback);
    resourceMgr.init(conf);
    resourceMgr.start();

    // Create the asynchronous node manager client

    nodeMgr = NMClientAsync.createNMClientAsync(nodeCallback);
    nodeMgr.init(conf);
    nodeMgr.start();

    // Plain (synchronous) YARN client, used for report lookups.
    client = YarnClient.createYarnClient();
    client.init(conf);
    client.start();

    // The launcher passes our own application id via the environment; without
    // it we simply skip the report lookup.
    String appIdStr = System.getenv(DrillOnYarnConfig.APP_ID_ENV_VAR);
    if (appIdStr != null) {
        appId = ConverterUtils.toApplicationId(appIdStr);
        try {
            appReport = client.getApplicationReport(appId);
        } catch (YarnException | IOException e) {
            // Fixed typo in the log message ("applicaiton" -> "application").
            LOG.error("Failed to get YARN application report for App ID: " + appIdStr, e);
        }
    }
}