Example usage for org.apache.hadoop.yarn.util ConverterUtils toNodeId

List of usage examples for org.apache.hadoop.yarn.util ConverterUtils toNodeId

Introduction

On this page you can find example usages of org.apache.hadoop.yarn.util ConverterUtils.toNodeId.

Prototype

@Public
    @Deprecated
    public static NodeId toNodeId(String nodeIdStr) 

Source Link

Usage

From source file:com.datatorrent.stram.StreamingAppMasterService.java

License:Apache License

/**
 * Checks on containers that were allocated by a previous application attempt.
 * Each one is re-registered in the allocated list and an asynchronous status
 * query is issued; containers that are still alive will then check in via
 * their regular heartbeat.
 */
private void checkContainerStatus() {
    for (StreamingContainerAgent agent : this.dnmgr.getContainerAgents()) {
        ContainerId id = ConverterUtils.toContainerId(agent.container.getExternalId());
        NodeId node = ConverterUtils.toNodeId(agent.container.host);

        // Rebuild the YARN container record and put it back into the allocated
        // list. No container token is available for a recovered container.
        Resource capability = Resource.newInstance(agent.container.getAllocatedMemoryMB(),
                agent.container.getAllocatedVCores());
        Priority priority = Priority.newInstance(agent.container.getResourceRequestPriority());
        org.apache.hadoop.yarn.api.records.Token token = null;
        Container recovered = Container.newInstance(id, node, agent.container.nodeHttpAddress,
                capability, priority, token);
        this.allocatedContainers.put(id.toString(), new AllocatedContainer(recovered));

        // Ask the node manager for the container's current status.
        nmClient.getContainerStatusAsync(id, node);
    }
}

From source file:com.netflix.bdp.inviso.log.LogService.java

License:Apache License

/**
 * Streams the aggregated YARN log of a single container as plain text.
 * <p>
 * The aggregated log file for the application is located under the NM remote
 * app-log dir (or {@code root} when given), opened, and scanned key-by-key
 * until the entry matching {@code containerId} is found; that entry's log
 * types are then streamed to the client.
 *
 * @param owner       user that owns the application
 * @param appId       application id string (parsed by ConverterUtils)
 * @param containerId container whose logs are requested (matched against LogKey)
 * @param nodeId      node id string, host:port (parsed by ConverterUtils)
 * @param fs          optional filesystem URI override
 * @param root        optional log-root path override
 * @return 200 response streaming the container's aggregated logs
 * @throws IOException             if the log file cannot be opened or read
 * @throws WebApplicationException 404 when the container has no log entry
 */
@javax.ws.rs.Path("load/{owner}/{appId}/{containerId}/{nodeId}")
@GET
@Produces("text/plain")
public Response log(@PathParam("owner") String owner, @PathParam("appId") String appId,
        @PathParam("containerId") String containerId, @PathParam("nodeId") String nodeId,
        @QueryParam("fs") String fs, @QueryParam("root") String root) throws IOException {

    Configuration conf = new Configuration();

    if (fs != null) {
        // NOTE(review): "fs.default.name" is the legacy key for "fs.defaultFS";
        // kept as-is because Hadoop still honors the deprecated name.
        conf.set("fs.default.name", fs);
    }

    Path logRoot = new Path(
            conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));

    if (root != null) {
        logRoot = new Path(root);
    }

    Path logPath = LogAggregationUtils.getRemoteNodeLogFileForApp(logRoot,
            ConverterUtils.toApplicationId(appId), owner, ConverterUtils.toNodeId(nodeId),
            LogAggregationUtils.getRemoteNodeLogDirSuffix(conf));

    final AggregatedLogFormat.LogReader reader = new AggregatedLogFormat.LogReader(conf, logPath);

    // Scan forward to the entry for the requested container. Close the reader
    // on every early exit — previously it leaked on the 404 path and on scan
    // failures.
    DataInputStream in;
    try {
        LogKey key = new LogKey();
        in = reader.next(key);

        while (in != null && !key.toString().equals(containerId)) {
            key = new LogKey();
            in = reader.next(key);
        }

        if (in == null) {
            reader.close();
            throw new WebApplicationException(404);
        }
    } catch (IOException e) {
        reader.close();
        throw e;
    } catch (RuntimeException e) {
        reader.close();
        throw e;
    }

    final DataInputStream fin = in;

    StreamingOutput stream = new StreamingOutput() {
        @Override
        public void write(OutputStream os) throws IOException, WebApplicationException {
            PrintStream out = new PrintStream(os);
            try {
                // Each call emits one log type; EOF signals the end of the
                // container's entry.
                while (true) {
                    try {
                        LogReader.readAContainerLogsForALogType(fin, out);
                        out.flush();
                    } catch (EOFException e) {
                        break;
                    }
                }
            } finally {
                // Release the aggregated-log reader (and its underlying
                // stream) once the response body has been written.
                reader.close();
            }
        }
    };

    return Response.ok(stream).build();
}

From source file:io.hops.metadata.util.RMUtilities.java

License:Apache License

/**
 * Reconstructs a full {@code RMNode} (as an {@code RMNodeImpl}) from the
 * distributed state store, including its recovered container and heartbeat
 * state.
 * <p>
 * All reads are performed inside a single read-locked transaction executed by
 * a {@code LightWeightRequestHandler}. Returns {@code null} when no RMNode row
 * exists for {@code id}.
 *
 * @param id      node id string — presumably host:port as expected by
 *                {@code ConverterUtils.toNodeId}; confirm against callers
 * @param context RM context the rebuilt node is bound to
 * @param conf    configuration, consulted for the distributed-RT flag
 * @return the rebuilt RMNode with recovered state, or {@code null} if unknown
 * @throws IOException if the storage handler fails
 */
public static org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode getRMNode(final String id,
        final RMContext context, final Configuration conf) throws IOException {
    LightWeightRequestHandler getRMNodeHandler = new LightWeightRequestHandler(YARNOperationType.TEST) {
        @Override
        public Object performTask() throws IOException {
            // Single transaction with a read lock for a consistent snapshot.
            connector.beginTransaction();
            connector.readLock();
            org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode rmNode = null;
            RMNodeDataAccess rmnodeDA = (RMNodeDataAccess) RMStorageFactory
                    .getDataAccess(RMNodeDataAccess.class);
            RMNode hopRMNode = (RMNode) rmnodeDA.findByNodeId(id);
            if (hopRMNode != null) {
                ResourceDataAccess resDA = (ResourceDataAccess) RMStorageFactory
                        .getDataAccess(ResourceDataAccess.class);
                NodeDataAccess nodeDA = (NodeDataAccess) RMStorageFactory.getDataAccess(NodeDataAccess.class);
                //Retrieve resource of RMNode (may be null; checked below before use)
                Resource res = (Resource) resDA.findEntry(hopRMNode.getNodeId(), Resource.TOTAL_CAPABILITY,
                        Resource.RMNODE);

                NodeId nodeId = ConverterUtils.toNodeId(id);
                //Retrieve and Initialize NodeBase (network topology position) for RMNode
                org.apache.hadoop.net.Node node = null;
                if (hopRMNode.getNodeId() != null) {
                    Node hopNode = (Node) nodeDA.findById(hopRMNode.getNodeId());
                    node = new NodeBase(hopNode.getName(), hopNode.getLocation());
                    if (hopNode.getParent() != null) {
                        node.setParent(new NodeBase(hopNode.getParent()));
                    }
                    node.setLevel(hopNode.getLevel());
                }
                //Retrieve nextHeartbeat flag for this node
                NextHeartbeatDataAccess nextHBDA = (NextHeartbeatDataAccess) RMStorageFactory
                        .getDataAccess(NextHeartbeatDataAccess.class);
                boolean nextHeartbeat = nextHBDA.findEntry(id);
                //Create ResourceOption from the persisted capability, if present
                ResourceOption resourceOption = null;
                if (res != null) {
                    resourceOption = ResourceOption.newInstance(org.apache.hadoop.yarn.api.records.Resource
                            .newInstance(res.getMemory(), res.getVirtualCores()),
                            hopRMNode.getOvercommittimeout());
                }
                rmNode = new RMNodeImpl(nodeId, context, hopRMNode.getHostName(), hopRMNode.getCommandPort(),
                        hopRMNode.getHttpPort(), node, resourceOption, hopRMNode.getNodemanagerVersion(),
                        hopRMNode.getHealthReport(), hopRMNode.getLastHealthReportTime(), nextHeartbeat,
                        conf.getBoolean(YarnConfiguration.HOPS_DISTRIBUTED_RT_ENABLED,
                                YarnConfiguration.DEFAULT_HOPS_DISTRIBUTED_RT_ENABLED));

                ((RMNodeImpl) rmNode).setState(hopRMNode.getCurrentState());
                // *** Recover maps/lists of RMNode ***
                //Use a cache for retrieved ContainerStatus to avoid repeated store lookups
                Map<String, ContainerStatus> hopContainerStatuses = new HashMap<String, ContainerStatus>();
                //1. Recover JustLaunchedContainers
                JustLaunchedContainersDataAccess jlcDA = (JustLaunchedContainersDataAccess) RMStorageFactory
                        .getDataAccess(JustLaunchedContainersDataAccess.class);
                ContainerStatusDataAccess containerStatusDA = (ContainerStatusDataAccess) RMStorageFactory
                        .getDataAccess(ContainerStatusDataAccess.class);
                List<JustLaunchedContainers> hopJlcList = jlcDA.findByRMNode(id);
                if (hopJlcList != null && !hopJlcList.isEmpty()) {
                    Map<org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.ContainerStatus> justLaunchedContainers = new HashMap<org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.ContainerStatus>();
                    for (JustLaunchedContainers hop : hopJlcList) {
                        //Create ContainerId from its persisted string form
                        org.apache.hadoop.yarn.api.records.ContainerId cid = ConverterUtils
                                .toContainerId(hop.getContainerId());
                        //Find and create ContainerStatus (cached per container id)
                        if (!hopContainerStatuses.containsKey(hop.getContainerId())) {
                            hopContainerStatuses.put(hop.getContainerId(),
                                    (ContainerStatus) containerStatusDA.findEntry(hop.getContainerId(), id));
                        }
                        org.apache.hadoop.yarn.api.records.ContainerStatus conStatus = org.apache.hadoop.yarn.api.records.ContainerStatus
                                .newInstance(cid,
                                        ContainerState.valueOf(
                                                hopContainerStatuses.get(hop.getContainerId()).getState()),
                                        hopContainerStatuses.get(hop.getContainerId()).getDiagnostics(),
                                        hopContainerStatuses.get(hop.getContainerId()).getExitstatus());
                        justLaunchedContainers.put(cid, conStatus);
                    }
                    ((RMNodeImpl) rmNode).setJustLaunchedContainers(justLaunchedContainers);
                }
                //2. Recover ContainerIdToClean (containers scheduled for cleanup)
                ContainerIdToCleanDataAccess cidToCleanDA = (ContainerIdToCleanDataAccess) RMStorageFactory
                        .getDataAccess(ContainerIdToCleanDataAccess.class);
                List<ContainerId> cidToCleanList = cidToCleanDA.findByRMNode(id);
                if (cidToCleanList != null && !cidToCleanList.isEmpty()) {
                    Set<org.apache.hadoop.yarn.api.records.ContainerId> containersToClean = new TreeSet<org.apache.hadoop.yarn.api.records.ContainerId>();
                    for (ContainerId hop : cidToCleanList) {
                        //Create ContainerId
                        containersToClean.add(ConverterUtils.toContainerId(hop.getContainerId()));
                    }
                    ((RMNodeImpl) rmNode).setContainersToClean(containersToClean);
                }
                //3. Recover Finished Applications
                FinishedApplicationsDataAccess finishedAppsDA = (FinishedApplicationsDataAccess) RMStorageFactory
                        .getDataAccess(FinishedApplicationsDataAccess.class);
                List<FinishedApplications> hopFinishedAppsList = finishedAppsDA.findByRMNode(id);
                if (hopFinishedAppsList != null && !hopFinishedAppsList.isEmpty()) {
                    List<ApplicationId> finishedApps = new ArrayList<ApplicationId>();
                    for (FinishedApplications hop : hopFinishedAppsList) {
                        finishedApps.add(ConverterUtils.toApplicationId(hop.getApplicationId()));
                    }
                    ((RMNodeImpl) rmNode).setFinishedApplications(finishedApps);
                }

                //4. Recover UpdatedContainerInfo queue
                UpdatedContainerInfoDataAccess uciDA = (UpdatedContainerInfoDataAccess) RMStorageFactory
                        .getDataAccess(UpdatedContainerInfoDataAccess.class);
                //Retrieve all UpdatedContainerInfo entries for this particular RMNode
                Map<Integer, List<UpdatedContainerInfo>> hopUpdatedContainerInfoMap = uciDA.findByRMNode(id);
                if (hopUpdatedContainerInfoMap != null && !hopUpdatedContainerInfoMap.isEmpty()) {
                    ConcurrentLinkedQueue<org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo> updatedContainerInfoQueue = new ConcurrentLinkedQueue<org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo>();
                    for (int uciId : hopUpdatedContainerInfoMap.keySet()) {
                        for (UpdatedContainerInfo hopUCI : hopUpdatedContainerInfoMap.get(uciId)) {
                            List<org.apache.hadoop.yarn.api.records.ContainerStatus> newlyAllocated = new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>();
                            List<org.apache.hadoop.yarn.api.records.ContainerStatus> completed = new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>();
                            //Retrieve containerstatus entries for the particular updatedcontainerinfo
                            org.apache.hadoop.yarn.api.records.ContainerId cid = ConverterUtils
                                    .toContainerId(hopUCI.getContainerId());
                            if (!hopContainerStatuses.containsKey(hopUCI.getContainerId())) {
                                hopContainerStatuses.put(hopUCI.getContainerId(),
                                        (ContainerStatus) containerStatusDA.findEntry(hopUCI.getContainerId(),
                                                id));
                            }
                            org.apache.hadoop.yarn.api.records.ContainerStatus conStatus = org.apache.hadoop.yarn.api.records.ContainerStatus
                                    .newInstance(cid,
                                            ContainerState.valueOf(hopContainerStatuses
                                                    .get(hopUCI.getContainerId()).getState()),
                                            hopContainerStatuses.get(hopUCI.getContainerId()).getDiagnostics(),
                                            hopContainerStatuses.get(hopUCI.getContainerId()).getExitstatus());
                            //Check ContainerStatus state to add it to appropriate list
                            // NOTE(review): conStatus can never be null here (newInstance
                            // was just called), so this guard is always true.
                            if (conStatus != null) {
                                if (conStatus.getState().toString()
                                        .equals(TablesDef.ContainerStatusTableDef.STATE_RUNNING)) {
                                    newlyAllocated.add(conStatus);
                                } else if (conStatus.getState().toString()
                                        .equals(TablesDef.ContainerStatusTableDef.STATE_COMPLETED)) {
                                    completed.add(conStatus);
                                }
                            }
                            org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo uci = new org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo(
                                    newlyAllocated, completed, hopUCI.getUpdatedContainerInfoId());
                            updatedContainerInfoQueue.add(uci);
                            ((RMNodeImpl) rmNode).setUpdatedContainerInfo(updatedContainerInfoQueue);
                            //Update uci counter
                            ((RMNodeImpl) rmNode).setUpdatedContainerInfoId(hopRMNode.getUciId());
                        }
                    }
                }

                //5. Retrieve latestNodeHeartBeatResponse (persisted protobuf bytes)
                NodeHBResponseDataAccess hbDA = (NodeHBResponseDataAccess) RMStorageFactory
                        .getDataAccess(NodeHBResponseDataAccess.class);
                NodeHBResponse hopHB = (NodeHBResponse) hbDA.findById(id);
                if (hopHB != null) {
                    NodeHeartbeatResponse hb = new NodeHeartbeatResponsePBImpl(
                            YarnServerCommonServiceProtos.NodeHeartbeatResponseProto
                                    .parseFrom(hopHB.getResponse()));
                    ((RMNodeImpl) rmNode).setLatestNodeHBResponse(hb);
                }
            }
            connector.commit();
            return rmNode;
        }
    };
    return (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode) getRMNodeHandler.handle();
}

From source file:io.hops.metadata.util.RMUtilities.java

License:Apache License

/**
 * Recovers the inactive-nodes map of RMContextImpl from the state store.
 * <p>
 * Results are memoized in {@code alreadyRecoveredRMContextInactiveNodes}: the
 * store is consulted only when that cache is empty; otherwise the cached map
 * is returned unchanged. Each freshly recovered node has {@code recover(state)}
 * invoked on it before being returned.
 *
 * @param rmContext RM context the rebuilt nodes are bound to
 * @param state     RM state used to recover each node after construction
 * @param conf      configuration, consulted for the distributed-RT flag
 * @return map keyed by host name to recovered RMNode, or {@code null} when the
 *         handler throws an IOException (it is logged and swallowed)
 * @throws java.lang.Exception on errors outside the storage handler
 */
//For testing TODO move to test
public static Map<String, org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode> getRMContextInactiveNodes(
        final RMContext rmContext, final RMState state, final Configuration conf) throws Exception {
    LightWeightRequestHandler getRMContextInactiveNodesHandler = new LightWeightRequestHandler(
            YARNOperationType.TEST) {
        @Override
        public Object performTask() throws StorageException {
            // Single write-locked transaction for a consistent snapshot.
            connector.beginTransaction();
            connector.writeLock();
            ConcurrentMap<String, org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode> inactiveNodes = new ConcurrentHashMap<String, org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode>();
            //Retrieve rmctxnodes table entries
            RMContextInactiveNodesDataAccess rmctxInactiveNodesDA = (RMContextInactiveNodesDataAccess) RMStorageFactory
                    .getDataAccess(RMContextInactiveNodesDataAccess.class);
            ResourceDataAccess DA = (ResourceDataAccess) YarnAPIStorageFactory
                    .getDataAccess(ResourceDataAccess.class);
            RMNodeDataAccess rmDA = (RMNodeDataAccess) RMStorageFactory.getDataAccess(RMNodeDataAccess.class);
            List<RMContextInactiveNodes> hopRMContextInactiveNodes = rmctxInactiveNodesDA.findAll();
            if (hopRMContextInactiveNodes != null && !hopRMContextInactiveNodes.isEmpty()) {
                for (RMContextInactiveNodes key : hopRMContextInactiveNodes) {

                    NodeId nodeId = ConverterUtils.toNodeId(key.getRmnodeid());
                    //retrieve RMNode in order to create a new FiCaSchedulerNode
                    RMNode hopRMNode = (RMNode) rmDA.findByNodeId(key.getRmnodeid());
                    //Retrieve resource of RMNode
                    // NOTE(review): unlike getRMNode, res is not null-checked before
                    // res.getMemory() below — a missing resource row would NPE; verify
                    // the store guarantees a resource entry for every inactive node.
                    Resource res = (Resource) DA.findEntry(hopRMNode.getNodeId(), Resource.TOTAL_CAPABILITY,
                            Resource.RMNODE);
                    //Retrieve and Initialize NodeBase for RMNode
                    NodeDataAccess nodeDA = (NodeDataAccess) RMStorageFactory
                            .getDataAccess(NodeDataAccess.class);
                    //Retrieve and Initialize NodeBase (topology position) for RMNode
                    org.apache.hadoop.net.Node node = null;
                    if (hopRMNode.getNodeId() != null) {
                        Node hopNode = (Node) nodeDA.findById(hopRMNode.getNodeId());
                        node = new NodeBase(hopNode.getName(), hopNode.getLocation());
                        if (hopNode.getParent() != null) {
                            node.setParent(new NodeBase(hopNode.getParent()));
                        }
                        node.setLevel(hopNode.getLevel());
                    }
                    //Retrieve nextHeartbeat flag
                    NextHeartbeatDataAccess nextHBDA = (NextHeartbeatDataAccess) RMStorageFactory
                            .getDataAccess(NextHeartbeatDataAccess.class);
                    boolean nextHeartbeat = nextHBDA.findEntry(key.getRmnodeid());
                    org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode rmNode = new RMNodeImpl(nodeId,
                            rmContext, hopRMNode.getHostName(), hopRMNode.getCommandPort(),
                            hopRMNode.getHttpPort(), node,
                            ResourceOption.newInstance(org.apache.hadoop.yarn.api.records.Resource.newInstance(
                                    res.getMemory(), res.getVirtualCores()), hopRMNode.getOvercommittimeout()),
                            hopRMNode.getNodemanagerVersion(), hopRMNode.getHealthReport(),
                            hopRMNode.getLastHealthReportTime(), nextHeartbeat,
                            conf.getBoolean(YarnConfiguration.HOPS_DISTRIBUTED_RT_ENABLED,
                                    YarnConfiguration.DEFAULT_HOPS_DISTRIBUTED_RT_ENABLED));
                    ((RMNodeImpl) rmNode).setState(hopRMNode.getCurrentState());
                    // Both the memoization cache and the result map are keyed by host.
                    alreadyRecoveredRMContextInactiveNodes.put(rmNode.getNodeID().getHost(), rmNode);
                    inactiveNodes.put(rmNode.getNodeID().getHost(), rmNode);

                }
            }
            connector.commit();
            return inactiveNodes;
        }
    };
    try {
        if (alreadyRecoveredRMContextInactiveNodes.isEmpty()) {
            Map<String, org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode> result = (Map<String, org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode>) getRMContextInactiveNodesHandler
                    .handle();
            for (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode node : result.values()) {
                node.recover(state);
            }
            return result;
        } else {
            return alreadyRecoveredRMContextInactiveNodes;
        }
    } catch (IOException ex) {
        LOG.error("HOP", ex);
    }
    return null;
}

From source file:io.hops.util.DBUtility.java

License:Apache License

/**
 * Builds or updates the scheduler-side {@code RMNode} from a bundle of hops
 * RMNode components read from the state store.
 * <p>
 * If the node is not yet known to {@code rmContext} (first registration), a new
 * {@code RMNodeImplDist} is constructed from the persisted components; the
 * existing instance is reused otherwise. The node's state and its
 * UpdatedContainerInfo queue are then refreshed from {@code hopRMNodeComps}.
 *
 * @param hopRMNodeComps persisted RMNode components; may be null, in which
 *                       case {@code null} is returned
 * @param rmContext      RM context used to look up / bind the node
 * @return the created or updated RMNode, or {@code null} when input is null
 * @throws InvalidProtocolBufferException if persisted protobuf data is invalid
 */
public static RMNode processHopRMNodeCompsForScheduler(RMNodeComps hopRMNodeComps, RMContext rmContext)
        throws InvalidProtocolBufferException {
    org.apache.hadoop.yarn.api.records.NodeId nodeId;
    RMNode rmNode = null;
    if (hopRMNodeComps != null) {
        nodeId = ConverterUtils.toNodeId(hopRMNodeComps.getRMNodeId());
        rmNode = rmContext.getRMNodes().get(nodeId);

        // The first time we are receiving the RMNode, this will happen when the node registers
        if (rmNode == null) {
            // Retrieve heartbeat
            boolean nextHeartbeat = true;

            // Create Resource from the persisted capability; fall back to an
            // empty resource (and log) when none was stored.
            Resource resource = null;
            if (hopRMNodeComps.getHopResource() != null) {
                resource = Resource.newInstance(hopRMNodeComps.getHopResource().getMemory(),
                        hopRMNodeComps.getHopResource().getVirtualCores());
            } else {
                LOG.error("ResourceOption should not be null");
                resource = Resource.newInstance(0, 0);
            }

            rmNode = new RMNodeImplDist(nodeId, rmContext, hopRMNodeComps.getHopRMNode().getHostName(),
                    hopRMNodeComps.getHopRMNode().getCommandPort(), hopRMNodeComps.getHopRMNode().getHttpPort(),
                    ResourceTrackerService.resolve(hopRMNodeComps.getHopRMNode().getHostName()), resource,
                    hopRMNodeComps.getHopRMNode().getNodemanagerVersion());

            // Force Java to put the host in cache
            NetUtils.createSocketAddrForHost(nodeId.getHost(), nodeId.getPort());
        }

        // Update the RMNode state from the persisted row
        if (hopRMNodeComps.getHopRMNode() != null) {
            ((RMNodeImplDist) rmNode).setState(hopRMNodeComps.getHopRMNode().getCurrentState());
        }
        if (hopRMNodeComps.getHopUpdatedContainerInfo() != null) {
            List<io.hops.metadata.yarn.entity.UpdatedContainerInfo> hopUpdatedContainerInfoList = hopRMNodeComps
                    .getHopUpdatedContainerInfo();

            if (hopUpdatedContainerInfoList != null && !hopUpdatedContainerInfoList.isEmpty()) {
                ConcurrentLinkedQueue<org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo> updatedContainerInfoQueue = new ConcurrentLinkedQueue<>();

                // Group container statuses by their UpdatedContainerInfo id.
                Map<Integer, org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo> ucis = new HashMap<>();
                LOG.debug(hopRMNodeComps.getRMNodeId() + " getting ucis " + hopUpdatedContainerInfoList.size()
                        + " pending event " + hopRMNodeComps.getPendingEvent().getId().getEventId());

                for (io.hops.metadata.yarn.entity.UpdatedContainerInfo hopUCI : hopUpdatedContainerInfoList) {
                    if (!ucis.containsKey(hopUCI.getUpdatedContainerInfoId())) {
                        ucis.put(hopUCI.getUpdatedContainerInfoId(),
                                new org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo(
                                        new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>(),
                                        new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>(),
                                        hopUCI.getUpdatedContainerInfoId()));
                    }

                    ContainerId cid = ConverterUtils.toContainerId(hopUCI.getContainerId());
                    // NOTE(review): hopContainerStatus would be null if the status map
                    // lacks this container id — the newInstance call below would then
                    // NPE; verify the store always persists a status per UCI entry.
                    io.hops.metadata.yarn.entity.ContainerStatus hopContainerStatus = hopRMNodeComps
                            .getHopContainersStatusMap().get(hopUCI.getContainerId());

                    org.apache.hadoop.yarn.api.records.ContainerStatus conStatus = org.apache.hadoop.yarn.api.records.ContainerStatus
                            .newInstance(cid, ContainerState.valueOf(hopContainerStatus.getState()),
                                    hopContainerStatus.getDiagnostics(), hopContainerStatus.getExitstatus());

                    // Check ContainerStatus state to add it in the appropriate list
                    if (conStatus != null) {
                        LOG.debug("add uci for container " + conStatus.getContainerId() + " status "
                                + conStatus.getState());
                        if (conStatus.getState().equals(ContainerState.RUNNING)) {
                            ucis.get(hopUCI.getUpdatedContainerInfoId()).getNewlyLaunchedContainers()
                                    .add(conStatus);
                        } else if (conStatus.getState().equals(ContainerState.COMPLETE)) {
                            ucis.get(hopUCI.getUpdatedContainerInfoId()).getCompletedContainers()
                                    .add(conStatus);
                        }
                    }
                }

                for (org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo uci : ucis
                        .values()) {
                    updatedContainerInfoQueue.add(uci);
                }

                ((RMNodeImplDist) rmNode).setUpdatedContainerInfo(updatedContainerInfoQueue);
            } else {
                LOG.debug(hopRMNodeComps.getRMNodeId()
                        + " hopUpdatedContainerInfoList = null || hopUpdatedContainerInfoList.isEmpty() "
                        + hopRMNodeComps.getPendingEvent().getId().getEventId());
            }
        } else {
            LOG.debug(hopRMNodeComps.getRMNodeId() + " hopRMNodeFull.getHopUpdatedContainerInfo()=null "
                    + hopRMNodeComps.getPendingEvent().getId().getEventId());
        }
    }

    return rmNode;
}

From source file:org.apache.tez.dag.history.events.TaskAttemptFinishedEvent.java

License:Apache License

/**
 * Populates this event from its serialized proto form.
 * <p>
 * Required fields are copied unconditionally; optional proto fields are
 * copied only when present, leaving the corresponding members untouched
 * otherwise.
 *
 * @param proto serialized task-attempt-finished event
 * @throws IOException if an embedded TA-generated event cannot be decoded
 */
public void fromProto(TaskAttemptFinishedProto proto) throws IOException {
    taskAttemptId = TezTaskAttemptID.fromString(proto.getTaskAttemptId());
    state = TaskAttemptState.values()[proto.getState()];
    creationTime = proto.getCreationTime();
    allocationTime = proto.getAllocationTime();
    startTime = proto.getStartTime();
    finishTime = proto.getFinishTime();

    if (proto.hasTaskFailureType()) {
        taskFailureType = TezConverterUtils.failureTypeFromProto(proto.getTaskFailureType());
    }
    if (proto.hasCreationCausalTA()) {
        creationCausalTA = TezTaskAttemptID.fromString(proto.getCreationCausalTA());
    }
    if (proto.hasDiagnostics()) {
        diagnostics = proto.getDiagnostics();
    }
    if (proto.hasErrorEnum()) {
        error = TaskAttemptTerminationCause.valueOf(proto.getErrorEnum());
    }
    if (proto.hasCounters()) {
        tezCounters = DagTypeConverters.convertTezCountersFromProto(proto.getCounters());
    }

    int dataEventCount = proto.getDataEventsCount();
    if (dataEventCount > 0) {
        dataEvents = Lists.newArrayListWithCapacity(dataEventCount);
        for (DataEventDependencyInfoProto dep : proto.getDataEventsList()) {
            dataEvents.add(DataEventDependencyInfo.fromProto(dep));
        }
    }

    int generatedEventCount = proto.getTaGeneratedEventsCount();
    if (generatedEventCount > 0) {
        taGeneratedEvents = Lists.newArrayListWithCapacity(generatedEventCount);
        for (TezEventProto evt : proto.getTaGeneratedEventsList()) {
            taGeneratedEvents.add(TezEventUtils.fromProto(evt));
        }
    }

    if (proto.hasContainerId()) {
        containerId = ConverterUtils.toContainerId(proto.getContainerId());
    }
    if (proto.hasNodeId()) {
        nodeId = ConverterUtils.toNodeId(proto.getNodeId());
    }
    if (proto.hasNodeHttpAddress()) {
        nodeHttpAddress = proto.getNodeHttpAddress();
    }
}

From source file:org.apache.tez.dag.history.events.TaskAttemptStartedEvent.java

License:Apache License

/**
 * Restores this event's fields from its serialized proto form.
 *
 * @param proto serialized task-attempt-started event
 */
public void fromProto(TaskAttemptStartedProto proto) {
    taskAttemptId = TezTaskAttemptID.fromString(proto.getTaskAttemptId());
    containerId = ConverterUtils.toContainerId(proto.getContainerId());
    nodeId = ConverterUtils.toNodeId(proto.getNodeId());
    startTime = proto.getStartTime();
}