Example usage for org.apache.hadoop.yarn.api.records NodeId getPort

List of usage examples for org.apache.hadoop.yarn.api.records NodeId getPort

Introduction

On this page you can find example usages for org.apache.hadoop.yarn.api.records NodeId getPort.

Prototype

@Public
@Stable
public abstract int getPort();

Document

Get the port for communicating with the node.
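
Before the usage listings, here is a minimal standalone sketch (not taken from any of the source files below; the host name and port are made up) showing how getPort() is typically paired with getHost() to build a host:port address:

import org.apache.hadoop.yarn.api.records.NodeId;

public class NodeIdPortExample {
    public static void main(String[] args) {
        // Hypothetical host and port; NodeId.newInstance is the standard factory.
        NodeId nodeId = NodeId.newInstance("nm-host.example.com", 45454);

        // getPort() returns the port for communicating with the node; most callers
        // combine it with getHost() to form a "host:port" address, as the examples
        // below do.
        String address = nodeId.getHost() + ":" + nodeId.getPort();
        System.out.println("NodeManager address: " + address); // nm-host.example.com:45454
    }
}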

Usage

From source file: com.cloudera.kitten.appmaster.service.ContainerManagerConnectionFactoryImpl.java

License: Open Source License

@Override
public synchronized ContainerManager connect(Container container) {
    NodeId nodeId = container.getNodeId();
    String containerIpPort = String.format("%s:%d", nodeId.getHost(), nodeId.getPort());
    if (!containerManagers.containsKey(containerIpPort)) {
        LOG.info("Connecting to ContainerManager at: " + containerIpPort);
        InetSocketAddress addr = NetUtils.createSocketAddr(containerIpPort);
        ContainerManager cm = (ContainerManager) rpc.getProxy(ContainerManager.class, addr, conf);
        containerManagers.put(containerIpPort, cm);
        return cm;
    }
    return containerManagers.get(containerIpPort);
}

From source file: com.cloudera.llama.am.MiniLlama.java

License: Apache License

private Map<String, String> getDataNodeNodeManagerMapping(Configuration conf) throws Exception {
    Map<String, String> map = new HashMap<String, String>();
    DFSClient dfsClient = new DFSClient(new URI(conf.get("fs.defaultFS")), conf);
    DatanodeInfo[] DNs = dfsClient.datanodeReport(HdfsConstants.DatanodeReportType.ALL);
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    List<NodeId> nodeIds = getYarnNodeIds(conf);
    if (nodeIds.size() != DNs.length) {
        throw new RuntimeException("Number of DNs and NMs differ, MiniLlama "
                + "node mapping requires them to be equal at startup");
    }
    LOG.info("HDFS/YARN mapping:");
    for (int i = 0; i < DNs.length; i++) {
        String key = DNs[i].getXferAddr();
        NodeId nodeId = nodeIds.get(i);
        String value = nodeId.getHost() + ":" + nodeId.getPort();
        map.put(key, value);
        LOG.info("  DN/NM: " + key + "/" + value);
    }
    yarnClient.stop();
    nodes = map.size();
    verifySingleHost(map.keySet(), "DataNode");
    verifySingleHost(map.values(), "NodeManager");
    return map;
}

From source file: com.cloudera.llama.am.yarn.YarnRMConnector.java

License: Apache License

public String getNodeName(NodeId nodeId) {
    return (includePortInNodeName) ? nodeId.getHost() + ":" + nodeId.getPort() : nodeId.getHost();
}

From source file: io.hops.metadata.util.DistributedRTClientEvaluation.java

License: Apache License

/**
 * Registers a node with the ResourceTracker (RT). If num is greater than 1,
 * multiple requests are sent for the same node and the last response is
 * returned.
 *
 * @param rt     the ResourceTracker to register with
 * @param nodeId the id of the node to register
 */
private void registerClient(ResourceTracker rt, NodeId nodeId) {
    try {
        RegisterNodeManagerRequest request = Records.newRecord(RegisterNodeManagerRequest.class);
        request.setHttpPort(nodeId.getPort());
        request.setNodeId(nodeId);
        Resource resource = Resource.newInstance(5012, 8);
        request.setResource(resource);
        rt.registerNodeManager(request);
    } catch (YarnException ex) {
        LOG.error("HOP :: Error sending NodeHeartbeatResponse", ex);
    } catch (IOException ex) {
        LOG.error("HOP :: Error sending NodeHeartbeatResponse", ex);
    }
}

From source file: io.hops.util.DBUtility.java

License: Apache License

public static RMNode processHopRMNodeCompsForScheduler(RMNodeComps hopRMNodeComps, RMContext rmContext)
        throws InvalidProtocolBufferException {
    org.apache.hadoop.yarn.api.records.NodeId nodeId;
    RMNode rmNode = null;
    if (hopRMNodeComps != null) {
        nodeId = ConverterUtils.toNodeId(hopRMNodeComps.getRMNodeId());
        rmNode = rmContext.getRMNodes().get(nodeId);

        // The first time we are receiving the RMNode, this will happen when the node registers
        if (rmNode == null) {
            // Retrieve heartbeat
            boolean nextHeartbeat = true;

            // Create Resource
            Resource resource = null;
            if (hopRMNodeComps.getHopResource() != null) {
                resource = Resource.newInstance(hopRMNodeComps.getHopResource().getMemory(),
                        hopRMNodeComps.getHopResource().getVirtualCores());
            } else {
                LOG.error("ResourceOption should not be null");
                resource = Resource.newInstance(0, 0);
            }
            /*rmNode = new RMNodeImplDist(nodeId, rmContext, hopRMNodeComps.getHopRMNode().getHostName(),
                    hopRMNodeComps.getHopRMNode().getCommandPort(),
                    hopRMNodeComps.getHopRMNode().getHttpPort(),
                    ResourceTrackerService.resolve(hopRMNodeComps.getHopRMNode().getHostName()),
                    resourceOption,
                    hopRMNodeComps.getHopRMNode().getNodemanagerVersion(),
                    hopRMNodeComps.getHopRMNode().getHealthReport(),
                    hopRMNodeComps.getHopRMNode().getLastHealthReportTime(),
                    nextHeartbeat);*/

            rmNode = new RMNodeImplDist(nodeId, rmContext, hopRMNodeComps.getHopRMNode().getHostName(),
                    hopRMNodeComps.getHopRMNode().getCommandPort(), hopRMNodeComps.getHopRMNode().getHttpPort(),
                    ResourceTrackerService.resolve(hopRMNodeComps.getHopRMNode().getHostName()), resource,
                    hopRMNodeComps.getHopRMNode().getNodemanagerVersion());

            // Force Java to put the host in cache
            NetUtils.createSocketAddrForHost(nodeId.getHost(), nodeId.getPort());
        }

        // Update the RMNode
        if (hopRMNodeComps.getHopRMNode() != null) {
            ((RMNodeImplDist) rmNode).setState(hopRMNodeComps.getHopRMNode().getCurrentState());
        }
        if (hopRMNodeComps.getHopUpdatedContainerInfo() != null) {
            List<io.hops.metadata.yarn.entity.UpdatedContainerInfo> hopUpdatedContainerInfoList = hopRMNodeComps
                    .getHopUpdatedContainerInfo();

            if (hopUpdatedContainerInfoList != null && !hopUpdatedContainerInfoList.isEmpty()) {
                ConcurrentLinkedQueue<org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo> updatedContainerInfoQueue = new ConcurrentLinkedQueue<>();

                Map<Integer, org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo> ucis = new HashMap<>();
                LOG.debug(hopRMNodeComps.getRMNodeId() + " getting ucis " + hopUpdatedContainerInfoList.size()
                        + " pending event " + hopRMNodeComps.getPendingEvent().getId().getEventId());

                for (io.hops.metadata.yarn.entity.UpdatedContainerInfo hopUCI : hopUpdatedContainerInfoList) {
                    if (!ucis.containsKey(hopUCI.getUpdatedContainerInfoId())) {
                        ucis.put(hopUCI.getUpdatedContainerInfoId(),
                                new org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo(
                                        new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>(),
                                        new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>(),
                                        hopUCI.getUpdatedContainerInfoId()));
                    }

                    ContainerId cid = ConverterUtils.toContainerId(hopUCI.getContainerId());
                    io.hops.metadata.yarn.entity.ContainerStatus hopContainerStatus = hopRMNodeComps
                            .getHopContainersStatusMap().get(hopUCI.getContainerId());

                    org.apache.hadoop.yarn.api.records.ContainerStatus conStatus = org.apache.hadoop.yarn.api.records.ContainerStatus
                            .newInstance(cid, ContainerState.valueOf(hopContainerStatus.getState()),
                                    hopContainerStatus.getDiagnostics(), hopContainerStatus.getExitstatus());

                    // Check ContainerStatus state to add it in the appropriate list
                    if (conStatus != null) {
                        LOG.debug("add uci for container " + conStatus.getContainerId() + " status "
                                + conStatus.getState());
                        if (conStatus.getState().equals(ContainerState.RUNNING)) {
                            ucis.get(hopUCI.getUpdatedContainerInfoId()).getNewlyLaunchedContainers()
                                    .add(conStatus);
                        } else if (conStatus.getState().equals(ContainerState.COMPLETE)) {
                            ucis.get(hopUCI.getUpdatedContainerInfoId()).getCompletedContainers()
                                    .add(conStatus);
                        }
                    }
                }

                for (org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo uci : ucis
                        .values()) {
                    updatedContainerInfoQueue.add(uci);
                }

                ((RMNodeImplDist) rmNode).setUpdatedContainerInfo(updatedContainerInfoQueue);
            } else {
                LOG.debug(hopRMNodeComps.getRMNodeId()
                        + " hopUpdatedContainerInfoList = null || hopUpdatedContainerInfoList.isEmpty() "
                        + hopRMNodeComps.getPendingEvent().getId().getEventId());
            }
        } else {
            LOG.debug(hopRMNodeComps.getRMNodeId() + " hopRMNodeFull.getHopUpdatedContainerInfo()=null "
                    + hopRMNodeComps.getPendingEvent().getId().getEventId());
        }
    }

    return rmNode;
}

From source file: org.apache.hama.bsp.JobImpl.java

License: Apache License

/**
 * Creates a ContainerManagementProtocol proxy for the given node, adding the
 * NodeManager token to the user's credentials when one is provided.
 *
 * @param rpc     the YarnRPC instance used to create the proxy
 * @param nmToken the NodeManager token, may be null
 * @param nodeId  the node to connect to
 * @param user    the user on whose behalf the proxy is created
 * @return a ContainerManagementProtocol proxy for the node
 */
protected ContainerManagementProtocol getContainerManagementProtocolProxy(final YarnRPC rpc, Token nmToken,
        NodeId nodeId, String user) {
    ContainerManagementProtocol proxy;
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
    final InetSocketAddress addr = NetUtils.createSocketAddr(nodeId.getHost(), nodeId.getPort());
    if (nmToken != null) {
        ugi.addToken(ConverterUtils.convertFromYarn(nmToken, addr));
    }

    proxy = ugi.doAs(new PrivilegedAction<ContainerManagementProtocol>() {
        @Override
        public ContainerManagementProtocol run() {
            return (ContainerManagementProtocol) rpc.getProxy(ContainerManagementProtocol.class, addr, conf);
        }
    });
    return proxy;
}

From source file: org.apache.myriad.TestObjectFactory.java

License: Apache License

public static RMNode getRMNode(String host, int port, Resource resource) {
    NodeId id = NodeId.newInstance(host, port);
    RMContext context = new MockRMContext();
    return new RMNodeImpl(id, context, id.getHost(), id.getPort(), id.getPort(), new NodeBase(host, "/tmp"),
            resource, "version-one");
}

From source file: org.apache.tajo.master.YarnContainerProxy.java

License: Apache License

public YarnContainerProxy(QueryMasterTask.QueryMasterTaskContext context, Configuration conf, YarnRPC yarnRPC,
        Container container, ExecutionBlockId executionBlockId) {
    super(context, conf, executionBlockId, container);
    this.yarnRPC = yarnRPC;

    NodeId nodeId = container.getNodeId();
    this.containerMgrAddress = nodeId.getHost() + ":" + nodeId.getPort();
    this.containerToken = container.getContainerToken();
}

From source file: org.apache.tajo.worker.TajoResourceAllocator.java

License: Apache License

private void stopExecutionBlock(ExecutionBlockId executionBlockId, NodeId worker) {
    NettyClientBase tajoWorkerRpc = null;
    try {
        InetSocketAddress addr = new InetSocketAddress(worker.getHost(), worker.getPort());
        tajoWorkerRpc = RpcConnectionPool.getPool().getConnection(addr, TajoWorkerProtocol.class, true);
        TajoWorkerProtocol.TajoWorkerProtocolService tajoWorkerRpcClient = tajoWorkerRpc.getStub();

        tajoWorkerRpcClient.stopExecutionBlock(null, executionBlockId.getProto(), NullCallback.get());
    } catch (Throwable e) {
        LOG.error(e.getMessage(), e);
    } finally {
        RpcConnectionPool.getPool().releaseConnection(tajoWorkerRpc);
    }
}

From source file: org.apache.tez.dag.app.TaskCommunicatorManager.java

License: Apache License

@Override
public void registerRunningContainer(ContainerId containerId, int taskCommId) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("ContainerId: " + containerId + " registered with TaskAttemptListener");
    }
    ContainerInfo oldInfo = registeredContainers.put(containerId, NULL_CONTAINER_INFO);
    if (oldInfo != null) {
        throw new TezUncheckedException("Multiple registrations for containerId: " + containerId);
    }
    NodeId nodeId = context.getAllContainers().get(containerId).getContainer().getNodeId();
    try {
        taskCommunicators[taskCommId].registerRunningContainer(containerId, nodeId.getHost(), nodeId.getPort());
    } catch (Exception e) {
        String msg = "Error in TaskCommunicator when registering running Container" + ", communicator="
                + Utils.getTaskCommIdentifierString(taskCommId, context) + ", containerId=" + containerId
                + ", nodeId=" + nodeId;
        LOG.error(msg, e);
        sendEvent(new DAGAppMasterEventUserServiceFatalError(
                DAGAppMasterEventType.TASK_COMMUNICATOR_SERVICE_FATAL_ERROR, msg, e));
    }
}