List of usage examples for org.apache.hadoop.yarn.api.records Container getResource
@Public @Stable public abstract Resource getResource();
Returns the Resource allocated to the container.

From source file: ApplicationMaster.java
License:Apache License
private static void publishContainerStartEvent(TimelineClient timelineClient, Container container) throws IOException, YarnException { TimelineEntity entity = new TimelineEntity(); entity.setEntityId(container.getId().toString()); entity.setEntityType(DSEntity.DS_CONTAINER.toString()); entity.addPrimaryFilter("user", UserGroupInformation.getCurrentUser().getShortUserName()); TimelineEvent event = new TimelineEvent(); event.setTimestamp(System.currentTimeMillis()); event.setEventType(DSEvent.DS_CONTAINER_START.toString()); event.addEventInfo("Node", container.getNodeId().toString()); event.addEventInfo("Resources", container.getResource().toString()); entity.addEvent(event);//from ww w . j ava 2s .co m timelineClient.putEntities(entity); }
From source file:com.cloudera.kitten.appmaster.service.WorkflowService.java
License:Open Source License
/**
 * AM-RM callback: hands each newly allocated container to the first initialized
 * tracker that still needs containers and matches it, then logs the full
 * allocation table.
 */
@Override
public void onContainersAllocated(List<Container> allocatedContainers) {
    LOG.info("Allocating " + allocatedContainers.size() + " container(s)");
    // Containers already handed to a tracker in this round; prevents double assignment.
    Set<Container> assigned = Sets.newHashSet();
    for (ContainerTracker tracker : trackers.values()) {
        for (Container allocated : allocatedContainers) {
            if (tracker.isInitilized && tracker.needsContainers()) {
                if (!assigned.contains(allocated) && tracker.matches(allocated)) {
                    LOG.info("Allocated cores: " + allocated.getResource().getVirtualCores());
                    tracker.launchContainer(allocated);
                    assigned.add(allocated);
                    containerAllocation.put(allocated.getId(), tracker);
                }
            }
        }
    }
    // Log the cumulative allocation table (includes assignments from earlier rounds).
    for (Entry<ContainerId, ContainerTracker> e : containerAllocation.entrySet()) {
        LOG.info("Allocated: " + e.getKey() + " to operator: " + e.getValue().params.getName());
    }
    // NOTE(review): containers not claimed by any tracker are neither launched nor
    // released here; the commented-out check below used to stop the service instead.
    /*if (assigned.size() < allocatedContainers.size()) {
        LOG.error(String.format("Not all containers were allocated (%d out of %d)", assigned.size(),
                allocatedContainers.size()));
        stop();
    }*/
}
From source file:com.cloudera.llama.am.yarn.YarnRMConnector.java
License:Apache License
/**
 * Builds an allocation event for the given llama resource from the YARN container
 * that was granted for it (node, vcores, memory, container id, RM data).
 */
private RMEvent createResourceAllocation(RMResource resources, Container container) {
    // Name the pieces of the granted container before assembling the event.
    int grantedVCores = container.getResource().getVirtualCores();
    int grantedMemoryMb = container.getResource().getMemory();
    String nodeName = getNodeName(container.getNodeId());
    return RMEvent.createAllocationEvent(resources.getResourceId(), nodeName, grantedVCores,
            grantedMemoryMb, container.getId(), resources.getRmData());
}
From source file:com.cloudera.llama.am.yarn.YarnRMConnector.java
License:Apache License
/**
 * AM-RM callback: matches allocated containers against outstanding requests in two passes.
 * Pass 1 uses YARN's strong (priority + node + resource) matching; pass 2 relaxes locality
 * and matches leftover containers against relaxed-locality requests by capacity only.
 * Containers that match nothing are released back to the RM.
 */
@Override
public void onContainersAllocated(List<Container> containers) {
    List<RMEvent> changes = new ArrayList<RMEvent>();
    // no need to use a ugi.doAs() as this is called from within Yarn client
    List<Container> unclaimedContainers = new ArrayList<Container>();
    for (Container container : containers) {
        List<? extends Collection<LlamaContainerRequest>> matchingContainerReqs = amRmClientAsync
                .getMatchingRequests(container.getPriority(), getNodeName(container.getNodeId()),
                        container.getResource());
        if (!matchingContainerReqs.isEmpty()) {
            // Take the first request found in the nested matching structure.
            LlamaContainerRequest req = null;
            Iterator<? extends Collection<LlamaContainerRequest>> it1 = matchingContainerReqs.iterator();
            while (req == null && it1.hasNext()) {
                Iterator<LlamaContainerRequest> it2 = it1.next().iterator();
                while (req == null && it2.hasNext()) {
                    req = it2.next();
                    LOG.trace("Matching container '{}' resource '{}'", container, req.getResourceAsk());
                }
            }
            if (req == null) {
                LOG.error("There was a match for container '{}', " + "LlamaContainerRequest cannot be NULL",
                        container);
            } else {
                handleContainerMatchingRequest(container, req, changes);
                /*Remove the granted request from anyLocationResourceIdToRequestMap if it is there*/
                anyLocationResourceIdToRequestMap.remove(req.getResourceAsk().getResourceId());
            }
        } else {
            // NOTE(review): this log message was split across lines by extraction; rejoined here.
            LOG.debug("No strong request match for {}. Adding to the list of unclaimed containers.", container);
            unclaimedContainers.add(container);
        }
    }
    /*Matching YARN resources against requests relaxing locality*/
    for (Container container : unclaimedContainers) {
        /*Looking for requests with 'DONT_CARE' or 'PREFERRED' locality which match with the resources we've got*/
        boolean containerIsClaimed = false;
        Iterator<Map.Entry<UUID, LlamaContainerRequest>> iterator = anyLocationResourceIdToRequestMap.entrySet()
                .iterator();
        while (iterator.hasNext()) {
            Map.Entry<UUID, LlamaContainerRequest> entry = iterator.next();
            LlamaContainerRequest request = entry.getValue();
            /*Matching by the capacity only*/
            if (request.getResourceAsk().getCpuVCoresAsk() == container.getResource().getVirtualCores()
                    && request.getResourceAsk().getMemoryMbsAsk() == container.getResource().getMemory()) {
                handleContainerMatchingRequest(container, request, changes);
                // Iterator.remove is safe here; removing via the map would throw CME.
                iterator.remove();
                containerIsClaimed = true;
                break;
            }
        }
        if (!containerIsClaimed) {
            LOG.error("No matching request for {}. Releasing the container.", container);
            containerToResourceMap.remove(container.getId());
            amRmClientAsync.releaseAssignedContainer(container.getId());
        }
    }
    llamaCallback.onEvent(changes);
}
From source file:com.continuuity.weave.internal.yarn.Hadoop20YarnNMClient.java
License:Apache License
/**
 * Starts the given launch context on the node that owns the container and returns
 * a Cancellable that can later stop the launched container. Propagates any
 * YarnRemoteException from the node manager as an unchecked exception.
 */
@Override
public Cancellable start(YarnContainerInfo containerInfo, YarnLaunchContext launchContext) {
    Container container = containerInfo.getContainer();

    // Fill in the per-container fields of the launch context.
    ContainerLaunchContext ctx = launchContext.getLaunchContext();
    ctx.setUser(System.getProperty("user.name"));
    ctx.setContainerId(container.getId());
    ctx.setResource(container.getResource());

    StartContainerRequest request = Records.newRecord(StartContainerRequest.class);
    request.setContainerLaunchContext(ctx);

    // Talk directly to the container's node manager.
    ContainerManager containerManager = connectContainerManager(container);
    try {
        containerManager.startContainer(request);
        return new ContainerTerminator(container, containerManager);
    } catch (YarnRemoteException e) {
        LOG.error("Error in launching process", e);
        throw Throwables.propagate(e);
    }
}
From source file:com.datatorrent.stram.StramMiniClusterTest.java
License:Apache License
/**
 * Exercises an in-process (unmanaged) AM: registers with the RM, asks for two
 * containers on specific hosts/racks, polls allocate() for up to 100 rounds
 * removing the request as containers arrive, then unregisters.
 * Currently disabled via @Ignore.
 */
@Ignore
@Test
public void testUnmanagedAM2() throws Exception {
    new InlineAM(conf) {
        @Override
        @SuppressWarnings("SleepWhileInLoop")
        public void runAM(ApplicationAttemptId attemptId) throws Exception {
            LOG.debug("AM running {}", attemptId);
            AMRMClient<ContainerRequest> amRmClient = AMRMClient.createAMRMClient();
            amRmClient.init(conf);
            amRmClient.start();
            // register with the RM (LAUNCHED -> RUNNING)
            amRmClient.registerApplicationMaster("", 0, null);
            // AM specific logic
            String[] hosts = { "vm1" };
            String[] racks = { "somerack" };
            Resource capability = Records.newRecord(Resource.class);
            capability.setMemory(1000);
            Priority priority = Records.newRecord(Priority.class);
            priority.setPriority(10);
            // Same request object added twice => ask for two identical containers.
            AMRMClient.ContainerRequest req = new AMRMClient.ContainerRequest(capability, hosts, racks,
                    priority);
            amRmClient.addContainerRequest(req);
            amRmClient.addContainerRequest(req);
            /*
             capability = Records.newRecord(Resource.class);
             capability.setMemory(5512);
             priority = Records.newRecord(Priority.class);
             priority.setPriority(11);
             req = new AMRMClient.ContainerRequest(capability, hosts, racks, priority, 3);
             amRmClient.addContainerRequest(req);
             */
            // Heartbeat loop: each allocate() call doubles as the AM heartbeat.
            for (int i = 0; i < 100; i++) {
                AllocateResponse ar = amRmClient.allocate(0);
                sleep(1000);
                LOG.debug("allocateResponse: {}", ar);
                for (Container c : ar.getAllocatedContainers()) {
                    LOG.debug("*** allocated {}", c.getResource());
                    amRmClient.removeContainerRequest(req);
                }
                /*
                 GetClusterNodesRequest request = Records.newRecord(GetClusterNodesRequest.class);
                 ClientRMService clientRMService = yarnCluster.getResourceManager().getClientRMService();
                 GetClusterNodesResponse response = clientRMService.getClusterNodes(request);
                 List<NodeReport> nodeReports = response.getNodeReports();
                 LOG.info(nodeReports);
                 for (NodeReport nr: nodeReports) {
                   LOG.info("Node: " + nr.getNodeId());
                   LOG.info("Total memory: " + nr.getCapability());
                   LOG.info("Used memory: " + nr.getUsed());
                   LOG.info("Number containers: " + nr.getNumContainers());
                 }
                 */
            }
            // unregister from RM
            amRmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,
                    "testUnmanagedAM finished", null);
        }
    }.run();
}
From source file:com.datatorrent.stram.StreamingAppMasterService.java
License:Apache License
/**
 * Main run function for the application master.
 *
 * Registers with the RM, then loops until the application is done: refreshes
 * security tokens when near expiry, turns pending deploy requests into container
 * requests (re-requesting with relaxed locality after missed heartbeats),
 * processes allocations and completions from the RM response, and finally
 * unregisters with the computed application status.
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings("SleepWhileInLoop")
private void execute() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");
    final Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    LOG.info("number of tokens: {}", credentials.getAllTokens().size());
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.debug("token: {}", token);
    }
    final Configuration conf = getConfig();
    // Token refresh is scheduled at a fraction of the shorter of the HDFS/RM token lifetimes.
    long tokenLifeTime = (long) (dag.getValue(LogicalPlan.TOKEN_REFRESH_ANTICIPATORY_FACTOR) * Math
            .min(dag.getValue(LogicalPlan.HDFS_TOKEN_LIFE_TIME), dag.getValue(LogicalPlan.RM_TOKEN_LIFE_TIME)));
    long expiryTime = System.currentTimeMillis() + tokenLifeTime;
    LOG.debug(" expiry token time {}", tokenLifeTime);
    String hdfsKeyTabFile = dag.getValue(LogicalPlan.KEY_TAB_FILE);
    // Register self with ResourceManager
    RegisterApplicationMasterResponse response = amRmClient.registerApplicationMaster(appMasterHostname, 0,
            appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    int maxVcores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max mem {}m and vcores {} capabililty of resources in this cluster ", maxMem, maxVcores);
    // for locality relaxation fall back: request -> (loop counter when requested, the request)
    Map<StreamingContainerAgent.ContainerStartRequest, MutablePair<Integer, ContainerRequest>> requestedResources = Maps
            .newHashMap();
    // Setup heartbeat emitter
    // TODO poll RM every now and then with an empty request to let RM know that we are alive
    // The heartbeat interval after which an AM is timed out by the RM is defined by a config setting:
    // RM_AM_EXPIRY_INTERVAL_MS with default defined by DEFAULT_RM_AM_EXPIRY_INTERVAL_MS
    // The allocate calls to the RM count as heartbeat so, for now, this additional heartbeat emitter
    // is not required.
    int loopCounter = -1;
    List<ContainerId> releasedContainers = new ArrayList<ContainerId>();
    int numTotalContainers = 0;
    // keep track of already requested containers to not request them again while waiting for allocation
    int numRequestedContainers = 0;
    int numReleasedContainers = 0;
    int nextRequestPriority = 0;
    ResourceRequestHandler resourceRequestor = new ResourceRequestHandler();
    YarnClient clientRMService = YarnClient.createYarnClient();
    try {
        // YARN-435
        // we need getClusterNodes to populate the initial node list,
        // subsequent updates come through the heartbeat response
        clientRMService.init(conf);
        clientRMService.start();
        // Refuse to run if another instance with the same app name is already started by this user.
        ApplicationReport ar = StramClientUtils.getStartedAppInstanceByName(clientRMService,
                dag.getAttributes().get(DAG.APPLICATION_NAME),
                UserGroupInformation.getLoginUser().getUserName(), dag.getAttributes().get(DAG.APPLICATION_ID));
        if (ar != null) {
            appDone = true;
            dnmgr.shutdownDiagnosticsMessage = String.format(
                    "Application master failed due to application %s with duplicate application name \"%s\" by the same user \"%s\" is already started.",
                    ar.getApplicationId().toString(), ar.getName(), ar.getUser());
            LOG.info("Forced shutdown due to {}", dnmgr.shutdownDiagnosticsMessage);
            finishApplication(FinalApplicationStatus.FAILED, numTotalContainers);
            return;
        }
        resourceRequestor.updateNodeReports(clientRMService.getNodeReports());
    } catch (Exception e) {
        throw new RuntimeException("Failed to retrieve cluster nodes report.", e);
    } finally {
        clientRMService.stop();
    }
    // check for previously allocated containers
    // as of 2.2, containers won't survive AM restart, but this will change in the future - YARN-1490
    checkContainerStatus();
    FinalApplicationStatus finalStatus = FinalApplicationStatus.SUCCEEDED;
    final InetSocketAddress rmAddress = conf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
            YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);
    while (!appDone) {
        loopCounter++;
        // Refresh HDFS/RM tokens from the keytab before they expire (secure clusters only).
        if (UserGroupInformation.isSecurityEnabled() && System.currentTimeMillis() >= expiryTime
                && hdfsKeyTabFile != null) {
            String applicationId = appAttemptID.getApplicationId().toString();
            expiryTime = StramUserLogin.refreshTokens(tokenLifeTime, "." + File.separator + "tmp",
                    applicationId, conf, hdfsKeyTabFile, credentials, rmAddress, true);
        }
        // Drain tasks queued for execution on this (the AM event) thread.
        Runnable r;
        while ((r = this.pendingTasks.poll()) != null) {
            r.run();
        }
        // log current state
        /*
         * LOG.info("Current application state: loop=" + loopCounter + ", appDone=" + appDone + ", total=" +
         * numTotalContainers + ", requested=" + numRequestedContainers + ", completed=" + numCompletedContainers +
         * ", failed=" + numFailedContainers + ", currentAllocated=" + this.allAllocatedContainers.size());
         */
        // Sleep before each loop when asking RM for containers
        // to avoid flooding RM with spurious requests when it
        // need not have any available containers
        try {
            sleep(1000);
        } catch (InterruptedException e) {
            LOG.info("Sleep interrupted " + e.getMessage());
        }
        // Setup request to be sent to RM to allocate containers
        List<ContainerRequest> containerRequests = new ArrayList<ContainerRequest>();
        List<ContainerRequest> removedContainerRequests = new ArrayList<ContainerRequest>();
        // request containers for pending deploy requests
        if (!dnmgr.containerStartRequests.isEmpty()) {
            StreamingContainerAgent.ContainerStartRequest csr;
            while ((csr = dnmgr.containerStartRequests.poll()) != null) {
                // Clamp the ask to the cluster maximums reported by the RM.
                if (csr.container.getRequiredMemoryMB() > maxMem) {
                    LOG.warn("Container memory {}m above max threshold of cluster. Using max value {}m.",
                            csr.container.getRequiredMemoryMB(), maxMem);
                    csr.container.setRequiredMemoryMB(maxMem);
                }
                if (csr.container.getRequiredVCores() > maxVcores) {
                    LOG.warn("Container vcores {} above max threshold of cluster. Using max value {}.",
                            csr.container.getRequiredVCores(), maxVcores);
                    csr.container.setRequiredVCores(maxVcores);
                }
                // Unique priority per request lets allocations be matched back to their request.
                csr.container.setResourceRequestPriority(nextRequestPriority++);
                ContainerRequest cr = resourceRequestor.createContainerRequest(csr, true);
                MutablePair<Integer, ContainerRequest> pair = new MutablePair<Integer, ContainerRequest>(
                        loopCounter, cr);
                requestedResources.put(csr, pair);
                containerRequests.add(cr);
            }
        }
        if (!requestedResources.isEmpty()) {
            //resourceRequestor.clearNodeMapping();
            // Re-issue requests that have gone unanswered too long, with locality relaxed.
            for (Map.Entry<StreamingContainerAgent.ContainerStartRequest, MutablePair<Integer, ContainerRequest>> entry : requestedResources
                    .entrySet()) {
                if ((loopCounter - entry.getValue().getKey()) > NUMBER_MISSED_HEARTBEATS) {
                    StreamingContainerAgent.ContainerStartRequest csr = entry.getKey();
                    removedContainerRequests.add(entry.getValue().getRight());
                    ContainerRequest cr = resourceRequestor.createContainerRequest(csr, false);
                    entry.getValue().setLeft(loopCounter);
                    entry.getValue().setRight(cr);
                    containerRequests.add(cr);
                }
            }
        }
        numTotalContainers += containerRequests.size();
        numRequestedContainers += containerRequests.size();
        AllocateResponse amResp = sendContainerAskToRM(containerRequests, removedContainerRequests,
                releasedContainers);
        if (amResp.getAMCommand() != null) {
            LOG.info(" statement executed:{}", amResp.getAMCommand());
            // NOTE(review): every branch throws the same exception; the switch only documents
            // the recognized commands.
            switch (amResp.getAMCommand()) {
            case AM_RESYNC:
            case AM_SHUTDOWN:
                throw new YarnRuntimeException("Received the " + amResp.getAMCommand() + " command from RM");
            default:
                throw new YarnRuntimeException("Received the " + amResp.getAMCommand() + " command from RM");
            }
        }
        releasedContainers.clear();
        // Retrieve list of allocated containers from the response
        List<Container> newAllocatedContainers = amResp.getAllocatedContainers();
        // LOG.info("Got response from RM for container ask, allocatedCnt=" + newAllocatedContainers.size());
        numRequestedContainers -= newAllocatedContainers.size();
        long timestamp = System.currentTimeMillis();
        for (Container allocatedContainer : newAllocatedContainers) {
            LOG.info("Got new container." + ", containerId=" + allocatedContainer.getId() + ", containerNode="
                    + allocatedContainer.getNodeId() + ", containerNodeURI="
                    + allocatedContainer.getNodeHttpAddress() + ", containerResourceMemory"
                    + allocatedContainer.getResource().getMemory() + ", priority"
                    + allocatedContainer.getPriority());
            // + ", containerToken" + allocatedContainer.getContainerToken().getIdentifier().toString());
            // Match the allocation back to its originating request via the request priority.
            boolean alreadyAllocated = true;
            StreamingContainerAgent.ContainerStartRequest csr = null;
            for (Map.Entry<StreamingContainerAgent.ContainerStartRequest, MutablePair<Integer, ContainerRequest>> entry : requestedResources
                    .entrySet()) {
                if (entry.getKey().container.getResourceRequestPriority() == allocatedContainer.getPriority()
                        .getPriority()) {
                    alreadyAllocated = false;
                    csr = entry.getKey();
                    break;
                }
            }
            if (alreadyAllocated) {
                // Duplicate allocation for an already-satisfied priority; give it back.
                LOG.info("Releasing {} as resource with priority {} was already assigned",
                        allocatedContainer.getId(), allocatedContainer.getPriority());
                releasedContainers.add(allocatedContainer.getId());
                numReleasedContainers++;
                numRequestedContainers++;
                continue;
            }
            if (csr != null) {
                requestedResources.remove(csr);
            }
            // allocate resource to container
            ContainerResource resource = new ContainerResource(allocatedContainer.getPriority().getPriority(),
                    allocatedContainer.getId().toString(), allocatedContainer.getNodeId().toString(),
                    allocatedContainer.getResource().getMemory(),
                    allocatedContainer.getResource().getVirtualCores(),
                    allocatedContainer.getNodeHttpAddress());
            StreamingContainerAgent sca = dnmgr.assignContainer(resource, null);
            if (sca == null) {
                // allocated container no longer needed, add release request
                LOG.warn("Container {} allocated but nothing to deploy, going to release this container.",
                        allocatedContainer.getId());
                releasedContainers.add(allocatedContainer.getId());
            } else {
                AllocatedContainer allocatedContainerHolder = new AllocatedContainer(allocatedContainer);
                this.allocatedContainers.put(allocatedContainer.getId().toString(), allocatedContainerHolder);
                ByteBuffer tokens = null;
                if (UserGroupInformation.isSecurityEnabled()) {
                    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
                    Token<StramDelegationTokenIdentifier> delegationToken = allocateDelegationToken(
                            ugi.getUserName(), heartbeatListener.getAddress());
                    allocatedContainerHolder.delegationToken = delegationToken;
                    //ByteBuffer tokens = LaunchContainerRunnable.getTokens(delegationTokenManager, heartbeatListener.getAddress());
                    tokens = LaunchContainerRunnable.getTokens(ugi, delegationToken);
                }
                LaunchContainerRunnable launchContainer = new LaunchContainerRunnable(allocatedContainer,
                        nmClient, sca, tokens);
                // Thread launchThread = new Thread(runnableLaunchContainer);
                // launchThreads.add(launchThread);
                // launchThread.start();
                launchContainer.run(); // communication with NMs is now async
                // record container start event
                StramEvent ev = new StramEvent.StartContainerEvent(allocatedContainer.getId().toString(),
                        allocatedContainer.getNodeId().toString());
                ev.setTimestamp(timestamp);
                dnmgr.recordEventAsync(ev);
            }
        }
        // track node updates for future locality constraint allocations
        // TODO: it seems 2.0.4-alpha doesn't give us any updates
        resourceRequestor.updateNodeReports(amResp.getUpdatedNodes());
        // Check the completed containers
        List<ContainerStatus> completedContainers = amResp.getCompletedContainersStatuses();
        // LOG.debug("Got response from RM for container ask, completedCnt=" + completedContainers.size());
        for (ContainerStatus containerStatus : completedContainers) {
            LOG.info("Completed containerId=" + containerStatus.getContainerId() + ", state="
                    + containerStatus.getState() + ", exitStatus=" + containerStatus.getExitStatus()
                    + ", diagnostics=" + containerStatus.getDiagnostics());
            // non complete containers should not be here
            assert (containerStatus.getState() == ContainerState.COMPLETE);
            AllocatedContainer allocatedContainer = allocatedContainers
                    .remove(containerStatus.getContainerId().toString());
            if (allocatedContainer != null && allocatedContainer.delegationToken != null) {
                UserGroupInformation ugi = UserGroupInformation.getLoginUser();
                delegationTokenManager.cancelToken(allocatedContainer.delegationToken, ugi.getUserName());
            }
            int exitStatus = containerStatus.getExitStatus();
            if (0 != exitStatus) {
                if (allocatedContainer != null) {
                    numFailedContainers.incrementAndGet();
                }
                // if (exitStatus == 1) {
                // // non-recoverable StreamingContainer failure
                // appDone = true;
                // finalStatus = FinalApplicationStatus.FAILED;
                // dnmgr.shutdownDiagnosticsMessage = "Unrecoverable failure " + containerStatus.getContainerId();
                // LOG.info("Exiting due to: {}", dnmgr.shutdownDiagnosticsMessage);
                // }
                // else {
                // Recoverable failure or process killed (externally or via stop request by AM)
                // also occurs when a container was released by the application but never assigned/launched
                LOG.debug("Container {} failed or killed.", containerStatus.getContainerId());
                dnmgr.scheduleContainerRestart(containerStatus.getContainerId().toString());
                // }
            } else {
                // container completed successfully
                numCompletedContainers.incrementAndGet();
                LOG.info("Container completed successfully." + ", containerId="
                        + containerStatus.getContainerId());
            }
            String containerIdStr = containerStatus.getContainerId().toString();
            dnmgr.removeContainerAgent(containerIdStr);
            // record container stop event
            StramEvent ev = new StramEvent.StopContainerEvent(containerIdStr, containerStatus.getExitStatus());
            ev.setReason(containerStatus.getDiagnostics());
            dnmgr.recordEventAsync(ev);
        }
        if (dnmgr.forcedShutdown) {
            LOG.info("Forced shutdown due to {}", dnmgr.shutdownDiagnosticsMessage);
            finalStatus = FinalApplicationStatus.FAILED;
            appDone = true;
        } else if (allocatedContainers.isEmpty() && numRequestedContainers == 0
                && dnmgr.containerStartRequests.isEmpty()) {
            LOG.debug("Exiting as no more containers are allocated or requested");
            finalStatus = FinalApplicationStatus.SUCCEEDED;
            appDone = true;
        }
        LOG.debug("Current application state: loop=" + loopCounter + ", appDone=" + appDone + ", total="
                + numTotalContainers + ", requested=" + numRequestedContainers + ", released="
                + numReleasedContainers + ", completed=" + numCompletedContainers + ", failed="
                + numFailedContainers + ", currentAllocated=" + allocatedContainers.size());
        // monitor child containers
        dnmgr.monitorHeartbeat();
    }
    finishApplication(finalStatus, numTotalContainers);
}
From source file:com.github.hdl.tensorflow.yarn.app.ApplicationMaster.java
License:Apache License
/**
 * Once all requested containers have been allocated, partitions them into
 * TensorFlow worker and parameter-server roles (registering each in the
 * cluster spec) and launches every container on its own thread.
 *
 * @return true when launch threads have been started; false when fewer
 *         containers than needed were allocated
 * @throws Exception if called before all containers are allocated
 */
public boolean startAllContainers() throws Exception {
    if (numAllocatedContainers.get() == numTotalContainers) {
        int numWorkerContainers = 0;
        int numPsContainers = 0;
        // NOTE(review): identifiers "Woker"/"Containr" are existing typos in this
        // project's field/setter names and must be kept as-is.
        if (this.allocatedContainers.size() < numTotalWokerContainers + numTotalParamServerContainer) {
            LOG.error("not enough ps and woker containers allocated!");
            return false;
        }
        // First pass: assign roles — the first N containers become workers,
        // the rest become parameter servers.
        for (Container allocatedContainer : this.allocatedContainers) {
            if (numWorkerContainers < numTotalWokerContainers) {
                LOG.info("work cid: " + allocatedContainer.getId().toString());
                clusterSpec.addWorkerSpec(allocatedContainer.getId().toString(),
                        allocatedContainer.getNodeId().getHost());
                numWorkerContainers++;
                continue;
            }
            if (numPsContainers < this.numTotalParamServerContainer) {
                LOG.info("ps cid: " + allocatedContainer.getId().toString());
                clusterSpec.addPsSpec(allocatedContainer.getId().toString(),
                        allocatedContainer.getNodeId().getHost());
                numPsContainers++;
            }
        }
        // Second pass: launch every container.
        for (Container allocatedContainer : this.allocatedContainers) {
            LOG.info("Launching a new container." + ", containerId=" + allocatedContainer.getId()
                    + ", containerNode=" + allocatedContainer.getNodeId().getHost() + ":"
                    + allocatedContainer.getNodeId().getPort() + ", containerNodeURI="
                    + allocatedContainer.getNodeHttpAddress() + ", containerResourceMemory"
                    + allocatedContainer.getResource().getMemorySize() + ", containerResourceVirtualCores"
                    + allocatedContainer.getResource().getVirtualCores());
            // + ", containerToken"
            // +allocatedContainer.getContainerToken().getIdentifier().toString());
            LOG.info("server cid: " + allocatedContainer.getId().toString());
            LaunchContainerThread launchDelegator = new LaunchContainerThread(allocatedContainer, this,
                    clusterSpec.getServerAddress(allocatedContainer.getId().toString()));
            launchDelegator.setTfServerJar(tfServerJar);
            launchDelegator.setJniSoDfsPath(jniSoDfsPath);
            launchDelegator.setContainerMemory(containerMemory);
            launchDelegator.setContainerRetryPolicy(containerRetryPolicy);
            launchDelegator.setContainerRetryErrorCodes(containerRetryErrorCodes);
            launchDelegator.setContainerMaxRetries(containerMaxRetries);
            launchDelegator.setContainrRetryInterval(containrRetryInterval);
            Thread launchThread = new Thread(launchDelegator);
            // launch and start the container on a separate thread to keep
            // the main thread unblocked
            // as all containers may not be allocated at one go.
            launchThreads.add(launchThread);
            launchedContainers.add(allocatedContainer.getId());
            launchThread.start();
        }
    } else {
        throw new Exception("containers are not allocated!");
    }
    return true;
}
From source file:com.hazelcast.yarn.ApplicationMaster.java
License:Open Source License
private boolean checkContainer(Container cont) { if (this.properties.clusterSize() <= this.containers.size()) { LOG.log(Level.INFO, "Failed this.properties.clusterSize()=" + this.properties.clusterSize() + " this.containers.size()=" + this.containers.size()); return false; }//from ww w .j av a 2s .com if (cont.getResource().getVirtualCores() < this.properties.cpuPerNode() || cont.getResource().getMemory() < this.properties.memoryPerNode()) { LOG.log(Level.INFO, "Container resources not sufficient requirements. Host: {0}, cpu: {1}, mem: {2}", new Object[] { cont.getNodeId().getHost(), cont.getResource().getVirtualCores(), cont.getResource().getMemory() }); return false; } return true; }
From source file:com.hazelcast.yarn.ApplicationMaster.java
License:Open Source License
/**
 * AM-RM callback: for every allocated container that passes checkContainer,
 * builds a launch context that localizes the Hazelcast archive (plus optional
 * custom libs), sizes the JVM heap to the container's memory grant, starts the
 * container via the NM client, and records it in the local container map.
 * Launch failures are logged and skipped; they do not abort the batch.
 */
@Override
public synchronized void onContainersAllocated(List<Container> containerList) {
    for (Container container : containerList) {
        if (checkContainer(container)) {
            try {
                ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
                Map<String, String> env = new HashMap<String, String>(System.getenv());
                // NOTE(review): setEnvironment is called again below after JVM_OPTS may be
                // added to env; this first call looks redundant — confirm before removing.
                ctx.setEnvironment(env);
                Map<String, LocalResource> resources = new HashMap<String, LocalResource>();
                // Hazelcast distribution archive; YARN extracts it under ./hazelcast.
                resources.put("hazelcast",
                        YarnUtil.createFileResource(this.hazelcastPath, this.hdfs, LocalResourceType.ARCHIVE));
                if (this.properties.customLibs() != null) {
                    resources.put("libs", YarnUtil.createFileResource(new Path(this.properties.customLibs()),
                            this.hdfs, LocalResourceType.FILE));
                }
                if (this.properties.jvmOpts() != null && !this.properties.jvmOpts().isEmpty()) {
                    env.put("JVM_OPTS", this.properties.jvmOpts());
                }
                ctx.setEnvironment(env);
                // Heap min/max pinned to the container's memory grant so the JVM
                // cannot outgrow what YARN allocated.
                String command = "cd ./hazelcast/*/bin/ && ./server.sh hazelcast.xml" + " -J-Xmx"
                        + container.getResource().getMemory() + "m" + " -J-Xms"
                        + container.getResource().getMemory() + "m" + YarnUtil.LOGS;
                LOG.log(Level.INFO, command);
                ctx.setLocalResources(resources);
                ctx.setCommands(Collections.singletonList(command));
                LOG.log(Level.INFO, "Launching container: {0}.", container.getId());
                this.nmClient.startContainer(container, ctx);
                this.containers.put(container.getId(),
                        new HazelcastContainer(container.getId(), container.getNodeId(),
                                container.getResource().getVirtualCores(), container.getResource().getMemory()));
            } catch (Exception ex) {
                LOG.log(Level.WARNING, "Error launching container " + container.getId(), ex);
            }
        } else {
            LOG.log(Level.INFO, "Checking failed for container=" + container.toString());
        }
    }
}