Usage examples for org.apache.hadoop.yarn.api.records.Container#getNodeId
@Public @Stable public abstract NodeId getNodeId();
From source file:com.continuuity.weave.internal.yarn.Hadoop20YarnNMClient.java
License:Apache License
/**
 * Helper to connect to the container manager (node manager) hosting the given container.
 *
 * @param container the allocated container whose node manager should be contacted
 * @return an RPC proxy for the node's {@code ContainerManager}
 */
private ContainerManager connectContainerManager(Container container) {
    // Fetch the NodeId once instead of calling getNodeId() twice for host and port.
    NodeId nodeId = container.getNodeId();
    String cmIpPortStr = String.format("%s:%d", nodeId.getHost(), nodeId.getPort());
    InetSocketAddress cmAddress = NetUtils.createSocketAddr(cmIpPortStr);
    return (ContainerManager) yarnRPC.getProxy(ContainerManager.class, cmAddress, yarnConf);
}
From source file:com.datatorrent.stram.StreamingAppMasterService.java
License:Apache License
/**
 * Main run function for the application master.
 *
 * Registers with the ResourceManager, then loops: refreshes security tokens when close to
 * expiry, drains pending tasks, asks the RM for containers matching pending deploy requests,
 * launches allocated containers, processes completed containers, and exits when the
 * application is done (all containers finished or a forced shutdown was requested).
 *
 * @throws YarnException on RM communication failure
 * @throws IOException on token/filesystem failure
 */
@SuppressWarnings("SleepWhileInLoop")
private void execute() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");
    final Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    LOG.info("number of tokens: {}", credentials.getAllTokens().size());
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.debug("token: {}", token);
    }
    final Configuration conf = getConfig();
    // Token refresh is scheduled at a fraction (anticipatory factor) of the shorter of the
    // HDFS and RM token lifetimes, so refresh happens before either token can expire.
    long tokenLifeTime = (long) (dag.getValue(LogicalPlan.TOKEN_REFRESH_ANTICIPATORY_FACTOR) * Math
            .min(dag.getValue(LogicalPlan.HDFS_TOKEN_LIFE_TIME), dag.getValue(LogicalPlan.RM_TOKEN_LIFE_TIME)));
    long expiryTime = System.currentTimeMillis() + tokenLifeTime;
    LOG.debug(" expiry token time {}", tokenLifeTime);
    String hdfsKeyTabFile = dag.getValue(LogicalPlan.KEY_TAB_FILE);

    // Register self with ResourceManager
    RegisterApplicationMasterResponse response = amRmClient.registerApplicationMaster(appMasterHostname, 0,
            appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    int maxVcores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max mem {}m and vcores {} capabililty of resources in this cluster ", maxMem, maxVcores);

    // Outstanding container requests, keyed by deploy request; the pair tracks the loop
    // iteration the request was made in (for locality-relaxation fallback) and the request itself.
    Map<StreamingContainerAgent.ContainerStartRequest, MutablePair<Integer, ContainerRequest>> requestedResources = Maps
            .newHashMap();

    // Heartbeating: the allocate calls to the RM count as heartbeats, so no additional
    // heartbeat emitter is required (RM_AM_EXPIRY_INTERVAL_MS governs the AM timeout).
    int loopCounter = -1;
    List<ContainerId> releasedContainers = new ArrayList<ContainerId>();
    int numTotalContainers = 0;
    // keep track of already requested containers to not request them again while waiting for allocation
    int numRequestedContainers = 0;
    int numReleasedContainers = 0;
    int nextRequestPriority = 0;
    ResourceRequestHandler resourceRequestor = new ResourceRequestHandler();

    YarnClient clientRMService = YarnClient.createYarnClient();
    try {
        // YARN-435: we need getClusterNodes to populate the initial node list;
        // subsequent updates come through the heartbeat response.
        clientRMService.init(conf);
        clientRMService.start();

        // Refuse to start if another running app instance already uses the same name for this user.
        ApplicationReport ar = StramClientUtils.getStartedAppInstanceByName(clientRMService,
                dag.getAttributes().get(DAG.APPLICATION_NAME), UserGroupInformation.getLoginUser().getUserName(),
                dag.getAttributes().get(DAG.APPLICATION_ID));
        if (ar != null) {
            appDone = true;
            dnmgr.shutdownDiagnosticsMessage = String.format(
                    "Application master failed due to application %s with duplicate application name \"%s\" by the same user \"%s\" is already started.",
                    ar.getApplicationId().toString(), ar.getName(), ar.getUser());
            LOG.info("Forced shutdown due to {}", dnmgr.shutdownDiagnosticsMessage);
            finishApplication(FinalApplicationStatus.FAILED, numTotalContainers);
            return;
        }
        resourceRequestor.updateNodeReports(clientRMService.getNodeReports());
    } catch (Exception e) {
        throw new RuntimeException("Failed to retrieve cluster nodes report.", e);
    } finally {
        clientRMService.stop();
    }

    // Check for previously allocated containers.
    // As of 2.2, containers won't survive AM restart, but this will change in the future - YARN-1490
    checkContainerStatus();
    FinalApplicationStatus finalStatus = FinalApplicationStatus.SUCCEEDED;
    final InetSocketAddress rmAddress = conf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
            YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);

    while (!appDone) {
        loopCounter++;

        // Refresh delegation tokens from the keytab before they expire (secure clusters only).
        if (UserGroupInformation.isSecurityEnabled() && System.currentTimeMillis() >= expiryTime
                && hdfsKeyTabFile != null) {
            String applicationId = appAttemptID.getApplicationId().toString();
            expiryTime = StramUserLogin.refreshTokens(tokenLifeTime, "." + File.separator + "tmp", applicationId,
                    conf, hdfsKeyTabFile, credentials, rmAddress, true);
        }

        // Drain any tasks other threads queued for execution on the main loop.
        Runnable r;
        while ((r = this.pendingTasks.poll()) != null) {
            r.run();
        }

        // Sleep before each loop when asking RM for containers to avoid flooding the RM
        // with spurious requests when it need not have any available containers.
        try {
            sleep(1000);
        } catch (InterruptedException e) {
            LOG.info("Sleep interrupted " + e.getMessage());
        }

        // Setup request to be sent to RM to allocate containers
        List<ContainerRequest> containerRequests = new ArrayList<ContainerRequest>();
        List<ContainerRequest> removedContainerRequests = new ArrayList<ContainerRequest>();

        // Request containers for pending deploy requests, clamping resources to cluster maximums.
        if (!dnmgr.containerStartRequests.isEmpty()) {
            StreamingContainerAgent.ContainerStartRequest csr;
            while ((csr = dnmgr.containerStartRequests.poll()) != null) {
                if (csr.container.getRequiredMemoryMB() > maxMem) {
                    LOG.warn("Container memory {}m above max threshold of cluster. Using max value {}m.",
                            csr.container.getRequiredMemoryMB(), maxMem);
                    csr.container.setRequiredMemoryMB(maxMem);
                }
                if (csr.container.getRequiredVCores() > maxVcores) {
                    LOG.warn("Container vcores {} above max threshold of cluster. Using max value {}.",
                            csr.container.getRequiredVCores(), maxVcores);
                    csr.container.setRequiredVCores(maxVcores);
                }
                // Each request gets a unique priority so allocations can be matched back to it.
                csr.container.setResourceRequestPriority(nextRequestPriority++);
                ContainerRequest cr = resourceRequestor.createContainerRequest(csr, true);
                MutablePair<Integer, ContainerRequest> pair = new MutablePair<Integer, ContainerRequest>(
                        loopCounter, cr);
                requestedResources.put(csr, pair);
                containerRequests.add(cr);
            }
        }

        // Locality-relaxation fallback: re-issue requests that have gone unanswered too long
        // with relaxed locality (createContainerRequest(..., false)).
        if (!requestedResources.isEmpty()) {
            for (Map.Entry<StreamingContainerAgent.ContainerStartRequest, MutablePair<Integer, ContainerRequest>> entry : requestedResources
                    .entrySet()) {
                if ((loopCounter - entry.getValue().getKey()) > NUMBER_MISSED_HEARTBEATS) {
                    StreamingContainerAgent.ContainerStartRequest csr = entry.getKey();
                    removedContainerRequests.add(entry.getValue().getRight());
                    ContainerRequest cr = resourceRequestor.createContainerRequest(csr, false);
                    entry.getValue().setLeft(loopCounter);
                    entry.getValue().setRight(cr);
                    containerRequests.add(cr);
                }
            }
        }

        numTotalContainers += containerRequests.size();
        numRequestedContainers += containerRequests.size();
        AllocateResponse amResp = sendContainerAskToRM(containerRequests, removedContainerRequests,
                releasedContainers);
        if (amResp.getAMCommand() != null) {
            LOG.info(" statement executed:{}", amResp.getAMCommand());
            // Both known commands and any unknown command result in the same exception;
            // the switch documents the recognized values.
            switch (amResp.getAMCommand()) {
            case AM_RESYNC:
            case AM_SHUTDOWN:
                throw new YarnRuntimeException("Received the " + amResp.getAMCommand() + " command from RM");
            default:
                throw new YarnRuntimeException("Received the " + amResp.getAMCommand() + " command from RM");
            }
        }
        releasedContainers.clear();

        // Retrieve list of allocated containers from the response
        List<Container> newAllocatedContainers = amResp.getAllocatedContainers();
        numRequestedContainers -= newAllocatedContainers.size();
        long timestamp = System.currentTimeMillis();
        for (Container allocatedContainer : newAllocatedContainers) {
            LOG.info("Got new container." + ", containerId=" + allocatedContainer.getId() + ", containerNode="
                    + allocatedContainer.getNodeId() + ", containerNodeURI="
                    + allocatedContainer.getNodeHttpAddress() + ", containerResourceMemory"
                    + allocatedContainer.getResource().getMemory() + ", priority"
                    + allocatedContainer.getPriority());

            // Match the allocation back to an outstanding request via the priority assigned above.
            boolean alreadyAllocated = true;
            StreamingContainerAgent.ContainerStartRequest csr = null;
            for (Map.Entry<StreamingContainerAgent.ContainerStartRequest, MutablePair<Integer, ContainerRequest>> entry : requestedResources
                    .entrySet()) {
                if (entry.getKey().container.getResourceRequestPriority() == allocatedContainer.getPriority()
                        .getPriority()) {
                    alreadyAllocated = false;
                    csr = entry.getKey();
                    break;
                }
            }

            // No matching outstanding request: surplus allocation, release it back to the RM.
            if (alreadyAllocated) {
                LOG.info("Releasing {} as resource with priority {} was already assigned",
                        allocatedContainer.getId(), allocatedContainer.getPriority());
                releasedContainers.add(allocatedContainer.getId());
                numReleasedContainers++;
                numRequestedContainers++;
                continue;
            }
            if (csr != null) {
                requestedResources.remove(csr);
            }

            // allocate resource to container
            ContainerResource resource = new ContainerResource(allocatedContainer.getPriority().getPriority(),
                    allocatedContainer.getId().toString(), allocatedContainer.getNodeId().toString(),
                    allocatedContainer.getResource().getMemory(),
                    allocatedContainer.getResource().getVirtualCores(),
                    allocatedContainer.getNodeHttpAddress());
            StreamingContainerAgent sca = dnmgr.assignContainer(resource, null);
            if (sca == null) {
                // allocated container no longer needed, add release request
                LOG.warn("Container {} allocated but nothing to deploy, going to release this container.",
                        allocatedContainer.getId());
                releasedContainers.add(allocatedContainer.getId());
            } else {
                AllocatedContainer allocatedContainerHolder = new AllocatedContainer(allocatedContainer);
                this.allocatedContainers.put(allocatedContainer.getId().toString(), allocatedContainerHolder);
                ByteBuffer tokens = null;
                if (UserGroupInformation.isSecurityEnabled()) {
                    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
                    Token<StramDelegationTokenIdentifier> delegationToken = allocateDelegationToken(
                            ugi.getUserName(), heartbeatListener.getAddress());
                    allocatedContainerHolder.delegationToken = delegationToken;
                    tokens = LaunchContainerRunnable.getTokens(ugi, delegationToken);
                }
                LaunchContainerRunnable launchContainer = new LaunchContainerRunnable(allocatedContainer,
                        nmClient, sca, tokens);
                // Runs on this thread; communication with NMs is now async so this does not block.
                launchContainer.run();

                // record container start event
                StramEvent ev = new StramEvent.StartContainerEvent(allocatedContainer.getId().toString(),
                        allocatedContainer.getNodeId().toString());
                ev.setTimestamp(timestamp);
                dnmgr.recordEventAsync(ev);
            }
        }

        // Track node updates for future locality constraint allocations.
        // TODO: it seems 2.0.4-alpha doesn't give us any updates
        resourceRequestor.updateNodeReports(amResp.getUpdatedNodes());

        // Check the completed containers
        List<ContainerStatus> completedContainers = amResp.getCompletedContainersStatuses();
        for (ContainerStatus containerStatus : completedContainers) {
            LOG.info("Completed containerId=" + containerStatus.getContainerId() + ", state="
                    + containerStatus.getState() + ", exitStatus=" + containerStatus.getExitStatus()
                    + ", diagnostics=" + containerStatus.getDiagnostics());

            // non complete containers should not be here
            assert (containerStatus.getState() == ContainerState.COMPLETE);

            AllocatedContainer allocatedContainer = allocatedContainers
                    .remove(containerStatus.getContainerId().toString());
            // Cancel the per-container delegation token now that the container is gone.
            if (allocatedContainer != null && allocatedContainer.delegationToken != null) {
                UserGroupInformation ugi = UserGroupInformation.getLoginUser();
                delegationTokenManager.cancelToken(allocatedContainer.delegationToken, ugi.getUserName());
            }
            int exitStatus = containerStatus.getExitStatus();
            if (0 != exitStatus) {
                if (allocatedContainer != null) {
                    numFailedContainers.incrementAndGet();
                }
                // Recoverable failure or process killed (externally or via stop request by AM);
                // also occurs when a container was released by the application but never assigned/launched.
                LOG.debug("Container {} failed or killed.", containerStatus.getContainerId());
                dnmgr.scheduleContainerRestart(containerStatus.getContainerId().toString());
            } else {
                // container completed successfully
                numCompletedContainers.incrementAndGet();
                LOG.info("Container completed successfully." + ", containerId="
                        + containerStatus.getContainerId());
            }

            String containerIdStr = containerStatus.getContainerId().toString();
            dnmgr.removeContainerAgent(containerIdStr);

            // record container stop event
            StramEvent ev = new StramEvent.StopContainerEvent(containerIdStr, containerStatus.getExitStatus());
            ev.setReason(containerStatus.getDiagnostics());
            dnmgr.recordEventAsync(ev);
        }

        // Decide whether the application is finished.
        if (dnmgr.forcedShutdown) {
            LOG.info("Forced shutdown due to {}", dnmgr.shutdownDiagnosticsMessage);
            finalStatus = FinalApplicationStatus.FAILED;
            appDone = true;
        } else if (allocatedContainers.isEmpty() && numRequestedContainers == 0
                && dnmgr.containerStartRequests.isEmpty()) {
            LOG.debug("Exiting as no more containers are allocated or requested");
            finalStatus = FinalApplicationStatus.SUCCEEDED;
            appDone = true;
        }

        LOG.debug("Current application state: loop=" + loopCounter + ", appDone=" + appDone + ", total="
                + numTotalContainers + ", requested=" + numRequestedContainers + ", released="
                + numReleasedContainers + ", completed=" + numCompletedContainers + ", failed="
                + numFailedContainers + ", currentAllocated=" + allocatedContainers.size());

        // monitor child containers
        dnmgr.monitorHeartbeat();
    }
    finishApplication(finalStatus, numTotalContainers);
}
From source file:com.github.hdl.tensorflow.yarn.app.ApplicationMaster.java
License:Apache License
public boolean startAllContainers() throws Exception { if (numAllocatedContainers.get() == numTotalContainers) { int numWorkerContainers = 0; int numPsContainers = 0; if (this.allocatedContainers.size() < numTotalWokerContainers + numTotalParamServerContainer) { LOG.error("not enough ps and woker containers allocated!"); return false; }//from w w w.j a v a 2 s. co m for (Container allocatedContainer : this.allocatedContainers) { if (numWorkerContainers < numTotalWokerContainers) { LOG.info("work cid: " + allocatedContainer.getId().toString()); clusterSpec.addWorkerSpec(allocatedContainer.getId().toString(), allocatedContainer.getNodeId().getHost()); numWorkerContainers++; continue; } if (numPsContainers < this.numTotalParamServerContainer) { LOG.info("ps cid: " + allocatedContainer.getId().toString()); clusterSpec.addPsSpec(allocatedContainer.getId().toString(), allocatedContainer.getNodeId().getHost()); numPsContainers++; } } for (Container allocatedContainer : this.allocatedContainers) { LOG.info("Launching a new container." 
+ ", containerId=" + allocatedContainer.getId() + ", containerNode=" + allocatedContainer.getNodeId().getHost() + ":" + allocatedContainer.getNodeId().getPort() + ", containerNodeURI=" + allocatedContainer.getNodeHttpAddress() + ", containerResourceMemory" + allocatedContainer.getResource().getMemorySize() + ", containerResourceVirtualCores" + allocatedContainer.getResource().getVirtualCores()); // + ", containerToken" // +allocatedContainer.getContainerToken().getIdentifier().toString()); LOG.info("server cid: " + allocatedContainer.getId().toString()); LaunchContainerThread launchDelegator = new LaunchContainerThread(allocatedContainer, this, clusterSpec.getServerAddress(allocatedContainer.getId().toString())); launchDelegator.setTfServerJar(tfServerJar); launchDelegator.setJniSoDfsPath(jniSoDfsPath); launchDelegator.setContainerMemory(containerMemory); launchDelegator.setContainerRetryPolicy(containerRetryPolicy); launchDelegator.setContainerRetryErrorCodes(containerRetryErrorCodes); launchDelegator.setContainerMaxRetries(containerMaxRetries); launchDelegator.setContainrRetryInterval(containrRetryInterval); Thread launchThread = new Thread(launchDelegator); // launch and start the container on a separate thread to keep // the main thread unblocked // as all containers may not be allocated at one go. launchThreads.add(launchThread); launchedContainers.add(allocatedContainer.getId()); launchThread.start(); } } else { throw new Exception("containers are not allocated!"); } return true; }
From source file:com.hazelcast.yarn.ApplicationMaster.java
License:Open Source License
private boolean checkContainer(Container cont) { if (this.properties.clusterSize() <= this.containers.size()) { LOG.log(Level.INFO, "Failed this.properties.clusterSize()=" + this.properties.clusterSize() + " this.containers.size()=" + this.containers.size()); return false; }//from w ww . j a va2 s .c o m if (cont.getResource().getVirtualCores() < this.properties.cpuPerNode() || cont.getResource().getMemory() < this.properties.memoryPerNode()) { LOG.log(Level.INFO, "Container resources not sufficient requirements. Host: {0}, cpu: {1}, mem: {2}", new Object[] { cont.getNodeId().getHost(), cont.getResource().getVirtualCores(), cont.getResource().getMemory() }); return false; } return true; }
From source file:com.hazelcast.yarn.ApplicationMaster.java
License:Open Source License
/**
 * Callback invoked when the ResourceManager allocates containers. Each container that
 * passes {@code checkContainer} gets a launch context with the Hazelcast distribution
 * (plus optional custom libs and JVM options) and is started via the NM client; started
 * containers are tracked in {@code this.containers}.
 *
 * @param containerList containers newly allocated by the RM
 */
@Override
public synchronized void onContainersAllocated(List<Container> containerList) {
    for (Container container : containerList) {
        if (checkContainer(container)) {
            try {
                ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
                Map<String, String> env = new HashMap<String, String>(System.getenv());

                Map<String, LocalResource> resources = new HashMap<String, LocalResource>();
                resources.put("hazelcast",
                        YarnUtil.createFileResource(this.hazelcastPath, this.hdfs, LocalResourceType.ARCHIVE));
                if (this.properties.customLibs() != null) {
                    resources.put("libs", YarnUtil.createFileResource(new Path(this.properties.customLibs()),
                            this.hdfs, LocalResourceType.FILE));
                }

                if (this.properties.jvmOpts() != null && !this.properties.jvmOpts().isEmpty()) {
                    env.put("JVM_OPTS", this.properties.jvmOpts());
                }
                // Set the environment once, after all entries (including JVM_OPTS) are in place.
                // The original code also called setEnvironment before the JVM_OPTS put; that
                // first call was redundant and has been removed.
                ctx.setEnvironment(env);

                // Size the member's heap to the container's full memory allotment.
                String command = "cd ./hazelcast/*/bin/ && ./server.sh hazelcast.xml" + " -J-Xmx"
                        + container.getResource().getMemory() + "m" + " -J-Xms"
                        + container.getResource().getMemory() + "m" + YarnUtil.LOGS;
                LOG.log(Level.INFO, command);
                ctx.setLocalResources(resources);
                ctx.setCommands(Collections.singletonList(command));

                LOG.log(Level.INFO, "Launching container: {0}.", container.getId());
                this.nmClient.startContainer(container, ctx);
                this.containers.put(container.getId(),
                        new HazelcastContainer(container.getId(), container.getNodeId(),
                                container.getResource().getVirtualCores(), container.getResource().getMemory()));
            } catch (Exception ex) {
                LOG.log(Level.WARNING, "Error launching container " + container.getId(), ex);
            }
        } else {
            LOG.log(Level.INFO, "Checking failed for container=" + container.toString());
        }
    }
}
From source file:com.inforefiner.hdata.ApplicationMaster.java
License:Apache License
/**
 * Publishes a container-start event to the YARN timeline service on behalf of the given user.
 * Failures are logged (with the cause unwrapped from UndeclaredThrowableException) rather
 * than propagated.
 *
 * @param timelineClient client used to put the entity
 * @param container the container that started
 * @param domainId timeline domain the entity belongs to
 * @param ugi user under whose identity the put is performed
 */
private static void publishContainerStartEvent(final TimelineClient timelineClient, Container container,
        String domainId, UserGroupInformation ugi) {
    // Describe the start as a timeline event on the container entity.
    TimelineEvent startEvent = new TimelineEvent();
    startEvent.setEventType(DSEvent.DS_CONTAINER_START.toString());
    startEvent.setTimestamp(System.currentTimeMillis());
    startEvent.addEventInfo("Node", container.getNodeId().toString());
    startEvent.addEventInfo("Resources", container.getResource().toString());

    final TimelineEntity entity = new TimelineEntity();
    entity.setEntityId(container.getId().toString());
    entity.setEntityType(DSEntity.DS_CONTAINER.toString());
    entity.setDomainId(domainId);
    entity.addPrimaryFilter("user", ugi.getShortUserName());
    entity.addEvent(startEvent);

    try {
        // Perform the put as the supplied user.
        ugi.doAs(new PrivilegedExceptionAction<TimelinePutResponse>() {
            @Override
            public TimelinePutResponse run() throws Exception {
                return timelineClient.putEntities(entity);
            }
        });
    } catch (Exception e) {
        // doAs wraps undeclared checked exceptions; log the underlying cause when present.
        LOG.error("Container start event could not be published for " + container.getId().toString(),
                e instanceof UndeclaredThrowableException ? e.getCause() : e);
    }
}
From source file:com.scistor.dshell.ScistorApplicationMaster.java
License:Apache License
private static void publishContainerStartEvent(TimelineClient timelineClient, Container container) throws IOException, YarnException { TimelineEntity entity = new TimelineEntity(); entity.setEntityId(container.getId().toString()); entity.setEntityType(DSEntity.DS_CONTAINER.toString()); entity.addPrimaryFilter("user", UserGroupInformation.getCurrentUser().toString()); TimelineEvent event = new TimelineEvent(); event.setTimestamp(System.currentTimeMillis()); event.setEventType(DSEvent.DS_CONTAINER_START.toString()); event.addEventInfo("Node", container.getNodeId().toString()); event.addEventInfo("Resources", container.getResource().toString()); entity.addEvent(event);// w w w .j a v a2 s .c o m timelineClient.putEntities(entity); }
From source file:de.huberlin.wbi.hiway.am.benchmark.PerfectDaxGreedyQueue.java
License:Apache License
/**
 * Best-fit matcher: scans the ready queue for the task whose peak memory wastes the least
 * of the container's memory (queue order is preserved to avoid starvation) and returns it,
 * or null when no queued task fits the container.
 *
 * NOTE(review): the chosen task is returned but not removed from {@code queue} here —
 * presumably the caller dequeues it, otherwise the same task could be scheduled twice;
 * verify against the caller. Counters are also updated even when null is returned.
 */
@Override
public TaskInstance scheduleTaskToContainer(Container container) {
    numberOfRemainingTasks--;
    numberOfRunningTasks++;
    // compute and log the service time (from request to allocation) for this container
    logRequestServiceTime(container);

    // Search the container in the queue with the lowest wastage, but do not change the
    // order (to avoid starvation). Wastage starts at the full container size (MB -> bytes).
    double optWastedBytes = container.getResource().getMemory() * 1e6;
    DaxTaskInstance bestfit = null;
    for (DaxTaskInstance task : queue) {
        double wastedBytes = container.getResource().getMemory() * 1e6 - task.getPeakMemoryBytes();
        if (wastedBytes >= 0 /* task fits */ && wastedBytes < optWastedBytes) {
            optWastedBytes = wastedBytes;
            bestfit = task;
        } else {
            // Logged for any task that is not a strict improvement — including tasks that
            // fit but waste more than the current best, not only tasks that don't fit.
            WorkflowDriver.Logger.writeToStdout(String.format(
                    "PerfDaxGQ skipped task %s (peak_mem_bytes=%s) for %s MB container", task.getTaskName(),
                    task.getPeakMemoryBytes(), container.getResource().getMemory()));
        }
    }
    if (bestfit != null) {
        WorkflowDriver.Logger.writeToStdout(String.format(
                "PerfDaxGQ Assigned task %s (peak_mem_bytes %s) to container %s@%s (memory %s MB)", bestfit,
                bestfit.getPeakMemoryBytes(), container.getId(), container.getNodeId().getHost(),
                container.getResource().getMemory()));
        bestfit.incTries();
    }
    return bestfit;
}
From source file:de.huberlin.wbi.hiway.common.Data.java
License:Apache License
/**
 * Sums the lengths of this file's HDFS blocks that have a replica on the node hosting the
 * given container — i.e. how many bytes of this file the container can read locally.
 *
 * @param container the container whose node-locality is evaluated
 * @return total bytes of this file stored on the container's host
 * @throws IOException if HDFS metadata lookups fail
 */
long countAvailableLocalData(Container container) throws IOException {
    BlockLocation[] blockLocations = null;
    Path hdfsLocation = getHdfsPath();
    // Retry until block locations are reported.
    // NOTE(review): loops indefinitely if getFileBlockLocations keeps returning null —
    // presumably transient while the file is materializing; confirm and consider a bound.
    while (blockLocations == null) {
        FileStatus fileStatus = hdfs.getFileStatus(hdfsLocation);
        blockLocations = hdfs.getFileBlockLocations(hdfsLocation, 0, fileStatus.getLen());
    }

    // Hoist the container's host out of the loops; it is invariant and was previously
    // recomputed for every (block, replica) pair.
    String containerHost = container.getNodeId().getHost();
    long sum = 0;
    for (BlockLocation blockLocation : blockLocations) {
        for (String host : blockLocation.getHosts()) {
            if (containerHost.equals(host)) {
                // Count each block at most once, even with multiple local replicas.
                sum += blockLocation.getLength();
                break;
            }
        }
    }
    return sum;
}
From source file:de.huberlin.wbi.hiway.scheduler.c3po.C3PO.java
License:Apache License
/**
 * C3PO scheduler: picks a task for the container by weighted random sampling over per-task
 * estimates. The weight for each task id combines per-node runtime estimates
 * (conservatism), job statistics (outlook), and data locality (placement awareness).
 * When no ready tasks exist, a running task is replicated (speculative copy) instead.
 *
 * @return the selected task, or null if the sample fell outside all weight intervals
 */
@Override
public TaskInstance scheduleTaskToContainer(Container container) {
    TaskInstance task = null;
    numberOfRemainingTasks--;
    numberOfRunningTasks++;
    // No ready tasks left: schedule a speculative replica of a running task instead.
    boolean replicate = getNumberOfReadyTasks() == 0;

    String nodeId = container.getNodeId().getHost();
    // First time we see this host: initialize its runtime-estimate bookkeeping.
    if (!provenanceManager.runtimeEstimatesPerNode.containsKey(nodeId)) {
        newHost(nodeId);
    }

    computeJobStatisticsWeight(replicate);
    computeTaskStatisticsWeights();
    computePlacementAwarenessWeights(container, replicate);

    // Combine the three weight sources multiplicatively, each raised by its exponent,
    // then normalize so the weights form a probability distribution.
    Map<Long, Estimate> combinedWeights = new HashMap<>();
    for (long taskId : getTaskIds())
        combinedWeights.put(taskId, new Estimate());
    multiplyWeights(combinedWeights, provenanceManager.runtimeEstimatesPerNode.get(nodeId), conservatismWeight);
    multiplyWeights(combinedWeights, jobStatistics, outlookWeight);
    multiplyWeights(combinedWeights, dataLocalityStatistics, placementAwarenessWeight);
    normalizeWeights(combinedWeights.values());

    if (HiWayConfiguration.verbose) {
        WorkflowDriver.Logger.writeToStdout("Updated Decision Vector for node " + nodeId + ":");
        WorkflowDriver.Logger.writeToStdout("\tConservatism (x" + (int) (conservatismWeight + 0.5d) + ")\t"
                + printWeights(provenanceManager.runtimeEstimatesPerNode.get(nodeId)));
        WorkflowDriver.Logger.writeToStdout(
                "\tOutlook (x" + (int) (outlookWeight + 0.5d) + ")\t\t" + printWeights(jobStatistics));
        WorkflowDriver.Logger.writeToStdout("\tPlacement (x" + (int) (placementAwarenessWeight + 0.5d) + ")\t\t"
                + printWeights(dataLocalityStatistics));
        WorkflowDriver.Logger.writeToStdout("\tCombined\t\t" + printWeights(combinedWeights));
    }

    // Sample a task id: walk the cumulative distribution until the sample falls inside
    // a task's [min, max) interval.
    double sample = numGen.nextDouble();
    double min = 0d;
    for (long taskId : getTaskIds()) {
        double max = min + combinedWeights.get(taskId).weight;
        if (sample < max) {
            // Replication draws from runningTasks; normal scheduling draws from readyTasks
            // and decrements the job's remaining-task count.
            Queue<TaskInstance> queue = runningTasks.get(taskId);
            if (!replicate) {
                jobStatistics.get(taskId).remainingTasks--;
                queue = readyTasks.get(taskId);
            }
            task = queue.remove();
            runningTasks.get(taskId).add(task);
            if (!taskToContainers.containsKey(task)) {
                taskToContainers.put(task, new ArrayList<Container>());
            }
            taskToContainers.get(task).add(container);
            if (replicate) {
                WorkflowDriver.Logger.writeToStdout("Assigned speculative copy of task " + task
                        + " to container " + container.getId() + "@" + container.getNodeId().getHost());
            } else {
                WorkflowDriver.Logger.writeToStdout("C3PO Assigned task " + task + " to container "
                        + container.getId() + "@" + container.getNodeId().getHost());
            }
            task.incTries();
            return task;
        }
        min = max;
    }
    // Unreached only if weights sum to < 1 after normalization and the sample exceeds them.
    return task;
}