Example usage for org.apache.hadoop.yarn.api.records Container getResource

List of usage examples for org.apache.hadoop.yarn.api.records Container getResource

Introduction

On this page you can find example usage for org.apache.hadoop.yarn.api.records Container getResource.

Prototype

@Public
@Stable
public abstract Resource getResource();

Document

Get the Resource allocated to the container.
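
As a quick orientation, here is a minimal, self-contained sketch of how getResource() is typically used after containers have been allocated. The class name AllocationLogger and the surrounding wiring are illustrative only; in a real ApplicationMaster this logic would usually live in an AMRMClientAsync.CallbackHandler, as in the examples below.

import java.util.List;

import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.Resource;

public class AllocationLogger {

    /** Logs the resources the ResourceManager granted for each allocated container. */
    public void logAllocations(List<Container> allocatedContainers) {
        for (Container container : allocatedContainers) {
            // getResource() returns the Resource (memory in MB and virtual cores)
            // actually allocated to this container.
            Resource allocated = container.getResource();
            System.out.println("Container " + container.getId() + " on node " + container.getNodeId()
                    + ": memory=" + allocated.getMemory() + "MB, vCores=" + allocated.getVirtualCores());
        }
    }
}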

Usage

From source file:org.apache.hama.bsp.JobImpl.java

License:Apache License

@Override
public JobState startJob() throws Exception {

    this.allocatedContainers = new ArrayList<Container>(numBSPTasks);
    NMTokenCache nmTokenCache = new NMTokenCache();
    while (allocatedContainers.size() < numBSPTasks) {
        AllocateRequest req = AllocateRequest.newInstance(lastResponseID, 0.0f,
                createBSPTaskRequest(numBSPTasks - allocatedContainers.size(), taskMemoryInMb, priority),
                releasedContainers, null);

        AllocateResponse allocateResponse = resourceManager.allocate(req);
        for (NMToken token : allocateResponse.getNMTokens()) {
            nmTokenCache.setToken(token.getNodeId().toString(), token.getToken());
        }

        LOG.info("Got response ID: " + allocateResponse.getResponseId() + " with num of containers: "
                + allocateResponse.getAllocatedContainers().size() + " and following resources: "
                + allocateResponse.getAvailableResources().getMemory() + "mb");
        this.lastResponseID = allocateResponse.getResponseId();

        this.allocatedContainers.addAll(allocateResponse.getAllocatedContainers());

        LOG.info("Waiting to allocate " + (numBSPTasks - allocatedContainers.size()) + " more containers...");

        Thread.sleep(1000L);
    }

    LOG.info("Got " + allocatedContainers.size() + " containers!");

    int id = 0;
    for (Container allocatedContainer : allocatedContainers) {
        LOG.info("Launching task on a new container." + ", containerId=" + allocatedContainer.getId()
                + ", containerNode=" + allocatedContainer.getNodeId().getHost() + ":"
                + allocatedContainer.getNodeId().getPort() + ", containerNodeURI="
                + allocatedContainer.getNodeHttpAddress() + ", containerResourceMemory="
                + allocatedContainer.getResource().getMemory());

        // Connect to ContainerManager on the allocated container
        String user = conf.get("bsp.user.name");
        if (user == null) {
            user = System.getenv(ApplicationConstants.Environment.USER.name());
        }

        ContainerManagementProtocol cm = null;
        try {
            cm = getContainerManagementProtocolProxy(yarnRPC,
                    nmTokenCache.getToken(allocatedContainer.getNodeId().toString()),
                    allocatedContainer.getNodeId(), user);
        } catch (Exception e) {
            LOG.error("Failed to create ContainerManager...");
            if (cm != null)
                yarnRPC.stopProxy(cm, conf);
            e.printStackTrace();
        }

        BSPTaskLauncher runnableLaunchContainer = new BSPTaskLauncher(id, allocatedContainer, cm, conf, jobFile,
                jobId);

        launchers.put(id, runnableLaunchContainer);
        runnableLaunchContainer.start();
        completionQueue.add(runnableLaunchContainer);
        id++;
    }

    LOG.info("Waiting for tasks to finish...");
    state = JobState.RUNNING;
    int completed = 0;

    List<Integer> cleanupTasks = new ArrayList<Integer>();
    while (completed != numBSPTasks) {
        for (BSPTaskLauncher task : completionQueue) {
            BSPTaskStatus returnedTask = task.poll();
            // if our task returned with a finished state
            if (returnedTask != null) {
                if (returnedTask.getExitStatus() != 0) {
                    LOG.error("Task with id \"" + returnedTask.getId() + "\" failed!");
                    cleanupTask(returnedTask.getId());
                    state = JobState.FAILED;
                    return state;
                } else {
                    LOG.info("Task \"" + returnedTask.getId() + "\" sucessfully finished!");
                    completed++;
                    LOG.info("Waiting for " + (numBSPTasks - completed) + " tasks to finish!");
                }
                cleanupTasks.add(returnedTask.getId());
            }
        }
        Thread.sleep(1000L);
    }

    for (Integer stopId : cleanupTasks) {
        cleanupTask(stopId);
    }

    state = JobState.SUCCESS;
    return state;
}

From source file:org.apache.helix.provisioning.yarn.RMCallbackHandler.java

License:Apache License

@Override
public void onContainersAllocated(List<Container> allocatedContainers) {
    GenericApplicationMaster.LOG
            .info("Got response from RM for container ask, allocatedCnt=" + allocatedContainers.size());
    for (Container allocatedContainer : allocatedContainers) {
        GenericApplicationMaster.LOG.info("Allocated new container." + ", containerId="
                + allocatedContainer.getId() + ", containerNode=" + allocatedContainer.getNodeId().getHost()
                + ":" + allocatedContainer.getNodeId().getPort() + ", containerNodeURI="
                + allocatedContainer.getNodeHttpAddress() + ", containerResourceMemory="
                + allocatedContainer.getResource().getMemory());
        for (ContainerRequest containerRequest : _genericApplicationMaster.containerRequestMap.keySet()) {
            if (containerRequest.getCapability().getMemory() == allocatedContainer.getResource().getMemory()) {
                SettableFuture<ContainerAskResponse> future = _genericApplicationMaster.containerRequestMap
                        .remove(containerRequest);
                ContainerAskResponse response = new ContainerAskResponse();
                response.setContainer(allocatedContainer);
                _genericApplicationMaster.allocatedContainerSet.add(allocatedContainer.getId());
                future.set(response);
                break;
            }
        }
    }
}

From source file:org.apache.ignite.yarn.ApplicationMaster.java

License:Apache License

/** {@inheritDoc} */
public synchronized void onContainersAllocated(List<Container> conts) {
    for (Container c : conts) {
        if (checkContainer(c)) {
            try {
                ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);

                if (UserGroupInformation.isSecurityEnabled())
                    // Set the tokens to the newly allocated container:
                    ctx.setTokens(allTokens.duplicate());

                Map<String, String> env = new HashMap<>(System.getenv());

                env.put("IGNITE_TCP_DISCOVERY_ADDRESSES", getAddress(c.getNodeId().getHost()));

                if (props.jvmOpts() != null && !props.jvmOpts().isEmpty())
                    env.put("JVM_OPTS", props.jvmOpts());

                ctx.setEnvironment(env);

                Map<String, LocalResource> resources = new HashMap<>();

                resources.put("ignite", IgniteYarnUtils.setupFile(ignitePath, fs, LocalResourceType.ARCHIVE));
                resources.put("ignite-config.xml",
                        IgniteYarnUtils.setupFile(cfgPath, fs, LocalResourceType.FILE));

                if (props.licencePath() != null)
                    resources.put("gridgain-license.xml", IgniteYarnUtils
                            .setupFile(new Path(props.licencePath()), fs, LocalResourceType.FILE));

                if (props.userLibs() != null)
                    resources.put("libs",
                            IgniteYarnUtils.setupFile(new Path(props.userLibs()), fs, LocalResourceType.FILE));

                ctx.setLocalResources(resources);

                ctx.setCommands(Collections.singletonList(
                        (props.licencePath() != null ? "cp gridgain-license.xml ./ignite/*/ || true && " : "")
                                + "cp -r ./libs/* ./ignite/*/libs/ || true && " + "./ignite/*/bin/ignite.sh "
                                + "./ignite-config.xml" + " -J-Xmx" + ((int) props.memoryPerNode()) + "m"
                                + " -J-Xms" + ((int) props.memoryPerNode()) + "m"
                                + IgniteYarnUtils.YARN_LOG_OUT));

                log.log(Level.INFO, "Launching container: {0}.", c.getId());

                nmClient.startContainer(c, ctx);

                containers.put(c.getId(), new IgniteContainer(c.getId(), c.getNodeId(),
                        c.getResource().getVirtualCores(), c.getResource().getMemory()));
            } catch (Exception ex) {
                log.log(Level.WARNING, "Error launching container " + c.getId(), ex);
            }
        } else
            rmClient.releaseAssignedContainer(c.getId());
    }
}

From source file:org.apache.ignite.yarn.ApplicationMaster.java

License:Apache License

/**
 * @param cont Container.
 * @return {@code True} if container satisfies requirements.
 */
private boolean checkContainer(Container cont) {
    // Check limit on running nodes.
    if (props.instances() <= containers.size())
        return false;

    // Check host name
    if (props.hostnameConstraint() != null
            && props.hostnameConstraint().matcher(cont.getNodeId().getHost()).matches())
        return false;

    // Check that slave satisfies min requirements.
    if (cont.getResource().getVirtualCores() < props.cpusPerNode()
            || cont.getResource().getMemory() < props.totalMemoryPerNode()) {
        log.log(Level.FINE, "Container resources not sufficient requirements. Host: {0}, cpu: {1}, mem: {2}",
                new Object[] { cont.getNodeId().getHost(), cont.getResource().getVirtualCores(),
                        cont.getResource().getMemory() });

        return false;
    }

    return true;
}

From source file:org.apache.metron.maas.service.callback.ContainerRequestListener.java

License:Apache License

@Override
public void onContainersAllocated(List<Container> allocatedContainers) {
    LOG.info("Got response from RM for container ask, allocatedCnt=" + allocatedContainers.size());
    for (Container allocatedContainer : allocatedContainers) {
        containers.put(allocatedContainer.getId(), allocatedContainer);
        state.registerContainer(allocatedContainer.getResource(), allocatedContainer);
        LOG.info("Launching shell command on a new container." + ", containerId=" + allocatedContainer.getId()
                + ", containerNode=" + allocatedContainer.getNodeId().getHost() + ":"
                + allocatedContainer.getNodeId().getPort() + ", containerNodeURI="
                + allocatedContainer.getNodeHttpAddress() + ", containerResourceMemory="
                + allocatedContainer.getResource().getMemory() + ", containerResourceVirtualCores="
                + allocatedContainer.getResource().getVirtualCores());
    }
}

From source file:org.apache.metron.maas.service.yarn.YarnUtils.java

License:Apache License

public void publishContainerStartEvent(final TimelineClient timelineClient, Container container,
        String domainId, UserGroupInformation ugi) {
    final TimelineEntity entity = new TimelineEntity();
    entity.setEntityId("" + container.getId());
    entity.setEntityType(ApplicationMaster.DSEntity.DS_CONTAINER.toString());
    entity.setDomainId(domainId);
    entity.addPrimaryFilter("user", ugi.getShortUserName());
    TimelineEvent event = new TimelineEvent();
    event.setTimestamp(System.currentTimeMillis());
    event.setEventType(ContainerEvents.CONTAINER_START.toString());
    event.addEventInfo("Node", container.getNodeId().toString());
    event.addEventInfo("Resources", container.getResource().toString());
    entity.addEvent(event);

    try {
        ugi.doAs(new PrivilegedExceptionAction<TimelinePutResponse>() {
            @Override
            public TimelinePutResponse run() throws Exception {
                return timelineClient.putEntities(entity);
            }
        });
    } catch (Exception e) {
        LOG.error("Container start event could not be published for " + container.getId().toString(),
                e instanceof UndeclaredThrowableException ? e.getCause() : e);
    }
}

From source file:org.apache.myriad.scheduler.fgs.YarnNodeCapacityManager.java

License:Apache License

private Protos.TaskInfo getTaskInfoForContainer(RMContainer rmContainer, ConsumedOffer consumedOffer,
        Node node) {

    Protos.Offer offer = consumedOffer.getOffers().get(0);
    Container container = rmContainer.getContainer();
    Protos.TaskID taskId = Protos.TaskID.newBuilder()
            .setValue(ContainerTaskStatusRequest.YARN_CONTAINER_TASK_ID_PREFIX + container.getId().toString())
            .build();

    // TODO (sdaingade) Remove ExecutorInfo from the Node object
    // as this is now cached in the NodeTask object in scheduler state.
    Protos.ExecutorInfo executorInfo = node.getExecInfo();
    if (executorInfo == null) {
        executorInfo = Protos.ExecutorInfo.newBuilder(
                state.getNodeTask(offer.getSlaveId(), NodeManagerConfiguration.DEFAULT_NM_TASK_PREFIX)
                        .getExecutorInfo())
                .setFrameworkId(offer.getFrameworkId()).build();
        node.setExecInfo(executorInfo);
    }

    return Protos.TaskInfo.newBuilder().setName("task_" + taskId.getValue()).setTaskId(taskId)
            .setSlaveId(offer.getSlaveId())
            .addAllResources(taskUtils.getScalarResource(offer, "cpus",
                    (double) container.getResource().getVirtualCores(), 0.0))
            .addAllResources(taskUtils.getScalarResource(offer, "mem",
                    (double) container.getResource().getMemory(), 0.0))
            .setExecutor(executorInfo).build();
}

From source file:org.apache.reef.runtime.yarn.driver.YarnContainerManager.java

License:Apache License

/**
 * Handles new container allocations. Calls come from YARN.
 *
 * @param container newly allocated container.
 */
private void handleNewContainer(final Container container, final boolean isRecoveredContainer) {

    LOG.log(Level.FINE, "allocated container: id[ {0} ]", container.getId());
    // A recovered container is not a new allocation; it is just checking back in after a previous driver failover.
    if (!isRecoveredContainer) {
        synchronized (this) {
            if (matchContainerWithPendingRequest(container)) {
                final AMRMClient.ContainerRequest matchedRequest = this.requestsAfterSentToRM.peek();
                this.containerRequestCounter.decrement();
                this.containers.add(container);

                LOG.log(Level.FINEST, "{0} matched with {1}",
                        new Object[] { container.toString(), matchedRequest.toString() });

                // Due to the bug YARN-314 and the workings of AMRMClientAsync, when an x-priority m-capacity zero-container request
                // and an x-priority n-capacity nonzero-container request are sent together, where m > n, the RM ignores the latter.
                // Therefore it is necessary to avoid sending zero-container requests, even if it means getting extra containers.
                // It is okay to send nonzero m-capacity and n-capacity requests together, since bigger containers can be matched.
                // TODO: revisit this when implementing locality-strictness (i.e. a specific rack request can be ignored)
                if (this.requestsAfterSentToRM.size() > 1) {
                    try {
                        this.resourceManager.removeContainerRequest(matchedRequest);
                    } catch (final Exception e) {
                        LOG.log(Level.WARNING,
                                "Nothing to remove from Async AMRM client's queue, removal attempt failed with exception",
                                e);
                    }
                }

                this.requestsAfterSentToRM.remove();
                doHomogeneousRequests();

                LOG.log(Level.FINEST, "Allocated Container: memory = {0}, core number = {1}", new Object[] {
                        container.getResource().getMemory(), container.getResource().getVirtualCores() });
                this.reefEventHandlers.onResourceAllocation(ResourceAllocationProto.newBuilder()
                        .setIdentifier(container.getId().toString()).setNodeId(container.getNodeId().toString())
                        .setResourceMemory(container.getResource().getMemory())
                        .setVirtualCores(container.getResource().getVirtualCores()).build());
                // We only add this to the Container log after the Container has been registered as a REEF Evaluator.
                logContainerAddition(container.getId().toString());
                this.updateRuntimeStatus();
            } else {
                LOG.log(Level.WARNING, "Got an extra container {0} that doesn't match, releasing...",
                        container.getId());
                this.resourceManager.releaseAssignedContainer(container.getId());
            }
        }
    }
}

From source file:org.apache.reef.runtime.yarn.driver.YarnContainerManager.java

License:Apache License

/**
 * Match to see whether the container satisfies the request.
 * We take into consideration that RM has some freedom in rounding
 * up the allocation and in placing containers on other machines.
 */
private boolean matchContainerWithPendingRequest(Container container) {
    if (this.requestsAfterSentToRM.isEmpty()) {
        return false;
    }

    final AMRMClient.ContainerRequest request = this.requestsAfterSentToRM.peek();
    final boolean resourceCondition = container.getResource().getMemory() >= request.getCapability()
            .getMemory(); // TODO: check vcores once YARN-2380 is resolved
    final boolean nodeCondition = request.getNodes() == null
            || request.getNodes().contains(container.getNodeId().getHost());
    final boolean rackCondition = request.getRacks() == null
            || request.getRacks().contains(this.nodeIdToRackName.get(container.getNodeId().toString()));

    return resourceCondition && (request.getRelaxLocality() || (rackCondition && nodeCondition));
}

From source file:org.apache.reef.runtime.yarn.driver.YarnDriverRuntimeRestartManager.java

License:Apache License

/**
 * Used by {@link org.apache.reef.driver.restart.DriverRestartManager}.
 * Gets the list of previous containers from the resource manager,
 * compares that list to the YarnDriverRuntimeRestartManager's own list based on the evaluator preserver,
 * and determines which evaluators are alive and which have failed during restart.
 * @return a map of Evaluator ID to {@link EvaluatorRestartInfo} for evaluators that have either failed or survived
 * driver restart.
 */
@Override
public RestartEvaluators getPreviousEvaluators() {
    final RestartEvaluators.Builder restartEvaluatorsBuilder = RestartEvaluators.newBuilder();

    this.initializeListOfPreviousContainers();

    if (this.previousContainers != null && !this.previousContainers.isEmpty()) {
        LOG.log(Level.INFO, "Driver restarted, with {0} previous containers", this.previousContainers.size());
        final Set<String> expectedContainers = this.evaluatorPreserver.recoverEvaluators();

        final int numExpectedContainers = expectedContainers.size();
        final int numPreviousContainers = this.previousContainers.size();
        if (numExpectedContainers > numPreviousContainers) {
            // we expected more containers to be alive, some containers must have died during driver restart
            LOG.log(Level.WARNING, "Expected {0} containers while only {1} are still alive",
                    new Object[] { numExpectedContainers, numPreviousContainers });
            final Set<String> previousContainersIds = new HashSet<>();
            for (final Container container : this.previousContainers) {
                previousContainersIds.add(container.getId().toString());
            }
            for (final String expectedContainerId : expectedContainers) {
                if (!previousContainersIds.contains(expectedContainerId)) {
                    LOG.log(Level.WARNING,
                            "Expected container [{0}] not alive, must have failed during driver restart.",
                            expectedContainerId);
                    restartEvaluatorsBuilder.addRestartEvaluator(
                            EvaluatorRestartInfo.createFailedEvaluatorInfo(expectedContainerId));
                }
            }
        }
        if (numExpectedContainers < numPreviousContainers) {
            // somehow we have more alive evaluators, this should not happen
            throw new RuntimeException("Expected only [" + numExpectedContainers + "] containers "
                    + "but resource manager believe that [" + numPreviousContainers
                    + "] are outstanding for driver.");
        }

        //  numExpectedContainers == numPreviousContainers
        for (final Container container : this.previousContainers) {
            LOG.log(Level.FINE, "Previous container: [{0}]", container.toString());
            if (!expectedContainers.contains(container.getId().toString())) {
                throw new RuntimeException("Not expecting container " + container.getId().toString());
            }

            restartEvaluatorsBuilder.addRestartEvaluator(EvaluatorRestartInfo.createExpectedEvaluatorInfo(
                    ResourceEventImpl.newRecoveryBuilder().setIdentifier(container.getId().toString())
                            .setNodeId(container.getNodeId().toString())
                            .setRackName(rackNameFormatter.getRackName(container))
                            .setResourceMemory(container.getResource().getMemory())
                            .setVirtualCores(container.getResource().getVirtualCores()).build()));
        }
    }

    return restartEvaluatorsBuilder.build();
}