Example usage for org.apache.hadoop.yarn.api.records Priority compareTo

List of usage examples for org.apache.hadoop.yarn.api.records Priority compareTo

Introduction

On this page you can find example usages of the compareTo method of org.apache.hadoop.yarn.api.records.Priority.

Prototype

@Override
public int compareTo(Priority other)
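
As a quick, self-contained illustration before the full scheduler examples below, the following sketch simply compares two Priority instances. It only assumes the public factory method Priority.newInstance(int). The expected signs in the comments follow the convention the scheduler code below relies on, where the more urgent priority compares as greater under compareTo; verify this ordering against the Priority implementation in your Hadoop version.

import org.apache.hadoop.yarn.api.records.Priority;

public class PriorityCompareToExample {
    public static void main(String[] args) {
        // For container requests within an application, a numerically smaller
        // priority value is typically used for the more urgent requests.
        Priority urgent = Priority.newInstance(1);
        Priority relaxed = Priority.newInstance(10);

        // Assuming the more urgent priority compares as greater (as the Tez
        // scheduler below interprets it), the first result should be positive,
        // the second negative, and the third zero.
        System.out.println("urgent.compareTo(relaxed) = " + urgent.compareTo(relaxed));
        System.out.println("relaxed.compareTo(urgent) = " + relaxed.compareTo(urgent));
        System.out.println("equal values = " + urgent.compareTo(Priority.newInstance(1)));
    }
}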

Usage

From source file: org.apache.tez.dag.app.rm.YarnTaskSchedulerService.java

License: Apache License

/**
 * Try to assign a re-used container
 * @param heldContainer Container to be used to assign to tasks
 * @return Assigned container map
 */
private synchronized Map<CookieContainerRequest, Container> assignDelayedContainer(
        HeldContainer heldContainer) {

    DAGAppMasterState state = appContext.getAMState();
    boolean isNew = heldContainer.isNew();
    if (LOG.isDebugEnabled()) {
        LOG.debug(
                "Trying to assign a delayed container" + ", containerId=" + heldContainer.getContainer().getId()
                        + ", nextScheduleTime=" + heldContainer.getNextScheduleTime() + ", containerExpiryTime="
                        + heldContainer.getContainerExpiryTime() + ", AMState=" + state + ", matchLevel="
                        + heldContainer.getLocalityMatchLevel() + ", taskRequestsCount=" + taskRequests.size()
                        + ", heldContainers=" + heldContainers.size() + ", delayedContainers="
                        + delayedContainerManager.delayedContainers.size() + ", isNew=" + isNew);
    }

    if (state.equals(DAGAppMasterState.IDLE) || taskRequests.isEmpty()) {
        // reset locality level on held container
        // if sessionDelay defined, push back into delayed queue if not already
        // done so

        // Compute min held containers.
        if (appContext.isSession() && sessionNumMinHeldContainers > 0 && sessionMinHeldContainers.isEmpty()) {
            // session mode and need to hold onto containers and not done so already
            determineMinHeldContainers();
        }

        heldContainer.resetLocalityMatchLevel();
        long currentTime = System.currentTimeMillis();
        boolean releaseContainer = false;

        if (isNew || (heldContainer.getContainerExpiryTime() <= currentTime && idleContainerTimeoutMin != -1)) {
            // container idle timeout has expired or is a new unused container. 
            // new container is possibly a spurious race condition allocation.
            if (!isNew && appContext.isSession()
                    && sessionMinHeldContainers.contains(heldContainer.getContainer().getId())) {
                // Not a potentially spurious new container.
                // In session mode and container in set of chosen min held containers
                // increase the idle container expire time to maintain sanity with 
                // the rest of the code
                heldContainer.setContainerExpiryTime(getHeldContainerExpireTime(currentTime));
            } else {
                releaseContainer = true;
            }
        }

        if (releaseContainer) {
            LOG.info("No taskRequests. Container's idle timeout delay expired or is new. "
                    + "Releasing container" + ", containerId=" + heldContainer.container.getId()
                    + ", containerExpiryTime=" + heldContainer.getContainerExpiryTime() + ", idleTimeout="
                    + idleContainerTimeoutMin + ", taskRequestsCount=" + taskRequests.size()
                    + ", heldContainers=" + heldContainers.size() + ", delayedContainers="
                    + delayedContainerManager.delayedContainers.size() + ", isNew=" + isNew);
            releaseUnassignedContainers(Lists.newArrayList(heldContainer.container));
        } else {
            // no outstanding work and container idle timeout not expired
            if (LOG.isDebugEnabled()) {
                LOG.debug(
                        "Holding onto idle container with no work. CId: " + heldContainer.getContainer().getId()
                                + " with expiry: " + heldContainer.getContainerExpiryTime() + " currentTime: "
                                + currentTime + " next look: " + (currentTime + localitySchedulingDelay));
            }
            // put back and wait for new requests until expiry
            heldContainer.resetLocalityMatchLevel();
            delayedContainerManager.addDelayedContainer(heldContainer.getContainer(),
                    currentTime + localitySchedulingDelay);
        }
    } else if (state.equals(DAGAppMasterState.RUNNING)) {
        // clear min held containers since we need to allocate to tasks
        sessionMinHeldContainers.clear();
        HeldContainer.LocalityMatchLevel localityMatchLevel = heldContainer.getLocalityMatchLevel();
        Map<CookieContainerRequest, Container> assignedContainers = new HashMap<CookieContainerRequest, Container>();

        Container containerToAssign = heldContainer.container;

        heldContainer.incrementAssignmentAttempts();
        // Each time a container is seen, we try node, rack and non-local in that
        // order depending on matching level allowed

        // if match level is NEW or NODE, match only at node-local
        // always try node local matches for other levels
        if (isNew || localityMatchLevel.equals(HeldContainer.LocalityMatchLevel.NEW)
                || localityMatchLevel.equals(HeldContainer.LocalityMatchLevel.NODE)
                || localityMatchLevel.equals(HeldContainer.LocalityMatchLevel.RACK)
                || localityMatchLevel.equals(HeldContainer.LocalityMatchLevel.NON_LOCAL)) {
            assignReUsedContainerWithLocation(containerToAssign, NODE_LOCAL_ASSIGNER, assignedContainers, true);
            if (LOG.isDebugEnabled() && assignedContainers.isEmpty()) {
                LOG.info("Failed to assign tasks to delayed container using node" + ", containerId="
                        + heldContainer.getContainer().getId());
            }
        }

        // if re-use allowed at rack
        // match against rack if match level is RACK or NON-LOCAL
        // if scheduling delay is 0, match at RACK allowed without a sleep
        if (assignedContainers.isEmpty()) {
            if ((reuseRackLocal || isNew) && (localitySchedulingDelay == 0
                    || (localityMatchLevel.equals(HeldContainer.LocalityMatchLevel.RACK)
                            || localityMatchLevel.equals(HeldContainer.LocalityMatchLevel.NON_LOCAL)))) {
                assignReUsedContainerWithLocation(containerToAssign, RACK_LOCAL_ASSIGNER, assignedContainers,
                        false);
                if (LOG.isDebugEnabled() && assignedContainers.isEmpty()) {
                    LOG.info("Failed to assign tasks to delayed container using rack" + ", containerId="
                            + heldContainer.getContainer().getId());
                }
            }
        }

        // if re-use allowed at non-local
        // match at non-local if match level is NON-LOCAL
        // if scheduling delay is 0, match at NON-LOCAL allowed without a sleep
        if (assignedContainers.isEmpty()) {
            if ((reuseNonLocal || isNew) && (localitySchedulingDelay == 0
                    || localityMatchLevel.equals(HeldContainer.LocalityMatchLevel.NON_LOCAL))) {
                assignReUsedContainerWithLocation(containerToAssign, NON_LOCAL_ASSIGNER, assignedContainers,
                        false);
                if (LOG.isDebugEnabled() && assignedContainers.isEmpty()) {
                    LOG.info("Failed to assign tasks to delayed container using non-local" + ", containerId="
                            + heldContainer.getContainer().getId());
                }
            }
        }

        if (assignedContainers.isEmpty()) {

            long currentTime = System.currentTimeMillis();

            // Release container if final expiry time is reached
            // Don't release a new container. The RM may not give us new ones
            // The assumption is that the expire time is larger than the sum of all
            // locality delays. So if we hit the expire time then we have already 
            // tried to assign at all locality levels.
            // We run the risk of not being able to retain min held containers but 
            // if we are not able to assign containers to pending tasks then 
            // we cannot avoid releasing containers. Or else we may not be able to 
            // get new containers from YARN to match the pending request
            if (!isNew && heldContainer.getContainerExpiryTime() <= currentTime
                    && idleContainerTimeoutMin != -1) {
                LOG.info("Container's idle timeout expired. Releasing container" + ", containerId="
                        + heldContainer.container.getId() + ", containerExpiryTime="
                        + heldContainer.getContainerExpiryTime() + ", idleTimeoutMin="
                        + idleContainerTimeoutMin);
                releaseUnassignedContainers(Lists.newArrayList(heldContainer.container));
            } else {

                // Let's decide if this container has hit the end of the road

                // EOL true if container's match level is NON-LOCAL
                boolean hitFinalMatchLevel = localityMatchLevel
                        .equals(HeldContainer.LocalityMatchLevel.NON_LOCAL);
                if (!hitFinalMatchLevel) {
                    // EOL also true if locality delay is 0
                    // or rack-local or non-local is disabled
                    heldContainer.incrementLocalityMatchLevel();
                    if (localitySchedulingDelay == 0 || (!reuseRackLocal || (!reuseNonLocal && heldContainer
                            .getLocalityMatchLevel().equals(HeldContainer.LocalityMatchLevel.NON_LOCAL)))) {
                        hitFinalMatchLevel = true;
                    }
                    // the above if-stmt does not apply to new containers since they will
                    // be matched at all locality levels. So their finalMatchLevel cannot
                    // be short-circuited
                    if (localitySchedulingDelay > 0 && isNew) {
                        hitFinalMatchLevel = false;
                    }
                }

                if (hitFinalMatchLevel) {
                    boolean safeToRelease = true;
                    Priority topPendingPriority = amRmClient.getTopPriority();
                    Priority containerPriority = heldContainer.container.getPriority();
                    if (isNew && topPendingPriority != null
                            && containerPriority.compareTo(topPendingPriority) < 0) {
                        // this container is of lower priority and given to us by the RM for
                        // a task that will be matched after the current top priority. Keep 
                        // this container for those pending tasks since the RM is not going
                        // to give this container to us again
                        safeToRelease = false;
                    }

                    // Are there any pending requests at any priority?
                    // release if there are tasks or this is not a session
                    if (safeToRelease && (!taskRequests.isEmpty() || !appContext.isSession())) {
                        LOG.info("Releasing held container as either there are pending but "
                                + " unmatched requests or this is not a session" + ", containerId="
                                + heldContainer.container.getId() + ", pendingTasks=" + taskRequests.size()
                                + ", isSession=" + appContext.isSession() + ". isNew=" + isNew);
                        releaseUnassignedContainers(Lists.newArrayList(heldContainer.container));
                    } else {
                        // if no tasks, treat this like an idle session
                        heldContainer.resetLocalityMatchLevel();
                        delayedContainerManager.addDelayedContainer(heldContainer.getContainer(),
                                currentTime + localitySchedulingDelay);
                    }
                } else {
                    // Schedule delay container to match at a later try
                    delayedContainerManager.addDelayedContainer(heldContainer.getContainer(),
                            currentTime + localitySchedulingDelay);
                }
            }
        } else if (LOG.isDebugEnabled()) {
            LOG.debug("Delayed container assignment successful" + ", containerId="
                    + heldContainer.getContainer().getId());
        }

        return assignedContainers;
    } else {
        // ignore all other cases?
        LOG.warn("Received a request to assign re-used containers when AM was " + " in state: " + state
                + ". Ignoring request and releasing container" + ": " + heldContainer.getContainer().getId());
        releaseUnassignedContainers(Lists.newArrayList(heldContainer.container));
    }

    return null;
}
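
In the example above, Priority.compareTo appears in the hitFinalMatchLevel branch: a brand-new container whose priority compares below the current top pending priority was given to us for a lower-priority request that has not been matched yet, so the scheduler holds onto it instead of releasing it, because the RM will not allocate that container again. A rough sketch of that guard pulled out into a hypothetical standalone helper (not part of the Tez code) could look like this:

import org.apache.hadoop.yarn.api.records.Priority;

final class ReleaseGuard {

    private ReleaseGuard() {
    }

    // Mirrors the safeToRelease decision in the method above: a new container
    // whose priority ranks below the top pending priority is kept for the
    // lower-priority pending tasks, since the RM will not hand it out again.
    static boolean safeToRelease(boolean isNew, Priority containerPriority,
            Priority topPendingPriority) {
        return !(isNew && topPendingPriority != null
                && containerPriority.compareTo(topPendingPriority) < 0);
    }
}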

From source file: org.apache.tez.dag.app.rm.YarnTaskSchedulerService.java

License: Apache License

private synchronized boolean assignReUsedContainerWithLocation(Container container, ContainerAssigner assigner,
        Map<CookieContainerRequest, Container> assignedContainers, boolean honorLocality) {

    Priority containerPriority = container.getPriority();
    Priority topPendingTaskPriority = amRmClient.getTopPriority();
    if (topPendingTaskPriority == null) {
        // nothing left to assign
        return false;
    }

    if (topPendingTaskPriority.compareTo(containerPriority) > 0
            && heldContainers.get(container.getId()).isNew()) {
        // if the next task to assign is higher priority than the container then 
        // don't assign this container to that task.
        // if task and container are equal priority - then it's first use or reuse
        // within the same priority - safe to use
        // if task is lower priority than container then if we use a container that
        // is no longer needed by higher priority tasks. All those higher pri tasks 
        // have been assigned resources - safe to use (first use or reuse)
        // if task is higher priority than container then we may end up using a 
        // container that was assigned by the RM for a lower priority pending task 
        // that will be assigned after this higher priority task is assigned. If we
        // use that task's container now then we may not be able to match this 
        // container to that task later on. However the RM has already assigned us 
        // all containers and is not going to give us new containers. We will get 
        // stuck for resources.
        // the above applies for new containers. If a container has already been 
        // re-used then this is not relevant
        return false;
    }

    CookieContainerRequest assigned = assigner.assignReUsedContainer(container, honorLocality);
    if (assigned != null) {
        assignedContainers.put(assigned, container);
        return true;
    }
    return false;
}
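
Both examples obtain the most urgent outstanding request priority from amRmClient.getTopPriority(), a Tez-internal helper. As a rough, hypothetical sketch (not the actual Tez implementation), a similar helper can be built directly on Priority.compareTo by taking the maximum under that ordering, which is exactly how the methods above interpret the returned value:

import java.util.Collection;

import org.apache.hadoop.yarn.api.records.Priority;

final class PriorityUtil {

    private PriorityUtil() {
    }

    // Returns the most urgent priority among the pending request priorities,
    // or null if there are none. "Most urgent" means the maximum under
    // Priority.compareTo, matching how the schedulers above treat the value
    // returned by their getTopPriority() helper.
    static Priority topPriority(Collection<Priority> pendingPriorities) {
        Priority top = null;
        for (Priority p : pendingPriorities) {
            if (top == null || p.compareTo(top) > 0) {
                top = p;
            }
        }
        return top;
    }
}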