Example usage for org.apache.hadoop.yarn.util RackResolver resolve

List of usage examples for org.apache.hadoop.yarn.util RackResolver resolve

Introduction

In this page you can find the example usage for org.apache.hadoop.yarn.util RackResolver resolve.

Prototype

public static Node resolve(String hostName) 

Source Link

Document

Utility method for getting a hostname resolved to a node in the network topology.

Usage

From source file: disAMS.AMRMClient.Impl.AMRMClientImpl.java

License: Apache License

/**
 * Resolves the racks (network locations) for the given nodes.
 *
 * @param nodes node host names; may be {@code null}
 * @return the distinct racks of the resolvable nodes; empty when
 *         {@code nodes} is {@code null} or nothing resolves
 */
private Set<String> resolveRacks(List<String> nodes) {
    Set<String> racks = new HashSet<String>();
    if (nodes == null) {
        return racks;
    }
    for (String node : nodes) {
        // Node-level requests must be accompanied by requests for the
        // corresponding rack, so collect each node's rack here.
        String rack = RackResolver.resolve(node).getNetworkLocation();
        if (rack != null) {
            racks.add(rack);
        } else {
            LOG.warn("Failed to resolve rack for node " + node + ".");
        }
    }
    return racks;
}

From source file: org.apache.tajo.master.DefaultFragmentScheduleAlgorithm.java

License: Apache License

/**
 * Registers a fragment pair under its host/disk mapping and under the
 * host's rack mapping, creating the intermediate containers on demand.
 *
 * @param host         the host that stores the fragment
 * @param diskId       the disk on that host holding the fragment
 * @param fragmentPair the fragment pair to register
 */
private void addFragment(String host, Integer diskId, FragmentPair fragmentPair) {
    String normalizedHost = NetUtils.normalizeHost(host);

    // Host -> (disk -> fragments): create missing levels lazily.
    Map<Integer, FragmentsPerDisk> diskMap = fragmentHostMapping.get(normalizedHost);
    if (diskMap == null) {
        diskMap = new HashMap<Integer, FragmentsPerDisk>();
        fragmentHostMapping.put(normalizedHost, diskMap);
    }
    FragmentsPerDisk perDisk = diskMap.get(diskId);
    if (perDisk == null) {
        perDisk = new FragmentsPerDisk(diskId);
        diskMap.put(diskId, perDisk);
    }
    perDisk.addFragmentPair(fragmentPair);

    // Rack -> fragment set: resolve the host's rack and register there too.
    String rack = RackResolver.resolve(normalizedHost).getNetworkLocation();
    Set<FragmentPair> rackFragments = rackFragmentMapping.get(rack);
    if (rackFragments == null) {
        rackFragments = Collections.newSetFromMap(new HashMap<FragmentPair, Boolean>());
        rackFragmentMapping.put(rack, rackFragments);
    }
    rackFragments.add(fragmentPair);
}

From source file: org.apache.tajo.master.DefaultFragmentScheduleAlgorithm.java

License: Apache License

/**
 * Removes the given fragment pair from both the host/disk mapping and the
 * rack mapping, pruning map entries that become empty.
 *
 * @param fragmentPair the fragment pair to remove
 */
@Override
public void removeFragment(FragmentPair fragmentPair) {
    boolean removed = false;
    for (String eachHost : fragmentPair.getLeftFragment().getHosts()) {
        String normalizedHost = NetUtils.normalizeHost(eachHost);
        Map<Integer, FragmentsPerDisk> diskFragmentMap = fragmentHostMapping.get(normalizedHost);
        // Guard against hosts that were never registered via addFragment;
        // the original code dereferenced the map unconditionally and would
        // throw a NullPointerException for an unknown host.
        if (diskFragmentMap != null) {
            for (Entry<Integer, FragmentsPerDisk> entry : diskFragmentMap.entrySet()) {
                FragmentsPerDisk fragmentsPerDisk = entry.getValue();
                removed = fragmentsPerDisk.removeFragmentPair(fragmentPair);
                if (removed) {
                    // Drop the disk entry once empty, and the host entry once
                    // it has no disks left. (Safe: we break out of the
                    // iteration immediately after removing.)
                    if (fragmentsPerDisk.size() == 0) {
                        diskFragmentMap.remove(entry.getKey());
                    }
                    if (diskFragmentMap.size() == 0) {
                        fragmentHostMapping.remove(normalizedHost);
                    }
                    break;
                }
            }
        }
        // Also remove the pair from the host's rack bucket, pruning the
        // rack entry when it becomes empty.
        String rack = RackResolver.resolve(normalizedHost).getNetworkLocation();
        if (rackFragmentMapping.containsKey(rack)) {
            Set<FragmentPair> fragmentPairs = rackFragmentMapping.get(rack);
            fragmentPairs.remove(fragmentPair);
            if (fragmentPairs.size() == 0) {
                rackFragmentMapping.remove(rack);
            }
        }
    }
    if (removed) {
        fragmentNum--;
    }
}

From source file: org.apache.tajo.master.DefaultFragmentScheduleAlgorithm.java

License: Apache License

/**
 * Randomly select a fragment among the fragments stored on nodes of the same rack with the host.
 * @param host/* ww w  .  j  a  v a  2 s.com*/
 * @return a randomly selected fragment
 */
@Override
public FragmentPair getRackLocalFragment(String host) {
    String rack = RackResolver.resolve(host).getNetworkLocation();
    if (rackFragmentMapping.containsKey(rack)) {
        Set<FragmentPair> fragmentPairs = rackFragmentMapping.get(rack);
        if (!fragmentPairs.isEmpty()) {
            return fragmentPairs.iterator().next();
        }
    }
    return null;
}

From source file: org.apache.tez.dag.app.rm.TaskScheduler.java

License: Apache License

/**
 * Handles containers newly allocated by the RM: each container is matched
 * against outstanding requests at node, then rack, then ANY locality.
 * Unmatched containers are released. Matched tasks are reported to the app
 * client outside the lock.
 *
 * @param containers the containers the RM has allocated to this scheduler
 */
@Override
public void onContainersAllocated(List<Container> containers) {
    // Ignore allocations arriving after shutdown.
    if (isStopped) {
        return;
    }
    Map<CookieContainerRequest, Container> assignedContainers = new HashMap<CookieContainerRequest, Container>(
            containers.size());
    synchronized (this) {
        for (Container allocated : containers) {
            // Locality fallback chain: host -> rack -> ANY.
            String location = allocated.getNodeId().getHost();
            CookieContainerRequest request = getMatchingRequest(allocated, location);
            if (request == null) {
                location = RackResolver.resolve(location).getNetworkLocation();
                request = getMatchingRequest(allocated, location);
            }
            if (request == null) {
                location = ResourceRequest.ANY;
                request = getMatchingRequest(allocated, location);
            }
            if (request == null) {
                // Nothing matched: release the container. Probably we
                // cancelled a request and the RM allocated for it before it
                // heard of the cancellation.
                releaseContainer(allocated.getId(), null);
                LOG.info("No RM requests matching container: " + allocated);
                continue;
            }

            Object task = getTask(request);
            assert task != null;
            assignContainer(task, allocated, request);
            assignedContainers.put(request, allocated);

            LOG.info("Assigning container: " + allocated + " for task: " + task + " at locality: " + location
                    + " resource memory: " + allocated.getResource().getMemory() + " cpu: "
                    + allocated.getResource().getVirtualCores());

        }
    }

    // upcall to app must be outside locks
    for (Entry<CookieContainerRequest, Container> entry : assignedContainers.entrySet()) {
        CookieContainerRequest request = entry.getKey();
        appClient.taskAllocated(getTask(request), request.getCookie().appCookie, entry.getValue());
    }
}

From source file: org.apache.tez.mapreduce.grouper.TezSplitGrouper.java

License: Apache License

/**
 * Groups the original splits into roughly {@code desiredNumSplits} grouped
 * splits. Splits are first bucketed by their preferred locations and grouped
 * node-locally; when no node can produce a full group, grouping falls back to
 * rack locality (re-bucketing the leftover splits by rack), and finally
 * undersized ("small") groups are permitted so every split gets placed.
 *
 * @param conf                   configuration holding the TEZ_GROUPING_* knobs
 * @param desiredNumSplits       target group count; may be overridden by config,
 *                               or adjusted to honor min/max group size limits
 * @param wrappedInputFormatName input format name recorded on each grouped split
 * @param estimator              split size estimator (default used when null)
 * @param locationProvider       preferred-location provider (default used when null)
 * @return the grouped splits
 */
public List<GroupedSplitContainer> getGroupedSplits(Configuration conf, List<SplitContainer> originalSplits,
        int desiredNumSplits, String wrappedInputFormatName, SplitSizeEstimatorWrapper estimator,
        SplitLocationProviderWrapper locationProvider) throws IOException, InterruptedException {
    LOG.info("Grouping splits in Tez");
    Preconditions.checkArgument(originalSplits != null, "Splits must be specified");

    int configNumSplits = conf.getInt(TEZ_GROUPING_SPLIT_COUNT, 0);
    if (configNumSplits > 0) {
        // always use config override if specified
        desiredNumSplits = configNumSplits;
        LOG.info("Desired numSplits overridden by config to: " + desiredNumSplits);
    }

    if (estimator == null) {
        estimator = DEFAULT_SPLIT_ESTIMATOR;
    }
    if (locationProvider == null) {
        locationProvider = DEFAULT_SPLIT_LOCATION_PROVIDER;
    }

    List<GroupedSplitContainer> groupedSplits = null;
    // Sentinel for "no known location"; compared by identity (==) below.
    String emptyLocation = "EmptyLocation";
    String localhost = "localhost";
    String[] emptyLocations = { emptyLocation };
    groupedSplits = new ArrayList<GroupedSplitContainer>(desiredNumSplits);

    // True only while every reported split location is literally "localhost"
    // (used below for the S3-style fake-hostname workaround).
    boolean allSplitsHaveLocalhost = true;

    long totalLength = 0;
    Map<String, LocationHolder> distinctLocations = createLocationsMap(conf);
    // go through splits and add them to locations
    for (SplitContainer split : originalSplits) {
        totalLength += estimator.getEstimatedSize(split);
        String[] locations = locationProvider.getPreferredLocations(split);
        if (locations == null || locations.length == 0) {
            locations = emptyLocations;
            allSplitsHaveLocalhost = false;
        }
        for (String location : locations) {
            if (location == null) {
                location = emptyLocation;
                allSplitsHaveLocalhost = false;
            }
            if (!location.equalsIgnoreCase(localhost)) {
                allSplitsHaveLocalhost = false;
            }
            distinctLocations.put(location, null);
        }
    }

    if (!(configNumSplits > 0 || originalSplits.size() == 0)) {
        // numSplits has not been overridden by config
        // numSplits has been set at runtime
        // there are splits generated
        // desired splits is less than number of splits generated
        // Do sanity checks

        int splitCount = desiredNumSplits > 0 ? desiredNumSplits : originalSplits.size();
        long lengthPerGroup = totalLength / splitCount;

        long maxLengthPerGroup = conf.getLong(TEZ_GROUPING_SPLIT_MAX_SIZE, TEZ_GROUPING_SPLIT_MAX_SIZE_DEFAULT);
        long minLengthPerGroup = conf.getLong(TEZ_GROUPING_SPLIT_MIN_SIZE, TEZ_GROUPING_SPLIT_MIN_SIZE_DEFAULT);
        if (maxLengthPerGroup < minLengthPerGroup || minLengthPerGroup <= 0) {
            throw new TezUncheckedException("Invalid max/min group lengths. Required min>0, max>=min. "
                    + " max: " + maxLengthPerGroup + " min: " + minLengthPerGroup);
        }
        if (lengthPerGroup > maxLengthPerGroup) {
            // splits too big to work. Need to override with max size.
            int newDesiredNumSplits = (int) (totalLength / maxLengthPerGroup) + 1;
            LOG.info("Desired splits: " + desiredNumSplits + " too small. " + " Desired splitLength: "
                    + lengthPerGroup + " Max splitLength: " + maxLengthPerGroup + " New desired splits: "
                    + newDesiredNumSplits + " Total length: " + totalLength + " Original splits: "
                    + originalSplits.size());

            desiredNumSplits = newDesiredNumSplits;
        } else if (lengthPerGroup < minLengthPerGroup) {
            // splits too small to work. Need to override with size.
            int newDesiredNumSplits = (int) (totalLength / minLengthPerGroup) + 1;
            /**
             * This is a workaround for systems like S3 that pass the same
             * fake hostname for all splits.
             */
            if (!allSplitsHaveLocalhost) {
                desiredNumSplits = newDesiredNumSplits;
            }

            LOG.info("Desired splits: " + desiredNumSplits + " too large. " + " Desired splitLength: "
                    + lengthPerGroup + " Min splitLength: " + minLengthPerGroup + " New desired splits: "
                    + newDesiredNumSplits + " Final desired splits: " + desiredNumSplits
                    + " All splits have localhost: " + allSplitsHaveLocalhost + " Total length: " + totalLength
                    + " Original splits: " + originalSplits.size());
        }
    }

    if (desiredNumSplits == 0 || originalSplits.size() == 0 || desiredNumSplits >= originalSplits.size()) {
        // nothing set. so return all the splits as is
        LOG.info("Using original number of splits: " + originalSplits.size() + " desired splits: "
                + desiredNumSplits);
        groupedSplits = new ArrayList<GroupedSplitContainer>(originalSplits.size());
        for (SplitContainer split : originalSplits) {
            GroupedSplitContainer newSplit = new GroupedSplitContainer(1, wrappedInputFormatName,
                    cleanupLocations(locationProvider.getPreferredLocations(split)), null);
            newSplit.addSplit(split);
            groupedSplits.add(newSplit);
        }
        return groupedSplits;
    }

    long lengthPerGroup = totalLength / desiredNumSplits;
    int numNodeLocations = distinctLocations.size();
    int numSplitsPerLocation = originalSplits.size() / numNodeLocations;
    int numSplitsInGroup = originalSplits.size() / desiredNumSplits;

    // allocation loop here so that we have a good initial size for the lists
    for (String location : distinctLocations.keySet()) {
        distinctLocations.put(location, new LocationHolder(numSplitsPerLocation + 1));
    }

    // Bucket each split under every distinct location it reports.
    Set<String> locSet = new HashSet<String>();
    for (SplitContainer split : originalSplits) {
        locSet.clear();
        String[] locations = locationProvider.getPreferredLocations(split);
        if (locations == null || locations.length == 0) {
            locations = emptyLocations;
        }
        for (String location : locations) {
            if (location == null) {
                location = emptyLocation;
            }
            locSet.add(location);
        }
        for (String location : locSet) {
            LocationHolder holder = distinctLocations.get(location);
            holder.splits.add(split);
        }
    }

    boolean groupByLength = conf.getBoolean(TEZ_GROUPING_SPLIT_BY_LENGTH, TEZ_GROUPING_SPLIT_BY_LENGTH_DEFAULT);
    boolean groupByCount = conf.getBoolean(TEZ_GROUPING_SPLIT_BY_COUNT, TEZ_GROUPING_SPLIT_BY_COUNT_DEFAULT);
    boolean nodeLocalOnly = conf.getBoolean(TEZ_GROUPING_NODE_LOCAL_ONLY, TEZ_GROUPING_NODE_LOCAL_ONLY_DEFAULT);
    if (!(groupByLength || groupByCount)) {
        throw new TezUncheckedException("None of the grouping parameters are true: "
                + TEZ_GROUPING_SPLIT_BY_LENGTH + ", " + TEZ_GROUPING_SPLIT_BY_COUNT);
    }
    LOG.info("Desired numSplits: " + desiredNumSplits + " lengthPerGroup: " + lengthPerGroup + " numLocations: "
            + numNodeLocations + " numSplitsPerLocation: " + numSplitsPerLocation + " numSplitsInGroup: "
            + numSplitsInGroup + " totalLength: " + totalLength + " numOriginalSplits: " + originalSplits.size()
            + " . Grouping by length: " + groupByLength + " count: " + groupByCount + " nodeLocalOnly: "
            + nodeLocalOnly);

    // go through locations and group splits
    int splitsProcessed = 0;
    List<SplitContainer> group = new ArrayList<SplitContainer>(numSplitsInGroup);
    Set<String> groupLocationSet = new HashSet<String>(10);
    boolean allowSmallGroups = false;
    boolean doingRackLocal = false;
    int iterations = 0;
    while (splitsProcessed < originalSplits.size()) {
        iterations++;
        int numFullGroupsCreated = 0;
        for (Map.Entry<String, LocationHolder> entry : distinctLocations.entrySet()) {
            group.clear();
            groupLocationSet.clear();
            String location = entry.getKey();
            LocationHolder holder = entry.getValue();
            SplitContainer splitContainer = holder.getUnprocessedHeadSplit();
            if (splitContainer == null) {
                // all splits on node processed
                continue;
            }
            int oldHeadIndex = holder.headIndex;
            long groupLength = 0;
            int groupNumSplits = 0;
            // Greedily consume splits at this location until length/count
            // limits (whichever are enabled) would be exceeded.
            do {
                group.add(splitContainer);
                groupLength += estimator.getEstimatedSize(splitContainer);
                groupNumSplits++;
                holder.incrementHeadIndex();
                splitContainer = holder.getUnprocessedHeadSplit();
            } while (splitContainer != null
                    && (!groupByLength
                            || (groupLength + estimator.getEstimatedSize(splitContainer) <= lengthPerGroup))
                    && (!groupByCount || (groupNumSplits + 1 <= numSplitsInGroup)));

            if (holder.isEmpty() && !allowSmallGroups && (!groupByLength || groupLength < lengthPerGroup / 2)
                    && (!groupByCount || groupNumSplits < numSplitsInGroup / 2)) {
                // group too small, reset it
                holder.headIndex = oldHeadIndex;
                continue;
            }

            numFullGroupsCreated++;

            // One split group created
            String[] groupLocation = { location };
            // NOTE: identity (==) comparison against the emptyLocation
            // sentinel string is intentional throughout this method.
            if (location == emptyLocation) {
                groupLocation = null;
            } else if (doingRackLocal) {
                for (SplitContainer splitH : group) {
                    String[] locations = locationProvider.getPreferredLocations(splitH);
                    if (locations != null) {
                        for (String loc : locations) {
                            if (loc != null) {
                                groupLocationSet.add(loc);
                            }
                        }
                    }
                }
                groupLocation = groupLocationSet.toArray(groupLocation);
            }
            GroupedSplitContainer groupedSplit = new GroupedSplitContainer(group.size(), wrappedInputFormatName,
                    groupLocation,
                    // pass rack local hint directly to AM
                    ((doingRackLocal && location != emptyLocation) ? location : null));
            for (SplitContainer groupedSplitContainer : group) {
                groupedSplit.addSplit(groupedSplitContainer);
                Preconditions.checkState(groupedSplitContainer.isProcessed() == false,
                        "Duplicates in grouping at location: " + location);
                groupedSplitContainer.setIsProcessed(true);
                splitsProcessed++;
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("Grouped " + group.size() + " length: " + groupedSplit.getLength() + " split at: "
                        + location);
            }
            groupedSplits.add(groupedSplit);
        }

        if (!doingRackLocal && numFullGroupsCreated < 1) {
            // no node could create a regular node-local group.

            // Allow small groups if that is configured.
            if (nodeLocalOnly && !allowSmallGroups) {
                LOG.info(
                        "Allowing small groups early after attempting to create full groups at iteration: {}, groupsCreatedSoFar={}",
                        iterations, groupedSplits.size());
                allowSmallGroups = true;
                continue;
            }

            // else go rack-local
            doingRackLocal = true;
            // re-create locations
            int numRemainingSplits = originalSplits.size() - splitsProcessed;
            Set<SplitContainer> remainingSplits = new HashSet<SplitContainer>(numRemainingSplits);
            // gather remaining splits.
            for (Map.Entry<String, LocationHolder> entry : distinctLocations.entrySet()) {
                LocationHolder locHolder = entry.getValue();
                while (!locHolder.isEmpty()) {
                    SplitContainer splitHolder = locHolder.getUnprocessedHeadSplit();
                    if (splitHolder != null) {
                        remainingSplits.add(splitHolder);
                        locHolder.incrementHeadIndex();
                    }
                }
            }
            if (remainingSplits.size() != numRemainingSplits) {
                throw new TezUncheckedException(
                        "Expected: " + numRemainingSplits + " got: " + remainingSplits.size());
            }

            // doing all this now instead of up front because the number of remaining
            // splits is expected to be much smaller
            RackResolver.init(conf);
            Map<String, String> locToRackMap = new HashMap<String, String>(distinctLocations.size());
            Map<String, LocationHolder> rackLocations = createLocationsMap(conf);
            for (String location : distinctLocations.keySet()) {
                String rack = emptyLocation;
                if (location != emptyLocation) {
                    rack = RackResolver.resolve(location).getNetworkLocation();
                }
                locToRackMap.put(location, rack);
                if (rackLocations.get(rack) == null) {
                    // splits will probably be located in all racks
                    rackLocations.put(rack, new LocationHolder(numRemainingSplits));
                }
            }
            distinctLocations.clear();
            HashSet<String> rackSet = new HashSet<String>(rackLocations.size());
            int numRackSplitsToGroup = remainingSplits.size();
            for (SplitContainer split : originalSplits) {
                if (numRackSplitsToGroup == 0) {
                    break;
                }
                // Iterate through the original splits in their order and consider them for grouping.
                // This maintains the original ordering in the list and thus subsequent grouping will
                // maintain that order
                if (!remainingSplits.contains(split)) {
                    continue;
                }
                numRackSplitsToGroup--;
                rackSet.clear();
                String[] locations = locationProvider.getPreferredLocations(split);
                if (locations == null || locations.length == 0) {
                    locations = emptyLocations;
                }
                for (String location : locations) {
                    if (location == null) {
                        location = emptyLocation;
                    }
                    rackSet.add(locToRackMap.get(location));
                }
                for (String rack : rackSet) {
                    rackLocations.get(rack).splits.add(split);
                }
            }

            remainingSplits.clear();
            distinctLocations = rackLocations;
            // adjust split length to be smaller because the data is non local
            float rackSplitReduction = conf.getFloat(TEZ_GROUPING_RACK_SPLIT_SIZE_REDUCTION,
                    TEZ_GROUPING_RACK_SPLIT_SIZE_REDUCTION_DEFAULT);
            if (rackSplitReduction > 0) {
                long newLengthPerGroup = (long) (lengthPerGroup * rackSplitReduction);
                int newNumSplitsInGroup = (int) (numSplitsInGroup * rackSplitReduction);
                if (newLengthPerGroup > 0) {
                    lengthPerGroup = newLengthPerGroup;
                }
                if (newNumSplitsInGroup > 0) {
                    numSplitsInGroup = newNumSplitsInGroup;
                }
            }

            LOG.info("Doing rack local after iteration: " + iterations + " splitsProcessed: " + splitsProcessed
                    + " numFullGroupsInRound: " + numFullGroupsCreated + " totalGroups: " + groupedSplits.size()
                    + " lengthPerGroup: " + lengthPerGroup + " numSplitsInGroup: " + numSplitsInGroup);

            // dont do smallGroups for the first pass
            continue;
        }

        if (!allowSmallGroups && numFullGroupsCreated <= numNodeLocations / 10) {
            // a few nodes have a lot of data or data is thinly spread across nodes
            // so allow small groups now
            allowSmallGroups = true;
            LOG.info("Allowing small groups after iteration: " + iterations + " splitsProcessed: "
                    + splitsProcessed + " numFullGroupsInRound: " + numFullGroupsCreated + " totalGroups: "
                    + groupedSplits.size());
        }

        if (LOG.isDebugEnabled()) {
            LOG.debug("Iteration: " + iterations + " splitsProcessed: " + splitsProcessed
                    + " numFullGroupsInRound: " + numFullGroupsCreated + " totalGroups: "
                    + groupedSplits.size());
        }
    }
    LOG.info("Number of splits desired: " + desiredNumSplits + " created: " + groupedSplits.size()
            + " splitsProcessed: " + splitsProcessed);
    return groupedSplits;
}

From source file: org.springframework.yarn.batch.am.AbstractBatchAppmaster.java

License: Apache License

/**
 * Picks a step execution for the allocated container and launches it.
 * Matching preference: a request hint whose hosts contain the container's
 * host, then a hint whose racks contain the container's rack; when neither
 * matches, an arbitrary pending step execution is used as a fallback.
 *
 * @param container the container allocated by the RM
 */
@Override
protected void onContainerAllocated(Container container) {
    if (log.isDebugEnabled()) {
        log.debug("Container allocated: " + container);
    }

    StepExecution stepExecution = null;

    String host = container.getNodeId().getHost();
    String rack = RackResolver.resolve(host).getNetworkLocation();
    if (log.isDebugEnabled()) {
        log.debug("Matching agains: host=" + host + " rack=" + rack);
    }

    // BUG FIX: the original loop conditions tested "stepExecution != null",
    // which is false on entry (stepExecution starts as null), so neither the
    // host- nor the rack-matching loop ever executed and the fallback below
    // was always used. Loop while no match has been found yet.
    Iterator<Entry<StepExecution, ContainerRequestHint>> iterator = requestData.entrySet().iterator();
    while (iterator.hasNext() && stepExecution == null) {
        Entry<StepExecution, ContainerRequestHint> entry = iterator.next();
        if (entry.getValue() != null && entry.getValue().getHosts() != null) {
            for (String h : entry.getValue().getHosts()) {
                if (h.equals(host)) {
                    stepExecution = entry.getKey();
                    break;
                }
            }
        }
    }

    log.debug("stepExecution after hosts match: " + stepExecution);

    // Rack matching is only attempted when the host pass found nothing.
    iterator = requestData.entrySet().iterator();
    while (iterator.hasNext() && stepExecution == null) {
        Entry<StepExecution, ContainerRequestHint> entry = iterator.next();
        if (entry.getValue() != null && entry.getValue().getRacks() != null) {
            for (String r : entry.getValue().getRacks()) {
                if (r.equals(rack)) {
                    stepExecution = entry.getKey();
                    break;
                }
            }
        }
    }

    log.debug("stepExecution after racks match: " + stepExecution);

    try {
        // Fall back to any pending step execution when locality matching
        // found nothing; next() throws NoSuchElementException if the map
        // is empty, which is handled below.
        if (stepExecution == null) {
            stepExecution = requestData.entrySet().iterator().next().getKey();
        }
        requestData.remove(stepExecution);
        containerToStepMap.put(container.getId(), stepExecution);
        getLauncher().launchContainer(container, getCommands());
    } catch (NoSuchElementException e) {
        log.error("We didn't have step execution in request map.", e);
    }
}

From source file: org.springframework.yarn.examples.grid.yarn.GenericContainerGroupResolver.java

License: Apache License

/**
 * Resolves the group names a container belongs to by matching its host
 * (and, when rack resolving is enabled, its rack) against the configured
 * host patterns of each group.
 *
 * @param container the container whose groups are resolved
 * @return the names of all matching groups (possibly empty)
 */
@Override
public List<String> resolveGroupNames(Container container) {
    String containerHost = container.getNodeId().getHost();
    String rack = null;
    if (resolveRacks) {
        rack = RackResolver.resolve(containerHost).getNetworkLocation();
    }
    ArrayList<String> found = new ArrayList<String>();
    for (Entry<String, List<String>> entry : resolves.entrySet()) {
        for (String pattern : entry.getValue()) {
            // A group matches as soon as one of its patterns matches either
            // the host or the rack.
            if (safeMatch(containerHost, pattern) || safeMatch(rack, pattern)) {
                found.add(entry.getKey());
                break;
            }
        }
    }
    return found;
}