Example usage for com.google.common.collect Sets difference

List of usage examples for com.google.common.collect Sets difference

Introduction

On this page you can find example usage for com.google.common.collect Sets.difference.

Prototype

public static <E> SetView<E> difference(final Set<E> set1, final Set<?> set2) 

Document

Returns an unmodifiable view of the difference of two sets.
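
Before the project examples below, here is a minimal, self-contained sketch of the method; the class name and set contents are invented for illustration. The returned SetView is a live, unmodifiable view of the elements of the first set that are not in the second, so callers typically copy it when the backing sets may change or when the result must be kept around.

import java.util.Set;

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.common.collect.Sets.SetView;

public class SetsDifferenceSketch {
    public static void main(String[] args) {
        Set<String> deployed = ImmutableSet.of("web", "api", "worker");
        Set<String> healthy = ImmutableSet.of("web", "api");

        // Elements of `deployed` that are not in `healthy`.
        SetView<String> unhealthy = Sets.difference(deployed, healthy);
        System.out.println(unhealthy); // [worker]

        // The view is unmodifiable and reflects later changes to the backing sets;
        // take an immutable copy when a stable snapshot is needed.
        ImmutableSet<String> snapshot = unhealthy.immutableCopy();
        System.out.println(snapshot); // [worker]
    }
}

Several of the project examples below follow the same pattern: compute the difference between a "known" set and a "reported" set, then copy or iterate the view right away.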

Usage

From source file:org.obiba.mica.core.service.DocumentSetService.java

/**
 * Sets the new list of identifiers on a document set and notifies that some of them have been removed (if any).
 *
 * @param id
 * @param identifiers
 * @return
 */
public DocumentSet setIdentifiers(String id, List<String> identifiers) {
    DocumentSet documentSet = get(id);
    Set<String> removedIdentifiers = Sets.difference(documentSet.getIdentifiers(),
            Sets.newLinkedHashSet(identifiers));
    documentSet.setIdentifiers(identifiers);
    return save(documentSet, removedIdentifiers);
}

From source file:com.eucalyptus.cluster.VmStateHandler.java

public static void updateVmInfo(final VmStateUpdate stateUpdate) {
    UpdateInstanceResourcesType update = new UpdateInstanceResourcesType();
    update.setPartition(stateUpdate.getCluster().getPartition());
    update.setResources(TypeMappers.transform(stateUpdate, InstanceResourceReportType.class));
    final boolean requestBroadcast = Networking.getInstance().update(update);

    if (Databases.isVolatile()) {
        return;
    }

    final Cluster cluster = stateUpdate.getCluster();
    final Set<String> initialInstances = stateUpdate.getRequestedVms();
    final List<VmInfo> vms = stateUpdate.getVmInfos();
    final Map<String, VmStateView> localState = ImmutableMap.copyOf(CollectionUtils.putAll(
            instanceViewSupplier.get(), Maps.<String, VmStateView>newHashMapWithExpectedSize(vms.size()),
            HasName.GET_NAME, Functions.<VmStateView>identity()));

    final Set<String> reportedInstances = Sets.newHashSetWithExpectedSize(vms.size());
    for (VmInfo vmInfo : vms) {
        reportedInstances.add(vmInfo.getInstanceId());
        vmInfo.setPlacement(cluster.getConfiguration().getName());
        VmTypeInfo typeInfo = vmInfo.getInstanceType();
        if (typeInfo.getName() == null || "".equals(typeInfo.getName())) {
            for (VmType t : VmTypes.list()) {
                if (t.getCpu().equals(typeInfo.getCores()) && t.getDisk().equals(typeInfo.getDisk())
                        && t.getMemory().equals(typeInfo.getMemory())) {
                    typeInfo.setName(t.getName());
                }
            }
        }
    }

    final Set<String> unreportedInstances = Sets
            .newHashSet(Sets.difference(initialInstances, reportedInstances));
    if (Databases.isVolatile()) {
        return;
    }

    final Set<String> unknownInstances = Sets.newHashSet(Sets.difference(reportedInstances, initialInstances));

    final List<Optional<Runnable>> taskList = Lists.newArrayList();

    for (final VmInfo runVm : vms) {
        if (initialInstances.contains(runVm.getInstanceId())) {
            taskList.add(UpdateTaskFunction.REPORTED.apply(context(localState, runVm)));
        } else if (unknownInstances.contains(runVm.getInstanceId())) {
            taskList.add(UpdateTaskFunction.UNKNOWN.apply(context(localState, runVm)));
        }
    }
    for (final String vmId : unreportedInstances) {
        taskList.add(UpdateTaskFunction.UNREPORTED.apply(context(localState, vmId)));
    }
    final Optional<Runnable> broadcastRequestRunnable = requestBroadcast
            ? Optional.<Runnable>of(new Runnable() {
                @Override
                public void run() {
                    NetworkInfoBroadcaster.requestNetworkInfoBroadcast();
                }
            })
            : Optional.<Runnable>absent();

    for (final Runnable task : Iterables.concat(Optional.presentInstances(taskList),
            broadcastRequestRunnable.asSet())) {
        Threads.enqueue(ClusterController.class, VmStateHandler.class,
                (Runtime.getRuntime().availableProcessors() * 2) + 1, Executors.callable(task));
    }
}

From source file:google.registry.export.CheckSnapshotAction.java

private void checkAndLoadSnapshotIfComplete() {
    Set<String> kindsToLoad = ImmutableSet.copyOf(Splitter.on(',').split(kindsToLoadParam));
    DatastoreBackupInfo backup = getBackup();
    // Stop now if the backup is not complete.
    if (!backup.getStatus().equals(BackupStatus.COMPLETE)) {
        Duration runningTime = backup.getRunningTime();
        if (runningTime.isShorterThan(MAXIMUM_BACKUP_RUNNING_TIME)) {
            // Backup might still be running, so send a 304 to have the task retry.
            throw new NotModifiedException(String.format("Datastore backup %s still pending", snapshotName));
        } else {
            // Declare the backup a lost cause, and send 204 No Content so the task will
            // not be retried.
            String message = String.format("Datastore backup %s abandoned - not complete after %s",
                    snapshotName, PeriodFormat.getDefault().print(runningTime.toPeriod()
                            .normalizedStandard(PeriodType.dayTime().withMillisRemoved())));
            throw new NoContentException(message);
        }
    }
    // Get a compact string to identify this snapshot in BigQuery by trying to parse the unique
    // suffix out of the snapshot name and falling back to the start time as a string.
    String snapshotId = snapshotName.startsWith(ExportSnapshotAction.SNAPSHOT_PREFIX)
            ? snapshotName.substring(ExportSnapshotAction.SNAPSHOT_PREFIX.length())
            : backup.getStartTime().toString("YYYYMMdd_HHmmss");
    // Log a warning if kindsToLoad is not a subset of the exported snapshot kinds.
    if (!backup.getKinds().containsAll(kindsToLoad)) {
        logger.warningfmt("Kinds to load included non-exported kinds: %s",
                Sets.difference(kindsToLoad, backup.getKinds()));
    }
    // Load kinds from the snapshot, limited to those also in kindsToLoad (if it's present).
    ImmutableSet<String> exportedKindsToLoad = ImmutableSet
            .copyOf(intersection(backup.getKinds(), kindsToLoad));
    String message = String.format("Datastore backup %s complete - ", snapshotName);
    if (exportedKindsToLoad.isEmpty()) {
        message += "no kinds to load into BigQuery";
    } else {
        enqueueLoadSnapshotTask(snapshotId, backup.getGcsFilename().get(), exportedKindsToLoad);
        message += "BigQuery load task enqueued";
    }
    logger.info(message);
    response.setPayload(message);
}

From source file:org.apache.beam.sdk.io.kinesis.StartingPointShardsFinder.java

/**
 * Finds all the shards at the given startingPoint. This method starts by gathering the oldest
 * shards in the stream and considers them as initial shards set. Then it validates the shards by
 * getting an iterator at the given starting point and trying to read some records. If shard
 * passes the validation then it is added to the result shards set. If not then it is regarded as
 * expired and its successors are taken into consideration. This step is repeated until all valid
 * shards are found.
 *
 * <p>The following diagram depicts sample split and merge operations on a stream with 3 initial
 * shards. Let's consider what happens when T1, T2, T3 or T4 timestamps are passed as the
 * startingPoint.
 *
 * <ul>
 *   <li>T1 timestamp (or TRIM_HORIZON marker) - 0000, 0001 and 0002 shards are the oldest so they
 *       are gathered as initial shards set. All of them are valid at T1 timestamp so they are all
 *       returned from the method.
 *   <li>T2 timestamp - 0000, 0001 and 0002 shards form the initial shards set.
 *       <ul>
 *         <li>0000 passes the validation at T2 timestamp so it is added to the result set
 *         <li>0001 does not pass the validation as it is already closed at T2 timestamp so its
 *             successors 0003 and 0004 are considered. Both are valid at T2 timestamp so they are
 *             added to the resulting set.
 *         <li>0002 also does not pass the validation so its successors 0005 and 0006 are
 *             considered and both are valid.
 *       </ul>
 *       Finally the resulting set contains 0000, 0003, 0004, 0005 and 0006 shards.
 *   <li>T3 timestamp - the beginning is the same as in T2 case.
 *       <ul>
 *         <li>0000 is valid
 *         <li>0001 is already closed at T2 timestamp so its successors 0003 and 0004 are next.
 *             0003 is valid but 0004 is already closed at T3 timestamp. It has one successor 0007
 *             which is the result of merging 0004 and 0005 shards. 0007 has two parent shards
 *             then stored in {@link Shard#parentShardId} and {@link Shard#adjacentParentShardId}
 *             fields. Only one of them should follow the relation to its successor so it is
 *             always the shard stored in parentShardId field. Let's assume that it was 0004 shard
 *             and it's the one that considers 0007 its successor. 0007 is valid at T3 timestamp
 *             and it's added to the result set.
 *         <li>0002 is closed at T3 timestamp so its successors 0005 and 0006 are next. 0005 is
 *             also closed because it was merged with 0004 shard. Their successor is 0007 and it
 *             was already considered by 0004 shard so no action here is needed. Shard 0006 is
 *             valid.
 *       </ul>
 *   <li>T4 timestamp (or LATEST marker) - following the same reasoning as in previous cases it
 *       ends up with 0000, 0003, 0008 and 0010 shards.
 * </ul>
 *
 * <pre>
 *      T1                T2          T3                      T4
 *      |                 |           |                       |
 * 0000-----------------------------------------------------------
 *
 *
 *             0003-----------------------------------------------
 *            /
 * 0001------+
 *            \
 *             0004-----------+             0008------------------
 *                             \           /
 *                              0007------+
 *                             /           \
 *                  0005------+             0009------+
 *                 /                                   \
 * 0002-----------+                                     0010------
 *                 \                                   /
 *                  0006------------------------------+
 * </pre>
 */
Set<Shard> findShardsAtStartingPoint(SimplifiedKinesisClient kinesis, String streamName,
        StartingPoint startingPoint) throws TransientKinesisException {
    List<Shard> allShards = kinesis.listShards(streamName);
    Set<Shard> initialShards = findInitialShardsWithoutParents(streamName, allShards);

    Set<Shard> startingPointShards = new HashSet<>();
    Set<Shard> expiredShards;
    do {
        Set<Shard> validShards = validateShards(kinesis, initialShards, streamName, startingPoint);
        startingPointShards.addAll(validShards);
        expiredShards = Sets.difference(initialShards, validShards);
        if (!expiredShards.isEmpty()) {
            LOGGER.info("Following shards expired for {} stream at '{}' starting point: {}", streamName,
                    startingPoint, expiredShards);
        }
        initialShards = findNextShards(allShards, expiredShards);
    } while (!expiredShards.isEmpty());
    return startingPointShards;
}

From source file:com.google.security.zynamics.binnavi.debug.models.breakpoints.BreakpointManager.java

/**
 * This function enforces the type hierarchy of breakpoints.
 *
 * @param addresses The set of addresses for the breakpoints to be added.
 * @param type The type of the breakpoints to be added.
 *
 * @return The Set of breakpoints which has been set.
 */
private Set<BreakpointAddress> enforceBreakpointHierarchy(final Set<BreakpointAddress> addresses,
        final BreakpointType type) {
    final SetView<BreakpointAddress> alreadyRegularBreakpoints = Sets.intersection(addresses,
            indexedBreakpointStorage.getBreakPointAddresses());
    final SetView<BreakpointAddress> alreadySteppingBreakpoints = Sets.intersection(addresses,
            stepBreakpointStorage.getBreakPointAddresses());
    final SetView<BreakpointAddress> alreadyEchoBreakpoints = Sets.intersection(addresses,
            echoBreakpointStorage.getBreakPointAddresses());

    Set<BreakpointAddress> addressesSet = null;

    switch (type) {
    case REGULAR:
        final SetView<BreakpointAddress> notInRegularBreakpoints = Sets.difference(addresses,
                indexedBreakpointStorage.getBreakPointAddresses());
        removeBreakpoints(alreadySteppingBreakpoints, stepBreakpointStorage);
        removeBreakpoints(alreadyEchoBreakpoints, echoBreakpointStorage);
        addressesSet = notInRegularBreakpoints;
        break;

    case STEP:
        final SetView<BreakpointAddress> notInSteppingBreakpoints = Sets.difference(addresses,
                stepBreakpointStorage.getBreakPointAddresses());
        removeBreakpoints(alreadyEchoBreakpoints, echoBreakpointStorage);
        addressesSet = Sets.difference(notInSteppingBreakpoints, alreadyRegularBreakpoints);
        break;

    case ECHO:
        final SetView<BreakpointAddress> notInEchoBreakPoints = Sets.difference(addresses,
                echoBreakpointStorage.getBreakPointAddresses());
        addressesSet = Sets.difference(notInEchoBreakPoints,
                Sets.union(alreadySteppingBreakpoints, alreadyRegularBreakpoints));
        break;
    default:
        throw new IllegalStateException("IE00722: Breakpoint of invalid type");

    }
    return addressesSet;
}

From source file:com.eucalyptus.node.Nodes.java

public static void updateNodeInfo(ServiceConfiguration ccConfig, List<NodeType> nodes) {
    ConcurrentNavigableMap<String, NodeInfo> clusterNodeMap = Clusters.lookup(ccConfig).getNodeMap();
    /** prepare key sets for comparison **/
    Set<String> knownTags = Sets.newHashSet(clusterNodeMap.keySet());
    Set<String> reportedTags = Sets.newHashSet();
    for (final NodeType node : nodes) {
        reportedTags.add(node.getServiceTag());
    }
    /** compute intersections and differences **/
    Set<String> unreportedTags = Sets.difference(knownTags, reportedTags);
    Set<String> newTags = Sets.difference(reportedTags, knownTags);
    Set<String> stillKnownTags = Sets.intersection(knownTags, reportedTags);
    StringBuilder nodeLog = new StringBuilder();
    /** maybe remove unreported nodes **/
    for (String unreportedTag : unreportedTags) {
        NodeInfo unreportedNode = clusterNodeMap.get(unreportedTag);
        if (unreportedNode != null && (System.currentTimeMillis()
                - unreportedNode.getLastSeen().getTime()) > Nodes.REFRESH_TIMEOUT) {
            Topology.destroy(Components.lookup(NodeController.class).lookup(unreportedNode.getName()));
            NodeInfo removed = clusterNodeMap.remove(unreportedTag);
            nodeLog.append("GONE:").append(removed.getName()).append(":").append(removed.getLastState())
                    .append(" ");
        }
    }
    /** add new nodes or update existing node infos **/
    Set<NodeInfo> nodesToUpdate = Sets.newHashSet();
    for (final NodeType node : nodes) {
        try {
            String serviceTag = node.getServiceTag();
            if (newTags.contains(serviceTag)) {
                clusterNodeMap.putIfAbsent(serviceTag, new NodeInfo(ccConfig.getPartition(), node));
                NodeInfo nodeInfo = clusterNodeMap.get(serviceTag);
                nodeLog.append("NEW:").append(nodeInfo.getName()).append(":").append(nodeInfo.getLastState())
                        .append(" ");
                nodesToUpdate.add(nodeInfo);
            } else if (stillKnownTags.contains(serviceTag)) {
                NodeInfo nodeInfo = clusterNodeMap.get(serviceTag);
                nodeInfo.setIqn(node.getIqn());
                nodeLog.append("OLD:").append(nodeInfo.getName()).append(":").append(nodeInfo.getLastState())
                        .append(" ");
                nodesToUpdate.add(nodeInfo);
            }
        } catch (NoSuchElementException e) {
            LOG.error(e);
            LOG.debug(e, e);
        }
    }
    LOG.debug("Updated node info map: " + nodeLog.toString());
    try {
        Nodes.updateServiceConfiguration(ccConfig, nodesToUpdate);
    } catch (Exception e) {
        if (!Component.State.ENABLED.apply(ccConfig))
            LOG.debug("Error while updating nodes: " + e.getMessage(), e);
    }
    /**
     * TODO:GRZE: if not present emulate {@link ClusterController.NodeController} using
     * {@link Component#setup()} TODO:GRZE: emulate update of emulate
     * {@link ClusterController.NodeController} state
     * TODO:GRZE: {@link Component#destroy()} for the NodeControllers which are not reported by the
     * CC.
     */

}

From source file:com.squareup.wire.schema.IdentifierSet.java

public Set<String> unusedIncludes() {
    return Sets.difference(includes, usedIncludes);
}

From source file:com.google.errorprone.bugpatterns.InconsistentHashCode.java

@Override
public Description matchClass(ClassTree tree, VisitorState state) {
    ClassSymbol classSymbol = getSymbol(tree);
    MethodTree equalsDeclaration = null;
    MethodTree hashCodeDeclaration = null;
    for (Tree member : tree.getMembers()) {
        if (!(member instanceof MethodTree)) {
            continue;
        }
        MethodTree methodTree = (MethodTree) member;
        if (hashCodeMethodDeclaration().matches(methodTree, state)) {
            hashCodeDeclaration = methodTree;
        } else if (equalsMethodDeclaration().matches(methodTree, state)) {
            equalsDeclaration = methodTree;
        }
    }
    if (equalsDeclaration == null || hashCodeDeclaration == null) {
        return Description.NO_MATCH;
    }
    // Build up a map of methods to the fields they access for simple methods, i.e. getters.
    // Not a SetMultimap, because we do want to distinguish between "method was not analyzable" and
    // "method accessed no fields".
    Map<MethodSymbol, ImmutableSet<Symbol>> fieldsByMethod = new HashMap<>();
    for (Tree member : tree.getMembers()) {
        if (!(member instanceof MethodTree)) {
            continue;
        }
        MethodTree methodTree = (MethodTree) member;
        if (!methodTree.equals(equalsDeclaration) && !methodTree.equals(hashCodeDeclaration)) {
            FieldAccessFinder finder = FieldAccessFinder.scanMethod(state, classSymbol, methodTree);
            if (!finder.failed()) {
                fieldsByMethod.put(getSymbol(methodTree), finder.accessedFields());
            }
        }
    }
    FieldAccessFinder equalsScanner = FieldAccessFinder.scanMethod(state, classSymbol, equalsDeclaration,
            fieldsByMethod, HASH_CODE_METHODS);
    FieldAccessFinder hashCodeScanner = FieldAccessFinder.scanMethod(state, classSymbol, hashCodeDeclaration,
            fieldsByMethod, EQUALS_METHODS);
    if (equalsScanner.failed() || hashCodeScanner.failed()) {
        return Description.NO_MATCH;
    }
    ImmutableSet<Symbol> fieldsInHashCode = hashCodeScanner.accessedFields();
    ImmutableSet<Symbol> fieldsInEquals = equalsScanner.accessedFields();
    Set<Symbol> difference = new HashSet<>(Sets.difference(fieldsInHashCode, fieldsInEquals));
    // Special-case the situation where #hashCode uses a field containing `hash` for memoization.
    difference.removeIf(f -> Ascii.toLowerCase(f.toString()).contains("hash"));
    String message = String.format(MESSAGE, difference);
    // Skip cases where equals and hashCode compare the same fields, or equals compares none (and
    // so is probably checking reference equality).
    return difference.isEmpty() || fieldsInEquals.isEmpty() ? Description.NO_MATCH
            : buildDescription(hashCodeDeclaration).setMessage(message).build();
}

From source file:co.cask.cdap.data2.dataset2.lib.hbase.AbstractHBaseDataSetAdmin.java

/**
 * Performs upgrade on a given HBase table.
 *
 * @param tableId {@link TableId} for the HBase table that upgrade will be performed on.
 * @throws IOException If upgrade failed.
 */
protected void upgradeTable(TableId tableId) throws IOException {
    HTableDescriptor tableDescriptor = tableUtil.getHTableDescriptor(getAdmin(), tableId);

    // Upgrade any table properties if necessary
    boolean needUpgrade = upgradeTable(tableDescriptor);

    // Get the cdap version from the table
    ProjectInfo.Version version = getVersion(tableDescriptor);

    if (!needUpgrade && version.compareTo(ProjectInfo.getVersion()) >= 0) {
        // If the table has greater than or same version, no need to upgrade.
        LOG.info("Table '{}' was upgraded with same or newer version '{}'. Current version is '{}'", tableId,
                version, ProjectInfo.getVersion());
        return;
    }

    // create a new descriptor for the table upgrade
    HTableDescriptorBuilder newDescriptor = tableUtil.buildHTableDescriptor(tableDescriptor);

    // Generate the coprocessor jar
    CoprocessorJar coprocessorJar = createCoprocessorJar();
    Location jarLocation = coprocessorJar.getJarLocation();

    // Check if coprocessor upgrade is needed
    Map<String, HBaseTableUtil.CoprocessorInfo> coprocessorInfo = HBaseTableUtil
            .getCoprocessorInfo(tableDescriptor);

    // For all required coprocessors, check if they need to be upgraded.
    for (Class<? extends Coprocessor> coprocessor : coprocessorJar.getCoprocessors()) {
        HBaseTableUtil.CoprocessorInfo info = coprocessorInfo.get(coprocessor.getName());
        if (info != null) {
            // The same coprocessor has been configured, check by the file name hash to see if they are the same.
            if (!jarLocation.getName().equals(info.getPath().getName())) {
                needUpgrade = true;
                // Remove old one and add the new one.
                newDescriptor.removeCoprocessor(info.getClassName());
                addCoprocessor(newDescriptor, coprocessor, jarLocation,
                        coprocessorJar.getPriority(coprocessor));
            }
        } else {
            // The coprocessor is missing from the table, add it.
            needUpgrade = true;
            addCoprocessor(newDescriptor, coprocessor, jarLocation, coprocessorJar.getPriority(coprocessor));
        }
    }

    // Removes all old coprocessors
    Set<String> coprocessorNames = ImmutableSet
            .copyOf(Iterables.transform(coprocessorJar.coprocessors, CLASS_TO_NAME));
    for (String remove : Sets.difference(coprocessorInfo.keySet(), coprocessorNames)) {
        needUpgrade = true;
        newDescriptor.removeCoprocessor(remove);
    }

    if (!needUpgrade) {
        LOG.info("No upgrade needed for table '{}'", tableId);
        return;
    }

    setVersion(newDescriptor);

    LOG.info("Upgrading table '{}'...", tableId);
    boolean enableTable = false;
    try {
        tableUtil.disableTable(getAdmin(), tableId);
        enableTable = true;
    } catch (TableNotEnabledException e) {
        LOG.debug("Table '{}' not enabled when try to disable it.", tableId);
    }

    tableUtil.modifyTable(getAdmin(), newDescriptor.build());
    if (enableTable) {
        tableUtil.enableTable(getAdmin(), tableId);
    }

    LOG.info("Table '{}' upgrade completed.", tableId);
}

From source file:org.apache.cassandra.service.PendingRangeCalculatorService.java

/**
 * Calculate pending ranges according to bootstrapping and leaving nodes. Reasoning is:
 *
 * (1) When in doubt, it is better to write too much to a node than too little. That is, if
 * there are multiple nodes moving, calculate the biggest ranges a node could have. Cleaning
 * up unneeded data afterwards is better than missing writes during movement.
 * (2) When a node leaves, ranges for other nodes can only grow (a node might get additional
 * ranges, but it will not lose any of its current ranges as a result of a leave). Therefore
 * we will first remove _all_ leaving tokens for the sake of calculation and then check what
 * ranges would go where if all nodes are to leave. This way we get the biggest possible
 * ranges with regard to current leave operations, covering all subsets of possible final range
 * values.
 * (3) When a node bootstraps, ranges of other nodes can only get smaller. Without doing
 * complex calculations to see if multiple bootstraps overlap, we simply base calculations
 * on the same token ring used before (reflecting situation after all leave operations have
 * completed). Bootstrapping nodes will be added and removed one by one to that metadata and
 * checked what their ranges would be. This will give us the biggest possible ranges the
 * node could have. It might be that other bootstraps make our actual final ranges smaller,
 * but it does not matter as we can clean up the data afterwards.
 *
 * NOTE: This is a heavy and inefficient operation. This will be done only once when a node
 * changes state in the cluster, so it should be manageable.
 */
// public & static for testing purposes
public static void calculatePendingRanges(AbstractReplicationStrategy strategy, String keyspaceName) {
    TokenMetadata tm = StorageService.instance.getTokenMetadata();
    Multimap<Range<Token>, InetAddress> pendingRanges = HashMultimap.create();
    BiMultiValMap<Token, InetAddress> bootstrapTokens = tm.getBootstrapTokens();
    Set<InetAddress> leavingEndpoints = tm.getLeavingEndpoints();

    if (bootstrapTokens.isEmpty() && leavingEndpoints.isEmpty() && tm.getMovingEndpoints().isEmpty()) {
        if (logger.isDebugEnabled())
            logger.debug(
                    "No bootstrapping, leaving or moving nodes, and no relocating tokens -> empty pending ranges for {}",
                    keyspaceName);
        tm.setPendingRanges(keyspaceName, pendingRanges);
        return;
    }

    Multimap<InetAddress, Range<Token>> addressRanges = strategy.getAddressRanges();

    // Copy of metadata reflecting the situation after all leave operations are finished.
    TokenMetadata allLeftMetadata = tm.cloneAfterAllLeft();

    // get all ranges that will be affected by leaving nodes
    Set<Range<Token>> affectedRanges = new HashSet<Range<Token>>();
    for (InetAddress endpoint : leavingEndpoints)
        affectedRanges.addAll(addressRanges.get(endpoint));

    // for each of those ranges, find what new nodes will be responsible for the range when
    // all leaving nodes are gone.
    TokenMetadata metadata = tm.cloneOnlyTokenMap(); // don't do this in the loop! #7758
    for (Range<Token> range : affectedRanges) {
        Set<InetAddress> currentEndpoints = ImmutableSet
                .copyOf(strategy.calculateNaturalEndpoints(range.right, metadata));
        Set<InetAddress> newEndpoints = ImmutableSet
                .copyOf(strategy.calculateNaturalEndpoints(range.right, allLeftMetadata));
        pendingRanges.putAll(range, Sets.difference(newEndpoints, currentEndpoints));
    }

    // At this stage pendingRanges has been updated according to leave operations. We can
    // now continue the calculation by checking bootstrapping nodes.

    // For each of the bootstrapping nodes, simply add and remove them one by one to
    // allLeftMetadata and check in between what their ranges would be.
    Multimap<InetAddress, Token> bootstrapAddresses = bootstrapTokens.inverse();
    for (InetAddress endpoint : bootstrapAddresses.keySet()) {
        Collection<Token> tokens = bootstrapAddresses.get(endpoint);

        allLeftMetadata.updateNormalTokens(tokens, endpoint);
        for (Range<Token> range : strategy.getAddressRanges(allLeftMetadata).get(endpoint))
            pendingRanges.put(range, endpoint);
        allLeftMetadata.removeEndpoint(endpoint);
    }

    // At this stage pendingRanges has been updated according to leaving and bootstrapping nodes.
    // We can now finish the calculation by checking moving and relocating nodes.

    // For each of the moving nodes, we do the same thing we did for bootstrapping:
    // simply add and remove them one by one to allLeftMetadata and check in between what their ranges would be.
    for (Pair<Token, InetAddress> moving : tm.getMovingEndpoints()) {
        InetAddress endpoint = moving.right; // address of the moving node

        //  moving.left is a new token of the endpoint
        allLeftMetadata.updateNormalToken(moving.left, endpoint);

        for (Range<Token> range : strategy.getAddressRanges(allLeftMetadata).get(endpoint)) {
            pendingRanges.put(range, endpoint);
        }

        allLeftMetadata.removeEndpoint(endpoint);
    }

    tm.setPendingRanges(keyspaceName, pendingRanges);

    if (logger.isDebugEnabled())
        logger.debug("Pending ranges:\n" + (pendingRanges.isEmpty() ? "<empty>" : tm.printPendingRanges()));
}