Example usage for com.google.common.collect MapDifference entriesDiffering

Introduction

This page lists example usages of com.google.common.collect.MapDifference.entriesDiffering(), drawn from open-source projects.

Prototype

Map<K, ValueDifference<V>> entriesDiffering();

Document

Returns an unmodifiable map describing keys that appear in both maps, but with different values.
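
Before the project examples, here is a minimal self-contained sketch of the call (class and variable names are illustrative, not taken from any of the projects below):

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.MapDifference;
import com.google.common.collect.MapDifference.ValueDifference;
import com.google.common.collect.Maps;

import java.util.Map;

public class EntriesDifferingExample {
    public static void main(String[] args) {
        Map<String, String> left = ImmutableMap.of("a", "1", "b", "2", "c", "3");
        Map<String, String> right = ImmutableMap.of("a", "1", "b", "20", "d", "4");

        MapDifference<String, String> diff = Maps.difference(left, right);

        // Keys present in both maps but mapped to unequal values.
        Map<String, ValueDifference<String>> differing = diff.entriesDiffering();

        for (Map.Entry<String, ValueDifference<String>> entry : differing.entrySet()) {
            // leftValue() comes from the first map passed to Maps.difference(),
            // rightValue() from the second.
            System.out.printf("%s: %s -> %s%n", entry.getKey(),
                    entry.getValue().leftValue(), entry.getValue().rightValue());
        }
        // Prints: b: 2 -> 20
    }
}

The returned map is unmodifiable; attempting to change it throws UnsupportedOperationException.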

Usage

From source file:org.apache.stratos.cloud.controller.publisher.CartridgeInstanceDataPublisherTask.java

private static void populateNewlyAddedOrStateChangedNodes(Map<String, String> newMap) {

    MapDifference<String, String> diff = Maps.difference(newMap,
            FasterLookUpDataHolder.getInstance().getNodeIdToStatusMap());

    // adding newly added nodes
    Map<String, String> newlyAddedNodes = diff.entriesOnlyOnLeft();

    for (Map.Entry<String, String> entry : newlyAddedNodes.entrySet()) {
        String key = entry.getKey();
        String val = entry.getValue();
        ServiceContext ctxt = FasterLookUpDataHolder.getInstance().getServiceContext(key);

        log.debug("------ Node id: " + key + " --- node status: " + val + " -------- ctxt: " + ctxt);

        if (ctxt != null && key != null && val != null) {
            // bundle the data to be published
            bundleData(key, val, ctxt);
        }

    }

    // adding nodes with state changes
    Map<String, ValueDifference<String>> stateChangedNodes = diff.entriesDiffering();

    for (Map.Entry<String, ValueDifference<String>> entry : stateChangedNodes.entrySet()) {

        String key = entry.getKey();
        String newState = entry.getValue().leftValue();
        ServiceContext ctxt = FasterLookUpDataHolder.getInstance().getServiceContext(key);

        log.debug("------- Node id: " + key + " --- node status: " + newState + " -------- ctxt: " + ctxt);

        if (ctxt != null && key != null && newState != null) {
            // bundle the data to be published
            bundleData(key, newState, ctxt);
        }

    }

}

From source file:com.facebook.buck.rules.ActionGraphCache.java

/**
 * Compares the cached ActionGraph with a newly generated from the targetGraph. The comparison
 * is done by generating and comparing content agnostic RuleKeys. In case of mismatch, the
 * mismatching BuildRules are printed and the building process is stopped.
 * @param eventBus Buck's event bus.
 * @param lastActionGraphAndResolver The cached version of the graph that gets compared.
 * @param targetGraph Used to generate the actionGraph that gets compared with lastActionGraph.
 */
private void compareActionGraphs(final BuckEventBus eventBus,
        final ActionGraphAndResolver lastActionGraphAndResolver, final TargetGraph targetGraph,
        final int keySeed) {
    try (SimplePerfEvent.Scope scope = SimplePerfEvent.scope(eventBus,
            PerfEventId.of("ActionGraphCacheCheck"))) {
        // We check that the lastActionGraph is not null because it's possible we had an
        // invalidateCache() between the scheduling and the execution of this task.
        LOG.info("ActionGraph integrity check spawned.");
        Pair<TargetGraph, ActionGraphAndResolver> newActionGraph = new Pair<TargetGraph, ActionGraphAndResolver>(
                targetGraph,
                createActionGraph(eventBus, new DefaultTargetNodeToBuildRuleTransformer(), targetGraph));

        Map<BuildRule, RuleKey> lastActionGraphRuleKeys = getRuleKeysFromBuildRules(
                lastActionGraphAndResolver.getActionGraph().getNodes(),
                lastActionGraphAndResolver.getResolver(), keySeed);
        Map<BuildRule, RuleKey> newActionGraphRuleKeys = getRuleKeysFromBuildRules(
                newActionGraph.getSecond().getActionGraph().getNodes(),
                newActionGraph.getSecond().getResolver(), keySeed);

        if (!lastActionGraphRuleKeys.equals(newActionGraphRuleKeys)) {
            invalidateCache();
            String mismatchInfo = "RuleKeys of cached and new ActionGraph don't match:\n";
            MapDifference<BuildRule, RuleKey> mismatchedRules = Maps.difference(lastActionGraphRuleKeys,
                    newActionGraphRuleKeys);
            mismatchInfo += "Number of nodes in common/differing: " + mismatchedRules.entriesInCommon().size()
                    + "/" + mismatchedRules.entriesDiffering().size() + "\n"
                    + "Entries only in the cached ActionGraph: " + mismatchedRules.entriesOnlyOnLeft().size()
                    + "Entries only in the newly created ActionGraph: "
                    + mismatchedRules.entriesOnlyOnRight().size() + "The rules that did not match:\n";
            mismatchInfo += mismatchedRules.entriesDiffering().keySet().toString();
            LOG.error(mismatchInfo);
            throw new RuntimeException(mismatchInfo);
        }
    }
}

From source file:com.facebook.buck.core.model.actiongraph.computation.ActionGraphProvider.java

/**
 * Compares the cached ActionGraph with a newly generated from the targetGraph. The comparison is
 * done by generating and comparing content agnostic RuleKeys. In case of mismatch, the
 * mismatching BuildRules are printed and the building process is stopped.
 *
 * @param lastActionGraphAndBuilder The cached version of the graph that gets compared.
 * @param targetGraph Used to generate the actionGraph that gets compared with lastActionGraph.
 * @param fieldLoader
 * @param ruleKeyLogger The logger to use (if any) when computing the new action graph
 */
private void compareActionGraphs(ActionGraphAndBuilder lastActionGraphAndBuilder,
        TargetNodeToBuildRuleTransformer transformer, TargetGraph targetGraph, RuleKeyFieldLoader fieldLoader,
        Optional<ThriftRuleKeyLogger> ruleKeyLogger) {
    try (SimplePerfEvent.Scope scope = SimplePerfEvent.scope(eventBus,
            PerfEventId.of("ActionGraphCacheCheck"))) {
        // We check that the lastActionGraph is not null because it's possible we had an
        // invalidateCache() between the scheduling and the execution of this task.
        LOG.info("ActionGraph integrity check spawned.");
        ActionGraphAndBuilder newActionGraph = createActionGraph(transformer, targetGraph,
                IncrementalActionGraphMode.DISABLED);

        Map<BuildRule, RuleKey> lastActionGraphRuleKeys = getRuleKeysFromBuildRules(
                lastActionGraphAndBuilder.getActionGraph().getNodes(),
                lastActionGraphAndBuilder.getActionGraphBuilder(), fieldLoader,
                Optional.empty() /* Only log once, and only for the new graph */);
        Map<BuildRule, RuleKey> newActionGraphRuleKeys = getRuleKeysFromBuildRules(
                newActionGraph.getActionGraph().getNodes(), newActionGraph.getActionGraphBuilder(), fieldLoader,
                ruleKeyLogger);

        if (!lastActionGraphRuleKeys.equals(newActionGraphRuleKeys)) {
            actionGraphCache.invalidateCache();
            String mismatchInfo = "RuleKeys of cached and new ActionGraph don't match:\n";
            MapDifference<BuildRule, RuleKey> mismatchedRules = Maps.difference(lastActionGraphRuleKeys,
                    newActionGraphRuleKeys);
            mismatchInfo += "Number of nodes in common/differing: " + mismatchedRules.entriesInCommon().size()
                    + "/" + mismatchedRules.entriesDiffering().size() + "\n"
                    + "Entries only in the cached ActionGraph: " + mismatchedRules.entriesOnlyOnLeft().size()
                    + "Entries only in the newly created ActionGraph: "
                    + mismatchedRules.entriesOnlyOnRight().size() + "The rules that did not match:\n";
            mismatchInfo += mismatchedRules.entriesDiffering().keySet().toString();
            LOG.error(mismatchInfo);
            throw new RuntimeException(mismatchInfo);
        }
    }
}

From source file:models.DeploymentDiff.java

/**
 * Computes the changes from one manifest to another.
 *
 * @param oldManifest the old manifest
 * @param newManifest the new manifest
 * @return a list of package changes
 */
List<PackageChange> getPackageChanges(final Manifest oldManifest, final Manifest newManifest) {
    final List<PackageVersion> oldPackages = oldManifest.getPackages();
    final ImmutableMap<String, PackageVersion> oldMap = Maps.uniqueIndex(oldPackages,
            (v) -> v.getPkg().getName());
    final ImmutableMap<String, PackageVersion> newMap = Maps.uniqueIndex(newManifest.getPackages(),
            (v) -> v.getPkg().getName());
    final MapDifference<String, PackageVersion> mapDifference = Maps.difference(oldMap, newMap);

    final List<PackageChange> changes = Lists.newArrayList();
    mapDifference.entriesOnlyOnLeft().forEach(
            (k, v) -> changes.add(new PackageChange(k, Optional.of(v.getVersion()), Optional.empty())));

    mapDifference.entriesOnlyOnRight().forEach(
            (k, v) -> changes.add(new PackageChange(k, Optional.empty(), Optional.of(v.getVersion()))));

    mapDifference.entriesDiffering().forEach((k, v) -> changes.add(new PackageChange(k,
            Optional.of(v.leftValue().getVersion()), Optional.of(v.rightValue().getVersion()))));

    mapDifference.entriesInCommon().forEach((k, v) -> changes
            .add(new PackageChange(k, Optional.of(v.getVersion()), Optional.of(v.getVersion()))));
    changes.sort(Comparator.comparing(PackageChange::getName));
    return changes;
}

From source file:com.auditbucket.engine.service.WhatService.java

public AuditDeltaBean getDelta(MetaHeader header, ChangeLog from, ChangeLog to) {
    if (header == null || from == null || to == null)
        throw new IllegalArgumentException("Unable to compute delta due to missing arguments");
    LogWhat source = getWhat(header, from);
    LogWhat dest = getWhat(header, to);
    MapDifference<String, Object> diffMap = Maps.difference(source.getWhatMap(), dest.getWhatMap());
    AuditDeltaBean result = new AuditDeltaBean();
    result.setAdded(new HashMap<>(diffMap.entriesOnlyOnRight()));
    result.setRemoved(new HashMap<>(diffMap.entriesOnlyOnLeft()));
    HashMap<String, Object> differences = new HashMap<>();
    Set<String> keys = diffMap.entriesDiffering().keySet();
    for (String key : keys) {
        differences.put(key, diffMap.entriesDiffering().get(key).toString());
    }
    result.setChanged(differences);
    result.setUnchanged(diffMap.entriesInCommon());
    return result;
}

From source file:cpw.mods.fml.client.GuiIdMismatchScreen.java

public GuiIdMismatchScreen(MapDifference<Integer, ItemData> idDifferences, boolean allowContinue) {
    super(null, "ID mismatch", "Should I continue?", 1);
    field_73942_a = this;
    for (Entry<Integer, ItemData> entry : idDifferences.entriesOnlyOnLeft().entrySet()) {
        missingIds.add(String.format("ID %d from Mod %s is missing (type %s)", entry.getValue().getItemId(),
                entry.getValue().getModId(), entry.getValue().getItemType()));
    }
    for (Entry<Integer, ValueDifference<ItemData>> entry : idDifferences.entriesDiffering().entrySet()) {
        ItemData world = entry.getValue().leftValue();
        ItemData game = entry.getValue().rightValue();
        mismatchedIds.add(String.format("ID %d is mismatched between world and game", world.getItemId()));
    }
    this.allowContinue = allowContinue;
}

From source file:org.apache.cassandra.db.DefsTable.java

private static Set<String> mergeKeyspaces(Map<DecoratedKey, ColumnFamily> old,
        Map<DecoratedKey, ColumnFamily> updated) throws ConfigurationException, IOException {
    // calculate the difference between old and new states (note that entriesOnlyOnLeft() will always be empty)
    MapDifference<DecoratedKey, ColumnFamily> diff = Maps.difference(old, updated);

    /**
     * At first step we check if any new keyspaces were added.
     */
    for (Map.Entry<DecoratedKey, ColumnFamily> entry : diff.entriesOnlyOnRight().entrySet()) {
        ColumnFamily ksAttrs = entry.getValue();

        // we don't care about nested ColumnFamilies here because those are going to be processed separately
        if (!ksAttrs.isEmpty())
            addKeyspace(KSMetaData.fromSchema(new Row(entry.getKey(), entry.getValue()),
                    Collections.<CFMetaData>emptyList()));
    }

    /**
     * At the second step we check whether any keyspaces were re-created; in this context,
     * re-created means that they were previously deleted but still exist in the low-level schema as empty keys.
     */

    Map<DecoratedKey, MapDifference.ValueDifference<ColumnFamily>> modifiedEntries = diff.entriesDiffering();

    // instead of looping over all modified entries and repeatedly skipping already-processed keys,
    // we store the "left to process" items and iterate over them, removing keys we have already handled
    List<DecoratedKey> leftToProcess = new ArrayList<DecoratedKey>(modifiedEntries.size());

    for (Map.Entry<DecoratedKey, MapDifference.ValueDifference<ColumnFamily>> entry : modifiedEntries
            .entrySet()) {
        ColumnFamily prevValue = entry.getValue().leftValue();
        ColumnFamily newValue = entry.getValue().rightValue();

        if (prevValue.isEmpty()) {
            addKeyspace(KSMetaData.fromSchema(new Row(entry.getKey(), newValue),
                    Collections.<CFMetaData>emptyList()));
            continue;
        }

        leftToProcess.add(entry.getKey());
    }

    if (leftToProcess.size() == 0)
        return Collections.emptySet();

    /**
     * In the final step we update the modified keyspaces and record which keyspaces to drop later.
     */

    Set<String> keyspacesToDrop = new HashSet<String>();

    for (DecoratedKey key : leftToProcess) {
        MapDifference.ValueDifference<ColumnFamily> valueDiff = modifiedEntries.get(key);

        ColumnFamily newState = valueDiff.rightValue();

        if (newState.isEmpty())
            keyspacesToDrop.add(AsciiType.instance.getString(key.key));
        else
            updateKeyspace(KSMetaData.fromSchema(new Row(key, newState), Collections.<CFMetaData>emptyList()));
    }

    return keyspacesToDrop;
}

From source file:com.netflix.paas.cassandra.tasks.ClusterRefreshTask.java

@Override
public void execte(TaskContext context) throws Exception {
    // Get parameters from the context
    String clusterName = context.getStringParameter("cluster");
    Boolean ignoreSystem = context.getBooleanParameter("ignoreSystem", true);
    CassandraClusterEntity entity = (CassandraClusterEntity) context.getParamater("entity");

    LOG.info("Refreshing cluster " + clusterName);

    // Read the current state from the DAO
    //        CassandraClusterEntity entity = clusterDao.read(clusterName);

    Map<String, String> existingKeyspaces = entity.getKeyspaces();
    if (existingKeyspaces == null) {
        existingKeyspaces = Maps.newHashMap();
        entity.setKeyspaces(existingKeyspaces);
    }

    Map<String, String> existingColumnFamilies = entity.getColumnFamilies();
    if (existingColumnFamilies == null) {
        existingColumnFamilies = Maps.newHashMap();
        entity.setColumnFamilies(existingColumnFamilies);
    }

    Set<String> foundKeyspaces = Sets.newHashSet();
    Set<String> foundColumnFamilies = Sets.newHashSet();

    Cluster cluster = provider
            .acquireCluster(new ClusterKey(entity.getClusterName(), entity.getDiscoveryType()));

    boolean changed = false;

    // Iterate found keyspaces
    try {
        for (KeyspaceDefinition keyspace : cluster.describeKeyspaces()) {
            // Extract data from the KeyspaceDefinition
            String ksName = keyspace.getName();
            MapStringToObject keyspaceOptions = getKeyspaceOptions(keyspace);

            if (existingKeyspaces.containsKey(ksName)) {
                MapStringToObject previousOptions = JsonSerializer.fromString(existingKeyspaces.get(ksName),
                        MapStringToObject.class);
                MapDifference keyspaceDiff = Maps.difference(keyspaceOptions, previousOptions);
                if (keyspaceDiff.areEqual()) {
                    LOG.info("Keyspace '{}' didn't change", new Object[] { ksName });
                } else {
                    changed = true;
                    LOG.info("CF Changed: " + keyspaceDiff.entriesDiffering());
                }
            } else {
                changed = true;
            }
            String strKeyspaceOptions = JsonSerializer.toString(keyspaceOptions);

            // Keep track of keyspace
            foundKeyspaces.add(keyspace.getName());
            existingKeyspaces.put(ksName, strKeyspaceOptions);

            LOG.info("Found keyspace '{}|{}' : {}",
                    new Object[] { entity.getClusterName(), ksName, keyspaceOptions });

            // Iterate found column families
            for (ColumnFamilyDefinition cf : keyspace.getColumnFamilyList()) {
                // Extract data from the ColumnFamilyDefinition
                String cfName = String.format("%s|%s", keyspace.getName(), cf.getName());
                MapStringToObject cfOptions = getColumnFamilyOptions(cf);
                String strCfOptions = JsonSerializer.toString(cfOptions);
                // Check for changes
                if (existingColumnFamilies.containsKey(cfName)) {
                    MapStringToObject previousOptions = JsonSerializer
                            .fromString(existingColumnFamilies.get(cfName), MapStringToObject.class);

                    LOG.info("Old options: " + previousOptions);

                    MapDifference cfDiff = Maps.difference(cfOptions, previousOptions);
                    if (cfDiff.areEqual()) {
                        LOG.info("CF '{}' didn't change", new Object[] { cfName });
                    } else {
                        changed = true;
                        LOG.info("CF Changed: " + cfDiff.entriesDiffering());
                    }

                } else {
                    changed = true;
                }
                // Keep track of the cf
                foundColumnFamilies.add(cfName);
                existingColumnFamilies.put(cfName, strCfOptions);

                LOG.info("Found column family '{}|{}|{}' : {}", new Object[] { entity.getClusterName(),
                        keyspace.getName(), cf.getName(), strCfOptions });
            }
        }
    } catch (Exception e) {
        LOG.info("Error refreshing cluster: " + entity.getClusterName(), e);
        entity.setEnabled(false);
    }

    SetView<String> ksRemoved = Sets.difference(existingKeyspaces.keySet(), foundKeyspaces);
    LOG.info("Keyspaces removed: " + ksRemoved);

    SetView<String> cfRemoved = Sets.difference(existingColumnFamilies.keySet(), foundColumnFamilies);
    LOG.info("CF removed: " + cfRemoved);

    clusterDao.write(entity);
}

From source file:org.locationtech.geogig.plumbing.diff.DiffSummaryOp.java

private Map<String, NodeRef[]> resolveChangedPaths(RevTree left, RevTree right) {
    if (left.equals(right)) {
        return Collections.emptyMap();
    }
    // figure out if left and right are feature trees or root trees
    CompletableFuture<Set<NodeRef>> l = findTypeTrees(left, leftSource);
    CompletableFuture<Set<NodeRef>> r = findTypeTrees(right, rightSource);
    CompletableFuture.allOf(l, r).join();

    Set<NodeRef> leftnodes = l.join();
    Set<NodeRef> rightnodes = r.join();

    final MapDifference<String, NodeRef> difference = difference(uniqueIndex(leftnodes, NodeRef::path),
            uniqueIndex(rightnodes, NodeRef::path));

    Map<String, NodeRef[]> result = new HashMap<>();

    difference.entriesOnlyOnLeft().forEach((k, v) -> result.put(k, new NodeRef[] { v, null }));
    difference.entriesOnlyOnRight().forEach((k, v) -> result.put(k, new NodeRef[] { null, v }));
    difference.entriesDiffering()
            .forEach((k, v) -> result.put(k, new NodeRef[] { v.leftValue(), v.rightValue() }));

    return result;
}

From source file:org.sonarsource.sonarlint.core.container.connected.update.check.PluginsUpdateChecker.java

public void checkForUpdates(DefaultStorageUpdateCheckResult result, List<SonarAnalyzer> pluginList) {
    PluginReferences serverPluginReferences = pluginReferenceDownloader.fetchPlugins(pluginList);
    PluginReferences storagePluginReferences = storageManager.readPluginReferencesFromStorage();
    Map<String, String> serverPluginHashes = serverPluginReferences.getReferenceList().stream()
            .collect(Collectors.toMap(PluginReference::getKey, PluginReference::getHash));
    Map<String, String> storagePluginHashes = storagePluginReferences.getReferenceList().stream()
            .collect(Collectors.toMap(PluginReference::getKey, PluginReference::getHash));
    MapDifference<String, String> pluginDiff = Maps.difference(storagePluginHashes, serverPluginHashes);
    if (!pluginDiff.areEqual()) {
        for (Map.Entry<String, String> entry : pluginDiff.entriesOnlyOnLeft().entrySet()) {
            result.appendToChangelog(String.format("Plugin '%s' removed", entry.getKey()));
        }
        for (Map.Entry<String, String> entry : pluginDiff.entriesOnlyOnRight().entrySet()) {
            result.appendToChangelog("Plugin '" + entry.getKey() + "' added");
        }
        for (Map.Entry<String, ValueDifference<String>> entry : pluginDiff.entriesDiffering().entrySet()) {
            result.appendToChangelog("Plugin '" + entry.getKey() + "' updated");
        }
    }
}