Example usage for com.google.common.collect MapDifference entriesOnlyOnRight

List of usage examples for com.google.common.collect MapDifference entriesOnlyOnRight

Introduction

On this page you can find example usages of com.google.common.collect MapDifference entriesOnlyOnRight.

Prototype

Map<K, V> entriesOnlyOnRight();

Document

Returns an unmodifiable map containing the entries from the right map whose keys are not present in the left map.
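
As a quick orientation before the project-level examples below, here is a minimal, self-contained sketch (hypothetical maps, not taken from any of the projects listed) showing entriesOnlyOnRight alongside the other MapDifference views:

import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;

import java.util.Map;

public class MapDifferenceDemo {
    public static void main(String[] args) {
        Map<String, String> left = Map.of("a", "1", "b", "2");
        Map<String, String> right = Map.of("b", "3", "c", "4");

        MapDifference<String, String> diff = Maps.difference(left, right);

        // entries whose keys appear only in the right map: {c=4}
        System.out.println("only on right: " + diff.entriesOnlyOnRight());
        // entries whose keys appear only in the left map: {a=1}
        System.out.println("only on left:  " + diff.entriesOnlyOnLeft());
        // keys present in both maps but with different values: {b=(2, 3)}
        System.out.println("differing:     " + diff.entriesDiffering());
    }
}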

Usage

From source file:org.sonarsource.sonarlint.core.container.connected.update.check.GlobalSettingsUpdateChecker.java
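This check compares the global settings stored locally with those fetched from the server: entriesOnlyOnLeft() are removed properties, entriesOnlyOnRight() are newly added properties, and entriesDiffering() are properties whose value changed.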

public void checkForUpdates(String serverVersion, DefaultStorageUpdateCheckResult result) {
    GlobalProperties serverGlobalProperties = globalPropertiesDownloader.fetchGlobalSettings(serverVersion);
    GlobalProperties storageGlobalProperties = storageManager.readGlobalPropertiesFromStorage();
    MapDifference<String, String> propDiff = Maps.difference(filter(storageGlobalProperties.getPropertiesMap()),
            filter(serverGlobalProperties.getPropertiesMap()));
    if (!propDiff.areEqual()) {
        result.appendToChangelog("Global settings updated");
        for (Map.Entry<String, String> entry : propDiff.entriesOnlyOnLeft().entrySet()) {
            LOG.debug("Property '{}' removed", entry.getKey());
        }
        for (Map.Entry<String, String> entry : propDiff.entriesOnlyOnRight().entrySet()) {
            LOG.debug("Property '{}' added with value '{}'", entry.getKey(),
                    formatValue(entry.getKey(), entry.getValue()));
        }
        for (Map.Entry<String, ValueDifference<String>> entry : propDiff.entriesDiffering().entrySet()) {
            LOG.debug("Value of property '{}' changed from '{}' to '{}'", entry.getKey(),
                    formatLeftDiff(entry.getKey(), entry.getValue().leftValue(), entry.getValue().rightValue()),
                    formatRightDiff(entry.getKey(), entry.getValue().leftValue(),
                            entry.getValue().rightValue()));
        }
    }
}

From source file:org.apache.cassandra.schema.SchemaKeyspace.java
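When building the schema mutation for an altered table, entriesOnlyOnRight() identifies the newly added columns, newly dropped columns, newly created triggers and newly created indexes.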

public static Mutation.SimpleBuilder makeUpdateTableMutation(KeyspaceMetadata keyspace, CFMetaData oldTable,
        CFMetaData newTable, long timestamp) {
    Mutation.SimpleBuilder builder = makeCreateKeyspaceMutation(keyspace.name, keyspace.params, timestamp);

    addTableToSchemaMutation(newTable, false, builder);

    MapDifference<ByteBuffer, ColumnDefinition> columnDiff = Maps.difference(oldTable.getColumnMetadata(),
            newTable.getColumnMetadata());

    // columns that are no longer needed
    for (ColumnDefinition column : columnDiff.entriesOnlyOnLeft().values())
        dropColumnFromSchemaMutation(oldTable, column, builder);

    // newly added columns
    for (ColumnDefinition column : columnDiff.entriesOnlyOnRight().values())
        addColumnToSchemaMutation(newTable, column, builder);

    // old columns with updated attributes
    for (ByteBuffer name : columnDiff.entriesDiffering().keySet())
        addColumnToSchemaMutation(newTable, newTable.getColumnDefinition(name), builder);

    // dropped columns
    MapDifference<ByteBuffer, CFMetaData.DroppedColumn> droppedColumnDiff = Maps
            .difference(oldTable.getDroppedColumns(), newTable.getDroppedColumns());

    // newly dropped columns
    for (CFMetaData.DroppedColumn column : droppedColumnDiff.entriesOnlyOnRight().values())
        addDroppedColumnToSchemaMutation(newTable, column, builder);

    // columns added then dropped again
    for (ByteBuffer name : droppedColumnDiff.entriesDiffering().keySet())
        addDroppedColumnToSchemaMutation(newTable, newTable.getDroppedColumns().get(name), builder);

    MapDifference<String, TriggerMetadata> triggerDiff = triggersDiff(oldTable.getTriggers(),
            newTable.getTriggers());

    // dropped triggers
    for (TriggerMetadata trigger : triggerDiff.entriesOnlyOnLeft().values())
        dropTriggerFromSchemaMutation(oldTable, trigger, builder);

    // newly created triggers
    for (TriggerMetadata trigger : triggerDiff.entriesOnlyOnRight().values())
        addTriggerToSchemaMutation(newTable, trigger, builder);

    MapDifference<String, IndexMetadata> indexesDiff = indexesDiff(oldTable.getIndexes(),
            newTable.getIndexes());

    // dropped indexes
    for (IndexMetadata index : indexesDiff.entriesOnlyOnLeft().values())
        dropIndexFromSchemaMutation(oldTable, index, builder);

    // newly created indexes
    for (IndexMetadata index : indexesDiff.entriesOnlyOnRight().values())
        addIndexToSchemaMutation(newTable, index, builder);

    // updated indexes need to be updated
    for (MapDifference.ValueDifference<IndexMetadata> diff : indexesDiff.entriesDiffering().values())
        addUpdatedIndexToSchemaMutation(newTable, diff.rightValue(), builder);

    return builder;
}

From source file:org.apache.cassandra.schema.SchemaKeyspace.java
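The materialized-view variant of the previous example: entriesOnlyOnRight() again yields the columns that were added to, or newly dropped from, the view.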

public static Mutation.SimpleBuilder makeUpdateViewMutation(KeyspaceMetadata keyspace, ViewDefinition oldView,
        ViewDefinition newView, long timestamp) {
    Mutation.SimpleBuilder builder = makeCreateKeyspaceMutation(keyspace.name, keyspace.params, timestamp);

    addViewToSchemaMutation(newView, false, builder);

    MapDifference<ByteBuffer, ColumnDefinition> columnDiff = Maps
            .difference(oldView.metadata.getColumnMetadata(), newView.metadata.getColumnMetadata());

    // columns that are no longer needed
    for (ColumnDefinition column : columnDiff.entriesOnlyOnLeft().values())
        dropColumnFromSchemaMutation(oldView.metadata, column, builder);

    // newly added columns
    for (ColumnDefinition column : columnDiff.entriesOnlyOnRight().values())
        addColumnToSchemaMutation(newView.metadata, column, builder);

    // old columns with updated attributes
    for (ByteBuffer name : columnDiff.entriesDiffering().keySet())
        addColumnToSchemaMutation(newView.metadata, newView.metadata.getColumnDefinition(name), builder);

    // dropped columns
    MapDifference<ByteBuffer, CFMetaData.DroppedColumn> droppedColumnDiff = Maps
            .difference(oldView.metadata.getDroppedColumns(), newView.metadata.getDroppedColumns());

    // newly dropped columns
    for (CFMetaData.DroppedColumn column : droppedColumnDiff.entriesOnlyOnRight().values())
        addDroppedColumnToSchemaMutation(oldView.metadata, column, builder);

    // columns added then dropped again
    for (ByteBuffer name : droppedColumnDiff.entriesDiffering().keySet())
        addDroppedColumnToSchemaMutation(newView.metadata, newView.metadata.getDroppedColumns().get(name),
                builder);

    return builder;
}

From source file:org.apache.druid.indexing.materializedview.MaterializedViewSupervisor.java
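The supervisor diffs the base dataSource intervals against the derived dataSource intervals: entriesOnlyOnLeft() are the intervals that still need to be built, and entriesOnlyOnRight() are the intervals whose derivative segments should be dropped.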

/**
 * Find information about the intervals in which the derived dataSource data should be rebuilt.
 * The information includes the version and the DataSegment list of each interval.
 * An interval is included when, within that interval,
 *  1) the baseDataSource has data but the derivedDataSource does not, or
 *  2) the version of the derived segments is not the max(created_date) of all base segments.
 *
 *  Drop the segments of the intervals in which the derivedDataSource has data but the baseDataSource does not.
 *
 * @return the left part of the Pair: interval -> version, and the right part: interval -> DataSegment list.
 *          The version and DataSegment list can be used to create a HadoopIndexTask.
 *          Derived dataSource data in all these intervals needs to be rebuilt.
 */
@VisibleForTesting
Pair<SortedMap<Interval, String>, Map<Interval, List<DataSegment>>> checkSegments() {
    // Pair<interval -> version, interval -> list<DataSegment>>
    Pair<Map<Interval, String>, Map<Interval, List<DataSegment>>> derivativeSegmentsSnapshot = getVersionAndBaseSegments(
            metadataStorageCoordinator.getUsedSegmentsForInterval(dataSource, ALL_INTERVAL));
    // Pair<interval -> max(created_date), interval -> list<DataSegment>>
    Pair<Map<Interval, String>, Map<Interval, List<DataSegment>>> baseSegmentsSnapshot = getMaxCreateDateAndBaseSegments(
            metadataStorageCoordinator.getUsedSegmentAndCreatedDateForInterval(spec.getBaseDataSource(),
                    ALL_INTERVAL));
    // baseSegments are used to create HadoopIndexTask
    Map<Interval, List<DataSegment>> baseSegments = baseSegmentsSnapshot.rhs;
    Map<Interval, List<DataSegment>> derivativeSegments = derivativeSegmentsSnapshot.rhs;
    // use max created_date of base segments as the version of derivative segments
    Map<Interval, String> maxCreatedDate = baseSegmentsSnapshot.lhs;
    Map<Interval, String> derivativeVersion = derivativeSegmentsSnapshot.lhs;
    SortedMap<Interval, String> sortedToBuildInterval = Maps
            .newTreeMap(Comparators.inverse(Comparators.intervalsByStartThenEnd()));
    // find the intervals to drop and to build
    MapDifference<Interval, String> difference = Maps.difference(maxCreatedDate, derivativeVersion);
    Map<Interval, String> toBuildInterval = Maps.newHashMap(difference.entriesOnlyOnLeft());
    Map<Interval, String> toDropInterval = Maps.newHashMap(difference.entriesOnlyOnRight());
    // if an interval is in a running task and the versions are the same, remove it from toBuildInterval
    // if an interval is in a running task but the versions are different, stop the task.
    for (Interval interval : runningVersion.keySet()) {
        if (toBuildInterval.containsKey(interval)
                && toBuildInterval.get(interval).equals(runningVersion.get(interval))) {
            toBuildInterval.remove(interval);

        } else if (toBuildInterval.containsKey(interval)
                && !toBuildInterval.get(interval).equals(runningVersion.get(interval))) {
            if (taskMaster.getTaskQueue().isPresent()) {
                taskMaster.getTaskQueue().get().shutdown(runningTasks.get(interval).getId());
                runningTasks.remove(interval);
            }
        }
    }
    // drop derivative segments whose interval is in toDropInterval
    for (Interval interval : toDropInterval.keySet()) {
        for (DataSegment segment : derivativeSegments.get(interval)) {
            segmentManager.removeSegment(dataSource, segment.getIdentifier());
        }
    }
    // data of the latest interval will be built first.
    sortedToBuildInterval.putAll(toBuildInterval);
    return new Pair<>(sortedToBuildInterval, baseSegments);
}

From source file:com.addthis.hydra.job.alert.JobAlertRunner.java
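The alert scanner diffs the previous and the current error maps for each alert: entriesOnlyOnLeft() are cleared errors, entriesOnlyOnRight() are newly triggered errors, and entriesDiffering() are errors whose message changed.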

/**
 * Iterate over the alert map, checking the status of each alert and sending emails as needed.
 */
public void scanAlerts() {
    if (alertsEnabled) {
        log.info("Started alert scan of {} alerts...", alertMap.size());
        try {
            for (Map.Entry<String, AbstractJobAlert> entry : alertMap.entrySet()) {
                AbstractJobAlert oldAlert = entry.getValue();
                Map<String, String> currentErrors = oldAlert.getActiveJobs();
                // entry may be concurrently deleted, so only recompute if still present, and while locked
                AbstractJobAlert alert = alertMap.computeIfPresent(entry.getKey(), (id, currentAlert) -> {
                    currentAlert.checkAlertForJobs(currentAlert.getAlertJobs(spawn), meshyClient);
                    if (!currentAlert.getActiveJobs().equals(currentErrors)) {
                        storeAlert(currentAlert.alertId, currentAlert);
                    }
                    return currentAlert;
                });
                // null if it was concurrently removed from the map. Does not catch all removals, but might as well
                // make a best effort attempt to send clears when convenient (should probably move clear emails to
                // the removal method at some point)
                if (alert == null) {
                    emailAlert(oldAlert, "[CLEAR] ", currentErrors);
                } else {
                    Map<String, String> newErrors = alert.getActiveJobs();
                    MapDifference<String, String> difference = Maps.difference(currentErrors, newErrors);
                    emailAlert(oldAlert, "[CLEAR] ", difference.entriesOnlyOnLeft());
                    emailAlert(alert, "[TRIGGER] ", difference.entriesOnlyOnRight());
                    Map<String, String> errorsChanged = new HashMap<>();
                    for (Map.Entry<String, MapDifference.ValueDifference<String>> differing : difference
                            .entriesDiffering().entrySet()) {
                        String oldValue = differing.getValue().leftValue();
                        String newValue = differing.getValue().rightValue();
                        if (!alert.suppressChanges.suppress(oldValue, newValue)) {
                            errorsChanged.put(differing.getKey(), newValue);
                        }
                    }
                    emailAlert(alert, "[ERROR CHANGED] ", errorsChanged);
                }
            }
            lastAlertScanFailed = false;
            log.info("Finished alert scan");
        } catch (Exception e) {
            lastAlertScanFailed = true;
            log.error("Unexpected error while scanning alerts: {}", e.getMessage(), e);
        }
    }
}

From source file:com.puppycrawl.tools.checkstyle.AbstractModuleTestSupport.java
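This test helper diffs the expected violations against the actual ones: entriesOnlyOnLeft() are missing violations, entriesOnlyOnRight() are unexpected violations, and entriesDiffering() are violations that differ from what was expected.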

/**
 * Performs verification of the given files.
 * @param checker {@link Checker} instance
 * @param processedFiles files to process.
 * @param expectedViolations a map of expected violations per file.
 * @throws Exception if an exception occurs during the verification process.
 */
protected final void verify(Checker checker, File[] processedFiles,
        Map<String, List<String>> expectedViolations) throws Exception {
    stream.flush();
    final List<File> theFiles = new ArrayList<>();
    Collections.addAll(theFiles, processedFiles);
    final int errs = checker.process(theFiles);

    // process each of the lines
    final Map<String, List<String>> actualViolations = getActualViolations(errs);
    final Map<String, List<String>> realExpectedViolations = Maps.filterValues(expectedViolations,
            input -> !input.isEmpty());
    final MapDifference<String, List<String>> violationDifferences = Maps.difference(realExpectedViolations,
            actualViolations);

    final Map<String, List<String>> missingViolations = violationDifferences.entriesOnlyOnLeft();
    final Map<String, List<String>> unexpectedViolations = violationDifferences.entriesOnlyOnRight();
    final Map<String, MapDifference.ValueDifference<List<String>>> differingViolations = violationDifferences
            .entriesDiffering();

    final StringBuilder message = new StringBuilder(256);
    if (!missingViolations.isEmpty()) {
        message.append("missing violations: ").append(missingViolations);
    }
    if (!unexpectedViolations.isEmpty()) {
        if (message.length() > 0) {
            message.append('\n');
        }
        message.append("unexpected violations: ").append(unexpectedViolations);
    }
    if (!differingViolations.isEmpty()) {
        if (message.length() > 0) {
            message.append('\n');
        }
        message.append("differing violations: ").append(differingViolations);
    }

    assertTrue(message.toString(),
            missingViolations.isEmpty() && unexpectedViolations.isEmpty() && differingViolations.isEmpty());

    checker.destroy();
}

From source file:org.terasology.identity.storageServiceClient.SyncIdentitiesAction.java
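While synchronizing identities with the storage service, entriesOnlyOnLeft() are uploaded, entriesOnlyOnRight() are added to the local security config, and entriesDiffering() are kept as conflicts for manual resolution.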

@Override
public void perform(StorageServiceWorker worker) {
    if (worker.hasConflictingIdentities()) {
        worker.logMessage(true, "${engine:menu#storage-service-sync-previous-conflicts}");
    } else {
        try {
            Map<PublicIdentityCertificate, ClientIdentity> local = worker.securityConfig.getAllIdentities();
            Map<PublicIdentityCertificate, ClientIdentity> remote = worker.sessionInstance.getAllIdentities();
            MapDifference<PublicIdentityCertificate, ClientIdentity> diff = Maps.difference(local, remote);
            //upload the "local only" ones
            for (Map.Entry<PublicIdentityCertificate, ClientIdentity> entry : diff.entriesOnlyOnLeft()
                    .entrySet()) {
                if (entry.getValue().getPlayerPrivateCertificate() != null) { //TODO: find out why sometimes it's null
                    worker.sessionInstance.putIdentity(entry.getKey(), entry.getValue());
                }
            }
            //download the "remote only" ones
            for (Map.Entry<PublicIdentityCertificate, ClientIdentity> entry : diff.entriesOnlyOnRight()
                    .entrySet()) {
                worker.securityConfig.addIdentity(entry.getKey(), entry.getValue());
            }
            //keep track of the conflicting ones for manual resolution
            worker.resetConflicts();
            for (Map.Entry<PublicIdentityCertificate, MapDifference.ValueDifference<ClientIdentity>> entry : diff
                    .entriesDiffering().entrySet()) {
                worker.conflictingRemoteIdentities
                        .addLast(new IdentityBundle(entry.getKey(), entry.getValue().rightValue()));
            }
            worker.saveConfig();
            worker.logMessage(false, "${engine:menu#storage-service-sync-ok}", diff.entriesOnlyOnRight().size(),
                    diff.entriesOnlyOnLeft().size(), diff.entriesDiffering().size());
            if (!diff.entriesDiffering().isEmpty()) {
                worker.logMessage(true, "${engine:menu#storage-service-sync-conflicts}");
            }
        } catch (Exception e) {
            worker.logMessage(true, "${engine:menu#storage-service-sync-fail}", e.getMessage());
        }
    }
    worker.status = StorageServiceWorkerStatus.LOGGED_IN;
}

From source file:com.facebook.buck.rules.ActionGraphCache.java
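The cached and the freshly generated action graphs are compared by RuleKey; entriesOnlyOnLeft() and entriesOnlyOnRight() count the rules present in only one of the two graphs when reporting a mismatch.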

/**
 * Compares the cached ActionGraph with one newly generated from the targetGraph. The comparison
 * is done by generating and comparing content-agnostic RuleKeys. In case of a mismatch, the
 * mismatching BuildRules are printed and the building process is stopped.
 * @param eventBus Buck's event bus.
 * @param lastActionGraphAndResolver The cached version of the graph that gets compared.
 * @param targetGraph Used to generate the actionGraph that gets compared with lastActionGraph.
 */
private void compareActionGraphs(final BuckEventBus eventBus,
        final ActionGraphAndResolver lastActionGraphAndResolver, final TargetGraph targetGraph,
        final int keySeed) {
    try (SimplePerfEvent.Scope scope = SimplePerfEvent.scope(eventBus,
            PerfEventId.of("ActionGraphCacheCheck"))) {
        // We check that the lastActionGraph is not null because it's possible we had an
        // invalidateCache() between the scheduling and the execution of this task.
        LOG.info("ActionGraph integrity check spawned.");
        Pair<TargetGraph, ActionGraphAndResolver> newActionGraph = new Pair<TargetGraph, ActionGraphAndResolver>(
                targetGraph,
                createActionGraph(eventBus, new DefaultTargetNodeToBuildRuleTransformer(), targetGraph));

        Map<BuildRule, RuleKey> lastActionGraphRuleKeys = getRuleKeysFromBuildRules(
                lastActionGraphAndResolver.getActionGraph().getNodes(),
                lastActionGraphAndResolver.getResolver(), keySeed);
        Map<BuildRule, RuleKey> newActionGraphRuleKeys = getRuleKeysFromBuildRules(
                newActionGraph.getSecond().getActionGraph().getNodes(),
                newActionGraph.getSecond().getResolver(), keySeed);

        if (!lastActionGraphRuleKeys.equals(newActionGraphRuleKeys)) {
            invalidateCache();
            String mismatchInfo = "RuleKeys of cached and new ActionGraph don't match:\n";
            MapDifference<BuildRule, RuleKey> mismatchedRules = Maps.difference(lastActionGraphRuleKeys,
                    newActionGraphRuleKeys);
            mismatchInfo += "Number of nodes in common/differing: " + mismatchedRules.entriesInCommon().size()
                    + "/" + mismatchedRules.entriesDiffering().size() + "\n"
                    + "Entries only in the cached ActionGraph: " + mismatchedRules.entriesOnlyOnLeft().size()
                    + "Entries only in the newly created ActionGraph: "
                    + mismatchedRules.entriesOnlyOnRight().size() + "The rules that did not match:\n";
            mismatchInfo += mismatchedRules.entriesDiffering().keySet().toString();
            LOG.error(mismatchInfo);
            throw new RuntimeException(mismatchInfo);
        }
    }
}

From source file:com.facebook.buck.core.model.actiongraph.computation.ActionGraphProvider.java
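A newer variant of the previous Buck check; entriesOnlyOnRight() counts the rules that exist only in the newly created ActionGraph.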

/**
 * Compares the cached ActionGraph with one newly generated from the targetGraph. The comparison is
 * done by generating and comparing content-agnostic RuleKeys. In case of a mismatch, the
 * mismatching BuildRules are printed and the building process is stopped.
 *
 * @param lastActionGraphAndBuilder The cached version of the graph that gets compared.
 * @param targetGraph Used to generate the actionGraph that gets compared with lastActionGraph.
 * @param fieldLoader
 * @param ruleKeyLogger The logger to use (if any) when computing the new action graph
 */
private void compareActionGraphs(ActionGraphAndBuilder lastActionGraphAndBuilder,
        TargetNodeToBuildRuleTransformer transformer, TargetGraph targetGraph, RuleKeyFieldLoader fieldLoader,
        Optional<ThriftRuleKeyLogger> ruleKeyLogger) {
    try (SimplePerfEvent.Scope scope = SimplePerfEvent.scope(eventBus,
            PerfEventId.of("ActionGraphCacheCheck"))) {
        // We check that the lastActionGraph is not null because it's possible we had an
        // invalidateCache() between the scheduling and the execution of this task.
        LOG.info("ActionGraph integrity check spawned.");
        ActionGraphAndBuilder newActionGraph = createActionGraph(transformer, targetGraph,
                IncrementalActionGraphMode.DISABLED);

        Map<BuildRule, RuleKey> lastActionGraphRuleKeys = getRuleKeysFromBuildRules(
                lastActionGraphAndBuilder.getActionGraph().getNodes(),
                lastActionGraphAndBuilder.getActionGraphBuilder(), fieldLoader,
                Optional.empty() /* Only log once, and only for the new graph */);
        Map<BuildRule, RuleKey> newActionGraphRuleKeys = getRuleKeysFromBuildRules(
                newActionGraph.getActionGraph().getNodes(), newActionGraph.getActionGraphBuilder(), fieldLoader,
                ruleKeyLogger);

        if (!lastActionGraphRuleKeys.equals(newActionGraphRuleKeys)) {
            actionGraphCache.invalidateCache();
            String mismatchInfo = "RuleKeys of cached and new ActionGraph don't match:\n";
            MapDifference<BuildRule, RuleKey> mismatchedRules = Maps.difference(lastActionGraphRuleKeys,
                    newActionGraphRuleKeys);
            mismatchInfo += "Number of nodes in common/differing: " + mismatchedRules.entriesInCommon().size()
                    + "/" + mismatchedRules.entriesDiffering().size() + "\n"
                    + "Entries only in the cached ActionGraph: " + mismatchedRules.entriesOnlyOnLeft().size()
                    + "Entries only in the newly created ActionGraph: "
                    + mismatchedRules.entriesOnlyOnRight().size() + "The rules that did not match:\n";
            mismatchInfo += mismatchedRules.entriesDiffering().keySet().toString();
            LOG.error(mismatchInfo);
            throw new RuntimeException(mismatchInfo);
        }
    }
}

From source file:eu.numberfour.n4js.ui.workingsets.GitRepositoryAwareWorkingSetManager.java
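This Eclipse working-set manager listens for Git repository preference changes: entriesOnlyOnLeft() are removed repository URLs and entriesOnlyOnRight() are newly added ones.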

/**
 * Sole constructor for creating the working set manager. Internally initializes the cache for repositories.
 */
public GitRepositoryAwareWorkingSetManager() {
    repositoryCache = Activator.getDefault().getRepositoryCache(); // might not be initialized yet.
    repositoryChangeListener = new IPreferenceChangeListener() {

        @SuppressWarnings("deprecation")
        // keep deprecated RepositoryUtil.PREFS_DIRECTORIES for backward-compatibility
        @Override
        public void preferenceChange(final PreferenceChangeEvent event) {
            if (!RepositoryUtil.PREFS_DIRECTORIES_REL.equals(event.getKey())
                    && !RepositoryUtil.PREFS_DIRECTORIES.equals(event.getKey())) {
                return;
            }

            if (!orderedWorkingSetIds.isEmpty() && !visibleWorkingSetIds.isEmpty()) {

                MapDifference<String, String> diff = calculateDifference(event);
                if (!diff.areEqual()) {

                    // Deletions
                    final Set<String> deletions = diff.entriesOnlyOnLeft().keySet();
                    for (String deletedUrl : deletions) {
                        orderedWorkingSetIds.remove(deletedUrl);
                        visibleWorkingSetIds.remove(deletedUrl);
                    }

                    // Additions
                    final Set<String> additions = diff.entriesOnlyOnRight().keySet();
                    for (String addedUrl : additions) {
                        orderedWorkingSetIds.add(addedUrl);
                        visibleWorkingSetIds.add(addedUrl);
                    }

                }

            }

            discardWorkingSetCaches();
            saveState(new NullProgressMonitor());

            WorkingSetManagerBroker workingSetManagerBroker = getWorkingSetManagerBroker();
            if (workingSetManagerBroker.isWorkingSetTopLevel()) {
                final WorkingSetManager activeManager = workingSetManagerBroker.getActiveManager();
                if (activeManager != null) {
                    if (activeManager.getId().equals(getId())) {
                        workingSetManagerBroker.refreshNavigator();
                    }
                }
            }

        }

        private MapDifference<String, String> calculateDifference(PreferenceChangeEvent event) {
            String oldValue = Strings.nullToEmpty((String) event.getOldValue());
            String newValue = Strings.nullToEmpty((String) event.getNewValue());

            Map<String, String> oldMappings = toMap(newHashSet(Splitter.on(pathSeparator).split(oldValue)),
                    i -> i);
            Map<String, String> newMappings = toMap(newHashSet(Splitter.on(pathSeparator).split(newValue)),
                    i -> i);

            return Maps.difference(oldMappings, newMappings);

        }

    };

    final IEclipsePreferences gitNode = InstanceScope.INSTANCE.getNode(Activator.getPluginId());
    gitNode.addPreferenceChangeListener(repositoryChangeListener);

    final BundleContext context = Activator.getDefault().getBundle().getBundleContext();
    context.addBundleListener(new BundleListener() {

        @Override
        public void bundleChanged(final BundleEvent event) {
            if (BundleEvent.STOPPING == event.getType()) {
                gitNode.removePreferenceChangeListener(repositoryChangeListener);
            }
        }

    });
}