Example usage for com.google.common.collect Maps difference

Introduction

On this page you can find usage examples for com.google.common.collect.Maps#difference.

Prototype

public static <K, V> SortedMapDifference<K, V> difference(SortedMap<K, ? extends V> left,
        Map<? extends K, ? extends V> right) 

Document

Computes the difference between two sorted maps, using the comparator of the left map, or Ordering.natural() if the left map uses the natural ordering of its elements.
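
Before the project-level examples below, here is a minimal self-contained sketch (the class name and values are invented for illustration) of what this overload returns; because the left map is sorted, the difference views are themselves sorted by that map's ordering:

import java.util.Map;
import java.util.TreeMap;

import com.google.common.collect.Maps;
import com.google.common.collect.SortedMapDifference;

public class SortedMapDifferenceSketch {
    public static void main(String[] args) {
        // Left map: a TreeMap sorted by the natural ordering of its keys.
        TreeMap<String, Integer> left = new TreeMap<>();
        left.put("a", 1);
        left.put("b", 2);
        left.put("c", 3);

        // Right map: any Map; its iteration order does not matter.
        Map<String, Integer> right = Maps.newHashMap();
        right.put("b", 2);
        right.put("c", 30);
        right.put("d", 4);

        // The returned views are SortedMaps using the left map's ordering.
        SortedMapDifference<String, Integer> diff = Maps.difference(left, right);
        System.out.println(diff.entriesOnlyOnLeft());  // {a=1}
        System.out.println(diff.entriesOnlyOnRight()); // {d=4}
        System.out.println(diff.entriesInCommon());    // {b=2}
        System.out.println(diff.entriesDiffering());   // {c=(3, 30)}
        System.out.println(diff.areEqual());           // false
    }
}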

Usage

From source file:com.stratelia.silverpeas.notificationserver.NotificationData.java

@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    NotificationData that = (NotificationData) o;
    if (!Objects.equal(mAnswerAllowed, that.mAnswerAllowed)) {
        return false;
    }
    if (!Objects.equal(mComment, that.mComment)) {
        return false;
    }
    if (!Objects.equal(mLoginPassword, that.mLoginPassword)) {
        return false;
    }
    if (!Objects.equal(mLoginUser, that.mLoginUser)) {
        return false;
    }
    if (!Objects.equal(mMessage, that.mMessage)) {
        return false;
    }
    if (!Objects.equal(mPrioritySpeed, that.mPrioritySpeed)) {
        return false;
    }
    if (!Objects.equal(mReportToLogStatus, that.mReportToLogStatus)) {
        return false;
    }
    if (!Objects.equal(mReportToSenderStatus, that.mReportToSenderStatus)) {
        return false;
    }
    if (!Objects.equal(mReportToSenderTargetChannel, that.mReportToSenderTargetChannel)) {
        return false;
    }
    if (!Objects.equal(mReportToSenderTargetParam, that.mReportToSenderTargetParam)) {
        return false;
    }
    if (!Objects.equal(mReportToSenderTargetReceipt, that.mReportToSenderTargetReceipt)) {
        return false;
    }
    if (!Objects.equal(mSenderId, that.mSenderId)) {
        return false;
    }
    if (!Objects.equal(mSenderName, that.mSenderName)) {
        return false;
    }
    if (!Objects.equal(mTargetChannel, that.mTargetChannel)) {
        return false;
    }
    if (!Objects.equal(mTargetName, that.mTargetName)) {
        return false;
    }
    if (!Maps.difference(mTargetParam, that.mTargetParam).areEqual()) {
        return false;
    }
    if (!Objects.equal(mTargetReceipt, that.mTargetReceipt)) {
        return false;
    }

    return true;
}
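
A note on the Maps.difference(mTargetParam, that.mTargetParam).areEqual() check above: areEqual() is true exactly when the two maps contain the same entries, so for non-null maps it behaves like Map.equals; unlike the surrounding Objects.equal calls, though, Maps.difference rejects null arguments. Computing the full difference pays off when you also want to report what changed, as in this small sketch (map contents are made up):

import java.util.Map;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;

public class TargetParamComparisonSketch {
    public static void main(String[] args) {
        Map<String, String> expected = ImmutableMap.of("channel", "SMTP", "priority", "normal");
        Map<String, String> actual = ImmutableMap.of("channel", "SMTP", "priority", "high");

        MapDifference<String, String> diff = Maps.difference(expected, actual);
        if (!diff.areEqual()) {
            // entriesDiffering() pairs each conflicting key with its (left, right) values.
            System.out.println("changed: " + diff.entriesDiffering()); // changed: {priority=(normal, high)}
        }
    }
}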

From source file:org.apache.cassandra.db.DefsTable.java

private static Set<String> mergeKeyspaces(Map<DecoratedKey, ColumnFamily> old,
        Map<DecoratedKey, ColumnFamily> updated) throws ConfigurationException, IOException {
    // calculate the difference between old and new states (note that entriesOnlyOnLeft() will always be empty)
    MapDifference<DecoratedKey, ColumnFamily> diff = Maps.difference(old, updated);

    /**
     * At the first step we check whether any new keyspaces were added.
     */
    for (Map.Entry<DecoratedKey, ColumnFamily> entry : diff.entriesOnlyOnRight().entrySet()) {
        ColumnFamily ksAttrs = entry.getValue();

        // we don't care about nested ColumnFamilies here because those are going to be processed separately
        if (!ksAttrs.isEmpty())
            addKeyspace(KSMetaData.fromSchema(new Row(entry.getKey(), entry.getValue()),
                    Collections.<CFMetaData>emptyList()));
    }

    /**
     * At the second step we check whether any keyspaces were re-created; in this context,
     * re-created means they were previously deleted but still exist in the low-level schema as empty keys.
     */

    Map<DecoratedKey, MapDifference.ValueDifference<ColumnFamily>> modifiedEntries = diff.entriesDiffering();

    // instead of looping over all modified entries and skipping processed keys all the time
    // we would rather store "left to process" items and iterate over them removing already met keys
    List<DecoratedKey> leftToProcess = new ArrayList<DecoratedKey>(modifiedEntries.size());

    for (Map.Entry<DecoratedKey, MapDifference.ValueDifference<ColumnFamily>> entry : modifiedEntries
            .entrySet()) {
        ColumnFamily prevValue = entry.getValue().leftValue();
        ColumnFamily newValue = entry.getValue().rightValue();

        if (prevValue.isEmpty()) {
            addKeyspace(KSMetaData.fromSchema(new Row(entry.getKey(), newValue),
                    Collections.<CFMetaData>emptyList()));
            continue;
        }

        leftToProcess.add(entry.getKey());
    }

    if (leftToProcess.size() == 0)
        return Collections.emptySet();

    /**
     * At the final step we update the modified keyspaces and collect the keyspaces to drop later.
     */

    Set<String> keyspacesToDrop = new HashSet<String>();

    for (DecoratedKey key : leftToProcess) {
        MapDifference.ValueDifference<ColumnFamily> valueDiff = modifiedEntries.get(key);

        ColumnFamily newState = valueDiff.rightValue();

        if (newState.isEmpty())
            keyspacesToDrop.add(AsciiType.instance.getString(key.key));
        else
            updateKeyspace(KSMetaData.fromSchema(new Row(key, newState), Collections.<CFMetaData>emptyList()));
    }

    return keyspacesToDrop;
}

From source file:com.ikanow.aleph2.graph.titan.utils.TitanGraphBuildingUtils.java

/** (3/3) Merges user generated edges/vertices with the ones already in the system 
 * @param tx
 * @param config
 * @param security_service
 * @param logger
 * @param maybe_merger
 * @param mergeable
 */
public static void buildGraph_handleMerge(final TitanTransaction tx, final GraphSchemaBean config,
        final Tuple2<String, ISecurityService> security_service, final Optional<IBucketLogger> logger,
        final MutableStatsBean mutable_stats, final Collection<ObjectNode> mutable_new_vertex_keys,
        final Optional<Tuple2<IEnrichmentBatchModule, GraphMergeEnrichmentContext>> maybe_merger,
        final DataBucketBean bucket,
        final Stream<Tuple4<ObjectNode, List<ObjectNode>, List<ObjectNode>, List<Vertex>>> mergeable) {
    final org.apache.tinkerpop.shaded.jackson.databind.ObjectMapper titan_mapper = tx.io(IoCore.graphson())
            .mapper().create().createMapper();
    final Multimap<JsonNode, Edge> mutable_existing_edge_endpoint_store = LinkedHashMultimap.create(); //(lazy simple way of handling 1.3/2)
    final Map<ObjectNode, Vertex> mutable_per_merge_cached_vertices = new HashMap<>();

    mergeable.forEach(t4 -> {

        //TODO (ALEPH-15): handling properties: add new properties and:
        // remove any properties of any vertex/edge over which the user does not have read permission .. and then re-combine later

        final ObjectNode key = t4._1();
        final List<ObjectNode> vertices = t4._2();
        final List<ObjectNode> edges = t4._3();
        final List<Vertex> existing_vertices = t4._4();
        mutable_stats.vertex_matches_found += existing_vertices.size();

        // 1) First step is to sort out the _vertices_, here's the cases:

        // 1.1) If there's no matching vertices then create a new vertex and get the id (via a merge if finalize is set)
        //      (overwrite the _id then map to a Vertex)
        // 1.2) If there are >0 matching vertices (and only one incoming vertex) then we run a merge in which the user "has to do" the following:
        // 1.2.a) pick the winning vertex (or emit the current one to create a "duplicate node"?)
        // 1.2.a.1) (Allow user to delete the others if he has permission, by the usual emit "id" only - but don't automatically do it because it gets complicated what to do with the other _bs)
        // 1.2.b) copy any properties from the original objects into the winner and remove any so-desired properties

        final long prev_created = mutable_stats.vertices_created; //(nasty hack, see below)

        final Optional<Vertex> maybe_vertex_winner = invokeUserMergeCode(tx, config, security_service, logger,
                maybe_merger, titan_mapper, mutable_stats, Vertex.class, bucket.full_name(), key, vertices,
                existing_vertices, Collections.emptyMap()).stream().findFirst();

        maybe_vertex_winner.ifPresent(vertex_winner -> {
            mutable_per_merge_cached_vertices.put(key, vertex_winner);

            //(slightly nasty hack, use stats to see if a vertex was created vs updated...)
            if (mutable_stats.vertices_created > prev_created) {
                mutable_new_vertex_keys.add(key);
            }

            // 1.3) Tidy up (mutate) the edges            

            // 1.3.1) Make a store of all the existing edges (won't worry about in/out, it will sort itself out)

            Stream.of(Optionals.streamOf(vertex_winner.edges(Direction.IN), false),
                    Optionals.streamOf(vertex_winner.edges(Direction.OUT), false),
                    Optionals.streamOf(vertex_winner.edges(Direction.BOTH), false)).flatMap(__ -> __)
                    .forEach(e -> {
                        mutable_existing_edge_endpoint_store.put(key, e);
                    });

            // 1.3.2) Handle incoming edges:

            final Map<ObjectNode, List<ObjectNode>> grouped_edges = finalEdgeGrouping(key, vertex_winner,
                    edges);

            // 2) By here we have a list of vertices and we've mutated the edges to fill in the _inV and _outV
            // 2.1) Now get the potentially matching edges from each of the selected vertices:
            // 2.1.1) If there's no matching edges (and only one incoming edge) then create a new edge (via a merge if finalize is set)
            // 2.1.2) If there are >0 matching edges then run a merge against the edges, pick the current one

            // OK now for any resolved edges (ie grouped_edges), match up with the mutable store (which will be correctly populated by construction):

            grouped_edges.entrySet().stream().forEach(kv -> {

                final Function<String, Map<Object, Edge>> getEdges = in_or_out -> Optionals
                        .ofNullable(mutable_existing_edge_endpoint_store.get(kv.getKey().get(in_or_out)))
                        .stream().filter(e -> labelMatches(kv.getKey(), e))
                        .filter(e -> isAllowed(bucket.full_name(), security_service, e)) // (check authorized)
                        .collect(Collectors.toMap(e -> e.id(), e -> e));
                final Map<Object, Edge> in_existing = getEdges.apply(GraphAnnotationBean.inV);
                final Map<Object, Edge> out_existing = getEdges.apply(GraphAnnotationBean.outV);

                final List<Edge> existing_edges = BucketUtils.isTestBucket(bucket) ? Collections.emptyList()
                        : Stream.of(
                                Maps.difference(in_existing, out_existing).entriesInCommon().values().stream(),
                                in_existing.values().stream().filter(e -> e.inVertex() == e.outVertex()) // (handle the case where an edge starts/ends at the same node)
                ).flatMap(__ -> __).collect(Collectors.toList());

                mutable_stats.edge_matches_found += existing_edges.size();

                invokeUserMergeCode(tx, config, security_service, logger, maybe_merger, titan_mapper,
                        mutable_stats, Edge.class, bucket.full_name(), kv.getKey(), kv.getValue(),
                        existing_edges, mutable_per_merge_cached_vertices);
            });
        });

    });

    //TRACE
    //System.err.println(new Date() + ": VERTICES FOUND = " + mutable_existing_vertex_store);
}

From source file:org.opentestsystem.authoring.testauth.service.impl.FileGroupServiceImpl.java

/********************************************************************************************************/

private Map<String, String[]> getPublishingRecordSearchParams(final Map<String, String[]> parameterMap) {
    final Map<String, String[]> publishingRecordParams = Maps.newHashMap();
    publishingRecordParams.putAll(Maps.difference(parameterMap, publishingRecordParams).entriesOnlyOnLeft());

    publishingRecordParams.put("currentPage", new String[] { "0" });
    publishingRecordParams.put("pageSize", new String[] { "9999" });
    publishingRecordParams.put("sortKey", new String[] { "version" });
    publishingRecordParams.remove("psychometricRecordId");
    return publishingRecordParams;
}

From source file:com.sam.moca.cluster.manager.simulator.ClusterTestUtils.java

/**
 * Make sure that cache contents are the same. If not, generate a nice
 * message telling what went wrong.
 * @param cacheName
 * @param compareAgainstMap the map that serves as the "correct" map
 * @param nodeCacheContents the map that could be wrong that we are checking
 */
void assertMapContentsEqual(String cacheName, final Map<Object, Object> compareAgainstMap,
        final Map<Object, Object> nodeCacheContents) {
    final StringBuilder err = new StringBuilder();
    if (!compareAgainstMap.equals(nodeCacheContents)) {
        err.append("DIFFERENT CACHE CONTENTS FOR CACHE " + cacheName + "! ");
        final MapDifference<Object, Object> d = Maps.difference(nodeCacheContents, compareAgainstMap);

        final Map<Object, Object> onlyLeft = d.entriesOnlyOnRight();
        if (onlyLeft.size() > 0) {
            err.append("MAP SHOULD HAVE INCLUDED: {");
            for (Map.Entry<Object, Object> entry : onlyLeft.entrySet()) {
                err.append("[" + entry.getKey() + "|" + entry.getValue() + "]");
            }
            err.append("} ");
        }

        final Map<Object, Object> onlyRight = d.entriesOnlyOnLeft();
        if (onlyRight.size() > 0) {
            err.append("MAP INCLUDED EXTRA: {");
            for (Map.Entry<Object, Object> entry : onlyRight.entrySet()) {
                err.append("[" + entry.getKey() + "|" + entry.getValue() + "]");
            }
            err.append("} ");
        }

        final Map<Object, ValueDifference<Object>> diff = d.entriesDiffering();
        if (diff.size() > 0) {
            for (Map.Entry<Object, ValueDifference<Object>> e : diff.entrySet()) {
                err.append("KEY {" + e.getKey() + "} HAD INCORRECT VALUE: {" + e.getValue().rightValue()
                        + "}, expected {" + e.getValue().leftValue() + "} ");
            }
        }

        if (err.length() > 0) {
            writeLineWithDate(err.toString());
            throw new AssertionError(err.toString());
        }
    }
}
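
The variable names in the snippet above are easy to misread: because the map being checked is passed as the left argument and the reference map as the right, entriesOnlyOnRight() holds entries the checked cache is missing and entriesOnlyOnLeft() holds its extras. A small sketch of that orientation (cache contents are invented):

import java.util.Map;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;

public class CacheDiffOrientationSketch {
    public static void main(String[] args) {
        Map<String, Integer> checked = ImmutableMap.of("a", 1, "c", 3);   // map under test
        Map<String, Integer> reference = ImmutableMap.of("a", 1, "b", 2); // the "correct" map

        MapDifference<String, Integer> d = Maps.difference(checked, reference);
        System.out.println("missing: " + d.entriesOnlyOnRight()); // missing: {b=2}
        System.out.println("extra: " + d.entriesOnlyOnLeft());    // extra: {c=3}
    }
}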

From source file:org.apache.druid.indexing.materializedview.MaterializedViewSupervisor.java

/**
 * Find information about the intervals in which derived dataSource data should be rebuilt.
 * The information includes the version and DataSegment list of an interval.
 * The intervals include: in the interval,
 *  1) baseDataSource has data, but the derivedDataSource does not;
 *  2) version of derived segments isn't the max(created_date) of all base segments;
 *
 *  Drop the segments of the intervals in which derivedDataSource has data, but baseDataSource does not.
 *
 * @return the left part of Pair: interval -> version, and the right part: interval -> DataSegment list.
 *          Version and DataSegment list can be used to create HadoopIndexTask.
 *          Derived datasource data in all these intervals need to be rebuilt. 
 */
@VisibleForTesting
Pair<SortedMap<Interval, String>, Map<Interval, List<DataSegment>>> checkSegments() {
    // Pair<interval -> version, interval -> list<DataSegment>>
    Pair<Map<Interval, String>, Map<Interval, List<DataSegment>>> derivativeSegmentsSnapshot = getVersionAndBaseSegments(
            metadataStorageCoordinator.getUsedSegmentsForInterval(dataSource, ALL_INTERVAL));
    // Pair<interval -> max(created_date), interval -> list<DataSegment>>
    Pair<Map<Interval, String>, Map<Interval, List<DataSegment>>> baseSegmentsSnapshot = getMaxCreateDateAndBaseSegments(
            metadataStorageCoordinator.getUsedSegmentAndCreatedDateForInterval(spec.getBaseDataSource(),
                    ALL_INTERVAL));
    // baseSegments are used to create HadoopIndexTask
    Map<Interval, List<DataSegment>> baseSegments = baseSegmentsSnapshot.rhs;
    Map<Interval, List<DataSegment>> derivativeSegments = derivativeSegmentsSnapshot.rhs;
    // use max created_date of base segments as the version of derivative segments
    Map<Interval, String> maxCreatedDate = baseSegmentsSnapshot.lhs;
    Map<Interval, String> derivativeVersion = derivativeSegmentsSnapshot.lhs;
    SortedMap<Interval, String> sortedToBuildInterval = Maps
            .newTreeMap(Comparators.inverse(Comparators.intervalsByStartThenEnd()));
    // find the intervals to drop and to build
    MapDifference<Interval, String> difference = Maps.difference(maxCreatedDate, derivativeVersion);
    Map<Interval, String> toBuildInterval = Maps.newHashMap(difference.entriesOnlyOnLeft());
    Map<Interval, String> toDropInterval = Maps.newHashMap(difference.entriesOnlyOnRight());
    // if some intervals are in running tasks and the versions are the same, remove it from toBuildInterval
    // if some intervals are in running tasks, but the versions are different, stop the task. 
    for (Interval interval : runningVersion.keySet()) {
        if (toBuildInterval.containsKey(interval)
                && toBuildInterval.get(interval).equals(runningVersion.get(interval))) {
            toBuildInterval.remove(interval);

        } else if (toBuildInterval.containsKey(interval)
                && !toBuildInterval.get(interval).equals(runningVersion.get(interval))) {
            if (taskMaster.getTaskQueue().isPresent()) {
                taskMaster.getTaskQueue().get().shutdown(runningTasks.get(interval).getId());
                runningTasks.remove(interval);
            }
        }
    }
    // drop derivative segments which interval equals the interval in toDeleteBaseSegments 
    for (Interval interval : toDropInterval.keySet()) {
        for (DataSegment segment : derivativeSegments.get(interval)) {
            segmentManager.removeSegment(dataSource, segment.getIdentifier());
        }
    }
    // data of the latest interval will be built first.
    sortedToBuildInterval.putAll(toBuildInterval);
    return new Pair<>(sortedToBuildInterval, baseSegments);
}

From source file:co.cask.cdap.internal.app.store.DefaultStore.java

@Override
public List<ProgramSpecification> getDeletedProgramSpecifications(final Id.Application id,
        ApplicationSpecification appSpec) {

    ApplicationMeta existing = txnl
            .executeUnchecked(new TransactionExecutor.Function<AppMds, ApplicationMeta>() {
                @Override
                public ApplicationMeta apply(AppMds mds) throws Exception {
                    return mds.apps.getApplication(id.getNamespaceId(), id.getId());
                }
            });

    List<ProgramSpecification> deletedProgramSpecs = Lists.newArrayList();

    if (existing != null) {
        ApplicationSpecification existingAppSpec = existing.getSpec();

        ImmutableMap<String, ProgramSpecification> existingSpec = new ImmutableMap.Builder<String, ProgramSpecification>()
                .putAll(existingAppSpec.getMapReduce()).putAll(existingAppSpec.getSpark())
                .putAll(existingAppSpec.getWorkflows()).putAll(existingAppSpec.getFlows())
                .putAll(existingAppSpec.getServices()).putAll(existingAppSpec.getWorkers()).build();

        ImmutableMap<String, ProgramSpecification> newSpec = new ImmutableMap.Builder<String, ProgramSpecification>()
                .putAll(appSpec.getMapReduce()).putAll(appSpec.getSpark()).putAll(appSpec.getWorkflows())
                .putAll(appSpec.getFlows()).putAll(appSpec.getServices()).putAll(appSpec.getWorkers()).build();

        MapDifference<String, ProgramSpecification> mapDiff = Maps.difference(existingSpec, newSpec);
        deletedProgramSpecs.addAll(mapDiff.entriesOnlyOnLeft().values());
    }

    return deletedProgramSpecs;
}
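
The idiom above, diffing the stored specification against the incoming one and keeping entriesOnlyOnLeft(), is a compact way to detect removals. A reduced sketch with plain maps (program names are invented):

import java.util.Map;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;

public class DeletedProgramSketch {
    public static void main(String[] args) {
        Map<String, String> existingSpec = ImmutableMap.of("WordCountMR", "mapreduce", "LogFlow", "flow");
        Map<String, String> newSpec = ImmutableMap.of("WordCountMR", "mapreduce");

        // Keys present in the old spec but absent from the new one were deleted.
        Map<String, String> deleted = Maps.difference(existingSpec, newSpec).entriesOnlyOnLeft();
        System.out.println(deleted.keySet()); // [LogFlow]
    }
}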

From source file:org.icgc.dcc.portal.repository.DonorRepository.java

private static List<Term> buildTermAggList(@NonNull final Terms termsFacet,
        @NonNull Map<String, Map<String, Integer>> baseline) {
    val results = ImmutableMap.<String, Integer>builder();

    // First we populate with the terms facets from the search response
    termsFacet.getBuckets().stream().forEach(entry -> {
        results.put(entry.getKey(), (int) entry.getDocCount());
    });

    val facetName = termsFacet.getName();
    // Then augment the result in case of missing terms in the response.
    if (baseline.containsKey(facetName)) {
        val difference = Maps.difference(results.build(), baseline.get(facetName)).entriesOnlyOnRight();

        results.putAll(difference);
    }

    val termFacetList = transform(results.build().entrySet(),
            entry -> new Term(entry.getKey(), (long) entry.getValue()));

    return ImmutableList.copyOf(termFacetList);
}

From source file:gobblin.runtime.JobState.java

private void getTaskStateWithCommonAndSpecWuProps(int numTaskStates, DataInput in) throws IOException {
    Properties commonWuProps = new Properties();

    for (int i = 0; i < numTaskStates; i++) {
        TaskState taskState = new TaskState();
        taskState.readFields(in);
        if (i == 0) {
            commonWuProps.putAll(taskState.getWorkunit().getProperties());
        } else {
            Properties newCommonWuProps = new Properties();
            newCommonWuProps.putAll(
                    Maps.difference(commonWuProps, taskState.getWorkunit().getProperties()).entriesInCommon());
            commonWuProps = newCommonWuProps;
        }

        this.taskStates.put(taskState.getTaskId().intern(), taskState);
    }
    ImmutableProperties immutableCommonProperties = new ImmutableProperties(commonWuProps);
    for (TaskState taskState : this.taskStates.values()) {
        Properties newSpecProps = new Properties();
        newSpecProps.putAll(Maps.difference(immutableCommonProperties, taskState.getWorkunit().getProperties())
                .entriesOnlyOnRight());
        taskState.setWuProperties(immutableCommonProperties, newSpecProps);
    }
}
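
The pattern above iteratively intersects work-unit properties with entriesInCommon() to find the shared ones, then uses entriesOnlyOnRight() to recover each task's specific overrides. A stripped-down sketch with plain maps (property names and values are invented):

import java.util.Map;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;

public class CommonVsSpecificPropsSketch {
    public static void main(String[] args) {
        Map<String, String> task1 = ImmutableMap.of("fs.uri", "hdfs://nn:8020", "source.table", "users");
        Map<String, String> task2 = ImmutableMap.of("fs.uri", "hdfs://nn:8020", "source.table", "orders");

        // Entries present with identical values in both maps: the common properties.
        Map<String, String> common = Maps.difference(task1, task2).entriesInCommon();
        // Entries in task2 but not in the common set: that task's specific properties.
        Map<String, String> task2Specific = Maps.difference(common, task2).entriesOnlyOnRight();

        System.out.println(common);        // {fs.uri=hdfs://nn:8020}
        System.out.println(task2Specific); // {source.table=orders}
    }
}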

From source file:org.apache.cassandra.db.DefsTable.java

private static void mergeColumnFamilies(Map<DecoratedKey, ColumnFamily> old,
        Map<DecoratedKey, ColumnFamily> updated) throws ConfigurationException, IOException {
    // calculate the difference between old and new states (note that entriesOnlyOnLeft() will always be empty)
    MapDifference<DecoratedKey, ColumnFamily> diff = Maps.difference(old, updated);

    // check if any new Keyspaces with ColumnFamilies were added.
    for (Map.Entry<DecoratedKey, ColumnFamily> entry : diff.entriesOnlyOnRight().entrySet()) {
        ColumnFamily cfAttrs = entry.getValue();

        if (!cfAttrs.isEmpty()) {
            Map<String, CFMetaData> cfDefs = KSMetaData
                    .deserializeColumnFamilies(new Row(entry.getKey(), cfAttrs));

            for (CFMetaData cfDef : cfDefs.values())
                addColumnFamily(cfDef);
        }
    }

    // deal with modified ColumnFamilies (remember that all of the keyspace nested ColumnFamilies are put to the single row)
    Map<DecoratedKey, MapDifference.ValueDifference<ColumnFamily>> modifiedEntries = diff.entriesDiffering();

    for (DecoratedKey keyspace : modifiedEntries.keySet()) {
        MapDifference.ValueDifference<ColumnFamily> valueDiff = modifiedEntries.get(keyspace);

        ColumnFamily prevValue = valueDiff.leftValue(); // state before external modification
        ColumnFamily newValue = valueDiff.rightValue(); // updated state

        Row newRow = new Row(keyspace, newValue);

        if (prevValue.isEmpty()) // whole keyspace was deleted and now it's re-created
        {
            for (CFMetaData cfm : KSMetaData.deserializeColumnFamilies(newRow).values())
                addColumnFamily(cfm);
        } else if (newValue.isEmpty()) // whole keyspace is deleted
        {
            for (CFMetaData cfm : KSMetaData.deserializeColumnFamilies(new Row(keyspace, prevValue)).values())
                dropColumnFamily(cfm.ksName, cfm.cfName);
        } else // has modifications in the nested ColumnFamilies, need to perform nested diff to determine what was really changed
        {
            String ksName = AsciiType.instance.getString(keyspace.key);

            Map<String, CFMetaData> oldCfDefs = new HashMap<String, CFMetaData>();
            for (CFMetaData cfm : Schema.instance.getKSMetaData(ksName).cfMetaData().values())
                oldCfDefs.put(cfm.cfName, cfm);

            Map<String, CFMetaData> newCfDefs = KSMetaData.deserializeColumnFamilies(newRow);

            MapDifference<String, CFMetaData> cfDefDiff = Maps.difference(oldCfDefs, newCfDefs);

            for (CFMetaData cfDef : cfDefDiff.entriesOnlyOnRight().values())
                addColumnFamily(cfDef);

            for (CFMetaData cfDef : cfDefDiff.entriesOnlyOnLeft().values())
                dropColumnFamily(cfDef.ksName, cfDef.cfName);

            for (MapDifference.ValueDifference<CFMetaData> cfDef : cfDefDiff.entriesDiffering().values())
                updateColumnFamily(cfDef.rightValue());
        }
    }
}