Example usage for com.google.common.collect Sets difference

List of usage examples for com.google.common.collect Sets difference

Introduction

On this page you can find example usages of com.google.common.collect Sets difference.

Prototype

public static <E> SetView<E> difference(final Set<E> set1, final Set<?> set2) 

Source Link

Document

Returns an unmodifiable view of the difference of two sets. The returned set contains all elements that are contained by set1 and not contained by set2; elements of set2 that are not in set1 are simply ignored.
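
Before the project examples below, here is a minimal, self-contained sketch (the class and variable names are illustrative, not taken from any of the quoted projects) showing what the returned SetView contains and how to take a snapshot of it:

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import java.util.Set;

public class SetsDifferenceDemo {
    public static void main(String[] args) {
        Set<String> first = ImmutableSet.of("a", "b", "c");
        Set<String> second = ImmutableSet.of("b", "c", "d");

        // Elements of first that are not in second: prints [a]
        Sets.SetView<String> diff = Sets.difference(first, second);
        System.out.println(diff);

        // The result is an unmodifiable view backed by the input sets;
        // take an immutable snapshot when the result must not change with them.
        Set<String> snapshot = diff.immutableCopy();
        System.out.println(snapshot);
    }
}

Because the result is a live view, the project examples below either iterate it right away or call immutableCopy() (as in the BookKeeper example) before storing it.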

Usage

From source file:io.v.impl.google.lib.discovery.DeviceCache.java

/**
 * Saves the set of advertisements for this deviceId and hash
 *
 * @param hash the hash provided by the advertisement.
 * @param advs the advertisements exposed by the device.
 * @param deviceId the id of the device.
 */
public void saveDevice(long hash, Set<Advertisement> advs, String deviceId) {
    CacheEntry entry = new CacheEntry(advs, hash, deviceId);
    synchronized (this) {
        CacheEntry oldEntry = knownIds.get(deviceId);
        Set<Advertisement> oldValues = null;
        if (oldEntry != null) {
            cachedDevices.remove(oldEntry.hash);
            knownIds.remove(oldEntry.deviceId);
            oldValues = oldEntry.advertisements;
        } else {
            oldValues = new HashSet<>();
        }
        Set<Advertisement> removed = Sets.difference(oldValues, advs);
        for (Advertisement adv : removed) {
            UUID uuid = UUIDUtil.UuidToUUID(adv.getServiceUuid());
            adv.setLost(true);
            knownServices.remove(uuid, adv);
            handleUpdate(adv);
        }

        Set<Advertisement> added = Sets.difference(advs, oldValues);
        for (Advertisement adv : added) {
            UUID uuid = UUIDUtil.UuidToUUID(adv.getServiceUuid());
            knownServices.put(uuid, adv);
            handleUpdate(adv);
        }
        cachedDevices.put(hash, entry);
        CacheEntry oldDeviceEntry = knownIds.get(deviceId);
        if (oldDeviceEntry != null) {
            // Delete the old hash value.
            cachedDevices.remove(hash);
        }
        knownIds.put(deviceId, entry);
    }
}

From source file:com.google.devtools.cyclefinder.ReferenceGraph.java

private void addSubtypeEdges() {
    SetMultimap<String, String> subtypes = HashMultimap.create();
    for (ITypeBinding type : allTypes.values()) {
        collectSubtypes(type.getKey(), type, subtypes);
    }
    for (String type : allTypes.keySet()) {
        for (Edge e : ImmutableList.copyOf(edges.get(type))) {
            Set<String> targetSubtypes = subtypes.get(e.getTarget().getKey());
            Set<String> whitelistKeys = Sets.newHashSet();
            IVariableBinding field = e.getField();
            for (String subtype : targetSubtypes) {
                ITypeBinding subtypeBinding = allTypes.get(subtype);
                if ((field != null && field.isField()
                        && whitelist.isWhitelistedTypeForField(field, subtypeBinding))
                        || whitelist.containsType(subtypeBinding)) {
                    whitelistKeys.add(subtype);
                    whitelistKeys.addAll(subtypes.get(subtype));
                }
            }
            for (String subtype : Sets.difference(targetSubtypes, whitelistKeys)) {
                addEdge(Edge.newSubtypeEdge(e, allTypes.get(subtype)));
            }
        }
    }
}

From source file:io.crate.analyze.relations.RelationSplitter.java

private void processOutputs() {
    FieldCollectingVisitor.Context context = new FieldCollectingVisitor.Context(specs.size());

    // declare all symbols from the remaining order by as required for query
    if (remainingOrderBy != null) {
        OrderBy orderBy = remainingOrderBy.orderBy();
        requiredForQuery.addAll(orderBy.orderBySymbols());
        // also add the symbols used during the query phase
        FieldCollectingVisitor.INSTANCE.process(orderBy.orderBySymbols(), context);
    }

    if (querySpec.where().hasQuery()) {
        FieldCollectingVisitor.INSTANCE.process(querySpec.where().query(), context);
    }

    // collect all fields from all join conditions
    FieldCollectingVisitor.INSTANCE.process(joinConditions, context);

    // set the limit and offset where possible
    Optional<Symbol> limit = querySpec.limit();
    if (limit.isPresent()) {
        Optional<Symbol> limitAndOffset = Limits.mergeAdd(limit, querySpec.offset());
        for (AnalyzedRelation rel : Sets.difference(specs.keySet(), context.fields.keySet())) {
            QuerySpec spec = specs.get(rel);
            spec.limit(limitAndOffset);
        }
    }

    // add all order by symbols to context outputs
    for (Map.Entry<AnalyzedRelation, QuerySpec> entry : specs.entrySet()) {
        if (entry.getValue().orderBy().isPresent()) {
            context.fields.putAll(entry.getKey(), entry.getValue().orderBy().get().orderBySymbols());
        }
    }

    // everything except the actual outputs is required for query
    requiredForQuery.addAll(context.fields.values());

    // capture items from the outputs
    canBeFetched = FetchFieldExtractor.process(querySpec.outputs(), context.fields);

    FieldCollectingVisitor.INSTANCE.process(querySpec.outputs(), context);

    // generate the outputs of the subSpecs
    for (Map.Entry<AnalyzedRelation, QuerySpec> entry : specs.entrySet()) {
        Collection<Symbol> fields = context.fields.get(entry.getKey());
        assert entry.getValue().outputs() == null;
        entry.getValue().outputs(new ArrayList<>(fields));
    }
}

From source file:org.apache.bookkeeper.stream.storage.impl.sc.DefaultStorageContainerController.java

@Override
public ClusterAssignmentData computeIdealState(ClusterMetadata clusterMetadata,
        ClusterAssignmentData currentState, Set<BookieSocketAddress> currentCluster) {

    if (currentCluster.isEmpty()) {
        log.info("Current cluster is empty. No alive server is found.");
        return currentState;
    }

    // 1. get current server assignments
    Map<BookieSocketAddress, Set<Long>> currentServerAssignments;
    try {
        currentServerAssignments = currentState.getServersMap().entrySet().stream()
                .collect(Collectors.toMap(e1 -> {
                    try {
                        return new BookieSocketAddress(e1.getKey());
                    } catch (UnknownHostException uhe) {
                        log.error("Invalid cluster ");
                        throw new UncheckedExecutionException(
                                "Invalid server found in current assignment map" + e1.getKey(), uhe);
                    }
                }, e2 -> e2.getValue().getContainersList().stream().collect(Collectors.toSet())));
    } catch (UncheckedExecutionException uee) {
        log.warn("Invalid cluster assignment data is found : {} - {}. Recompute assignment from empty state",
                currentState, uee.getCause().getMessage());
        currentServerAssignments = Maps.newHashMap();
    }
    Set<BookieSocketAddress> currentServersAssigned = currentServerAssignments.keySet();

    // 2. if no servers are assigned, initialize the ideal state
    if (currentServersAssigned.isEmpty()) {
        return initializeIdealState(clusterMetadata, currentCluster);
    }

    // 3. get the cluster diffs
    Set<BookieSocketAddress> serversAdded = Sets.difference(currentCluster, currentServersAssigned)
            .immutableCopy();
    Set<BookieSocketAddress> serversRemoved = Sets.difference(currentServersAssigned, currentCluster)
            .immutableCopy();

    if (serversAdded.isEmpty() && serversRemoved.isEmpty()) {
        // cluster is unchanged, assuming the current state is ideal, no re-assignment is required.
        return currentState;
    }

    log.info(
            "Storage container controller detects cluster changed:\n"
                    + "\t {} servers added: {}\n\t {} servers removed: {}",
            serversAdded.size(), serversAdded, serversRemoved.size(), serversRemoved);

    // 4. compute the containers owned by the removed servers; these containers need to be reassigned.
    Set<Long> containersToReassign = currentServerAssignments.entrySet().stream()
            .filter(serverEntry -> !currentCluster.contains(serverEntry.getKey()))
            .flatMap(serverEntry -> serverEntry.getValue().stream()).collect(Collectors.toSet());

    // 5. use an ordered set as priority deque to sort the servers by the number of assigned containers
    TreeSet<Pair<BookieSocketAddress, LinkedList<Long>>> assignmentQueue = new TreeSet<>(
            new ServerAssignmentDataComparator());
    for (Map.Entry<BookieSocketAddress, Set<Long>> entry : currentServerAssignments.entrySet()) {
        BookieSocketAddress host = entry.getKey();

        if (!currentCluster.contains(host)) {
            if (log.isTraceEnabled()) {
                log.trace("Host {} is not in current cluster anymore", host);
            }
            continue;
        } else {
            if (log.isTraceEnabled()) {
                log.trace("Adding host {} to assignment queue", host);
            }
            assignmentQueue.add(Pair.of(host, Lists.newLinkedList(entry.getValue())));
        }
    }

    // 6. add new servers
    for (BookieSocketAddress server : serversAdded) {
        assignmentQueue.add(Pair.of(server, Lists.newLinkedList()));
    }

    // 7. assign the containers that need to be reassigned.
    for (Long containerId : containersToReassign) {
        Pair<BookieSocketAddress, LinkedList<Long>> leastLoadedServer = assignmentQueue.pollFirst();
        leastLoadedServer.getValue().add(containerId);
        assignmentQueue.add(leastLoadedServer);
    }

    // 8. rebalance the containers if needed
    int diffAllowed;
    if (assignmentQueue.size() > clusterMetadata.getNumStorageContainers()) {
        diffAllowed = 1;
    } else {
        diffAllowed = clusterMetadata.getNumStorageContainers() % assignmentQueue.size() == 0 ? 0 : 1;
    }

    Pair<BookieSocketAddress, LinkedList<Long>> leastLoaded = assignmentQueue.first();
    Pair<BookieSocketAddress, LinkedList<Long>> mostLoaded = assignmentQueue.last();
    while (mostLoaded.getValue().size() - leastLoaded.getValue().size() > diffAllowed) {
        leastLoaded = assignmentQueue.pollFirst();
        mostLoaded = assignmentQueue.pollLast();

        // move container from mostLoaded to leastLoaded
        Long containerId = mostLoaded.getValue().removeFirst();
        // add the container to the end to avoid balancing this container again.
        leastLoaded.getValue().addLast(containerId);

        assignmentQueue.add(leastLoaded);
        assignmentQueue.add(mostLoaded);

        leastLoaded = assignmentQueue.first();
        mostLoaded = assignmentQueue.last();
    }

    // 9. the new ideal state is computed, finalize it
    Map<String, ServerAssignmentData> newAssignmentMap = Maps.newHashMap();
    assignmentQueue.forEach(assignment -> newAssignmentMap.put(assignment.getKey().toString(),
            ServerAssignmentData.newBuilder().addAllContainers(assignment.getValue()).build()));
    return ClusterAssignmentData.newBuilder().putAllServers(newAssignmentMap).build();
}

From source file:cz.cuni.mff.ms.brodecva.botnicek.ide.design.system.model.DefaultSystemGraph.java

private static void checkForDependingArcs(final Arc removed, final Map<EnterNode, Set<RecurentArc>> references,
        final Node from, final Node newFrom) throws IllegalArgumentException {
    final Set<RecurentArc> referring = references.get(from);

    if (Presence.isAbsent(referring) || newFrom.equals(from)) {
        return;
    }

    final Set<RecurentArc> referringWithoutRemoved = Sets.difference(referring, ImmutableSet.of(removed));

    final boolean refersOnlyToItself = referringWithoutRemoved.isEmpty();
    if (refersOnlyToItself) {
        return;
    }

    final Network fromNetwork = from.getNetwork();
    final RecurentArc firstReferring = referringWithoutRemoved.iterator().next();
    final Network firstReferringNetwork = firstReferring.getNetwork();

    throw new IllegalArgumentException(ExceptionLocalizer.print("ArcRemovalForbidden", from.getName(),
            removed.getName(), fromNetwork.getName().getText(), firstReferring.getName(),
            firstReferringNetwork.getName().getText()));
}

From source file:com.opengamma.bbg.referencedata.cache.AbstractInvalidFieldCachingReferenceDataProvider.java

/**
 * Examines and groups the request using the known invalid fields.
 *
 * @param request  the request, not null
 * @param invalidFieldsByIdentifier  the invalid fields, keyed by identifier, not null
 * @return the map of field-set to identifier-set, not null
 */
protected Map<Set<String>, Set<String>> buildUnderlyingRequestGroups(ReferenceDataProviderGetRequest request,
        Map<String, Set<String>> invalidFieldsByIdentifier) {
    Map<Set<String>, Set<String>> result = Maps.newHashMap();
    for (String identifier : request.getIdentifiers()) {
        // select known invalid fields for the identifier
        Set<String> invalidFields = invalidFieldsByIdentifier.get(identifier);

        // calculate the missing fields that must be queried from the underlying
        Set<String> missingFields = null;
        if (invalidFields == null) {
            missingFields = Sets.newHashSet(request.getFields());
        } else {
            missingFields = Sets.difference(request.getFields(), invalidFields);
        }

        // build the grouped result map, keyed from field-set to identifier-set
        Set<String> resultIdentifiers = result.get(missingFields);
        if (resultIdentifiers == null) {
            resultIdentifiers = Sets.newTreeSet();
            result.put(missingFields, resultIdentifiers);
        }
        resultIdentifiers.add(identifier);
    }
    return result;
}

From source file:org.eclipse.sirius.business.internal.movida.registry.StatusUpdater.java

private void checkAllActualDependenciesAreAvailable(Entry entry) {
    Set<URI> actualPhysical = ImmutableSet
            .copyOf(Iterables.transform(entry.getActualDependencies(), new Function<URI, URI>() {
            @Override
                public URI apply(URI from) {
                    return resourceSet.getURIConverter().normalize(from);
                }
            }));
    Set<URI> availablePhysical = ImmutableSet
            .copyOf(Iterables.transform(entries.values(), new Function<Entry, URI>() {
                @Override
                public URI apply(Entry from) {
                    return from.getResource().getURI();
                };
            }));
    Set<URI> unavailable = Sets.difference(actualPhysical, availablePhysical);
    if (!unavailable.isEmpty()) {
        entry.setState(ViewpointState.INVALID);
        Object[] data = Iterables.toArray(Iterables.transform(unavailable, Functions.toStringFunction()),
                String.class);
        addDiagnostic(entry, Diagnostic.ERROR, PHYSICAL_DEPENDENCY_UNAVAILABLE,
                "Sirius definition depends on resources not available.", data); //$NON-NLS-1$
    }
}

From source file:org.sakaiproject.nakamura.user.counts.ContentCountChangeListener.java

public void handleEvent(Event event) {
    Session adminSession = null;
    try {
        adminSession = repository.loginAdministrative();
        ContentManager contentManager = adminSession.getContentManager();
        LOG.debug("handleEvent() " + dumpEvent(event));
        // The members of a group are defined in the membership, so simply use that value, no need to increment or decrement.
        String path = (String) event.getProperty(StoreListener.PATH_PROPERTY);
        Content content = contentManager.get(path);
        @SuppressWarnings("unchecked")
        Map<String, Object> beforeEvent = (Map<String, Object>) event
                .getProperty(StoreListener.BEFORE_EVENT_PROPERTY);
        // content will be null when listening to DELETE topic as it has been deleted before reaching here
        String resourceType = null;
        if (content != null) {
            resourceType = content.hasProperty("sling:resourceType")
                    ? (String) content.getProperty("sling:resourceType")
                    : null;
        } else if (beforeEvent != null) {
            resourceType = beforeEvent.containsKey("sling:resourceType")
                    ? (String) beforeEvent.get("sling:resourceType")
                    : null;
        }
        if ("sakai/pooled-content".equals(resourceType)) {
            // this either is or was a content node.
            if (beforeEvent != null && content != null) {
                Set<String> before = Sets.newHashSet();
                before.addAll(ImmutableList.copyOf(StorageClientUtils
                        .nonNullStringArray((String[]) beforeEvent.get("sakai:pooled-content-viewer"))));
                before.addAll(ImmutableList.copyOf(StorageClientUtils
                        .nonNullStringArray((String[]) beforeEvent.get("sakai:pooled-content-manager"))));
                Set<String> after = Sets.newHashSet(StorageClientUtils
                        .nonNullStringArray((String[]) content.getProperty("sakai:pooled-content-viewer")));
                after.addAll(ImmutableList.copyOf(StorageClientUtils
                        .nonNullStringArray((String[]) content.getProperty("sakai:pooled-content-manager"))));
                before = Sets.difference(before, CountProvider.IGNORE_AUTHIDS);
                after = Sets.difference(after, CountProvider.IGNORE_AUTHIDS);
                Set<String> removed = Sets.difference(before, after);
                Set<String> added = Sets.difference(after, before);
                LOG.info("Path:{} Before:{} After:{} Added:{} Removed:{} ",
                        new Object[] { path, before, after, added, removed });
                // Only increase count if content upload has completed (KERN-2041)
                if (content.getProperty("sakai:pooled-content-file-name") != null) {
                    for (String userId : added) {
                        if (!CountProvider.IGNORE_AUTHIDS.contains(userId)) {
                            inc(userId, UserConstants.CONTENT_ITEMS_PROP);
                        }
                    }
                }
                for (String userId : removed) {
                    if (!CountProvider.IGNORE_AUTHIDS.contains(userId)) {
                        dec(userId, UserConstants.CONTENT_ITEMS_PROP);
                    }
                }
            } // in a DELETE topic the content is null because it has already been deleted; the removed set is just the users in the beforeEvent map
            else if ("org/sakaiproject/nakamura/lite/content/DELETE".equals(event.getTopic())
                    && beforeEvent != null) {
                Set<String> removed = Sets.newHashSet(StorageClientUtils
                        .nonNullStringArray((String[]) beforeEvent.get("sakai:pooled-content-viewer")));
                removed.addAll(ImmutableList.copyOf(StorageClientUtils
                        .nonNullStringArray((String[]) beforeEvent.get("sakai:pooled-content-manager"))));
                for (String userId : removed) {
                    if (!CountProvider.IGNORE_AUTHIDS.contains(userId)) {
                        dec(userId, UserConstants.CONTENT_ITEMS_PROP);
                    }
                }
            }
        }
    } catch (StorageClientException e) {
        LOG.debug("Failed to update count ", e);
    } catch (AccessDeniedException e) {
        LOG.debug("Failed to update count ", e);
    }
}

From source file:com.google.errorprone.bugpatterns.MissingTestCall.java

@Override
public Description matchMethod(MethodTree tree, VisitorState state) {
    if (!JUnitMatchers.TEST_CASE.matches(tree, state)) {
        return NO_MATCH;
    }
    Set<MethodPairing> required = new HashSet<>();
    Set<MethodPairing> called = new HashSet<>();
    new TreePathScanner<Void, Void>() {
        @Override
        public Void visitMethodInvocation(MethodInvocationTree node, Void unused) {
            for (MethodPairing pairing : PAIRINGS) {
                if (pairing.ifCall().matches(node, state)) {
                    if (!isField(getUltimateReceiver(node))
                            || isLastStatementInBlock(state.findPathToEnclosing(StatementTree.class))) {
                        required.add(pairing);
                    }
                }
                if (pairing.mustCall().matches(node, state)) {
                    called.add(pairing);
                }
            }
            return super.visitMethodInvocation(node, null);
        }
    }.scan(state.getPath(), null);
    return Sets.difference(required, called).stream().findFirst()
            .map(p -> buildDescription(tree).setMessage(
                    String.format("%s requires a terminating method call to have any effect.", p.name()))
                    .build())
            .orElse(NO_MATCH);
}

From source file:tiger.NewScopeCalculator.java

public List<String> initialize() {
    allScopes.putAll(getExplicitScopes());

    for (NewBindingKey key : bindingsRequired) {
        if (!allScopes.containsKey(key)) {
            calculateInternal(key, Lists.<NewBindingKey>newArrayList());
        }
    }

    if (!allScopes.keySet().containsAll(bindingsRequired)) {
        errors.add(
                String.format("Scope of required keys not calculated.\nDiff: %s\nRequired: %s\nCalculated: %s",
                        Sets.difference(bindingsRequired, allScopes.keySet()), bindingsRequired, allScopes));
    }

    verifyScopes();

    if (errors.isEmpty()) {
        initialized = true;
    }

    //    messager.printMessage(Kind.NOTE, String.format("%s all scopes:", TAG));
    //    for (Map.Entry<NewBindingKey, NewScopeCalculatingInfo> entry : allScopes.entrySet()) {
    //      messager.printMessage(Kind.NOTE,
    //          String.format("%s: %s -> %s", TAG, entry.getKey(), entry.getValue()));
    //    }

    return errors;
}