Example usage for com.google.common.collect SetMultimap get

Introduction

This page lists example usages of com.google.common.collect.SetMultimap#get.

Prototype

@Override
Set<V> get(@Nullable K key);

Document

Because a SetMultimap has unique values for a given key, this method returns a Set, instead of the java.util.Collection specified in the Multimap interface.
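
Several of the examples below also lean on two related guarantees: the returned Set is a live view of the multimap, and get never returns null (an absent key yields an empty set). A minimal, self-contained sketch of that behavior, with illustrative class and variable names:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.SetMultimap;

import java.util.Set;

public class SetMultimapGetDemo {
    public static void main(String[] args) {
        SetMultimap<String, String> tagsByUser = HashMultimap.create();
        tagsByUser.put("alice", "admin");
        tagsByUser.put("alice", "admin"); // duplicate key/value pair is ignored
        tagsByUser.put("alice", "dev");

        // get returns a Set (not a plain Collection) because values per key are unique.
        Set<String> aliceTags = tagsByUser.get("alice");
        System.out.println(aliceTags); // [admin, dev] (iteration order unspecified for HashMultimap)

        // An absent key yields an empty set, never null.
        System.out.println(tagsByUser.get("bob").isEmpty()); // true

        // The returned Set is a live view: changes to it write through to the multimap.
        aliceTags.remove("dev");
        System.out.println(tagsByUser.containsEntry("alice", "dev")); // false
    }
}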

Usage

From source file:org.apache.brooklyn.feed.jmx.JmxFeed.java

@Override
protected void preStart() {
    /*
     * All actions on the JmxHelper are done async (through the poller's threading) so we don't 
     * block on start/rebind if the entity is unreachable 
     * (without this we get a 120s pause in JmxHelper.connect restarting)
     */
    final SetMultimap<NotificationFilter, JmxNotificationSubscriptionConfig<?>> notificationSubscriptions = getConfig(
            NOTIFICATION_SUBSCRIPTIONS);
    final SetMultimap<List<?>, JmxOperationPollConfig<?>> operationPolls = getConfig(OPERATION_POLLS);
    final SetMultimap<String, JmxAttributePollConfig<?>> attributePolls = getConfig(ATTRIBUTE_POLLS);

    getPoller().submit(new Callable<Void>() {
        public Void call() {
            getHelper().connect(getConfig(JMX_CONNECTION_TIMEOUT));
            return null;
        }

        @Override
        public String toString() {
            return "Connect JMX " + getHelper().getUrl();
        }
    });

    for (final NotificationFilter filter : notificationSubscriptions.keySet()) {
        getPoller().submit(new Callable<Void>() {
            public Void call() {
                // TODO Could config.getObjectName have wildcards? Is this code safe?
                Set<JmxNotificationSubscriptionConfig<?>> configs = notificationSubscriptions.get(filter);
                NotificationListener listener = registerNotificationListener(configs);
                ObjectName objectName = Iterables.get(configs, 0).getObjectName();
                notificationListeners.put(objectName, listener);
                return null;
            }

            @Override
            public String toString() {
                return "Register JMX notifications: " + notificationSubscriptions.get(filter);
            }
        });
    }

    // Setup polling of sensors
    for (final String jmxAttributeName : attributePolls.keys()) {
        registerAttributePoller(attributePolls.get(jmxAttributeName));
    }

    // Setup polling of operations
    for (final List<?> operationIdentifier : operationPolls.keys()) {
        registerOperationPoller(operationPolls.get(operationIdentifier));
    }
}

From source file:brooklyn.event.feed.jmx.JmxFeed.java

@Override
protected void preStart() {
    /*
     * All actions on the JmxHelper are done async (through the poller's threading) so we don't 
     * block on start for a long time (e.g. if the entity is not contactable and doing a rebind 
     * on restart of brooklyn). Without that, one gets a 120 second pause with it stuck in a 
     * stack trace like:
     * 
     *      at brooklyn.event.feed.jmx.JmxHelper.sleep(JmxHelper.java:640)
     *      at brooklyn.event.feed.jmx.JmxHelper.connect(JmxHelper.java:320)
     *      at brooklyn.event.feed.jmx.JmxFeed.preStart(JmxFeed.java:172)
     *      at brooklyn.event.feed.AbstractFeed.start(AbstractFeed.java:68)
     *      at brooklyn.event.feed.jmx.JmxFeed$Builder.build(JmxFeed.java:119)
     *      at brooklyn.entity.java.JavaAppUtils.connectMXBeanSensors(JavaAppUtils.java:109)
     *      at brooklyn.entity.java.VanillaJavaApp.connectSensors(VanillaJavaApp.java:97)
     *      at brooklyn.entity.basic.SoftwareProcessImpl.callRebindHooks(SoftwareProcessImpl.java:189)
     *      at brooklyn.entity.basic.SoftwareProcessImpl.rebind(SoftwareProcessImpl.java:235)
     *      ...
     *      at brooklyn.entity.rebind.RebindManagerImpl.rebind(RebindManagerImpl.java:184)
     */
    final SetMultimap<NotificationFilter, JmxNotificationSubscriptionConfig<?>> notificationSubscriptions = getConfig(
            NOTIFICATION_SUBSCRIPTIONS);
    final SetMultimap<List<?>, JmxOperationPollConfig<?>> operationPolls = getConfig(OPERATION_POLLS);
    final SetMultimap<String, JmxAttributePollConfig<?>> attributePolls = getConfig(ATTRIBUTE_POLLS);

    getPoller().submit(new Callable<Void>() {
        public Void call() {
            getHelper().connect(getConfig(JMX_CONNECTION_TIMEOUT));
            return null;
        }

        @Override
        public String toString() {
            return "Connect JMX " + getHelper().getUrl();
        }
    });

    for (final NotificationFilter filter : notificationSubscriptions.keySet()) {
        getPoller().submit(new Callable<Void>() {
            public Void call() {
                // TODO Could config.getObjectName have wildcards? Is this code safe?
                Set<JmxNotificationSubscriptionConfig<?>> configs = notificationSubscriptions.get(filter);
                NotificationListener listener = registerNotificationListener(configs);
                ObjectName objectName = Iterables.get(configs, 0).getObjectName();
                notificationListeners.put(objectName, listener);
                return null;
            }

            @Override
            public String toString() {
                return "Register JMX notifications: " + notificationSubscriptions.get(filter);
            }
        });
    }

    // Setup polling of sensors
    for (final String jmxAttributeName : attributePolls.keys()) {
        registerAttributePoller(attributePolls.get(jmxAttributeName));
    }

    // Setup polling of operations
    for (final List<?> operationIdentifier : operationPolls.keys()) {
        registerOperationPoller(operationPolls.get(operationIdentifier));
    }
}
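
A side note on the loops in both JmxFeed variants: keySet() yields each key once, while keys() is a Multiset that repeats a key once per mapping, so the attribute and operation loops call get(...) once per config entry rather than once per distinct key. A small sketch of the difference, using made-up names:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.SetMultimap;

public class KeysVersusKeySet {
    public static void main(String[] args) {
        SetMultimap<String, Integer> polls = HashMultimap.create();
        polls.put("attrA", 1);
        polls.put("attrA", 2);
        polls.put("attrB", 3);

        // keySet(): each key once -> attrA, attrB
        for (String key : polls.keySet()) {
            System.out.println("keySet -> " + key + " = " + polls.get(key));
        }

        // keys(): a Multiset, one occurrence per mapping -> attrA, attrA, attrB
        for (String key : polls.keys()) {
            System.out.println("keys   -> " + key + " = " + polls.get(key));
        }
    }
}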

From source file:org.xbib.tools.merge.zdb.licenseinfo.timeline.WithHoldingsAndLicensesInTimelinePipeline.java

private void indexManifestation(Manifestation m, Set<String> visited) throws IOException {
    String id = m.externalID();
    // protection against recursion (should not happen)
    if (visited.contains(id)) {
        return;
    }
    visited.add(id);
    // make sure, even across other threads, that we never index a manifestation twice
    if (service.indexed().contains(id)) {
        return;
    }
    service.indexed().add(id);

    // index this manifestation
    String tag = service.settings().get("tag");
    XContentBuilder builder = jsonBuilder();
    String docid = m.build(builder, tag, null);
    if (docid != null) {
        service.ingest().index(manifestationsIndex, manifestationsIndexType, docid, builder.string());
    }
    // volumes by date and the services for them
    Integer volumeHoldingsCount = 0;
    if (!m.getVolumesByDate().isEmpty()) {
        SetMultimap<Integer, Holding> volumesByDate;
        synchronized (m.getVolumesByDate()) {
            volumesByDate = ImmutableSetMultimap.copyOf(m.getVolumesByDate());
        }
        for (Integer date : volumesByDate.keySet()) {
            String identifier = (tag != null ? tag + "." : "") + m.externalID()
                    + (date != -1 ? "." + date : "");
            Set<Holding> holdings = volumesByDate.get(date);
            if (holdings != null && !holdings.isEmpty()) {
                builder = jsonBuilder();
                docid = m.buildHoldingsByDate(builder, tag, m.externalID(), date, holdings);
                if (docid != null) {
                    service.ingest().index(volumesIndex, volumesIndexType, identifier, builder.string());
                    serviceMetric.mark(1);
                    if (logger.isDebugEnabled()) {
                        logger.debug("indexed volume identifier {}, date {}", docid, date);
                    }
                    volumeHoldingsCount++;
                }
            }
        }
    }
    // holdings (list of institutions)
    if (!m.getVolumesByHolder().isEmpty()) {
        SetMultimap<String, Holding> holdings;
        synchronized (m.getVolumesByHolder()) {
            holdings = ImmutableSetMultimap.copyOf(m.getVolumesByHolder());
        }
        builder = jsonBuilder();
        builder.startObject().startArray("holdings");
        for (String holder : holdings.keySet()) {
            docid = m.buildHoldingsByISIL(builder, tag, m.externalID(), holder, holdings.get(holder));
        }
        builder.endArray().endObject();
        if (docid != null) {
            service.ingest().index(holdingsIndex, holdingsIndexType, docid, builder.string());
        }
        volumeHoldingsCount++;
        serviceMetric.mark(holdings.size());
        if (logger.isDebugEnabled()) {
            logger.debug("indexed {} holdings for {}", holdings.size(), docid);
        }
        if (volumeHoldingsCount == 0) {
            logger.warn("no volumes/holdings indexed for {}", m.externalID());
        }
    }
    // index related manifestations
    if (!m.getRelatedManifestations().isEmpty()) {
        SetMultimap<String, Manifestation> rels;
        synchronized (m.getRelatedManifestations()) {
            rels = ImmutableSetMultimap.copyOf(m.getRelatedManifestations());
        }
        for (String rel : rels.keys()) {
            for (Manifestation mm : rels.get(rel)) {
                indexManifestation(mm, visited);
            }
        }
    }
}

From source file:org.xbib.tools.merge.zdb.entities.Manifestation.java

public String buildHoldingsByDate(XContentBuilder builder, String tag, String parentIdentifier, Integer date,
        Set<Holding> holdings) throws IOException {
    String id = tag != null ? tag + "." + parentIdentifier : parentIdentifier;
    builder.startObject().field("@id", id).field("@type", "DateHoldings").fieldIfNotNull("@tag", tag);
    if (date != -1) {
        builder.field("date", date);
    }
    if (hasLinks()) {
        builder.field("links", getLinks());
    }
    SetMultimap<String, Holding> institutions = HashMultimap.create();
    for (Holding holding : unique(holdings)) {
        institutions.put(holding.getISIL(), holding);
    }
    builder.field("institutioncount", institutions.size()).startArray("institution");
    List<XContentBuilder> instBuilders = newLinkedList();
    for (String institution : institutions.keySet()) {
        Set<Holding> holdingsPerInstitution = institutions.get(institution);
        XContentBuilder institutionBuilder = jsonBuilder();
        Holding h = holdingsPerInstitution.iterator().next();
        institutionBuilder.startObject().field("@id", institution).field("region", h.getRegion())
                .field("organization", h.getOrganization()).field("servicecount", holdingsPerInstitution.size())
                .startArray("service");
        List<XContentBuilder> list = newLinkedList();
        for (Holding holding : holdingsPerInstitution) {
            XContentBuilder serviceBuilder = jsonBuilder();
            serviceBuilder.startObject().field("@id", holding.identifier()).field("@type", "Service")
                    .startArray("@parent");
            for (Manifestation m : holding.getManifestations()) {
                serviceBuilder.value(m.externalID());
            }
            serviceBuilder.endArray().field("mediatype", holding.mediaType())
                    .field("carriertype", holding.carrierType()).field("region", holding.getRegion())
                    .field("organization", holding.getOrganization()).field("isil", institution)
                    .field("serviceisil", holding.getServiceISIL()).field("priority", holding.getPriority())
                    .fieldIfNotNull("type", holding.getServiceType())
                    .fieldIfNotNull("mode", holding.getServiceMode())
                    .fieldIfNotNull("distribution", holding.getServiceDistribution())
                    .fieldIfNotNull("comment", holding.getServiceComment()).field("info", holding.getInfo())
                    .endObject();
            serviceBuilder.close();
            list.add(serviceBuilder);
            map.put(holding.identifier(), serviceBuilder);
        }
        institutionBuilder.copy(list);
        institutionBuilder.endArray().endObject();
        institutionBuilder.close();
        instBuilders.add(institutionBuilder);
    }
    builder.copy(instBuilders);
    builder.endArray().endObject();
    return id;
}

From source file:com.facebook.buck.config.BuckConfig.java

/**
 * In a {@link BuckConfig}, an alias can either refer to a fully-qualified build target, or an
 * alias defined earlier in the {@code alias} section. The mapping produced by this method
 * reflects the result of resolving all aliases as values in the {@code alias} section.
 */
private ImmutableSetMultimap<String, BuildTarget> createAliasToBuildTargetMap(
        ImmutableMap<String, String> rawAliasMap) {
    // We use a LinkedHashMultimap rather than an ImmutableSetMultimap.Builder because we want both
    // (1) order to be preserved, and (2) the ability to inspect the map while building it up.
    SetMultimap<String, BuildTarget> aliasToBuildTarget = LinkedHashMultimap.create();
    for (Map.Entry<String, String> aliasEntry : rawAliasMap.entrySet()) {
        String alias = aliasEntry.getKey();
        validateAliasName(alias);

        // Determine whether the mapping is to a build target or to an alias.
        List<String> values = Splitter.on(' ').splitToList(aliasEntry.getValue());
        for (String value : values) {
            Set<BuildTarget> buildTargets;
            if (isValidAliasName(value)) {
                buildTargets = aliasToBuildTarget.get(value);
                if (buildTargets.isEmpty()) {
                    throw new HumanReadableException("No alias for: %s.", value);
                }
            } else if (value.isEmpty()) {
                continue;
            } else {
                // Here we parse the alias values with a BuildTargetParser to be strict. We could be
                // looser and just grab everything between "//" and ":" and assume it's a valid base path.
                buildTargets = ImmutableSet.of(BuildTargetParser.INSTANCE.parse(value,
                        BuildTargetPatternParser.fullyQualified(), getCellPathResolver()));
            }
            aliasToBuildTarget.putAll(alias, buildTargets);
        }
    }
    return ImmutableSetMultimap.copyOf(aliasToBuildTarget);
}
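
The isEmpty() check above works because aliasToBuildTarget.get(value) returns an empty set, not null, when the alias has not been defined yet. A reduced sketch of that resolution pattern, with placeholder targets instead of Buck's BuildTarget type:

import com.google.common.collect.LinkedHashMultimap;
import com.google.common.collect.SetMultimap;

import java.util.Set;

public class AliasResolutionSketch {
    public static void main(String[] args) {
        // Insertion order is preserved and the multimap can be inspected while it is built up.
        SetMultimap<String, String> aliasToTarget = LinkedHashMultimap.create();
        aliasToTarget.put("app", "//java/com/example:app");

        // An alias defined in terms of an earlier alias resolves through get(...).
        Set<String> targets = aliasToTarget.get("app");
        if (targets.isEmpty()) {
            throw new IllegalStateException("No alias for: app");
        }
        aliasToTarget.putAll("app_release", targets);

        // Unknown aliases come back as an empty set, never null.
        System.out.println(aliasToTarget.get("missing").isEmpty()); // true
        System.out.println(aliasToTarget.get("app_release"));       // [//java/com/example:app]
    }
}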

From source file:org.jakstab.transformation.VpcCfgReconstruction.java

/**
 * Fold ART into a map from VPC locations to sets of abstract states, and
 * then flatten the state sets into single abstract states by joining.
 *
 * @return a map from VPC locations to the join of all abstract states at 
 * that VPC location
 */
private Map<Location, AbstractState> flattenArtOntoVpcLocations() {

    SetMultimap<Location, AbstractState> vpcSensitiveReached = HashMultimap.create();

    Deque<AbstractState> worklist = new LinkedList<AbstractState>();
    worklist.add(art.getRoot());
    Set<AbstractState> visited = new HashSet<AbstractState>();
    visited.add(art.getRoot());

    while (!worklist.isEmpty()) {
        AbstractState headState = worklist.removeFirst();
        if (isVpcStateBot(headState))
            continue;

        BasedNumberElement vpcVal = getVPC(headState);
        VpcLocation headVpcLoc = new VpcLocation(vpcVal, (RTLLabel) headState.getLocation());

        vpcSensitiveReached.put(headVpcLoc, headState);

        Set<Pair<CFAEdge, AbstractState>> successors = art.getChildren(headState);
        for (Pair<CFAEdge, AbstractState> sPair : successors) {
            AbstractState nextState = sPair.getRight();

            if (!visited.contains(nextState)) {
                visited.add(nextState);
                worklist.add(nextState);
            }
        }
    }

    Map<Location, AbstractState> constants = new HashMap<Location, AbstractState>();
    for (Location l : vpcSensitiveReached.keySet()) {
        constants.put(l, Lattices.joinAll(vpcSensitiveReached.get(l)));
    }

    return constants;
}
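
The method above is a group-then-reduce pattern: collect every reached state under its VPC location, then collapse each value set with a join. Stripped of the Jakstab types, the shape is roughly the following (an integer max stands in for Lattices.joinAll; all names are illustrative):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.SetMultimap;

import java.util.HashMap;
import java.util.Map;

public class GroupThenJoinSketch {
    public static void main(String[] args) {
        // Group values under their key ...
        SetMultimap<String, Integer> statesByLocation = HashMultimap.create();
        statesByLocation.put("loc1", 3);
        statesByLocation.put("loc1", 7);
        statesByLocation.put("loc2", 5);

        // ... then flatten each value set into a single value per key.
        Map<String, Integer> joined = new HashMap<>();
        for (String location : statesByLocation.keySet()) {
            int join = Integer.MIN_VALUE;
            for (int state : statesByLocation.get(location)) {
                join = Math.max(join, state);
            }
            joined.put(location, join);
        }
        System.out.println(joined); // e.g. {loc1=7, loc2=5}
    }
}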

From source file:org.opendaylight.groupbasedpolicy.renderer.ofoverlay.arp.ArpTasker.java

/**
 * Uses ARP to get MAC for the given L3 endpoint. Tries to find MAC for IP from
 * {@link EndpointL3#getKey()}.<br>
 * {@link EndpointL3#getNetworkContainment()} has to point to a {@link Subnet}.<br>
 * An ARP request is sent from all node connectors obtained from
 * {@link OfOverlayNodeConfig#getExternalInterfaces()}.<br>
 * The MAC address obtained from the ARP reply is added to the given L3 endpoint (if it still exists).<br>
 * An {@link Endpoint} is also created based on the MAC if the subnet from the network containment points
 * to {@link L2BridgeDomain} directly or through {@link L2FloodDomain}.
 *
 * @param l3Ep the L3 endpoint which needs a MAC address
 */
public void addMacForL3EpAndCreateEp(final EndpointL3 l3Ep) {
    final Ipv4Address tpa = getIPv4Addresses(l3Ep);
    if (tpa == null) {
        LOG.debug("L3 endpoint {} does not contain IPv4 address.", l3Ep.getKey());
        return;
    }
    ReadOnlyTransaction rTx = dataProvider.newReadOnlyTransaction();
    final SetMultimap<Node, Pair<InstanceIdentifier<NodeConnector>, MacAddress>> extNcWithMacByNode = readNodesWithExternalIfaces(
            rTx);
    if (extNcWithMacByNode.isEmpty()) {
        LOG.debug("No node with external interface was found.");
        rTx.close();
        return;
    }
    final Ipv4Address senderIpAddress = createSenderIpAddress(l3Ep, rTx);
    if (senderIpAddress == null) {
        LOG.warn("Cannot create sender IPv4 address for L3 endpoint {}", l3Ep);
        rTx.close();
        return;
    }
    rTx.close();

    for (final Node node : extNcWithMacByNode.keySet()) {
        final InstanceIdentifier<Node> nodeIid = InstanceIdentifier.builder(Nodes.class)
                .child(Node.class, node.getKey()).build();
        final NodeRef nodeRef = new NodeRef(nodeIid);
        List<ListenableFuture<RpcResult<AddFlowOutput>>> arpFlowResultFutures = new ArrayList<>();
        List<Pair<RemoveFlowInput, EndpointL3Key>> flowsForRemove = new ArrayList<>();
        for (final Pair<InstanceIdentifier<NodeConnector>, MacAddress> extNcIidAndMac : extNcWithMacByNode
                .get(node)) {
            final ArpMessageAddress senderAddress = new ArpMessageAddress(extNcIidAndMac.getRight(),
                    senderIpAddress);
            NodeConnectorId ncId = extNcIidAndMac.getLeft()
                    .firstKeyOf(NodeConnector.class, NodeConnectorKey.class).getId();
            final Flow arpReplyToControllerFlow = createArpReplyToControllerFlow(senderAddress, tpa, ncId);
            flowsForRemove.add(new ImmutablePair<>(
                    new RemoveFlowInputBuilder(arpReplyToControllerFlow).setNode(nodeRef).build(),
                    l3Ep.getKey()));
            final InstanceIdentifier<Flow> flowIid = createFlowIid(arpReplyToControllerFlow, nodeIid);
            Future<RpcResult<AddFlowOutput>> futureAddFlowResult = flowService
                    .addFlow(new AddFlowInputBuilder(arpReplyToControllerFlow).setFlowRef(new FlowRef(flowIid))
                            .setNode(nodeRef).build());
            arpFlowResultFutures.add(JdkFutureAdapters.listenInPoolThread(futureAddFlowResult));
        }
        requestInfoByKey.putAll(createKey(node.getId(), tpa), flowsForRemove);
        ListenableFuture<List<RpcResult<AddFlowOutput>>> futureArpFlowResults = Futures
                .allAsList(arpFlowResultFutures);
        Futures.addCallback(futureArpFlowResults, new FutureCallback<List<RpcResult<AddFlowOutput>>>() {

            @Override
            public void onSuccess(List<RpcResult<AddFlowOutput>> result) {
                for (RpcResult<AddFlowOutput> addFlowResult : result) {
                    if (!addFlowResult.isSuccessful()) {
                        LOG.warn("An ARP Reply to Controller flow was not created on node {} \nErrors: {}",
                                node.getId().getValue(), addFlowResult.getErrors());
                        continue;
                    }
                }
                LOG.debug("ARP Reply to Controller flows were created on node {}", node.getId().getValue());
                for (final Pair<InstanceIdentifier<NodeConnector>, MacAddress> extNcIidAndMac : extNcWithMacByNode
                        .get(node)) {
                    final ArpMessageAddress senderAddress = new ArpMessageAddress(extNcIidAndMac.getRight(),
                            senderIpAddress);
                    ListenableFuture<RpcResult<Void>> futureSendArpResult = arpSender.sendArp(senderAddress,
                            tpa, extNcIidAndMac.getLeft());
                    Futures.addCallback(futureSendArpResult, logResult(tpa, extNcIidAndMac.getLeft()));
                }
            }

            @Override
            public void onFailure(Throwable t) {
                LOG.error(
                        "Illegal state - Installation of ARP flows on node {} failed. Node can contain just some ARP flows.",
                        node.getId(), t);
            }
        });
    }
}

From source file:org.xbib.tools.merge.zdb.licenseinfo.WithHoldingsAndLicensesPipeline.java

private void indexManifestation(Manifestation m, Set<String> visited) throws IOException {
    String id = m.externalID();
    // protection against recursion (should not happen)
    if (visited.contains(id)) {
        return;
    }
    visited.add(id);
    // make sure, even across other threads, that we never index a manifestation twice
    if (service.indexed().contains(id)) {
        return;
    }
    service.indexed().add(id);
    String tag = service.settings().get("tag");

    // first, index related volumes (conference/proceedings/abstracts/...)
    List<String> vids = newArrayList();
    if (!m.getVolumes().isEmpty()) {
        final ImmutableList<Volume> volumes;
        synchronized (m.getVolumes()) {
            volumes = ImmutableList.copyOf(m.getVolumes());
        }
        for (Volume volume : volumes) {
            XContentBuilder builder = jsonBuilder();
            String vid = volume.build(builder, tag, null);
            service.ingest().index(manifestationsIndex, manifestationsIndexType, vid, builder.string());
            vids.add(vid);
            for (VolumeHolding volumeHolding : volume.getHoldings()) {
                builder = jsonBuilder();
                vid = volumeHolding.build(builder, tag);
                // by holding
                service.ingest().index(holdingsIndex, holdingsIndexType, vid, builder.string());
                // extra entry by date
                service.ingest().index(dateHoldingsIndex, dateHoldingsIndexType,
                        vid + "." + volumeHolding.dates().get(0), builder.string());
            }
            int n = 1 + 2 * volume.getHoldings().size();
            service.indexMetric().mark(n);
        }
        int n = m.getVolumes().size();
        service.indexMetric().mark(n);
    }
    m.addVolumeIDs(vids);

    // index this manifestation
    XContentBuilder builder = jsonBuilder();
    String docid = m.build(builder, tag, null);
    service.ingest().index(manifestationsIndex, manifestationsIndexType, docid, builder.string());
    service.indexMetric().mark(1);
    // holdings by date and the services for them
    if (!m.getVolumesByDate().isEmpty()) {
        SetMultimap<Integer, Holding> volumesByDate;
        synchronized (m.getVolumesByDate()) {
            volumesByDate = ImmutableSetMultimap.copyOf(m.getVolumesByDate());
        }
        for (Integer date : volumesByDate.keySet()) {
            String identifier = (tag != null ? tag + "." : "") + m.externalID()
                    + (date != -1 ? "." + date : "");
            Set<Holding> holdings = volumesByDate.get(date);
            if (holdings != null && !holdings.isEmpty()) {
                builder = jsonBuilder();
                docid = m.buildHoldingsByDate(builder, tag, m.externalID(), date, holdings);
                service.ingest().index(dateHoldingsIndex, dateHoldingsIndexType, identifier, builder.string());
                service.indexMetric().mark(1);
                logger.debug("indexed volume {} date {}", docid, date);
            }
        }
    }
    // holdings (list of institutions)
    if (!m.getVolumesByHolder().isEmpty()) {
        final SetMultimap<String, Holding> holdings;
        synchronized (m.getVolumesByHolder()) {
            holdings = ImmutableSetMultimap.copyOf(m.getVolumesByHolder());
        }
        builder = jsonBuilder();
        builder.startObject().startArray("holdings");
        for (String holder : holdings.keySet()) {
            docid = m.buildHoldingsByISIL(builder, tag, m.externalID(), holder, holdings.get(holder));
        }
        builder.endArray().endObject();
        service.ingest().index(holdingsIndex, holdingsIndexType, docid, builder.string());
        service.indexMetric().mark(1);
        logger.debug("indexed {} holdings for {}", holdings.size(), docid);
    }
    // index related manifestations
    if (!m.getRelatedManifestations().isEmpty()) {
        SetMultimap<String, Manifestation> rels;
        synchronized (m.getRelatedManifestations()) {
            rels = ImmutableSetMultimap.copyOf(m.getRelatedManifestations());
        }
        for (String rel : rels.keys()) {
            for (Manifestation mm : rels.get(rel)) {
                indexManifestation(mm, visited);
            }
        }
    }
}

From source file:org.carrot2.output.metrics.NormalizedMutualInformationMetric.java

public void calculate() {
    final int partitionCount = getPartitionsCount(documents);
    if (partitionCount == 0) {
        return;
    }

    if (clusters.size() == 0) {
        return;
    }

    final Set<Object> partitions = getPartitions(documents);
    final SetMultimap<Object, Document> documentsByPartition = getDocumentsByPartition(documents);
    final Map<Object, Integer> documentCountByPartition = getDocumentCountByPartition(documents);
    final int documentCount = documents.size();

    if (partitions.size() <= 1) {
        normalizedMutualInformation = 0.0;
        return;
    }

    final Collection<Integer> partitionSizes = Maps
            .transformValues(documentsByPartition.asMap(), new Function<Collection<Document>, Integer>() {
                public Integer apply(Collection<Document> documents) {
                    return documents.size();
                }
            }).values();
    double partitionEntropy = entropy(documentCount,
            partitionSizes.toArray(new Integer[partitionSizes.size()]));

    final List<Integer> clusterSizes = Lists.transform(clusters, new Function<Cluster, Integer>() {
        public Integer apply(Cluster cluster) {
            return cluster.size();
        }
    });
    double clusterEntropy = entropy(documentCount, clusterSizes.toArray(new Integer[clusterSizes.size()]));

    double mutualInformation = 0;
    for (Cluster cluster : this.clusters) {
        final int clusterSize = cluster.size();
        for (Object partition : partitions) {
            final List<Document> clusterDocuments = cluster.getAllDocuments();
            if (cluster.isOtherTopics() || clusterDocuments.size() == 0) {
                continue;
            }

            final Set<Document> commonDocuments = Sets.newHashSet(documentsByPartition.get(partition));
            commonDocuments.retainAll(clusterDocuments);
            int commonDocumentsCount = commonDocuments.size();

            if (commonDocumentsCount != 0) {
                mutualInformation += (commonDocumentsCount / (double) documentCount)
                        * Math.log(documentCount * commonDocumentsCount
                                / (double) (clusterSize * documentCountByPartition.get(partition)));
            }
        }
    }

    normalizedMutualInformation = mutualInformation / ((clusterEntropy + partitionEntropy) / 2);
}

From source file:grakn.core.graql.reasoner.atom.binary.RelationAtom.java

@Override
public Set<String> validateAsRuleBody(Label ruleLabel) {
    Set<String> errors = new HashSet<>();
    SchemaConcept type = getSchemaConcept();
    if (type != null && !type.isRelationType()) {
        errors.add(ErrorMessage.VALIDATION_RULE_INVALID_RELATION_TYPE.getMessage(ruleLabel, type.label()));
        return errors;
    }

    //check role-type compatibility
    SetMultimap<Variable, Type> varTypeMap = getParentQuery().getVarTypeMap();
    for (Map.Entry<Role, Collection<Variable>> e : getRoleVarMap().asMap().entrySet()) {
        Role role = e.getKey();
        if (!Schema.MetaSchema.isMetaLabel(role.label())) {
            //check whether this role can be played in this relation
            if (type != null && type.asRelationType().roles().noneMatch(r -> r.equals(role))) {
                errors.add(ErrorMessage.VALIDATION_RULE_ROLE_CANNOT_BE_PLAYED.getMessage(ruleLabel,
                        role.label(), type.label()));
            }

            //check whether the role player's type allows playing this role
            for (Variable player : e.getValue()) {
                varTypeMap.get(player).stream()
                        .filter(playerType -> playerType.playing().noneMatch(plays -> plays.equals(role)))
                        .forEach(playerType -> errors
                                .add(ErrorMessage.VALIDATION_RULE_TYPE_CANNOT_PLAY_ROLE.getMessage(ruleLabel,
                                        playerType.label(), role.label(), type == null ? "" : type.label())));
            }
        }
    }
    return errors;
}