Example usage for com.google.common.collect Sets intersection

Introduction

This page collects example usages of com.google.common.collect.Sets.intersection drawn from open-source projects.

Prototype

public static <E> SetView<E> intersection(final Set<E> set1, final Set<?> set2) 

Document

Returns an unmodifiable view of the intersection of two sets.
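
As a quick orientation before the project examples, here is a minimal sketch of the call; the class name SetsIntersectionSketch and the element values are invented for illustration.

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import java.util.Set;

public class SetsIntersectionSketch {
    public static void main(String[] args) {
        // Example values are illustrative only
        Set<String> first = ImmutableSet.of("a", "b", "c");
        Set<String> second = ImmutableSet.of("b", "c", "d");

        // Unmodifiable, live view containing only the elements present in both sets
        Sets.SetView<String> common = Sets.intersection(first, second);

        System.out.println(common);           // [b, c]
        System.out.println(common.size());    // 2

        // Copy the view into an independent set if a snapshot is needed
        ImmutableSet<String> snapshot = common.immutableCopy();
        System.out.println(snapshot);         // [b, c]
    }
}

The Guava documentation also notes that the view performs slightly better when the smaller set is passed first, since iteration runs over set1 and containment is checked against set2.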

Usage

From source file:grakn.core.graql.reasoner.plan.QueryCollectionBase.java
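
In this planner helper, Sets.intersection tests for shared variables: an available query becomes a neighbour-via-substitution candidate when the intersection of its variable names with the already-substituted variables is non-empty.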

/**
 * @param entryQuery query for which candidates are to be determined
 * @param plan current plan
 * @return set of candidate queries for this query
 */
public QuerySet getCandidates(ReasonerQueryImpl entryQuery, QueryList plan) {
    Equivalence.Wrapper<ReasonerQueryImpl> query = equality().wrap(entryQuery);
    Set<Equivalence.Wrapper<ReasonerQueryImpl>> availableQueries = this.wrappedStream()
            .filter(q -> !(plan.contains(q) || q.equals(query))).collect(Collectors.toSet());

    Set<Equivalence.Wrapper<ReasonerQueryImpl>> availableImmediateNeighbours = this
            .getImmediateNeighbours(query).stream().filter(availableQueries::contains)
            .collect(Collectors.toSet());

    Set<Variable> subbedVars = plan.stream().flatMap(q -> q.getVarNames().stream()).collect(Collectors.toSet());

    Set<Equivalence.Wrapper<ReasonerQueryImpl>> availableImmediateNeighboursFromSubs = availableQueries.stream()
            .map(Equivalence.Wrapper::get).filter(Objects::nonNull)
            .filter(q -> !Sets.intersection(q.getVarNames(), subbedVars).isEmpty()).map(q -> equality().wrap(q))
            .collect(Collectors.toSet());

    return QuerySet.create(this.isQueryDisconnected(query) ? availableQueries
            : this.isQueryReachable(query, availableQueries)
                    ? Sets.union(availableImmediateNeighbours, availableImmediateNeighboursFromSubs)
                    : availableQueries);
}

From source file:org.caleydo.view.crossword.api.model.TypedSet.java
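
This helper counts elements common to two sets. When the second set is not BitSet-backed it falls back to Sets.intersection(b, a).size(), passing the BitSetSet as the second argument so the view's containment checks run against it.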

private static int and(BitSetSet a, Set<Integer> b) {
    if (b instanceof BitSetSet) {
        BitSet clone = (BitSet) a.getBitSet().clone();
        clone.and(((BitSetSet) b).getBitSet());
        return clone.cardinality();
    }
    return Sets.intersection(b, a).size(); // pass the BitSetSet second: the view's containment predicate runs against the second argument
}

From source file:com.eucalyptus.node.Nodes.java
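
Here the cluster's known node tags are reconciled with the freshly reported ones: Sets.intersection(knownTags, reportedTags) identifies nodes that are still present, while Sets.difference isolates the unreported and newly reported tags.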

public static void updateNodeInfo(ServiceConfiguration ccConfig, List<NodeType> nodes) {
    ConcurrentNavigableMap<String, NodeInfo> clusterNodeMap = Clusters.lookup(ccConfig).getNodeMap();
    /** prepare key sets for comparison **/
    Set<String> knownTags = Sets.newHashSet(clusterNodeMap.keySet());
    Set<String> reportedTags = Sets.newHashSet();
    for (final NodeType node : nodes) {
        reportedTags.add(node.getServiceTag());
    }
    /** compute intersections and differences **/
    Set<String> unreportedTags = Sets.difference(knownTags, reportedTags);
    Set<String> newTags = Sets.difference(reportedTags, knownTags);
    Set<String> stillKnownTags = Sets.intersection(knownTags, reportedTags);
    StringBuilder nodeLog = new StringBuilder();
    /** maybe remove unreported nodes **/
    for (String unreportedTag : unreportedTags) {
        NodeInfo unreportedNode = clusterNodeMap.get(unreportedTag);
        if (unreportedNode != null && (System.currentTimeMillis()
                - unreportedNode.getLastSeen().getTime()) > Nodes.REFRESH_TIMEOUT) {
            Topology.destroy(Components.lookup(NodeController.class).lookup(unreportedNode.getName()));
            NodeInfo removed = clusterNodeMap.remove(unreportedTag);
            nodeLog.append("GONE:").append(removed.getName()).append(":").append(removed.getLastState())
                    .append(" ");
        }
    }
    /** add new nodes or update existing node infos **/
    Set<NodeInfo> nodesToUpdate = Sets.newHashSet();
    for (final NodeType node : nodes) {
        try {
            String serviceTag = node.getServiceTag();
            if (newTags.contains(serviceTag)) {
                clusterNodeMap.putIfAbsent(serviceTag, new NodeInfo(ccConfig.getPartition(), node));
                NodeInfo nodeInfo = clusterNodeMap.get(serviceTag);
                nodeLog.append("NEW:").append(nodeInfo.getName()).append(":").append(nodeInfo.getLastState())
                        .append(" ");
                nodesToUpdate.add(nodeInfo);
            } else if (stillKnownTags.contains(serviceTag)) {
                NodeInfo nodeInfo = clusterNodeMap.get(serviceTag);
                nodeInfo.setIqn(node.getIqn());
                nodeLog.append("OLD:").append(nodeInfo.getName()).append(":").append(nodeInfo.getLastState())
                        .append(" ");
                nodesToUpdate.add(nodeInfo);
            }
        } catch (NoSuchElementException e) {
            LOG.error(e);
            LOG.debug(e, e);
        }
    }
    LOG.debug("Updated node info map: " + nodeLog.toString());
    try {
        Nodes.updateServiceConfiguration(ccConfig, nodesToUpdate);
    } catch (Exception e) {
        if (!Component.State.ENABLED.apply(ccConfig))
            LOG.debug("Error while updating nodes: " + e.getMessage(), e);
    }
    /**
     * TODO:GRZE: if not present emulate {@link ClusterController.NodeController} using
     * {@link Component#setup()} TODO:GRZE: emulate update of
     * {@link ClusterController.NodeController} state
     * TODO:GRZE: {@link Component#destroy()} for the NodeControllers which are not reported by the
     * CC.
     */

}

From source file:org.openqa.selenium.remote.server.NewSessionPayload.java
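
During W3C new-session validation, Sets.intersection(alwaysMatch.keySet(), fragment.keySet()) collects capability keys duplicated between the alwaysMatch block and any firstMatch fragment, and such duplication is rejected as an error.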

private void validate(Sources sources) {
    if (!sources.getDialects().contains(Dialect.W3C)) {
        return; // Nothing to do
    }

    // Ensure that the W3C payload looks okay
    Map<String, Object> alwaysMatch = sources.getAlwaysMatch().get();
    validateSpecCompliance(alwaysMatch);

    Set<String> duplicateKeys = sources.getFirstMatch().stream().map(Supplier::get)
            .peek(this::validateSpecCompliance)
            .map(fragment -> Sets.intersection(alwaysMatch.keySet(), fragment.keySet()))
            .flatMap(Collection::stream).collect(ImmutableSortedSet.toImmutableSortedSet(Ordering.natural()));

    if (!duplicateKeys.isEmpty()) {
        throw new IllegalArgumentException(
                "W3C payload contained keys duplicated between the firstMatch and alwaysMatch items: "
                        + duplicateKeys);
    }
}

From source file:com.google.gerrit.elasticsearch.ElasticChangeIndex.java
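
The set of statuses a query can match is intersected with the open and closed status sets to decide which Elasticsearch indexes have to be queried.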

@Override
public ChangeDataSource getSource(Predicate<ChangeData> p, QueryOptions opts) throws QueryParseException {
    Set<Change.Status> statuses = ChangeIndexRewriter.getPossibleStatus(p);
    List<String> indexes = Lists.newArrayListWithCapacity(2);
    if (!Sets.intersection(statuses, OPEN_STATUSES).isEmpty()) {
        indexes.add(OPEN_CHANGES);
    }
    if (!Sets.intersection(statuses, CLOSED_STATUSES).isEmpty()) {
        indexes.add(CLOSED_CHANGES);
    }
    return new QuerySource(indexes, p, opts);
}

From source file:de.kussm.chain.ChainLayouter.java
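
A transmitter may only be placed when the intersection of the current position's neighbours with the already placed receivers is empty.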

/**
 * May the current chain link be placed here?
 *
 * @return true if the current chain link may be placed at the current position
 */
private boolean canPlaceChainLink() {
    switch (currentChainLink) {
    case TRANSMITTER:
        return isTransmitterAllowed.test(currentPosition)
                && Sets.intersection(currentPosition.neighbours(), placedReceivers).isEmpty();
    case RECEIVER:
        return isReceiverAllowed.test(currentPosition);
    // && Sets.intersection(currentPosition.neighbours(), placedTransmitters).isEmpty();
    case CONDITIONAL:
        return prevDirection == nextDirection;
    default:
        return true;
    }
}

From source file:com.b2international.index.revision.DefaultRevisionIndex.java
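
Branch comparison starts from the segment sets of the two branches: Sets.intersection yields the common history, while Sets.difference yields the segments unique to the compare branch.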

private RevisionCompare compare(final RevisionBranch base, final RevisionBranch compare, final int limit) {
    return index.read(searcher -> {
        final Set<Integer> commonPath = Sets.intersection(compare.segments(), base.segments());
        final Set<Integer> segmentsToCompare = Sets.difference(compare.segments(), base.segments());
        final RevisionBranch baseOfCompareBranch = new RevisionBranch(base.path(),
                Ordering.natural().max(commonPath), commonPath);

        final Set<Class<? extends Revision>> typesToCompare = getRevisionTypes();
        final Builder result = RevisionCompare.builder(DefaultRevisionIndex.this, baseOfCompareBranch, compare,
                limit);

        LongSet newOrChangedKeys = PrimitiveSets.newLongOpenHashSet();
        LongKeyMap<String> newOrChangedHashes = PrimitiveMaps.newLongKeyOpenHashMap();
        LongSet deletedOrChangedKeys = PrimitiveSets.newLongOpenHashSet();
        // Don't need to keep track of deleted-or-changed hashes

        // query all registered revision types for new, changed and deleted components
        for (Class<? extends Revision> type : typesToCompare) {

            // The current storage key-hash pairs from the "compare" segments
            final Query<String[]> newOrChangedQuery = Query.select(String[].class).from(type)
                    .fields(Revision.STORAGE_KEY, DocumentMapping._HASH)
                    .where(Revision.branchSegmentFilter(segmentsToCompare)).scroll(SCROLL_KEEP_ALIVE)
                    .limit(SCROLL_LIMIT).build();

            for (final Hits<String[]> newOrChangedHits : searcher.scroll(newOrChangedQuery)) {

                newOrChangedKeys.clear();
                newOrChangedHashes.clear();

                for (final String[] newOrChangedHit : newOrChangedHits) {
                    final long storageKey = Long.parseLong(newOrChangedHit[0]);
                    final String hash = newOrChangedHit[1];
                    newOrChangedKeys.add(storageKey);
                    if (hash != null) {
                        newOrChangedHashes.put(storageKey, hash);
                    }
                }

                /* 
                 * Create "dependent sub-query": try to find the same IDs in the "base" segments, which 
                 * will be either changed or "same" revisions from a compare point of view 
                 * (in case of a matching content hash value)
                 */
                final Query<String[]> changedOrSameQuery = Query.select(String[].class).from(type)
                        .fields(Revision.STORAGE_KEY, DocumentMapping._HASH)
                        .where(Expressions.builder()
                                .filter(matchAnyLong(Revision.STORAGE_KEY, LongSets.toList(newOrChangedKeys)))
                                .filter(matchAnyInt(Revision.SEGMENT_ID, commonPath))
                                .filter(matchAnyInt(Revision.REPLACED_INS, segmentsToCompare)).build())
                        .scroll(SCROLL_KEEP_ALIVE).limit(SCROLL_LIMIT).build();

                for (Hits<String[]> changedOrSameHits : searcher.scroll(changedOrSameQuery)) {
                    for (final String[] changedOrSameHit : changedOrSameHits) {
                        final long storageKey = Long.parseLong(changedOrSameHit[0]);
                        final String hash = changedOrSameHit[1];

                        // CHANGED, unless the hashes tell us otherwise
                        if (hash == null || !newOrChangedHashes.containsKey(storageKey)
                                || !Objects.equals(newOrChangedHashes.get(storageKey), hash)) {

                            result.changedRevision(type, storageKey);
                        }

                        // Remove this storage key from newOrChanged, it is decidedly changed-or-same
                        newOrChangedKeys.remove(storageKey);
                        newOrChangedHashes.remove(storageKey);
                    }

                } // changedOrSameHits

                // Everything remaining in newOrChanged is NEW, as it had no previous revision in the common segments
                for (LongIterator itr = newOrChangedKeys.iterator(); itr.hasNext(); /* empty */) {
                    result.newRevision(type, itr.next());
                }

            } // newOrChangedHits

            // Revisions which existed on "base", but were replaced by another revision on "compare" segments
            final Query<String[]> deletedOrChangedQuery = Query.select(String[].class).from(type)
                    .fields(Revision.STORAGE_KEY)
                    .where(Expressions.builder().filter(matchAnyInt(Revision.SEGMENT_ID, commonPath))
                            .filter(matchAnyInt(Revision.REPLACED_INS, segmentsToCompare)).build())
                    .scroll(SCROLL_KEEP_ALIVE).limit(SCROLL_LIMIT).build();

            for (Hits<String[]> deletedOrChangedHits : searcher.scroll(deletedOrChangedQuery)) {

                deletedOrChangedKeys.clear();

                for (String[] deletedOrChanged : deletedOrChangedHits) {
                    final long storageKey = Long.parseLong(deletedOrChanged[0]);
                    deletedOrChangedKeys.add(storageKey);
                }

                /* 
                 * Create "dependent sub-query": try to find the same IDs in the "compare" segments,
                 * if they are present, the revision is definitely not deleted
                 */
                final Query<String[]> changedOrSameQuery = Query.select(String[].class).from(type)
                        .fields(Revision.STORAGE_KEY)
                        .where(Expressions.builder()
                                .filter(matchAnyLong(Revision.STORAGE_KEY,
                                        LongSets.toList(deletedOrChangedKeys)))
                                .filter(Revision.branchSegmentFilter(segmentsToCompare)).build())
                        .scroll(SCROLL_KEEP_ALIVE).limit(SCROLL_LIMIT).build();

                for (Hits<String[]> changedOrSameHits : searcher.scroll(changedOrSameQuery)) {
                    for (final String[] changedOrSameHit : changedOrSameHits) {
                        final long storageKey = Long.parseLong(changedOrSameHit[0]);

                        // Remove this storage key from deletedOrChanged, it is decidedly still existing
                        deletedOrChangedKeys.remove(storageKey);
                    }
                }

                // Everything remaining in deletedOrChanged is DELETED: it was replaced in the "compare" segments and no current revision was found there
                for (LongIterator itr = deletedOrChangedKeys.iterator(); itr.hasNext(); /* empty */) {
                    result.deletedRevision(type, itr.next());
                }

            } // deletedOrChangedHits

        } // type

        return result.build();
    });
}

From source file:edu.cmu.lti.oaqa.baseqa.answer_type.AnswerTypeClassifierTrainer.java
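
Per-question F1 is derived from the size of the intersection between the gold-standard label set and the predicted labels.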

@Override
public void collectionProcessComplete() throws AnalysisEngineProcessException {
    super.collectionProcessComplete();
    if (cvPredictFile != null) {
        try (BufferedWriter bw = Files.newWriter(new File(cvPredictFile), Charsets.UTF_8)) {
            Set<Double> f1s = new HashSet<>();
            List<List<String>> results = classifier.crossTrainPredictMultiLabel(trainX, trainY, RESAMPLE_TYPE,
                    limit);
            for (int i = 0; i < qids.size(); i++) {
                String qid = qids.get(i);
                List<String> predLabels = results.get(i);
                // calculate f1
                Set<String> gsLabels = qid2labels.get(qid);
                f1s.add(2.0 * Sets.intersection(gsLabels, ImmutableSet.copyOf(predLabels)).size()
                        / (gsLabels.size() + predLabels.size()));
                // write to file
                bw.write(qid + "\t" + predLabels.stream().collect(joining(";")) + "\n");
            }
            f1s.stream().mapToDouble(Double::doubleValue).average()
                    .ifPresent(f1 -> LOG.info("Micro F1: {}", f1));
            bw.close();
        } catch (IOException e) {
            throw new AnalysisEngineProcessException(e);
        }
    }
    LOG.info("Train Classifier");
    // changed CV to false, as a "micro f1" will be calculated if the cvPredictFile is specified
    classifier.trainMultiLabel(trainX, trainY, RESAMPLE_TYPE, false);
}

From source file:com.opengamma.bbg.loader.BloombergHistoricalTimeSeriesLoader.java
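
Each search result's identifier bundle is intersected with the requested identifiers; immutableCopy() materialises the view, and exactly one match is expected per document.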

/**
 * Finds those time-series that are not in the master.
 *
 * @param externalIds  the identifiers to lookup, not null
 * @param dataProvider  the data provider, not null
 * @param dataField  the data field, not null
 * @param result  the result map of identifiers, updated if already in database, not null
 * @return the missing identifiers, not null
 */
protected Set<ExternalId> findTimeSeries(Set<ExternalId> externalIds, String dataProvider, String dataField,
        Map<ExternalId, UniqueId> result) {

    HistoricalTimeSeriesInfoSearchRequest searchRequest = new HistoricalTimeSeriesInfoSearchRequest();
    searchRequest.addExternalIds(externalIds);
    searchRequest.setDataField(dataField);
    searchRequest.setDataProvider(dataProvider);
    searchRequest.setDataSource(BLOOMBERG_DATA_SOURCE_NAME);
    HistoricalTimeSeriesInfoSearchResult searchResult = _htsMaster.search(searchRequest);

    Set<ExternalId> missing = new HashSet<ExternalId>(externalIds);
    for (HistoricalTimeSeriesInfoDocument doc : searchResult.getDocuments()) {
        Set<ExternalId> intersection = Sets
                .intersection(doc.getInfo().getExternalIdBundle().toBundle().getExternalIds(), externalIds)
                .immutableCopy();
        if (intersection.size() == 1) {
            ExternalId identifier = intersection.iterator().next();
            missing.remove(identifier);
            result.put(identifier, doc.getUniqueId());
        } else {
            throw new OpenGammaRuntimeException(
                    "Unable to match single identifier: " + doc.getInfo().getExternalIdBundle());
        }
    }
    return missing;
}

From source file:edu.mit.streamjit.impl.distributed.node.BlobsManagerImpl.java
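
Tokens that appear in both the input-capacity and output-capacity maps are local to this set of blobs; Sets.intersection finds them, and Sets.difference separates the remaining global input and output tokens.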

private ImmutableMap<Token, Buffer> createBufferMap(Set<Blob> blobSet) {
    ImmutableMap.Builder<Token, Buffer> bufferMapBuilder = ImmutableMap.<Token, Buffer>builder();

    Map<Token, Integer> minInputBufCapaciy = new HashMap<>();
    Map<Token, Integer> minOutputBufCapaciy = new HashMap<>();

    for (Blob b : blobSet) {
        Set<Blob.Token> inputs = b.getInputs();
        for (Token t : inputs) {
            minInputBufCapaciy.put(t, b.getMinimumBufferCapacity(t));
        }

        Set<Blob.Token> outputs = b.getOutputs();
        for (Token t : outputs) {
            minOutputBufCapaciy.put(t, b.getMinimumBufferCapacity(t));
        }
    }

    Set<Token> localTokens = Sets.intersection(minInputBufCapaciy.keySet(), minOutputBufCapaciy.keySet());
    Set<Token> globalInputTokens = Sets.difference(minInputBufCapaciy.keySet(), localTokens);
    Set<Token> globalOutputTokens = Sets.difference(minOutputBufCapaciy.keySet(), localTokens);

    for (Token t : localTokens) {
        int bufSize = lcm(minInputBufCapaciy.get(t), minOutputBufCapaciy.get(t));
        addBuffer(t, bufSize, bufferMapBuilder);
    }

    for (Token t : globalInputTokens) {
        int bufSize = minInputBufCapaciy.get(t);
        addBuffer(t, bufSize, bufferMapBuilder);
    }

    for (Token t : globalOutputTokens) {
        int bufSize = minOutputBufCapaciy.get(t);
        addBuffer(t, bufSize, bufferMapBuilder);
    }
    return bufferMapBuilder.build();
}