Example usage for com.google.common.collect Sets union

List of usage examples for com.google.common.collect Sets union

Introduction

On this page you can find usage examples for com.google.common.collect.Sets.union.

Prototype

public static <E> SetView<E> union(final Set<? extends E> set1, final Set<? extends E> set2) 

Document

Returns an unmodifiable view of the union of two sets.
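
Before the project examples, a minimal standalone sketch of the method's behavior (class and variable names here are illustrative, not taken from the examples below): the returned SetView is a live, unmodifiable view over both backing sets, so later changes to either set show through it, and immutableCopy() can be used to take a stable snapshot.

import com.google.common.collect.Sets;

import java.util.HashSet;
import java.util.Set;

public class SetsUnionDemo {
    public static void main(String[] args) {
        Set<String> first = new HashSet<>(Set.of("a", "b"));
        Set<String> second = new HashSet<>(Set.of("b", "c"));

        // The result is a live, unmodifiable view; no elements are copied.
        Sets.SetView<String> union = Sets.union(first, second);
        System.out.println(union.size());          // 3 -- the shared element "b" is counted once

        // Changes to either backing set are visible through the view.
        second.add("d");
        System.out.println(union.contains("d"));   // true

        // Take a snapshot when a stable, independent set is needed.
        Set<String> snapshot = union.immutableCopy();
        first.add("e");
        System.out.println(snapshot.contains("e")); // false
    }
}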

Usage

From source file:org.sosy_lab.cpachecker.cpa.livevar.DeclarationCollectingVisitor.java

@Override
public Set<ASimpleDeclaration> visit(AArraySubscriptExpression exp) throws RuntimeException {
    return Sets.union(accept0(exp.getArrayExpression()), accept0(exp.getSubscriptExpression()));
}

From source file:org.onosproject.net.flow.impl.FlowRuleDriverProvider.java

@Override
public void executeBatch(FlowRuleBatchOperation batch) {
    ImmutableList.Builder<FlowRule> toAdd = ImmutableList.builder();
    ImmutableList.Builder<FlowRule> toRemove = ImmutableList.builder();
    for (FlowRuleBatchEntry fbe : batch.getOperations()) {
        if (fbe.operator() == ADD || fbe.operator() == MODIFY) {
            toAdd.add(fbe.target());
        } else if (fbe.operator() == REMOVE) {
            toRemove.add(fbe.target());
        }
    }

    ImmutableList<FlowRule> rulesToAdd = toAdd.build();
    ImmutableList<FlowRule> rulesToRemove = toRemove.build();

    Collection<FlowRule> added = applyFlowRules(batch.deviceId(), rulesToAdd);
    Collection<FlowRule> removed = removeFlowRules(batch.deviceId(), rulesToRemove);

    Set<FlowRule> failedRules = Sets.union(Sets.difference(copyOf(rulesToAdd), copyOf(added)),
            Sets.difference(copyOf(rulesToRemove), copyOf(removed)));
    CompletedBatchOperation status = new CompletedBatchOperation(failedRules.isEmpty(), failedRules,
            batch.deviceId());
    providerService.batchOperationCompleted(batch.id(), status);
}
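
The failed-rule calculation above composes Sets.difference with Sets.union: anything requested but not applied, across both the add and remove batches. A reduced, hedged sketch of that pattern with made-up rule names (not the ONOS FlowRule types):

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import java.util.Set;

public class FailedBatchSketch {
    public static void main(String[] args) {
        Set<String> requestedAdds = ImmutableSet.of("r1", "r2", "r3");
        Set<String> actuallyAdded = ImmutableSet.of("r1", "r3");
        Set<String> requestedRemovals = ImmutableSet.of("r4", "r5");
        Set<String> actuallyRemoved = ImmutableSet.of("r4");

        // Everything that was requested but did not take effect, in either direction.
        Set<String> failed = Sets.union(
                Sets.difference(requestedAdds, actuallyAdded),
                Sets.difference(requestedRemovals, actuallyRemoved));

        System.out.println(failed);           // [r2, r5]
        System.out.println(failed.isEmpty()); // false -- the batch would be reported as failed
    }
}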

From source file:com.digitalpetri.opcua.stack.core.serialization.DelegateRegistry.java

private static void loadGeneratedClasses(ClassLoader classLoader) throws IOException, ClassNotFoundException {
    ClassPath classPath = ClassPath.from(classLoader);

    ImmutableSet<ClassInfo> structures = classPath
            .getTopLevelClasses("com.digitalpetri.opcua.stack.core.types.structured");

    ImmutableSet<ClassInfo> enumerations = classPath
            .getTopLevelClasses("com.digitalpetri.opcua.stack.core.types.enumerated");

    for (ClassInfo classInfo : Sets.union(structures, enumerations)) {
        Class<?> clazz = classInfo.load();
        Class.forName(clazz.getName(), true, classLoader);
    }
}

From source file:io.bazel.rules.closure.worker.ErrorReporter.java

private void finish() {
    if (suppress.isPresent()) {
        Set<String> superfluous = Sets.difference(suppress.get(), Sets.union(suppressed, NEVER_SUPERFLUOUS));
        if (!superfluous.isEmpty()) {
            report(SUPERFLUOUS_SUPPRESS_ERROR, "Superfluous suppress codes: " + joinWords(superfluous));
        }
    }
}

From source file:co.cask.cdap.etl.planner.ConnectorDag.java

/**
 * Insert connector nodes into the dag.
 *
 * A connector node is a boundary at which the pipeline can be split into sub dags.
 * It is treated as a sink within one subdag and as a source in another subdag.
 * A connector is inserted in front of a reduce node (aggregator plugin type, etc)
 * when there is a path from some source to one or more reduce nodes or sinks.
 * This is required because in a single mapper, we can't write to both a sink and do a reduce.
 * We also can't have 2 reducers in a single mapreduce job.
 * A connector is also inserted in front of any node if the inputs into the node come from multiple sources.
 * A connector is also inserted in front of a reduce node that has another reduce node as its input.
 *
 * After splitting, the result will be a collection of subdags, with each subdag representing a single
 * mapreduce job (or possibly map-only job). Or in spark, each subdag would be a series of operations from
 * one rdd to another rdd.
 *
 * @return the nodes that had connectors inserted in front of them
 */
public Set<String> insertConnectors() {
    // none of this is particularly efficient, but this should never be a bottleneck
    // unless we're dealing with very very large dags

    Set<String> addedAlready = new HashSet<>();

    /*
        Isolate the specified node by inserting a connector in front of and behind the node.
        If all inputs into the node are sources, a connector will not be inserted in front.
        If all outputs from the node are sinks, a connector will not be inserted after.
        Other connectors count as both a source and a sink.
     */
    for (String isolationNode : isolationNodes) {
        isolate(isolationNode, addedAlready);
    }

    /*
        Find sections of the dag where a source is writing to both a sink and a reduce node
        or to multiple reduce nodes. A connector counts as both a source and a sink.
            
        for example, if a source is writing to both a sink and a reduce:
            
                    |---> sink1
          source ---|
                    |---> reduce ---> sink2
            
        we need to split this up into:
            
                    |---> sink1
          source ---|                    =>     connector ---> reduce ---> sink2
                    |---> connector
            
        The same logic applies if a source is writing to multiple reduce nodes. So if we run into this scenario,
        we will add a connector in front of all reduce nodes accessible from the source.
        When trying to find a path from a source to multiple reduce nodes, we also need to stop searching
        once we see a reduce node or a connector. Otherwise, every single reduce node would end up
        with a connector in front of it.
     */
    for (String node : getTopologicalOrder()) {
        if (!sources.contains(node) && !connectors.contains(node)) {
            continue;
        }

        Set<String> accessibleByNode = accessibleFrom(node, Sets.union(connectors, reduceNodes));
        Set<String> sinksAndReduceNodes = Sets.intersection(accessibleByNode,
                Sets.union(connectors, Sets.union(sinks, reduceNodes)));
        // don't count this node
        sinksAndReduceNodes = Sets.difference(sinksAndReduceNodes, ImmutableSet.of(node));

        if (sinksAndReduceNodes.size() > 1) {
            for (String reduceNodeConnector : Sets.intersection(sinksAndReduceNodes, reduceNodes)) {
                addConnectorInFrontOf(reduceNodeConnector, addedAlready);
            }
        }
    }

    /*
        Find nodes that have input from multiple sources and add them to the connectors set.
        We can probably remove this part once we support multiple sources. Even though we don't support
        multiple sources today, the fact that we support forks means we have to deal with the multi-input case
        and break it down into separate phases. For example:
            
                |---> reduce1 ---|
          n1 ---|                |---> n2
                |---> reduce2 ---|
            
        From the previous section, both reduces will get a connector inserted in front:
            
                |---> reduce1.connector           reduce1.connector ---> reduce1 ---|
          n1 ---|                         =>                                        |---> n2
                |---> reduce2.connector           reduce2.connector ---> reduce2 ---|
            
        Since we don't support multi-input yet, we need to convert that further into 3 phases:
            
          reduce1.connector ---> reduce1 ---> n2.connector
                                                            =>       sink.connector ---> n2
          reduce2.connector ---> reduce2 ---> n2.connector
            
        To find these nodes, we traverse the graph in order and keep track of sources that have a path to each node
        with a map of node -> [ sources that have a path to the node ]
        if we find that a node is accessible by more than one source, we insert a connector in front of it and
        reset all sources for that node to its connector
     */
    SetMultimap<String, String> nodeSources = HashMultimap.create();
    for (String source : sources) {
        nodeSources.put(source, source);
    }
    for (String node : getTopologicalOrder()) {
        Set<String> connectedSources = nodeSources.get(node);
        /*
            If this node is a connector, replace all sources for this node with itself, since a connector is a source
            Taking the example above, we end up with:
                
              reduce1.connector ---> reduce1 ---|
                                                |---> n2
              reduce2.connector ---> reduce2 ---|
                
            When we get to n2, we need it to see that it has 2 sources: reduce1.connector and reduce2.connector
            So when we get to reduce1.connector, we need to replace its source (n1) with itself.
            Similarly, when we get to reduce2.connector, we need to replace its source (n1) with itself.
            If we didn't, when we got to n2, it would think its only source is n1, and we would
            miss the connector that should be inserted in front of it.
         */
        if (connectors.contains(node)) {
            connectedSources = new HashSet<>();
            connectedSources.add(node);
            nodeSources.replaceValues(node, connectedSources);
        }
        // if more than one source is connected to this node, then we need to insert a connector in front of this node.
        // its source should then be changed to the connector that was inserted in front of it.
        if (connectedSources.size() > 1) {
            String connectorNode = addConnectorInFrontOf(node, addedAlready);
            connectedSources = new HashSet<>();
            connectedSources.add(connectorNode);
            nodeSources.replaceValues(node, connectedSources);
        }
        for (String nodeOutput : getNodeOutputs(node)) {
            // propagate the source connected to me to all my outputs
            nodeSources.putAll(nodeOutput, connectedSources);
        }
    }

    /*
        Find reduce nodes that are accessible from other reduce nodes. For example:
            
          source ---> reduce1 ---> reduce2 ---> sink
            
        Needs to be broken down into:
            
          source ---> reduce1 ---> reduce2.connector      =>     reduce2.connector ---> reduce2 ---> sink
     */
    for (String reduceNode : reduceNodes) {
        Set<String> accessibleByNode = accessibleFrom(reduceNode, Sets.union(connectors, reduceNodes));
        Set<String> accessibleReduceNodes = Sets.intersection(accessibleByNode, reduceNodes);

        // Sets.difference because we don't want to add ourselves
        accessibleReduceNodes = Sets.difference(accessibleReduceNodes, ImmutableSet.of(reduceNode));
        for (String accessibleReduceNode : accessibleReduceNodes) {
            addConnectorInFrontOf(accessibleReduceNode, addedAlready);
        }
    }

    return addedAlready;
}
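
A detail worth noting in the snippet above: Sets.union has no varargs overload, so the three-way union of connectors, sinks, and reduce nodes is built by nesting two calls, and the result is used directly as a read-only "stop set" without ever being materialized. A small, hedged sketch of that nesting (the node names are invented for illustration):

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import java.util.Set;

public class NestedUnionSketch {
    public static void main(String[] args) {
        Set<String> connectors = ImmutableSet.of("agg1.connector");
        Set<String> sinks = ImmutableSet.of("sink1", "sink2");
        Set<String> reduceNodes = ImmutableSet.of("agg1");

        // Three-way union via nesting; still just a view over the three sets.
        Set<String> stopNodes = Sets.union(connectors, Sets.union(sinks, reduceNodes));
        System.out.println(stopNodes); // [agg1.connector, sink1, sink2, agg1]

        // Equivalent eager alternative when a materialized copy is acceptable.
        Set<String> copy = ImmutableSet.<String>builder()
                .addAll(connectors)
                .addAll(sinks)
                .addAll(reduceNodes)
                .build();
        System.out.println(copy.equals(stopNodes)); // true
    }
}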

From source file:com.wrmsr.wava.analyze.ValueTypeAnalysis.java

private static ImMap<Name, Set<Type>> updateBreakValueTypes(ImMap<Name, Set<Type>> map, List<Name> targets,
        Type type) {
    for (Name target : targets) {
        map = map.assoc(target,
                immutableEnumSet(Sets.union(map.getOrElse(target, ImmutableSet.of()), ImmutableSet.of(type))));
    }
    return map;
}
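
The snippet accumulates one more Type into the set stored under each break target, using Sets.union against a singleton set and copying the result so the stored value stays immutable. A plain-Java sketch of the same pattern, with the persistent ImMap replaced by an ordinary HashMap for illustration:

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class AccumulateSketch {
    // Returns a new map in which `value` has been added to the set under each key in `keys`,
    // without mutating any of the previously stored sets.
    static Map<String, Set<String>> addToAll(Map<String, Set<String>> map,
            Iterable<String> keys, String value) {
        Map<String, Set<String>> result = new HashMap<>(map);
        for (String key : keys) {
            Set<String> existing = result.getOrDefault(key, ImmutableSet.of());
            // union() builds the combined view cheaply; copyOf materializes it so the stored
            // set no longer references the temporary singleton.
            result.put(key, ImmutableSet.copyOf(Sets.union(existing, ImmutableSet.of(value))));
        }
        return result;
    }

    public static void main(String[] args) {
        Map<String, Set<String>> typesByTarget = Map.of("loop", Set.of("i32"));
        System.out.println(addToAll(typesByTarget, Set.of("loop", "exit"), "i64"));
        // e.g. {loop=[i32, i64], exit=[i64]} -- HashMap iteration order is unspecified
    }
}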

From source file:org.gradle.api.internal.tasks.compile.incremental.deps.ClassSetAnalysis.java

private DependentsSet getDependents(String className) {
    DependentsSet dependents = classAnalysis.getDependents(className);
    if (dependents.isDependencyToAll()) {
        return dependents;
    }
    ImmutableSet<String> additionalDeps = dependenciesFromAnnotationProcessing.get(className);
    if (additionalDeps.isEmpty()) {
        return dependents;
    }
    return DependentsSet.dependents(Sets.union(dependents.getDependentClasses(), additionalDeps));
}

From source file:org.eclipse.sw360.licenseinfo.parsers.CombinedCLIParser.java

@Override
public <T> List<LicenseInfoParsingResult> getLicenseInfos(Attachment attachment, User user, T context)
        throws TException {
    AttachmentContent attachmentContent = attachmentContentProvider.getAttachmentContent(attachment);
    InputStream attachmentStream = null;
    List<LicenseInfoParsingResult> parsingResults = new ArrayList<>();
    Map<String, Release> releasesByExternalId = prepareReleasesByExternalId(getCorrelationKey());

    try {
        attachmentStream = attachmentConnector.getAttachmentStream(attachmentContent, user, context);
        Document doc = getDocument(attachmentStream);

        Map<String, Set<String>> copyrightSetsByExternalId = getCopyrightSetsByExternalIdsMap(doc);
        Map<String, Set<LicenseNameWithText>> licenseNamesWithTextsByExternalId = getLicenseNamesWithTextsByExternalIdsMap(
                doc);

        Set<String> allExternalIds = Sets.union(copyrightSetsByExternalId.keySet(),
                licenseNamesWithTextsByExternalId.keySet());
        allExternalIds.forEach(extId -> {
            LicenseInfoParsingResult parsingResult = getLicenseInfoParsingResultForExternalId(attachmentContent,
                    releasesByExternalId, copyrightSetsByExternalId, licenseNamesWithTextsByExternalId, extId);
            parsingResults.add(parsingResult);
        });
    } catch (ParserConfigurationException | IOException | XPathExpressionException | SAXException
            | SW360Exception e) {
        log.error(e);
        parsingResults.add(new LicenseInfoParsingResult().setStatus(LicenseInfoRequestStatus.FAILURE)
                .setMessage("Error while parsing combined CLI file: " + e.toString()));
    } finally {
        closeQuietly(attachmentStream, log);
    }
    return parsingResults;
}
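
Here Sets.union joins the key sets of two maps so that a single pass visits every external id that occurs in either one. A stripped-down, hedged sketch of that lookup pattern (plain strings instead of the sw360 domain types):

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Sets;

import java.util.Map;
import java.util.Set;

public class KeyUnionSketch {
    public static void main(String[] args) {
        Map<String, Set<String>> copyrightsById = ImmutableMap.of("id1", Set.of("(c) Example Corp"));
        Map<String, Set<String>> licensesById = ImmutableMap.of("id2", Set.of("Apache-2.0"));

        // Every id present in either map, without copying the key sets.
        Set<String> allIds = Sets.union(copyrightsById.keySet(), licensesById.keySet());
        for (String id : allIds) {
            System.out.println(id
                    + " copyrights=" + copyrightsById.getOrDefault(id, Set.of())
                    + " licenses=" + licensesById.getOrDefault(id, Set.of()));
        }
    }
}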

From source file:io.druid.firehose.rocketmq.RocketMQFirehoseFactory.java

@Override
public Firehose connect(ByteBufferInputRowParser byteBufferInputRowParser) throws IOException, ParseException {

    Set<String> newDimExclus = Sets.union(
            byteBufferInputRowParser.getParseSpec().getDimensionsSpec().getDimensionExclusions(),
            Sets.newHashSet("feed"));

    final ByteBufferInputRowParser theParser = byteBufferInputRowParser
            .withParseSpec(byteBufferInputRowParser.getParseSpec().withDimensionsSpec(byteBufferInputRowParser
                    .getParseSpec().getDimensionsSpec().withDimensionExclusions(newDimExclus)));

    /**
     * Topic-Queue mapping.
     */
    final ConcurrentHashMap<String, Set<MessageQueue>> topicQueueMap;

    /**
     * Default Pull-style client for RocketMQ.
     */
    final DefaultMQPullConsumer defaultMQPullConsumer;
    final DruidPullMessageService pullMessageService;

    messageQueueTreeSetMap.clear();
    windows.clear();

    try {
        defaultMQPullConsumer = new DefaultMQPullConsumer(this.consumerGroup);
        defaultMQPullConsumer.setMessageModel(MessageModel.CLUSTERING);
        topicQueueMap = new ConcurrentHashMap<>();

        pullMessageService = new DruidPullMessageService(defaultMQPullConsumer);
        for (String topic : feed) {
            Validators.checkTopic(topic);
            topicQueueMap.put(topic, defaultMQPullConsumer.fetchSubscribeMessageQueues(topic));
        }
        DruidMessageQueueListener druidMessageQueueListener = new DruidMessageQueueListener(
                Sets.newHashSet(feed), topicQueueMap, defaultMQPullConsumer);
        defaultMQPullConsumer.setMessageQueueListener(druidMessageQueueListener);
        defaultMQPullConsumer.start();
        pullMessageService.start();
    } catch (MQClientException e) {
        LOGGER.error("Failed to start DefaultMQPullConsumer", e);
        throw new IOException("Failed to start RocketMQ client", e);
    }

    return new Firehose() {

        @Override
        public boolean hasMore() {
            boolean hasMore = false;
            DruidPullRequest earliestPullRequest = null;

            for (Map.Entry<String, Set<MessageQueue>> entry : topicQueueMap.entrySet()) {
                for (MessageQueue messageQueue : entry.getValue()) {
                    if (messageQueueTreeSetMap.keySet().contains(messageQueue)
                            && !messageQueueTreeSetMap.get(messageQueue).isEmpty()) {
                        hasMore = true;
                    } else {
                        try {
                            long offset = defaultMQPullConsumer.fetchConsumeOffset(messageQueue, false);
                            int batchSize = (null == pullBatchSize || pullBatchSize.isEmpty())
                                    ? DEFAULT_PULL_BATCH_SIZE
                                    : Integer.parseInt(pullBatchSize);

                            DruidPullRequest newPullRequest = new DruidPullRequest(messageQueue, null, offset,
                                    batchSize, !hasMessagesPending());

                            // notify pull message service to pull messages from brokers.
                            pullMessageService.putRequest(newPullRequest);

                            // set the earliest pull in case we need to block.
                            if (null == earliestPullRequest) {
                                earliestPullRequest = newPullRequest;
                            }
                        } catch (MQClientException e) {
                            LOGGER.error("Failed to fetch consume offset for queue: {}", entry.getKey());
                        }
                    }
                }
            }

            // Block only when there is no locally pending messages.
            if (!hasMore && null != earliestPullRequest) {
                try {
                    earliestPullRequest.getCountDownLatch().await();
                    hasMore = true;
                } catch (InterruptedException e) {
                    LOGGER.error("CountDownLatch await got interrupted", e);
                }
            }
            return hasMore;
        }

        @Override
        public InputRow nextRow() {
            for (Map.Entry<MessageQueue, ConcurrentSkipListSet<MessageExt>> entry : messageQueueTreeSetMap
                    .entrySet()) {
                if (!entry.getValue().isEmpty()) {
                    MessageExt message = entry.getValue().pollFirst();
                    InputRow inputRow = theParser.parse(ByteBuffer.wrap(message.getBody()));

                    if (!windows.keySet().contains(entry.getKey())) {
                        windows.put(entry.getKey(), new ConcurrentSkipListSet<Long>());
                    }
                    windows.get(entry.getKey()).add(message.getQueueOffset());
                    return inputRow;
                }
            }

            // should never happen.
            throw new RuntimeException("Unexpected Fatal Error! There should have been one row available.");
        }

        @Override
        public Runnable commit() {
            return new Runnable() {
                @Override
                public void run() {
                    OffsetStore offsetStore = defaultMQPullConsumer.getOffsetStore();
                    Set<MessageQueue> updated = new HashSet<>();
                    // calculate offsets according to consuming windows.
                    for (ConcurrentHashMap.Entry<MessageQueue, ConcurrentSkipListSet<Long>> entry : windows
                            .entrySet()) {
                        while (!entry.getValue().isEmpty()) {

                            long offset = offsetStore.readOffset(entry.getKey(),
                                    ReadOffsetType.MEMORY_FIRST_THEN_STORE);
                            if (offset + 1 > entry.getValue().first()) {
                                entry.getValue().pollFirst();
                            } else if (offset + 1 == entry.getValue().first()) {
                                entry.getValue().pollFirst();
                                offsetStore.updateOffset(entry.getKey(), offset + 1, true);
                                updated.add(entry.getKey());
                            } else {
                                break;
                            }

                        }
                    }
                    offsetStore.persistAll(updated);
                }
            };
        }

        @Override
        public void close() throws IOException {
            defaultMQPullConsumer.shutdown();
            pullMessageService.shutdown(false);
        }
    };
}

From source file:com.squareup.javapoet.TypeSpec.java

void emit(CodeWriter codeWriter, String enumName, Set<Modifier> implicitModifiers) throws IOException {
    if (enumName != null) {
        codeWriter.emit("$L", enumName);
        if (!anonymousTypeArguments.formatParts.isEmpty()) {
            codeWriter.emit("(");
            codeWriter.emit(anonymousTypeArguments);
            codeWriter.emit(")");
        }
        if (fieldSpecs.isEmpty() && methodSpecs.isEmpty() && typeSpecs.isEmpty()) {
            return; // Avoid unnecessary braces "{}".
        }
        codeWriter.emit(" {\n");
    } else if (anonymousTypeArguments != null) {
        codeWriter.emit("new $T(", getOnlyElement(superinterfaces, superclass));
        codeWriter.emit(anonymousTypeArguments);
        codeWriter.emit(") {\n");
    } else {
        codeWriter.emitJavadoc(javadoc);
        codeWriter.emitAnnotations(annotations, false);
        codeWriter.emitModifiers(modifiers, Sets.union(implicitModifiers, kind.asMemberModifiers));
        codeWriter.emit("$L $L", Ascii.toLowerCase(kind.name()), name);
        codeWriter.emitTypeVariables(typeVariables);

        List<Type> extendsTypes;
        List<Type> implementsTypes;
        if (kind == Kind.INTERFACE) {
            extendsTypes = superinterfaces;
            implementsTypes = ImmutableList.of();
        } else {
            extendsTypes = superclass.equals(ClassName.OBJECT) ? ImmutableList.<Type>of()
                    : ImmutableList.of(superclass);
            implementsTypes = superinterfaces;
        }

        if (!extendsTypes.isEmpty()) {
            codeWriter.emit(" extends");
            boolean firstType = true;
            for (Type type : extendsTypes) {
                if (!firstType)
                    codeWriter.emit(",");
                codeWriter.emit(" $T", type);
                firstType = false;
            }
        }

        if (!implementsTypes.isEmpty()) {
            codeWriter.emit(" implements");
            boolean firstType = true;
            for (Type type : implementsTypes) {
                if (!firstType)
                    codeWriter.emit(",");
                codeWriter.emit(" $T", type);
                firstType = false;
            }
        }

        codeWriter.emit(" {\n");
    }

    codeWriter.pushType(this);
    codeWriter.indent();
    boolean firstMember = true;
    for (Iterator<Map.Entry<String, TypeSpec>> i = enumConstants.entrySet().iterator(); i.hasNext();) {
        Map.Entry<String, TypeSpec> enumConstant = i.next();
        if (!firstMember)
            codeWriter.emit("\n");
        enumConstant.getValue().emit(codeWriter, enumConstant.getKey(), ImmutableSet.<Modifier>of());
        firstMember = false;
        if (i.hasNext()) {
            codeWriter.emit(",\n");
        } else if (!fieldSpecs.isEmpty() || !methodSpecs.isEmpty() || !typeSpecs.isEmpty()) {
            codeWriter.emit(";\n");
        } else {
            codeWriter.emit("\n");
        }
    }

    // Static fields.
    for (FieldSpec fieldSpec : fieldSpecs) {
        if (!fieldSpec.hasModifier(Modifier.STATIC))
            continue;
        if (!firstMember)
            codeWriter.emit("\n");
        fieldSpec.emit(codeWriter, kind.implicitFieldModifiers);
        firstMember = false;
    }

    // Non-static fields.
    for (FieldSpec fieldSpec : fieldSpecs) {
        if (fieldSpec.hasModifier(Modifier.STATIC))
            continue;
        if (!firstMember)
            codeWriter.emit("\n");
        fieldSpec.emit(codeWriter, kind.implicitFieldModifiers);
        firstMember = false;
    }

    // Constructors.
    for (MethodSpec methodSpec : methodSpecs) {
        if (!methodSpec.isConstructor())
            continue;
        if (!firstMember)
            codeWriter.emit("\n");
        methodSpec.emit(codeWriter, name, kind.implicitMethodModifiers);
        firstMember = false;
    }

    // Methods (static and non-static).
    for (MethodSpec methodSpec : methodSpecs) {
        if (methodSpec.isConstructor())
            continue;
        if (!firstMember)
            codeWriter.emit("\n");
        methodSpec.emit(codeWriter, name, kind.implicitMethodModifiers);
        firstMember = false;
    }

    // Types.
    for (TypeSpec typeSpec : typeSpecs) {
        if (!firstMember)
            codeWriter.emit("\n");
        typeSpec.emit(codeWriter, null, kind.implicitTypeModifiers);
        firstMember = false;
    }

    codeWriter.unindent();
    codeWriter.popType();

    codeWriter.emit("}");
    if (enumName == null && anonymousTypeArguments == null) {
        codeWriter.emit("\n"); // If this type isn't also a value, include a trailing newline.
    }
}