Example usage for com.google.common.collect Sets union

List of usage examples for com.google.common.collect Sets union

Introduction

On this page you can find example usage for com.google.common.collect Sets union.

Prototype

public static <E> SetView<E> union(final Set<? extends E> set1, final Set<? extends E> set2) 

Source Link

Document

Returns an unmodifiable view of the union of two sets.

Usage

From source file:com.jgaap.distances.CosineDistance.java

/**
 * Returns the cosine distance between two histograms.
 *
 * Computes 1 minus the cosine similarity of the normalized event-frequency
 * vectors, taken over the union of events present in either histogram
 * (events missing from one side contribute a frequency of 0 there), and
 * returns its absolute value.
 *
 * @param unknownHistogram
 *            the histogram of the unknown document
 * @param knownHistogram
 *            the histogram of the known document
 * @return the cosine distance between them
 */
@Override
public double distance(Histogram unknownHistogram, Histogram knownHistogram) {

    double dotProduct = 0.0;
    double h1Magnitude = 0.0;
    double h2Magnitude = 0.0;

    // Union view of every event seen in either histogram.
    Set<Event> events = Sets.union(unknownHistogram.uniqueEvents(), knownHistogram.uniqueEvents());

    for (Event event : events) {
        double unknownNormalizedFrequency = unknownHistogram.normalizedFrequency(event);
        double knownNormalizedFrequency = knownHistogram.normalizedFrequency(event);

        dotProduct += unknownNormalizedFrequency * knownNormalizedFrequency;
        h1Magnitude += unknownNormalizedFrequency * unknownNormalizedFrequency;
        h2Magnitude += knownNormalizedFrequency * knownNormalizedFrequency;
    }

    // NOTE(review): if either histogram is empty the divisor is 0 and the
    // result is NaN — presumably callers never pass empty histograms; confirm.
    return Math.abs((dotProduct / (Math.sqrt(h1Magnitude * h2Magnitude))) - 1);
}

From source file:org.sosy_lab.cpachecker.cfa.ast.c.FileLocationCollectingVisitor.java

/**
 * Collects the cast expression's own file location together with the
 * locations gathered from its operand.
 */
@Override
public Set<FileLocation> visit(CComplexCastExpression pE) throws RuntimeException {
    Set<FileLocation> ownLocation = Collections.singleton(pE.getFileLocation());
    Set<FileLocation> operandLocations = pE.getOperand().accept(this);
    return Sets.union(ownLocation, operandLocations);
}

From source file:terrastore.cluster.ensemble.impl.View.java

/**
 * Computes how much this view differs from another one, as an integer
 * percentage: the number of members present in exactly one of the two
 * views, divided by the size of their union.
 */
public int percentageOfChange(View anotherView) {
    Set<Member> ours = new HashSet<Member>(members);
    Set<Member> theirs = new HashSet<Member>(anotherView.getMembers());
    int changed = Sets.difference(ours, theirs).size() + Sets.difference(theirs, ours).size();
    int total = Sets.union(ours, theirs).size();
    return (int) (((float) changed / total) * 100);
}

From source file:org.apache.fluo.core.observer.v2.ObserversV2.java

/**
 * Builds the observer registry for a JSON-configured (v2) observer provider.
 *
 * Instantiates the configured {@code ObserverProvider}, lets it register its
 * observers, records them together with printable column aliases, and then
 * verifies that an observer was provided for every previously configured
 * strong and weak column.
 *
 * @throws FluoException if the provider failed to cover some configured column
 */
public ObserversV2(Environment env, JsonObservers jco, Set<Column> strongColumns, Set<Column> weakColumns) {

    ObserverProvider obsProvider = ObserverStoreV2.newObserverProvider(jco.getObserverProviderClass());

    ObserverProviderContextImpl ctx = new ObserverProviderContextImpl(env);

    ObserverRegistry or = new ObserverRegistry(strongColumns, weakColumns);
    obsProvider.provide(or, ctx);

    this.observers = or.observers;
    this.aliases = or.aliases;
    // Ensure every observed column has a printable alias for log/error output.
    this.observers.forEach((k, v) -> aliases.computeIfAbsent(k, col -> Hex.encNonAscii(col, ":")));

    // the following check ensures observers are provided for all previously configured columns
    SetView<Column> diff = Sets.difference(observers.keySet(), Sets.union(strongColumns, weakColumns));
    if (!diff.isEmpty()) {
        throw new FluoException("ObserverProvider " + jco.getObserverProviderClass()
                + " did not provide observers for columns " + diff);
    }
}

From source file:biz.ganttproject.impex.csv.ResourceRecords.java

// Defines the CSV "Resource group" record section. The recognized column
// headers are the standard resource field names plus the standard-rate
// column; ID and NAME are the mandatory fields. Both managers are required
// (non-null).
ResourceRecords(HumanResourceManager resourceManager, RoleManager roleManager) {
    super("Resource group",
            Sets.union(Sets.newHashSet(GanttCSVOpen.getFieldNames(ResourceFields.values())),
                    ImmutableSet.of(ResourceDefaultColumn.STANDARD_RATE.getName())),
            Sets.newHashSet(GanttCSVOpen.getFieldNames(ResourceFields.ID, ResourceFields.NAME)));
    this.resourceManager = Preconditions.checkNotNull(resourceManager);
    myRoleManager = Preconditions.checkNotNull(roleManager);
}

From source file:io.druid.firehose.kafka.KafkaEightFirehoseFactory.java

/**
 * Connects to Kafka and returns a Firehose that parses each message with the
 * supplied parser, after forcing the "feed" dimension into the parser's
 * dimension exclusions. Returns null when the expected single stream for the
 * configured feed is not available.
 */
@Override
public Firehose connect(final ByteBufferInputRowParser firehoseParser) throws IOException {
    Set<String> newDimExclus = Sets.union(
            firehoseParser.getParseSpec().getDimensionsSpec().getDimensionExclusions(),
            Sets.newHashSet("feed"));
    // Rebuild the parser with the augmented exclusion set; the original
    // parser instance is left untouched.
    final ByteBufferInputRowParser theParser = firehoseParser
            .withParseSpec(firehoseParser.getParseSpec().withDimensionsSpec(
                    firehoseParser.getParseSpec().getDimensionsSpec().withDimensionExclusions(newDimExclus)));

    final ConsumerConnector connector = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumerProps));

    // Request exactly one stream for this feed's topic.
    final Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector
            .createMessageStreams(ImmutableMap.of(feed, 1));

    final List<KafkaStream<byte[], byte[]>> streamList = streams.get(feed);
    if (streamList == null || streamList.size() != 1) {
        return null;
    }

    final KafkaStream<byte[], byte[]> stream = streamList.get(0);
    final ConsumerIterator<byte[], byte[]> iter = stream.iterator();

    return new Firehose() {
        @Override
        public boolean hasMore() {
            return iter.hasNext();
        }

        @Override
        public InputRow nextRow() {
            final byte[] message = iter.next().message();

            // NOTE(review): a null message yields a null row — presumably
            // callers tolerate null from nextRow(); confirm against the
            // Firehose contract.
            if (message == null) {
                return null;
            }

            return theParser.parse(ByteBuffer.wrap(message));
        }

        @Override
        public Runnable commit() {
            return new Runnable() {
                @Override
                public void run() {
                    /*
                      This is actually not going to do exactly what we want, cause it will be called asynchronously
                      after the persist is complete.  So, it's going to commit that it's processed more than was actually
                      persisted.  This is unfortunate, but good enough for now.  Should revisit along with an upgrade
                      of our Kafka version.
                    */

                    log.info("committing offsets");
                    connector.commitOffsets();
                }
            };
        }

        @Override
        public void close() throws IOException {
            connector.shutdown();
        }
    };
}

From source file:uk.co.flax.luwak.benchmark.ValidatorResults.java

/**
 * Returns every document id that validated badly: those with missing
 * expected matches together with those that matched unexpectedly.
 */
public Collection<String> getBadDocuments() {
    Set<String> withMissing = missingMatches.keySet();
    Set<String> withExtra = extraMatches.keySet();
    return Sets.union(withMissing, withExtra);
}

From source file:com.google.enterprise.connector.pusher.AclInheritFromDocidFilter.java

/**
 * Returns the source document's property names that pass the predicate
 * filter, plus the ACL inherit-from property name.
 */
@Override
public Set<String> getPropertyNames(Document source) throws RepositoryException {
    Set<String> filtered = Sets.filter(source.getPropertyNames(), propsPredicate);
    Set<String> inheritFrom = Sets.<String>newHashSet(SpiConstants.PROPNAME_ACLINHERITFROM);
    return Sets.union(filtered, inheritFrom);
}

From source file:springfox.documentation.spring.web.plugins.CombinedRequestHandler.java

/**
 * Combines the URL pattern conditions of both underlying request handlers
 * into a single condition matching any pattern from either handler.
 */
@Override
public PatternsRequestCondition getPatternsCondition() {
    SetView<String> patterns = Sets.union(first.getPatternsCondition().getPatterns(),
            second.getPatternsCondition().getPatterns());
    // Pass a zero-length array: toArray allocates the right size itself, and
    // this avoids the extra pass that size() costs on a lazy union view.
    return new PatternsRequestCondition(patterns.toArray(new String[0]));
}

From source file:org.opendaylight.tcpmd5.nio.MD5ChannelOptions.java

/**
 * Returns the socket options supported by the underlying channel, extended
 * with the MD5 options when the MD5 access implementation is available.
 */
public Set<SocketOption<?>> supportedOptions() {
    if (access == null) {
        return ch.supportedOptions();
    }
    return Sets.union(MD5_OPTIONS, ch.supportedOptions());
}