Example usage for com.google.common.collect Sets union

List of usage examples for com.google.common.collect Sets union

Introduction

On this page you can find example usages for com.google.common.collect Sets union.

Prototype

public static <E> SetView<E> union(final Set<? extends E> set1, final Set<? extends E> set2) 

Source Link

Document

Returns an unmodifiable view of the union of two sets.

Usage

From source file:com.google.enterprise.connector.pusher.InheritFromExtractedAclDocumentFilter.java

@Override
public Set<String> getPropertyNames(Document source) throws RepositoryException {
    // Expose the extracted-ACL inheritance properties alongside whatever the
    // wrapped filter already reports; Sets.union returns an unmodifiable view.
    Set<String> inherited = super.getPropertyNames(source);
    return Sets.union(inherited, INHERIT_FROM_EXTRACTED_ACL_PROPS);
}

From source file:org.caleydo.view.relationshipexplorer.ui.column.operation.ESetOperation.java

/**
 * Applies this set operation to the two operands.
 *
 * @param set1 first operand
 * @param set2 second operand
 * @return the resulting set ({@code REMOVE} returns a new mutable set;
 *         {@code INTERSECTION}/{@code UNION} return unmodifiable Guava views),
 *         or {@code null} for an unhandled operation
 */
public Set<Object> apply(Set<Object> set1, Set<Object> set2) {
    if (this == REMOVE) {
        // Elements of set2 not present in set1, materialized as a mutable copy.
        Set<Object> difference = new HashSet<>(set2);
        difference.removeAll(set1);
        return difference;
    }
    if (this == INTERSECTION) {
        return Sets.intersection(set1, set2);
    }
    if (this == UNION) {
        return Sets.union(set1, set2);
    }
    // No result defined for any other constant.
    return null;
}

From source file:com.googlecode.blaisemath.style.StyleContext.java

/**
 * Get collection of style types supported by this context, including
 * types supported by the parent context.
 * @return types/*  w  w w .  j  a v a2s  .c  o m*/
 */
public Set<StyleModifier> getAllModifiers() {
    return parent.isPresent() ? Sets.union(parent.get().getAllModifiers(), modifiers) : modifiers;
}

From source file:org.gradle.api.tasks.diagnostics.TaskReportTask.java

/**
 * Builds and renders the task report for the given project and all of its
 * subprojects.
 *
 * @param project the root project to report on
 * @throws IOException if the renderer fails to write its output
 */
public void generate(Project project) throws IOException {
    renderer.showDetail(isDetail());
    renderer.addDefaultTasks(project.getDefaultTasks());

    // When not in detail mode, tasks with the same name across projects are
    // merged into a single aggregate entry.
    AggregateMultiProjectTaskReportModel aggregateModel = new AggregateMultiProjectTaskReportModel(!isDetail());
    TaskDetailsFactory taskDetailsFactory = new TaskDetailsFactory(project);

    SingleProjectTaskReportModel projectTaskModel = new SingleProjectTaskReportModel(taskDetailsFactory);
    ProjectInternal projectInternal = (ProjectInternal) project;
    TaskContainerInternal tasks = projectInternal.getTasks();
    // Force lazy task rules/placeholders to be realized before reporting.
    tasks.actualize();
    // NOTE(review): only the root project's implicit tasks are unioned in here;
    // the subproject loop below reports explicit tasks only — confirm intended.
    projectTaskModel.build(Sets.union(tasks, projectInternal.getImplicitTasks()));
    aggregateModel.add(projectTaskModel);

    for (Project subproject : project.getSubprojects()) {
        SingleProjectTaskReportModel subprojectTaskModel = new SingleProjectTaskReportModel(taskDetailsFactory);
        ProjectInternal subprojectInternal = (ProjectInternal) subproject;
        TaskContainerInternal subprojectTasks = subprojectInternal.getTasks();
        subprojectTasks.actualize();
        subprojectTaskModel.build(subprojectTasks);
        aggregateModel.add(subprojectTaskModel);
    }

    aggregateModel.build();

    // Regroup the aggregated tasks by task group for presentation.
    DefaultGroupTaskReportModel model = new DefaultGroupTaskReportModel();
    model.build(aggregateModel);

    for (String group : model.getGroups()) {
        renderer.startTaskGroup(group);
        for (TaskDetails task : model.getTasksForGroup(group)) {
            renderer.addTask(task);
            for (TaskDetails child : task.getChildren()) {
                renderer.addChildTask(child);
            }
        }
    }
    renderer.completeTasks();

    // Rule-based (pattern) tasks are listed after the concrete tasks.
    for (Rule rule : project.getTasks().getRules()) {
        renderer.addRule(rule);
    }
}

From source file:org.jetbrains.kotlin.codegen.KotlinCodegenFacade.java

/**
 * Generates bytecode for the given source files: first all multifile-class
 * facades, then all regular packages, finishing the output factory at the end.
 * Cancellation is checked between each generated unit.
 *
 * @param files        source files to compile; must not contain null entries
 * @param state        the current generation state
 * @param errorHandler receives compilation errors instead of aborting
 */
public static void doGenerateFiles(@NotNull Collection<KtFile> files, @NotNull GenerationState state,
        @NotNull CompilationErrorHandler errorHandler) {
    // Partition the inputs: files annotated with @JvmMultifileClass are grouped
    // by facade class FqName; everything else is grouped by package FqName.
    MultiMap<FqName, KtFile> filesInPackages = new MultiMap<FqName, KtFile>();
    MultiMap<FqName, KtFile> filesInMultifileClasses = new MultiMap<FqName, KtFile>();

    for (KtFile file : files) {
        if (file == null)
            throw new IllegalArgumentException("A null file given for compilation");

        JvmFileClassInfo fileClassInfo = state.getFileClassesProvider().getFileClassInfo(file);

        if (fileClassInfo.getWithJvmMultifileClass()) {
            filesInMultifileClasses.putValue(fileClassInfo.getFacadeClassFqName(), file);
        } else {
            filesInPackages.putValue(file.getPackageFqName(), file);
        }
    }

    // Union with obsolete names so stale output for classes/packages that no
    // longer have sources still gets regenerated (with an empty file list).
    Set<FqName> obsoleteMultifileClasses = new HashSet<FqName>(state.getObsoleteMultifileClasses());
    for (FqName multifileClassFqName : Sets.union(filesInMultifileClasses.keySet(), obsoleteMultifileClasses)) {
        doCheckCancelled(state);
        generateMultifileClass(state, multifileClassFqName, filesInMultifileClasses.get(multifileClassFqName),
                errorHandler);
    }

    Set<FqName> packagesWithObsoleteParts = new HashSet<FqName>(state.getPackagesWithObsoleteParts());
    for (FqName packageFqName : Sets.union(packagesWithObsoleteParts, filesInPackages.keySet())) {
        doCheckCancelled(state);
        generatePackage(state, packageFqName, filesInPackages.get(packageFqName), errorHandler);
    }

    doCheckCancelled(state);
    state.getFactory().done();
}

From source file:org.usergrid.persistence.query.ir.result.UnionIterator.java

/**
 * Advances to the next page of merged results, drawing from the child
 * iterators round-robin until either the page is full or every child is
 * exhausted. Results beyond the page size are held in {@code remainderResults}
 * for the next call.
 *
 * @return up to {@code pageSize} UUIDs, or {@code null} when there are no
 *         child iterators at all
 */
@Override
protected Set<UUID> advance() {

    int size = iterators.size();

    if (size == 0) {
        return null;
    }

    // Seed the page with any overflow left over from the previous advance.
    Set<UUID> resultSet;

    if (remainderResults != null) {
        resultSet = remainderResults;
        remainderResults = null;
    } else {
        resultSet = new LinkedHashSet<UUID>();
    }

    // Count CONSECUTIVE exhausted iterators. BUG FIX: the previous counter
    // incremented every time an exhausted iterator was visited, and since the
    // round-robin revisits exhausted iterators, it could reach `size` and stop
    // early while other iterators still had results and the page was not full.
    // Resetting on every successful draw means we only stop after seeing all
    // `size` iterators exhausted back-to-back, i.e. all are truly drained.
    int consecutiveExhausted = 0;

    while (resultSet.size() < pageSize && consecutiveExhausted < size) {

        currentIndex = (currentIndex + 1) % iterators.size();

        ResultIterator itr = iterators.get(currentIndex);

        if (!itr.hasNext()) {
            consecutiveExhausted++;
            continue;
        }

        consecutiveExhausted = 0;

        // Accumulate directly into the concrete set. The previous code chained
        // Sets.union views, which made size() and duplicate checks increasingly
        // expensive with every pass; addAll deduplicates the same way.
        resultSet.addAll(itr.next());
    }

    // If we overshot the page size, keep the first pageSize elements and stash
    // the rest for the next advance.
    if (resultSet.size() > pageSize) {
        Set<UUID> returnSet = new LinkedHashSet<UUID>(pageSize);

        Iterator<UUID> itr = resultSet.iterator();

        for (int i = 0; i < pageSize && itr.hasNext(); i++) {
            returnSet.add(itr.next());
        }

        remainderResults = new LinkedHashSet<UUID>(pageSize);

        while (itr.hasNext()) {
            remainderResults.add(itr.next());
        }

        resultSet = returnSet;
    }

    return resultSet;

}

From source file:com.palantir.docker.compose.configuration.RemoteEnvironmentValidator.java

/**
 * Determines which required docker environment variables are absent or blank.
 *
 * @param dockerEnvironment the environment variables to validate
 * @return the names of required variables that are missing or empty
 */
private Collection<String> getMissingEnvVariables(Map<String, String> dockerEnvironment) {
    // DOCKER_HOST is always required; secure variables are added as needed.
    Collection<String> missing = newHashSet();
    for (String variable : Sets.union(newHashSet(DOCKER_HOST), secureVariablesRequired(dockerEnvironment))) {
        if (Strings.isNullOrEmpty(dockerEnvironment.get(variable))) {
            missing.add(variable);
        }
    }
    return missing;
}

From source file:natlab.toolkits.analysis.example.MaybeLive.java

/**
 * Merges the two sets using set union.
 *
 * @param in1 the first operand set
 * @param in2 the second operand set
 * @return a new mutable set containing every element of either input
 */
public Set<String> merge(Set<String> in1, Set<String> in2) {
    // Copy-then-addAll builds the union directly, instead of materializing
    // Guava's lazy Sets.union view into a HashSet as a second step.
    Set<String> union = new HashSet<>(in1);
    union.addAll(in2);
    return union;
}

From source file:com.jgaap.distances.KendallCorrelationDistance.java

/**
 * Returns KC distance between event sets es1 and es2
 *
 * @param unknownHistogram
 *            histogram of the unknown document's events
 * @param knownHistogram
 *            histogram of the known document's events
 * @return the KC distance (1 - Kendall correlation) between them
 */
@Override
public double distance(Histogram unknownHistogram, Histogram knownHistogram) {

    // Union of all events appearing in either histogram.
    Set<Event> s = Sets.union(unknownHistogram.uniqueEvents(), knownHistogram.uniqueEvents());

    /* make lists of the histograms */
    List<Pair<Event, Double>> l1 = new ArrayList<Pair<Event, Double>>();
    List<Pair<Event, Double>> l2 = new ArrayList<Pair<Event, Double>>();

    for (Event e : unknownHistogram.uniqueEvents()) {
        l1.add(new Pair<Event, Double>(e, unknownHistogram.relativeFrequency(e), 2));
    }
    for (Event e : knownHistogram.uniqueEvents()) {
        l2.add(new Pair<Event, Double>(e, knownHistogram.relativeFrequency(e), 2));
    }

    // Convert each frequency list to an event -> rank map. BUG FIX: the
    // previous inline version reset `rank` and `count` before processing the
    // second list but NOT the `oldfreq` sentinel, so if the last frequency of
    // list 1 equaled the first frequency of list 2, the leading run of list 2
    // was assigned rank 0. The helper initializes its own sentinel per list.
    HashMap<Event, Integer> hm1 = rankByFrequency(l1);
    HashMap<Event, Integer> hm2 = rankByFrequency(l2);

    double correlation = 0.0;

    // Compare every ordered pair of distinct events; each unordered pair is
    // visited twice, contributing symmetrically to the sum.
    Integer x1, x2, y1, y2;
    Set<Event> s2 = new HashSet<Event>(s);
    for (Event e1 : s) {
        for (Event e2 : s2) {

            if (e1.equals(e2))
                continue;

            /* get ranks of events e1 and e2 in both x and y distributions */
            x1 = hm1.get(e1);
            /* if not present, rank is size + 1 */
            if (x1 == null)
                x1 = hm1.size() + 1;

            x2 = hm2.get(e1);
            if (x2 == null)
                x2 = hm2.size() + 1;

            y1 = hm1.get(e2);
            if (y1 == null)
                y1 = hm1.size() + 1;

            y2 = hm2.get(e2);
            if (y2 == null)
                y2 = hm2.size() + 1;

            // Concordant pairs add +1, discordant pairs add -1.
            correlation += (sgn(x1.compareTo(y1)) * sgn(x2.compareTo(y2)));
        }
    }

    // NOTE(review): denominator uses hm1.size() * (hm2.size() - 1) rather than
    // the union size n*(n-1) — preserved as-is; confirm against the intended
    // Kendall tau normalization.
    correlation /= (hm1.size() * (hm2.size() - 1));

    return 1.0 - correlation;

}

/**
 * Sorts the list so the most frequent events come first, then assigns
 * "standard competition" ranks: tied frequencies share the rank of the first
 * tied entry. The list is sorted in place.
 *
 * @param list event/frequency pairs to rank
 * @return map from event to its 1-based rank
 */
private static HashMap<Event, Integer> rankByFrequency(List<Pair<Event, Double>> list) {
    /* sort the list so the most frequent items are at the top */
    Collections.sort(list);
    Collections.reverse(list);

    HashMap<Event, Integer> ranks = new HashMap<Event, Integer>();
    // Sentinel larger than any real frequency so the first entry always
    // starts a new rank run.
    double previousFrequency = Double.POSITIVE_INFINITY;
    int rank = 0;
    int count = 0;
    for (Pair<Event, Double> p : list) {
        Event e = (Event) (p.getFirst());
        double f = (Double) (p.getSecond());
        count++;
        if (f != previousFrequency) {
            rank = count;
            previousFrequency = f;
        }
        ranks.put(e, rank);
    }
    return ranks;
}

From source file:com.jgaap.distances.PearsonCorrelationDistance.java

/**
 * Returns PPMCC distance between event sets es1 and es2
 * /*from ww w  . j av a2 s.  c o m*/
 * @param es1
 *            The first EventSet
 * @param es2
 *            The second EventSet
 * @return the PPMCC distance (1 - Pearson's r) between them.
 *         Returns 0 if sd of both es1 and es2 are zero (point mass),
 *            otherwise returns 1 if only one is 0.  
 */
@Override
public double distance(Histogram unknownHistogram, Histogram knownHistogram) {
    Set<Event> s = Sets.union(unknownHistogram.uniqueEvents(), knownHistogram.uniqueEvents());

    int n; // number of elements

    double sigX; // sum of relative frequencies in h1;
    double sigY; // sum of relative frequencies in h2;
    double sigXY; // sum of products of relative frequencies
    double sigX2; // sum of squared relative frequencies in h1;
    double sigY2; // sum of squared relative frequencies in h2;

    double denom1, denom2; // factors of denominator

    double correlation = 0.0;

    //System.out.println(h1.toString());
    //System.out.println(h2.toString());
    //System.out.println(s.toString());

    n = s.size();
    sigX = 0.0;
    sigY = 0.0;
    sigXY = 0.0;
    sigX2 = 0.0;
    sigY2 = 0.0;
    for (Event e : s) {
        double x = unknownHistogram.relativeFrequency(e);
        double y = knownHistogram.relativeFrequency(e);
        sigX += x;
        sigY += y;
        sigX2 += x * x;
        sigY2 += y * y;
        sigXY += x * y;
    }
    ;

    //System.out.println("n = " + n);
    //System.out.println("sigX = " + sigX);
    //System.out.println("sigY = " + sigY);
    //System.out.println("sigXY = " + sigXY);
    //System.out.println("sigX2 = " + sigX2);
    //System.out.println("sigY2 = " + sigY2);

    // formula from http://davidmlane.com/hyperstat/A56626.html
    // as well as lots of other places

    denom1 = sigX2 - (sigX * sigX) / n;
    denom2 = sigY2 - (sigY * sigY) / n;

    //System.out.println("denom1 = " + denom1);
    //System.out.println("denom2 = " + denom2);

    // check for edge cases
    if (Math.abs(denom1) < 0.000001 && Math.abs(denom2) < 0.000001)
        return 0;

    if (Math.abs(denom1) < 0.000001 || Math.abs(denom2) < 0.000001)
        return 1;

    correlation = (sigXY - (sigX * sigY) / n) / Math.sqrt(denom1 * denom2);

    //System.out.println("correlation = "+correlation);
    //System.out.println("---");

    return 1.0 - correlation;

}