Example usage for com.google.common.collect Sets difference


Introduction

This page collects usage examples for com.google.common.collect.Sets.difference.

Prototype

public static <E> SetView<E> difference(final Set<E> set1, final Set<?> set2) 

Document

Returns an unmodifiable view of the difference of two sets.
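
The returned Sets.SetView is a lazy, unmodifiable view backed by the two input sets: it reflects later changes to them, and its mutating methods throw UnsupportedOperationException. A minimal sketch of typical usage (the sets and values below are illustrative only, not taken from the examples that follow):

Set<String> current = Sets.newHashSet("a", "b", "c");
Set<String> previous = Sets.newHashSet("b", "c", "d");

// Elements of 'current' that are not in 'previous'; note the asymmetry:
// difference(previous, current) would yield ["d"] instead.
Sets.SetView<String> added = Sets.difference(current, previous); // ["a"]

// The view is recomputed on access; copy it to detach from the backing sets.
Set<String> mutableCopy = added.copyInto(new HashSet<>());
ImmutableSet<String> immutableCopy = added.immutableCopy();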

Usage

From source file:com.thinkbiganalytics.metadata.rest.model.nifi.NiFiFlowCacheSync.java

public Set<String> getFeedsUpdatedSinceLastSync(Set<String> feeds) {
    com.google.common.collect.Sets.SetView<String> diff = Sets.difference(feeds, snapshot.getAllFeeds());
    return diff.copyInto(new HashSet<>());
}

From source file:google.registry.util.DiffUtils.java

/**
 * Returns a string displaying the differences between the old values in a set and the new ones.
 */
@VisibleForTesting
static String prettyPrintSetDiff(Set<?> a, Set<?> b) {
    Set<?> removed = Sets.difference(a, b);
    Set<?> added = Sets.difference(b, a);
    if (removed.isEmpty() && added.isEmpty()) {
        return "NO DIFFERENCES";
    }
    return Joiner.on("\n    ").skipNulls().join("",
            !added.isEmpty() ? ("ADDED:" + formatSetContents(added)) : null,
            !removed.isEmpty() ? ("REMOVED:" + formatSetContents(removed)) : null,
            "FINAL CONTENTS:" + formatSetContents(b));
}
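
Because difference is asymmetric, the example above calls it in both directions to report additions and removals separately. When only the combined set of changed elements is needed, Guava's Sets.symmetricDifference covers both directions in a single view; a small sketch with illustrative sets:

Set<String> before = ImmutableSet.of("x", "y");
Set<String> after = ImmutableSet.of("y", "z");

// Elements present in exactly one of the two sets: ["x", "z"].
Sets.SetView<String> changed = Sets.symmetricDifference(before, after);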

From source file:org.apache.whirr.compute.StartupProcess.java

void addSuccessAndLostNodes(RunNodesException rnex) {
    // workaround https://code.google.com/p/jclouds/issues/detail?id=923
    // by ensuring that any nodes in the "NodeErrors" do not get considered
    // successful
    Set<? extends NodeMetadata> reportedSuccessfulNodes = rnex.getSuccessfulNodes();
    Map<? extends NodeMetadata, ? extends Throwable> errorNodesMap = rnex.getNodeErrors();
    Set<? extends NodeMetadata> errorNodes = errorNodesMap.keySet();

    // "actual" successful nodes are ones that don't appear in the errorNodes 
    Set<? extends NodeMetadata> actualSuccessfulNodes = Sets.difference(reportedSuccessfulNodes, errorNodes);

    successfulNodes.addAll(actualSuccessfulNodes);
    lostNodes.putAll(errorNodesMap);
}

From source file:com.google.dart.engine.internal.index.file.SplitIndexStoreImpl.java

@Override
public boolean aboutToIndexDart(AnalysisContext context, CompilationUnitElement unitElement) {
    context = unwrapContext(context);
    // may be already disposed in other thread
    if (context.isDisposed()) {
        return false;
    }
    // validate unit
    if (unitElement == null) {
        return false;
    }
    LibraryElement libraryElement = unitElement.getLibrary();
    if (libraryElement == null) {
        return false;
    }
    CompilationUnitElement definingUnitElement = libraryElement.getDefiningCompilationUnit();
    if (definingUnitElement == null) {
        return false;
    }
    // prepare sources
    Source library = definingUnitElement.getSource();
    Source unit = unitElement.getSource();
    // special handling for the defining library unit
    if (unit.equals(library)) {
        // prepare new parts
        Set<Source> newParts = Sets.newHashSet();
        for (CompilationUnitElement part : libraryElement.getParts()) {
            newParts.add(part.getSource());
        }
        // prepare old parts
        Map<Source, Set<Source>> libraryToUnits = contextToLibraryToUnits.get(context);
        if (libraryToUnits == null) {
            libraryToUnits = Maps.newHashMap();
            contextToLibraryToUnits.put(context, libraryToUnits);
        }
        Set<Source> oldParts = libraryToUnits.get(library);
        // check if some parts are not in the library now
        if (oldParts != null) {
            Set<Source> noParts = Sets.difference(oldParts, newParts);
            for (Source noPart : noParts) {
                removeLocations(context, library, noPart);
            }
        }
        // remember new parts
        libraryToUnits.put(library, newParts);
    }
    // remember library/unit relations
    recordUnitInLibrary(context, library, unit);
    recordLibraryWithUnit(context, library, unit);
    sources.add(library);
    sources.add(unit);
    // prepare node
    String libraryName = library.getFullName();
    String unitName = unit.getFullName();
    int libraryNameIndex = stringCodec.encode(libraryName);
    int unitNameIndex = stringCodec.encode(unitName);
    currentNodeName = libraryNameIndex + "_" + unitNameIndex + ".index";
    currentNodeNameId = stringCodec.encode(currentNodeName);
    currentNode = nodeManager.newNode(context);
    currentContextId = contextCodec.encode(context);
    // remove Universe information for the current node
    for (Map<Integer, ?> nodeRelations : contextNodeRelations.values()) {
        nodeRelations.remove(currentNodeNameId);
    }
    // done
    return true;
}

From source file:com.opengamma.integration.marketdata.manipulator.dsl.Simulation.java

/**
 * Builds cycle execution options for each scenario in this simulation.
 * @param baseOptions Base set of options
 * @param allSelectors This simulation's selectors
 * @return Execution options for each scenario in this simulation
 */
/* package */ List<ViewCycleExecutionOptions> cycleExecutionOptions(ViewCycleExecutionOptions baseOptions,
        Set<DistinctMarketDataSelector> allSelectors) {
    List<ViewCycleExecutionOptions> options = Lists.newArrayListWithCapacity(_scenarios.size());
    for (Scenario scenario : _scenarios.values()) {
        ScenarioDefinition definition = scenario.createDefinition();
        Map<DistinctMarketDataSelector, FunctionParameters> scenarioParams = definition.getDefinitionMap();
        Map<DistinctMarketDataSelector, FunctionParameters> params = Maps.newHashMap();
        params.putAll(scenarioParams);
        // if a selector isn't used by a particular scenario then it needs to have a no-op manipulator. if it didn't
        // then the manipulator from the previous scenario would be used
        Set<DistinctMarketDataSelector> unusedSelectors = Sets.difference(allSelectors, params.keySet());
        for (DistinctMarketDataSelector unusedSelector : unusedSelectors) {
            params.put(unusedSelector, EmptyFunctionParameters.INSTANCE);
        }
        ViewCycleExecutionOptions scenarioOptions = baseOptions.copy().setFunctionParameters(params)
                .setValuationTime(scenario.getValuationTime())
                .setResolverVersionCorrection(scenario.getResolverVersionCorrection()).create();
        options.add(scenarioOptions);
    }
    return options;
}

From source file:org.grouplens.lenskit.util.parallel.TaskGraphManager.java

/**
 * Get the next runnable task, or {@code null} if no tasks can be run.
 * @return The runnable task.
 */
@Nullable
private DAGNode<T, E> findRunnableTask() {
    for (DAGNode<T, E> task : tasksToRun) {
        if (runningTasks.contains(task)) {
            continue;
        }
        int nleft = Sets.difference(task.getAdjacentNodes(), finishedTasks).size();
        if (nleft == 0) {
            return task;
        } else {
            logger.debug("deferring task {}, has {} unfinished dependencies", task.getLabel(), nleft);
        }
    }
    return null;
}
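
Calling size() on the difference view, as above, counts the unfinished dependencies without materializing a copy, which suits this method because the count is also logged. When only emptiness matters, isEmpty() on the view is a natural fit and, at least in recent Guava versions, is implemented as a containsAll check that can stop at the first unfinished dependency. A small variant of the same check (sketch, reusing the names from the method above):

// "Is this task ready to run?" without counting every remaining dependency.
boolean ready = Sets.difference(task.getAdjacentNodes(), finishedTasks).isEmpty();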

From source file:com.netflix.suro.connection.ConnectionPool.java

public void populateClients() {
    for (Server server : lb.getServerList(true)) {
        SuroConnection connection = new SuroConnection(server, config, true);
        try {
            connection.connect();
            addConnection(server, connection, true);
            logger.info(connection + " is added to SuroClientPool");
        } catch (Exception e) {
            logger.error("Error in connecting to " + connection + " message: " + e.getMessage(), e);
            lb.markServerDown(server);
        }
    }

    connectionSweeper.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            removeConnection(Sets.difference(serverSet, new HashSet<Server>(lb.getServerList(true))));
        }
    }, config.getConnectionSweepInterval(), config.getConnectionSweepInterval(), TimeUnit.SECONDS);
}

From source file:org.jpmml.evaluator.EvaluationExample.java

@Override
public void execute() throws Exception {
    MetricRegistry metricRegistry = new MetricRegistry();

    ConsoleReporter reporter = ConsoleReporter.forRegistry(metricRegistry).convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS).build();

    CsvUtil.Table inputTable = readTable(this.input, this.separator);

    List<? extends Map<FieldName, ?>> inputRecords = BatchUtil.parseRecords(inputTable, Example.CSV_PARSER);

    if (this.waitBefore) {
        waitForUserInput();
    }

    PMML pmml = readPMML(this.model);

    if (this.cacheBuilderSpec != null) {
        CacheBuilderSpec cacheBuilderSpec = CacheBuilderSpec.parse(this.cacheBuilderSpec);

        CacheUtil.setCacheBuilderSpec(cacheBuilderSpec);
    } // End if

    if (this.optimize) {
        List<? extends Visitor> optimizers = Arrays.asList(new ExpressionOptimizer(), new FieldOptimizer(),
                new PredicateOptimizer(), new GeneralRegressionModelOptimizer(), new NaiveBayesModelOptimizer(),
                new RegressionModelOptimizer());

        for (Visitor optimizer : optimizers) {
            optimizer.applyTo(pmml);
        }
    }

    ModelEvaluatorFactory modelEvaluatorFactory = ModelEvaluatorFactory.newInstance();

    Evaluator evaluator = modelEvaluatorFactory.newModelEvaluator(pmml);

    // Perform self-testing
    evaluator.verify();

    List<InputField> inputFields = evaluator.getInputFields();
    List<InputField> groupFields = Collections.emptyList();

    if (evaluator instanceof HasGroupFields) {
        HasGroupFields hasGroupfields = (HasGroupFields) evaluator;

        groupFields = hasGroupfields.getGroupFields();
    } // End if

    if (inputRecords.size() > 0) {
        Map<FieldName, ?> inputRecord = inputRecords.get(0);

        Sets.SetView<FieldName> missingInputFields = Sets
                .difference(new LinkedHashSet<>(EvaluatorUtil.getNames(inputFields)), inputRecord.keySet());
        if ((missingInputFields.size() > 0) && !this.sparse) {
            throw new IllegalArgumentException("Missing input field(s): " + missingInputFields.toString());
        }

        Sets.SetView<FieldName> missingGroupFields = Sets
                .difference(new LinkedHashSet<>(EvaluatorUtil.getNames(groupFields)), inputRecord.keySet());
        if (missingGroupFields.size() > 0) {
            throw new IllegalArgumentException("Missing group field(s): " + missingGroupFields.toString());
        }
    } // End if

    if (evaluator instanceof HasGroupFields) {
        HasGroupFields hasGroupFields = (HasGroupFields) evaluator;

        inputRecords = EvaluatorUtil.groupRows(hasGroupFields, inputRecords);
    }

    List<Map<FieldName, ?>> outputRecords = new ArrayList<>(inputRecords.size());

    Timer timer = new Timer(new SlidingWindowReservoir(this.loop));

    metricRegistry.register("main", timer);

    int epoch = 0;

    do {
        Timer.Context context = timer.time();

        try {
            outputRecords.clear();

            Map<FieldName, FieldValue> arguments = new LinkedHashMap<>();

            for (Map<FieldName, ?> inputRecord : inputRecords) {
                arguments.clear();

                for (InputField inputField : inputFields) {
                    FieldName name = inputField.getName();

                    FieldValue value = EvaluatorUtil.prepare(inputField, inputRecord.get(name));

                    arguments.put(name, value);
                }

                Map<FieldName, ?> result = evaluator.evaluate(arguments);

                outputRecords.add(result);
            }
        } finally {
            context.close();
        }

        epoch++;
    } while (epoch < this.loop);

    if (this.waitAfter) {
        waitForUserInput();
    }

    List<TargetField> targetFields = evaluator.getTargetFields();
    List<OutputField> outputFields = evaluator.getOutputFields();

    List<? extends ResultField> resultFields = Lists.newArrayList(Iterables.concat(targetFields, outputFields));

    CsvUtil.Table outputTable = new CsvUtil.Table();
    outputTable.setSeparator(inputTable.getSeparator());

    outputTable.addAll(BatchUtil.formatRecords(outputRecords, EvaluatorUtil.getNames(resultFields),
            Example.CSV_FORMATTER));

    if ((inputTable.size() == outputTable.size()) && this.copyColumns) {

        for (int i = 0; i < inputTable.size(); i++) {
            List<String> inputRow = inputTable.get(i);
            List<String> outputRow = outputTable.get(i);

            outputRow.addAll(0, inputRow);
        }
    }

    writeTable(outputTable, this.output);

    if (this.loop > 1) {
        reporter.report();
    }

    reporter.close();
}

From source file:org.apache.tephra.hbase.txprune.InvalidListPruningDebugTool.java

/**
 * Returns a set of regions that are live, are not empty, and do not have a prune upper bound recorded.
 * These regions will stop the progress of pruning.
 * <p/>
 * Note that this can return false positives in the following case -
 * At time 't' empty regions were recorded, and time 't+1' prune iteration was invoked.
 * Since  a new set of regions was recorded at time 't+1', all regions recorded as empty before time 't + 1' will
 * now be reported as blocking the pruning, even though they are empty. This is because we cannot tell if those
 * regions got any new data between time 't' and 't + 1'.
 *
 * @param numRegions number of regions
 * @param time time in milliseconds or relative time, regions recorded before the given time are returned
 * @return {@link Set} of regions that needs to be compacted and flushed
 */
@Override
@SuppressWarnings("WeakerAccess")
public Set<String> getRegionsToBeCompacted(Integer numRegions, String time) throws IOException {
    // Fetch the live regions at the given time
    RegionsAtTime timeRegion = getRegionsOnOrBeforeTime(time);
    if (timeRegion.getRegions().isEmpty()) {
        return Collections.emptySet();
    }

    Long timestamp = timeRegion.getTime();
    SortedSet<String> regions = timeRegion.getRegions();

    // Get the live regions
    SortedSet<String> liveRegions = getRegionsOnOrBeforeTime(NOW).getRegions();
    // Retain only the live regions
    regions = Sets.newTreeSet(Sets.intersection(liveRegions, regions));

    SortedSet<byte[]> emptyRegions = dataJanitorState.getEmptyRegionsAfterTime(timestamp, null);
    SortedSet<String> emptyRegionNames = new TreeSet<>();
    Iterable<String> regionStrings = Iterables.transform(emptyRegions, TimeRegions.BYTE_ARR_TO_STRING_FN);
    for (String regionString : regionStrings) {
        emptyRegionNames.add(regionString);
    }

    Set<String> nonEmptyRegions = Sets.newHashSet(Sets.difference(regions, emptyRegionNames));

    // Get all pruned regions for the current time and remove them from nonEmptyRegions, leaving
    // the regions that are not empty and do not have a prune upper bound registered
    List<RegionPruneInfo> prunedRegions = dataJanitorState.getPruneInfoForRegions(null);
    for (RegionPruneInfo prunedRegion : prunedRegions) {
        if (nonEmptyRegions.contains(prunedRegion.getRegionNameAsString())) {
            nonEmptyRegions.remove(prunedRegion.getRegionNameAsString());
        }
    }

    if ((numRegions < 0) || (numRegions >= nonEmptyRegions.size())) {
        return nonEmptyRegions;
    }

    Set<String> subsetRegions = new HashSet<>(numRegions);
    for (String regionName : nonEmptyRegions) {
        if (subsetRegions.size() == numRegions) {
            break;
        }
        subsetRegions.add(regionName);
    }
    return subsetRegions;
}

From source file:com.github.benmanes.caffeine.cache.IsValidBoundedLocalCache.java

private void checkLinks(BoundedLocalCache<K, V> cache, ImmutableList<LinkedDeque<Node<K, V>>> deques,
        DescriptionBuilder desc) {
    int size = 0;
    long weightedSize = 0;
    Set<Node<K, V>> seen = Sets.newIdentityHashSet();
    for (LinkedDeque<Node<K, V>> deque : deques) {
        size += deque.size();
        weightedSize += scanLinks(cache, seen, deque, desc);
    }
    if (cache.size() != size) {
        desc.expectThat(() -> "deque size " + deques, size, is(cache.size()));
    }

    Supplier<String> errorMsg = () -> String.format("Size != list length; pending=%s, additional: %s",
            cache.writeBuffer().size(), Sets.difference(seen, ImmutableSet.copyOf(cache.data.values())));
    desc.expectThat(errorMsg, cache.size(), is(seen.size()));

    final long weighted = weightedSize;
    if (cache.evicts()) {
        Supplier<String> error = () -> String.format("WeightedSize != link weights [%d vs %d] {%d vs %d}",
                cache.adjustedWeightedSize(), weighted, seen.size(), cache.size());
        desc.expectThat("non-negative weight", weightedSize, is(greaterThanOrEqualTo(0L)));
        desc.expectThat(error, cache.adjustedWeightedSize(), is(weightedSize));
    }
}