Example usage for com.google.common.base Predicates not

List of usage examples for com.google.common.base Predicates not

Introduction

On this page you can find example usage for com.google.common.base Predicates.not.

Prototype

public static <T> Predicate<T> not(Predicate<T> predicate) 

Document

Returns a predicate that evaluates to true if the given predicate evaluates to false.
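
Before the usage examples, here is a minimal, self-contained sketch of the call. The class name, the isEmpty predicate and the sample data are illustrative only and do not come from the examples below.

import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

import java.util.Arrays;
import java.util.List;

public class PredicatesNotExample {
    public static void main(String[] args) {
        Predicate<String> isEmpty = new Predicate<String>() {
            @Override
            public boolean apply(String input) {
                return input.isEmpty();
            }
        };
        List<String> values = Arrays.asList("a", "", "b", "");
        // Predicates.not(isEmpty) keeps only the elements for which isEmpty returns false.
        List<String> nonEmpty = ImmutableList.copyOf(Iterables.filter(values, Predicates.not(isEmpty)));
        System.out.println(nonEmpty); // [a, b]
    }
}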

Usage

From source file:org.eclipse.sirius.diagram.ui.tools.internal.layout.PinnedElementsHandler.java

/**
 * Move the specified movable parts in the specified direction enough to
 * avoid overlaps with all the specified fixed parts while not creating any
 * new overlap with other fixed parts. All the movable parts are translated
 * by the same amount, as a group. More movable parts than the ones
 * specified explicitly may be moved along as they are "pushed" aside to
 * make enough room.
 * 
 * @param parts
 *            the parts to move.
 * @param fixedParts
 *            the fixed parts to avoid.
 * @param dir
 *            the general direction in which to move the movable parts.
 * @param previousMovedPositionsOfSameDir
 *            the original positions of the edit parts that have previously
 *            been moved in this direction
 * @return The positions recorded during this step (and previous steps),
 *         which can later be used to restore the original positions.
 */
private Map<IGraphicalEditPart, Point> moveAside(final Set<IGraphicalEditPart> parts,
        final Set<IGraphicalEditPart> fixedParts, final Direction dir,
        Map<IGraphicalEditPart, Point> previousMovedPositionsOfSameDir) {
    /*
     * First try to move just enough to avoid the explicitly specified
     * obstacles.
     */
    addSavePositions(parts, previousMovedPositionsOfSameDir);
    tryMove(parts, fixedParts, dir);
    final Set<IGraphicalEditPart> overlaps = findOverlappingParts(parts);
    if (!overlaps.isEmpty()) {
        /*
         * We created new overlaps. Try a more aggressive change, taking
         * more parts into consideration and/or moving further.
         */
        Set<IGraphicalEditPart> newMovables = parts;
        Set<IGraphicalEditPart> newFixed = fixedParts;

        final Set<IGraphicalEditPart> movableOverlaps = Sets
                .newHashSet(Collections2.filter(overlaps, Predicates.not(isPinned)));
        if (!movableOverlaps.isEmpty()) {
            /*
             * If we created new overlaps with movable parts, simply re-try
             * with an extended set of movable parts including the ones we
             * need to push along.
             */
            newMovables = Sets.union(parts, movableOverlaps);
        }

        final Set<IGraphicalEditPart> fixedOverlaps = Sets.newHashSet(Collections2.filter(overlaps, isPinned));
        if (!fixedOverlaps.isEmpty()) {
            /*
             * If we created new overlaps with other fixed parts, re-try
             * with an extended set of fixed obstacles to avoid.
             */
            newFixed = Sets.union(fixedParts, fixedOverlaps);
        }

        /*
         * Retry with the new, extended sets of parts to consider.
         */
        assert newMovables.size() > parts.size() || newFixed.size() > fixedParts.size();
        moveParts(newMovables, previousMovedPositionsOfSameDir);
        moveAside(newMovables, newFixed, dir, previousMovedPositionsOfSameDir);
    }
    /*
     * Check that the specified movable parts no longer overlap with the
     * specified fixed parts.
     */
    assert Sets.intersection(Sets.filter(findOverlappingParts(fixedParts), Predicates.not(isPinned)), parts)
            .isEmpty();
    return previousMovedPositionsOfSameDir;
}
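
The negated-predicate filtering used above can be reduced to a small sketch. The isPinned predicate and the String elements below are stand-ins for the IGraphicalEditPart predicate and parts in the example, not the actual Sirius types.

import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import java.util.Set;

public class UnpinnedPartsSketch {
    public static void main(String[] args) {
        // Stand-in for the isPinned predicate over edit parts.
        Predicate<String> isPinned = Predicates.in(ImmutableSet.of("fixedA", "fixedB"));
        Set<String> overlaps = ImmutableSet.of("fixedA", "movable1", "movable2");
        // Sets.filter with Predicates.not keeps only the parts that are not pinned.
        Set<String> movableOverlaps = Sets.filter(overlaps, Predicates.not(isPinned));
        System.out.println(ImmutableSet.copyOf(movableOverlaps)); // [movable1, movable2]
    }
}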

From source file:org.apache.ambari.server.controller.internal.BlueprintConfigurationProcessor.java

/**
 * Drop every configuration property from the advised configuration that is not found in the stack defaults.
 * @param advisedConfigurations advised configuration instance
 */
private void doFilterStackDefaults(Map<String, AdvisedConfiguration> advisedConfigurations) {
    Blueprint blueprint = clusterTopology.getBlueprint();
    Configuration stackDefaults = blueprint.getStack().getConfiguration(blueprint.getServices());
    Map<String, Map<String, String>> stackDefaultProps = stackDefaults.getProperties();
    for (Map.Entry<String, AdvisedConfiguration> adConfEntry : advisedConfigurations.entrySet()) {
        AdvisedConfiguration advisedConfiguration = adConfEntry.getValue();
        if (stackDefaultProps.containsKey(adConfEntry.getKey())) {
            Map<String, String> defaultProps = stackDefaultProps.get(adConfEntry.getKey());
            Map<String, String> outFilteredProps = Maps.filterKeys(advisedConfiguration.getProperties(),
                    Predicates.not(Predicates.in(defaultProps.keySet())));
            advisedConfiguration.getProperties().keySet()
                    .removeAll(Sets.newCopyOnWriteArraySet(outFilteredProps.keySet()));

            if (advisedConfiguration.getPropertyValueAttributes() != null) {
                Map<String, ValueAttributesInfo> outFilteredValueAttrs = Maps.filterKeys(
                        advisedConfiguration.getPropertyValueAttributes(),
                        Predicates.not(Predicates.in(defaultProps.keySet())));
                advisedConfiguration.getPropertyValueAttributes().keySet()
                        .removeAll(Sets.newCopyOnWriteArraySet(outFilteredValueAttrs.keySet()));
            }
        } else {
            advisedConfiguration.getProperties().clear();
        }
    }
}
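
The core idiom above, dropping map entries whose keys are not among a set of reference keys, can be isolated as follows. The map contents and key names are made up for illustration.

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;

import java.util.Map;
import java.util.Set;

public class FilterAgainstDefaultsSketch {
    public static void main(String[] args) {
        Map<String, String> advisedProps = ImmutableMap.of("a", "1", "b", "2", "x", "9");
        Set<String> stackDefaultKeys = ImmutableSet.of("a", "b");
        // Keep only the entries whose key is NOT among the stack default keys.
        Map<String, String> outFiltered =
                Maps.filterKeys(advisedProps, Predicates.not(Predicates.in(stackDefaultKeys)));
        System.out.println(ImmutableMap.copyOf(outFiltered)); // {x=9}
    }
}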

From source file:forge.game.Game.java

private void chooseRandomCardsForAnte(final Player player, final Multimap<Player, Card> anteed) {
    final CardCollectionView lib = player.getCardsIn(ZoneType.Library);
    Predicate<Card> goodForAnte = Predicates.not(CardPredicates.Presets.BASIC_LANDS);
    Card ante = Aggregates.random(Iterables.filter(lib, goodForAnte));
    if (ante == null) {
        getGameLog().add(GameLogEntryType.ANTE, "Only basic lands found. Will ante one of them");
        ante = Aggregates.random(lib);
    }
    anteed.put(player, ante);
}

From source file:com.google.devtools.build.lib.runtime.BlazeCommandDispatcher.java

/**
 * Parses the options from .rc files for a command invocation. It works in one of two modes:
 * either it loads the non-config options, or it loads the config options that are specified in
 * the {@code configs} parameter.
 *
 * <p>This method adds every option pertaining to the specified command to the options parser. To
 * do that, it needs the command -> option mapping that is generated from the .rc files.
 *
 * <p>It is not as trivial as simply taking the list of options for the specified command because
 * commands can inherit arguments from each other, and we have to respect that (e.g. if an option
 * is specified for 'build', it needs to take effect for the 'test' command, too).
 *
 * <p>Note that the order in which the options are parsed is well-defined: all options from the
 * same rc file are parsed at the same time, and the rc files are handled in the order in which
 * they were passed in from the client.
 *
 * @param rcfileNotes note message that would be printed during parsing
 * @param commandAnnotation the command for which options should be parsed.
 * @param optionsParser parser to receive parsed options.
 * @param optionsMap .rc files in structured format: a list of pairs, where the first part is the
 *     name of the rc file, and the second part is a multimap of command name (plus config, if
 *     present) to the list of options for that command
 * @param configs the configs for which to parse options; if {@code null}, non-config options are
 *     parsed
 * @param unknownConfigs optional; a collection that the method will populate with the config
 *     values in {@code configs} that none of the .rc files had entries for
 * @throws OptionsParsingException
 */
protected static void parseOptionsForCommand(List<String> rcfileNotes, Command commandAnnotation,
        OptionsParser optionsParser, List<Pair<String, ListMultimap<String, String>>> optionsMap,
        @Nullable Collection<String> configs, @Nullable Collection<String> unknownConfigs)
        throws OptionsParsingException {
    Set<String> knownConfigs = new HashSet<>();
    for (String commandToParse : getCommandNamesToParse(commandAnnotation)) {
        for (Pair<String, ListMultimap<String, String>> entry : optionsMap) {
            List<String> allOptions = new ArrayList<>();
            if (configs == null) {
                allOptions.addAll(entry.second.get(commandToParse));
            } else {
                for (String config : configs) {
                    Collection<String> values = entry.second.get(commandToParse + ":" + config);
                    if (!values.isEmpty()) {
                        allOptions.addAll(values);
                        knownConfigs.add(config);
                    }
                }
            }
            processOptionList(optionsParser, commandToParse, commandAnnotation.name(), rcfileNotes, entry.first,
                    allOptions);
        }
    }
    if (unknownConfigs != null && configs != null && configs.size() > knownConfigs.size()) {
        Iterables.addAll(unknownConfigs,
                Iterables.filter(configs, Predicates.not(Predicates.in(knownConfigs))));
    }
}

From source file:org.apache.cassandra.db.compaction.LeveledManifest.java

/**
 * @return highest-priority sstables to compact for the given level.
 * If no compactions are possible (because of concurrent compactions or because some sstables are blacklisted
 * for prior failure), will return an empty list.  Never returns null.
 */
private Collection<SSTableReader> getCandidatesFor(int level) {
    assert !getLevel(level).isEmpty();
    logger.trace("Choosing candidates for L{}", level);

    final Set<SSTableReader> compacting = cfs.getTracker().getCompacting();

    if (level == 0) {
        Set<SSTableReader> compactingL0 = getCompacting(0);

        RowPosition lastCompactingKey = null;
        RowPosition firstCompactingKey = null;
        for (SSTableReader candidate : compactingL0) {
            if (firstCompactingKey == null || candidate.first.compareTo(firstCompactingKey) < 0)
                firstCompactingKey = candidate.first;
            if (lastCompactingKey == null || candidate.last.compareTo(lastCompactingKey) > 0)
                lastCompactingKey = candidate.last;
        }

        // L0 is the dumping ground for new sstables which thus may overlap each other.
        //
        // We treat L0 compactions specially:
        // 1a. add sstables to the candidate set until we have at least maxSSTableSizeInMB
        // 1b. prefer older sstables as candidates over newer ones
        // 1c. any L0 sstables that overlap a candidate, will also become candidates
        // 2. At most MAX_COMPACTING_L0 sstables from L0 will be compacted at once
        // 3. If total candidate size is less than maxSSTableSizeInMB, we won't bother compacting with L1,
        //    and the result of the compaction will stay in L0 instead of being promoted (see promote())
        //
        // Note that we ignore suspect-ness of L1 sstables here, since if an L1 sstable is suspect we're
        // basically screwed, since we expect all or most L0 sstables to overlap with each L1 sstable.
        // So if an L1 sstable is suspect we can't do much besides try anyway and hope for the best.
        Set<SSTableReader> candidates = new HashSet<>();
        Set<SSTableReader> remaining = new HashSet<>();
        Iterables.addAll(remaining, Iterables.filter(getLevel(0), Predicates.not(suspectP)));
        for (SSTableReader sstable : ageSortedSSTables(remaining)) {
            if (candidates.contains(sstable))
                continue;

            Sets.SetView<SSTableReader> overlappedL0 = Sets.union(Collections.singleton(sstable),
                    overlapping(sstable, remaining));
            if (!Sets.intersection(overlappedL0, compactingL0).isEmpty())
                continue;

            for (SSTableReader newCandidate : overlappedL0) {
                if (firstCompactingKey == null || lastCompactingKey == null
                        || overlapping(firstCompactingKey.getToken(), lastCompactingKey.getToken(),
                                Arrays.asList(newCandidate)).size() == 0)
                    candidates.add(newCandidate);
                remaining.remove(newCandidate);
            }

            if (candidates.size() > MAX_COMPACTING_L0) {
                // limit to only the MAX_COMPACTING_L0 oldest candidates
                candidates = new HashSet<>(ageSortedSSTables(candidates).subList(0, MAX_COMPACTING_L0));
                break;
            }
        }

        // leave everything in L0 if we didn't end up with a full sstable's worth of data
        if (SSTableReader.getTotalBytes(candidates) > maxSSTableSizeInBytes) {
            // add sstables from L1 that overlap candidates
            // if the overlapping ones are already busy in a compaction, leave it out.
            // TODO try to find a set of L0 sstables that only overlaps with non-busy L1 sstables
            Set<SSTableReader> l1overlapping = overlapping(candidates, getLevel(1));
            if (Sets.intersection(l1overlapping, compacting).size() > 0)
                return Collections.emptyList();
            if (!overlapping(candidates, compactingL0).isEmpty())
                return Collections.emptyList();
            candidates = Sets.union(candidates, l1overlapping);
        }
        if (candidates.size() < 2)
            return Collections.emptyList();
        else
            return candidates;
    }

    // for non-L0 compactions, pick up where we left off last time
    Collections.sort(getLevel(level), SSTableReader.sstableComparator);
    int start = 0; // handles case where the prior compaction touched the very last range
    for (int i = 0; i < getLevel(level).size(); i++) {
        SSTableReader sstable = getLevel(level).get(i);
        if (sstable.first.compareTo(lastCompactedKeys[level]) > 0) {
            start = i;
            break;
        }
    }

    // look for a non-suspect keyspace to compact with, starting with where we left off last time,
    // and wrapping back to the beginning of the generation if necessary
    for (int i = 0; i < getLevel(level).size(); i++) {
        SSTableReader sstable = getLevel(level).get((start + i) % getLevel(level).size());
        Set<SSTableReader> candidates = Sets.union(Collections.singleton(sstable),
                overlapping(sstable, getLevel(level + 1)));
        if (Iterables.any(candidates, suspectP))
            continue;
        if (Sets.intersection(candidates, compacting).isEmpty())
            return candidates;
    }

    // all the sstables were suspect or overlapped with something suspect
    return Collections.emptyList();
}

From source file:com.eucalyptus.compute.common.internal.tags.FilterSupport.java

/**
 * Generate a Filter for the given filters.
 *
 * @param filters The map of filter names to (multiple) values
 * @param allowInternalFilters True to allow use of internal filters
 * @return The filter representation
 * @throws InvalidFilterException If a filter is invalid
 */
public Filter generate(final Map<String, Set<String>> filters, final boolean allowInternalFilters,
        final String accountId) throws InvalidFilterException {
    // Construct collection filter
    final List<Predicate<Object>> and = Lists.newArrayList();
    for (final Map.Entry<String, Set<String>> filter : Iterables.filter(filters.entrySet(),
            Predicates.not(isTagFilter()))) {
        final List<Predicate<Object>> or = Lists.newArrayList();
        for (final String value : filter.getValue()) {
            final Function<? super String, Predicate<? super RT>> predicateFunction = predicateFunctions
                    .get(filter.getKey());
            if (predicateFunction == null
                    || (!allowInternalFilters && internalFilters.contains(filter.getKey()))) {
                throw InvalidFilterException.forName(filter.getKey());
            }
            final Predicate<? super RT> valuePredicate = predicateFunction.apply(value);
            or.add(typedPredicate(valuePredicate));
        }
        and.add(Predicates.or(or));
    }

    // Construct database filter and aliases
    final Junction conjunction = Restrictions.conjunction();
    final Map<String, String> aliases = Maps.newHashMap();
    for (final Map.Entry<String, Set<String>> filter : Iterables.filter(filters.entrySet(),
            Predicates.not(isTagFilter()))) {
        final Junction disjunction = Restrictions.disjunction();
        for (final String value : filter.getValue()) {
            final PersistenceFilter persistenceFilter = persistenceFilters.get(filter.getKey());
            if (persistenceFilter != null) {
                final Object persistentValue = persistenceFilter.value(value);
                if (persistentValue != null) {
                    for (final String alias : persistenceFilter.getAliases())
                        aliases.put(alias, this.aliases.get(alias));
                    disjunction.add(buildRestriction(persistenceFilter.getProperty(), persistentValue));
                } // else, there is no valid DB filter for the given value (e.g. wildcard for integer value)
            }
        }
        conjunction.add(disjunction);
    }

    // Construct database filter and aliases for tags
    boolean tagPresent = false;
    final List<Junction> tagJunctions = Lists.newArrayList();
    for (final Map.Entry<String, Set<String>> filter : Iterables.filter(filters.entrySet(), isTagFilter())) {
        tagPresent = true;
        final Junction disjunction = Restrictions.disjunction();
        final String filterName = filter.getKey();
        for (final String value : filter.getValue()) {
            if ("tag-key".equals(filterName)) {
                disjunction.add(buildTagRestriction(value, null, true));
            } else if ("tag-value".equals(filterName)) {
                disjunction.add(buildTagRestriction(null, value, true));
            } else {
                disjunction.add(buildTagRestriction(filterName.substring(4), value, false));
            }
        }
        tagJunctions.add(disjunction);
    }
    if (tagPresent)
        conjunction.add(tagCriterion(accountId, tagJunctions));

    return new Filter(aliases, conjunction, Predicates.and(and), tagPresent);
}

From source file:com.google.devtools.build.lib.query2.SkyQueryEnvironment.java

@Override
public Set<Target> getTransitiveClosure(Set<Target> targets) throws InterruptedException {
    Set<Target> visited = new HashSet<>();
    Collection<Target> current = targets;
    while (!current.isEmpty()) {
        Collection<Target> toVisit = Collections2.filter(current, Predicates.not(Predicates.in(visited)));
        current = getFwdDeps(toVisit);
        visited.addAll(toVisit);
    }
    return ImmutableSet.copyOf(visited);
}
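
The visited-set pattern above relies on composing Predicates.not with Predicates.in so that each step only keeps unseen elements. A reduced sketch of that composition, with made-up String nodes in place of Target:

import com.google.common.base.Predicates;
import com.google.common.collect.Collections2;
import com.google.common.collect.ImmutableList;

import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

public class FrontierSketch {
    public static void main(String[] args) {
        Set<String> visited = new HashSet<>();
        visited.add("a");
        Collection<String> current = ImmutableList.of("a", "b", "c");
        // Keep only the nodes that have not been visited yet.
        Collection<String> toVisit =
                Collections2.filter(current, Predicates.not(Predicates.in(visited)));
        System.out.println(ImmutableList.copyOf(toVisit)); // [b, c]
    }
}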

From source file:org.elasticsearch.test.InternalTestCluster.java

/**
 * Ensures that at most <code>n</code> data nodes are up and running.
 * If fewer than <code>n</code> nodes are running, this method
 * will not start any additional nodes.
 */
public synchronized void ensureAtMostNumDataNodes(int n) throws IOException {
    int size = numDataNodes();
    if (size <= n) {
        return;
    }
    // prevent killing the master if possible and client nodes
    final Iterator<NodeAndClient> values = n == 0 ? nodes.values().iterator()
            : Iterators.filter(nodes.values().iterator(), Predicates.and(new DataNodePredicate(),
                    Predicates.not(new MasterNodePredicate(getMasterName()))));

    final Iterator<NodeAndClient> limit = Iterators.limit(values, size - n);
    logger.info("changing cluster size from {} to {}, {} data nodes", size(), n + numSharedClientNodes, n);
    Set<NodeAndClient> nodesToRemove = new HashSet<>();
    while (limit.hasNext()) {
        NodeAndClient next = limit.next();
        nodesToRemove.add(next);
        removeDisruptionSchemeFromNode(next);
        next.close();
    }
    for (NodeAndClient toRemove : nodesToRemove) {
        nodes.remove(toRemove.name);
    }
    if (!nodesToRemove.isEmpty() && size() > 0) {
        assertNoTimeout(client().admin().cluster().prepareHealth()
                .setWaitForNodes(Integer.toString(nodes.size())).get());
    }
}
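
Here Predicates.not is combined with Predicates.and to select data nodes that are not the current master. A stripped-down sketch of that composition; the node names and predicates below are placeholders, not the InternalTestCluster classes.

import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterators;

import java.util.Iterator;

public class AndNotSketch {
    public static void main(String[] args) {
        Predicate<String> isDataNode = new Predicate<String>() {
            @Override
            public boolean apply(String name) {
                return name.startsWith("data");
            }
        };
        Predicate<String> isMaster = Predicates.equalTo("data-master");
        Iterator<String> nodes =
                ImmutableList.of("data-1", "data-master", "client-1", "data-2").iterator();
        // Data nodes, excluding the elected master.
        Iterator<String> candidates =
                Iterators.filter(nodes, Predicates.and(isDataNode, Predicates.not(isMaster)));
        while (candidates.hasNext()) {
            System.out.println(candidates.next()); // prints data-1 and data-2
        }
    }
}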

From source file:org.eclipse.sirius.diagram.ui.tools.internal.layout.PinnedElementsHandler.java

/**
 * Translate all the given <code>parts</code> by the same amount in the
 * specified <code>direction</code> as far as required to avoid overlaps
 * with the specified <code>fixedParts</code>. The move may create new
 * overlaps with parts other than those in <code>fixedParts</code>.
 */
private void tryMove(final Set<IGraphicalEditPart> parts, final Set<IGraphicalEditPart> fixedParts,
        final Direction direction) {
    assert !Sets.intersection(Sets.filter(findOverlappingParts(fixedParts), Predicates.not(isPinned)), parts)
            .isEmpty();
    final Rectangle movablesBox = getBoundingBox(parts, EXCLUDE_PADDING);
    final Insets movablesPadding = getPadding(parts);
    final Rectangle fixedBox = getBoundingBox(fixedParts, EXCLUDE_PADDING);
    final Insets fixedPadding = getPadding(fixedParts);
    final Dimension move = computeMoveVector(movablesBox, movablesPadding, fixedBox, fixedPadding, direction);
    for (IGraphicalEditPart part : parts) {
        translate(part, move);
    }
    assert Sets.intersection(Sets.filter(findOverlappingParts(fixedParts), Predicates.not(isPinned)), parts)
            .isEmpty();
}

From source file:forge.ai.AiBlockController.java

/** Reinforce blockers blocking attackers with trample (should only be made if life is in danger) */
private void reinforceBlockersAgainstTrample(final Combat combat) {

    List<Card> chumpBlockers;

    List<Card> tramplingAttackers = CardLists.getKeyword(attackers, "Trample");
    tramplingAttackers = CardLists.filter(tramplingAttackers, Predicates.not(rampagesOrNeedsManyToBlock));

    // TODO - should check here for a "rampage-like" trigger that replaced
    // the keyword:
    // "Whenever CARDNAME becomes blocked, it gets +1/+1 until end of turn for each creature blocking it."

    for (final Card attacker : tramplingAttackers) {

        if (((attacker.hasStartOfKeyword("CantBeBlockedByAmount LT") || attacker.hasKeyword("Menace"))
                && !combat.isBlocked(attacker))
                || attacker.hasKeyword(
                        "You may have CARDNAME assign its combat damage as though it weren't blocked.")
                || attacker.hasKeyword(
                        "CARDNAME can't be blocked unless all creatures defending player controls block it.")) {
            continue;
        }

        chumpBlockers = getPossibleBlockers(combat, attacker, blockersLeft, false);
        chumpBlockers.removeAll(combat.getBlockers(attacker));
        for (final Card blocker : chumpBlockers) {
            // Add an additional blocker if the current blockers are not
            // enough and the new one would suck some of the damage
            if (ComputerUtilCombat.getAttack(attacker) > ComputerUtilCombat.totalShieldDamage(attacker,
                    combat.getBlockers(attacker)) && ComputerUtilCombat.shieldDamage(attacker, blocker) > 0
                    && CombatUtil.canBlock(attacker, blocker, combat)
                    && ComputerUtilCombat.lifeInDanger(ai, combat)) {
                combat.addBlocker(attacker, blocker);
            }
        }
    }
}