Example usage for com.google.common.base.Predicates.in

List of usage examples for com.google.common.base.Predicates.in

Introduction

On this page you can find example usage for com.google.common.base.Predicates.in.

Prototype

public static <T> Predicate<T> in(Collection<? extends T> target) 

Document

Returns a predicate that evaluates to true if the object reference being tested is a member of the given collection.
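
The predicate composes with Guava's filtering utilities, which is how every example below uses it. Here is a minimal, self-contained sketch of the basic pattern; the class name and sample data are invented for illustration:

import java.util.List;

import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableList;

public class PredicatesInExample {
    public static void main(String[] args) {
        List<String> allowed = ImmutableList.of("a", "b");

        // Membership test: evaluates to true only for elements of the target collection.
        Predicate<String> isAllowed = Predicates.in(allowed);

        List<String> kept = FluentIterable.from(ImmutableList.of("a", "b", "c"))
                .filter(isAllowed)
                .toList();
        System.out.println(kept); // [a, b]

        // The negated form is an equally common idiom in the examples below.
        List<String> dropped = FluentIterable.from(ImmutableList.of("a", "b", "c"))
                .filter(Predicates.not(isAllowed))
                .toList();
        System.out.println(dropped); // [c]
    }
}

Iterables.filter, Collections2.filter, and Maps.filterKeys accept the same predicate, which is the pattern the usage examples below rely on.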

Usage

From source file:com.twitter.common.args.OptionInfo.java

/**
 * Parses the value and stores the result in the {@link Arg} contained in this {@code OptionInfo}.
 */
void load(ParserOracle parserOracle, String optionName, String value) {
    Parser<? extends T> parser = getParser(parserOracle);

    String finalValue = value;

    // If "-arg=@file" is allowed and specified, then we read the value from the file
    // and use it as the raw value to be parsed for the argument.
    if (argFile() && !Strings.isNullOrEmpty(value) && value.startsWith(ARG_FILE_INDICATOR)) {
        finalValue = getArgFileContent(optionName, value.substring(ARG_FILE_INDICATOR.length()));
    }

    Object result = parser.parse(parserOracle, getType().getType(), finalValue); // [A]

    // If the arg type is boolean, check if the command line uses the negated boolean form.
    if (isBoolean()) {
        if (Predicates.in(Arrays.asList(getNegatedName(), getCanonicalNegatedName())).apply(optionName)) {
            result = !(Boolean) result; // [B]
        }
    }

    // We know result is T at line [A] but throw this type information away to allow negation if T
    // is Boolean at line [B]
    @SuppressWarnings("unchecked")
    T parsed = (T) result;

    setValue(parsed);
}

From source file:com.facebook.buck.js.ReactNativeDeps.java

@Override
public ImmutableList<Step> getBuildSteps(BuildContext context, final BuildableContext buildableContext) {
    ImmutableList.Builder<Step> steps = ImmutableList.builder();

    final Path output = BuildTargets.getScratchPath(getBuildTarget(), "__%s/deps.txt");
    steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), output.getParent()));

    steps.add(new ShellStep(getProjectFilesystem().getRootPath()) {
        @Override
        protected ImmutableList<String> getShellCommandInternal(ExecutionContext context) {
            ImmutableList.Builder<String> builder = ImmutableList.builder();

            builder.add(getResolver().getAbsolutePath(jsPackager).toString(), "list-dependencies",
                    platform.toString(),
                    getProjectFilesystem().resolve(getResolver().getAbsolutePath(entryPath)).toString(),
                    "--output", getProjectFilesystem().resolve(output).toString());

            if (packagerFlags.isPresent()) {
                builder.addAll(Arrays.asList(packagerFlags.get().split(" ")));
            }

            return builder.build();
        }

        @Override
        public String getShortName() {
            return "react-native-deps";
        }
    });

    steps.add(new MakeCleanDirectoryStep(getProjectFilesystem(), outputDir));

    steps.add(new AbstractExecutionStep("hash_js_inputs") {
        @Override
        public int execute(ExecutionContext context) throws IOException {
            ImmutableList<Path> paths;
            try {
                paths = FluentIterable.from(getProjectFilesystem().readLines(output))
                        .transform(MorePaths.TO_PATH).transform(getProjectFilesystem().getRelativizer())
                        .toSortedList(Ordering.natural());
            } catch (IOException e) {
                context.logError(e, "Error reading output of the 'react-native-deps' step.");
                return 1;
            }

            FluentIterable<SourcePath> unlistedSrcs = FluentIterable.from(paths)
                    .transform(SourcePaths.toSourcePath(getProjectFilesystem()))
                    .filter(Predicates.not(Predicates.in(srcs)));
            if (!unlistedSrcs.isEmpty()) {
                context.logError(new RuntimeException(),
                        "Entry path '%s' transitively uses the following source files which were not "
                                + "included in 'srcs':\n%s",
                        entryPath, Joiner.on('\n').join(unlistedSrcs));
                return 1;
            }

            Hasher hasher = Hashing.sha1().newHasher();
            for (Path path : paths) {
                try {
                    hasher.putUnencodedChars(getProjectFilesystem().computeSha1(path));
                } catch (IOException e) {
                    context.logError(e, "Error hashing input file: %s", path);
                    return 1;
                }
            }

            String inputsHash = hasher.hash().toString();
            buildableContext.addMetadata(METADATA_KEY_FOR_INPUTS_HASH, inputsHash);
            getProjectFilesystem().writeContentsToPath(inputsHash, inputsHashFile);
            return 0;
        }
    });

    return steps.build();
}

From source file:com.google.devtools.build.lib.query2.DepsUnboundedVisitor.java

private void checkIfMissingTargets(Iterable<SkyKey> keys, Map<SkyKey, Iterable<SkyKey>> depMap) {
    if (depMap.size() != Iterables.size(keys)) {
        Iterable<Label> missingTargets = Iterables.transform(
                Iterables.filter(keys, Predicates.not(Predicates.in(depMap.keySet()))), SKYKEY_TO_LABEL);
        env.getEventHandler().handle(Event.warn("Targets were missing from graph: " + missingTargets));
    }
}

From source file:com.google.jenkins.flakyTestHandler.plugin.HistoryAggregatedFlakyTestResultAction.java

/**
 * Aggregates the flaky runs of one previous build and puts the results into a map from test
 * name to a map from SCM revision to the aggregated flaky stats for that revision.
 *
 * @param build the build to be aggregated
 */
public void aggregateOneBuild(AbstractBuild<?, ?> build) {
    FlakyTestResultAction action = build.getAction(FlakyTestResultAction.class);
    if (action == null) {
        return;
    }

    FlakyRunStats runStats = action.getFlakyRunStats();

    if (runStats == null) {
        return;
    }

    Map<String, SingleTestFlakyStatsWithRevision> testFlakyStatsMap = runStats
            .getTestFlakyStatsWithRevisionMap();

    if (testFlakyStatsMap == null) {
        // Skip old build which doesn't have the map
        return;
    }

    if (build.getCause(DeflakeCause.class) == null) {
        // This is a non-deflake build, update allTests
        allTests = testFlakyStatsMap.keySet();
    }

    for (Map.Entry<String, SingleTestFlakyStatsWithRevision> testFlakyStat : testFlakyStatsMap.entrySet()) {
        String testName = testFlakyStat.getKey();
        String revision = testFlakyStat.getValue().getRevision();
        SingleTestFlakyStats stats = testFlakyStat.getValue().getStats();

        if (aggregatedTestFlakyStatsWithRevision.containsKey(testName)) {
            Map<String, SingleTestFlakyStats> testFlakyStatMap = aggregatedTestFlakyStatsWithRevision
                    .get(testName);

            if (testFlakyStatMap.containsKey(revision)) {
                // Merge flaky stats with the same test and the same revision
                testFlakyStatMap.get(revision).merge(stats);
            } else {
                // First specific revision flaky stat for a given test
                testFlakyStatMap.put(revision, new SingleTestFlakyStats(stats));
            }
        } else {
            // The first test entry
            Map<String, SingleTestFlakyStats> testFlakyStatMap = new LinkedHashMap<String, SingleTestFlakyStats>();
            testFlakyStatMap.put(revision, new SingleTestFlakyStats(stats));
            aggregatedTestFlakyStatsWithRevision.put(testName, testFlakyStatMap);

        }
    }

    aggregatedFlakyStats = Maps.filterKeys(
            Maps.transformValues(aggregatedTestFlakyStatsWithRevision, REVISION_STATS_MAP_TO_AGGREGATED_STATS),
            Predicates.in(allTests));
}

From source file:com.facebook.presto.orc.StripeReader.java

public Stripe readStripe(StripeInformation stripe) throws IOException {
    // read the stripe footer
    StripeFooter stripeFooter = readStripeFooter(stripe);
    List<ColumnEncoding> columnEncodings = stripeFooter.getColumnEncodings();

    // get streams for selected columns
    Map<StreamId, Stream> streams = new HashMap<>();
    boolean hasRowGroupDictionary = false;
    for (Stream stream : stripeFooter.getStreams()) {
        if (includedOrcColumns.contains(stream.getColumn())) {
            streams.put(new StreamId(stream), stream);

            ColumnEncodingKind columnEncoding = columnEncodings.get(stream.getColumn()).getColumnEncodingKind();
            if (columnEncoding == DICTIONARY && stream.getStreamKind() == StreamKind.IN_DICTIONARY) {
                hasRowGroupDictionary = true;
            }
        }
    }

    // handle stripes with more than one row group or a dictionary
    if ((stripe.getNumberOfRows() > rowsInRowGroup) || hasRowGroupDictionary) {
        // determine ranges of the stripe to read
        Map<StreamId, DiskRange> diskRanges = getDiskRanges(stripeFooter.getStreams());
        diskRanges = Maps.filterKeys(diskRanges, Predicates.in(streams.keySet()));

        // read the file regions
        Map<StreamId, OrcInputStream> streamsData = readDiskRanges(stripe.getOffset(), diskRanges);

        // read the row index for each column
        Map<Integer, List<RowGroupIndex>> columnIndexes = readColumnIndexes(streams, streamsData);

        // select the row groups matching the tuple domain
        Set<Integer> selectedRowGroups = selectRowGroups(stripe, columnIndexes);

        // if all row groups are skipped, return null
        if (selectedRowGroups.isEmpty()) {
            return null;
        }

        // value streams
        Map<StreamId, ValueStream<?>> valueStreams = createValueStreams(streams, streamsData, columnEncodings);

        // build the dictionary streams
        StreamSources dictionaryStreamSources = createDictionaryStreamSources(streams, valueStreams,
                columnEncodings);

        // build the row groups
        try {
            List<RowGroup> rowGroups = createRowGroups(stripe.getNumberOfRows(), streams, valueStreams,
                    columnIndexes, selectedRowGroups, columnEncodings);

            return new Stripe(stripe.getNumberOfRows(), columnEncodings, rowGroups, dictionaryStreamSources);
        } catch (InvalidCheckpointException e) {
            // The ORC file contains a corrupt checkpoint stream
            // If the file does not have a row group dictionary, treat the stripe as a single row group. Otherwise,
            // we must fail because the length of the row group dictionary is contained in the checkpoint stream.
            if (hasRowGroupDictionary) {
                throw new OrcCorruptionException(e, "ORC file %s has corrupt checkpoints", orcDataSource);
            }
        }
    }

    // stripe only has one row group and no dictionary
    ImmutableMap.Builder<StreamId, DiskRange> diskRangesBuilder = ImmutableMap.builder();
    for (Entry<StreamId, DiskRange> entry : getDiskRanges(stripeFooter.getStreams()).entrySet()) {
        StreamId streamId = entry.getKey();
        if (streamId.getStreamKind() != ROW_INDEX && streams.keySet().contains(streamId)) {
            diskRangesBuilder.put(entry);
        }
    }
    ImmutableMap<StreamId, DiskRange> diskRanges = diskRangesBuilder.build();

    // read the file regions
    Map<StreamId, OrcInputStream> streamsData = readDiskRanges(stripe.getOffset(), diskRanges);

    // value streams
    Map<StreamId, ValueStream<?>> valueStreams = createValueStreams(streams, streamsData, columnEncodings);

    // build the dictionary streams
    StreamSources dictionaryStreamSources = createDictionaryStreamSources(streams, valueStreams,
            columnEncodings);

    // build the row group
    ImmutableMap.Builder<StreamId, StreamSource<?>> builder = ImmutableMap.builder();
    for (Entry<StreamId, ValueStream<?>> entry : valueStreams.entrySet()) {
        builder.put(entry.getKey(), new ValueStreamSource<>(entry.getValue()));
    }
    RowGroup rowGroup = new RowGroup(0, 0, stripe.getNumberOfRows(), new StreamSources(builder.build()));

    return new Stripe(stripe.getNumberOfRows(), columnEncodings, ImmutableList.of(rowGroup),
            dictionaryStreamSources);
}

From source file:edu.buaa.satla.analysis.core.algorithm.CounterexampleCheckAlgorithm.java

@Override
public boolean run(ReachedSet reached) throws CPAException, InterruptedException {
    boolean sound = true;

    while (reached.hasWaitingState()) {
        sound &= algorithm.run(reached);
        assert ARGUtils.checkARG(reached);

        ARGState lastState = (ARGState) reached.getLastState();

        Deque<ARGState> errorStates = new ArrayDeque<>();
        if (lastState != null && lastState.isTarget()) {
            errorStates.add(lastState);
        } else {
            from(reached).transform(AbstractStates.toState(ARGState.class))
                    .filter(AbstractStates.IS_TARGET_STATE)
                    .filter(Predicates.not(Predicates.in(checkedTargetStates))).copyInto(errorStates);
        }

        if (errorStates.isEmpty()) {
            // no errors, so no analysis necessary
            break;
        }

        // check counterexample
        checkTime.start();
        try {
            boolean foundCounterexample = false;
            while (!errorStates.isEmpty()) {
                ARGState errorState = errorStates.pollFirst();
                if (!reached.contains(errorState)) {
                    // errorState was already removed due to earlier loop iterations
                    continue;
                }

                sound = checkCounterexample(errorState, reached, sound);
                if (reached.contains(errorState)) {
                    checkedTargetStates.add(errorState);
                    foundCounterexample = true;
                }
            }

            if (foundCounterexample) {
                break;
            }
        } finally {
            checkTime.stop();
        }
    }
    return sound;
}

From source file:org.apache.aurora.scheduler.updater.JobDiff.java

/**
 * Calculates the diff necessary to change the current state of a job to the proposed state.
 *
 * @param taskStore Store to fetch the job's current state from.
 * @param job Job being diffed.
 * @param proposedState Proposed state to move the job towards.
 * @param scope Instances to limit the diff to.
 * @return A diff of the current state compared with {@code proposedState}, within {@code scope}.
 */
public static JobDiff compute(TaskStore taskStore, IJobKey job, Map<Integer, ITaskConfig> proposedState,
        Set<IRange> scope) {

    Map<Integer, ITaskConfig> currentState = ImmutableMap.copyOf(Maps.transformValues(
            Maps.uniqueIndex(taskStore.fetchTasks(Query.jobScoped(job).active()), Tasks::getInstanceId),
            Tasks::getConfig));

    JobDiff diff = computeUnscoped(currentState, job, proposedState);
    if (scope.isEmpty()) {
        return diff;
    } else {
        Set<Integer> limit = Numbers.rangesToInstanceIds(scope);
        Map<Integer, ITaskConfig> replaced = ImmutableMap
                .copyOf(Maps.filterKeys(diff.getReplacedInstances(), Predicates.in(limit)));
        Set<Integer> replacements = ImmutableSet
                .copyOf(Sets.intersection(diff.getReplacementInstances(), limit));

        Set<Integer> unchangedIds = ImmutableSet.copyOf(Sets.difference(
                ImmutableSet.copyOf(Sets.difference(currentState.keySet(), replaced.keySet())), replacements));
        Map<Integer, ITaskConfig> unchanged = ImmutableMap
                .copyOf(Maps.filterKeys(currentState, Predicates.in(unchangedIds)));

        return new JobDiff(replaced, replacements, unchanged);
    }
}

From source file:com.facebook.presto.hive.HivePartitionManager.java

public HivePartitionResult getPartitions(ConnectorSession session, HiveMetastore metastore,
        ConnectorTableHandle tableHandle, TupleDomain<ColumnHandle> effectivePredicate) {
    HiveTableHandle hiveTableHandle = checkType(tableHandle, HiveTableHandle.class, "tableHandle");
    requireNonNull(effectivePredicate, "effectivePredicate is null");

    if (effectivePredicate.isNone()) {
        return new HivePartitionResult(ImmutableList.of(), TupleDomain.none(), TupleDomain.none());
    }

    SchemaTableName tableName = hiveTableHandle.getSchemaTableName();
    Table table = getTable(metastore, tableName);
    Optional<HiveBucketing.HiveBucket> bucket = getHiveBucket(table,
            TupleDomain.extractFixedValues(effectivePredicate).get());

    TupleDomain<HiveColumnHandle> compactEffectivePredicate = toCompactTupleDomain(effectivePredicate,
            domainCompactionThreshold);

    if (table.getPartitionKeys().isEmpty()) {
        return new HivePartitionResult(
                ImmutableList.of(new HivePartition(tableName, compactEffectivePredicate, bucket)),
                effectivePredicate, TupleDomain.none());
    }

    List<HiveColumnHandle> partitionColumns = getPartitionKeyColumnHandles(connectorId, table);
    List<String> partitionNames = getFilteredPartitionNames(metastore, tableName, partitionColumns,
            effectivePredicate);

    // do a final pass to filter based on fields that could not be used to filter the partitions
    ImmutableList.Builder<HivePartition> partitions = ImmutableList.builder();
    for (String partitionName : partitionNames) {
        Optional<Map<ColumnHandle, NullableValue>> values = parseValuesAndFilterPartition(partitionName,
                partitionColumns, effectivePredicate);

        if (values.isPresent()) {
            partitions.add(new HivePartition(tableName, compactEffectivePredicate, partitionName, values.get(),
                    bucket));
        }
    }

    // All partition key domains will be fully evaluated, so we don't need to include those
    TupleDomain<ColumnHandle> remainingTupleDomain = TupleDomain.withColumnDomains(
            Maps.filterKeys(effectivePredicate.getDomains().get(), not(Predicates.in(partitionColumns))));
    TupleDomain<ColumnHandle> enforcedTupleDomain = TupleDomain.withColumnDomains(
            Maps.filterKeys(effectivePredicate.getDomains().get(), Predicates.in(partitionColumns)));
    return new HivePartitionResult(partitions.build(), remainingTupleDomain, enforcedTupleDomain);
}

From source file:org.sonar.batch.bootstrap.BatchExtensionDictionnary.java

public <T> Collection<T> sort(Collection<T> extensions) {
    DirectAcyclicGraph dag = new DirectAcyclicGraph();

    for (T extension : extensions) {
        dag.add(extension);
        for (Object dependency : getDependencies(extension)) {
            dag.add(extension, dependency);
        }
        for (Object generates : getDependents(extension)) {
            dag.add(generates, extension);
        }
        completePhaseDependencies(dag, extension);
    }
    List sortedList = dag.sort();

    return Collections2.filter(sortedList, Predicates.in(extensions));
}

From source file:eu.lp0.cursus.scoring.scores.impl.TopCountryRacePointsData.java

@Override
protected Map<Pilot, Integer> calculateRacePoints(Race race) {
    Map<Pilot, Integer> racePoints = new HashMap<Pilot, Integer>(scores.getPilots().size() * 2);
    List<Pilot> lapOrder = scores.getLapOrder(race);
    Collection<Pilot> scoredPilots = lazyScoredPilots.get().get(race);

    // Score everyone who completed a lap
    int points = 0;
    for (Pilot pilot : Iterables.filter(lapOrder, Predicates.in(scoredPilots))) {
        racePoints.put(pilot, points);
        points += (points == 0) ? 2 : 1;
    }

    // Score everyone else
    for (Pilot pilot : scores.getPilots()) {
        if (!scoredPilots.contains(pilot)) {
            racePoints.put(pilot, 0);
        } else if (!racePoints.containsKey(pilot)) {
            racePoints.put(pilot, getPointsForNoLaps(pilot, race));
        }
    }

    return racePoints;
}