List of usage examples for com.google.common.base.Predicates.in
public static <T> Predicate<T> in(Collection<? extends T> target)
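Predicates.in(collection) returns a predicate that evaluates to true when its input is contained in the given collection, delegating each test to Collection.contains. Before the real-world examples below, here is a minimal self-contained sketch of the basic behavior (the data and class name are illustrative, not taken from any of the projects listed):

import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;

import java.util.Set;

public class PredicatesInDemo {
    public static void main(String[] args) {
        Set<String> vowels = ImmutableSet.of("a", "e", "i", "o", "u");
        Predicate<String> isVowel = Predicates.in(vowels);

        // Direct membership tests.
        System.out.println(isVowel.apply("e")); // true
        System.out.println(isVowel.apply("x")); // false

        // Typical use: filtering another collection.
        Iterable<String> letters = ImmutableSet.of("a", "b", "c", "e");
        System.out.println(ImmutableList.copyOf(Iterables.filter(letters, isVowel))); // [a, e]
    }
}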
From source file: net.automatalib.util.automata.predicates.TransitionPredicates.java
public static <S, I, T> TransitionPredicate<S, I, T> outputIn(TransitionOutput<? super T, ?> transOut,
        Collection<?> outputs) {
    return outputSatisfies(transOut, Predicates.in(outputs));
}
From source file: com.google.devtools.build.lib.generatedprojecttest.util.RuleSetUtils.java
/**
 * Predicate for checking that a rule class name is not in the excluded set.
 */
public static Predicate<String> notContainsAnyOf(final ImmutableSet<String> excluded) {
    return Predicates.not(Predicates.in(excluded));
}
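The composition above, Predicates.not(Predicates.in(excluded)), is the standard Guava idiom for "keep everything except these". A hedged sketch of the same pattern outside of Bazel (all names and data are illustrative):

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;

import java.util.List;

public class ExclusionDemo {
    public static void main(String[] args) {
        ImmutableSet<String> excluded = ImmutableSet.of("deprecated_rule", "internal_rule");
        List<String> rules = ImmutableList.of("cc_library", "deprecated_rule", "java_binary");

        // Keeps only the rules that are NOT in the excluded set.
        Iterable<String> kept = Iterables.filter(rules, Predicates.not(Predicates.in(excluded)));
        System.out.println(ImmutableList.copyOf(kept)); // [cc_library, java_binary]
    }
}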
From source file: com.enablens.dfa.datastructures.Node.java
/**
 * Gets the parent id.
 *
 * @return the subset of this node's data keyed by the parent type's
 *         identity attributes, or null if this node is a topology
 */
public Map<String, String> getParentId() {
    if (type == NodeType.TOPOLOGY) {
        return null;
    }
    return Maps.filterKeys(data, Predicates.in(type.getParentType().getIdentityAttributes()));
}
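Maps.filterKeys(map, Predicates.in(keys)) yields a view of the map restricted to the given key set, a pattern that recurs in several examples on this page. A minimal sketch (the map contents are illustrative):

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;

import java.util.Map;

public class FilterKeysDemo {
    public static void main(String[] args) {
        Map<String, String> data = ImmutableMap.of(
                "name", "leaf-01",
                "site", "dc-west",
                "role", "switch");

        // Keep only the entries whose keys are in the allowed set.
        Map<String, String> idView = Maps.filterKeys(data, Predicates.in(ImmutableSet.of("name", "site")));
        System.out.println(idView); // {name=leaf-01, site=dc-west}
    }
}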
From source file: org.eclipse.sirius.business.internal.contribution.IncrementalModelContributor.java
/**
 * Applies all the applicable contributions found in the sources (including
 * the target model itself) to the target model.
 *
 * @param targetModel
 *            the model to which the contributions should be applied.
 * @param contributionSources
 *            source models, in addition to the target model itself, which
 *            may provide contributions.
 * @return the modified target model, with the contributions applied.
 */
public EObject apply(EObject targetModel, Collection<? extends EObject> contributionSources) {
    Collection<EObject> referenceInputs = Lists.newArrayList(contributionSources);
    currentCopier = new EcoreUtil.Copier();
    List<EObject> inputs = Lists.newArrayList(currentCopier.copyAll(referenceInputs));
    currentCopier.copyReferences();
    Map<EObject, Object> inputIds = Maps.newHashMap();
    for (EObject root : referenceInputs) {
        for (EObject obj : AllContents.of(root, true)) {
            inputIds.put(currentCopier.get(obj), idFunction.apply(obj));
        }
    }
    viewpointUris = Maps.newHashMap();
    for (Viewpoint originalVP : Iterables.filter(currentCopier.keySet(), Viewpoint.class)) {
        Option<URI> uri = new ViewpointQuery(originalVP).getViewpointURI();
        if (uri.some()) {
            viewpointUris.put((Viewpoint) currentCopier.get(originalVP), uri.get().toString());
        }
    }
    EObject result = super.apply(currentCopier.get(targetModel), inputs);
    postProcess(result);
    contributions = Maps.newHashMap(Maps.filterKeys(inputIds, Predicates.in(additions)));
    if (model == null) {
        model = result;
        modelIds = Maps.newHashMap(Maps.filterKeys(inputIds, new Predicate<EObject>() {
            public boolean apply(EObject input) {
                return input == model || EcoreUtil.isAncestor(model, input);
            }
        }));
    } else {
        Function<EObject, Object> f = update(result, inputIds);
        Map<EObject, Object> newIds = Maps.newHashMap();
        for (EObject obj : AllContents.of(model, true)) {
            newIds.put(obj, f.apply(obj));
        }
        modelIds = newIds;
    }
    return model;
}
From source file: net.automatalib.util.automata.predicates.TransitionPredicates.java
public static <S, I, T> TransitionPredicate<S, I, T> outputNotIn(TransitionOutput<? super T, ?> transOut,
        Collection<?> outputs) {
    return outputViolates(transOut, Predicates.in(outputs));
}
From source file: eu.numberfour.n4js.external.ExternalProjectsCollector.java
/**
 * Sugar for collecting {@link IWorkspace Eclipse workspace} projects that have a direct
 * dependency on any of the given external projects. Same as
 * {@link #collectProjectsWithDirectExternalDependencies()} but considers only the projects
 * given as the argument instead of all available projects.
 *
 * @param externalProjects
 *            the external projects to consider as possible dependencies of an Eclipse
 *            workspace based project.
 * @return an iterable of Eclipse workspace projects that have a direct dependency on an
 *         external project given as the argument.
 */
public Iterable<IProject> collectProjectsWithDirectExternalDependencies(
        final Iterable<? extends IProject> externalProjects) {
    if (!Platform.isRunning()) {
        return emptyList();
    }
    final Collection<String> externalIds = from(externalProjects).transform(p -> p.getName()).toSet();
    final Predicate<String> externalIdsFilter = Predicates.in(externalIds);
    return from(asList(getWorkspace().getRoot().getProjects()))
            .filter(p -> Iterables.any(getDirectExternalDependencyIds(p), externalIdsFilter));
}
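This snippet shows a common combination: materialize a set of identifiers once, wrap it in Predicates.in, and reuse that single predicate with Iterables.any across many candidates. A hedged, framework-free sketch of the same shape (all names and data are illustrative):

import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;

import java.util.List;
import java.util.Set;

public class AnyInDemo {
    public static void main(String[] args) {
        // The ids we are interested in, built once up front.
        Set<String> externalIds = ImmutableSet.of("lib-a", "lib-b");
        Predicate<String> isExternal = Predicates.in(externalIds);

        // Each candidate project has its own list of dependency ids.
        List<String> projectA = ImmutableList.of("lib-x", "lib-a"); // depends on an external project
        List<String> projectB = ImmutableList.of("lib-y");          // does not

        for (List<String> deps : ImmutableList.of(projectA, projectB)) {
            System.out.println(Iterables.any(deps, isExternal)); // true, then false
        }
    }
}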
From source file: org.sosy_lab.cpachecker.core.algorithm.counterexamplecheck.CounterexampleCheckAlgorithm.java
@Override
public AlgorithmStatus run(ReachedSet reached) throws CPAException, InterruptedException {
    AlgorithmStatus status = AlgorithmStatus.SOUND_AND_PRECISE;
    while (reached.hasWaitingState()) {
        status = status.update(algorithm.run(reached));
        assert ARGUtils.checkARG(reached);

        ARGState lastState = (ARGState) reached.getLastState();
        final List<ARGState> errorStates;
        if (lastState != null && lastState.isTarget()) {
            errorStates = checkedTargetStates.contains(lastState) ? ImmutableList.of()
                    : ImmutableList.of(lastState);
        } else {
            errorStates = from(reached).transform(AbstractStates.toState(ARGState.class))
                    .filter(AbstractStates.IS_TARGET_STATE)
                    .filter(Predicates.not(Predicates.in(checkedTargetStates))).toList();
        }

        if (errorStates.isEmpty()) {
            // no errors, so no analysis necessary
            break;
        }

        // check counterexample
        checkTime.start();
        try {
            List<ARGState> infeasibleErrorPaths = new ArrayList<>();
            boolean foundCounterexample = false;
            for (ARGState errorState : errorStates) {
                boolean counterexampleProvedFeasible = checkCounterexample(errorState, reached);
                if (counterexampleProvedFeasible) {
                    checkedTargetStates.add(errorState);
                    foundCounterexample = true;
                    status = status.withPrecise(true);
                } else {
                    infeasibleErrorPaths.add(errorState);
                    status = status.withSound(false);
                }
            }

            if (foundCounterexample) {
                break;
            } else {
                assert !infeasibleErrorPaths.isEmpty();
                throw new InfeasibleCounterexampleException(
                        "Error path found, but identified as infeasible by counterexample check with "
                                + checkerType + ".",
                        from(infeasibleErrorPaths).transform(ARGUtils::getOnePathTo).toList());
            }
        } finally {
            checkTime.stop();
        }
    }
    return status;
}
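Note that Predicates.in does not defensively copy its argument: the predicate is a live view over the backing collection. In the example above, states added to checkedTargetStates in one loop iteration are automatically excluded by the same predicate expression on the next. A small sketch of that behavior (names illustrative):

import com.google.common.base.Predicate;
import com.google.common.base.Predicates;

import java.util.HashSet;
import java.util.Set;

public class LiveViewDemo {
    public static void main(String[] args) {
        Set<String> checked = new HashSet<>();
        Predicate<String> alreadyChecked = Predicates.in(checked);

        System.out.println(alreadyChecked.apply("s1")); // false

        // Mutating the backing collection changes the predicate's answers.
        checked.add("s1");
        System.out.println(alreadyChecked.apply("s1")); // true
    }
}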
From source file: com.splicemachine.orc.StripeReader.java
public Stripe readStripe(StripeInformation stripe, AggregatedMemoryContext systemMemoryUsage) throws IOException {
    // read the stripe footer
    StripeFooter stripeFooter = readStripeFooter(stripe, systemMemoryUsage);
    List<ColumnEncoding> columnEncodings = stripeFooter.getColumnEncodings();

    // get streams for selected columns
    Map<StreamId, Stream> streams = new HashMap<>();
    boolean hasRowGroupDictionary = false;
    for (Stream stream : stripeFooter.getStreams()) {
        if (includedOrcColumns.contains(stream.getColumn())) {
            streams.put(new StreamId(stream), stream);

            ColumnEncodingKind columnEncoding = columnEncodings.get(stream.getColumn()).getColumnEncodingKind();
            if (columnEncoding == DICTIONARY && stream.getStreamKind() == StreamKind.IN_DICTIONARY) {
                hasRowGroupDictionary = true;
            }
        }
    }

    // handle stripes with more than one row group or a dictionary
    if ((stripe.getNumberOfRows() > rowsInRowGroup) || hasRowGroupDictionary) {
        // determine ranges of the stripe to read
        Map<StreamId, DiskRange> diskRanges = getDiskRanges(stripeFooter.getStreams());
        diskRanges = Maps.filterKeys(diskRanges, Predicates.in(streams.keySet()));

        // read the file regions
        Map<StreamId, OrcInputStream> streamsData = readDiskRanges(stripe.getOffset(), diskRanges, systemMemoryUsage);

        // read the bloom filter for each column
        Map<Integer, List<HiveBloomFilter>> bloomFilterIndexes = readBloomFilterIndexes(streams, streamsData);

        // read the row index for each column
        Map<Integer, List<RowGroupIndex>> columnIndexes = readColumnIndexes(streams, streamsData, bloomFilterIndexes);

        // select the row groups matching the tuple domain
        Set<Integer> selectedRowGroups = selectRowGroups(stripe, columnIndexes);

        // if all row groups are skipped, return null
        if (selectedRowGroups.isEmpty()) {
            // set accounted memory usage to zero
            systemMemoryUsage.close();
            return null;
        }

        // value streams
        Map<StreamId, ValueStream<?>> valueStreams = createValueStreams(streams, streamsData, columnEncodings);

        // build the dictionary streams
        StreamSources dictionaryStreamSources = createDictionaryStreamSources(streams, valueStreams, columnEncodings);

        // build the row groups
        try {
            List<RowGroup> rowGroups = createRowGroups(stripe.getNumberOfRows(), streams, valueStreams,
                    columnIndexes, selectedRowGroups, columnEncodings);

            return new Stripe(stripe.getNumberOfRows(), columnEncodings, rowGroups, dictionaryStreamSources);
        } catch (InvalidCheckpointException e) {
            // The ORC file contains a corrupt checkpoint stream
            // If the file does not have a row group dictionary, treat the stripe as a single row group. Otherwise,
            // we must fail because the length of the row group dictionary is contained in the checkpoint stream.
            if (hasRowGroupDictionary) {
                throw new OrcCorruptionException(e, "ORC file %s has corrupt checkpoints", orcDataSource);
            }
        }
    }

    // stripe only has one row group and no dictionary
    ImmutableMap.Builder<StreamId, DiskRange> diskRangesBuilder = ImmutableMap.builder();
    for (Entry<StreamId, DiskRange> entry : getDiskRanges(stripeFooter.getStreams()).entrySet()) {
        StreamId streamId = entry.getKey();
        if (streamId.getStreamKind() != ROW_INDEX && streams.keySet().contains(streamId)) {
            diskRangesBuilder.put(entry);
        }
    }
    ImmutableMap<StreamId, DiskRange> diskRanges = diskRangesBuilder.build();

    // read the file regions
    Map<StreamId, OrcInputStream> streamsData = readDiskRanges(stripe.getOffset(), diskRanges, systemMemoryUsage);

    // value streams
    Map<StreamId, ValueStream<?>> valueStreams = createValueStreams(streams, streamsData, columnEncodings);

    // build the dictionary streams
    StreamSources dictionaryStreamSources = createDictionaryStreamSources(streams, valueStreams, columnEncodings);

    // build the row group
    ImmutableMap.Builder<StreamId, StreamSource<?>> builder = ImmutableMap.builder();
    for (Entry<StreamId, ValueStream<?>> entry : valueStreams.entrySet()) {
        builder.put(entry.getKey(), new ValueStreamSource<>(entry.getValue()));
    }
    RowGroup rowGroup = new RowGroup(0, 0, stripe.getNumberOfRows(), new StreamSources(builder.build()));

    return new Stripe(stripe.getNumberOfRows(), columnEncodings, ImmutableList.of(rowGroup), dictionaryStreamSources);
}
From source file: org.apache.whirr.ByonClusterController.java
@Override
public Set<? extends NodeMetadata> getNodes(ClusterSpec clusterSpec) throws IOException, InterruptedException {
    ComputeServiceContext computeServiceContext = getCompute().apply(clusterSpec);
    ComputeService computeService = computeServiceContext.getComputeService();
    return computeService.listNodesDetailsMatching(Predicates.in(computeService.listNodes()));
}
From source file: com.google.cloud.trace.sdk.CloudTraceWriter.java
@Override
public void writeSpans(List<TraceSpanData> spans) throws CloudTraceException {
    // Aggregate all the spans by trace. It's more efficient to call the API this way.
    Map<String, Trace> traces = new HashMap<>();

    // Keep track of traces we really want to write out.
    Set<String> shouldWriteTraces = new HashSet<>();

    for (TraceSpanData spanData : spans) {
        spanData.end();
        TraceSpan span = convertTraceSpanDataToSpan(spanData);
        if (spanData.getContext().getShouldWrite()) {
            shouldWriteTraces.add(spanData.getContext().getTraceId());
        }
        if (!traces.containsKey(spanData.getContext().getTraceId())) {
            Trace trace = convertTraceSpanDataToTrace(spanData);
            traces.put(spanData.getContext().getTraceId(), trace);
            trace.setSpans(new ArrayList<TraceSpan>());
        }
        traces.get(spanData.getContext().getTraceId()).getSpans().add(span);
    }

    // Only write out the ones where at least one trace span said to write.
    traces = Maps.filterKeys(traces, Predicates.in(shouldWriteTraces));

    // Write to the API.
    if (!traces.isEmpty()) {
        writeTraces(new Traces().setTraces(new ArrayList<Trace>(traces.values())));
    }
}
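One practical note that applies across these examples: Predicates.in delegates each test to Collection.contains, so membership checks against a List cost O(n) per call while a hash-based set answers in roughly constant time. When the source collection is a list and the predicate will be applied many times, it is usually worth copying into a set first. A hedged sketch (data is illustrative):

import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;

import java.util.List;

public class InPerformanceDemo {
    public static void main(String[] args) {
        List<String> allowedList = ImmutableList.of("a", "b", "c");

        // Each apply() call scans the list: O(n) per test.
        Predicate<String> slow = Predicates.in(allowedList);

        // Copy once into a set: each apply() call is then O(1) on average.
        Predicate<String> fast = Predicates.in(ImmutableSet.copyOf(allowedList));

        System.out.println(slow.apply("b") && fast.apply("b")); // true
    }
}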