List of usage examples for com.google.common.base.Predicates.in
public static <T> Predicate<T> in(Collection<? extends T> target)
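For orientation before the project snippets, here is a minimal, self-contained sketch of the method. Predicates.in(collection) returns a predicate that is true exactly for the elements contained in the given collection; it is commonly combined with Predicates.not and Iterables.filter, as the examples below show. The class name and sample values are illustrative only, not taken from any of the projects listed.

import java.util.List;
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

public class PredicatesInExample {
    public static void main(String[] args) {
        // Membership is checked with the collection's contains() method.
        List<String> allowed = ImmutableList.of("a", "b", "c");
        Predicate<String> isAllowed = Predicates.in(allowed);

        List<String> input = ImmutableList.of("a", "x", "c", "y");

        // keep only elements that are in the allowed collection
        Iterable<String> kept = Iterables.filter(input, isAllowed);
        // Predicates.not(...) inverts the membership test, a recurring pattern below
        Iterable<String> dropped = Iterables.filter(input, Predicates.not(isAllowed));

        System.out.println(ImmutableList.copyOf(kept));     // [a, c]
        System.out.println(ImmutableList.copyOf(dropped));  // [x, y]
    }
}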
From source file:net.derquinse.bocas.AbstractGuavaCachingBocas.java
@Override
protected void putAll(Map<ByteString, MemoryByteSource> entries) {
    if (alwaysWrite) {
        bocas.putAll(entries.values());
    }
    final Map<K, MemoryByteSource> map = cache.asMap();
    final Map<K, MemoryByteSource> notCached = Maps.filterKeys(toInternalEntryMap(entries),
            Predicates.not(Predicates.in(map.keySet())));
    if (notCached.isEmpty()) {
        return;
    }
    if (!alwaysWrite) {
        bocas.putAll(notCached.values());
    }
    map.putAll(notCached);
}
From source file:org.gradle.model.internal.manage.schema.extract.ManagedImplTypeSchemaExtractionStrategySupport.java
public <R> ModelSchemaExtractionResult<R> extract(final ModelSchemaExtractionContext<R> extractionContext,
        ModelSchemaStore store, final ModelSchemaCache cache) {
    ModelType<R> type = extractionContext.getType();
    Class<? super R> clazz = type.getRawClass();
    if (isTarget(type)) {
        validateType(type, extractionContext);

        Iterable<Method> methods = Arrays.asList(clazz.getMethods());
        if (!clazz.isInterface()) {
            methods = filterIgnoredMethods(methods);
        }

        ImmutableListMultimap<String, Method> methodsByName = Multimaps.index(methods,
                new Function<Method, String>() {
                    public String apply(Method method) {
                        return method.getName();
                    }
                });

        ensureNoOverloadedMethods(extractionContext, methodsByName);

        List<ModelProperty<?>> properties = Lists.newLinkedList();
        List<Method> handled = Lists.newArrayListWithCapacity(clazz.getMethods().length);
        ReturnTypeSpecializationOrdering returnTypeSpecializationOrdering = new ReturnTypeSpecializationOrdering();

        for (String methodName : methodsByName.keySet()) {
            if (methodName.startsWith("get") && !methodName.equals("get")) {
                ImmutableList<Method> getterMethods = methodsByName.get(methodName);

                // The overload check earlier verified that all methods for a given name are equivalent
                // for our purposes, so taking the first one with the most specialized return type is fine.
                Method sampleMethod = returnTypeSpecializationOrdering.max(getterMethods);

                boolean abstractGetter = Modifier.isAbstract(sampleMethod.getModifiers());

                if (sampleMethod.getParameterTypes().length != 0) {
                    throw invalidMethod(extractionContext, "getter methods cannot take parameters", sampleMethod);
                }

                Character getterPropertyNameFirstChar = methodName.charAt(3);
                if (!Character.isUpperCase(getterPropertyNameFirstChar)) {
                    throw invalidMethod(extractionContext,
                            "the 4th character of the getter method name must be an uppercase character",
                            sampleMethod);
                }

                ModelType<?> returnType = ModelType.returnType(sampleMethod);

                String propertyNameCapitalized = methodName.substring(3);
                String propertyName = StringUtils.uncapitalize(propertyNameCapitalized);
                String setterName = "set" + propertyNameCapitalized;
                ImmutableList<Method> setterMethods = methodsByName.get(setterName);

                boolean isWritable = !setterMethods.isEmpty();
                if (isWritable) {
                    Method setter = setterMethods.get(0);

                    if (!abstractGetter) {
                        throw invalidMethod(extractionContext,
                                "setters are not allowed for non-abstract getters", setter);
                    }
                    validateSetter(extractionContext, returnType, setter);
                    handled.addAll(setterMethods);
                }

                if (abstractGetter) {
                    ImmutableSet<ModelType<?>> declaringClasses = ImmutableSet
                            .copyOf(Iterables.transform(getterMethods, new Function<Method, ModelType<?>>() {
                                public ModelType<?> apply(Method input) {
                                    return ModelType.of(input.getDeclaringClass());
                                }
                            }));

                    boolean unmanaged = Iterables.any(getterMethods, new Predicate<Method>() {
                        public boolean apply(Method input) {
                            return input.getAnnotation(Unmanaged.class) != null;
                        }
                    });

                    properties.add(ModelProperty.of(returnType, propertyName, isWritable, declaringClasses,
                            unmanaged));
                }
                handled.addAll(getterMethods);
            }
        }

        Iterable<Method> notHandled = Iterables.filter(methodsByName.values(),
                Predicates.not(Predicates.in(handled)));

        // TODO - should call out valid getters without setters
        if (!Iterables.isEmpty(notHandled)) {
            throw invalidMethods(extractionContext, "only paired getter/setter methods are supported", notHandled);
        }

        Class<R> concreteClass = type.getConcreteClass();
        final ModelSchema<R> schema = createSchema(extractionContext, store, type, properties, concreteClass);
        Iterable<ModelSchemaExtractionContext<?>> propertyDependencies = Iterables.transform(properties,
                new Function<ModelProperty<?>, ModelSchemaExtractionContext<?>>() {
                    public ModelSchemaExtractionContext<?> apply(final ModelProperty<?> property) {
                        return toPropertyExtractionContext(extractionContext, property, cache);
                    }
                });

        return new ModelSchemaExtractionResult<R>(schema, propertyDependencies);
    } else {
        return null;
    }
}
From source file:com.google.devtools.build.lib.rules.extra.ExtraAction.java
@Nullable
@Override
public Iterable<Artifact> resolveInputsFromCache(ArtifactResolver artifactResolver,
        PackageRootResolver resolver, Collection<PathFragment> inputPaths)
        throws PackageRootResolutionException, InterruptedException {
    // We update the inputs directly from the shadowed action.
    Set<PathFragment> extraActionPathFragments = ImmutableSet
            .copyOf(Artifact.asPathFragments(extraActionInputs));
    return shadowedAction.resolveInputsFromCache(artifactResolver, resolver,
            Collections2.filter(inputPaths, Predicates.in(extraActionPathFragments)));
}
From source file:dagger.internal.codegen.ComponentProcessingStep.java
private ImmutableSet<Element> getElementsFromAnnotations(
        final SetMultimap<Class<? extends Annotation>, Element> elementsByAnnotation,
        ImmutableSet<? extends Class<? extends Annotation>> annotations) {
    return ImmutableSet.copyOf(
            Multimaps.filterKeys(elementsByAnnotation, Predicates.in(annotations)).values());
}
From source file:org.eclipse.sirius.diagram.sequence.ui.tool.internal.edit.validator.FrameCreationValidator.java
private void computeExpanzionZone(Range creationRange) {
    if (ccdTool instanceof InteractionUseCreationTool) {
        for (ISequenceEvent parent : localParents) {
            Range newExpansionZone = new Range(parent.getVerticalRange().getUpperBound() - 1,
                    creationRange.getUpperBound());
            expansionZone = expansionZone.union(newExpansionZone);
        }

        SortedSet<ISequenceEvent> overlapped = Sets.newTreeSet(new RangeComparator());
        overlapped.addAll(sequenceEventsInCreationRange.values());

        for (ISequenceEvent ise : Iterables.filter(overlapped, Predicates.not(Predicates.in(localParents)))) {
            int lowerBound = ise.getVerticalRange().getLowerBound();
            if (lowerBound >= creationRange.getLowerBound()) {
                Range newExpansionZone = new Range(lowerBound - 1, creationRange.getUpperBound());
                expansionZone = expansionZone.union(newExpansionZone);
                break;
            }
        }
    } else if (ccdTool instanceof CombinedFragmentCreationTool) {
        Collection<ISequenceEvent> partialOverlaps = Lists
                .newArrayList(Iterables.concat(localParents, eventsToShift));
        for (ISequenceEvent parent : localParents) {
            checkOtherLifelines(parent, partialOverlaps);
            int expansionCut = Math.max(creationRange.getLowerBound(),
                    parent.getVerticalRange().getUpperBound() - 1);
            Range newExpansionZone = new Range(expansionCut, creationRange.getUpperBound());
            expansionZone = expansionZone.union(newExpansionZone);
        }
        for (ISequenceEvent eventToShift : eventsToShift) {
            checkOtherLifelines(eventToShift, partialOverlaps);
            int expansionCut = Math.max(creationRange.getLowerBound(),
                    eventToShift.getVerticalRange().getLowerBound() - 1);
            Range newExpansionZone = new Range(expansionCut, creationRange.getUpperBound());
            expansionZone = expansionZone.union(newExpansionZone);
        }
    }
}
From source file:org.eclipse.sirius.diagram.sequence.ui.tool.internal.edit.policy.SequenceInteractionFeedBackBuilder.java
private void feedBackResizedElements(Collection<Figure> feedbacks) {
    for (ISequenceEvent movedElement : Iterables.filter(validator.getResizedStartMessages(),
            Predicates.not(Predicates.in(validator.getEventsInError())))) {
        addFeedBack(movedElement, ISE_FEEDBACK_COLOR, false, feedbacks,
                validator.getRangeFunction().apply(movedElement));
    }

    for (ISequenceEvent movedElement : Iterables.filter(validator.getResizedEndMessages(),
            Predicates.not(Predicates.in(validator.getEventsInError())))) {
        Range feedbackRange = validator.getRangeFunction().apply(movedElement);
        Range expansionZone = validator.getExpansionZone();
        if ((expansionZone != null && !expansionZone.isEmpty())
                && feedbackRange.includes(expansionZone.getUpperBound())) {
            feedbackRange = new Range(feedbackRange.getLowerBound(),
                    feedbackRange.getUpperBound() - expansionZone.width());
        }
        addFeedBack(movedElement, ISE_FEEDBACK_COLOR, false, feedbacks, feedbackRange);
    }
}
From source file:io.prestosql.orc.StripeReader.java
public Stripe readStripe(StripeInformation stripe, AggregatedMemoryContext systemMemoryUsage)
        throws IOException {
    // read the stripe footer
    StripeFooter stripeFooter = readStripeFooter(stripe, systemMemoryUsage);
    List<ColumnEncoding> columnEncodings = stripeFooter.getColumnEncodings();

    // get streams for selected columns
    Map<StreamId, Stream> streams = new HashMap<>();
    boolean hasRowGroupDictionary = false;
    for (Stream stream : stripeFooter.getStreams()) {
        if (includedOrcColumns.contains(stream.getColumn())) {
            streams.put(new StreamId(stream), stream);

            if (stream.getStreamKind() == StreamKind.IN_DICTIONARY) {
                ColumnEncoding columnEncoding = columnEncodings.get(stream.getColumn());

                if (columnEncoding.getColumnEncodingKind() == DICTIONARY) {
                    hasRowGroupDictionary = true;
                }

                Optional<List<DwrfSequenceEncoding>> additionalSequenceEncodings = columnEncoding
                        .getAdditionalSequenceEncodings();
                if (additionalSequenceEncodings.isPresent() && additionalSequenceEncodings.get().stream()
                        .map(DwrfSequenceEncoding::getValueEncoding)
                        .anyMatch(encoding -> encoding.getColumnEncodingKind() == DICTIONARY)) {
                    hasRowGroupDictionary = true;
                }
            }
        }
    }

    // handle stripes with more than one row group or a dictionary
    boolean invalidCheckPoint = false;
    if ((stripe.getNumberOfRows() > rowsInRowGroup) || hasRowGroupDictionary) {
        // determine ranges of the stripe to read
        Map<StreamId, DiskRange> diskRanges = getDiskRanges(stripeFooter.getStreams());
        diskRanges = Maps.filterKeys(diskRanges, Predicates.in(streams.keySet()));

        // read the file regions
        Map<StreamId, OrcInputStream> streamsData = readDiskRanges(stripe.getOffset(), diskRanges,
                systemMemoryUsage);

        // read the bloom filter for each column
        Map<StreamId, List<HiveBloomFilter>> bloomFilterIndexes = readBloomFilterIndexes(streams, streamsData);

        // read the row index for each column
        Map<StreamId, List<RowGroupIndex>> columnIndexes = readColumnIndexes(streams, streamsData,
                bloomFilterIndexes);
        if (writeValidation.isPresent()) {
            writeValidation.get().validateRowGroupStatistics(orcDataSource.getId(), stripe.getOffset(),
                    columnIndexes);
        }

        // select the row groups matching the tuple domain
        Set<Integer> selectedRowGroups = selectRowGroups(stripe, columnIndexes);

        // if all row groups are skipped, return null
        if (selectedRowGroups.isEmpty()) {
            // set accounted memory usage to zero
            systemMemoryUsage.close();
            return null;
        }

        // value streams
        Map<StreamId, ValueInputStream<?>> valueStreams = createValueStreams(streams, streamsData,
                columnEncodings);

        // build the dictionary streams
        InputStreamSources dictionaryStreamSources = createDictionaryStreamSources(streams, valueStreams,
                columnEncodings);

        // build the row groups
        try {
            List<RowGroup> rowGroups = createRowGroups(stripe.getNumberOfRows(), streams, valueStreams,
                    columnIndexes, selectedRowGroups, columnEncodings);

            return new Stripe(stripe.getNumberOfRows(), columnEncodings, rowGroups, dictionaryStreamSources);
        } catch (InvalidCheckpointException e) {
            // The ORC file contains a corrupt checkpoint stream.
            // If the file does not have a row group dictionary, treat the stripe as a single row group.
            // Otherwise, we must fail because the length of the row group dictionary is contained in the
            // checkpoint stream.
            if (hasRowGroupDictionary) {
                throw new OrcCorruptionException(e, orcDataSource.getId(), "Checkpoints are corrupt");
            }
            invalidCheckPoint = true;
        }
    }

    // stripe only has one row group and no dictionary
    ImmutableMap.Builder<StreamId, DiskRange> diskRangesBuilder = ImmutableMap.builder();
    for (Entry<StreamId, DiskRange> entry : getDiskRanges(stripeFooter.getStreams()).entrySet()) {
        StreamId streamId = entry.getKey();
        if (streams.keySet().contains(streamId)) {
            diskRangesBuilder.put(entry);
        }
    }
    ImmutableMap<StreamId, DiskRange> diskRanges = diskRangesBuilder.build();

    // read the file regions
    Map<StreamId, OrcInputStream> streamsData = readDiskRanges(stripe.getOffset(), diskRanges,
            systemMemoryUsage);

    long minAverageRowBytes = 0;
    for (Entry<StreamId, Stream> entry : streams.entrySet()) {
        if (entry.getKey().getStreamKind() == ROW_INDEX) {
            List<RowGroupIndex> rowGroupIndexes = metadataReader.readRowIndexes(hiveWriterVersion,
                    streamsData.get(entry.getKey()));
            checkState(rowGroupIndexes.size() == 1 || invalidCheckPoint,
                    "expect a single row group or an invalid check point");
            long totalBytes = 0;
            long totalRows = 0;
            for (RowGroupIndex rowGroupIndex : rowGroupIndexes) {
                ColumnStatistics columnStatistics = rowGroupIndex.getColumnStatistics();
                if (columnStatistics.hasMinAverageValueSizeInBytes()) {
                    totalBytes += columnStatistics.getMinAverageValueSizeInBytes()
                            * columnStatistics.getNumberOfValues();
                    totalRows += columnStatistics.getNumberOfValues();
                }
            }
            if (totalRows > 0) {
                minAverageRowBytes += totalBytes / totalRows;
            }
        }
    }

    // value streams
    Map<StreamId, ValueInputStream<?>> valueStreams = createValueStreams(streams, streamsData, columnEncodings);

    // build the dictionary streams
    InputStreamSources dictionaryStreamSources = createDictionaryStreamSources(streams, valueStreams,
            columnEncodings);

    // build the row group
    ImmutableMap.Builder<StreamId, InputStreamSource<?>> builder = ImmutableMap.builder();
    for (Entry<StreamId, ValueInputStream<?>> entry : valueStreams.entrySet()) {
        builder.put(entry.getKey(), new ValueInputStreamSource<>(entry.getValue()));
    }
    RowGroup rowGroup = new RowGroup(0, 0, stripe.getNumberOfRows(), minAverageRowBytes,
            new InputStreamSources(builder.build()));

    return new Stripe(stripe.getNumberOfRows(), columnEncodings, ImmutableList.of(rowGroup),
            dictionaryStreamSources);
}
From source file:org.sosy_lab.cpachecker.cpa.arg.counterexamples.CEXExporter.java
/**
 * Export an Error Trace in different formats, for example as C-file, dot-file or automaton.
 *
 * @param pTargetState state of an ARG, used as fallback, if pCounterexampleInfo contains no targetPath.
 * @param pCounterexampleInfo contains further information and the (optional) targetPath.
 *        If the targetPath is available, it will be used for the output.
 *        Otherwise we use backwards reachable states from pTargetState.
 * @param cexIndex should be a unique index for the CEX and will be used to enumerate files.
 * @param allTargetPathEdges can be used to collect edges. All targetPath-edges are added to it.
 * @param reallyWriteToDisk enable/disable output to files.
 */
public void exportCounterexample(final ARGState pTargetState,
        @Nullable final CounterexampleInfo pCounterexampleInfo, int cexIndex,
        @Nullable final Set<Pair<ARGState, ARGState>> allTargetPathEdges, boolean reallyWriteToDisk) {
    checkNotNull(pTargetState);

    final ARGPath targetPath = checkNotNull(getTargetPath(pTargetState, pCounterexampleInfo));
    final Set<Pair<ARGState, ARGState>> targetPathEdges = getEdgesOfPath(targetPath);
    if (allTargetPathEdges != null) {
        allTargetPathEdges.addAll(targetPathEdges);
    }

    if (reallyWriteToDisk && exportErrorPath && pCounterexampleInfo != null) {
        exportCounterexample(pTargetState, cexIndex, pCounterexampleInfo, targetPath,
                Predicates.in(targetPathEdges));
    }
}
From source file:org.grouplens.lenskit.core.RecommenderInstantiator.java
/**
 * Prune the graph, returning the set of nodes for shareable objects
 * (objects that will be replaced with instance satisfactions in the
 * final graph).
 *
 * @param graph The graph to analyze. The graph is not modified.
 * @return The set of root nodes - nodes that need to be instantiated and
 *         removed. These nodes are in topologically sorted order.
 */
private LinkedHashSet<Node> getShareableNodes(Graph graph) {
    LinkedHashSet<Node> shared = new LinkedHashSet<Node>();

    List<Node> nodes = graph.sort(graph.getNode(null));
    for (Node node : nodes) {
        if (!GraphtUtils.isShareable(node)) {
            continue;
        }

        // see if we depend on any non-shared nodes
        // since nodes are sorted, all shared nodes will have been seen
        Set<Edge> intransient = GraphtUtils.removeTransient(graph.getOutgoingEdges(node));
        boolean isShared = Iterables.all(Iterables.transform(intransient, GraphtUtils.edgeTail()),
                Predicates.in(shared));
        if (isShared) {
            shared.add(node);
        }
    }

    return shared;
}
From source file:org.obm.service.contact.VCFtoContactConverter.java
private boolean hasSupportedEmailType(ezvcard.property.Email email) {
    return email.getTypes().isEmpty()
            || Iterators.any(email.getTypes().iterator(), Predicates.in(CONSIDERED_EMAIL_TYPES));
}