Example usage for com.google.common.collect Sets filter

Introduction

On this page you can find example usage for com.google.common.collect Sets.filter.

Prototype

@GwtIncompatible("NavigableSet")
@SuppressWarnings("unchecked")
@CheckReturnValue
public static <E> NavigableSet<E> filter(NavigableSet<E> unfiltered, Predicate<? super E> predicate) 

Document

Returns the elements of a NavigableSet, unfiltered, that satisfy a predicate.
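
The returned set is a live view of the unfiltered set: changes to the backing set show through, and the predicate is applied to elements as they are accessed. Below is a minimal, self-contained sketch of the call shape; the class and variable names are illustrative and do not come from the examples that follow.

import com.google.common.base.Predicate;
import com.google.common.collect.Sets;

import java.util.Arrays;
import java.util.NavigableSet;
import java.util.TreeSet;

public class SetsFilterSketch {
    public static void main(String[] args) {
        NavigableSet<Integer> numbers = new TreeSet<>(Arrays.asList(1, 2, 3, 4, 5));
        // Filter to even numbers; the result is a view, not a copy.
        NavigableSet<Integer> evens = Sets.filter(numbers, new Predicate<Integer>() {
            @Override
            public boolean apply(Integer input) {
                return input % 2 == 0;
            }
        });
        System.out.println(evens); // [2, 4]
        numbers.add(6); // the view reflects changes to the backing set
        System.out.println(evens); // [2, 4, 6]
    }
}

Guava provides matching filter overloads for plain Set and SortedSet arguments as well; all of them return views rather than copies, so copy into an ImmutableSet or HashSet (as several of the examples below do) when a snapshot is needed.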

Usage

From source file: org.brooth.jeta.apt.processors.MetaScopeProcessor.java

private Set<? extends Element> getScopeEntities(final String scopeClassStr, final boolean isDefaultScope) {
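    // Lazily filter the full set of meta entities down to those declaring the requested scope.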
    return Sets.filter(allMetaEntities, new Predicate<Element>() {
        public boolean apply(Element input) {
            final MetaEntity a = input.getAnnotation(MetaEntity.class);
            String scope = MetacodeUtils.extractClassName(new Runnable() {
                public void run() {
                    a.scope();
                }
            });

            if (scopeClassStr.equals(scope))
                return true;

            if (isVoid(scope)) {
                if (defaultScopeStr == null)
                    throw new ProcessingException("Scope undefined for '" + input.getSimpleName().toString()
                            + "'. "
                            + "You need to set the scope via @MetaEntity(scope) or define default one as 'inject.scope.default' property");
                if (isDefaultScope)
                    return true;
            }

            return false;
        }
    });
}

From source file: org.atlasapi.equiv.EquivModule.java

@Bean
public EquivalenceUpdater<Content> contentUpdater() {

    Set<Publisher> musicPublishers = ImmutableSet.of(BBC_MUSIC, YOUTUBE, SPOTIFY, SOUNDCLOUD, RDIO, AMAZON_UK);
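    // Rovi publishers are identified by the rovicorp.com suffix on their keys.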
    Set<Publisher> roviPublishers = ImmutableSet
            .copyOf(Sets.filter(Publisher.all(), new Predicate<Publisher>() {
                @Override
                public boolean apply(Publisher input) {
                    return input.key().endsWith("rovicorp.com");
                }
            }));

    // Generally acceptable publishers.
    ImmutableSet<Publisher> acceptablePublishers = ImmutableSet.copyOf(Sets.difference(Publisher.all(),
            Sets.union(
                    ImmutableSet.of(PREVIEW_NETWORKS, BBC_REDUX, RADIO_TIMES, LOVEFILM, NETFLIX, YOUVIEW,
                            YOUVIEW_STAGE, YOUVIEW_BT, YOUVIEW_BT_STAGE),
                    Sets.union(musicPublishers, roviPublishers))));

    EquivalenceUpdater<Item> standardItemUpdater = standardItemUpdater(
            MoreSets.add(acceptablePublishers, LOVEFILM),
            ImmutableSet.of(new TitleMatchingItemScorer(), new SequenceItemScorer())).build();
    EquivalenceUpdater<Container> topLevelContainerUpdater = topLevelContainerUpdater(
            MoreSets.add(acceptablePublishers, LOVEFILM));

    Set<Publisher> nonStandardPublishers = ImmutableSet.copyOf(Sets.union(
            ImmutableSet.of(ITUNES, BBC_REDUX, RADIO_TIMES, FACEBOOK, LOVEFILM, NETFLIX, RTE, YOUVIEW,
                    YOUVIEW_STAGE, YOUVIEW_BT, YOUVIEW_BT_STAGE, TALK_TALK, PA, BT_VOD, BETTY),
            Sets.union(musicPublishers, roviPublishers)));
    final EquivalenceUpdaters updaters = new EquivalenceUpdaters();
    for (Publisher publisher : Iterables.filter(Publisher.all(), not(in(nonStandardPublishers)))) {
        updaters.register(publisher,
                SourceSpecificEquivalenceUpdater.builder(publisher).withItemUpdater(standardItemUpdater)
                        .withTopLevelContainerUpdater(topLevelContainerUpdater)
                        .withNonTopLevelContainerUpdater(NullEquivalenceUpdater.<Container>get()).build());
    }

    updaters.register(RADIO_TIMES,
            SourceSpecificEquivalenceUpdater.builder(RADIO_TIMES).withItemUpdater(rtItemEquivalenceUpdater())
                    .withTopLevelContainerUpdater(NullEquivalenceUpdater.<Container>get())
                    .withNonTopLevelContainerUpdater(NullEquivalenceUpdater.<Container>get()).build());

    registerYouViewUpdaterForPublisher(YOUVIEW,
            Sets.union(Sets.difference(acceptablePublishers, ImmutableSet.of(YOUVIEW_STAGE)),
                    ImmutableSet.of(YOUVIEW)),
            updaters);

    registerYouViewUpdaterForPublisher(YOUVIEW_STAGE,
            Sets.union(Sets.difference(acceptablePublishers, ImmutableSet.of(YOUVIEW)),
                    ImmutableSet.of(YOUVIEW_STAGE)),
            updaters);

    registerYouViewUpdaterForPublisher(YOUVIEW_BT,
            Sets.union(Sets.difference(acceptablePublishers, ImmutableSet.of(YOUVIEW_BT_STAGE)),
                    ImmutableSet.of(YOUVIEW_BT)),
            updaters);

    registerYouViewUpdaterForPublisher(YOUVIEW_BT_STAGE,
            Sets.union(Sets.difference(acceptablePublishers, ImmutableSet.of(YOUVIEW_BT)),
                    ImmutableSet.of(YOUVIEW_BT_STAGE)),
            updaters);

    registerYouViewUpdaterForPublisher(YOUVIEW_SCOTLAND_RADIO,
            Sets.union(Sets.difference(acceptablePublishers, ImmutableSet.of(YOUVIEW_SCOTLAND_RADIO_STAGE)),
                    ImmutableSet.of(YOUVIEW_SCOTLAND_RADIO)),
            updaters);

    registerYouViewUpdaterForPublisher(YOUVIEW_SCOTLAND_RADIO_STAGE,
            Sets.union(Sets.difference(acceptablePublishers, ImmutableSet.of(YOUVIEW_SCOTLAND_RADIO)),
                    ImmutableSet.of(YOUVIEW_SCOTLAND_RADIO_STAGE)),
            updaters);

    Set<Publisher> reduxPublishers = Sets.union(acceptablePublishers, ImmutableSet.of(BBC_REDUX));

    updaters.register(BBC_REDUX,
            SourceSpecificEquivalenceUpdater.builder(BBC_REDUX)
                    .withItemUpdater(broadcastItemEquivalenceUpdater(reduxPublishers, Score.nullScore(),
                            Predicates.alwaysTrue()))
                    .withTopLevelContainerUpdater(broadcastItemContainerEquivalenceUpdater(reduxPublishers))
                    .withNonTopLevelContainerUpdater(NullEquivalenceUpdater.<Container>get()).build());

    updaters.register(BETTY, SourceSpecificEquivalenceUpdater.builder(BETTY)
            .withItemUpdater(aliasIdentifiedBroadcastItemEquivalenceUpdater(ImmutableSet.of(BETTY, YOUVIEW)))
            .withTopLevelContainerUpdater(NullEquivalenceUpdater.<Container>get())
            .withNonTopLevelContainerUpdater(NullEquivalenceUpdater.<Container>get()).build());

    Set<Publisher> facebookAcceptablePublishers = Sets.union(acceptablePublishers, ImmutableSet.of(FACEBOOK));
    updaters.register(FACEBOOK, SourceSpecificEquivalenceUpdater.builder(FACEBOOK)
            .withItemUpdater(NullEquivalenceUpdater.<Item>get())
            .withTopLevelContainerUpdater(facebookContainerEquivalenceUpdater(facebookAcceptablePublishers))
            .withNonTopLevelContainerUpdater(NullEquivalenceUpdater.<Container>get()).build());

    updaters.register(ITUNES,
            SourceSpecificEquivalenceUpdater.builder(ITUNES)
                    .withItemUpdater(vodItemUpdater(acceptablePublishers).build())
                    .withTopLevelContainerUpdater(vodContainerUpdater(acceptablePublishers))
                    .withNonTopLevelContainerUpdater(NullEquivalenceUpdater.<Container>get()).build());

    Set<Publisher> lfPublishers = Sets.union(acceptablePublishers, ImmutableSet.of(LOVEFILM));
    updaters.register(LOVEFILM, SourceSpecificEquivalenceUpdater.builder(LOVEFILM)
            .withItemUpdater(vodItemUpdater(lfPublishers).withScorer(new SeriesSequenceItemScorer()).build())
            .withTopLevelContainerUpdater(vodContainerUpdater(lfPublishers))
            .withNonTopLevelContainerUpdater(NullEquivalenceUpdater.<Container>get()).build());

    Set<Publisher> netflixPublishers = ImmutableSet.of(BBC, NETFLIX);
    updaters.register(NETFLIX,
            SourceSpecificEquivalenceUpdater.builder(NETFLIX)
                    .withItemUpdater(vodItemUpdater(netflixPublishers).build())
                    .withTopLevelContainerUpdater(vodContainerUpdater(netflixPublishers))
                    .withNonTopLevelContainerUpdater(NullEquivalenceUpdater.<Container>get()).build());

    Set<Publisher> rtePublishers = ImmutableSet.of(PA);
    updaters.register(RTE,
            SourceSpecificEquivalenceUpdater.builder(RTE)
                    .withTopLevelContainerUpdater(vodContainerUpdater(rtePublishers))
                    .withItemUpdater(NullEquivalenceUpdater.<Item>get())
                    .withNonTopLevelContainerUpdater(NullEquivalenceUpdater.<Container>get()).build());

    updaters.register(TALK_TALK,
            SourceSpecificEquivalenceUpdater.builder(TALK_TALK)
                    .withItemUpdater(vodItemUpdater(acceptablePublishers).build())
                    .withTopLevelContainerUpdater(vodContainerUpdater(acceptablePublishers))
                    .withNonTopLevelContainerUpdater(vodSeriesUpdater(acceptablePublishers)).build());

    Set<Publisher> btVodPublishers = ImmutableSet.of(PA);
    updaters.register(BT_VOD, SourceSpecificEquivalenceUpdater.builder(BT_VOD)
            .withItemUpdater(vodItemUpdater(btVodPublishers).withScorer(new SeriesSequenceItemScorer()).build())
            .withTopLevelContainerUpdater(vodContainerUpdater(btVodPublishers))
            .withNonTopLevelContainerUpdater(vodSeriesUpdater(btVodPublishers)).build());

    Set<Publisher> itunesAndMusicPublishers = Sets.union(musicPublishers, ImmutableSet.of(ITUNES));
    ContentEquivalenceUpdater<Item> musicPublisherUpdater = ContentEquivalenceUpdater.<Item>builder()
            .withGenerator(new TitleSearchGenerator<Item>(searchResolver, Song.class, itunesAndMusicPublishers,
                    new SongTitleTransform(), 100))
            .withScorer(new CrewMemberScorer(new SongCrewMemberExtractor()))
            .withCombiner(new NullScoreAwareAveragingCombiner<Item>()).withFilter(AlwaysTrueFilter.<Item>get())
            .withExtractor(new MusicEquivalenceExtractor())
            .withHandler(new BroadcastingEquivalenceResultHandler<Item>(ImmutableList.of(
                    EpisodeFilteringEquivalenceResultHandler.relaxed(
                            new LookupWritingEquivalenceHandler<Item>(lookupWriter, itunesAndMusicPublishers),
                            equivSummaryStore),
                    new ResultWritingEquivalenceHandler<Item>(equivalenceResultStore()),
                    new EquivalenceSummaryWritingHandler<Item>(equivSummaryStore))))
            .build();

    for (Publisher publisher : musicPublishers) {
        updaters.register(publisher,
                SourceSpecificEquivalenceUpdater.builder(publisher).withItemUpdater(musicPublisherUpdater)
                        .withTopLevelContainerUpdater(NullEquivalenceUpdater.<Container>get())
                        .withNonTopLevelContainerUpdater(NullEquivalenceUpdater.<Container>get()).build());
    }

    ImmutableSet<Publisher> roviMatchPublishers = ImmutableSet.of(Publisher.BBC, Publisher.PA,
            Publisher.YOUVIEW, Publisher.BBC_NITRO, Publisher.BBC_REDUX, Publisher.ITV, Publisher.C4,
            Publisher.C4_PMLSD, Publisher.C4_PMLSD_P06, Publisher.FIVE);
    updaters.register(Publisher.ROVI_EN_GB, roviUpdater(Publisher.ROVI_EN_GB, roviMatchPublishers));
    updaters.register(Publisher.ROVI_EN_US, roviUpdater(Publisher.ROVI_EN_US, roviMatchPublishers));

    return updaters;
}

From source file: org.estatio.dom.agreement.Agreement.java

public String validateNewRole(final AgreementRoleType art, final Party newParty, final LocalDate startDate,
        final LocalDate endDate) {

    Party currentParty = findCurrentOrMostRecentParty(art);
    if (currentParty != null
            && !Objects.equal(currentParty.getApplicationTenancy(), newParty.getApplicationTenancy())) {
        return "The application level of the new party must be the same as that of the current party";
    }

    if (startDate != null && endDate != null && startDate.isAfter(endDate)) {
        return "End date cannot be earlier than start date";
    }
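    // A non-empty filtered view means a matching role already exists for this type.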
    if (!Sets.filter(getRoles(), art.matchingRole()).isEmpty()) {
        return "Add a successor/predecessor to existing agreement role";
    }
    return null;
}

From source file: com.google.devtools.build.android.ziputils.SplitZip.java

/**
 * Parses the entries and assigns each entry to an output file.
 */
private void split() {
    for (ZipIn in : inputs) {
        CentralDirectory cdir = centralDirectories.get(in.getFilename());
        for (DirectoryEntry entry : cdir.list()) {
            String filename = normalizedFilename(entry.getFilename());
            if (!inputFilter.apply(filename)) {
                continue;
            }
            if (filename.endsWith(".class")) {
                // Only pass classes to the splitter, so that it can do the best job
                // possible distributing them across output files.
                classes.add(filename);
            } else if (!filename.endsWith("/")) {
                // Non class files (resources) are either assigned to the first
                // output file, or to a specified resource output file.
                assignments.put(filename, resourceOut);
            }
        }
    }
    Splitter splitter = new Splitter(outputs.size(), classes.size());
    if (filter != null) {
        // Assign files in the filter to the first output file.
        splitter.assign(Sets.filter(filter, inputFilter));
        splitter.nextShard(); // minimal initial shard
    }
    for (String path : classes) {
        // Use normalized filename so the filter file doesn't have to change
        int assignment = splitter.assign(path);
        Preconditions.checkState(assignment >= 0 && assignment < zipOuts.length);
        assignments.put(path, zipOuts[assignment]);
    }
}

From source file: com.kolich.curacao.mappers.MapperTable.java

private final ImmutableMap<Class<?>, ControllerReturnTypeMapper<?>> buildReturnTypeMapperTable(
        final Set<Class<?>> mapperSet) {
    // Using a LinkedHashMap internally because insertion order is
    // very important in this case.
    final Map<Class<?>, ControllerReturnTypeMapper<?>> mappers = Maps.newLinkedHashMap(); // Preserves insertion order.
    // Filter the incoming mapper set to only return type mappers.
    final Set<Class<?>> filtered = Sets.filter(mapperSet,
            Predicates.assignableFrom(ControllerReturnTypeMapper.class));
    logger__.debug("Found {} return type mappers annotated with @{}", filtered.size(), MAPPER_ANNOTATION_SN);
    // For each discovered mapper class...
    for (final Class<?> mapper : filtered) {
        logger__.debug("Found @{}: return type mapper {}", MAPPER_ANNOTATION_SN, mapper.getCanonicalName());
        try {
            // Locate a single constructor worthy of injecting with
            // components, if any.  May be null.
            final Constructor<?> ctor = getInjectableConstructor(mapper);
            ControllerReturnTypeMapper<?> instance = null;
            if (ctor == null) {
                // Class.newInstance() is evil, so we do the ~right~ thing
                // here to instantiate a new instance of the mapper using
                // the preferred getConstructor() idiom.
                instance = (ControllerReturnTypeMapper<?>) mapper.getConstructor().newInstance();
            } else {
                final Class<?>[] types = ctor.getParameterTypes();
                final Object[] params = new Object[types.length];
                for (int i = 0, l = types.length; i < l; i++) {
                    params[i] = componentTable_.getComponentForType(types[i]);
                }
                instance = (ControllerReturnTypeMapper<?>) ctor.newInstance(params);
            }
            // Note the key in the map is the parameterized generic type
            // hanging off the mapper.
            mappers.put(getGenericType(mapper), instance);
        } catch (Exception e) {
            logger__.error("Failed to instantiate mapper instance: {}", mapper.getCanonicalName(), e);
        }
    }
    // Add the "default" mappers to the ~end~ of the linked hash map, being
    // careful not to overwrite any user-defined mappers.  That is, if a
    // user has declared their own mappers for one of our default types,
    // we should not blindly "putAll" and overwrite them.
    // <https://github.com/markkolich/curacao/issues/9>
    for (final Map.Entry<Class<?>, ControllerReturnTypeMapper<?>> entry : defaultReturnTypeMappers__
            .entrySet()) {
        // Only add the default mapper if a user-defined one does not exist.
        if (!mappers.containsKey(entry.getKey())) {
            mappers.put(entry.getKey(), entry.getValue());
        }
    }
    return ImmutableMap.copyOf(mappers);
}

From source file: org.apache.hadoop.hive.druid.DruidStorageHandler.java

public void publishSegments(Table table, boolean overwrite) throws MetaException {
    if (MetaStoreUtils.isExternalTable(table)) {
        return;
    }
    Lifecycle lifecycle = new Lifecycle();
    LOG.info("Committing table {} to the druid metastore", table.getDbName());
    final Path tableDir = getSegmentDescriptorDir();
    try {
        List<DataSegment> segmentList = DruidStorageHandlerUtils.getPublishedSegments(tableDir, getConf());
        LOG.info("Found {} segments under path {}", segmentList.size(), tableDir);
        final String dataSourceName = table.getParameters().get(Constants.DRUID_DATA_SOURCE);
        final String segmentDirectory = table.getParameters().get(Constants.DRUID_SEGMENT_DIRECTORY) != null
                ? table.getParameters().get(Constants.DRUID_SEGMENT_DIRECTORY)
                : HiveConf.getVar(getConf(), HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY);
        DruidStorageHandlerUtils.publishSegments(connector, druidMetadataStorageTablesConfig, dataSourceName,
                segmentList, overwrite, segmentDirectory, getConf());
        final String coordinatorAddress = HiveConf.getVar(getConf(),
                HiveConf.ConfVars.HIVE_DRUID_COORDINATOR_DEFAULT_ADDRESS);
        int maxTries = HiveConf.getIntVar(getConf(), HiveConf.ConfVars.HIVE_DRUID_MAX_TRIES);
        LOG.info("checking load status from coordinator {}", coordinatorAddress);

        String coordinatorResponse = null;
        try {
            coordinatorResponse = RetryUtils.retry(new Callable<String>() {
                @Override
                public String call() throws Exception {
                    return DruidStorageHandlerUtils.getURL(getHttpClient(),
                            new URL(String.format("http://%s/status", coordinatorAddress)));
                }
            }, new Predicate<Throwable>() {
                @Override
                public boolean apply(@Nullable Throwable input) {
                    return input instanceof IOException;
                }
            }, maxTries);
        } catch (Exception e) {
            console.printInfo("Will skip waiting for data loading");
            return;
        }
        if (Strings.isNullOrEmpty(coordinatorResponse)) {
            console.printInfo("Will skip waiting for data loading");
            return;
        }
        console.printInfo(String.format("Waiting for the loading of [%s] segments", segmentList.size()));
        long passiveWaitTimeMs = HiveConf.getLongVar(getConf(), HiveConf.ConfVars.HIVE_DRUID_PASSIVE_WAIT_TIME);
        ImmutableSet<URL> setOfUrls = FluentIterable.from(segmentList)
                .transform(new Function<DataSegment, URL>() {
                    @Override
                    public URL apply(DataSegment dataSegment) {
                        try {
                            // Make sure we use UTC, since most Druid clusters use UTC by default.
                            return new URL(
                                    String.format("http://%s/druid/coordinator/v1/datasources/%s/segments/%s",
                                            coordinatorAddress, dataSourceName,
                                            DataSegment.makeDataSegmentIdentifier(dataSegment.getDataSource(),
                                                    new DateTime(dataSegment.getInterval().getStartMillis(),
                                                            DateTimeZone.UTC),
                                                    new DateTime(dataSegment.getInterval().getEndMillis(),
                                                            DateTimeZone.UTC),
                                                    dataSegment.getVersion(), dataSegment.getShardSpec())));
                        } catch (MalformedURLException e) {
                            Throwables.propagate(e);
                        }
                        return null;
                    }
                }).toSet();

        int numRetries = 0;
        while (numRetries++ < maxTries && !setOfUrls.isEmpty()) {
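            // Keep only the URLs of segments the coordinator has not yet confirmed as loaded.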
            setOfUrls = ImmutableSet.copyOf(Sets.filter(setOfUrls, new Predicate<URL>() {
                @Override
                public boolean apply(URL input) {
                    try {
                        String result = DruidStorageHandlerUtils.getURL(getHttpClient(), input);
                        LOG.debug("Checking segment {} response is {}", input, result);
                        return Strings.isNullOrEmpty(result);
                    } catch (IOException e) {
                        LOG.error(String.format("Error while checking URL [%s]", input), e);
                        return true;
                    }
                }
            }));

            try {
                if (!setOfUrls.isEmpty()) {
                    Thread.sleep(passiveWaitTimeMs);
                }
            } catch (InterruptedException e) {
                Thread.interrupted();
                Throwables.propagate(e);
            }
        }
        if (!setOfUrls.isEmpty()) {
            // Not throwing an exception, since a transient issue may be blocking the loading.
            console.printError(
                    String.format("Wait time exhausted and we have [%s] out of [%s] segments not loaded yet",
                            setOfUrls.size(), segmentList.size()));
        }
    } catch (IOException e) {
        LOG.error("Exception while commit", e);
        Throwables.propagate(e);
    } finally {
        cleanWorkingDir();
        lifecycle.stop();
    }
}

From source file: org.estatio.dom.agreement.AgreementRole.java

public String validateAddCommunicationChannel(final AgreementRoleCommunicationChannelType type,
        final CommunicationChannel communicationChannel, final LocalDate startDate, final LocalDate endDate) {
    if (startDate != null && endDate != null && startDate.isAfter(endDate)) {
        return "End date cannot be earlier than start date";
    }
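    // A non-empty filtered view means a matching communication channel already exists.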
    if (!Sets.filter(getCommunicationChannels(), type.matchingCommunicationChannel()).isEmpty()) {
        return "Add a successor/predecessor from existing communication channel";
    }
    final SortedSet<CommunicationChannel> partyChannels = communicationChannelContributions
            .communicationChannels(getParty());
    if (!partyChannels.contains(communicationChannel)) {
        return "Communication channel must be one of those of this party";
    }
    return null;
}

From source file: com.axelor.db.JPA.java

/**
 * Return all the non-abstract models found in all the activated modules.
 *
 * @return Set of model classes
 */
public static Set<Class<?>> models() {

    return Sets.filter(JpaScanner.findModels(), new Predicate<Class<?>>() {

        @Override
        public boolean apply(Class<?> input) {
            return !Modifier.isAbstract(input.getModifiers());
        }
    });
}

From source file: module.siadap.domain.util.SiadapProcessCounter.java

public static Set<Siadap> getSiadapsInState(final int year, final SiadapProcessStateEnum... states) {

    HashSet<Siadap> allSiadapsInGivenStates = new HashSet<Siadap>();

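    // Keep only the SIADAP processes of the given year that are in one of the requested states.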
    allSiadapsInGivenStates
            .addAll(Sets.filter(SiadapRootModule.getInstance().getSiadapsSet(), new Predicate<Siadap>() {

                @Override
                public boolean apply(Siadap siadapInstance) {
                    if (siadapInstance == null) {
                        return false;
                    }
                    if (siadapInstance.getYear() != year) {
                        return false;
                    }
                    for (SiadapProcessStateEnum state : states) {
                        if (siadapInstance.getState().equals(state)) {
                            return true;
                        }
                    }
                    return false;
                }
            }));

    return allSiadapsInGivenStates;
}

From source file: org.caleydo.view.search.internal.RcpSearchView.java

/**
 * Implements the search logic.
 *
 * @param pattern the compiled search pattern (built from the query, regex and case-sensitivity options)
 * @return a table containing all matching id types and their matching ids
 */
private com.google.common.collect.Table<IDCategory, IDType, Set<?>> searchImpl(final Pattern pattern) {
    Predicate<Object> searchQuery = new Predicate<Object>() {
        @Override
        public boolean apply(Object in) {
            return in != null && pattern.matcher(in.toString()).matches();
        }
    };

    com.google.common.collect.Table<IDCategory, IDType, Set<?>> result = HashBasedTable.create();

    for (Button b : searchWithinIDType) {
        if (!b.getSelection()) // not selected, skip
            continue;

        IDType idType = (IDType) b.getData();

        // find all ids and check the predicate
        IDMappingManager mappingManager = IDMappingManagerRegistry.get()
                .getIDMappingManager(idType.getIDCategory());

        Set<?> ids = mappingManager.getAllMappedIDs(idType);
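        // Copy the filtered view into a HashSet so the result is a stable snapshot, not a live view.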
        ids = new HashSet<>(Sets.filter(ids, searchQuery));
        if (ids.isEmpty())
            continue;
        result.put(idType.getIDCategory(), idType, ids);
    }
    return result;
}