Example usage for com.google.common.collect Maps filterKeys

Introduction

This page collects example usages of com.google.common.collect.Maps#filterKeys from open-source projects.

Prototype

@CheckReturnValue
public static <K, V> BiMap<K, V> filterKeys(BiMap<K, V> unfiltered, final Predicate<? super K> keyPredicate) 

Document

Returns a bimap containing the mappings in unfiltered whose keys satisfy a predicate. Maps.filterKeys is overloaded: besides the BiMap variant shown above, Guava provides the same operation for Map, SortedMap, and NavigableMap, each returning a live, filtered view rather than a copy.
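
A minimal, self-contained sketch of the Map overload (names and values are illustrative, not taken from the examples below). Note that the result is a live view of the input map, not a copy:

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import java.util.LinkedHashMap;
import java.util.Map;

public class FilterKeysDemo {
    public static void main(String[] args) {
        Map<String, Integer> ports = new LinkedHashMap<>(ImmutableMap.of("http", 80, "https", 443, "ftp", 21));

        // Live view of "ports" restricted to keys starting with "http".
        Map<String, Integer> web = Maps.filterKeys(ports, key -> key.startsWith("http"));
        System.out.println(web); // {http=80, https=443}

        // The view writes through: removing from it also removes from "ports".
        web.remove("http");
        System.out.println(ports.containsKey("http")); // false
    }
}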

Usage

From source file:eu.numberfour.n4js.ui.containers.NfarStorageMapper.java

private void updateCache(IN4JSEclipseProject project) {
    Set<URI> libArchives = knownLibArchives.get(project.getLocation());
    if (libArchives != null) {
        Map<URI, Set<URI>> filteredMap = Maps.filterKeys(knownLibArchives,
                Predicates.not(Predicates.equalTo(project.getLocation())));
        Set<URI> remainingLibs = Sets.newHashSet(Iterables.concat(filteredMap.values()));
        for (URI archive : libArchives) {
            if (!remainingLibs.contains(archive)) {
                knownEntries.remove(archive);
            }
        }
    }
    if (project.exists()) {
        libArchives = Sets.newHashSetWithExpectedSize(3);
        List<? extends IN4JSArchive> libraries = project.getLibraries();
        for (IN4JSArchive archive : libraries) {
            URI location = archive.getLocation();
            libArchives.add(location);
            if (!knownEntries.containsKey(location)) {
                Set<URI> entryURIs = Sets.newHashSet();
                traverseArchive(archive, entryURIs);
                knownEntries.put(location, Collections.unmodifiableSet(entryURIs));
            }
        }
        knownLibArchives.put(project.getLocation(), libArchives);
    } else {
        knownLibArchives.remove(project.getLocation());
    }
}
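
Distilled, the pattern above filters the shared map to a view that excludes the current project's key, so the remaining values can be aggregated without mutating knownLibArchives. A reduced sketch with hypothetical names (java.net.URI stands in for the URI type used above):

import com.google.common.base.Predicates;
import com.google.common.collect.Iterables;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import java.net.URI;
import java.util.Map;
import java.util.Set;

final class ArchiveIndex {
    /** All values of {@code byProject} except those of {@code excluded}, without mutating the map. */
    static Set<URI> archivesOfOtherProjects(Map<URI, Set<URI>> byProject, URI excluded) {
        Map<URI, Set<URI>> others = Maps.filterKeys(byProject,
                Predicates.not(Predicates.equalTo(excluded)));
        return Sets.newHashSet(Iterables.concat(others.values()));
    }
}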

From source file:org.diqube.execution.steps.GroupIdAdjustingStep.java

private void execute(boolean checkIfDone) {
    if (!incomingGroupIdToValues.isEmpty()) {
        List<Long> newGroupIds = new ArrayList<>();

        List<Long> incomingGroupIds = new ArrayList<Long>(
                Sets.difference(incomingGroupIdToValues.keySet(), allKnownGroupIds));
        incomingGroupIdToValues.keySet().removeAll(allKnownGroupIds);
        List<Long> groupIdsWorkedOn = new ArrayList<Long>();
        for (Long groupId : incomingGroupIds) {
            Map<String, Object> values = incomingGroupIdToValues.get(groupId);
            if (Sets.difference(groupedColumnNames, values.keySet()).isEmpty()) {
                values = Maps.filterKeys(new HashMap<String, Object>(values),
                        colName -> groupedColumnNames.contains(colName));
                if (valuesToGroupId.containsKey(values)) {
                    // we found a new groupId mapping!
                    long availableGroupId = valuesToGroupId.get(values);
                    groupIdMap.put(groupId, availableGroupId);
                    logger.trace("Mapping new group ID {} to group ID {}", groupId, availableGroupId);
                } else {
                    // new group found
                    valuesToGroupId.put(values, groupId);
                    groupIdMap.put(groupId, groupId);
                    newGroupIds.add(groupId);
                    logger.trace("Found new group ID {}", groupId);
                }
                groupIdsWorkedOn.add(groupId);
            }
        }
        for (Long groupIdDone : groupIdsWorkedOn) {
            incomingGroupIdToValues.remove(groupIdDone);
            allKnownGroupIds.add(groupIdDone);
        }

        if (!newGroupIds.isEmpty())
            forEachOutputConsumerOfType(RowIdConsumer.class,
                    c -> c.consume(newGroupIds.stream().toArray(l -> new Long[l])));
    }

    processIncomingGroupIntermediaries();

    if (checkIfDone) {
        if ((groupInputIsDone.get() && isEmpty(incomingGroupIntermediaries)) || // all groups processed.
        // all inputs are done, though we might not have processed everything yet.
                (groupInputIsDone.get() && columnValueSourceIsDone.get())) {

            if (groupInputIsDone.get() && columnValueSourceIsDone.get())
                // make sure we have processed everything, so let's execute one additional time.
                execute(false);

            forEachOutputConsumerOfType(GenericConsumer.class, c -> c.sourceIsDone());
            doneProcessing();
        }
    }
}
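
The central move is restricting a copied map to a whitelist of keys; since com.google.common.base.Predicate has a single abstract method, a lambda or method reference serves as the predicate. A reduced sketch with hypothetical names:

import com.google.common.collect.Maps;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

final class GroupKeys {
    /** A view of a defensive copy of {@code values}, restricted to the keys in {@code wanted}. */
    static Map<String, Object> restrictTo(Map<String, Object> values, Set<String> wanted) {
        // Copy first so later changes to "values" cannot leak into the filtered result.
        return Maps.filterKeys(new HashMap<>(values), wanted::contains);
    }
}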

From source file:org.jfrog.hudson.release.UnifiedPromoteBuildAction.java

/**
 * Form submission is calling this method
 */
@SuppressWarnings({ "UnusedDeclaration" })
public void doSubmit(StaplerRequest req, StaplerResponse resp) throws IOException, ServletException {
    getACL().checkPermission(getPermission());

    bindParameters(req);
    // current user is bound to the thread and will be lost in the perform method
    User user = User.current();
    String ciUser = (user == null) ? "anonymous" : user.getId();

    JSONObject formData = req.getSubmittedForm();
    if (formData.has("promotionPlugin")) {
        JSONObject pluginSettings = formData.getJSONObject("promotionPlugin");
        if (pluginSettings.has("pluginName")) {
            String pluginName = pluginSettings.getString("pluginName");
            if (!UserPluginInfo.NO_PLUGIN_KEY.equals(pluginName)) {
                PluginSettings settings = new PluginSettings();
                Map<String, String> paramMap = Maps.newHashMap();
                settings.setPluginName(pluginName);
                Map<String, Object> filteredPluginSettings = Maps.filterKeys(pluginSettings,
                        new Predicate<String>() {
                            public boolean apply(String input) {
                                return StringUtils.isNotBlank(input) && !"pluginName".equals(input);
                            }
                        });
                for (Map.Entry<String, Object> settingsEntry : filteredPluginSettings.entrySet()) {
                    String key = settingsEntry.getKey();
                    paramMap.put(key, pluginSettings.getString(key));
                }
                paramMap.put("ciUser", ciUser);
                if (!paramMap.isEmpty()) {
                    settings.setParamMap(paramMap);
                }
                setPromotionPlugin(settings);
            }
        }
    }

    ArtifactoryServer server = configurator.getArtifactoryServer();

    new PromoteWorkerThread(server, CredentialManager.getPreferredDeployer(configurator, server), ciUser)
            .start();

    resp.sendRedirect(".");
}
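
The anonymous Predicate drops blank keys and the reserved "pluginName" key before the remaining form fields are copied into the parameter map. The same filter as a stand-alone helper, using plain Java in place of StringUtils.isNotBlank (names are illustrative):

import com.google.common.collect.Maps;
import java.util.Map;

final class FormParams {
    /** Submitted settings without blank keys and without the reserved "pluginName" key. */
    static Map<String, Object> pluginParams(Map<String, Object> submitted) {
        return Maps.filterKeys(submitted,
                key -> key != null && !key.trim().isEmpty() && !"pluginName".equals(key));
    }
}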

From source file:com.mgmtp.jfunk.common.util.Configuration.java

/**
 * If properties are present whose keys start with {@link JFunkConstants#SYSTEM_PROPERTIES}, the
 * corresponding values are treated as property files and loaded here.
 */
private void loadExtraFiles(final String filterPrefix, final boolean preserveExisting) {
    Map<String, String> view = Maps.filterKeys(this, Predicates.startsWith(filterPrefix));
    while (true) {
        if (view.isEmpty()) {
            break;
        }

        Queue<String> fileKeys = Queues.newArrayDeque(view.values());

        // we need to keep them separately in order to be able to reload them (see put method)
        extraFileProperties.addAll(fileKeys);

        // Remove original keys in order to prevent a stack overflow
        view.clear();

        for (String fileNameKey = null; (fileNameKey = fileKeys.poll()) != null;) {
            // Recursion
            String fileName = processPropertyValue(fileNameKey);
            if (PLACEHOLDER_PATTERN.matcher(fileName).find()) {
                // not all placeholders were resolved, so we enqueue it again to process another file first
                fileKeys.offer(fileName);
            } else {
                load(fileName, preserveExisting);
            }
        }
    }
}
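
Two details carry this method. First, Maps.filterKeys returns a live view, so view.clear() removes the matching entries from the backing Configuration itself; the comment about preventing a stack overflow relies on this write-through behavior. Second, Predicates.startsWith is not part of Guava's Predicates class and appears to be a project-local helper; the sketch below uses a plain lambda instead:

import com.google.common.collect.Maps;
import java.util.HashMap;
import java.util.Map;

public class PrefixViewDemo {
    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("system.properties.extra", "extra.properties");
        props.put("archive.dir", "/tmp/out");

        // Live view of all entries whose keys carry the prefix.
        Map<String, String> view = Maps.filterKeys(props, key -> key.startsWith("system.properties."));

        // Clearing the view writes through to the backing map.
        view.clear();
        System.out.println(props); // {archive.dir=/tmp/out}
    }
}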

From source file:io.prestosql.plugin.hive.HivePartitionManager.java

public HivePartitionResult getPartitions(SemiTransactionalHiveMetastore metastore,
        ConnectorTableHandle tableHandle, Constraint<ColumnHandle> constraint) {
    HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle;
    TupleDomain<ColumnHandle> effectivePredicate = constraint.getSummary();

    SchemaTableName tableName = hiveTableHandle.getSchemaTableName();
    Table table = getTable(metastore, tableName);
    Optional<HiveBucketHandle> hiveBucketHandle = getHiveBucketHandle(table);

    List<HiveColumnHandle> partitionColumns = getPartitionKeyColumnHandles(table);

    if (effectivePredicate.isNone()) {
        return new HivePartitionResult(partitionColumns, ImmutableList.of(), none(), none(), none(),
                hiveBucketHandle, Optional.empty());
    }

    Optional<HiveBucketFilter> bucketFilter = getHiveBucketFilter(table, effectivePredicate);
    TupleDomain<HiveColumnHandle> compactEffectivePredicate = toCompactTupleDomain(effectivePredicate,
            domainCompactionThreshold);

    if (partitionColumns.isEmpty()) {
        return new HivePartitionResult(partitionColumns, ImmutableList.of(new HivePartition(tableName)),
                compactEffectivePredicate, effectivePredicate, none(), hiveBucketHandle, bucketFilter);
    }

    List<Type> partitionTypes = partitionColumns.stream()
            .map(column -> typeManager.getType(column.getTypeSignature())).collect(toList());

    List<String> partitionNames = getFilteredPartitionNames(metastore, tableName, partitionColumns,
            effectivePredicate);

    Iterable<HivePartition> partitionsIterable = () -> partitionNames.stream()
            // Apply extra filters which could not be done by getFilteredPartitionNames
            .map(partitionName -> parseValuesAndFilterPartition(tableName, partitionName, partitionColumns,
                    partitionTypes, constraint))
            .filter(Optional::isPresent).map(Optional::get).iterator();

    // All partition key domains will be fully evaluated, so we don't need to include those
    TupleDomain<ColumnHandle> remainingTupleDomain = TupleDomain.withColumnDomains(
            Maps.filterKeys(effectivePredicate.getDomains().get(), not(Predicates.in(partitionColumns))));
    TupleDomain<ColumnHandle> enforcedTupleDomain = TupleDomain.withColumnDomains(
            Maps.filterKeys(effectivePredicate.getDomains().get(), Predicates.in(partitionColumns)));
    return new HivePartitionResult(partitionColumns, partitionsIterable, compactEffectivePredicate,
            remainingTupleDomain, enforcedTupleDomain, hiveBucketHandle, bucketFilter);
}
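
The closing lines split one map into two complementary views with Predicates.in and its negation, so every domain lands either in the enforced or in the remaining tuple domain. Distilled into a generic helper (names are illustrative):

import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import java.util.Map;
import java.util.Set;

final class DomainSplit {
    /** The entries of {@code domains} whose keys are partition columns. */
    static <K, V> Map<K, V> enforced(Map<K, V> domains, Set<K> partitionKeys) {
        return Maps.filterKeys(domains, Predicates.in(partitionKeys));
    }

    /** The complementary entries, i.e. those keyed by non-partition columns. */
    static <K, V> Map<K, V> remaining(Map<K, V> domains, Set<K> partitionKeys) {
        return Maps.filterKeys(domains, Predicates.not(Predicates.in(partitionKeys)));
    }
}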

From source file:org.diqube.execution.steps.FilterRequestedColumnsAndActiveRowIdsStep.java

/**
 * Filters all values of all columns that have one of the specified rowIds and informs {@link ColumnValueConsumer}s
 * about them.
 */
private void processValues(Map<String, Map<Long, Object>> values, Set<Long> rowIds) {
    for (Entry<String, Map<Long, Object>> valueEntry : values.entrySet()) {
        Set<Long> activeValueRowIds = Sets.intersection(valueEntry.getValue().keySet(), rowIds);
        if (!activeValueRowIds.isEmpty()) {
            Map<Long, Object> newValues = Maps.filterKeys(valueEntry.getValue(),
                    rowId -> activeValueRowIds.contains(rowId));

            logger.trace("Sending out values for {}, rowIds (limit) {}", valueEntry.getKey(),
                    Iterables.limit(activeValueRowIds, 100));

            forEachOutputConsumerOfType(ColumnValueConsumer.class,
                    c -> c.consume(valueEntry.getKey(), newValues));
        }
    }
}
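
The Sets.intersection guard ensures a filtered view is only built when at least one requested row ID is actually present. A reduced sketch with hypothetical names:

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import java.util.Map;
import java.util.Set;

final class RowFilter {
    /** The entries of {@code valuesByRowId} whose row IDs appear in {@code activeRowIds}. */
    static Map<Long, Object> activeValues(Map<Long, Object> valuesByRowId, Set<Long> activeRowIds) {
        Set<Long> hits = Sets.intersection(valuesByRowId.keySet(), activeRowIds);
        return hits.isEmpty() ? ImmutableMap.of() : Maps.filterKeys(valuesByRowId, hits::contains);
    }
}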

From source file:net.derquinse.bocas.AbstractGuavaCachingBocas.java

@Override
protected void putAll(Map<ByteString, MemoryByteSource> entries) {
    if (alwaysWrite) {
        bocas.putAll(entries.values());
    }
    final Map<K, MemoryByteSource> map = cache.asMap();
    final Map<K, MemoryByteSource> notCached = Maps.filterKeys(toInternalEntryMap(entries),
            Predicates.not(Predicates.in(map.keySet())));
    if (notCached.isEmpty()) {
        return;
    }
    if (!alwaysWrite) {
        bocas.putAll(notCached.values());
    }
    map.putAll(notCached);
}
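
Here filterKeys selects the entries whose keys are not yet present in the cache, so only the missing ones are written. The filter as a generic helper (hypothetical names):

import com.google.common.base.Predicates;
import com.google.common.collect.Maps;
import java.util.Map;

final class CacheDiff {
    /** The entries of {@code incoming} whose keys are absent from {@code cached}. */
    static <K, V> Map<K, V> missingFrom(Map<K, V> cached, Map<K, V> incoming) {
        // The view is lazy: the predicate consults the live key set on each access.
        return Maps.filterKeys(incoming, Predicates.not(Predicates.in(cached.keySet())));
    }
}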

From source file:com.google.devtools.build.skyframe.SkyFunctionEnvironment.java

private Map<SkyKey, ? extends NodeEntry> batchPrefetch(SkyKey requestor, GroupedList<SkyKey> depKeys,
        Set<SkyKey> oldDeps, boolean assertDone, SkyKey keyForDebugging) throws InterruptedException {
    Iterable<SkyKey> depKeysAsIterable = Iterables.concat(depKeys);
    Iterable<SkyKey> keysToPrefetch = depKeysAsIterable;
    if (PREFETCH_OLD_DEPS) {
        ImmutableSet.Builder<SkyKey> keysToPrefetchBuilder = ImmutableSet.builder();
        keysToPrefetchBuilder.addAll(depKeysAsIterable).addAll(oldDeps);
        keysToPrefetch = keysToPrefetchBuilder.build();
    }
    Map<SkyKey, ? extends NodeEntry> batchMap = evaluatorContext.getBatchValues(requestor, Reason.PREFETCH,
            keysToPrefetch);
    if (PREFETCH_OLD_DEPS) {
        batchMap = ImmutableMap.<SkyKey, NodeEntry>copyOf(
                Maps.filterKeys(batchMap, Predicates.in(ImmutableSet.copyOf(depKeysAsIterable))));
    }
    if (batchMap.size() != depKeys.numElements()) {
        throw new IllegalStateException("Missing keys for " + keyForDebugging + ": "
                + Sets.difference(depKeys.toSet(), batchMap.keySet()));
    }
    if (assertDone) {
        for (Map.Entry<SkyKey, ? extends NodeEntry> entry : batchMap.entrySet()) {
            Preconditions.checkState(entry.getValue().isDone(), "%s had not done %s", keyForDebugging, entry);
        }
    }
    return batchMap;
}
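
Note the ImmutableMap.copyOf around the filtered view: it materializes a snapshot, so later changes to the batch map cannot leak through the otherwise lazy view. As a small helper (hypothetical names):

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;
import java.util.Map;

final class Snapshots {
    /** An immutable snapshot of {@code all}, restricted to the {@code wanted} keys. */
    static <K, V> Map<K, V> restrictedCopy(Map<K, V> all, Iterable<K> wanted) {
        return ImmutableMap.copyOf(Maps.filterKeys(all, Predicates.in(ImmutableSet.copyOf(wanted))));
    }
}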

From source file:org.apache.beam.runners.core.construction.graph.GreedyPCollectionFusers.java

/**
 * A ParDo can be fused into a stage if it executes in the same Environment as that stage and no
 * transform that is upstream of any of its side inputs is present in that stage.
 *
 * <p>A ParDo that consumes a side input cannot process an element until all of the side inputs
 * contain data for the side input window that contains the element.
 */
private static boolean canFuseParDo(PTransformNode parDo, Environment environment, PCollectionNode candidate,
        Collection<PCollectionNode> stagePCollections, QueryablePipeline pipeline) {
    Optional<Environment> env = pipeline.getEnvironment(parDo);
    checkArgument(env.isPresent(), "A %s must have an %s associated with it",
            ParDoPayload.class.getSimpleName(), Environment.class.getSimpleName());
    if (!env.get().equals(environment)) {
        // The PCollection's producer and this ParDo execute in different environments, so fusion
        // is never possible.
        return false;
    }
    try {
        ParDoPayload payload = ParDoPayload.parseFrom(parDo.getTransform().getSpec().getPayload());
        if (Maps.filterKeys(parDo.getTransform().getInputsMap(), s -> payload.getTimerSpecsMap().containsKey(s))
                .values().contains(candidate.getId())) {
            // Allow fusion across timer PCollections because they are a self loop.
            return true;
        } else if (payload.getStateSpecsCount() > 0 || payload.getTimerSpecsCount() > 0) {
            // Inputs to a ParDo that uses State or Timers must be key-partitioned, and elements for
            // a key must execute serially. To avoid checking if the rest of the stage is
            // key-partitioned and preserves keys, these ParDos do not fuse into an existing stage.
            return false;
        } else if (!pipeline.getSideInputs(parDo).isEmpty()) {
            // At execution time, a Runner is required to only provide inputs to a PTransform that, at
            // the time the PTransform processes them, the associated window is ready in all side inputs
            // that the PTransform consumes. For an arbitrary stage, it is significantly complex for the
            // runner to determine this for each input. As a result, we break fusion to simplify this
            // inspection. In general, a ParDo which consumes side inputs cannot be fused into an
            // executable stage alongside any transforms which are upstream of any of its side inputs.
            return false;
        }
    } catch (InvalidProtocolBufferException e) {
        throw new IllegalArgumentException(e);
    }
    return true;
}
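
The timer check filters the transform's input map down to the entries named in the timer specs and then asks whether the candidate PCollection id is among the surviving values. Distilled (hypothetical names):

import com.google.common.collect.Maps;
import java.util.Map;
import java.util.Set;

final class TimerInputs {
    /** True if {@code candidateId} is the value of an input whose local name is a timer spec. */
    static boolean isTimerInput(Map<String, String> inputs, Set<String> timerIds, String candidateId) {
        return Maps.filterKeys(inputs, timerIds::contains).values().contains(candidateId);
    }
}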

From source file:com.isotrol.impe3.web20.impl.MigrationServiceImpl.java

private CommunityEntity fillCommunity(CommunityEntity entity, CommunityDTO dto) {
    final Calendar date = Calendar.getInstance();
    date.setTime(dto.getDate());
    entity.setDate(date);
    entity.setDescription(dto.getDescription());
    entity.setCode(dto.getCode());
    entity.setName(dto.getName());

    final Map<String, String> properties = entity.getProperties();
    properties.clear();
    final Map<String, String> dtopr = dto.getProperties();
    if (dtopr != null) {
        properties.putAll(Maps.filterKeys(Maps.filterValues(dtopr, notNull()), notNull()));
    }

    return entity;
}
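
Chaining Maps.filterValues inside Maps.filterKeys drops entries with a null value or a null key in a single expression before they are copied over. A minimal runnable sketch:

import static com.google.common.base.Predicates.notNull;

import com.google.common.collect.Maps;
import java.util.HashMap;
import java.util.Map;

public class NullCleanDemo {
    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("name", "community");
        props.put("empty", null);
        props.put(null, "orphan");

        // View of "props" without null keys and without null values.
        Map<String, String> clean = Maps.filterKeys(Maps.filterValues(props, notNull()), notNull());
        System.out.println(clean); // {name=community}
    }
}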