Example usage for com.google.common.collect Maps filterKeys

Introduction

This page collects usage examples for com.google.common.collect.Maps.filterKeys, gathered from open-source projects.

Prototype

@CheckReturnValue
public static <K, V> BiMap<K, V> filterKeys(BiMap<K, V> unfiltered, final Predicate<? super K> keyPredicate) 

Document

Returns a bimap containing the mappings in unfiltered whose keys satisfy a predicate.
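
filterKeys is also overloaded for plain Map, SortedMap, and NavigableMap arguments, and it is the Map overload that the examples below use. In every case the result is a live, filtered view of the unfiltered map, not a copy. A minimal sketch of the Map overload, with illustrative contents (the names and values here are assumptions, not taken from the examples below):

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;

import java.util.HashMap;
import java.util.Map;

public class FilterKeysDemo {
    public static void main(String[] args) {
        Map<String, Integer> scores = new HashMap<>(ImmutableMap.of("alice", 10, "bob", 7, "carol", 3));

        // The returned map is a live view backed by 'scores'.
        Map<String, Integer> allowed = Maps.filterKeys(scores,
                Predicates.in(ImmutableSet.of("alice", "bob")));
        System.out.println(allowed); // {alice=10, bob=7}

        // Changes to the backing map show through the view...
        scores.put("alice", 11);
        System.out.println(allowed.get("alice")); // 11

        // ...so copy the view when a detached snapshot is needed,
        // as several of the examples below do.
        Map<String, Integer> snapshot = ImmutableMap.copyOf(allowed);
        scores.remove("bob");
        System.out.println(allowed.containsKey("bob"));  // false
        System.out.println(snapshot.containsKey("bob")); // true
    }
}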

Usage

From source file:org.eclipse.sirius.business.internal.contribution.IncrementalModelContributor.java

/**
 * Applies all the applicable contributions found in the sources (including
 * the target model itself) to the target model.
 *
 * @param targetModel
 *            the model to which the contributions should be applied.
 * @param contributionSources
 *            source models, in addition to the target model itself, which
 *            may provide contributions.
 * @return the modified target model, with the contributions applied.
 */
public EObject apply(EObject targetModel, Collection<? extends EObject> contributionSources) {
    Collection<EObject> referenceInputs = Lists.newArrayList(contributionSources);
    currentCopier = new EcoreUtil.Copier();
    List<EObject> inputs = Lists.newArrayList(currentCopier.copyAll(referenceInputs));
    currentCopier.copyReferences();

    Map<EObject, Object> inputIds = Maps.newHashMap();
    for (EObject root : referenceInputs) {
        for (EObject obj : AllContents.of(root, true)) {
            inputIds.put(currentCopier.get(obj), idFunction.apply(obj));
        }
    }

    viewpointUris = Maps.newHashMap();
    for (Viewpoint originalVP : Iterables.filter(currentCopier.keySet(), Viewpoint.class)) {
        Option<URI> uri = new ViewpointQuery(originalVP).getViewpointURI();
        if (uri.some()) {
            viewpointUris.put((Viewpoint) currentCopier.get(originalVP), uri.get().toString());
        }
    }

    EObject result = super.apply(currentCopier.get(targetModel), inputs);
    postProcess(result);
    contributions = Maps.newHashMap(Maps.filterKeys(inputIds, Predicates.in(additions)));

    if (model == null) {
        model = result;
        modelIds = Maps.newHashMap(Maps.filterKeys(inputIds, new Predicate<EObject>() {
            public boolean apply(EObject input) {
                return input == model || EcoreUtil.isAncestor(model, input);
            }
        }));
    } else {
        Function<EObject, Object> f = update(result, inputIds);
        Map<EObject, Object> newIds = Maps.newHashMap();
        for (EObject obj : AllContents.of(model, true)) {
            newIds.put(obj, f.apply(obj));
        }
        modelIds = newIds;
    }
    return model;
}

From source file:org.fusesource.process.fabric.child.ChildProcessManager.java

protected Map<String, String> getProcessLayout(Profile profile, String layoutPath) {
    return ByteToStringValues.INSTANCE
            .apply(Maps.filterKeys(profile.getFileConfigurations(), new LayOutPredicate(layoutPath)));
}

From source file:org.graylog2.plugin.Message.java

public Message(final Map<String, Object> fields) {
    this((String) fields.get(FIELD_ID), Maps.filterKeys(fields, not(equalTo(FIELD_ID))));
}
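
Here the statically imported not(equalTo(FIELD_ID)) comes from com.google.common.base.Predicates: the constructor strips the reserved FIELD_ID entry from the field map and passes the id separately to the delegate constructor.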

From source file:org.ulyssis.ipp.snapshot.TeamTagMap.java

public TeamTagMap removeTag(TagId tag) {
    if (!tagToTeam.containsKey(tag)) {
        return this;
    }
    return new TeamTagMap(ImmutableMap.copyOf(Maps.filterKeys(tagToTeam, t -> !tag.equals(t))));
}
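
Because filterKeys only returns a view backed by tagToTeam, the ImmutableMap.copyOf call is what actually materializes the filtered result and detaches the new TeamTagMap from the old mapping.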

From source file:brooklyn.location.jclouds.ComputeServiceRegistryImpl.java

@Override
public ComputeService findComputeService(ConfigBag conf, boolean allowReuse) {
    String provider = checkNotNull(conf.get(CLOUD_PROVIDER), "provider must not be null");
    String identity = checkNotNull(conf.get(ACCESS_IDENTITY), "identity must not be null");
    String credential = checkNotNull(conf.get(ACCESS_CREDENTIAL), "credential must not be null");

    Properties properties = new Properties();
    properties.setProperty(Constants.PROPERTY_TRUST_ALL_CERTS, Boolean.toString(true));
    properties.setProperty(Constants.PROPERTY_RELAX_HOSTNAME, Boolean.toString(true));
    properties.setProperty("jclouds.ssh.max-retries",
            conf.getStringKey("jclouds.ssh.max-retries") != null
                    ? conf.getStringKey("jclouds.ssh.max-retries").toString()
                    : "50");
    // Enable aws-ec2 lazy image fetching, if given a specific imageId; otherwise customize for specific owners; or all as a last resort
    // See https://issues.apache.org/jira/browse/WHIRR-416
    if ("aws-ec2".equals(provider)) {
        // TODO convert AWS-only flags to config keys
        if (groovyTruth(conf.get(IMAGE_ID))) {
            properties.setProperty(PROPERTY_EC2_AMI_QUERY, "");
            properties.setProperty(PROPERTY_EC2_CC_AMI_QUERY, "");
        } else if (groovyTruth(conf.getStringKey("imageOwner"))) {
            properties.setProperty(PROPERTY_EC2_AMI_QUERY,
                    "owner-id=" + conf.getStringKey("imageOwner") + ";state=available;image-type=machine");
        } else if (groovyTruth(conf.getStringKey("anyOwner"))) {
            // set `anyOwner: true` to override the default query (which is restricted to certain owners as per below), 
            // allowing the AMI query to bind to any machine
            // (note however, we sometimes pick defaults in JcloudsLocationFactory);
            // (and be careful, this can give a LOT of data back, taking several minutes,
            // and requiring extra memory allocated on the command-line)
            properties.setProperty(PROPERTY_EC2_AMI_QUERY, "state=available;image-type=machine");
            /*
             * by default the following filters are applied:
             * Filter.1.Name=owner-id&Filter.1.Value.1=137112412989&
             * Filter.1.Value.2=063491364108&
             * Filter.1.Value.3=099720109477&
             * Filter.1.Value.4=411009282317&
             * Filter.2.Name=state&Filter.2.Value.1=available&
             * Filter.3.Name=image-type&Filter.3.Value.1=machine&
             */
        }
    }

    // FIXME Deprecated mechanism, should have a ConfigKey for overrides
    Map<String, Object> extra = Maps.filterKeys(conf.getAllConfig(), Predicates.containsPattern("^jclouds\\."));
    if (extra.size() > 0) {
        LOG.warn("Jclouds using deprecated property overrides: " + Entities.sanitize(extra));
    }
    properties.putAll(extra);

    String endpoint = conf.get(CLOUD_ENDPOINT);
    if (!groovyTruth(endpoint))
        endpoint = getDeprecatedProperty(conf, Constants.PROPERTY_ENDPOINT);
    if (groovyTruth(endpoint))
        properties.setProperty(Constants.PROPERTY_ENDPOINT, endpoint);

    Map<?, ?> cacheKey = MutableMap.builder().putAll(properties).put("provider", provider)
            .put("identity", identity).put("credential", credential).putIfNotNull("endpoint", endpoint).build()
            .asUnmodifiable();

    if (allowReuse) {
        ComputeService result = cachedComputeServices.get(cacheKey);
        if (result != null) {
            LOG.trace("jclouds ComputeService cache hit for compute service, for "
                    + Entities.sanitize(properties));
            return result;
        }
        LOG.debug("jclouds ComputeService cache miss for compute service, creating, for "
                + Entities.sanitize(properties));
    }

    Iterable<Module> modules = getCommonModules();

    // Synchronizing to avoid deadlock from sun.reflect.annotation.AnnotationType.
    // See https://github.com/brooklyncentral/brooklyn/issues/974
    ComputeServiceContext computeServiceContext;
    synchronized (createComputeServicesMutex) {
        computeServiceContext = ContextBuilder.newBuilder(provider).modules(modules)
                .credentials(identity, credential).overrides(properties).build(ComputeServiceContext.class);
    }
    final ComputeService computeService = computeServiceContext.getComputeService();
    if (allowReuse) {
        synchronized (cachedComputeServices) {
            ComputeService result = cachedComputeServices.get(cacheKey);
            if (result != null) {
                LOG.debug("jclouds ComputeService cache recovery for compute service, for "
                        + Entities.sanitize(cacheKey));
                //keep the old one, discard the new one
                computeService.getContext().close();
                return result;
            }
            LOG.debug("jclouds ComputeService created " + computeService + ", adding to cache, for "
                    + Entities.sanitize(properties));
            cachedComputeServices.put(cacheKey, computeService);
        }
    }
    return computeService;
}

From source file:org.apache.helix.api.config.NamespacedConfig.java

/**
 * Remove all fields from this config that are not prefixed
 */
private void filterNonPrefixedFields() {
    // filter out any configuration that isn't user-defined
    Predicate<String> keyFilter = new Predicate<String>() {
        @Override
        public boolean apply(String key) {
            return key.contains(_prefix);
        }
    };
    super.setMapFields(Maps.filterKeys(super.getMapFields(), keyFilter));
    super.setListFields(Maps.filterKeys(super.getListFields(), keyFilter));
    super.setSimpleFields(Maps.filterKeys(super.getSimpleFields(), keyFilter));
}

From source file:com.eviware.loadui.impl.addon.AddonRegistryImpl.java

@Override
public synchronized void registerAddonHolder(final AddonHolder addonHolder) {
    registeredHolders.add(addonHolder);
    final Predicate<Class<?>> typeMatcher = new Predicate<Class<?>>() {
        @Override
        public boolean apply(Class<?> input) {
            return input.isInstance(addonHolder);
        }
    };

    for (Addon.Factory<?> factory : Iterables
            .concat(Maps.filterKeys(eagerAddons.asMap(), typeMatcher).values())) {
        loadAddon(addonHolder, factory);
    }
}
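
Note that filterKeys is applied here to the asMap() view of the eagerAddons multimap, so the type predicate runs against the class keys and Iterables.concat flattens the surviving factory collections into a single iterable.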

From source file:com.splicemachine.orc.StripeReader.java

public Stripe readStripe(StripeInformation stripe, AggregatedMemoryContext systemMemoryUsage)
        throws IOException {
    // read the stripe footer
    StripeFooter stripeFooter = readStripeFooter(stripe, systemMemoryUsage);
    List<ColumnEncoding> columnEncodings = stripeFooter.getColumnEncodings();

    // get streams for selected columns
    Map<StreamId, Stream> streams = new HashMap<>();
    boolean hasRowGroupDictionary = false;
    for (Stream stream : stripeFooter.getStreams()) {
        if (includedOrcColumns.contains(stream.getColumn())) {
            streams.put(new StreamId(stream), stream);

            ColumnEncodingKind columnEncoding = columnEncodings.get(stream.getColumn()).getColumnEncodingKind();
            if (columnEncoding == DICTIONARY && stream.getStreamKind() == StreamKind.IN_DICTIONARY) {
                hasRowGroupDictionary = true;
            }
        }
    }

    // handle stripes with more than one row group or a dictionary
    if ((stripe.getNumberOfRows() > rowsInRowGroup) || hasRowGroupDictionary) {
        // determine ranges of the stripe to read
        Map<StreamId, DiskRange> diskRanges = getDiskRanges(stripeFooter.getStreams());
        diskRanges = Maps.filterKeys(diskRanges, Predicates.in(streams.keySet()));

        // read the file regions
        Map<StreamId, OrcInputStream> streamsData = readDiskRanges(stripe.getOffset(), diskRanges,
                systemMemoryUsage);

        // read the bloom filter for each column
        Map<Integer, List<HiveBloomFilter>> bloomFilterIndexes = readBloomFilterIndexes(streams, streamsData);

        // read the row index for each column
        Map<Integer, List<RowGroupIndex>> columnIndexes = readColumnIndexes(streams, streamsData,
                bloomFilterIndexes);

        // select the row groups matching the tuple domain
        Set<Integer> selectedRowGroups = selectRowGroups(stripe, columnIndexes);

        // if all row groups are skipped, return null
        if (selectedRowGroups.isEmpty()) {
            // set accounted memory usage to zero
            systemMemoryUsage.close();
            return null;
        }

        // value streams
        Map<StreamId, ValueStream<?>> valueStreams = createValueStreams(streams, streamsData, columnEncodings);

        // build the dictionary streams
        StreamSources dictionaryStreamSources = createDictionaryStreamSources(streams, valueStreams,
                columnEncodings);

        // build the row groups
        try {
            List<RowGroup> rowGroups = createRowGroups(stripe.getNumberOfRows(), streams, valueStreams,
                    columnIndexes, selectedRowGroups, columnEncodings);

            return new Stripe(stripe.getNumberOfRows(), columnEncodings, rowGroups, dictionaryStreamSources);
        } catch (InvalidCheckpointException e) {
            // The ORC file contains a corrupt checkpoint stream
            // If the file does not have a row group dictionary, treat the stripe as a single row group. Otherwise,
            // we must fail because the length of the row group dictionary is contained in the checkpoint stream.
            if (hasRowGroupDictionary) {
                throw new OrcCorruptionException(e, "ORC file %s has corrupt checkpoints", orcDataSource);
            }
        }
    }

    // stripe only has one row group and no dictionary
    ImmutableMap.Builder<StreamId, DiskRange> diskRangesBuilder = ImmutableMap.builder();
    for (Entry<StreamId, DiskRange> entry : getDiskRanges(stripeFooter.getStreams()).entrySet()) {
        StreamId streamId = entry.getKey();
        if (streamId.getStreamKind() != ROW_INDEX && streams.keySet().contains(streamId)) {
            diskRangesBuilder.put(entry);
        }
    }
    ImmutableMap<StreamId, DiskRange> diskRanges = diskRangesBuilder.build();

    // read the file regions
    Map<StreamId, OrcInputStream> streamsData = readDiskRanges(stripe.getOffset(), diskRanges,
            systemMemoryUsage);

    // value streams
    Map<StreamId, ValueStream<?>> valueStreams = createValueStreams(streams, streamsData, columnEncodings);

    // build the dictionary streams
    StreamSources dictionaryStreamSources = createDictionaryStreamSources(streams, valueStreams,
            columnEncodings);

    // build the row group
    ImmutableMap.Builder<StreamId, StreamSource<?>> builder = ImmutableMap.builder();
    for (Entry<StreamId, ValueStream<?>> entry : valueStreams.entrySet()) {
        builder.put(entry.getKey(), new ValueStreamSource<>(entry.getValue()));
    }
    RowGroup rowGroup = new RowGroup(0, 0, stripe.getNumberOfRows(), new StreamSources(builder.build()));

    return new Stripe(stripe.getNumberOfRows(), columnEncodings, ImmutableList.of(rowGroup),
            dictionaryStreamSources);
}

From source file:com.edmunds.zookeeper.treewatcher.ZooKeeperTreeDelta.java

private void walkTree(ZooKeeperTreeNode oldNode, ZooKeeperTreeNode newNode, ZooKeeperTreeDeltaResult result) {

    final ImmutableMap<String, ZooKeeperTreeNode> oldChildren = oldNode.getChildren();
    final ImmutableMap<String, ZooKeeperTreeNode> newChildren = newNode.getChildren();

    final ImmutableSet<String> oldKeys = oldChildren.keySet();
    final ImmutableSet<String> newKeys = newChildren.keySet();

    // Special case if the user has asked for a leaf first walk.
    if (walkPrimary == LEAF_FIRST) {
        walkChildren(oldChildren, newChildren, result);
    }

    // Calculate which nodes have been inserted.
    for (ZooKeeperTreeNode node : Maps.filterKeys(newChildren, not(in(oldKeys))).values()) {
        addNode(walkInserted, INSERT, node, result);
    }

    // The primary walk will change the order for updates so always use ROOT_ONLY
    if (!Arrays.equals(oldNode.getData(), newNode.getData())) {
        addNode(ROOT_ONLY, UPDATE, newNode, result);
    }

    // Calculate the deleted nodes.
    for (ZooKeeperTreeNode node : Maps.filterKeys(oldChildren, not(in(newKeys))).values()) {
        addNode(walkDeleted, DELETE, node, result);
    }

    // This is the normal case.
    if (walkPrimary == ROOT_FIRST) {
        walkChildren(oldChildren, newChildren, result);
    }
}
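
Filtering each child map with not(in(...)) against the other side's key set is effectively a key-set difference computed in both directions: keys present only in newChildren are inserts, and keys present only in oldChildren are deletes.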

From source file:com.google.cloud.trace.sdk.CloudTraceWriter.java

@Override
public void writeSpans(List<TraceSpanData> spans) throws CloudTraceException {
    // Aggregate all the spans by trace. It's more efficient to call the API this way.
    Map<String, Trace> traces = new HashMap<>();

    // Keep track of traces we really want to write out.
    Set<String> shouldWriteTraces = new HashSet<>();

    for (TraceSpanData spanData : spans) {
        spanData.end();
        TraceSpan span = convertTraceSpanDataToSpan(spanData);
        if (spanData.getContext().getShouldWrite()) {
            shouldWriteTraces.add(spanData.getContext().getTraceId());
        }

        if (!traces.containsKey(spanData.getContext().getTraceId())) {
            Trace trace = convertTraceSpanDataToTrace(spanData);
            traces.put(spanData.getContext().getTraceId(), trace);
            trace.setSpans(new ArrayList<TraceSpan>());
        }
        traces.get(spanData.getContext().getTraceId()).getSpans().add(span);
    }

    // Only write out the ones where at least one trace span said to write.
    traces = Maps.filterKeys(traces, Predicates.in(shouldWriteTraces));

    // Write to the API.
    if (!traces.isEmpty()) {
        writeTraces(new Traces().setTraces(new ArrayList<Trace>(traces.values())));
    }
}