Example usage for com.google.common.collect Maps filterKeys

Introduction

On this page you can find usage examples of com.google.common.collect.Maps.filterKeys, collected from open-source projects.

Prototype

@CheckReturnValue
public static <K, V> BiMap<K, V> filterKeys(BiMap<K, V> unfiltered, final Predicate<? super K> keyPredicate) 

Document

Returns a bimap containing the mappings in unfiltered whose keys satisfy a predicate.
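
Note that filterKeys is overloaded: besides the BiMap overload shown in the prototype, Guava also provides filterKeys(Map, Predicate), filterKeys(SortedMap, Predicate), and filterKeys(NavigableMap, Predicate). Most of the examples below use the plain Map overload. Before the project-level examples, here is a minimal, self-contained sketch (the class name and sample data are illustrative only, not taken from any project below):

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;

import java.util.Map;

public class FilterKeysDemo {
    public static void main(String[] args) {
        Map<String, Integer> fruit = ImmutableMap.of("apple", 1, "banana", 2, "cherry", 3);

        // Keep only entries whose key starts with "b".
        // The result is a live view backed by the original map, not a copy.
        Map<String, Integer> filtered = Maps.filterKeys(fruit, key -> key.startsWith("b"));

        System.out.println(filtered); // {banana=2}
    }
}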

Usage

From source file:com.facebook.presto.cassandra.CassandraPartitionManager.java

public CassandraPartitionResult getPartitions(ConnectorTableHandle tableHandle,
        TupleDomain<ColumnHandle> tupleDomain) {
    CassandraTableHandle cassandraTableHandle = (CassandraTableHandle) tableHandle;

    CassandraTable table = cassandraSession.getTable(cassandraTableHandle.getSchemaTableName());
    List<CassandraColumnHandle> partitionKeys = table.getPartitionKeyColumns();

    // fetch the partitions
    List<CassandraPartition> allPartitions = getCassandraPartitions(table, tupleDomain);
    log.debug("%s.%s #partitions: %d", cassandraTableHandle.getSchemaName(),
            cassandraTableHandle.getTableName(), allPartitions.size());

    // do a final pass to filter based on fields that could not be used to build the prefix
    List<CassandraPartition> partitions = allPartitions.stream()
            .filter(partition -> tupleDomain.overlaps(partition.getTupleDomain())).collect(toList());

    // All partition key domains will be fully evaluated, so we don't need to include those
    TupleDomain<ColumnHandle> remainingTupleDomain = TupleDomain.none();
    if (!tupleDomain.isNone()) {
        if (partitions.size() == 1 && partitions.get(0).isUnpartitioned()) {
            remainingTupleDomain = tupleDomain;
        } else {
            @SuppressWarnings({ "rawtypes", "unchecked" })
            List<ColumnHandle> partitionColumns = (List) partitionKeys;
            remainingTupleDomain = TupleDomain.withColumnDomains(
                    Maps.filterKeys(tupleDomain.getDomains().get(), not(in(partitionColumns))));
        }
    }

    // push down indexed column fixed value predicates only for unpartitioned partition which uses token range query
    if ((partitions.size() == 1) && partitions.get(0).isUnpartitioned()) {
        Map<ColumnHandle, Domain> domains = tupleDomain.getDomains().get();
        List<ColumnHandle> indexedColumns = new ArrayList<>();
        // compose partitionId by using indexed column
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<ColumnHandle, Domain> entry : domains.entrySet()) {
            CassandraColumnHandle column = (CassandraColumnHandle) entry.getKey();
            Domain domain = entry.getValue();
            if (column.isIndexed() && domain.isSingleValue()) {
                sb.append(CassandraCqlUtils.validColumnName(column.getName())).append(" = ")
                        .append(CassandraCqlUtils.cqlValue(
                                toCQLCompatibleString(entry.getValue().getSingleValue()),
                                column.getCassandraType()));
                indexedColumns.add(column);
                // Only one indexed column predicate can be pushed down.
                break;
            }
        }
        if (sb.length() > 0) {
            CassandraPartition partition = partitions.get(0);
            TupleDomain<ColumnHandle> filterIndexedColumn = TupleDomain.withColumnDomains(
                    Maps.filterKeys(remainingTupleDomain.getDomains().get(), not(in(indexedColumns))));
            partitions = new ArrayList<>();
            partitions
                    .add(new CassandraPartition(partition.getKey(), sb.toString(), filterIndexedColumn, true));
            return new CassandraPartitionResult(partitions, filterIndexedColumn);
        }
    }
    return new CassandraPartitionResult(partitions, remainingTupleDomain);
}
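
The not(in(partitionColumns)) idiom above presumably relies on static imports of com.google.common.base.Predicates.not and Predicates.in: it keeps only the domains whose column handle is not one of the partition key columns. A minimal, self-contained sketch of that exclusion pattern with simple string keys (the names and data are illustrative, not Presto types):

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;

import java.util.List;
import java.util.Map;

import static com.google.common.base.Predicates.in;
import static com.google.common.base.Predicates.not;

public class ExcludeKeysDemo {
    public static void main(String[] args) {
        Map<String, String> domains = ImmutableMap.of("part_key", "a", "other_col", "b");
        List<String> partitionColumns = ImmutableList.of("part_key");

        // Keep only entries whose key is NOT in partitionColumns.
        Map<String, String> remaining = Maps.filterKeys(domains, not(in(partitionColumns)));

        System.out.println(remaining); // {other_col=b}
    }
}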

From source file:io.druid.indexing.overlord.setup.WorkerSelectUtils.java

/**
 * Return workers not assigned to any affinity pool at all.
 *
 * @param affinityConfig affinity config
 * @param workerMap      map of worker hostname to worker info
 *
 * @return map of worker hostname to worker info
 */
private static ImmutableMap<String, ImmutableWorkerInfo> getNonAffinityWorkers(
        final AffinityConfig affinityConfig, final Map<String, ImmutableWorkerInfo> workerMap) {
    return ImmutableMap.copyOf(Maps.filterKeys(workerMap,
            workerHost -> !affinityConfig.getAffinityWorkers().contains(workerHost)));
}
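
The map returned by Maps.filterKeys is a live view of the backing map, which is presumably why this example wraps it in ImmutableMap.copyOf to take a stable snapshot. A minimal sketch of the difference between the view and the copy (names and data are illustrative):

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;

import java.util.HashMap;
import java.util.Map;

public class FilterViewDemo {
    public static void main(String[] args) {
        Map<String, Integer> workers = new HashMap<>();
        workers.put("host-a", 1);

        // Live view: reflects later changes to the backing map.
        Map<String, Integer> view = Maps.filterKeys(workers, host -> host.startsWith("host-"));
        // Snapshot: fixed at copy time.
        Map<String, Integer> snapshot = ImmutableMap.copyOf(view);

        workers.put("host-b", 2);
        System.out.println(view.size());     // 2
        System.out.println(snapshot.size()); // 1
    }
}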

From source file:org.gradle.model.internal.manage.schema.extract.CandidateMethods.java

/**
 * @param methodName Method name
 * @param excludes Signature equivalences to exclude from the returned index
 * @return Overloaded candidate methods named {@literal methodName} indexed by signature equivalence, except those
 * matching any of the signature equivalences provided in {@literal excludes}, or {@literal null} if none
 */
public Map<Equivalence.Wrapper<Method>, Collection<Method>> overloadedMethodsNamed(String methodName,
        Collection<Equivalence.Wrapper<Method>> excludes) {
    return Maps.filterKeys(overloadedMethodsNamed(methodName), Predicates.not(Predicates.in(excludes)));
}

From source file:org.geogit.storage.memory.HeapRefDatabase.java

/**
 * @return all known references under the "refs" namespace (i.e. not top level ones like HEAD,
 *         etc), key'ed by ref name
 */
@Override
public Map<String, String> getAll() {

    Predicate<String> keyPredicate = new Predicate<String>() {

        @Override
        public boolean apply(String refName) {
            return refName.startsWith("refs/");
        }
    };
    return Maps.filterKeys(ImmutableMap.copyOf(this.refs), keyPredicate);
}
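
The anonymous Predicate above predates Java 8. Because Guava's Predicate is a functional interface, the same filter can be written with a lambda; a self-contained sketch of the equivalent call (the class name and sample refs are illustrative):

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;

import java.util.Map;

public class RefFilterDemo {
    public static void main(String[] args) {
        Map<String, String> refs = ImmutableMap.of(
                "HEAD", "abc123",
                "refs/heads/master", "def456");

        // Same filter as the anonymous Predicate above, written as a lambda.
        Map<String, String> filtered = Maps.filterKeys(refs, name -> name.startsWith("refs/"));

        System.out.println(filtered); // {refs/heads/master=def456}
    }
}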

From source file:org.jclouds.vagrant.internal.MachineConfig.java

public void save(Map<String, Object> config) {
    File parent = configPath.getParentFile();
    if (!parent.exists() && !parent.mkdirs()) {
        if (!parent.exists()) {
            throw new IllegalStateException("Failure creating folder " + parent.getAbsolutePath());
        }
    }

    Map<String, Object> configWithoutVersion = Maps.filterKeys(config,
            Predicates.not(Predicates.equalTo(VagrantConstants.CONFIG_JCLOUDS_VERSION)));
    String version = VagrantConstants.CONFIG_JCLOUDS_VERSION + ": " + JcloudsVersion.get().toString() + "\n";
    String output = version + Joiner.on("\n").withKeyValueSeparator(": ").join(configWithoutVersion);

    FileOutputStream fileOut = null;
    BufferedWriter out = null;

    try {
        fileOut = new FileOutputStream(configPath);
        out = new BufferedWriter(new OutputStreamWriter(fileOut, Charsets.UTF_8));
        out.write(output);
    } catch (IOException e) {
        throw new IllegalStateException("Failed writing to machine config file " + configPath.getAbsolutePath(),
                e);
    } finally {
        if (out != null) {
            Closeables2.closeQuietly(out);
        } else if (fileOut != null) {
            Closeables2.closeQuietly(fileOut);
        }
    }
}

From source file:org.apache.druid.storage.s3.S3DataSegmentMover.java

@Override
public DataSegment move(DataSegment segment, Map<String, Object> targetLoadSpec)
        throws SegmentLoadingException {
    try {
        Map<String, Object> loadSpec = segment.getLoadSpec();
        String s3Bucket = MapUtils.getString(loadSpec, "bucket");
        String s3Path = MapUtils.getString(loadSpec, "key");
        String s3DescriptorPath = S3Utils.descriptorPathForSegmentPath(s3Path);

        final String targetS3Bucket = MapUtils.getString(targetLoadSpec, "bucket");
        final String targetS3BaseKey = MapUtils.getString(targetLoadSpec, "baseKey");

        final String targetS3Path = S3Utils.constructSegmentPath(targetS3BaseKey,
                DataSegmentPusher.getDefaultStorageDir(segment, false));
        final String targetS3DescriptorPath = S3Utils.descriptorPathForSegmentPath(targetS3Path);

        if (targetS3Bucket.isEmpty()) {
            throw new SegmentLoadingException("Target S3 bucket is not specified");
        }
        if (targetS3Path.isEmpty()) {
            throw new SegmentLoadingException("Target S3 baseKey is not specified");
        }

        safeMove(s3Bucket, s3Path, targetS3Bucket, targetS3Path);
        safeMove(s3Bucket, s3DescriptorPath, targetS3Bucket, targetS3DescriptorPath);

        return segment.withLoadSpec(ImmutableMap.<String, Object>builder()
                .putAll(Maps.filterKeys(loadSpec, new Predicate<String>() {
                    @Override
                    public boolean apply(String input) {
                        return !("bucket".equals(input) || "key".equals(input));
                    }
                })).put("bucket", targetS3Bucket).put("key", targetS3Path).build());
    } catch (AmazonServiceException e) {
        throw new SegmentLoadingException(e, "Unable to move segment[%s]: [%s]", segment.getIdentifier(), e);
    }
}

From source file:com.isotrol.impe3.users.impl.PortalUsersServiceImpl.java

private PortalUserEntity fill(PortalUserEntity entity, PortalUserDTO dto) {
    entity.setName(dto.getUsername());
    entity.setDisplayName(dto.getDisplayName());
    entity.setEmail(dto.getEmail());
    entity.setActive(dto.isActive());
    final Map<String, String> properties = entity.getProperties();
    properties.clear();
    final Map<String, String> dtop = dto.getProperties();
    if (dtop != null) {
        properties.putAll(Maps.filterKeys(Maps.filterValues(dtop, notNull()), notNull()));

    }
    final Set<String> roles = entity.getRoles();
    roles.clear();
    final Set<String> dtor = dto.getRoles();
    if (dtor != null) {
        roles.addAll(Sets.filter(dtor, notNull()));
    }
    return entity;
}
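
Here Maps.filterValues and Maps.filterKeys are composed to drop entries with a null value and then entries with a null key before the remaining properties are copied into the entity. A minimal, self-contained sketch of that composition on its own (names and data are illustrative):

import com.google.common.collect.Maps;

import java.util.HashMap;
import java.util.Map;

import static com.google.common.base.Predicates.notNull;

public class DropNullEntriesDemo {
    public static void main(String[] args) {
        Map<String, String> properties = new HashMap<>();
        properties.put("displayName", "Alice");
        properties.put("email", null);
        properties.put(null, "orphan");

        // Drop null values first, then null keys, as in the example above.
        Map<String, String> cleaned =
                Maps.filterKeys(Maps.filterValues(properties, notNull()), notNull());

        System.out.println(cleaned); // {displayName=Alice}
    }
}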

From source file:ninja.leaping.permissionsex.backend.memory.MemoryDataStore.java

@Override
public Iterable<String> getAllIdentifiers(final String type) {
    return Iterables.transform(Maps.filterKeys(data, input -> {
        return input.getKey().equals(type);
    }).keySet(), Map.Entry::getValue);
}

From source file:com.facebook.buck.remoteexecution.util.MultiThreadedBlobUploader.java

/** Uploads missing items to the CAS. */
public ListenableFuture<Void> addMissing(ImmutableMap<Digest, UploadDataSupplier> data) {
    data = ImmutableMap.copyOf(Maps.filterKeys(data, k -> !containedHashes.contains(k.getHash())));
    if (data.isEmpty()) {
        return Futures.immediateFuture(null);
    }
    return enqueue(data);
}

From source file:org.apache.brooklyn.core.location.cloud.AbstractCloudMachineProvisioningLocation.java

protected ConfigBag extractSshConfig(ConfigBag setup, ConfigBag alt) {
    ConfigBag sshConfig = new ConfigBag();

    for (HasConfigKey<?> key : SshMachineLocation.ALL_SSH_CONFIG_KEYS) {
        String keyName = key.getConfigKey().getName();
        if (setup.containsKey(keyName)) {
            sshConfig.putStringKey(keyName, setup.getStringKey(keyName));
        } else if (alt.containsKey(keyName)) {
            sshConfig.putStringKey(keyName, setup.getStringKey(keyName));
        }
    }

    Map<String, Object> sshToolClassProperties = Maps.filterKeys(setup.getAllConfig(),
            StringPredicates.startsWith(SshMachineLocation.SSH_TOOL_CLASS_PROPERTIES_PREFIX));
    sshConfig.putAll(sshToolClassProperties);

    // Special cases (preserving old code!)
    if (setup.containsKey(PASSWORD)) {
        sshConfig.copyKeyAs(setup, PASSWORD, SshTool.PROP_PASSWORD);
    } else if (alt.containsKey(PASSWORD)) {
        sshConfig.copyKeyAs(alt, PASSWORD, SshTool.PROP_PASSWORD);
    }

    if (setup.containsKey(PRIVATE_KEY_DATA)) {
        sshConfig.copyKeyAs(setup, PRIVATE_KEY_DATA, SshTool.PROP_PRIVATE_KEY_DATA);
    } else if (setup.containsKey(PRIVATE_KEY_FILE)) {
        sshConfig.copyKeyAs(setup, PRIVATE_KEY_FILE, SshTool.PROP_PRIVATE_KEY_FILE);
    } else if (alt.containsKey(PRIVATE_KEY_DATA)) {
        sshConfig.copyKeyAs(setup, PRIVATE_KEY_DATA, SshTool.PROP_PRIVATE_KEY_DATA);
    }

    if (setup.containsKey(PRIVATE_KEY_PASSPHRASE)) {
        // NB: not supported in jclouds (but it is by our ssh tool)
        sshConfig.copyKeyAs(setup, PRIVATE_KEY_PASSPHRASE, SshTool.PROP_PRIVATE_KEY_PASSPHRASE);
    }

    return sshConfig;
}