Example usage for com.google.common.collect Maps filterEntries

Introduction

This page collects usage examples for com.google.common.collect Maps filterEntries, drawn from open-source projects.

Prototype

@CheckReturnValue
public static <K, V> Map<K, V> filterEntries(Map<K, V> unfiltered,
        Predicate<? super Entry<K, V>> entryPredicate)

@CheckReturnValue
public static <K, V> BiMap<K, V> filterEntries(BiMap<K, V> unfiltered,
        Predicate<? super Entry<K, V>> entryPredicate)

Document

Returns a map (or a bimap, for the BiMap overload) containing the mappings in unfiltered that satisfy entryPredicate. The returned map is a live view of unfiltered: changes to the backing map are reflected in the filtered view.
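
A minimal, self-contained sketch of the common Map overload (the map contents and names here are illustrative, not taken from any of the projects below):

import java.util.Map;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;

public class FilterEntriesExample {
    public static void main(String[] args) {
        Map<String, Integer> ages = ImmutableMap.of("alice", 34, "bob", 17, "carol", 52);

        // Keep only entries whose value is at least 18. The result is a live
        // filtered view of the backing map, not a copy.
        Map<String, Integer> adults = Maps.filterEntries(ages, e -> e.getValue() >= 18);

        System.out.println(adults); // {alice=34, carol=52}
    }
}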

Usage

From source file:org.apache.gobblin.data.management.conversion.hive.watermarker.PartitionLevelWatermarker.java

/**
 * Adds watermark workunits to <code>workunits</code>. A watermark workunit is a dummy workunit that is skipped by extractor/converter/writer.
 * It stores a map of watermarks. The map has one entry per partition with partition watermark as value.
 * <ul>
 * <li>Add one NoOp watermark workunit for each {@link Table}
 * <li>The workunit has an identifier property {@link #IS_WATERMARK_WORKUNIT_KEY} set to true.
 * <li>Watermarks for all {@link Partition}s that belong to this {@link Table} are added as {@link Map}
 * <li>A maximum of {@link #maxPartitionsPerDataset} watermarks are persisted, ordered by most recently modified {@link Partition}s
 *
 * </ul>
 * {@inheritDoc}
 * @see org.apache.gobblin.data.management.conversion.hive.watermarker.HiveSourceWatermarker#onGetWorkunitsEnd(java.util.List)
 */
@Override
public void onGetWorkunitsEnd(List<WorkUnit> workunits) {
    try (AutoReturnableObject<IMetaStoreClient> client = this.pool.getClient()) {
        for (Map.Entry<String, Map<String, Long>> tableWatermark : this.expectedHighWatermarks.entrySet()) {

            String tableKey = tableWatermark.getKey();
            Map<String, Long> partitionWatermarks = tableWatermark.getValue();

            // Watermark workunits are required only for partitioned tables
            // tableKey is the fully qualified table name in the format db@table
            if (!HiveUtils.isPartitioned(new org.apache.hadoop.hive.ql.metadata.Table(
                    client.get().getTable(tableKey.split("@")[0], tableKey.split("@")[1])))) {
                continue;
            }
            // We only keep watermarks for partitions that were updated after leastWatermarkToPersistInState
            Map<String, Long> expectedPartitionWatermarks = ImmutableMap
                    .copyOf(Maps.filterEntries(partitionWatermarks, new Predicate<Map.Entry<String, Long>>() {

                        @Override
                        public boolean apply(@Nonnull Map.Entry<String, Long> input) {
                            return Long.compare(input.getValue(),
                                    PartitionLevelWatermarker.this.leastWatermarkToPersistInState) >= 0;
                        }
                    }));

            // Create dummy workunit to track all the partition watermarks for this table
            WorkUnit watermarkWorkunit = WorkUnit.createEmpty();
            watermarkWorkunit.setProp(IS_WATERMARK_WORKUNIT_KEY, true);
            watermarkWorkunit.setProp(ConfigurationKeys.DATASET_URN_KEY, tableKey);

            watermarkWorkunit.setWatermarkInterval(
                    new WatermarkInterval(new MultiKeyValueLongWatermark(this.previousWatermarks.get(tableKey)),
                            new MultiKeyValueLongWatermark(expectedPartitionWatermarks)));

            workunits.add(watermarkWorkunit);
        }
    } catch (IOException | TException e) {
        Throwables.propagate(e);
    }
}
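
The ImmutableMap.copyOf(...) wrapper above is deliberate: Maps.filterEntries returns a lazy view over the backing map, so copying materializes the filtered result once instead of re-evaluating the predicate on every access. On Java 8+ the anonymous Predicate also collapses to a lambda; a sketch of the same filter, reusing the names from the snippet above:

Map<String, Long> expectedPartitionWatermarks = ImmutableMap.copyOf(
        Maps.filterEntries(partitionWatermarks,
                input -> input.getValue() >= this.leastWatermarkToPersistInState));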

From source file:com.netxforge.netxstudio.server.logic.reporting.ResourceReportingEngine.java

private int tsColumnForValue(Value v) {
    final Date toLookup = NonModelUtils.fromXMLDate(v.getTimeStamp());
    Map<Integer, Date> filterEntries = Maps.filterEntries(columnTS, new Predicate<Entry<Integer, Date>>() {

        public boolean apply(Entry<Integer, Date> input) {
            Date value = input.getValue();
            return value.compareTo(toLookup) == 0;
        }

    });

    // there should only be one entry, ugly hack.
    // http://work.netxforge.com/issues/292
    if (filterEntries.size() == 1) {
        return filterEntries.keySet().iterator().next() - 1;
    }
    return -1;
}
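
Because Maps.filterEntries returns a lazy view, the size() call above walks every entry of columnTS each time. When only the first matching column is needed, a plain loop sidesteps both the view and the single-entry assumption (a sketch, assuming the same columnTS field and helpers):

private int tsColumnForValue(Value v) {
    final Date toLookup = NonModelUtils.fromXMLDate(v.getTimeStamp());
    for (Map.Entry<Integer, Date> entry : columnTS.entrySet()) {
        if (entry.getValue().compareTo(toLookup) == 0) {
            return entry.getKey() - 1; // first match wins
        }
    }
    return -1;
}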

From source file:org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl.java

/**
 * @param config
 *          initial configuration
 */
@SuppressWarnings("deprecation")
public MiniAccumuloClusterImpl(MiniAccumuloConfigImpl config) throws IOException {

    this.config = config.initialize();

    mkdirs(config.getConfDir());
    mkdirs(config.getLogDir());
    mkdirs(config.getLibDir());
    mkdirs(config.getLibExtDir());

    if (!config.useExistingInstance()) {
        if (!config.useExistingZooKeepers())
            mkdirs(config.getZooKeeperDir());
        mkdirs(config.getAccumuloDir());
    }

    if (config.useMiniDFS()) {
        File nn = new File(config.getAccumuloDir(), "nn");
        mkdirs(nn);
        File dn = new File(config.getAccumuloDir(), "dn");
        mkdirs(dn);
        File dfs = new File(config.getAccumuloDir(), "dfs");
        mkdirs(dfs);
        Configuration conf = new Configuration();
        conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
        conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, "1");
        conf.set("dfs.support.append", "true");
        conf.set("dfs.datanode.synconclose", "true");
        conf.set("dfs.datanode.data.dir.perm", MiniDFSUtil.computeDatanodeDirectoryPermission());
        String oldTestBuildData = System.setProperty("test.build.data", dfs.getAbsolutePath());
        miniDFS = new MiniDFSCluster.Builder(conf).build();
        if (oldTestBuildData == null)
            System.clearProperty("test.build.data");
        else
            System.setProperty("test.build.data", oldTestBuildData);
        miniDFS.waitClusterUp();
        InetSocketAddress dfsAddress = miniDFS.getNameNode().getNameNodeAddress();
        dfsUri = "hdfs://" + dfsAddress.getHostName() + ":" + dfsAddress.getPort();
        File coreFile = new File(config.getConfDir(), "core-site.xml");
        writeConfig(coreFile, Collections.singletonMap("fs.default.name", dfsUri).entrySet());
        File hdfsFile = new File(config.getConfDir(), "hdfs-site.xml");
        writeConfig(hdfsFile, conf);

        Map<String, String> siteConfig = config.getSiteConfig();
        siteConfig.put(Property.INSTANCE_DFS_URI.getKey(), dfsUri);
        siteConfig.put(Property.INSTANCE_DFS_DIR.getKey(), "/accumulo");
        config.setSiteConfig(siteConfig);
    } else if (config.useExistingInstance()) {
        dfsUri = config.getHadoopConfiguration().get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
    } else {
        dfsUri = "file:///";
    }

    File clientConfFile = config.getClientConfFile();
    // Write only the properties that correspond to ClientConfiguration properties
    writeConfigProperties(clientConfFile,
            Maps.filterEntries(config.getSiteConfig(),
                    v -> org.apache.accumulo.core.client.ClientConfiguration.ClientProperty
                            .getPropertyByKey(v.getKey()) != null));

    Map<String, String> clientProps = config.getClientProps();
    clientProps.put(ClientProperty.INSTANCE_ZOOKEEPERS.getKey(), config.getZooKeepers());
    clientProps.put(ClientProperty.INSTANCE_NAME.getKey(), config.getInstanceName());
    if (!clientProps.containsKey(ClientProperty.AUTH_TYPE.getKey())) {
        clientProps.put(ClientProperty.AUTH_TYPE.getKey(), "password");
        clientProps.put(ClientProperty.AUTH_PRINCIPAL.getKey(), config.getRootUserName());
        clientProps.put(ClientProperty.AUTH_TOKEN.getKey(), config.getRootPassword());
    }

    File clientPropsFile = config.getClientPropsFile();
    writeConfigProperties(clientPropsFile, clientProps);

    File siteFile = new File(config.getConfDir(), "accumulo.properties");
    writeConfigProperties(siteFile, config.getSiteConfig());
    siteConfig = new SiteConfiguration(siteFile);

    if (!config.useExistingInstance() && !config.useExistingZooKeepers()) {
        zooCfgFile = new File(config.getConfDir(), "zoo.cfg");
        FileWriter fileWriter = new FileWriter(zooCfgFile);

        // zookeeper uses Properties to read its config, so use that to write in order to properly
        // escape things like Windows paths
        Properties zooCfg = new Properties();
        zooCfg.setProperty("tickTime", "2000");
        zooCfg.setProperty("initLimit", "10");
        zooCfg.setProperty("syncLimit", "5");
        zooCfg.setProperty("clientPortAddress", "127.0.0.1");
        zooCfg.setProperty("clientPort", config.getZooKeeperPort() + "");
        zooCfg.setProperty("maxClientCnxns", "1000");
        zooCfg.setProperty("dataDir", config.getZooKeeperDir().getAbsolutePath());
        zooCfg.store(fileWriter, null);

        fileWriter.close();
    }
    clusterControl = new MiniAccumuloClusterControl(this);
}
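
The Maps.filterEntries call above only ever inspects the entry key, so Maps.filterKeys expresses the same intent a little more directly (a sketch against the same deprecated ClientConfiguration API used above):

writeConfigProperties(clientConfFile,
        Maps.filterKeys(config.getSiteConfig(),
                k -> org.apache.accumulo.core.client.ClientConfiguration.ClientProperty
                        .getPropertyByKey(k) != null));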

From source file:org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl.java

/**
 * @param config
 *          initial configuration
 */
@SuppressWarnings("deprecation")
public MiniAccumuloClusterImpl(MiniAccumuloConfigImpl config) throws IOException {

    this.config = config.initialize();

    mkdirs(config.getConfDir());
    mkdirs(config.getLogDir());
    mkdirs(config.getLibDir());
    mkdirs(config.getLibExtDir());

    if (!config.useExistingInstance()) {
        if (!config.useExistingZooKeepers())
            mkdirs(config.getZooKeeperDir());
        mkdirs(config.getWalogDir());
        mkdirs(config.getAccumuloDir());
    }

    if (config.useMiniDFS()) {
        File nn = new File(config.getAccumuloDir(), "nn");
        mkdirs(nn);
        File dn = new File(config.getAccumuloDir(), "dn");
        mkdirs(dn);
        File dfs = new File(config.getAccumuloDir(), "dfs");
        mkdirs(dfs);
        Configuration conf = new Configuration();
        conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
        conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, "1");
        conf.set("dfs.support.append", "true");
        conf.set("dfs.datanode.synconclose", "true");
        conf.set("dfs.datanode.data.dir.perm", MiniDFSUtil.computeDatanodeDirectoryPermission());
        String oldTestBuildData = System.setProperty("test.build.data", dfs.getAbsolutePath());
        miniDFS = new MiniDFSCluster.Builder(conf).build();
        if (oldTestBuildData == null)
            System.clearProperty("test.build.data");
        else
            System.setProperty("test.build.data", oldTestBuildData);
        miniDFS.waitClusterUp();
        InetSocketAddress dfsAddress = miniDFS.getNameNode().getNameNodeAddress();
        dfsUri = "hdfs://" + dfsAddress.getHostName() + ":" + dfsAddress.getPort();
        File coreFile = new File(config.getConfDir(), "core-site.xml");
        writeConfig(coreFile, Collections.singletonMap("fs.default.name", dfsUri).entrySet());
        File hdfsFile = new File(config.getConfDir(), "hdfs-site.xml");
        writeConfig(hdfsFile, conf);

        Map<String, String> siteConfig = config.getSiteConfig();
        siteConfig.put(Property.INSTANCE_DFS_URI.getKey(), dfsUri);
        siteConfig.put(Property.INSTANCE_DFS_DIR.getKey(), "/accumulo");
        config.setSiteConfig(siteConfig);
    } else if (config.useExistingInstance()) {
        dfsUri = CachedConfiguration.getInstance().get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
    } else {
        dfsUri = "file:///";
    }

    File clientConfFile = config.getClientConfFile();
    // Write only the properties that correspond to ClientConfiguration properties
    writeConfigProperties(clientConfFile, Maps.filterEntries(config.getSiteConfig(),
            v -> ClientConfiguration.ClientProperty.getPropertyByKey(v.getKey()) != null));

    File siteFile = new File(config.getConfDir(), "accumulo-site.xml");
    writeConfig(siteFile, config.getSiteConfig().entrySet());

    if (!config.useExistingInstance() && !config.useExistingZooKeepers()) {
        zooCfgFile = new File(config.getConfDir(), "zoo.cfg");
        FileWriter fileWriter = new FileWriter(zooCfgFile);

        // zookeeper uses Properties to read its config, so use that to write in order to properly escape things like Windows paths
        Properties zooCfg = new Properties();
        zooCfg.setProperty("tickTime", "2000");
        zooCfg.setProperty("initLimit", "10");
        zooCfg.setProperty("syncLimit", "5");
        zooCfg.setProperty("clientPortAddress", "127.0.0.1");
        zooCfg.setProperty("clientPort", config.getZooKeeperPort() + "");
        zooCfg.setProperty("maxClientCnxns", "1000");
        zooCfg.setProperty("dataDir", config.getZooKeeperDir().getAbsolutePath());
        zooCfg.store(fileWriter, null);

        fileWriter.close();
    }

    // disable audit logging for mini....
    InputStream auditStream = this.getClass().getResourceAsStream("/auditLog.xml");

    if (auditStream != null) {
        FileUtils.copyInputStreamToFile(auditStream, new File(config.getConfDir(), "auditLog.xml"));
    }

    clusterControl = new MiniAccumuloClusterControl(this);
}

From source file:org.geogit.cli.GeogitCLI.java

/**
 * Return all commands whose name is at a Levenshtein distance of less than 3 from the given
 * command name, as potential candidates for a mistyped command.
 *
 * @param commands the list of all available commands
 * @param commandName the command name
 * @return a map filtered according to distance between command names
 */
private Map<String, JCommander> spellCheck(Map<String, JCommander> commands, final String commandName) {
    Map<String, JCommander> candidates = Maps.filterEntries(commands,
            new Predicate<Map.Entry<String, JCommander>>() {
                @Override
                public boolean apply(@Nullable Entry<String, JCommander> entry) {
                    char[] s1 = entry.getKey().toCharArray();
                    char[] s2 = commandName.toCharArray();
                    // Levenshtein distance computed with two rolling rows of the DP table
                    int[] prev = new int[s2.length + 1];
                    for (int j = 0; j < s2.length + 1; j++) {
                        prev[j] = j;
                    }
                    for (int i = 1; i < s1.length + 1; i++) {
                        int[] curr = new int[s2.length + 1];
                        curr[0] = i;
                        for (int j = 1; j < s2.length + 1; j++) {
                            int d1 = prev[j] + 1; // deletion
                            int d2 = curr[j - 1] + 1; // insertion
                            int d3 = prev[j - 1]; // match, or substitution (+1 below)
                            if (s1[i - 1] != s2[j - 1]) {
                                d3 += 1;
                            }
                            curr[j] = Math.min(Math.min(d1, d2), d3);
                        }
                        prev = curr;
                    }
                    return prev[s2.length] < 3;
                }
            });
    return candidates;
}
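
With a typical command set, a mistyped name such as "comit" would surface "commit" (edit distance 1) as a candidate, while unrelated command names fall outside the distance-3 cutoff.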

From source file:edu.buaa.satla.analysis.cfa.CProgramScope.java

private static Map<String, CSimpleDeclaration> extractUniqueSimpleDeclarations(
        Map<String, CSimpleDeclaration> pQualifiedDeclarations) {
    return Maps
            .transformEntries(Maps.filterEntries(from(pQualifiedDeclarations.values()).index(GET_NAME).asMap(),
                    new Predicate<Map.Entry<String, Collection<CSimpleDeclaration>>>() {

                        @Override
                        public boolean apply(Entry<String, Collection<CSimpleDeclaration>> pArg0) {
                            return pArg0.getValue().size() == 1;
                        }

                    }),
                    new Maps.EntryTransformer<String, Collection<CSimpleDeclaration>, CSimpleDeclaration>() {

                        @Override
                        public CSimpleDeclaration transformEntry(String pArg0,
                                @Nonnull Collection<CSimpleDeclaration> pArg1) {
                            return pArg1.iterator().next();
                        }

                    });
}
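
Read inside-out: from(...).index(GET_NAME) groups the declarations by name, filterEntries keeps only the names that map to exactly one declaration, and transformEntries unwraps the remaining singleton collections. A plain-loop equivalent, assuming the same GET_NAME function:

Map<String, CSimpleDeclaration> result = new HashMap<>();
Set<String> ambiguousNames = new HashSet<>();
for (CSimpleDeclaration decl : pQualifiedDeclarations.values()) {
    String name = GET_NAME.apply(decl);
    if (ambiguousNames.contains(name)) {
        continue; // already known to be non-unique
    }
    if (result.put(name, decl) != null) {
        result.remove(name); // second declaration with this name: drop it
        ambiguousNames.add(name);
    }
}
return result;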

From source file:org.apache.abdera2.activities.model.ASBase.java

public Map<String, Object> toMap(Selector<Map.Entry<String, Object>> filter) {
    return Maps.filterEntries(exts, filter);
}

From source file:de.sanandrew.mods.turretmod.entity.turret.TargetProcessor.java

@Override
public List<Class<? extends Entity>> getEnabledEntityTargets() {
    Collection<Class<? extends Entity>> enabledClasses = Maps
            .filterEntries(this.entityTargetList, input -> input != null && input.getValue()).keySet();

    return new ArrayList<>(enabledClasses);
}
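
The predicate here only examines the entry value, so Maps.filterValues states the same intent more directly, and Boolean.TRUE::equals also tolerates null values without the explicit guard (a sketch; getEnabledPlayerTargets below can be rewritten the same way):

Collection<Class<? extends Entity>> enabledClasses =
        Maps.filterValues(this.entityTargetList, Boolean.TRUE::equals).keySet();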

From source file:org.basepom.mojo.duplicatefinder.DuplicateFinderMojo.java

private void checkForDuplicates(final ConflictType type, final ResultCollector resultCollector,
        final ClasspathDescriptor classpathDescriptor, final ArtifactFileResolver artifactFileResolver)
        throws MojoExecutionException, OverConstrainedVersionException {
    // only look at entries with a size > 1.
    final Map<String, Collection<File>> filteredMap = ImmutableMap
            .copyOf(Maps.filterEntries(classpathDescriptor.getClasspathElementLocations(type),
                    new Predicate<Entry<String, Collection<File>>>() {

                        @Override
                        public boolean apply(@Nonnull final Entry<String, Collection<File>> entry) {
                            checkNotNull(entry, "entry is null");
                            checkState(entry.getValue() != null, "Entry '%s' is invalid", entry);

                            return entry.getValue().size() > 1;
                        }

                    }));

    for (final Map.Entry<String, Collection<File>> entry : filteredMap.entrySet()) {
        final String name = entry.getKey();
        final Collection<File> elements = entry.getValue();

        // Resolve the conflicting files to classpath elements whose printable names are either
        // artifact coordinates or project folder names.
        final SortedSet<ClasspathElement> conflictingClasspathElements = artifactFileResolver
                .getClasspathElementsForElements(elements);

        ImmutableSet.Builder<Artifact> artifactBuilder = ImmutableSet.builder();

        boolean bootClasspathConflict = false;
        for (ClasspathElement conflictingClasspathElement : conflictingClasspathElements) {
            bootClasspathConflict |= conflictingClasspathElement.isBootClasspathElement();

            if (conflictingClasspathElement.hasArtifact()) {
                artifactBuilder.add(conflictingClasspathElement.getArtifact());
            } else if (conflictingClasspathElement.isLocalFolder()) {
                artifactBuilder.add(project.getArtifact());
            }
        }

        final boolean excepted = isExcepted(type, name, bootClasspathConflict, artifactBuilder.build());
        final ConflictState conflictState = DuplicateFinderMojo.determineConflictState(type, name, elements);

        resultCollector.addConflict(type, name, conflictingClasspathElements, excepted, conflictState);
    }
}

From source file:de.sanandrew.mods.turretmod.entity.turret.TargetProcessor.java

@Override
public UUID[] getEnabledPlayerTargets() {
    Collection<UUID> enabledUUIDs = Maps
            .filterEntries(this.playerTargetList, input -> input != null && input.getValue()).keySet();

    return enabledUUIDs.toArray(new UUID[enabledUUIDs.size()]);
}