Example usage for org.apache.commons.configuration Configuration containsKey

List of usage examples for org.apache.commons.configuration Configuration containsKey

Introduction

On this page you can find example usage of org.apache.commons.configuration Configuration containsKey.

Prototype

boolean containsKey(String key);

Source Link

Document

Check if the configuration contains the specified key.

Usage

From source file:org.apache.accumulo.core.client.impl.ClientContext.java

/**
 * A utility method for converting client configuration to a standard configuration object for use internally.
 *
 * <p>Lookup order in {@code get}: Hadoop CredentialProvider (sensitive properties only),
 * then the client config itself, then the Accumulo defaults.
 *
 * @param config
 *          the original {@link ClientConfiguration}
 * @return the client configuration presented in the form of an {@link AccumuloConfiguration}
 */
public static AccumuloConfiguration convertClientConfig(final Configuration config) {

    // Fallback source for any property the client config does not supply
    final AccumuloConfiguration defaults = DefaultConfiguration.getInstance();

    return new AccumuloConfiguration() {
        @Override
        public String get(Property property) {
            final String key = property.getKey();

            // Attempt to load sensitive properties from a CredentialProvider, if configured
            if (property.isSensitive()) {
                org.apache.hadoop.conf.Configuration hadoopConf = getHadoopConfiguration();
                if (null != hadoopConf) {
                    try {
                        char[] value = CredentialProviderFactoryShim.getValueFromCredentialProvider(hadoopConf,
                                key);
                        if (null != value) {
                            log.trace("Loaded sensitive value for {} from CredentialProvider", key);
                            return new String(value);
                        } else {
                            log.trace(
                                    "Tried to load sensitive value for {} from CredentialProvider, but none was found",
                                    key);
                        }
                    } catch (IOException e) {
                        // Best-effort lookup: log and fall through to the client config / defaults below
                        log.warn(
                                "Failed to extract sensitive property ({}) from Hadoop CredentialProvider, falling back to base AccumuloConfiguration",
                                key, e);
                    }
                }
            }

            // Client config wins over defaults when it supplies the key
            if (config.containsKey(key))
                return config.getString(key);
            else {
                // Reconstitute the server kerberos property from the client config
                if (Property.GENERAL_KERBEROS_PRINCIPAL == property) {
                    if (config.containsKey(ClientProperty.KERBEROS_SERVER_PRIMARY.getKey())) {
                        // Avoid providing a realm since we don't know what it is...
                        return config.getString(ClientProperty.KERBEROS_SERVER_PRIMARY.getKey()) + "/_HOST@"
                                + SaslConnectionParams.getDefaultRealm();
                    }
                }
                return defaults.get(property);
            }
        }

        @Override
        public void getProperties(Map<String, String> props, Predicate<String> filter) {
            // Seed with the defaults, then overlay every matching client-config key
            defaults.getProperties(props, filter);

            Iterator<?> keyIter = config.getKeys();
            while (keyIter.hasNext()) {
                String key = keyIter.next().toString();
                if (filter.test(key))
                    props.put(key, config.getString(key));
            }

            // Two client props that don't exist on the server config. Client doesn't need to know about the Kerberos instance from the principle, but servers do
            // Automatically reconstruct the server property when converting a client config.
            if (props.containsKey(ClientProperty.KERBEROS_SERVER_PRIMARY.getKey())) {
                final String serverPrimary = props.remove(ClientProperty.KERBEROS_SERVER_PRIMARY.getKey());
                if (filter.test(Property.GENERAL_KERBEROS_PRINCIPAL.getKey())) {
                    // Use the _HOST expansion. It should be unnecessary in "client land".
                    props.put(Property.GENERAL_KERBEROS_PRINCIPAL.getKey(),
                            serverPrimary + "/_HOST@" + SaslConnectionParams.getDefaultRealm());
                }
            }

            // Attempt to load sensitive properties from a CredentialProvider, if configured.
            // Values found there are put last, so they override anything collected above.
            org.apache.hadoop.conf.Configuration hadoopConf = getHadoopConfiguration();
            if (null != hadoopConf) {
                try {
                    for (String key : CredentialProviderFactoryShim.getKeys(hadoopConf)) {
                        // Only surface keys that are recognized, sensitive Accumulo properties
                        if (!Property.isValidPropertyKey(key) || !Property.isSensitive(key)) {
                            continue;
                        }

                        if (filter.test(key)) {
                            char[] value = CredentialProviderFactoryShim
                                    .getValueFromCredentialProvider(hadoopConf, key);
                            if (null != value) {
                                props.put(key, new String(value));
                            }
                        }
                    }
                } catch (IOException e) {
                    // Best-effort: sensitive values simply stay at whatever was collected above
                    log.warn(
                            "Failed to extract sensitive properties from Hadoop CredentialProvider, falling back to accumulo-site.xml",
                            e);
                }
            }
        }

        /**
         * Builds a Hadoop Configuration carrying the credential provider paths taken
         * from the client config, or returns null when no paths are configured.
         */
        private org.apache.hadoop.conf.Configuration getHadoopConfiguration() {
            String credProviderPaths = config
                    .getString(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey());
            if (null != credProviderPaths && !credProviderPaths.isEmpty()) {
                org.apache.hadoop.conf.Configuration hadoopConf = new org.apache.hadoop.conf.Configuration();
                hadoopConf.set(CredentialProviderFactoryShim.CREDENTIAL_PROVIDER_PATH, credProviderPaths);
                return hadoopConf;
            }

            log.trace("Did not find credential provider configuration in ClientConfiguration");

            return null;
        }
    };

}

From source file:org.apache.accumulo.core.client.impl.ServerConfigurationUtil.java

/**
 * Wraps a client {@link Configuration} in front of a base {@link AccumuloConfiguration}:
 * keys present in the client config win, everything else is delegated to the base.
 *
 * @param base fallback configuration consulted when the client config lacks a key
 * @param config client-supplied configuration that takes precedence
 * @return a combined view, client config first
 */
public static AccumuloConfiguration convertClientConfig(final AccumuloConfiguration base,
        final Configuration config) {

    return new AccumuloConfiguration() {
        @Override
        public String get(Property property) {
            // Prefer the client config; otherwise delegate to the base.
            String key = property.getKey();
            return config.containsKey(key) ? config.getString(key) : base.get(property);
        }

        @Override
        public void getProperties(Map<String, String> props, PropertyFilter filter) {
            // Seed with the base properties, then overlay every accepted client key.
            base.getProperties(props, filter);

            @SuppressWarnings("unchecked")
            Iterator<String> keys = config.getKeys();
            while (keys.hasNext()) {
                String key = keys.next();
                if (filter.accept(key)) {
                    props.put(key, config.getString(key));
                }
            }
        }
    };

}

From source file:org.apache.atlas.ha.HAConfiguration.java

/**
 * Return whether HA is enabled or not.
 *
 * <p>An explicit {@code ATLAS_SERVER_HA_ENABLED_KEY} setting decides; otherwise HA is
 * considered enabled when more than one server id is configured.
 *
 * @param configuration underlying configuration instance
 * @return true if HA is enabled
 */
public static boolean isHAEnabled(Configuration configuration) {
    // Explicit flag takes precedence over the inferred value.
    if (configuration.containsKey(HAConfiguration.ATLAS_SERVER_HA_ENABLED_KEY)) {
        return configuration.getBoolean(ATLAS_SERVER_HA_ENABLED_KEY);
    }

    // No explicit flag: infer HA from the number of configured server ids.
    String[] ids = configuration.getStringArray(HAConfiguration.ATLAS_SERVER_IDS);
    return ids != null && ids.length > 1;
}

From source file:org.apache.atlas.ha.HAConfiguration.java

/**
 * Assemble the ZooKeeper connection settings used for HA coordination.
 *
 * <p>The HA-specific connect string is preferred when present; otherwise the Kafka
 * ZooKeeper connect string is reused.
 *
 * @param configuration underlying configuration instance
 * @return the populated {@link ZookeeperProperties}
 */
public static ZookeeperProperties getZookeeperProperties(Configuration configuration) {
    // Prefer the dedicated HA connect string; fall back to the Kafka one.
    final String connectString = configuration.containsKey(HA_ZOOKEEPER_CONNECT)
            ? configuration.getString(HA_ZOOKEEPER_CONNECT)
            : configuration.getString("atlas.kafka." + ZOOKEEPER_PREFIX + "connect");

    final String zkRoot = configuration.getString(ATLAS_SERVER_HA_ZK_ROOT_KEY, ATLAS_SERVER_ZK_ROOT_DEFAULT);

    final int retrySleepMillis = configuration.getInt(HA_ZOOKEEPER_RETRY_SLEEPTIME_MILLIS,
            DEFAULT_ZOOKEEPER_CONNECT_SLEEPTIME_MILLIS);

    final int numRetries = configuration.getInt(HA_ZOOKEEPER_NUM_RETRIES, DEFAULT_ZOOKEEPER_CONNECT_NUM_RETRIES);

    final int sessionTimeout = configuration.getInt(HA_ZOOKEEPER_SESSION_TIMEOUT_MS,
            DEFAULT_ZOOKEEPER_SESSION_TIMEOUT_MILLIS);

    final String acl = configuration.getString(HA_ZOOKEEPER_ACL);
    final String auth = configuration.getString(HA_ZOOKEEPER_AUTH);

    return new ZookeeperProperties(connectString, zkRoot, retrySleepMillis, numRetries, sessionTimeout, acl,
            auth);
}

From source file:org.apache.atlas.web.resources.AdminResource.java

/**
 * Resolve the configured editable entity types as a comma-separated string.
 *
 * <p>The property value may be a single String or a Collection; collection elements
 * are joined with commas. Falls back to {@code DEFAULT_EDITABLE_ENTITY_TYPES} when
 * the config is null, the key is absent, or the value has an unexpected type.
 *
 * @param config configuration to read from (may be null)
 * @return comma-separated editable entity types, never null
 */
private String getEditableEntityTypes(Configuration config) {
    if (config == null || !config.containsKey(editableEntityTypes)) {
        return DEFAULT_EDITABLE_ENTITY_TYPES;
    }

    Object value = config.getProperty(editableEntityTypes);

    if (value instanceof String) {
        return (String) value;
    }

    if (value instanceof Collection) {
        // Wildcard generic instead of the raw Collection type; join elements with commas.
        StringBuilder sb = new StringBuilder();
        for (Object elem : (Collection<?>) value) {
            if (sb.length() > 0) {
                sb.append(",");
            }
            // StringBuilder.append(Object) renders null as "null" instead of throwing NPE
            sb.append(elem);
        }
        return sb.toString();
    }

    // Unexpected value type: keep the default rather than guessing.
    return DEFAULT_EDITABLE_ENTITY_TYPES;
}

From source file:org.apache.bookkeeper.common.conf.ConfigKey.java

/**
 * Validate the setting is valid in the provided config <tt>conf</tt>.
 *
 * <p>When the key is present and a validator is configured, the value must satisfy it.
 * When the key is absent and the setting is required, a {@link ConfigException} is
 * thrown. A present key with no validator is accepted — previously it was incorrectly
 * reported as "required but missing" because the presence and validator checks were
 * combined into a single condition.
 *
 * @param conf configuration to test
 * @throws ConfigException if the value fails validation or a required setting is missing
 */
public void validate(Configuration conf) throws ConfigException {
    if (conf.containsKey(name())) {
        // Key is present: validate only when a validator is configured.
        if (validator() != null) {
            Object value = get(conf);
            if (!validator().validate(name(), value)) {
                throw new ConfigException("Invalid setting of '" + name()
                        + "' found in the configuration: value = '" + value + "', requirement = '" + validator
                        + "'");
            }
        }
    } else if (required()) { // missing config on a required field
        throw new ConfigException("Setting '" + name() + "' is required but missing in the configuration");
    }
}

From source file:org.apache.juddi.v3.client.config.ClientConfig.java

/**
 * Reads the {@code client.clerks.clerk} entries from the client configuration and
 * builds a map of clerk name to configured {@link UDDIClerk}, resolving each clerk's
 * node reference and any attached WSDL registrations.
 *
 * @param config the client configuration to read
 * @param uddiNodes previously-parsed nodes, keyed by name; clerk node refs must resolve here
 * @return map of clerk name to configured clerk (empty when no clerks are configured)
 * @throws ConfigurationException if a clerk references an unknown node, or a WSDL's
 *         business key cannot be determined or is not a valid UDDI v3 key
 */
private Map<String, UDDIClerk> readClerkConfig(Configuration config, Map<String, UDDINode> uddiNodes)
        throws ConfigurationException {
    clientName = config.getString("client[@name]");
    clientCallbackUrl = config.getString("client[@callbackUrl]");
    Map<String, UDDIClerk> clerks = new HashMap<String, UDDIClerk>();
    if (config.containsKey("client.clerks.clerk[@name]")) {
        String[] names = config.getStringArray("client.clerks.clerk[@name]");

        log.debug("clerk names=" + names.length);
        for (int i = 0; i < names.length; i++) {
            // One clerk per configured name; attributes are read positionally via "(i)"
            UDDIClerk uddiClerk = new UDDIClerk();
            uddiClerk.setManagerName(clientName);
            uddiClerk.setName(config.getString("client.clerks.clerk(" + i + ")[@name]"));
            String nodeRef = config.getString("client.clerks.clerk(" + i + ")[@node]");
            if (!uddiNodes.containsKey(nodeRef))
                throw new ConfigurationException("Could not find Node with name=" + nodeRef);
            UDDINode uddiNode = uddiNodes.get(nodeRef);
            uddiClerk.setUDDINode(uddiNode);
            uddiClerk.setPublisher(config.getString("client.clerks.clerk(" + i + ")[@publisher]"));
            uddiClerk.setPassword(config.getString("client.clerks.clerk(" + i + ")[@password]"));
            uddiClerk.setIsPasswordEncrypted(
                    config.getBoolean("client.clerks.clerk(" + i + ")[@isPasswordEncrypted]", false));
            uddiClerk.setCryptoProvider(config.getString("client.clerks.clerk(" + i + ")[@cryptoProvider]"));

            // Clerk-level defaults; each WSDL below may override them per-entry
            String clerkBusinessKey = config.getString("client.clerks.clerk(" + i + ")[@businessKey]");
            String clerkBusinessName = config.getString("client.clerks.clerk(" + i + ")[@businessName]");
            String clerkKeyDomain = config.getString("client.clerks.clerk(" + i + ")[@keyDomain]");

            String[] classes = config.getStringArray("client.clerks.clerk(" + i + ").class");
            uddiClerk.setClassWithAnnotations(classes);

            int numberOfWslds = config.getStringArray("client.clerks.clerk(" + i + ").wsdl").length;
            if (numberOfWslds > 0) {
                UDDIClerk.WSDL[] wsdls = new UDDIClerk.WSDL[numberOfWslds];
                for (int w = 0; w < wsdls.length; w++) {
                    UDDIClerk.WSDL wsdl = uddiClerk.new WSDL();
                    String fileName = config.getString("client.clerks.clerk(" + i + ").wsdl(" + w + ")");
                    wsdl.setFileName(fileName);
                    String businessKey = config
                            .getString("client.clerks.clerk(" + i + ").wsdl(" + w + ")[@businessKey]");
                    String businessName = config
                            .getString("client.clerks.clerk(" + i + ").wsdl(" + w + ")[@businessName]");
                    String keyDomain = config
                            .getString("client.clerks.clerk(" + i + ").wsdl(" + w + ")[@keyDomain]");
                    // Business key resolution order: wsdl attr -> clerk attr -> node property -> key convention
                    if (businessKey == null)
                        businessKey = clerkBusinessKey;
                    if (businessKey == null)
                        businessKey = uddiClerk.getUDDINode().getProperties().getProperty("businessKey");
                    if (businessKey == null) {
                        //use key convention to build the businessKey
                        if (businessName == null)
                            businessName = clerkBusinessName;
                        if (keyDomain == null)
                            keyDomain = clerkKeyDomain;
                        if (keyDomain == null)
                            keyDomain = uddiClerk.getUDDINode().getProperties().getProperty("keyDomain");
                        // The key convention needs businessName and keyDomain from somewhere:
                        // locally resolved values or the node's own properties
                        if ((businessName == null
                                && !uddiClerk.getUDDINode().getProperties().containsKey("businessName"))
                                || keyDomain == null
                                        && !uddiClerk.getUDDINode().getProperties().containsKey("keyDomain"))
                            throw new ConfigurationException("Either the wsdl(" + wsdls[w] + ") or clerk ("
                                    + uddiClerk.name
                                    + ") elements require a businessKey, or businessName & keyDomain attributes");
                        else {
                            // Node properties serve as defaults; local values override them
                            Properties properties = new Properties(uddiClerk.getUDDINode().getProperties());
                            if (businessName != null)
                                properties.put("businessName", businessName);
                            if (keyDomain != null)
                                properties.put("keyDomain", keyDomain);
                            businessKey = UDDIKeyConvention.getBusinessKey(properties);
                        }
                    }
                    // Sanity-check UDDI v3 key format: "uddi:<domain>:<...>"
                    if (!businessKey.toLowerCase().startsWith("uddi:")
                            || !businessKey.substring(5).contains(":")) {
                        throw new ConfigurationException("The businessKey " + businessKey
                                + " does not implement a valid UDDI v3 key format.");
                    }
                    wsdl.setBusinessKey(businessKey);
                    if (keyDomain == null) {
                        // Derive the key domain from the businessKey's second segment
                        keyDomain = businessKey.split(":")[1];
                    }
                    wsdl.setKeyDomain(keyDomain);
                    wsdls[w] = wsdl;
                }
                uddiClerk.setWsdls(wsdls);
            }

            clerks.put(names[i], uddiClerk);
        }
    }
    return clerks;
}

From source file:org.apache.marmotta.loader.berkeley.BerkeleyDBLoaderBackend.java

/**
 * Create the RDFHandler to be used for bulk-loading, optionally using the configuration passed as argument.
 *
 * @param configuration/* w w w . ja  v a  2 s  .c  o  m*/
 * @return a newly created RDFHandler instance
 */
@Override
public LoaderHandler createLoader(Configuration configuration) {

    Configuration titanCfg = new MapConfiguration(new HashMap<String, Object>());
    titanCfg.setProperty("storage.backend", "berkeleyje");
    //titanCfg.setProperty("storage.batch-loading", true);

    if (configuration.containsKey("backend.berkeley.storage-directory")) {
        titanCfg.setProperty("storage.directory",
                configuration.getString("backend.berkeley.storage-directory"));
    }

    titanCfg.setProperty("storage.buffer-size", 100000);

    return new TitanLoaderHandler(titanCfg);
}

From source file:org.apache.marmotta.loader.hbase.HBaseLoaderBackend.java

/**
 * Create the RDFHandler to be used for bulk-loading, optionally using the configuration passed as argument.
 *
 * @param configuration loader configuration; may supply HBase host, port, and table name
 * @return a newly created RDFHandler instance
 */
@Override
public LoaderHandler createLoader(Configuration configuration) {
    // Assemble the Titan configuration for the HBase backend.
    Configuration backendConfig = new MapConfiguration(new HashMap<String, Object>());
    backendConfig.setProperty("storage.backend", "hbase");
    //backendConfig.setProperty("storage.batch-loading", true);

    // Connection settings are optional; forward each one only when explicitly configured.
    if (configuration.containsKey("backend.hbase.host")) {
        backendConfig.setProperty("storage.hostname", configuration.getString("backend.hbase.host"));
    }
    if (configuration.containsKey("backend.hbase.port")) {
        backendConfig.setProperty("storage.port", configuration.getInt("backend.hbase.port"));
    }
    if (configuration.containsKey("backend.hbase.table")) {
        backendConfig.setProperty("storage.tablename", configuration.getString("backend.hbase.table"));
    }

    backendConfig.setProperty("ids.block-size", configuration.getInt("backend.hbase.id-block-size", 500000));

    backendConfig.setProperty("storage.buffer-size", 100000);

    return new TitanLoaderHandler(backendConfig);
}

From source file:org.apache.marmotta.platform.core.util.FallbackConfiguration.java

/**
 * Look up a list value, preferring the in-memory configuration when it contains the
 * key and otherwise delegating to the parent configuration.
 */
@Override
public List<Object> getList(String key, List<?> defaultValue) {
    final Configuration mem = getInMemoryConfiguration();
    return mem.containsKey(key) ? mem.getList(key, defaultValue) : super.getList(key, defaultValue);
}