Usage examples for org.apache.commons.configuration.Configuration#getStringArray(String)
String[] getStringArray(String key);
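Before the real-world examples below, here is a minimal, self-contained sketch of the call in isolation. It assumes Commons Configuration 1.x defaults; the app.properties file name and the servers key are hypothetical. With the default list delimiter, getStringArray splits a comma-separated property value (or collects repeated keys) into one array entry per element, and returns an empty array for a missing key.

import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;

public class GetStringArrayExample {
    public static void main(String[] args) throws ConfigurationException {
        // Hypothetical properties file containing:
        //   servers = alpha.example.com,beta.example.com
        Configuration conf = new PropertiesConfiguration("app.properties");

        // Yields one entry per comma-separated element: ["alpha.example.com", "beta.example.com"]
        String[] servers = conf.getStringArray("servers");
        for (String server : servers) {
            System.out.println(server);
        }
    }
}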
From source file: com.caricah.iotracah.core.init.ServersInitializer.java

/**
 * <code>configure</code> allows the initializer to configure itself.
 * Depending on the implementation, conditional operation can be allowed
 * so as to make the system instance more specialized.
 * <p>
 * For example: via the configuration, the implementation may decide to
 * shut down backend services and work purely as a server application that
 * receives and routes requests to the workers, which are in turn connected
 * to the backend/datastore servers.
 *
 * @param configuration
 * @throws UnRetriableException
 */
@Override
public void configure(Configuration configuration) throws UnRetriableException {

    boolean configWorkerEnabled = configuration.getBoolean(CORE_CONFIG_ENGINE_SERVER_IS_ENABLED,
            CORE_CONFIG_ENGINE_SERVER_IS_ENABLED_DEFAULT_VALUE);

    log.debug(" configure : The server function is configured to be enabled [{}]", configWorkerEnabled);
    setServerEngineEnabled(configWorkerEnabled);

    String executorName = configuration.getString(CORE_CONFIG_DEFAULT_ENGINE_EXCECUTOR_NAME,
            CORE_CONFIG_DEFAULT_ENGINE_EXCECUTOR_NAME_DEFAULT_VALUE);
    setExecutorDefaultName(executorName);

    executorName = configuration.getString(CORE_CONFIG_ENGINE_EXCECUTOR_EVENT_NAME, getExecutorDefaultName());
    setExecutorEventerName(executorName);

    executorName = configuration.getString(CORE_CONFIG_ENGINE_EXCECUTOR_DATASTORE_NAME, getExecutorDefaultName());
    setExecutorDatastoreName(executorName);

    executorName = configuration.getString(CORE_CONFIG_ENGINE_EXCECUTOR_WORKER_NAME, getExecutorDefaultName());
    setExecutorWorkerName(executorName);

    executorName = configuration.getString(CORE_CONFIG_ENGINE_EXCECUTOR_SERVER_NAME, getExecutorDefaultName());
    setExecutorServerName(executorName);

    boolean excecutorClusterSeparated = configuration.getBoolean(
            CORE_CONFIG_ENGINE_EXCECUTOR_IS_CLUSTER_SEPARATED,
            CORE_CONFIG_ENGINE_EXCECUTOR_IS_CLUSTER_SEPARATED_DEFAULT_VALUE);
    setExecutorClusterSeparated(excecutorClusterSeparated);

    String[] discoveryAddresses = configuration.getStringArray(CORE_CONFIG_ENGINE_CLUSTER_DISCOVERY_ADDRESSES);
    setDiscoveryAddresses(discoveryAddresses);
}
From source file: edu.berkeley.sparrow.examples.HeterogeneousFrontend.java
public void run(String[] args) {
    try {
        OptionParser parser = new OptionParser();
        parser.accepts("c", "configuration file").withRequiredArg().ofType(String.class);
        parser.accepts("help", "print help statement");
        OptionSet options = parser.parse(args);

        if (options.has("help")) {
            parser.printHelpOn(System.out);
            System.exit(-1);
        }

        // Logger configuration: log to the console
        BasicConfigurator.configure();
        LOG.setLevel(Level.DEBUG);

        Configuration conf = new PropertiesConfiguration();
        if (options.has("c")) {
            String configFile = (String) options.valueOf("c");
            conf = new PropertiesConfiguration(configFile);
        }

        double warmupLambda = conf.getDouble("warmup_job_arrival_rate_s", DEFAULT_WARMUP_JOB_ARRIVAL_RATE_S);
        int warmupDurationS = conf.getInt("warmup_s", DEFAULT_WARMUP_S);
        int postWarmupS = conf.getInt("post_warmup_s", DEFAULT_POST_WARMUP_S);

        double lambda = conf.getDouble("job_arrival_rate_s", DEFAULT_JOB_ARRIVAL_RATE_S);
        int experimentDurationS = conf.getInt("experiment_s", DEFAULT_EXPERIMENT_S);
        LOG.debug("Using arrival rate of " + lambda + " tasks per second and running experiment for "
                + experimentDurationS + " seconds.");
        int tasksPerJob = conf.getInt("tasks_per_job", DEFAULT_TASKS_PER_JOB);
        int numPreferredNodes = conf.getInt("num_preferred_nodes", DEFAULT_NUM_PREFERRED_NODES);
        LOG.debug("Using " + numPreferredNodes + " preferred nodes for each task.");
        int benchmarkIterations = conf.getInt("benchmark.iterations", DEFAULT_BENCHMARK_ITERATIONS);
        int benchmarkId = conf.getInt("benchmark.id", DEFAULT_TASK_BENCHMARK);

        List<String> backends = new ArrayList<String>();
        if (numPreferredNodes > 0) {
            /* Attempt to parse the list of slaves, which we'll need to (randomly) select preferred
             * nodes. */
            if (!conf.containsKey(BACKENDS)) {
                LOG.fatal("Missing configuration backend list, which is needed to randomly select "
                        + "preferred nodes (num_preferred_nodes set to " + numPreferredNodes + ")");
            }
            for (String node : conf.getStringArray(BACKENDS)) {
                backends.add(node);
            }
            if (backends.size() < numPreferredNodes) {
                LOG.fatal("Number of backends smaller than number of preferred nodes!");
            }
        }

        List<UserInfo> users = new ArrayList<UserInfo>();
        if (conf.containsKey(USERS)) {
            for (String userSpecification : conf.getStringArray(USERS)) {
                LOG.debug("Reading user specification: " + userSpecification);
                String[] parts = userSpecification.split(":");
                if (parts.length != 3) {
                    LOG.error("Unexpected user specification string: " + userSpecification + "; ignoring user");
                    continue;
                }
                users.add(new UserInfo(parts[0], Integer.parseInt(parts[1]), Integer.parseInt(parts[2])));
            }
        }
        if (users.size() == 0) {
            // Add a dummy user.
            users.add(new UserInfo("defaultUser", 1, 0));
        }

        SparrowFrontendClient client = new SparrowFrontendClient();
        int schedulerPort = conf.getInt("scheduler_port", SchedulerThrift.DEFAULT_SCHEDULER_THRIFT_PORT);
        client.initialize(new InetSocketAddress("localhost", schedulerPort), APPLICATION_ID, this);

        if (warmupDurationS > 0) {
            LOG.debug("Warming up for " + warmupDurationS + " seconds at arrival rate of " + warmupLambda
                    + " jobs per second");
            launchTasks(users, warmupLambda, warmupDurationS, tasksPerJob, numPreferredNodes,
                    benchmarkIterations, benchmarkId, backends, client);
            LOG.debug("Waiting for queues to drain after warmup (waiting " + postWarmupS + " seconds)");
            Thread.sleep(postWarmupS * 1000);
        }
        LOG.debug("Launching experiment for " + experimentDurationS + " seconds");
        launchTasks(users, lambda, experimentDurationS, tasksPerJob, numPreferredNodes,
                benchmarkIterations, benchmarkId, backends, client);
    } catch (Exception e) {
        LOG.error("Fatal exception", e);
    }
}
From source file: edu.berkeley.sparrow.examples.FairnessTestingFrontend.java
public void run(String[] args) {
    try {
        OptionParser parser = new OptionParser();
        parser.accepts("c", "configuration file").withRequiredArg().ofType(String.class);
        parser.accepts("help", "print help statement");
        OptionSet options = parser.parse(args);

        if (options.has("help")) {
            parser.printHelpOn(System.out);
            System.exit(-1);
        }

        // Logger configuration: log to the console
        BasicConfigurator.configure();
        LOG.setLevel(Level.DEBUG);

        Configuration conf = new PropertiesConfiguration();
        if (options.has("c")) {
            String configFile = (String) options.valueOf("c");
            conf = new PropertiesConfiguration(configFile);
        }

        double warmup_lambda = conf.getDouble("warmup_job_arrival_rate_s", DEFAULT_WARMUP_JOB_ARRIVAL_RATE_S);
        int warmup_duration_s = conf.getInt("warmup_s", DEFAULT_WARMUP_S);
        int post_warmup_s = conf.getInt("post_warmup_s", DEFAULT_POST_WARMUP_S);

        // We use this to represent the rate to fully load the cluster. This is a hack.
        double lambda = conf.getDouble("job_arrival_rate_s", DEFAULT_JOB_ARRIVAL_RATE_S);
        int experiment_duration_s = conf.getInt("experiment_s", DEFAULT_EXPERIMENT_S);
        LOG.debug("Using arrival rate of " + lambda + " tasks per second and running experiment for "
                + experiment_duration_s + " seconds.");
        int tasksPerJob = conf.getInt("tasks_per_job", DEFAULT_TASKS_PER_JOB);
        int numPreferredNodes = conf.getInt("num_preferred_nodes", DEFAULT_NUM_PREFERRED_NODES);
        LOG.debug("Using " + numPreferredNodes + " preferred nodes for each task.");
        int benchmarkIterations = conf.getInt("benchmark.iterations", DEFAULT_BENCHMARK_ITERATIONS);
        int benchmarkId = conf.getInt("benchmark.id", DEFAULT_TASK_BENCHMARK);

        List<String> backends = new ArrayList<String>();
        if (numPreferredNodes > 0) {
            /* Attempt to parse the list of slaves, which we'll need to (randomly) select preferred
             * nodes. */
            if (!conf.containsKey(BACKENDS)) {
                LOG.fatal("Missing configuration backend list, which is needed to randomly select "
                        + "preferred nodes (num_preferred_nodes set to " + numPreferredNodes + ")");
            }
            for (String node : conf.getStringArray(BACKENDS)) {
                backends.add(node);
            }
            if (backends.size() < numPreferredNodes) {
                LOG.fatal("Number of backends smaller than number of preferred nodes!");
            }
        }

        List<SubExperiment> experiments = new ArrayList<SubExperiment>();
        double fullyUtilizedArrivalRate = lambda;

        // For the first twenty seconds, the first user submits at a rate to fully utilize the cluster.
        List<UserInfo> onlyUser0 = new ArrayList<UserInfo>();
        onlyUser0.add(new UserInfo("user0", 1, 0));
        experiments.add(new SubExperiment(onlyUser0, 20, fullyUtilizedArrivalRate));

        // For the next 10 seconds, user1 increases her rate to 25% of the cluster.
        List<UserInfo> user1QuarterDemand = new ArrayList<UserInfo>();
        user1QuarterDemand.add(new UserInfo("user0", 4, 0));
        user1QuarterDemand.add(new UserInfo("user1", 5, 0));
        experiments.add(new SubExperiment(user1QuarterDemand, 10, 1.25 * fullyUtilizedArrivalRate));

        // For the next 10 seconds, user 1 increases her rate to 50% of the cluster (using exactly
        // her share, but no more).
        List<UserInfo> user1HalfDemand = new ArrayList<UserInfo>();
        user1HalfDemand.add(new UserInfo("user0", 2, 0));
        user1HalfDemand.add(new UserInfo("user1", 3, 0));
        experiments.add(new SubExperiment(user1HalfDemand, 10, 1.5 * fullyUtilizedArrivalRate));

        // Next, user 1 goes back down to 25%.
        experiments.add(new SubExperiment(user1QuarterDemand, 10, 1.25 * fullyUtilizedArrivalRate));

        // Finally, user 1 goes back to 0.
        experiments.add(new SubExperiment(onlyUser0, 20, fullyUtilizedArrivalRate));

        SparrowFrontendClient client = new SparrowFrontendClient();
        int schedulerPort = conf.getInt("scheduler_port", SchedulerThrift.DEFAULT_SCHEDULER_THRIFT_PORT);
        client.initialize(new InetSocketAddress("localhost", schedulerPort), APPLICATION_ID, this);

        if (warmup_duration_s > 0) {
            List<SubExperiment> warmupExperiment = new ArrayList<SubExperiment>();
            List<UserInfo> warmupUsers = new ArrayList<UserInfo>();
            warmupUsers.add(new UserInfo("warmupUser", 1, 0));
            warmupExperiment.add(new SubExperiment(warmupUsers, warmup_duration_s, warmup_lambda));
            LOG.debug("Warming up for " + warmup_duration_s + " seconds at arrival rate of " + warmup_lambda
                    + " jobs per second");
            launchTasks(warmupExperiment, tasksPerJob, numPreferredNodes, benchmarkIterations, benchmarkId,
                    backends, client);
            LOG.debug("Waiting for queues to drain after warmup (waiting " + post_warmup_s + " seconds)");
            Thread.sleep(post_warmup_s * 1000);
        }
        LOG.debug("Launching experiment for " + experiment_duration_s + " seconds");
        launchTasks(experiments, tasksPerJob, numPreferredNodes, benchmarkIterations, benchmarkId,
                backends, client);
    } catch (Exception e) {
        LOG.error("Fatal exception", e);
    }
}
From source file: dk.itst.oiosaml.sp.service.LoginHandler.java
public void handleGet(RequestContext context) throws ServletException, IOException {
    if (log.isDebugEnabled())
        log.debug("Go to login...");

    IdpMetadata idpMetadata = context.getIdpMetadata();
    Configuration conf = context.getConfiguration();
    HttpServletRequest request = context.getRequest();
    HttpServletResponse response = context.getResponse();

    Metadata metadata;
    if (idpMetadata.enableDiscovery()) {
        log.debug("Discovery profile is active");
        String samlIdp = request.getParameter(Constants.DISCOVERY_ATTRIBUTE);
        if (samlIdp == null) {
            String discoveryLocation = conf.getString(Constants.DISCOVERY_LOCATION);
            log.debug("No _saml_idp discovery value found, redirecting to discovery service at "
                    + discoveryLocation);
            String url = request.getRequestURL().toString();
            if (request.getQueryString() != null) {
                url += "?" + request.getQueryString();
            }
            Audit.log(Operation.DISCOVER, true, "", discoveryLocation);
            HTTPUtils.sendMetaRedirect(response, discoveryLocation, "r=" + URLEncoder.encode(url, "UTF-8"), true);
            return;
        } else if ("".equals(samlIdp)) {
            String defaultIdP = conf.getString(Constants.PROP_DISCOVERY_DEFAULT_IDP, null);
            if (defaultIdP != null) {
                log.debug("No IdP discovered, using default IdP from configuration: " + defaultIdP);
                metadata = idpMetadata.getMetadata(defaultIdP);
            } else {
                if (conf.getBoolean(Constants.PROP_DISCOVERY_PROMPT, false)) {
                    String url = request.getRequestURL().toString();
                    url += "?RelayState=" + request.getParameter(Constants.SAML_RELAYSTATE);
                    promptIdp(context, url);
                    return;
                } else {
                    log.debug("No IdP discovered, using first from metadata");
                    metadata = idpMetadata.getFirstMetadata();
                }
            }
        } else {
            String[] entityIds = SAMLUtil.decodeDiscoveryValue(samlIdp);
            Audit.log(Operation.DISCOVER, false, "", Arrays.asList(entityIds).toString());
            metadata = idpMetadata.findSupportedEntity(entityIds);
            log.debug("Discovered idp " + metadata.getEntityID());
        }
    } else {
        metadata = idpMetadata.getFirstMetadata();
    }
    Audit.log(Operation.DISCOVER, metadata.getEntityID());

    Endpoint signonLocation = metadata.findLoginEndpoint(conf.getStringArray(Constants.PROP_SUPPORTED_BINDINGS));
    if (signonLocation == null) {
        String msg = "Could not find a valid IdP signon location. Supported bindings: "
                + conf.getString(Constants.PROP_SUPPORTED_BINDINGS) + ", available: "
                + metadata.getSingleSignonServices();
        log.error(msg);
        throw new RuntimeException(msg);
    }
    log.debug("Signing on at " + signonLocation);

    BindingHandler bindingHandler = context.getBindingHandlerFactory()
            .getBindingHandler(signonLocation.getBinding());
    log.info("Using idp " + metadata.getEntityID() + " at " + signonLocation.getLocation() + " with binding "
            + signonLocation.getBinding());

    HttpSession session = context.getSession();
    UserAssertion ua = (UserAssertion) session.getAttribute(Constants.SESSION_USER_ASSERTION);
    session.removeAttribute(Constants.SESSION_USER_ASSERTION);
    UserAssertionHolder.set(null);

    String relayState = context.getRequest().getParameter(Constants.SAML_RELAYSTATE);
    OIOAuthnRequest authnRequest = OIOAuthnRequest.buildAuthnRequest(signonLocation.getLocation(),
            context.getSpMetadata().getEntityID(),
            context.getSpMetadata().getDefaultAssertionConsumerService().getBinding(),
            context.getSessionHandler(), relayState,
            context.getSpMetadata().getDefaultAssertionConsumerService().getLocation());
    authnRequest.setNameIDPolicy(conf.getString(Constants.PROP_NAMEID_POLICY, null),
            conf.getBoolean(Constants.PROP_NAMEID_POLICY_ALLOW_CREATE, false));
    authnRequest.setForceAuthn(isForceAuthnEnabled(request, conf));
    if (ua == null) {
        authnRequest.setPasive(conf.getBoolean(Constants.PROP_PASSIVE, false));
    }
    Audit.log(Operation.AUTHNREQUEST_SEND, true, authnRequest.getID(), authnRequest.toXML());

    context.getSessionHandler().registerRequest(authnRequest.getID(), metadata.getEntityID());
    bindingHandler.handle(request, response, context.getCredential(), authnRequest);
}
From source file: ezbake.IntentQuery.Sample.MongoDatasource.Server.MongoExternalDataSourceHandler.java
/***********
 <tablesMetaData>
     <num_table>1</num_table>
     <tables>
         <table>
             <name>impalaTest_users</name>
             <num_columns>6</num_columns>
             <init_string></init_string>
             <columns>
                 <column>
                     <name>user_id</name>
                     <primitiveType>INT</primitiveType>
                     <len></len>
                     <precision></precision>
                     <scale></scale>
                     <ops>LT,LE,EQ,NEQ,GE,GT</ops>
                 </column>
                 ...
                 <column>
                 </column>
             </columns>
         </table>
         <table>
         </table>
     </tables>
 </tablesMetaData>
***********/
private void parseDataSourceMetadata() {
    table_metadata_map = new HashMap<String, TableMetadata>();
    try {
        Configuration xmlConfig = new XMLConfiguration(impalaExternalDsConfigFile);
        dbHost = xmlConfig.getString("mongodb.host");
        dbPort = xmlConfig.getInt("mongodb.port");
        dbName = xmlConfig.getString("mongodb.database_name");

        String key = null;
        String table_name = null;
        int num_columns = -1;
        String init_str = null;
        List<TableColumnDesc> table_col_desc_list = new ArrayList<TableColumnDesc>();

        int total_tables = xmlConfig.getInt("tablesMetaData.num_table");
        for (int i = 0; i < total_tables; i++) {
            // get table name
            key = String.format("tablesMetaData.tables.table(%d).name", i);
            table_name = xmlConfig.getString(key);

            // get table number of columns
            key = String.format("tablesMetaData.tables.table(%d).num_columns", i);
            num_columns = xmlConfig.getInt(key);

            // get table init_string
            key = String.format("tablesMetaData.tables.table(%d).init_string", i);
            init_str = xmlConfig.getString(key);

            // get columns
            String col_name = null;
            String col_type = null;
            int col_len = 0;
            int col_precision = 0;
            int col_scale = 0;
            Set<String> col_ops = null;
            for (int j = 0; j < num_columns; j++) {
                key = String.format("tablesMetaData.tables.table(%d).columns.column(%d).name", i, j);
                col_name = xmlConfig.getString(key);

                key = String.format("tablesMetaData.tables.table(%d).columns.column(%d).primitiveType", i, j);
                col_type = xmlConfig.getString(key);

                if (col_type.equals("CHAR")) {
                    key = String.format("tablesMetaData.tables.table(%d).columns.column(%d).len", i, j);
                    col_len = xmlConfig.getInt(key);
                } else if (col_type.equals("DECIMAL")) {
                    key = String.format("tablesMetaData.tables.table(%d).columns.column(%d).precision", i, j);
                    col_precision = xmlConfig.getInt(key);
                    key = String.format("tablesMetaData.tables.table(%d).columns.column(%d).scale", i, j);
                    col_scale = xmlConfig.getInt(key);
                }

                key = String.format("tablesMetaData.tables.table(%d).columns.column(%d).ops", i, j);
                //List<String> opsList = xmlConfig.getList(key);
                String[] opsArray = xmlConfig.getStringArray(key);
                col_ops = new HashSet<String>(Arrays.asList(opsArray));

                TableColumnDesc tableColumnDesc = new TableColumnDesc(col_name, col_type, col_len,
                        col_precision, col_scale, col_ops);
                table_col_desc_list.add(tableColumnDesc);
            }

            TableMetadata tableMetadata = new TableMetadata(table_name, num_columns, init_str,
                    table_col_desc_list);
            System.out.println(tableMetadata);
            table_metadata_map.put(table_name, tableMetadata);
        }
    } catch (ConfigurationException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
}
From source file: com.vvote.verifier.component.votePacking.VotePackingConfig.java
/**
 * Constructor for a VotePackingConfig object from a String
 *
 * @param configLocation
 *            The filename or filepath of the config in string format
 * @throws ConfigException
 */
public VotePackingConfig(String configLocation) throws ConfigException {
    logger.debug("Reading in Vote Packing specific configuration data");

    if (configLocation == null) {
        logger.error("Cannot successfully create a VotePackingConfig");
        throw new ConfigException("Cannot successfully create a VotePackingConfig");
    }
    if (configLocation.length() == 0) {
        logger.error("Cannot successfully create a VotePackingConfig");
        throw new ConfigException("Cannot successfully create a VotePackingConfig");
    }

    try {
        Configuration config = new PropertiesConfiguration(configLocation);

        if (config.containsKey(ConfigFileConstants.VotePackingConfig.CURVE)) {
            this.curve = config.getString(ConfigFileConstants.VotePackingConfig.CURVE);
        } else {
            logger.error("Cannot successfully create a VotePackingConfig - must contain the name of the curve used");
            throw new ConfigException(
                    "Cannot successfully create a VotePackingConfig - must contain the name of the curve used");
        }

        if (config.containsKey(ConfigFileConstants.VotePackingConfig.PADDING_FILE)) {
            this.paddingFile = config.getString(ConfigFileConstants.VotePackingConfig.PADDING_FILE);
        } else {
            logger.error("Cannot successfully create a VotePackingConfig - must contain the name of the padding file");
            throw new ConfigException(
                    "Cannot successfully create a VotePackingConfig - must contain the name of the padding file");
        }

        if (config.containsKey(ConfigFileConstants.VotePackingConfig.TABLE_LA_LINE_LENGTH)) {
            this.laLineLength = config.getInt(ConfigFileConstants.VotePackingConfig.TABLE_LA_LINE_LENGTH);
        } else {
            this.laLineLength = -1;
        }

        if (config.containsKey(ConfigFileConstants.VotePackingConfig.TABLE_LA_PACKING)) {
            this.laPacking = config.getInt(ConfigFileConstants.VotePackingConfig.TABLE_LA_PACKING);
        } else {
            this.laPacking = -1;
        }

        if (config.containsKey(ConfigFileConstants.VotePackingConfig.TABLE_BTL_LINE_LENGTH)) {
            this.lcBTLLineLength = config.getInt(ConfigFileConstants.VotePackingConfig.TABLE_BTL_LINE_LENGTH);
        } else {
            this.lcBTLLineLength = -1;
        }

        if (config.containsKey(ConfigFileConstants.VotePackingConfig.TABLE_BTL_PACKING)) {
            this.lcBTLPacking = config.getInt(ConfigFileConstants.VotePackingConfig.TABLE_BTL_PACKING);
        } else {
            this.lcBTLPacking = -1;
        }

        if (config.containsKey(ConfigFileConstants.VotePackingConfig.CANDIDATE_TABLES)) {
            this.candidateTablesFolder = config.getString(ConfigFileConstants.VotePackingConfig.CANDIDATE_TABLES);
        } else {
            logger.error("Cannot successfully create a VotePackingConfig - must contain the name of the candidates table");
            throw new ConfigException(
                    "Cannot successfully create a VotePackingConfig - must contain the candidates table");
        }

        this.useDirect = new HashMap<RaceType, Boolean>();
        this.useDirect.put(RaceType.LA, false);
        this.useDirect.put(RaceType.LC_ATL, false);
        this.useDirect.put(RaceType.LC_BTL, false);

        if (config.containsKey(ConfigFileConstants.VotePackingConfig.USE_DIRECT)) {
            String[] directlyUsed = config.getStringArray(ConfigFileConstants.VotePackingConfig.USE_DIRECT);
            for (String race : directlyUsed) {
                RaceType raceType = RaceType.fromString(race);
                if (raceType != null) {
                    this.useDirect.remove(raceType);
                    this.useDirect.put(raceType, true);
                } else {
                    logger.error("Cannot successfully create a VotePackingConfig - misformed use direct race type");
                    throw new ConfigException(
                            "Cannot successfully create a VotePackingConfig - misformed use direct race type");
                }
            }
        }
    } catch (ConfigurationException e) {
        logger.error("Cannot successfully create a VotePackingConfig", e);
        throw new ConfigException("Cannot successfully create a VotePackingConfig", e);
    }
}
From source file: com.evolveum.midpoint.wf.impl.WfConfiguration.java
@PostConstruct
void initialize() {
    Configuration c = midpointConfiguration.getConfiguration(WF_CONFIG_SECTION);
    checkAllowedKeys(c, KNOWN_KEYS, DEPRECATED_KEYS);

    enabled = c.getBoolean(KEY_ENABLED, true);
    if (!enabled) {
        LOGGER.info("Workflows are disabled.");
        return;
    }

    // activiti properties related to database connection will be taken from SQL repository
    SqlRepositoryConfiguration sqlConfig = null;
    String defaultJdbcUrlPrefix = null;
    try {
        RepositoryFactory repositoryFactory = (RepositoryFactory) beanFactory.getBean("repositoryFactory");
        if (!(repositoryFactory.getFactory() instanceof SqlRepositoryFactory)) { // it may be null as well
            LOGGER.debug(
                    "SQL configuration cannot be found; Activiti database configuration (if any) will be taken from 'workflow' configuration section only");
            if (LOGGER.isTraceEnabled()) {
                LOGGER.trace("repositoryFactory.getFactory() = " + repositoryFactory);
            }
        } else {
            SqlRepositoryFactory sqlRepositoryFactory = (SqlRepositoryFactory) repositoryFactory.getFactory();
            sqlConfig = sqlRepositoryFactory.getSqlConfiguration();
            if (sqlConfig.isEmbedded()) {
                defaultJdbcUrlPrefix = sqlRepositoryFactory.prepareJdbcUrlPrefix(sqlConfig);
            }
        }
    } catch (NoSuchBeanDefinitionException e) {
        LOGGER.debug(
                "SqlRepositoryFactory is not available, Activiti database configuration (if any) will be taken from 'workflow' configuration section only.");
        LOGGER.trace("Reason is", e);
    } catch (RepositoryServiceFactoryException e) {
        LoggingUtils.logException(LOGGER, "Cannot determine default JDBC URL for embedded database", e);
    }

    String explicitJdbcUrl = c.getString(KEY_JDBC_URL, null);
    if (explicitJdbcUrl == null) {
        if (sqlConfig.isEmbedded()) {
            jdbcUrl = defaultJdbcUrlPrefix + "-activiti;DB_CLOSE_ON_EXIT=FALSE";
        } else {
            jdbcUrl = sqlConfig.getJdbcUrl();
        }
    } else {
        jdbcUrl = explicitJdbcUrl;
    }

    dataSource = c.getString(KEY_DATA_SOURCE, null);
    if (dataSource == null && explicitJdbcUrl == null) {
        // we want to use wf-specific JDBC if there is one (i.e. we do not want to inherit data source from repo in such a case)
        dataSource = sqlConfig.getDataSource();
    }

    if (dataSource != null) {
        LOGGER.info("Activiti database is at " + dataSource + " (a data source)");
    } else {
        LOGGER.info("Activiti database is at " + jdbcUrl + " (a JDBC URL)");
    }

    activitiSchemaUpdate = c.getBoolean(KEY_ACTIVITI_SCHEMA_UPDATE, true);
    jdbcDriver = c.getString(KEY_JDBC_DRIVER, sqlConfig != null ? sqlConfig.getDriverClassName() : null);
    jdbcUser = c.getString(KEY_JDBC_USERNAME, sqlConfig != null ? sqlConfig.getJdbcUsername() : null);
    jdbcPassword = c.getString(KEY_JDBC_PASSWORD, sqlConfig != null ? sqlConfig.getJdbcPassword() : null);

    processCheckInterval = c.getInt(KEY_PROCESS_CHECK_INTERVAL, 10); // todo set to bigger default for production use

    autoDeploymentFrom = c.getStringArray(KEY_AUTO_DEPLOYMENT_FROM);
    if (autoDeploymentFrom.length == 0) {
        autoDeploymentFrom = new String[] { AUTO_DEPLOYMENT_FROM_DEFAULT };
    }

    allowApproveOthersItems = c.getBoolean(KEY_ALLOW_APPROVE_OTHERS_ITEMS, false);
    if (allowApproveOthersItems) {
        LOGGER.info(
                "allowApproveOthersItems parameter is set to true, therefore authorized users CAN approve/reject work items assigned to other users.");
    }

    // hibernateDialect = sqlConfig != null ? sqlConfig.getHibernateDialect() : "";

    validate();
}
From source file: nl.tudelft.graphalytics.configuration.ConfigurationUtil.java
public static String[] getStringArray(Configuration config, String property)
        throws InvalidConfigurationException {
    ensureConfigurationKeyExists(config, property);
    return config.getStringArray(property);
}
From source file: org.apache.atlas.AtlasAdminClient.java
private int run(String[] args) throws AtlasException {
    CommandLine commandLine = parseCommandLineOptions(args);
    Configuration configuration = ApplicationProperties.get();
    String[] atlasServerUri = configuration.getStringArray(AtlasConstants.ATLAS_REST_ADDRESS_KEY);

    if (atlasServerUri == null || atlasServerUri.length == 0) {
        atlasServerUri = new String[] { AtlasConstants.DEFAULT_ATLAS_REST_ADDRESS };
    }

    AtlasClient atlasClient = null;

    if (!AuthenticationUtil.isKerberosAuthenticationEnabled()) {
        String[] basicAuthUsernamePassword = AuthenticationUtil.getBasicAuthenticationInput();
        atlasClient = new AtlasClient(atlasServerUri, basicAuthUsernamePassword);
    } else {
        atlasClient = new AtlasClient(atlasServerUri, null);
    }
    return handleCommand(commandLine, atlasServerUri, atlasClient);
}
From source file: org.apache.atlas.examples.QuickStart.java
static String[] getServerUrl(String[] args) throws AtlasException {
    if (args.length > 0) {
        return args[0].split(",");
    }

    Configuration configuration = ApplicationProperties.get();
    String[] urls = configuration.getStringArray(ATLAS_REST_ADDRESS);
    if (urls == null || urls.length == 0) {
        System.out.println(
                "Usage: quick_start_v1.py <atlas endpoint of format <http/https>://<atlas-fqdn>:<atlas port> like http://localhost:21000>");
        System.exit(-1);
    }
    return urls;
}