Example usage for org.apache.commons.configuration Configuration getInteger

Introduction

This page collects usage examples for org.apache.commons.configuration Configuration#getInteger drawn from open-source projects.

Prototype

Integer getInteger(String key, Integer defaultValue);

Document

Gets the Integer associated with the given configuration key; if the key does not map to an existing value, the supplied defaultValue is returned instead.
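
A minimal, self-contained sketch of the call pattern (the property names and the file name app.properties are hypothetical, not taken from the examples on this page): the second argument is returned whenever the key is missing, and passing null makes an absent key easy to detect.

import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;

public class GetIntegerExample {
    public static void main(String[] args) throws ConfigurationException {
        // Load a plain properties file (commons-configuration 1.x API).
        Configuration config = new PropertiesConfiguration("app.properties");

        // Returns the configured value, or 8080 when "server.port" is not set.
        Integer port = config.getInteger("server.port", 8080);

        // A null default distinguishes a missing key from any real value.
        Integer poolSize = config.getInteger("db.pool.size", null);
        if (poolSize == null) {
            System.out.println("db.pool.size is not configured");
        }

        System.out.println("port = " + port);
    }
}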

Usage

From source file:at.salzburgresearch.kmt.zkconfig.ZookeeperConfigurationTest.java

@Test
public void testInt() throws Exception {
    Configuration config = new ZookeeperConfiguration(zkConnection, 5000, "/test");

    final String key = UUID.randomUUID().toString();
    final Random random = new Random();
    final int val1 = random.nextInt();
    final Integer val2 = random.nextInt();

    assertThat(config.getProperty(key), nullValue());

    config.setProperty(key, val1);
    assertEquals(val1, config.getInt(key));
    assertEquals(Integer.valueOf(val1), config.getInteger(key, val2));

    config.setProperty(key, val2);
    assertEquals(val2.intValue(), config.getInt(key));
    assertEquals(val2, config.getInteger(key, val1));
}

From source file:com.wizecommerce.hecuba.hector.HectorBasedHecubaClientManager.java

private void configureHectorSpecificProperties() {

    hectorClientConfiguration = new HectorClientConfiguration();
    final Configuration configuration = ConfigUtils.getInstance().getConfiguration();
    hectorClientConfiguration
            .setLoadBalancingPolicy(configuration.getString(HecubaConstants.HECTOR_LOAD_BALANCING_POLICY,
                    HecubaConstants.HECTOR_LOAD_BALANCY_POLICIES.DynamicLoadBalancingPolicy.name()));
    hectorClientConfiguration
            .setMaxActive(configuration.getInteger(HecubaConstants.HECTOR_MAX_ACTIVE_POOLS, 50));
    hectorClientConfiguration.setMaxIdle(configuration.getInteger(HecubaConstants.HECTOR_MAX_IDLE, -1));
    hectorClientConfiguration
            .setRetryDownedHosts(configuration.getBoolean(HecubaConstants.HECTOR_RETRY_DOWN_HOST, true));
    hectorClientConfiguration.setRetryDownedHostsDelayInSeconds(
            configuration.getInteger(HecubaConstants.HECTOR_RETRY_DOWN_HOST_DELAY, 30));
    hectorClientConfiguration.setThriftSocketTimeout(
            configuration.getInteger(HecubaConstants.HECTOR_THRIFT_SOCKET_TIMEOUT, 100));
    hectorClientConfiguration.setUseThriftFramedTransport(
            configuration.getBoolean(HecubaConstants.HECTOR_USE_THRIFT_FRAME_TRANSPORT, true));
}

From source file:com.wizecommerce.hecuba.astyanax.AstyanaxBasedHecubaClientManager.java

public void initialize(String clusterName, String locationUrls, String port, String keyspaceName) {
    astyanaxConfigurationImpl = new AstyanaxConfigurationImpl();
    final Configuration configuration = ConfigUtils.getInstance().getConfiguration();
    astyanaxConfigurationImpl.setDiscoveryType(NodeDiscoveryType
            .valueOf(configuration.getString(HecubaConstants.ASTYANAX_NODE_DISCOVERY_TYPE, "RING_DESCRIBE")));
    astyanaxConfigurationImpl.setConnectionPoolType(ConnectionPoolType
            .valueOf(configuration.getString(HecubaConstants.ASTYANAX_CONNECTION_POOL_TYPE, "TOKEN_AWARE")));

    connectionPoolConfigurationImpl = new ConnectionPoolConfigurationImpl("MyConnectionPool")
            .setPort(Integer.parseInt(port)).setSeeds(getListOfNodesAndPorts(locationUrls, port))
            .setMaxConnsPerHost(configuration.getInteger(HecubaConstants.ASTYANAX_MAX_CONNS_PER_HOST, 3));

    // Will resort hosts per token partition every 10 seconds
    SmaLatencyScoreStrategyImpl smaLatencyScoreStrategyImpl = new SmaLatencyScoreStrategyImpl(
            configuration.getInteger(HecubaConstants.ASTYANAX_LATENCY_AWARE_UPDATE_INTERVAL, 10000),
            configuration.getInteger(HecubaConstants.ASTYANAX_LATENCY_AWARE_RESET_INTERVAL, 10000),
            configuration.getInteger(HecubaConstants.ASTYANAX_LATENCY_AWARE_WINDOW_SIZE, 100),
            configuration.getFloat(HecubaConstants.ASTYANAX_LATENCY_AWARE_BADNESS_INTERVAL, 0.5f));
    // Enable SMA. Omit this to use round robin with a token range.
    connectionPoolConfigurationImpl.setLatencyScoreStrategy(smaLatencyScoreStrategyImpl);

    if (StringUtils.isNotBlank(username) && StringUtils.isNotBlank(password)) {

        SimpleAuthenticationCredentials simpleAuth = new SimpleAuthenticationCredentials(username, password);
        connectionPoolConfigurationImpl.setAuthenticationCredentials(simpleAuth);
    }

    connectionPoolMonitor = new CountingConnectionPoolMonitor();
    context = new AstyanaxContext.Builder().forCluster(clusterName).forKeyspace(keyspaceName)
            .withAstyanaxConfiguration(astyanaxConfigurationImpl)
            .withConnectionPoolConfiguration(connectionPoolConfigurationImpl)
            .withConnectionPoolMonitor(connectionPoolMonitor).buildKeyspace(ThriftFamilyFactory.getInstance());

    context.start();
    keyspace = context.getEntity();

    if (clusterContext == null) {
        initiateClusterContext(clusterName);
    }
}

From source file:edu.lternet.pasta.client.DataPackageManagerClientTest.java

/**
 * Initialize objects before any tests are run.
 */
@BeforeClass
public static void setUpClass() {
    ConfigurationListener.configure();
    Configuration options = ConfigurationListener.getOptions();

    if (options == null) {
        fail("Failed to load the DataPortal properties file: 'dataportal.properties'");
    } else {
        testUser = options.getString("eventservice.uid");
        if (testUser == null) {
            fail("No value found for property: 'eventservice.uid'");
        }
        password = options.getString("eventservice.password");
        if (password == null) {
            fail("No value found for property: 'eventservice.password'");
        }
        testScope = options.getString("dataportal.test.scope");
        if (testScope == null) {
            fail("No value found for DataPortal property 'dataportal.test.scope'");
        }
        testIdentifierStr = options.getString("dataportal.test.identifier");
        if (testIdentifierStr == null) {
            fail("No value found for DataPortal property 'dataportal.test.identifier'");
        }
        testRevisionStr = options.getString("dataportal.test.revision");
        if (testRevisionStr == null) {
            fail("No value found for DataPortal property 'dataportal.test.revision'");
        }
        testUpdateRevisionStr = options.getString("dataportal.test.revision.update");
        if (testUpdateRevisionStr == null) {
            fail("No value found for DataPortal property 'dataportal.test.revision.update'");
        }
        testEntityId = options.getString("dataportal.test.entity.id");
        if (testEntityId == null) {
            fail("No value found for DataPortal property 'dataportal.test.entity.id'");
        }
        testEntityName = options.getString("dataportal.test.entity.name");
        if (testEntityName == null) {
            fail("No value found for DataPortal property 'dataportal.test.entity.name'");
        }
        testEntitySize = options.getInteger("dataportal.test.entity.size", null);
        if (testEntitySize == null) {
            fail("No value found for DataPortal property 'dataportal.test.entity.size'");
        }
        testPath = options.getString("dataportal.test.path");
        if (testPath == null) {
            fail("No value found for DataPortal property 'dataportal.test.path'");
        } else {
            testEmlFileName = options.getString("dataportal.test.emlFileName");
            if (testEmlFileName == null) {
                fail("No value found for DataPortal property 'dataportal.test.emlFileName'");
            } else {
                testEmlFile = new File(testPath, testEmlFileName);
            }
        }
    }

    /*
     * Authenticate the test user
     */
    try {
        LoginClient loginClient = new LoginClient(testUser, password);
        System.err.println("User '" + testUser + "' authenticated.");
    } catch (PastaAuthenticationException e) {
        fail("User '" + testUser + "' failed to authenticate.");
    }

    /*
     * Determine the test identifier value and modify the test
     * EML packageId attribute accordingly
     */
    try {
        dpmClient = new DataPackageManagerClient(testUser);
        testIdentifier = DataPackageManagerClient.determineTestIdentifier(dpmClient, testScope,
                testIdentifierStr);
        String testPackageId = testScope + "." + testIdentifier + "." + testRevisionStr;
        System.err.println("testPackageId: " + testPackageId);
        DataPackageManagerClient.modifyTestEmlFile(testEmlFile, testScope, testPackageId);
    } catch (Exception e) {
        fail(String.format("%s: %s",
                "Error encountered while initializing identifier value prior to running JUnit test.",
                e.getMessage()));
    }
}

From source file:dk.dma.ais.abnormal.analyzer.analysis.FreeFlowAnalysis.java

@Inject
public FreeFlowAnalysis(Configuration configuration, AppStatisticsService statisticsService,
        EventEmittingTracker trackingService, EventRepository eventRepository) {
    super(eventRepository, trackingService, null);
    this.statisticsService = statisticsService;

    this.xL = configuration.getInt(CONFKEY_ANALYSIS_FREEFLOW_XL, 8);
    this.xB = configuration.getInt(CONFKEY_ANALYSIS_FREEFLOW_XB, 8);
    this.dCog = configuration.getFloat(CONFKEY_ANALYSIS_FREEFLOW_DCOG, 15f);
    this.minReportingIntervalMillis = configuration
            .getInt(CONFKEY_ANALYSIS_FREEFLOW_MIN_REPORTING_PERIOD_MINUTES, 60) * 60 * 1000;

    String csvFileNameTmp = configuration.getString(CONFKEY_ANALYSIS_FREEFLOW_CSVFILE, null);
    if (csvFileNameTmp == null || isBlank(csvFileNameTmp)) {
        this.csvFileName = null;
        LOG.warn("Writing of free flow events to CSV file is disabled");
    } else {
        this.csvFileName = csvFileNameTmp.trim();
        LOG.info("Free flow events are appended to CSV file: " + this.csvFileName);
    }

    List<Object> bboxConfig = configuration.getList(CONFKEY_ANALYSIS_FREEFLOW_BBOX);
    if (bboxConfig != null) {
        final double n = Double.valueOf(bboxConfig.get(0).toString());
        final double e = Double.valueOf(bboxConfig.get(1).toString());
        final double s = Double.valueOf(bboxConfig.get(2).toString());
        final double w = Double.valueOf(bboxConfig.get(3).toString());
        this.areaToBeAnalysed = BoundingBox.create(Position.create(n, e), Position.create(s, w),
                CoordinateSystem.CARTESIAN);
    }

    setTrackPredictionTimeMax(configuration.getInteger(CONFKEY_ANALYSIS_FREEFLOW_PREDICTIONTIME_MAX, -1));
    setAnalysisPeriodMillis(configuration.getInt(CONFKEY_ANALYSIS_FREEFLOW_RUN_PERIOD, 30000) * 1000);

    LOG.info(this.getClass().getSimpleName() + " created (" + this + ").");
}

From source file:eu.socialsensor.main.BenchmarkConfiguration.java

public BenchmarkConfiguration(Configuration appconfig) {
    if (appconfig == null) {
        throw new IllegalArgumentException("appconfig may not be null");
    }

    Configuration eu = appconfig.subset("eu");
    Configuration socialsensor = eu.subset("socialsensor");

    //metrics
    final Configuration metrics = socialsensor.subset(GraphDatabaseConfiguration.METRICS_NS.getName());

    final Configuration graphite = metrics.subset(GRAPHITE);
    this.graphiteHostname = graphite.getString(GRAPHITE_HOSTNAME, null);
    this.graphiteReportingInterval = graphite.getLong(GraphDatabaseConfiguration.GRAPHITE_INTERVAL.getName(),
            1000 /*default 1sec*/);

    final Configuration csv = metrics.subset(CSV);
    this.csvReportingInterval = metrics.getLong(CSV_INTERVAL, 1000 /*ms*/);
    this.csvDir = csv.containsKey(CSV_DIR)
            ? new File(csv.getString(CSV_DIR, System.getProperty("user.dir") /*default*/))
            : null;

    Configuration dynamodb = socialsensor.subset("dynamodb");
    this.dynamodbWorkerThreads = dynamodb.getInt("workers", 25);
    Configuration credentials = dynamodb.subset(CREDENTIALS);
    this.dynamodbPrecreateTables = dynamodb.getBoolean("precreate-tables", Boolean.FALSE);
    this.dynamodbTps = Math.max(1, dynamodb.getLong(TPS, 750 /*default*/));
    this.dynamodbConsistentRead = dynamodb.containsKey(CONSISTENT_READ) ? dynamodb.getBoolean(CONSISTENT_READ)
            : false;
    this.dynamodbDataModel = dynamodb.containsKey("data-model")
            ? BackendDataModel.valueOf(dynamodb.getString("data-model"))
            : null;
    this.dynamodbCredentialsFqClassName = credentials.containsKey(CLASS_NAME)
            ? credentials.getString(CLASS_NAME)
            : null;
    this.dynamodbCredentialsCtorArguments = credentials.containsKey(CONSTRUCTOR_ARGS)
            ? credentials.getString(CONSTRUCTOR_ARGS)
            : null;
    this.dynamodbEndpoint = dynamodb.containsKey(ENDPOINT) ? dynamodb.getString(ENDPOINT) : null;
    this.dynamodbTablePrefix = dynamodb.containsKey(TABLE_PREFIX) ? dynamodb.getString(TABLE_PREFIX)
            : Constants.DYNAMODB_TABLE_PREFIX.getDefaultValue();

    Configuration orient = socialsensor.subset("orient");
    orientLightweightEdges = orient.containsKey(LIGHTWEIGHT_EDGES) ? orient.getBoolean(LIGHTWEIGHT_EDGES)
            : null;

    Configuration sparksee = socialsensor.subset("sparksee");
    sparkseeLicenseKey = sparksee.containsKey(LICENSE_KEY) ? sparksee.getString(LICENSE_KEY) : null;

    Configuration titan = socialsensor.subset(TITAN); //TODO(amcp) move dynamodb ns into titan
    bufferSize = titan.getInt(BUFFER_SIZE, GraphDatabaseConfiguration.BUFFER_SIZE.getDefaultValue());
    blocksize = titan.getInt(IDS_BLOCKSIZE, GraphDatabaseConfiguration.IDS_BLOCK_SIZE.getDefaultValue());
    pageSize = titan.getInt(PAGE_SIZE, GraphDatabaseConfiguration.PAGE_SIZE.getDefaultValue());

    // database storage directory
    if (!socialsensor.containsKey(DATABASE_STORAGE_DIRECTORY)) {
        throw new IllegalArgumentException("configuration must specify database-storage-directory");
    }
    dbStorageDirectory = new File(socialsensor.getString(DATABASE_STORAGE_DIRECTORY));
    dataset = validateReadableFile(socialsensor.getString(DATASET), DATASET);

    // load the dataset
    DatasetFactory.getInstance().getDataset(dataset);

    if (!socialsensor.containsKey(PERMUTE_BENCHMARKS)) {
        throw new IllegalArgumentException("configuration must set permute-benchmarks to true or false");
    }
    permuteBenchmarks = socialsensor.getBoolean(PERMUTE_BENCHMARKS);

    List<?> benchmarkList = socialsensor.getList("benchmarks");
    benchmarkTypes = new ArrayList<BenchmarkType>();
    for (Object str : benchmarkList) {
        benchmarkTypes.add(BenchmarkType.valueOf(str.toString()));
    }

    selectedDatabases = new TreeSet<GraphDatabaseType>();
    for (Object database : socialsensor.getList("databases")) {
        if (!GraphDatabaseType.STRING_REP_MAP.keySet().contains(database.toString())) {
            throw new IllegalArgumentException(
                    String.format("selected database %s not supported", database.toString()));
        }
        selectedDatabases.add(GraphDatabaseType.STRING_REP_MAP.get(database));
    }
    scenarios = permuteBenchmarks ? Ints.checkedCast(CombinatoricsUtils.factorial(selectedDatabases.size()))
            : 1;

    resultsPath = new File(System.getProperty("user.dir"), socialsensor.getString("results-path"));
    if (!resultsPath.exists() && !resultsPath.mkdirs()) {
        throw new IllegalArgumentException("unable to create results directory");
    }
    if (!resultsPath.canWrite()) {
        throw new IllegalArgumentException("unable to write to results directory");
    }

    randomNodes = socialsensor.getInteger(RANDOM_NODES, Integer.valueOf(100));

    if (this.benchmarkTypes.contains(BenchmarkType.CLUSTERING)) {
        if (!socialsensor.containsKey(NODES_COUNT)) {
            throw new IllegalArgumentException("the CW benchmark requires nodes-count integer in config");
        }
        nodesCount = socialsensor.getInt(NODES_COUNT);

        if (!socialsensor.containsKey(RANDOMIZE_CLUSTERING)) {
            throw new IllegalArgumentException("the CW benchmark requires randomize-clustering bool in config");
        }
        randomizedClustering = socialsensor.getBoolean(RANDOMIZE_CLUSTERING);

        if (!socialsensor.containsKey(ACTUAL_COMMUNITIES)) {
            throw new IllegalArgumentException("the CW benchmark requires a file with actual communities");
        }
        actualCommunities = validateReadableFile(socialsensor.getString(ACTUAL_COMMUNITIES),
                ACTUAL_COMMUNITIES);

        final boolean notGenerating = socialsensor.containsKey(CACHE_VALUES);
        if (notGenerating) {
            List<?> objects = socialsensor.getList(CACHE_VALUES);
            cacheValues = new ArrayList<Integer>(objects.size());
            cacheValuesCount = null;
            cacheIncrementFactor = null;
            for (Object o : objects) {
                cacheValues.add(Integer.valueOf(o.toString()));
            }
        } else if (socialsensor.containsKey(CACHE_VALUES_COUNT)
                && socialsensor.containsKey(CACHE_INCREMENT_FACTOR)) {
            cacheValues = null;
            // generate the cache values with parameters
            if (!socialsensor.containsKey(CACHE_VALUES_COUNT)) {
                throw new IllegalArgumentException(
                        "the CW benchmark requires cache-values-count int in config when cache-values not specified");
            }
            cacheValuesCount = socialsensor.getInt(CACHE_VALUES_COUNT);

            if (!socialsensor.containsKey(CACHE_INCREMENT_FACTOR)) {
                throw new IllegalArgumentException(
                        "the CW benchmark requires cache-increment-factor int in config when cache-values not specified");
            }
            cacheIncrementFactor = socialsensor.getDouble(CACHE_INCREMENT_FACTOR);
        } else {
            throw new IllegalArgumentException(
                    "when doing CW benchmark, must provide cache-values or parameters to generate them");
        }
    } else {
        randomizedClustering = null;
        nodesCount = null;
        cacheValuesCount = null;
        cacheIncrementFactor = null;
        cacheValues = null;
        actualCommunities = null;
    }
}

From source file:org.apache.atlas.util.AtlasRepositoryConfiguration.java

public static int getTypeUpdateLockMaxWaitTimeInSeconds() {
    Integer ret = typeUpdateLockMaxWaitTimeInSeconds;

    if (ret == null) {
        try {
            Configuration config = ApplicationProperties.get();

            ret = config.getInteger(CONFIG_TYPE_UPDATE_LOCK_MAX_WAIT_TIME_IN_SECONDS,
                    DEFAULT_TYPE_UPDATE_LOCK_MAX_WAIT_TIME_IN_SECONDS);

            typeUpdateLockMaxWaitTimeInSeconds = ret;
        } catch (AtlasException e) {
            // ignore
        }
    }

    return ret == null ? DEFAULT_TYPE_UPDATE_LOCK_MAX_WAIT_TIME_IN_SECONDS : ret;
}

From source file:org.apache.bookkeeper.stats.CodahaleMetricsServletProvider.java

@Override
public void start(Configuration conf) {
    super.start(conf);
    Integer httpPort = conf.getInteger("codahaleStatsHttpPort", null);
    if (null != httpPort) {
        servletReporter = new ServletReporter(getMetrics(), healthCheckRegistry, httpPort);
        try {
            servletReporter.start();
        } catch (Exception e) {
            logger.warn("Encountered error on starting the codahale metrics servlet", e);
        }
    }
}

From source file:org.apache.james.backends.cassandra.init.configuration.CassandraConfiguration.java

public static CassandraConfiguration from(Configuration propertiesConfiguration) {
    return builder()
            .aclMaxRetry(Optional.ofNullable(propertiesConfiguration.getInteger(MAILBOX_MAX_RETRY_ACL, null)))
            .modSeqMaxRetry(
                    Optional.ofNullable(propertiesConfiguration.getInteger(MAILBOX_MAX_RETRY_MODSEQ, null)))
            .uidMaxRetry(Optional.ofNullable(propertiesConfiguration.getInteger(MAILBOX_MAX_RETRY_UID, null)))
            .flagsUpdateMessageMaxRetry(Optional.ofNullable(
                    propertiesConfiguration.getInteger(MAILBOX_MAX_RETRY_MESSAGE_FLAGS_UPDATE, null)))
            .flagsUpdateMessageIdMaxRetry(Optional.ofNullable(
                    propertiesConfiguration.getInteger(MAILBOX_MAX_RETRY_MESSAGE_ID_FLAGS_UPDATE, null)))
            .fetchNextPageInAdvanceRow(
                    Optional.ofNullable(propertiesConfiguration.getInteger(FETCH_ADVANCE_ROW_COUNT, null)))
            .flagsUpdateChunkSize(
                    Optional.ofNullable(propertiesConfiguration.getInteger(CHUNK_SIZE_FLAGS_UPDATE, null)))
            .messageReadChunkSize(
                    Optional.ofNullable(propertiesConfiguration.getInteger(CHUNK_SIZE_MESSAGE_READ, null)))
            .expungeChunkSize(Optional.ofNullable(propertiesConfiguration.getInteger(CHUNK_SIZE_EXPUNGE, null)))
            .blobPartSize(Optional.ofNullable(propertiesConfiguration.getInteger(BLOB_PART_SIZE, null)))
            .attachmentV2MigrationReadTimeout(Optional
                    .ofNullable(propertiesConfiguration.getInteger(ATTACHMENT_V2_MIGRATION_READ_TIMEOUT, null)))
            .messageAttachmentIdsReadTimeout(Optional
                    .ofNullable(propertiesConfiguration.getInteger(MESSAGE_ATTACHMENTID_READ_TIMEOUT, null)))
            .build();
}

From source file:org.apache.james.backends.cassandra.init.configuration.ClusterConfiguration.java

public static ClusterConfiguration from(Configuration configuration) {
    AbstractConfiguration.setDefaultListDelimiter(',');
    return ClusterConfiguration.builder().hosts(listCassandraServers(configuration))
            .keyspace(Optional.ofNullable(configuration.getString(CASSANDRA_KEYSPACE, null)))
            .replicationFactor(Optional.ofNullable(configuration.getInteger(REPLICATION_FACTOR, null)))
            .minDelay(Optional.ofNullable(configuration.getInteger(CONNECTION_RETRY_MIN_DELAY, null)))
            .maxRetry(Optional.ofNullable(configuration.getInteger(CONNECTION_MAX_RETRY, null)))
            .queryLoggerConfiguration(QueryLoggerConfiguration.from(configuration))
            .poolingOptions(readPoolingOptions(configuration))
            .readTimeoutMillis(Optional.ofNullable(configuration.getInteger(READ_TIMEOUT_MILLIS, null)))
            .connectTimeoutMillis(Optional.ofNullable(configuration.getInteger(CONNECT_TIMEOUT_MILLIS, null)))
            .build();
}