Example usage for org.apache.commons.configuration Configuration setProperty

Introduction

On this page you can find example usages of org.apache.commons.configuration.Configuration.setProperty.

Prototype

void setProperty(String key, Object value);

Document

Set a property; this will replace any previously set values.
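
As a quick orientation before the project examples below, here is a minimal, self-contained sketch. It assumes Commons Configuration 1.x with its BaseConfiguration class on the classpath; the "hosts" key is purely illustrative. It contrasts setProperty, which replaces the stored value, with addProperty, which keeps earlier values and appends the new one.

import org.apache.commons.configuration.BaseConfiguration;
import org.apache.commons.configuration.Configuration;

public class SetPropertyDemo {
    public static void main(String[] args) {
        Configuration config = new BaseConfiguration();

        // setProperty replaces whatever was previously stored under the key
        config.setProperty("storage.lock.expiry-time", 300 * 1000L);
        config.setProperty("storage.lock.expiry-time", 200L);
        System.out.println(config.getLong("storage.lock.expiry-time")); // 200

        // addProperty, by contrast, accumulates values under the same key
        config.addProperty("hosts", "host-a");
        config.addProperty("hosts", "host-b");
        System.out.println(config.getList("hosts")); // [host-a, host-b]
    }
}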

Usage

From source file:com.amazon.janusgraph.ScenarioTests.java

private JanusGraph createGraphWithSchema(final boolean useNativeLocking,
        final boolean useVersionPropertyLocking, final boolean useMetadataVersionIndexLocking,
        final long waitMillis) throws InterruptedException, ExecutionException {
    //http://stackoverflow.com/questions/42090616/titan-dynamodb-doesnt-release-all-acquired-locks-on-commit-via-gremlin/43742619#43742619
    //BEGIN code from second code listing (simplified index setup)
    final Configuration config = TestGraphUtil.instance.createTestGraphConfig(BackendDataModel.MULTI);
    //default lock expiry time is 300*1000 ms = 5 minutes; override it with the supplied waitMillis
    config.setProperty("storage.lock.expiry-time", waitMillis);
    if (!useNativeLocking) {
        //Use JanusGraph locking
        config.setProperty("storage.dynamodb.native-locking", false);

        //Edgestore lock table
        config.setProperty("storage.dynamodb.stores.edgestore_lock_.data-model", BackendDataModel.MULTI.name());
        config.setProperty("storage.dynamodb.stores.edgestore_lock_.scan-limit", 10000);
        config.setProperty("storage.dynamodb.stores.edgestore_lock_.initial-read-capacity", 10);
        config.setProperty("storage.dynamodb.stores.edgestore_lock_.read-rate", 10);
        config.setProperty("storage.dynamodb.stores.edgestore_lock_.initial-write-capacity", 10);
        config.setProperty("storage.dynamodb.stores.edgestore_lock_.write-rate", 10);

        //Graphindex lock table
        config.setProperty("storage.dynamodb.stores.graphindex_lock_.data-model",
                BackendDataModel.MULTI.name());
        config.setProperty("storage.dynamodb.stores.graphindex_lock_.scan-limit", 10000);
        config.setProperty("storage.dynamodb.stores.graphindex_lock_.initial-read-capacity", 10);
        config.setProperty("storage.dynamodb.stores.graphindex_lock_.read-rate", 10);
        config.setProperty("storage.dynamodb.stores.graphindex_lock_.initial-write-capacity", 10);
        config.setProperty("storage.dynamodb.stores.graphindex_lock_.write-rate", 10);

        //system_properties lock table
        config.setProperty("storage.dynamodb.stores.system_properties_lock_.data-model",
                BackendDataModel.MULTI.name());
        config.setProperty("storage.dynamodb.stores.system_properties_lock_.scan-limit", 10000);
        config.setProperty("storage.dynamodb.stores.system_properties_lock_.initial-read-capacity", 1);
        config.setProperty("storage.dynamodb.stores.system_properties_lock_.read-rate", 1);
        config.setProperty("storage.dynamodb.stores.system_properties_lock_.initial-write-capacity", 1);
        config.setProperty("storage.dynamodb.stores.system_properties_lock_.write-rate", 1);
    }
    final JanusGraph graph = JanusGraphFactory.open(config);

    //Management transaction one
    ManagementSystem mgmt = (ManagementSystem) graph.openManagement();
    if (useNativeLocking) {
        System.out.println("mgmt tx one " + getStoreTransaction(mgmt).getId());
    }
    final PropertyKey propertyKey;
    if (mgmt.containsPropertyKey(VERSION_PROPERTY)) {
        propertyKey = mgmt.getPropertyKey(VERSION_PROPERTY);
    } else {
        propertyKey = mgmt.makePropertyKey(VERSION_PROPERTY).dataType(String.class)
                .cardinality(Cardinality.SINGLE).make();
    }
    final VertexLabel labelObj;
    if (mgmt.containsVertexLabel(DATABASE_METADATA_LABEL)) {
        labelObj = mgmt.getVertexLabel(DATABASE_METADATA_LABEL);
    } else {
        labelObj = mgmt.makeVertexLabel(DATABASE_METADATA_LABEL).make();
    }
    final JanusGraphIndex index = mgmt.buildIndex(BY_DATABASE_METADATA_VERSION, Vertex.class)
            .addKey(propertyKey).unique().indexOnly(labelObj).buildCompositeIndex();
    if (useVersionPropertyLocking) {
        mgmt.setConsistency(propertyKey, ConsistencyModifier.LOCK);
    }
    if (useMetadataVersionIndexLocking) {
        mgmt.setConsistency(index, ConsistencyModifier.LOCK);
    }
    mgmt.commit();

    //Management transaction two
    mgmt = (ManagementSystem) graph.openManagement();
    if (useNativeLocking) {
        System.out.println("mgmt tx two " + getStoreTransaction(mgmt).getId());
    }
    final JanusGraphIndex indexAfterFirstCommit = mgmt.getGraphIndex(BY_DATABASE_METADATA_VERSION);
    final PropertyKey propertyKeySecond = mgmt.getPropertyKey(VERSION_PROPERTY);
    if (indexAfterFirstCommit.getIndexStatus(propertyKeySecond) == SchemaStatus.INSTALLED) {
        mgmt.awaitGraphIndexStatus(graph, BY_DATABASE_METADATA_VERSION)
                .status(SchemaStatus.REGISTERED).timeout(10, java.time.temporal.ChronoUnit.MINUTES).call();
    }
    mgmt.commit();

    //Management transaction three
    mgmt = (ManagementSystem) graph.openManagement();
    if (useNativeLocking) {
        System.out.println("mgmt tx three " + getStoreTransaction(mgmt).getId());
    }
    final JanusGraphIndex indexAfterSecondCommit = mgmt.getGraphIndex(BY_DATABASE_METADATA_VERSION);
    final PropertyKey propertyKeyThird = mgmt.getPropertyKey(VERSION_PROPERTY);
    if (indexAfterSecondCommit.getIndexStatus(propertyKeyThird) != SchemaStatus.ENABLED) {
        mgmt.commit();
        mgmt = (ManagementSystem) graph.openManagement();
        if (useNativeLocking) {
            System.out.println("mgmt tx four " + getStoreTransaction(mgmt).getId());
        }
        mgmt.updateIndex(mgmt.getGraphIndex(BY_DATABASE_METADATA_VERSION), SchemaAction.ENABLE_INDEX).get();
        mgmt.commit();
        mgmt = (ManagementSystem) graph.openManagement();
        if (useNativeLocking) {
            System.out.println("mgmt tx five " + getStoreTransaction(mgmt).getId());
        }
        mgmt.awaitGraphIndexStatus(graph, BY_DATABASE_METADATA_VERSION)
                .status(SchemaStatus.ENABLED).timeout(10, java.time.temporal.ChronoUnit.MINUTES).call();
    }
    mgmt.commit();
    //END code from second code listing (simplified index setup)
    return graph;
}

From source file:co.turnus.analysis.profiler.orcc.dynamic.OrccDynamicProfilerOptions.java

private static void setOutputPath(Configuration conf, String customPath) {
    // store the root path for the output.
    File outPath = null;
    boolean traceProject = conf.getBoolean(CREATE_TRACE_PROJECT);
    if (customPath != null && !customPath.isEmpty()) {
        outPath = new File(customPath);
    } else {
        String project = conf.getString(ORCC_PROJECT);
        IProject pojo = EcoreHelper.getProject(project);
        outPath = pojo.getRawLocation().makeAbsolute().toFile();
        outPath = new File(outPath, "turnus");

        if (traceProject) {
            outPath = new File(outPath, "traces");
        } else {
            outPath = new File(outPath, "profiling");
        }
    }

    if (traceProject) {
        String trace = conf.getString(TRACE_PROJECT_NAME);

        // make path os friendly...
        trace = trace.replace(" ", "_");

        File tmpPath = new File(outPath, trace);
        if (tmpPath.exists()) {
            tmpPath = TurnusUtils.findNextAvailable(outPath, trace, "_v", 2);
        }
        outPath = tmpPath;
    }

    conf.setProperty(OUTPUT_PATH, outPath.getAbsolutePath());

}

From source file:com.linkedin.pinot.controller.helix.core.rebalance.ReplicaGroupRebalanceStrategyTest.java

@Test
public void testReplicaGroupRebalanceStrategy() throws Exception {
    Configuration rebalanceUserConfig = new PropertyListConfiguration();
    rebalanceUserConfig.setProperty(RebalanceUserConfigConstants.DRYRUN, false);

    int numInstancesPerPartition = 3;
    ReplicaGroupStrategyConfig replicaGroupStrategyConfig = new ReplicaGroupStrategyConfig();
    replicaGroupStrategyConfig.setNumInstancesPerPartition(numInstancesPerPartition);
    replicaGroupStrategyConfig.setMirrorAssignmentAcrossReplicaGroups(true);

    String tableNameWithType = TableNameBuilder.OFFLINE.tableNameWithType(TABLE_NAME);
    TableConfig tableConfig = _helixResourceManager.getTableConfig(TABLE_NAME,
            CommonConstants.Helix.TableType.OFFLINE);
    tableConfig.getValidationConfig().setReplicaGroupStrategyConfig(replicaGroupStrategyConfig);
    tableConfig.getValidationConfig().setSegmentAssignmentStrategy("ReplicaGroupSegmentAssignmentStrategy");
    tableConfig.getValidationConfig().setReplication("2");

    _helixResourceManager.setExistingTableConfig(tableConfig, tableNameWithType,
            CommonConstants.Helix.TableType.OFFLINE);

    // Test rebalancing after migration from non-replica to replica group table
    _helixResourceManager.rebalanceTable(TABLE_NAME, CommonConstants.Helix.TableType.OFFLINE,
            rebalanceUserConfig);
    Assert.assertTrue(validateTableLevelReplicaGroupRebalance());
    Assert.assertTrue(validateNumSegments(INITIAL_NUM_SEGMENTS));

    // Upload 10 more segments and validate the segment assignment
    addNewSegments();
    while (!allSegmentsPushedToIdealState(TABLE_NAME, INITIAL_NUM_SEGMENTS + 10)) {
        Thread.sleep(100);
    }
    Assert.assertTrue(validateTableLevelReplicaGroupRebalance());

    // Clean up new segments
    removeNewSegments();
    while (!allSegmentsPushedToIdealState(TABLE_NAME, INITIAL_NUM_SEGMENTS)) {
        Thread.sleep(100);
    }

    // Test replace
    _helixAdmin.removeInstanceTag(getHelixClusterName(), "Server_localhost_0", OFFLINE_TENENT_NAME);
    _helixAdmin.addInstanceTag(getHelixClusterName(), "Server_localhost_a", OFFLINE_TENENT_NAME);
    _helixResourceManager.rebalanceTable(TABLE_NAME, CommonConstants.Helix.TableType.OFFLINE,
            rebalanceUserConfig);
    Assert.assertTrue(validateTableLevelReplicaGroupRebalance());
    Assert.assertTrue(validateNumSegments(INITIAL_NUM_SEGMENTS));

    // Upload 10 more segments and validate the segment assignment
    addNewSegments();
    while (!allSegmentsPushedToIdealState(TABLE_NAME, INITIAL_NUM_SEGMENTS + 10)) {
        Thread.sleep(100);
    }
    Assert.assertTrue(validateTableLevelReplicaGroupRebalance());

    // Test replace again
    _helixAdmin.removeInstanceTag(getHelixClusterName(), "Server_localhost_a", OFFLINE_TENENT_NAME);
    _helixAdmin.addInstanceTag(getHelixClusterName(), "Server_localhost_0", OFFLINE_TENENT_NAME);
    _helixResourceManager.rebalanceTable(TABLE_NAME, CommonConstants.Helix.TableType.OFFLINE,
            rebalanceUserConfig);
    Assert.assertTrue(validateTableLevelReplicaGroupRebalance());
    Assert.assertTrue(validateNumSegments(INITIAL_NUM_SEGMENTS + 10));

    // Clean up new segments
    removeNewSegments();
    while (!allSegmentsPushedToIdealState(TABLE_NAME, INITIAL_NUM_SEGMENTS)) {
        Thread.sleep(100);
    }

    // Test adding servers to each replica group
    _helixAdmin.addInstanceTag(getHelixClusterName(), "Server_localhost_a", OFFLINE_TENENT_NAME);
    _helixAdmin.addInstanceTag(getHelixClusterName(), "Server_localhost_b", OFFLINE_TENENT_NAME);
    _helixAdmin.addInstanceTag(getHelixClusterName(), "Server_localhost_c", OFFLINE_TENENT_NAME);
    _helixAdmin.addInstanceTag(getHelixClusterName(), "Server_localhost_d", OFFLINE_TENENT_NAME);

    int targetNumInstancePerPartition = 5;
    int targetNumReplicaGroup = 2;
    updateTableConfig(targetNumInstancePerPartition, targetNumReplicaGroup);
    _helixResourceManager.rebalanceTable(TABLE_NAME, CommonConstants.Helix.TableType.OFFLINE,
            rebalanceUserConfig);
    Assert.assertTrue(validateTableLevelReplicaGroupRebalance());
    Assert.assertTrue(validateNumSegments(INITIAL_NUM_SEGMENTS));

    // Test removing servers from each replica group
    _helixAdmin.removeInstanceTag(getHelixClusterName(), "Server_localhost_a", OFFLINE_TENENT_NAME);
    _helixAdmin.removeInstanceTag(getHelixClusterName(), "Server_localhost_d", OFFLINE_TENENT_NAME);
    targetNumInstancePerPartition = 4;
    targetNumReplicaGroup = 2;
    updateTableConfig(targetNumInstancePerPartition, targetNumReplicaGroup);
    _helixResourceManager.rebalanceTable(TABLE_NAME, CommonConstants.Helix.TableType.OFFLINE,
            rebalanceUserConfig);
    Assert.assertTrue(validateTableLevelReplicaGroupRebalance());
    Assert.assertTrue(validateNumSegments(INITIAL_NUM_SEGMENTS));

    // Upload 10 more segments and validate the segment assignment
    addNewSegments();
    while (!allSegmentsPushedToIdealState(TABLE_NAME, INITIAL_NUM_SEGMENTS + 10)) {
        Thread.sleep(100);
    }
    Assert.assertTrue(validateTableLevelReplicaGroupRebalance());

    // Clean up new segments
    removeNewSegments();
    while (!allSegmentsPushedToIdealState(TABLE_NAME, INITIAL_NUM_SEGMENTS)) {
        Thread.sleep(100);
    }

    // Test removing two more servers from each replica group with force run
    _helixAdmin.removeInstanceTag(getHelixClusterName(), "Server_localhost_b", OFFLINE_TENENT_NAME);
    _helixAdmin.removeInstanceTag(getHelixClusterName(), "Server_localhost_c", OFFLINE_TENENT_NAME);

    targetNumInstancePerPartition = 3;
    targetNumReplicaGroup = 2;
    updateTableConfig(targetNumInstancePerPartition, targetNumReplicaGroup);
    _helixResourceManager.rebalanceTable(TABLE_NAME, CommonConstants.Helix.TableType.OFFLINE,
            rebalanceUserConfig);
    Assert.assertTrue(validateTableLevelReplicaGroupRebalance());
    Assert.assertTrue(validateNumSegments(INITIAL_NUM_SEGMENTS));

    // Test adding a replica group
    _helixAdmin.addInstanceTag(getHelixClusterName(), "Server_localhost_a", OFFLINE_TENENT_NAME);
    _helixAdmin.addInstanceTag(getHelixClusterName(), "Server_localhost_b", OFFLINE_TENENT_NAME);
    _helixAdmin.addInstanceTag(getHelixClusterName(), "Server_localhost_c", OFFLINE_TENENT_NAME);

    targetNumInstancePerPartition = 3;
    targetNumReplicaGroup = 3;
    updateTableConfig(targetNumInstancePerPartition, targetNumReplicaGroup);
    _helixResourceManager.rebalanceTable(TABLE_NAME, CommonConstants.Helix.TableType.OFFLINE,
            rebalanceUserConfig);
    Assert.assertTrue(validateTableLevelReplicaGroupRebalance());
    Assert.assertTrue(validateNumSegments(INITIAL_NUM_SEGMENTS));

    // Upload 10 more segments and validate the segment assignment
    addNewSegments();
    while (!allSegmentsPushedToIdealState(TABLE_NAME, INITIAL_NUM_SEGMENTS + 10)) {
        Thread.sleep(100);
    }
    Assert.assertTrue(validateTableLevelReplicaGroupRebalance());

    // Clean up segments
    removeNewSegments();
    while (!allSegmentsPushedToIdealState(TABLE_NAME, INITIAL_NUM_SEGMENTS)) {
        Thread.sleep(100);
    }

    // Test removing a replica group
    _helixAdmin.removeInstanceTag(getHelixClusterName(), "Server_localhost_0", OFFLINE_TENENT_NAME);
    _helixAdmin.removeInstanceTag(getHelixClusterName(), "Server_localhost_1", OFFLINE_TENENT_NAME);
    _helixAdmin.removeInstanceTag(getHelixClusterName(), "Server_localhost_2", OFFLINE_TENENT_NAME);

    targetNumInstancePerPartition = 3;
    targetNumReplicaGroup = 2;
    updateTableConfig(targetNumInstancePerPartition, targetNumReplicaGroup);
    _helixResourceManager.rebalanceTable(TABLE_NAME, CommonConstants.Helix.TableType.OFFLINE,
            rebalanceUserConfig);
    Assert.assertTrue(validateTableLevelReplicaGroupRebalance());
    Assert.assertTrue(validateNumSegments(INITIAL_NUM_SEGMENTS));
}

From source file:com.linkedin.pinot.tools.perf.PerfBenchmarkDriver.java

private void startBroker() throws Exception {
    if (!_conf.isStartBroker()) {
        LOGGER.info("Skipping start broker step. Assumes broker is already started.");
        return;
    }
    Configuration brokerConfiguration = new PropertiesConfiguration();
    String brokerInstanceName = "Broker_localhost_" + CommonConstants.Helix.DEFAULT_BROKER_QUERY_PORT;
    brokerConfiguration.setProperty("instanceId", brokerInstanceName);
    LOGGER.info("Starting broker instance: {}", brokerInstanceName);
    new HelixBrokerStarter(_clusterName, _zkAddress, brokerConfiguration);
}

From source file:com.evolveum.midpoint.repo.sql.testing.TestSqlRepositoryFactory.java

private void updateConfigurationStringProperty(Configuration configuration, Properties properties,
        String propertyName) {
    String value = properties != null ? properties.getProperty(propertyName) : System.getProperty(propertyName);
    if (value == null) {
        return;
    }
    LOGGER.info("Overriding loaded configuration with value read from system properties: {}={}", propertyName,
            value);
    configuration.setProperty(propertyName, value);
}

From source file:edu.jhuapl.tinkerpop.AccumuloGraphConfigurationTest.java

@Test
public void testConfigurationInterface() throws Exception {
    Configuration conf = AccumuloGraphTestUtils.generateGraphConfig("setPropsValid");
    for (String key : AccumuloGraphConfiguration.getValidInternalKeys()) {
        // Writing arbitrary values for internal keys is not ideal, but valid keys should be accepted.
        conf.setProperty(key, "value");
    }

    conf = AccumuloGraphTestUtils.generateGraphConfig("setPropsInvalid");
    try {
        conf.setProperty("invalidKey", "value");
        fail("setting an unrecognized key should have been rejected");
    } catch (Exception e) {
        // expected: keys outside the valid set are rejected
    }
}

From source file:co.turnus.analysis.buffers.MpcBoundedScheduling.java

public MpcBoundedScheduling(TraceProject traceProject) {
    // unload trace if loaded and not sensitive to token dependencies
    if (traceProject.isTraceLoaded()) {
        if (!traceProject.getTrace().isSensitive(Kind.TOKENS)) {
            traceProject.unloadTrace();
        }
    }

    if (!traceProject.isTraceLoaded()) {
        Configuration config = new BaseConfiguration();
        config.setProperty(SENS_FSM, false);
        config.setProperty(SENS_GUARD, false);
        config.setProperty(SENS_PORT, false);
        config.setProperty(SENS_STATEVAR, false);
        config.setProperty(SENS_TOKENS, true);
        traceProject.loadTrace(config);
    }

    this.traceProject = traceProject;
}

From source file:com.linkedin.pinot.tools.admin.command.StartBrokerCommand.java

@Override
public boolean execute() throws Exception {
    if (_brokerHost == null) {
        _brokerHost = NetUtil.getHostAddress();
    }

    Configuration configuration = readConfigFromFile(_configFileName);
    if (configuration == null) {
        if (_configFileName != null) {
            LOGGER.error("Error: Unable to find file {}.", _configFileName);
            return false;
        }

        configuration = new PropertiesConfiguration();
        configuration.addProperty(CommonConstants.Helix.KEY_OF_BROKER_QUERY_PORT, _brokerPort);
        configuration.setProperty("pinot.broker.routing.table.builder.class", "random");
    }

    LOGGER.info("Executing command: " + toString());
    final HelixBrokerStarter pinotHelixBrokerStarter = new HelixBrokerStarter(_clusterName, _zkAddress,
            configuration);

    String pidFile = ".pinotAdminBroker-" + String.valueOf(System.currentTimeMillis()) + ".pid";
    savePID(System.getProperty("java.io.tmpdir") + File.separator + pidFile);
    return true;
}

From source file:com.evolveum.midpoint.repo.sql.testing.TestSqlRepositoryFactory.java

private void updateConfigurationBooleanProperty(Configuration configuration, Properties properties,
        String propertyName) {
    String value = properties != null ? properties.getProperty(propertyName) : System.getProperty(propertyName);
    if (value == null) {
        return;
    }
    boolean val = Boolean.parseBoolean(value);
    LOGGER.info("Overriding loaded configuration with value read from system properties: {}={}", propertyName,
            val);
    configuration.setProperty(propertyName, val);
}

From source file:com.evolveum.midpoint.repo.sql.testing.TestSqlRepositoryFactory.java

private void updateConfigurationIntegerProperty(Configuration configuration, Properties properties,
        String propertyName) {
    String value = properties != null ? properties.getProperty(propertyName) : System.getProperty(propertyName);
    if (value == null || !value.matches("[1-9][0-9]*")) {
        return;
    }
    int val = Integer.parseInt(value);
    LOGGER.info("Overriding loaded configuration with value read from system properties: {}={}", propertyName,
            val);
    configuration.setProperty(propertyName, val);
}