Example usage for org.apache.commons.configuration Configuration addProperty

List of usage examples for org.apache.commons.configuration Configuration addProperty

Introduction

On this page you can find example usages of org.apache.commons.configuration.Configuration#addProperty.

Prototype

void addProperty(String key, Object value);

Document

Add a property to the configuration. If the key already exists, the new value is appended to the existing entry (so the property ends up holding a list of values); by contrast, setProperty replaces any previous value.
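
Before the longer real-world examples below, here is a minimal, self-contained sketch (assuming commons-configuration 1.x and a plain BaseConfiguration; the class and property names are illustrative, not taken from the examples) showing how addProperty accumulates values for a key while setProperty overwrites them:

import org.apache.commons.configuration.BaseConfiguration;
import org.apache.commons.configuration.Configuration;

public class AddPropertyExample {
    public static void main(String[] args) {
        Configuration config = new BaseConfiguration();

        // addProperty appends: a second call for the same key turns the entry into a list of values
        config.addProperty("resource.loader", "file");
        config.addProperty("resource.loader", "classpath");
        System.out.println(config.getString("resource.loader")); // first value: file
        System.out.println(config.getList("resource.loader"));   // [file, classpath]

        // setProperty replaces whatever was stored under the key
        config.setProperty("resource.loader", "jar");
        System.out.println(config.getList("resource.loader"));   // [jar]
    }
}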

Usage

From source file:com.evolveum.midpoint.init.StartupConfiguration.java

@Override
public Configuration getConfiguration(String componentName) {
    if (null == componentName) {
        throw new IllegalArgumentException("NULL argument");
    }
    Configuration sub = config.subset(componentName);
    // If midpoint.home is set, expose it to the subset; otherwise strip the
    // ${midpoint.home} placeholder from the values
    if (getMidpointHome() != null) {
        sub.addProperty(MIDPOINT_HOME, getMidpointHome());
    } else {
        @SuppressWarnings("unchecked")
        Iterator<String> i = sub.getKeys();
        while (i.hasNext()) {
            String key = i.next();
            sub.setProperty(key, sub.getString(key).replace("${" + MIDPOINT_HOME + "}/", ""));
            sub.setProperty(key, sub.getString(key).replace("${" + MIDPOINT_HOME + "}", ""));
        }
    }

    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("Configuration for {} :", componentName);
        @SuppressWarnings("unchecked")
        Iterator<String> i = sub.getKeys();
        while (i.hasNext()) {
            String key = i.next();
            LOGGER.debug("    {} = {}", key, sub.getString(key));
        }
    }
    return sub;
}

From source file:co.turnus.analysis.partitioning.CommunicationCostPartitioningCli.java

private static Configuration parseCommandLine(CommandLine cmd) throws ParseException {
    Configuration config = new BaseConfiguration();

    StringBuffer s = new StringBuffer();

    config.setProperty(VERBOSE, cmd.hasOption("v"));

    if (!cmd.hasOption("t")) {
        s.append("Trace project directory not specified. ");
    } else {
        String fileName = cmd.getOptionValue("t", "");
        File file = new File(fileName);
        if (file == null || !file.exists()) {
            s.append("Trace project does not exists. ");
        } else {
            config.setProperty(TRACE_PROJECT, fileName);
        }
    }

    if (!cmd.hasOption("o")) {
        s.append("Output directory not specified. ");
    } else {
        String fileName = cmd.getOptionValue("o", "");
        File file = new File(fileName);
        if (file == null || !file.exists()) {
            s.append("Output directory does not exists. ");
        } else {
            config.setProperty(OUTPUT_PATH, fileName);
        }
    }

    if (!cmd.hasOption("p")) {
        s.append("The number of partitions is not specified. ");
    } else {
        String[] sp = cmd.getOptionValues("p");
        if (sp.length == 1) {
            int value = Integer.parseInt(sp[0]);
            config.addProperty(PARTITION_UNITS_MIN, value);
            config.addProperty(PARTITION_UNITS_MAX, value);
            config.addProperty(PARTITION_UNITS_POINTS, 1);
        } else if (sp.length == 2) {
            int min = Integer.parseInt(sp[0]);
            int max = Integer.parseInt(sp[1]);
            config.addProperty(PARTITION_UNITS_MIN, min);
            config.addProperty(PARTITION_UNITS_MAX, max);
            config.addProperty(PARTITION_UNITS_POINTS, max - min + 1);
        } else {
            int min = Integer.parseInt(sp[0]);
            int max = Integer.parseInt(sp[1]);
            int points = Integer.parseInt(sp[2]);
            config.addProperty(PARTITION_UNITS_MIN, min);
            config.addProperty(PARTITION_UNITS_MAX, max);
            config.addProperty(PARTITION_UNITS_POINTS, points);
        }
    }

    if (cmd.hasOption("xls")) {
        String fileName = cmd.getOptionValue("xls", "");
        if (fileName == null || fileName.isEmpty()) {
            s.append("XLS file name is not correct. ");
        } else {
            config.setProperty(XLS, fileName);
        }

    }

    if (cmd.hasOption("xcf")) {
        String fileName = cmd.getOptionValue("xcf", "");
        if (fileName == null || fileName.isEmpty()) {
            s.append("XCF file name is not correct. ");
        } else {
            config.setProperty(XCF, fileName);
        }
    }

    String error = s.toString();
    if (!error.isEmpty()) {
        throw new ParseException(error);
    }

    return config;
}

From source file:com.linkedin.pinot.controller.helix.core.rebalance.DefaultRebalanceStrategyTest.java

@Test
public void testGetRebalancedIdealStateOffline() throws IOException, JSONException {

    String offlineTableName = "letsRebalanceThisTable_OFFLINE";
    TableConfig tableConfig;

    // start with an ideal state, i instances, r replicas, n segments, OFFLINE table
    int nReplicas = 2;
    int nSegments = 5;
    int nInstances = 6;
    List<String> instances = getInstanceList(nInstances);
    when(mockHelixAdmin.getInstancesInClusterWithTag(anyString(), anyString())).thenReturn(instances);
    when(mockHelixAdmin.getInstancesInCluster(anyString())).thenReturn(instances);

    final CustomModeISBuilder customModeIdealStateBuilder = new CustomModeISBuilder(offlineTableName);
    customModeIdealStateBuilder
            .setStateModel(
                    PinotHelixSegmentOnlineOfflineStateModelGenerator.PINOT_SEGMENT_ONLINE_OFFLINE_STATE_MODEL)
            .setNumPartitions(0).setNumReplica(nReplicas).setMaxPartitionsPerNode(1);
    IdealState idealState = customModeIdealStateBuilder.build();
    idealState.setInstanceGroupTag(offlineTableName);
    setInstanceStateMapForIdealStateOffline(idealState, nSegments, nReplicas, instances, offlineTableName);

    Configuration rebalanceUserConfig = new PropertiesConfiguration();
    rebalanceUserConfig.addProperty(RebalanceUserConfigConstants.DRYRUN, true);
    rebalanceUserConfig.addProperty(RebalanceUserConfigConstants.INCLUDE_CONSUMING, false);

    IdealState rebalancedIdealState;
    int targetNumReplicas = nReplicas;

    // rebalance with no change
    tableConfig = new TableConfig.Builder(CommonConstants.Helix.TableType.OFFLINE)
            .setTableName(offlineTableName).setNumReplicas(targetNumReplicas).build();
    rebalancedIdealState = testRebalance(idealState, tableConfig, rebalanceUserConfig, targetNumReplicas,
            nSegments, instances, false);

    // increase i (i > n*r)
    instances = getInstanceList(12);
    when(mockHelixAdmin.getInstancesInClusterWithTag(anyString(), anyString())).thenReturn(instances);
    when(mockHelixAdmin.getInstancesInCluster(anyString())).thenReturn(instances);
    rebalancedIdealState = testRebalance(rebalancedIdealState, tableConfig, rebalanceUserConfig,
            targetNumReplicas, nSegments, instances, true);

    // rebalance with no change
    rebalancedIdealState = testRebalance(rebalancedIdealState, tableConfig, rebalanceUserConfig,
            targetNumReplicas, nSegments, instances, false);

    // remove unused servers
    for (String segment : rebalancedIdealState.getPartitionSet()) {
        for (String server : rebalancedIdealState.getInstanceSet(segment)) {
            instances.remove(server);
        }
    }
    String serverToRemove = instances.get(0);
    instances = getInstanceList(12);
    instances.remove(serverToRemove);
    when(mockHelixAdmin.getInstancesInClusterWithTag(anyString(), anyString())).thenReturn(instances);
    when(mockHelixAdmin.getInstancesInCluster(anyString())).thenReturn(instances);
    rebalancedIdealState = testRebalance(rebalancedIdealState, tableConfig, rebalanceUserConfig,
            targetNumReplicas, nSegments, instances, false);

    // remove used servers
    instances = getInstanceList(8);
    when(mockHelixAdmin.getInstancesInClusterWithTag(anyString(), anyString())).thenReturn(instances);
    when(mockHelixAdmin.getInstancesInCluster(anyString())).thenReturn(instances);
    rebalancedIdealState = testRebalance(rebalancedIdealState, tableConfig, rebalanceUserConfig,
            targetNumReplicas, nSegments, instances, true);

    // replace servers
    String removedServer = instances.remove(0);
    instances.add(removedServer + "_replaced_server");
    when(mockHelixAdmin.getInstancesInClusterWithTag(anyString(), anyString())).thenReturn(instances);
    when(mockHelixAdmin.getInstancesInCluster(anyString())).thenReturn(instances);
    rebalancedIdealState = testRebalance(rebalancedIdealState, tableConfig, rebalanceUserConfig,
            targetNumReplicas, nSegments, instances, true);

    // reduce targetNumReplicas
    targetNumReplicas = 1;
    tableConfig = new TableConfig.Builder(CommonConstants.Helix.TableType.OFFLINE)
            .setTableName(offlineTableName).setNumReplicas(targetNumReplicas).build();
    rebalancedIdealState = testRebalance(rebalancedIdealState, tableConfig, rebalanceUserConfig,
            targetNumReplicas, nSegments, instances, true);

    // increase targetNumReplicas
    targetNumReplicas = 3;
    tableConfig = new TableConfig.Builder(CommonConstants.Helix.TableType.OFFLINE)
            .setTableName(offlineTableName).setNumReplicas(targetNumReplicas).build();
    testRebalance(rebalancedIdealState, tableConfig, rebalanceUserConfig, targetNumReplicas, nSegments,
            instances, true);
}

From source file:eagle.storage.jdbc.conn.impl.TorqueConnectionManagerImpl.java

/**
 * http://db.apache.org/torque/torque-4.0/documentation/orm-reference/initialisation-configuration.html
 * http://commons.apache.org/proper/commons-dbcp/configuration.html
 *
 * @param config
 * @return
 */
private Configuration buildConfiguration(ConnectionConfig config) {
    Configuration configuration = new BaseConfiguration();

    String databaseName = config.getDatabaseName();
    if (databaseName == null) {
        LOG.warn(JdbcConstants.EAGLE_DATABASE + " is null, trying default database name as: eagle");
        databaseName = "eagle";
    }

    LOG.info("Using default database: " + databaseName + " (adapter: " + config.getAdapter() + ")");

    configuration.addProperty("torque.database.default", config.getDatabaseName());

    // This factory uses the SharedDataSource available in the commons-dbcp package
    configuration.addProperty(String.format("torque.dsfactory.%s.factory", databaseName),
            DEFAULT_DATA_SOURCE_FACTORY_CLASS);

    // mysql, oracle, ...
    configuration.addProperty(String.format("torque.database.%s.adapter", databaseName), config.getAdapter());

    // "org.gjt.mm.mysql.Driver"
    configuration.addProperty(String.format("torque.dsfactory.%s.connection.driver", databaseName),
            config.getDriverClassName());

    configuration.addProperty(String.format("torque.dsfactory.%s.connection.url", databaseName),
            config.getConnectionUrl());
    configuration.addProperty(String.format("torque.dsfactory.%s.connection.user", databaseName),
            config.getUserName());
    configuration.addProperty(String.format("torque.dsfactory.%s.connection.password", databaseName),
            config.getPassword());
    configuration.addProperty(String.format("torque.dsfactory.%s.pool.maxActive", databaseName),
            Integer.toString(config.getConnectionMaxActive()));
    //        configuration.addProperty(String.format("torque.dsfactory.%s.pool.minIdle",databaseName),Integer.toString(config.getConnectionMinIdle()));
    //        configuration.addProperty(String.format("torque.dsfactory.%s.pool.initialSize",databaseName),Integer.toString(config.getConnectionInitialSize()));

    return configuration;
}

From source file:com.linkedin.pinot.controller.helix.core.rebalance.DefaultRebalanceStrategyTest.java

@Test
public void testGetRebalancedIdealStateRealtime() throws IOException, JSONException {

    String realtimeTableName = "letsRebalanceThisTable_REALTIME";
    TableConfig tableConfig;

    // new ideal state, i instances, r replicas, p partitions (p consuming segments, n*p completed segments), REALTIME table
    int nReplicas = 2;
    int nPartitions = 4;
    int nIterationsCompleted = 2;
    int nConsumingSegments = 4;
    int nCompletedSegments = nPartitions * nIterationsCompleted;
    int nCompletedInstances = 6;
    int nConsumingInstances = 3;
    PartitionAssignment newPartitionAssignment = new PartitionAssignment(realtimeTableName);
    List<String> completedInstances = getInstanceList(nCompletedInstances);
    when(mockHelixAdmin.getInstancesInClusterWithTag(anyString(), anyString())).thenReturn(completedInstances);
    when(mockHelixAdmin.getInstancesInCluster(anyString())).thenReturn(completedInstances);

    List<String> consumingInstances = getConsumingInstanceList(nConsumingInstances);
    final CustomModeISBuilder customModeIdealStateBuilder = new CustomModeISBuilder(realtimeTableName);
    customModeIdealStateBuilder
            .setStateModel(
                    PinotHelixSegmentOnlineOfflineStateModelGenerator.PINOT_SEGMENT_ONLINE_OFFLINE_STATE_MODEL)
            .setNumPartitions(0).setNumReplica(nReplicas).setMaxPartitionsPerNode(1);
    IdealState idealState = customModeIdealStateBuilder.build();
    idealState.setInstanceGroupTag(realtimeTableName);
    setInstanceStateMapForIdealStateRealtimeCompleted(idealState, nPartitions, nIterationsCompleted, nReplicas,
            completedInstances, realtimeTableName);
    setInstanceStateMapForIdealStateRealtimeConsuming(idealState, newPartitionAssignment, nConsumingSegments, 2,
            nReplicas, consumingInstances, realtimeTableName);

    Configuration rebalanceUserConfig = new PropertiesConfiguration();
    rebalanceUserConfig.addProperty(RebalanceUserConfigConstants.DRYRUN, true);
    rebalanceUserConfig.addProperty(RebalanceUserConfigConstants.INCLUDE_CONSUMING, true);

    IdealState rebalancedIdealState;
    int targetNumReplicas = nReplicas;
    tableConfig = new TableConfig.Builder(CommonConstants.Helix.TableType.REALTIME)
            .setTableName(realtimeTableName).setLLC(true).setNumReplicas(targetNumReplicas).build();

    // no change
    rebalancedIdealState = testRebalanceRealtime(idealState, tableConfig, rebalanceUserConfig,
            newPartitionAssignment, targetNumReplicas, nCompletedSegments, nConsumingSegments,
            completedInstances, consumingInstances);

    // reduce replicas
    targetNumReplicas = 1;
    for (Map.Entry<String, List<String>> entry : newPartitionAssignment.getPartitionToInstances().entrySet()) {
        entry.getValue().remove(1);
    }
    tableConfig = new TableConfig.Builder(CommonConstants.Helix.TableType.REALTIME)
            .setTableName(realtimeTableName).setLLC(true).setNumReplicas(targetNumReplicas).build();
    rebalancedIdealState = testRebalanceRealtime(rebalancedIdealState, tableConfig, rebalanceUserConfig,
            newPartitionAssignment, targetNumReplicas, nCompletedSegments, nConsumingSegments,
            completedInstances, consumingInstances);

    // increase replicas
    targetNumReplicas = 2;
    setPartitionAssignment(newPartitionAssignment, targetNumReplicas, consumingInstances);
    tableConfig = new TableConfig.Builder(CommonConstants.Helix.TableType.REALTIME)
            .setTableName(realtimeTableName).setLLC(true).setNumReplicas(targetNumReplicas).build();
    rebalancedIdealState = testRebalanceRealtime(rebalancedIdealState, tableConfig, rebalanceUserConfig,
            newPartitionAssignment, targetNumReplicas, nCompletedSegments, nConsumingSegments,
            completedInstances, consumingInstances);

    // remove completed server
    nCompletedInstances = 4;
    completedInstances = getInstanceList(nCompletedInstances);
    when(mockHelixAdmin.getInstancesInClusterWithTag(anyString(), anyString())).thenReturn(completedInstances);
    when(mockHelixAdmin.getInstancesInCluster(anyString())).thenReturn(completedInstances);
    rebalancedIdealState = testRebalanceRealtime(rebalancedIdealState, tableConfig, rebalanceUserConfig,
            newPartitionAssignment, targetNumReplicas, nCompletedSegments, nConsumingSegments,
            completedInstances, consumingInstances);

    // add completed server
    nCompletedInstances = 6;
    completedInstances = getInstanceList(nCompletedInstances);
    when(mockHelixAdmin.getInstancesInClusterWithTag(anyString(), anyString())).thenReturn(completedInstances);
    when(mockHelixAdmin.getInstancesInCluster(anyString())).thenReturn(completedInstances);
    rebalancedIdealState = testRebalanceRealtime(rebalancedIdealState, tableConfig, rebalanceUserConfig,
            newPartitionAssignment, targetNumReplicas, nCompletedSegments, nConsumingSegments,
            completedInstances, consumingInstances);

    // remove consuming server
    nConsumingInstances = 2;
    consumingInstances = getConsumingInstanceList(nConsumingInstances);
    setPartitionAssignment(newPartitionAssignment, targetNumReplicas, consumingInstances);

    rebalancedIdealState = testRebalanceRealtime(rebalancedIdealState, tableConfig, rebalanceUserConfig,
            newPartitionAssignment, targetNumReplicas, nCompletedSegments, nConsumingSegments,
            completedInstances, consumingInstances);

    // add consuming server
    nConsumingInstances = 3;
    consumingInstances = getConsumingInstanceList(nConsumingInstances);
    setPartitionAssignment(newPartitionAssignment, targetNumReplicas, consumingInstances);

    rebalancedIdealState = testRebalanceRealtime(rebalancedIdealState, tableConfig, rebalanceUserConfig,
            newPartitionAssignment, targetNumReplicas, nCompletedSegments, nConsumingSegments,
            completedInstances, consumingInstances);

    // change partition assignment, but keep rebalanceConsuming false
    nConsumingInstances = 2;
    consumingInstances = getConsumingInstanceList(nConsumingInstances);
    setPartitionAssignment(newPartitionAssignment, targetNumReplicas, consumingInstances);
    rebalanceUserConfig.addProperty(RebalanceUserConfigConstants.INCLUDE_CONSUMING, false);
    testRebalanceRealtime(rebalancedIdealState, tableConfig, rebalanceUserConfig, newPartitionAssignment,
            targetNumReplicas, nCompletedSegments, nConsumingSegments, completedInstances, consumingInstances);
}

From source file:fr.mby.utils.spring.beans.factory.BasicProxywiredManager.java

/**
 * Update the wiring configuration.
 * 
 * @param dependencies
 * @param dependencyToModify
 */
protected void updateWiringConfiguration(final LinkedHashMap<String, Object> dependencies,
        final IManageableProxywired dependencyToModify) {
    if (this.configuration != null) {
        final IProxywiredIdentifier identifier = dependencyToModify.getIdentifier();
        // Internal method => Identifier cannot be null here !
        Assert.notNull(identifier, "Cannot found valid identifier for this dependency !");

        final String beanNames = StringUtils.collectionToDelimitedString(dependencies.keySet(),
                IProxywiredManager.WIRING_PREFS_SEPARATOR);

        final Configuration elementConfig = identifier.getConfigurationSubset(this.configuration);
        elementConfig.addProperty(IProxywiredManager.WIRED_BEANS_CONFIG_KEY, beanNames);
    }
}

From source file:com.linkedin.pinot.routing.RoutingTableTest.java

@Test
public void testCombinedKafkaRouting() throws Exception {
    HelixExternalViewBasedRouting routingTable = new HelixExternalViewBasedRouting(null, NO_LLC_ROUTING, null,
            new BaseConfiguration());

    final long now = System.currentTimeMillis();
    final String tableName = "table";
    final String resourceName = tableName + "_REALTIME";
    final String group1 = resourceName + "_" + Long.toString(now) + "_0";
    final String group2 = resourceName + "_" + Long.toString(now) + "_1";
    final String online = "ONLINE";
    final String consuming = "CONSUMING";
    final int partitionId = 1;
    final String partitionRange = "JUNK";
    final int segId1 = 1;
    final int segId2 = 2;
    final int port1 = 1;
    final int port2 = 2;
    final String host = "host";
    final ServerInstance serverInstance1 = new ServerInstance(host, port1);
    final ServerInstance serverInstance2 = new ServerInstance(host, port2);
    final String helixInstance1 = CommonConstants.Helix.PREFIX_OF_SERVER_INSTANCE + serverInstance1;
    final String helixInstance2 = CommonConstants.Helix.PREFIX_OF_SERVER_INSTANCE + serverInstance2;
    final HLCSegmentName s1HlcSegment1 = new HLCSegmentName(group1, partitionRange, Integer.toString(segId1));
    final HLCSegmentName s1HlcSegment2 = new HLCSegmentName(group1, partitionRange, Integer.toString(segId2));
    final HLCSegmentName s2HlcSegment1 = new HLCSegmentName(group2, partitionRange, Integer.toString(segId1));
    final HLCSegmentName s2HlcSegment2 = new HLCSegmentName(group2, partitionRange, Integer.toString(segId2));
    final LLCSegmentName llcSegment1 = new LLCSegmentName(tableName, partitionId, segId1, now);
    final LLCSegmentName llcSegment2 = new LLCSegmentName(tableName, partitionId, segId2, now);

    final List<InstanceConfig> instanceConfigs = new ArrayList<>(2);
    instanceConfigs.add(new InstanceConfig(helixInstance1));
    instanceConfigs.add(new InstanceConfig(helixInstance2));
    ExternalView ev = new ExternalView(resourceName);
    ev.setState(s1HlcSegment1.getSegmentName(), helixInstance1, online);
    ev.setState(s1HlcSegment2.getSegmentName(), helixInstance1, online);
    ev.setState(llcSegment1.getSegmentName(), helixInstance2, online);
    ev.setState(llcSegment2.getSegmentName(), helixInstance2, consuming);
    routingTable.markDataResourceOnline(resourceName, ev, instanceConfigs);

    RoutingTableLookupRequest request = new RoutingTableLookupRequest(resourceName,
            Collections.<String>emptyList());
    for (int i = 0; i < 100; i++) {
        Map<ServerInstance, SegmentIdSet> routingMap = routingTable.findServers(request);
        Assert.assertEquals(routingMap.size(), 1);
        List<String> segments = routingMap.get(serverInstance1).getSegmentsNameList();
        Assert.assertEquals(segments.size(), 2);
        Assert.assertTrue(segments.contains(s1HlcSegment1.getSegmentName()));
        Assert.assertTrue(segments.contains(s1HlcSegment2.getSegmentName()));
    }

    // Now change the percent value in the routing table selector to be 100, and we should get only LLC segments.
    Configuration configuration = new PropertiesConfiguration();
    configuration.addProperty("class", PercentageBasedRoutingTableSelector.class.getName());
    configuration.addProperty("table." + resourceName, new Integer(100));
    RoutingTableSelector selector = RoutingTableSelectorFactory.getRoutingTableSelector(configuration, null);
    selector.init(configuration, null);
    Field selectorField = HelixExternalViewBasedRouting.class.getDeclaredField("_routingTableSelector");
    selectorField.setAccessible(true);
    selectorField.set(routingTable, selector);

    // And we should find only LLC segments.
    for (int i = 0; i < 100; i++) {
        Map<ServerInstance, SegmentIdSet> routingMap = routingTable.findServers(request);
        Assert.assertEquals(routingMap.size(), 1);
        List<String> segments = routingMap.get(serverInstance2).getSegmentsNameList();
        Assert.assertEquals(segments.size(), 2);
        Assert.assertTrue(segments.contains(llcSegment1.getSegmentName()));
        Assert.assertTrue(segments.contains(llcSegment2.getSegmentName()));
    }

    // Now change it to 50, and we should find both (at least 10 times each).
    configuration = new PropertiesConfiguration();
    configuration.addProperty("table." + resourceName, new Integer(50));
    selector = new PercentageBasedRoutingTableSelector();
    selector.init(configuration, null);
    selectorField.set(routingTable, selector);

    int hlc = 0;
    int llc = 0;
    for (int i = 0; i < 100; i++) {
        Map<ServerInstance, SegmentIdSet> routingMap = routingTable.findServers(request);
        Assert.assertEquals(routingMap.size(), 1);
        if (routingMap.containsKey(serverInstance2)) {
            List<String> segments = routingMap.get(serverInstance2).getSegmentsNameList();
            Assert.assertEquals(segments.size(), 2);
            Assert.assertTrue(segments.contains(llcSegment1.getSegmentName()));
            Assert.assertTrue(segments.contains(llcSegment2.getSegmentName()));
            llc++;
        } else {
            List<String> segments = routingMap.get(serverInstance1).getSegmentsNameList();
            Assert.assertEquals(segments.size(), 2);
            Assert.assertTrue(segments.contains(s1HlcSegment1.getSegmentName()));
            Assert.assertTrue(segments.contains(s1HlcSegment2.getSegmentName()));
            hlc++;
        }
    }

    // If we do the above iteration 100 times, we should get at least 10 of each type of routing.
    // If this test fails
    Assert.assertTrue(hlc >= 10, "Got low values hlc=" + hlc + ",llc=" + llc);
    Assert.assertTrue(llc >= 10, "Got low values hlc=" + hlc + ",llc=" + llc);

    // Check that force HLC works
    request = new RoutingTableLookupRequest(resourceName, Collections.singletonList("FORCE_HLC"));
    hlc = 0;
    llc = 0;
    for (int i = 0; i < 100; i++) {
        Map<ServerInstance, SegmentIdSet> routingMap = routingTable.findServers(request);
        Assert.assertEquals(routingMap.size(), 1);
        if (routingMap.containsKey(serverInstance2)) {
            List<String> segments = routingMap.get(serverInstance2).getSegmentsNameList();
            Assert.assertEquals(segments.size(), 2);
            Assert.assertTrue(segments.contains(llcSegment1.getSegmentName()));
            Assert.assertTrue(segments.contains(llcSegment2.getSegmentName()));
            llc++;
        } else {
            List<String> segments = routingMap.get(serverInstance1).getSegmentsNameList();
            Assert.assertEquals(segments.size(), 2);
            Assert.assertTrue(segments.contains(s1HlcSegment1.getSegmentName()));
            Assert.assertTrue(segments.contains(s1HlcSegment2.getSegmentName()));
            hlc++;
        }
    }

    Assert.assertEquals(hlc, 100);
    Assert.assertEquals(llc, 0);

    // Check that force LLC works
    request = new RoutingTableLookupRequest(resourceName, Collections.singletonList("FORCE_LLC"));
    hlc = 0;
    llc = 0;
    for (int i = 0; i < 100; i++) {
        Map<ServerInstance, SegmentIdSet> routingMap = routingTable.findServers(request);
        Assert.assertEquals(routingMap.size(), 1);
        if (routingMap.containsKey(serverInstance2)) {
            List<String> segments = routingMap.get(serverInstance2).getSegmentsNameList();
            Assert.assertEquals(segments.size(), 2);
            Assert.assertTrue(segments.contains(llcSegment1.getSegmentName()));
            Assert.assertTrue(segments.contains(llcSegment2.getSegmentName()));
            llc++;
        } else {
            List<String> segments = routingMap.get(serverInstance1).getSegmentsNameList();
            Assert.assertEquals(segments.size(), 2);
            Assert.assertTrue(segments.contains(s1HlcSegment1.getSegmentName()));
            Assert.assertTrue(segments.contains(s1HlcSegment2.getSegmentName()));
            hlc++;
        }
    }

    Assert.assertEquals(hlc, 0);
    Assert.assertEquals(llc, 100);
}

From source file:at.salzburgresearch.kmt.zkconfig.ZookeeperConfigurationTest.java

@Test
public void testBasicCRUD() throws IOException {
    Configuration config = new ZookeeperConfiguration(zkConnection, 5000, "/test");

    final String key = UUID.randomUUID().toString();
    final String v1 = UUID.randomUUID().toString(), v2 = UUID.randomUUID().toString(),
            v3 = UUID.randomUUID().toString(), v4 = UUID.randomUUID().toString(),
            v5 = UUID.randomUUID().toString();

    assertNull(config.getString(key));
    assertEquals(v5, config.getString(key, v5));

    config.setProperty(key, v1);
    assertEquals(v1, config.getString(key, v5));

    config.addProperty(key, v2);
    assertEquals(v1, config.getString(key, v5));
    assertThat(config.getList(key), CoreMatchers.<Object>hasItems(v1, v2));

    config.addProperty(key, v3);
    config.addProperty(key, v4);
    assertEquals(v1, config.getString(key, v5));
    assertThat(config.getList(key), CoreMatchers.<Object>hasItems(v1, v2, v3, v4));

    config.clearProperty(key);
    assertNull(config.getString(key));
    assertEquals(v5, config.getString(key, v5));

    config.addProperty(key, v5);
    assertEquals(v5, config.getString(key));

    config.clearProperty(key);
    config.setProperty(key, Arrays.asList(v3, v2, v4, v1, v5));
    assertEquals(v3, config.getString(key));
    assertThat(config.getList(key), CoreMatchers.<Object>hasItems(v3, v2, v4, v1, v5));

}

From source file:com.yahoo.pulsar.zookeeper.ZkIsolatedBookieEnsemblePlacementPolicy.java

private ZooKeeperCache getAndSetZkCache(Configuration conf) {
    ZooKeeperCache zkCache = null;
    if (conf.getProperty(ZooKeeperCache.ZK_CACHE_INSTANCE) != null) {
        zkCache = (ZooKeeperCache) conf.getProperty(ZooKeeperCache.ZK_CACHE_INSTANCE);
    } else {
        int zkTimeout;
        String zkServers;
        if (conf instanceof ClientConfiguration) {
            zkTimeout = ((ClientConfiguration) conf).getZkTimeout();
            zkServers = ((ClientConfiguration) conf).getZkServers();
            ZooKeeperWatcherBase w = new ZooKeeperWatcherBase(zkTimeout) {
            };
            try {
                ZooKeeper zkClient = ZkUtils.createConnectedZookeeperClient(zkServers, w);
                zkCache = new ZooKeeperCache(zkClient) {
                };
                conf.addProperty(ZooKeeperCache.ZK_CACHE_INSTANCE, zkCache);
            } catch (Exception e) {
                LOG.error("Error creating zookeeper client", e);
            }
        } else {
            LOG.error("No zk configurations available");
        }
    }
    return zkCache;
}

From source file:com.linkedin.pinot.server.starter.helix.HelixServerStarter.java

public HelixServerStarter(String helixClusterName, String zkServer, Configuration pinotHelixProperties)
        throws Exception {
    LOGGER.info("Starting Pinot server");
    _helixClusterName = helixClusterName;
    _pinotHelixProperties = pinotHelixProperties;
    String hostname = pinotHelixProperties.getString(CommonConstants.Helix.KEY_OF_SERVER_NETTY_HOST,
            NetUtil.getHostAddress());
    _instanceId = pinotHelixProperties.getString("instanceId",
            CommonConstants.Helix.PREFIX_OF_SERVER_INSTANCE + hostname + "_"
                    + pinotHelixProperties.getInt(CommonConstants.Helix.KEY_OF_SERVER_NETTY_PORT,
                            CommonConstants.Helix.DEFAULT_SERVER_NETTY_PORT));

    pinotHelixProperties.addProperty("pinot.server.instance.id", _instanceId);
    startServerInstance(pinotHelixProperties);

    LOGGER.info("Connecting Helix components");
    // Replace all white-spaces from list of zkServers.
    String zkServers = zkServer.replaceAll("\\s+", "");
    _helixManager = HelixManagerFactory.getZKHelixManager(helixClusterName, _instanceId,
            InstanceType.PARTICIPANT, zkServers);
    final StateMachineEngine stateMachineEngine = _helixManager.getStateMachineEngine();
    _helixManager.connect();
    ZkHelixPropertyStore<ZNRecord> zkPropertyStore = ZkUtils.getZkPropertyStore(_helixManager,
            helixClusterName);

    SegmentFetcherAndLoader fetcherAndLoader = new SegmentFetcherAndLoader(
            _serverInstance.getInstanceDataManager(), new ColumnarSegmentMetadataLoader(), zkPropertyStore,
            pinotHelixProperties, _instanceId);

    // Register state model factory
    final StateModelFactory<?> stateModelFactory = new SegmentOnlineOfflineStateModelFactory(helixClusterName,
            _instanceId, _serverInstance.getInstanceDataManager(), zkPropertyStore, fetcherAndLoader);
    stateMachineEngine.registerStateModelFactory(SegmentOnlineOfflineStateModelFactory.getStateModelName(),
            stateModelFactory);
    _helixAdmin = _helixManager.getClusterManagmentTool();
    addInstanceTagIfNeeded(helixClusterName, _instanceId);
    // Start restlet server for admin API endpoint
    int adminApiPort = pinotHelixProperties.getInt(CommonConstants.Server.CONFIG_OF_ADMIN_API_PORT,
            Integer.parseInt(CommonConstants.Server.DEFAULT_ADMIN_API_PORT));
    adminApiService = new AdminApiService(_serverInstance);
    adminApiService.start(adminApiPort);
    updateInstanceConfigInHelix(adminApiPort, false/*shutDownStatus*/);

    // Register message handler factory
    SegmentMessageHandlerFactory messageHandlerFactory = new SegmentMessageHandlerFactory(fetcherAndLoader);
    _helixManager.getMessagingService().registerMessageHandlerFactory(
            Message.MessageType.USER_DEFINE_MSG.toString(), messageHandlerFactory);

    _serverInstance.getServerMetrics().addCallbackGauge("helix.connected", new Callable<Long>() {
        @Override
        public Long call() throws Exception {
            return _helixManager.isConnected() ? 1L : 0L;
        }
    });

    _helixManager.addPreConnectCallback(new PreConnectCallback() {
        @Override
        public void onPreConnect() {
            _serverInstance.getServerMetrics().addMeteredGlobalValue(ServerMeter.HELIX_ZOOKEEPER_RECONNECTS,
                    1L);
        }
    });

    ControllerLeaderLocator.create(_helixManager);

    LOGGER.info("Pinot server ready");

    // Create metrics for mmap stuff
    _serverInstance.getServerMetrics().addCallbackGauge("memory.directByteBufferUsage", new Callable<Long>() {
        @Override
        public Long call() throws Exception {
            return MmapUtils.getDirectByteBufferUsage();
        }
    });

    _serverInstance.getServerMetrics().addCallbackGauge("memory.mmapBufferUsage", new Callable<Long>() {
        @Override
        public Long call() throws Exception {
            return MmapUtils.getMmapBufferUsage();
        }
    });

    _serverInstance.getServerMetrics().addCallbackGauge("memory.mmapBufferCount", new Callable<Long>() {
        @Override
        public Long call() throws Exception {
            return MmapUtils.getMmapBufferCount();
        }
    });

}