Example usage for org.apache.commons.configuration PropertiesConfiguration setProperty

List of usage examples for org.apache.commons.configuration PropertiesConfiguration setProperty

Introduction

On this page you can find example usages of org.apache.commons.configuration PropertiesConfiguration setProperty.

Prototype

public void setProperty(String key, Object value) 

Source Link

Document

Sets a new value for the specified property.

Usage

From source file:nl.tudelft.graphalytics.configuration.GraphParserTest.java

/**
 * Builds a test fixture for a graph that carries three vertex properties and no edge properties.
 *
 * @param rootDir directory prefix applied to the vertex and edge file paths
 * @return a fixture pairing the expected Graph with a configuration describing the same graph
 */
private static Fixture constructVertexPropertyGraph(String rootDir) {
    final String NAME = "Graph name";
    final long NUM_VERTICES = 123;
    final long NUM_EDGES = 765;
    final boolean IS_DIRECTED = true;
    final String VERTEX_FILE_PATH = "example.graph.v";
    final String EDGE_FILE_PATH = "other.example.graph.edges";
    final String VERTEX_PROPERTY_NAME_1 = "prop-1";
    final String VERTEX_PROPERTY_NAME_2 = "prop-2";
    final String VERTEX_PROPERTY_NAME_3 = "prop-3";
    final PropertyType VERTEX_PROPERTY_TYPE_1 = PropertyType.INTEGER;
    final PropertyType VERTEX_PROPERTY_TYPE_2 = PropertyType.INTEGER;
    final PropertyType VERTEX_PROPERTY_TYPE_3 = PropertyType.REAL;

    // All three properties are attached to the vertices; the edge property list is empty.
    PropertyList vertexProperties = new PropertyList(
            new Property(VERTEX_PROPERTY_NAME_1, VERTEX_PROPERTY_TYPE_1),
            new Property(VERTEX_PROPERTY_NAME_2, VERTEX_PROPERTY_TYPE_2),
            new Property(VERTEX_PROPERTY_NAME_3, VERTEX_PROPERTY_TYPE_3));
    String vertexFile = Paths.get(rootDir, VERTEX_FILE_PATH).toString();
    String edgeFile = Paths.get(rootDir, EDGE_FILE_PATH).toString();
    Graph expectedGraph = new Graph(NAME, NUM_VERTICES, NUM_EDGES, IS_DIRECTED,
            vertexFile, edgeFile, vertexProperties, new PropertyList());

    // Configuration equivalent of the graph constructed above; file paths are
    // stored relative here, while the Graph holds them resolved against rootDir.
    PropertiesConfiguration config = new PropertiesConfiguration();
    config.setProperty("meta.vertices", NUM_VERTICES);
    config.setProperty("meta.edges", NUM_EDGES);
    config.setProperty("directed", IS_DIRECTED);
    config.setProperty("vertex-file", VERTEX_FILE_PATH);
    config.setProperty("edge-file", EDGE_FILE_PATH);
    config.setProperty("vertex-properties.names",
            String.join(",", VERTEX_PROPERTY_NAME_1, VERTEX_PROPERTY_NAME_2, VERTEX_PROPERTY_NAME_3));
    config.setProperty("vertex-properties.types", VERTEX_PROPERTY_TYPE_1 + "," + VERTEX_PROPERTY_TYPE_2
            + "," + VERTEX_PROPERTY_TYPE_3);

    return new Fixture(NAME, expectedGraph, config);
}

From source file:nl.tudelft.graphalytics.configuration.GraphParserTest.java

/**
 * Builds a test fixture for a graph that carries three edge properties and no vertex properties.
 *
 * @param rootDir directory prefix applied to the vertex and edge file paths
 * @return a fixture pairing the expected Graph with a configuration describing the same graph
 */
private static Fixture constructEdgePropertyGraph(String rootDir) {
    final String NAME = "Graph name";
    final long NUM_VERTICES = 123;
    final long NUM_EDGES = 765;
    final boolean IS_DIRECTED = true;
    final String VERTEX_FILE_PATH = "example.graph.v";
    final String EDGE_FILE_PATH = "other.example.graph.edges";
    final String EDGE_PROPERTY_NAME_1 = "prop-1";
    final String EDGE_PROPERTY_NAME_2 = "prop-2";
    final String EDGE_PROPERTY_NAME_3 = "prop-3";
    final PropertyType EDGE_PROPERTY_TYPE_1 = PropertyType.INTEGER;
    final PropertyType EDGE_PROPERTY_TYPE_2 = PropertyType.INTEGER;
    final PropertyType EDGE_PROPERTY_TYPE_3 = PropertyType.REAL;

    // All three properties are attached to the edges; the vertex property list is empty.
    PropertyList edgeProperties = new PropertyList(
            new Property(EDGE_PROPERTY_NAME_1, EDGE_PROPERTY_TYPE_1),
            new Property(EDGE_PROPERTY_NAME_2, EDGE_PROPERTY_TYPE_2),
            new Property(EDGE_PROPERTY_NAME_3, EDGE_PROPERTY_TYPE_3));
    String vertexFile = Paths.get(rootDir, VERTEX_FILE_PATH).toString();
    String edgeFile = Paths.get(rootDir, EDGE_FILE_PATH).toString();
    Graph expectedGraph = new Graph(NAME, NUM_VERTICES, NUM_EDGES, IS_DIRECTED,
            vertexFile, edgeFile, new PropertyList(), edgeProperties);

    // Configuration equivalent of the graph constructed above; file paths are
    // stored relative here, while the Graph holds them resolved against rootDir.
    PropertiesConfiguration config = new PropertiesConfiguration();
    config.setProperty("meta.vertices", NUM_VERTICES);
    config.setProperty("meta.edges", NUM_EDGES);
    config.setProperty("directed", IS_DIRECTED);
    config.setProperty("vertex-file", VERTEX_FILE_PATH);
    config.setProperty("edge-file", EDGE_FILE_PATH);
    config.setProperty("edge-properties.names",
            String.join(",", EDGE_PROPERTY_NAME_1, EDGE_PROPERTY_NAME_2, EDGE_PROPERTY_NAME_3));
    config.setProperty("edge-properties.types", EDGE_PROPERTY_TYPE_1 + "," + EDGE_PROPERTY_TYPE_2
            + "," + EDGE_PROPERTY_TYPE_3);

    return new Fixture(NAME, expectedGraph, config);
}

From source file:org.apache.accumulo.test.RewriteTabletDirectoriesIT.java

/**
 * Verifies that {@code Admin randomizeVolumes} rewrites tablet directory entries across
 * the configured volumes. The test writes data on a single volume (v1), rewrites some
 * metadata directory entries as relative paths, adds a second volume (v2), randomizes,
 * and then checks that tablets are spread roughly evenly across both volumes and that
 * all original data is still readable.
 */
@Test
public void test() throws Exception {
    try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
        c.securityOperations().grantTablePermission(c.whoami(), MetadataTable.NAME, TablePermission.WRITE);
        final String tableName = getUniqueNames(1)[0];
        c.tableOperations().create(tableName);

        // Write some data to a table and add some splits
        final SortedSet<Text> splits = new TreeSet<>();
        try (BatchWriter bw = c.createBatchWriter(tableName)) {
            for (String split : "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",")) {
                splits.add(new Text(split));
                Mutation m = new Mutation(new Text(split));
                m.put(new byte[] {}, new byte[] {}, new byte[] {});
                bw.addMutation(m);
            }
        }
        c.tableOperations().addSplits(tableName, splits);

        try (BatchScanner scanner = c.createBatchScanner(MetadataTable.NAME)) {
            DIRECTORY_COLUMN.fetch(scanner);
            TableId tableId = TableId.of(c.tableOperations().tableIdMap().get(tableName));
            assertNotNull("TableID for " + tableName + " was null", tableId);
            scanner.setRanges(Collections.singletonList(TabletsSection.getRange(tableId)));
            // verify the directory entries are all on v1, make a few entries relative
            int count = 0;
            try (BatchWriter bw = c.createBatchWriter(MetadataTable.NAME)) {
                for (Entry<Key, Value> entry : scanner) {
                    assertTrue("Expected " + entry.getValue() + " to contain " + v1,
                            entry.getValue().toString().contains(v1.toString()));
                    count++;
                    // Rewrite every other directory entry as a relative path so the
                    // randomizer is exercised against both absolute and relative forms.
                    if (count % 2 == 0) {
                        String[] parts = entry.getValue().toString().split("/");
                        Key key = entry.getKey();
                        Mutation m = new Mutation(key.getRow());
                        m.put(key.getColumnFamily(), key.getColumnQualifier(),
                                new Value((Path.SEPARATOR + parts[parts.length - 1]).getBytes()));
                        bw.addMutation(m);
                    }
                }
            }
            // one tablet per split plus the default tablet
            assertEquals(splits.size() + 1, count);

            // This should fail: only one volume
            assertEquals(1,
                    cluster.exec(RandomizeVolumes.class, "-c", cluster.getClientPropsPath(), "-t", tableName)
                            .getProcess().waitFor());

            cluster.stop();

            // add the 2nd volume
            PropertiesConfiguration conf = new PropertiesConfiguration();
            conf.load(cluster.getAccumuloPropertiesPath());
            conf.setProperty(Property.INSTANCE_VOLUMES.getKey(), v1 + "," + v2);
            conf.save(cluster.getAccumuloPropertiesPath());

            // initialize volume
            assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").getProcess().waitFor());
            cluster.start();

            // change the directory entries
            assertEquals(0,
                    cluster.exec(Admin.class, "randomizeVolumes", "-t", tableName).getProcess().waitFor());

            // verify a more equal sharing
            int v1Count = 0, v2Count = 0;
            for (Entry<Key, Value> entry : scanner) {
                if (entry.getValue().toString().contains(v1.toString())) {
                    v1Count++;
                }
                if (entry.getValue().toString().contains(v2.toString())) {
                    v2Count++;
                }
            }

            log.info("Count for volume1: {}", v1Count);
            log.info("Count for volume2: {}", v2Count);

            assertEquals(splits.size() + 1, v1Count + v2Count);
            // a fair chooser will differ by less than count(volumes)
            // BUGFIX: message previously claimed "less than 10" while the assertion checks < 2
            assertTrue("Expected the number of files to differ between volumes by less than 2. " + v1Count
                    + " " + v2Count, Math.abs(v1Count - v2Count) < 2);
            // verify we can read the old data
            count = 0;
            for (Entry<Key, Value> entry : c.createScanner(tableName, Authorizations.EMPTY)) {
                assertTrue("Found unexpected entry in table: " + entry,
                        splits.contains(entry.getKey().getRow()));
                count++;
            }
            assertEquals(splits.size(), count);
        }
    }
}

From source file:org.apache.accumulo.test.VolumeIT.java

/**
 * Verifies that a new volume (v3) can be added to a running instance: after updating
 * instance.volumes, re-running {@code Initialize --add-volumes}, and restarting, a newly
 * created table uses all three volumes.
 */
@Test
public void testAddVolumes() throws Exception {
    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
        String[] tableNames = getUniqueNames(2);

        PropertiesConfiguration conf = new PropertiesConfiguration();

        // verifyAndShutdownCluster populates conf from the cluster's properties file
        // and stops the cluster; uuid identifies the instance for later checks.
        String uuid = verifyAndShutdownCluster(client, conf, tableNames[0]);

        conf.setProperty(Property.INSTANCE_VOLUMES.getKey(), v1 + "," + v2 + "," + v3);
        conf.save(cluster.getAccumuloPropertiesPath());

        // initialize the newly added volume
        assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").getProcess().waitFor());

        checkVolumesInitialized(Arrays.asList(v1, v2, v3), uuid);

        // start cluster and verify that the new volume is used
        cluster.start();

        verifyVolumesUsed(client, tableNames[1], false, v1, v2, v3);
    }
}

From source file:org.apache.accumulo.test.VolumeIT.java

/**
 * Verifies behavior when the volume configuration is replaced (v1 dropped, v3 added)
 * without decommissioning: data previously written to v1 remains readable, while new
 * tables only use the currently configured volumes (v2, v3).
 */
@Test
public void testNonConfiguredVolumes() throws Exception {

    String[] tableNames = getUniqueNames(2);
    PropertiesConfiguration conf = new PropertiesConfiguration();

    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
        // verifyAndShutdownCluster populates conf from the cluster's properties file
        // and stops the cluster; uuid identifies the instance for later checks.
        String uuid = verifyAndShutdownCluster(client, conf, tableNames[0]);

        conf.setProperty(Property.INSTANCE_VOLUMES.getKey(), v2 + "," + v3);
        conf.save(cluster.getAccumuloPropertiesPath());

        // initialize the newly added volume
        assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").getProcess().waitFor());

        checkVolumesInitialized(Arrays.asList(v1, v2, v3), uuid);

        // start cluster and verify that the new volume is used
        cluster.start();

        // verify we can still read the tables (tableNames[0] is likely to have a file still on v1)
        verifyData(expected, client.createScanner(tableNames[0], Authorizations.EMPTY));

        // v1 should not have any data for tableNames[1]
        verifyVolumesUsed(client, tableNames[1], false, v2, v3);
    }
}

From source file:org.apache.accumulo.test.VolumeIT.java

/**
 * Verifies that a volume (v1) can be decommissioned: after removing it from
 * instance.volumes and restarting, a full compaction migrates all data — including the
 * root tablet — onto the remaining volume (v2), and cloned/flushed tables stay on v2.
 */
@Test
public void testRemoveVolumes() throws Exception {
    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
        String[] tableNames = getUniqueNames(2);

        verifyVolumesUsed(client, tableNames[0], false, v1, v2);

        // clean shutdown so all data is flushed before the volume change
        assertEquals(0, cluster.exec(Admin.class, "stopAll").getProcess().waitFor());
        cluster.stop();

        // drop v1 from the configured volumes, leaving only v2
        PropertiesConfiguration conf = new PropertiesConfiguration();
        conf.load(cluster.getAccumuloPropertiesPath());
        conf.setProperty(Property.INSTANCE_VOLUMES.getKey(), v2.toString());
        conf.save(cluster.getAccumuloPropertiesPath());

        // start cluster and verify that volume was decommissioned
        cluster.start();

        // full major compaction rewrites all files onto the remaining volume
        client.tableOperations().compact(tableNames[0], null, null, true, true);

        verifyVolumesUsed(client, tableNames[0], true, v2);

        // check that root tablet is not on volume 1
        ZooReader zreader = new ZooReader(cluster.getZooKeepers(), 30000);
        String zpath = ZooUtil.getRoot(client.instanceOperations().getInstanceID())
                + RootTable.ZROOT_TABLET_PATH;
        String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
        assertTrue(rootTabletDir.startsWith(v2.toString()));

        client.tableOperations().clone(tableNames[0], tableNames[1], true, new HashMap<>(), new HashSet<>());

        client.tableOperations().flush(MetadataTable.NAME, null, null, true);
        client.tableOperations().flush(RootTable.NAME, null, null, true);

        verifyVolumesUsed(client, tableNames[0], true, v2);
        verifyVolumesUsed(client, tableNames[1], true, v2);
    }
}

From source file:org.apache.accumulo.test.VolumeIT.java

/**
 * Verifies volume replacement: both volumes are renamed on disk (v1 -> v8, v2 -> v9),
 * instance.volumes and instance.volumes.replacements are updated accordingly, and after
 * restart all existing and new data — including the root tablet and unflushed writes —
 * resolves to the replacement volumes.
 *
 * @param client        client connected to the test cluster
 * @param cleanShutdown whether to stop all servers cleanly first; when false, the
 *                      second table's data is only in write-ahead logs at shutdown,
 *                      exercising WAL recovery across the volume replacement
 */
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "paths provided by test")
private void testReplaceVolume(AccumuloClient client, boolean cleanShutdown) throws Exception {
    String[] tableNames = getUniqueNames(3);

    verifyVolumesUsed(client, tableNames[0], false, v1, v2);

    // write to 2nd table, but do not flush data to disk before shutdown
    writeData(tableNames[1], cluster.createAccumuloClient("root", new PasswordToken(ROOT_PASSWORD)));

    if (cleanShutdown)
        assertEquals(0, cluster.exec(Admin.class, "stopAll").getProcess().waitFor());

    cluster.stop();

    // rename the volume directories on the local filesystem
    File v1f = new File(v1.toUri());
    File v8f = new File(new File(v1.getParent().toUri()), "v8");
    assertTrue("Failed to rename " + v1f + " to " + v8f, v1f.renameTo(v8f));
    Path v8 = new Path(v8f.toURI());

    File v2f = new File(v2.toUri());
    File v9f = new File(new File(v2.getParent().toUri()), "v9");
    assertTrue("Failed to rename " + v2f + " to " + v9f, v2f.renameTo(v9f));
    Path v9 = new Path(v9f.toURI());

    // point the instance at the new volumes and declare the old->new mapping
    PropertiesConfiguration conf = new PropertiesConfiguration();
    conf.load(cluster.getAccumuloPropertiesPath());
    conf.setProperty(Property.INSTANCE_VOLUMES.getKey(), v8 + "," + v9);
    conf.setProperty(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey(), v1 + " " + v8 + "," + v2 + " " + v9);
    conf.save(cluster.getAccumuloPropertiesPath());

    // start cluster and verify that volumes were replaced
    cluster.start();

    verifyVolumesUsed(client, tableNames[0], true, v8, v9);
    verifyVolumesUsed(client, tableNames[1], true, v8, v9);

    // verify writes to new dir
    client.tableOperations().compact(tableNames[0], null, null, true, true);
    client.tableOperations().compact(tableNames[1], null, null, true, true);

    verifyVolumesUsed(client, tableNames[0], true, v8, v9);
    verifyVolumesUsed(client, tableNames[1], true, v8, v9);

    // check that root tablet is not on volume 1 or 2
    ZooReader zreader = new ZooReader(cluster.getZooKeepers(), 30000);
    String zpath = ZooUtil.getRoot(client.instanceOperations().getInstanceID()) + RootTable.ZROOT_TABLET_PATH;
    String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
    assertTrue(rootTabletDir.startsWith(v8.toString()) || rootTabletDir.startsWith(v9.toString()));

    client.tableOperations().clone(tableNames[1], tableNames[2], true, new HashMap<>(), new HashSet<>());

    client.tableOperations().flush(MetadataTable.NAME, null, null, true);
    client.tableOperations().flush(RootTable.NAME, null, null, true);

    verifyVolumesUsed(client, tableNames[0], true, v8, v9);
    verifyVolumesUsed(client, tableNames[1], true, v8, v9);
    verifyVolumesUsed(client, tableNames[2], true, v8, v9);
}

From source file:org.apache.atlas.AtlasIT.java

/**
 * Verifies port resolution precedence in {@code Atlas.getApplicationPort}: explicitly
 * configured HTTP/HTTPS ports win over the built-in defaults (21000/21443), and a
 * {@code --port} command-line argument overrides both, regardless of the TLS flag.
 */
@Test
public void testPortSelection() throws Exception {
    PropertiesConfiguration config = new PropertiesConfiguration();
    // test ports via config
    config.setProperty(Atlas.ATLAS_SERVER_HTTP_PORT, 21001);
    config.setProperty(Atlas.ATLAS_SERVER_HTTPS_PORT, 22443);
    int port = Atlas.getApplicationPort(Atlas.parseArgs(new String[] {}), "false", config);
    Assert.assertEquals(21001, port, "wrong http port");
    port = Atlas.getApplicationPort(Atlas.parseArgs(new String[] {}), "true", config);
    Assert.assertEquals(22443, port, "wrong https port");
    // test defaults (empty configuration)
    port = Atlas.getApplicationPort(Atlas.parseArgs(new String[] {}), "false", new PropertiesConfiguration());
    Assert.assertEquals(21000, port, "wrong http port");
    port = Atlas.getApplicationPort(Atlas.parseArgs(new String[] {}), "true", new PropertiesConfiguration());
    Assert.assertEquals(21443, port, "wrong https port");
    // test command line override
    CommandLine commandLine = Atlas.parseArgs(new String[] { "--port", "22000" });
    port = Atlas.getApplicationPort(commandLine, "true", config);
    Assert.assertEquals(22000, port, "wrong https port");
    port = Atlas.getApplicationPort(commandLine, "false", config);
    // BUGFIX: this call resolves the HTTP port ("false"); message previously said "wrong https port"
    Assert.assertEquals(22000, port, "wrong http port");
}

From source file:org.apache.atlas.MainIT.java

/**
 * Verifies port resolution precedence in {@code Main.getApplicationPort}: explicitly
 * configured HTTP/HTTPS ports win over the built-in defaults (21000/21443), and a
 * {@code --port} command-line argument overrides both, regardless of the TLS flag.
 */
@Test
public void testPortSelection() throws Exception {
    PropertiesConfiguration config = new PropertiesConfiguration();
    // test ports via config
    config.setProperty(Main.ATLAS_SERVER_HTTP_PORT, 21001);
    config.setProperty(Main.ATLAS_SERVER_HTTPS_PORT, 22443);
    int port = Main.getApplicationPort(Main.parseArgs(new String[] {}), "false", config);
    Assert.assertEquals(21001, port, "wrong http port");
    port = Main.getApplicationPort(Main.parseArgs(new String[] {}), "true", config);
    Assert.assertEquals(22443, port, "wrong https port");
    // test defaults (empty configuration)
    port = Main.getApplicationPort(Main.parseArgs(new String[] {}), "false", new PropertiesConfiguration());
    Assert.assertEquals(21000, port, "wrong http port");
    port = Main.getApplicationPort(Main.parseArgs(new String[] {}), "true", new PropertiesConfiguration());
    Assert.assertEquals(21443, port, "wrong https port");
    // test command line override
    CommandLine commandLine = Main.parseArgs(new String[] { "--port", "22000" });
    port = Main.getApplicationPort(commandLine, "true", config);
    Assert.assertEquals(22000, port, "wrong https port");
    port = Main.getApplicationPort(commandLine, "false", config);
    // BUGFIX: this call resolves the HTTP port ("false"); message previously said "wrong https port"
    Assert.assertEquals(22000, port, "wrong http port");
}

From source file:org.apache.atlas.web.filters.AtlasAuthenticationKerberosFilterTest.java

/**
 * Assembles the Atlas properties that enable Kerberos HTTP authentication for this test
 * and writes them out via {@code writeConfiguration}.
 *
 * @return the location of the written configuration, as reported by writeConfiguration
 * @throws Exception if the configuration cannot be written
 */
protected String generateKerberosTestProperties() throws Exception {
    PropertiesConfiguration config = new PropertiesConfiguration();
    config.setProperty("atlas.http.authentication.enabled", "true");
    config.setProperty("atlas.http.authentication.type", "kerberos");
    // The service principal is bound to the realm of the test KDC.
    String principal = "HTTP/localhost@" + kdc.getRealm();
    config.setProperty("atlas.http.authentication.kerberos.principal", principal);
    config.setProperty("atlas.http.authentication.kerberos.keytab", httpKeytabFile.getAbsolutePath());
    // Map EXAMPLE.COM principals to their short names; all others fall through to DEFAULT.
    String nameRules = "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nDEFAULT";
    config.setProperty("atlas.http.authentication.kerberos.name.rules", nameRules);

    return writeConfiguration(config);
}