Example usage for org.apache.commons.configuration PropertiesConfiguration save

List of usage examples for org.apache.commons.configuration PropertiesConfiguration save

Introduction

On this page you can find example usage of org.apache.commons.configuration.PropertiesConfiguration.save.

Prototype

public void save(Writer writer) throws ConfigurationException 

Document

Save the configuration to the specified stream.
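
Before the project-specific examples below, here is a minimal, self-contained sketch of the call shown in the prototype (Commons Configuration 1.x); the property keys and values are purely illustrative:

import java.io.StringWriter;

import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;

public class SaveToWriterSketch {
    public static void main(String[] args) throws ConfigurationException {
        PropertiesConfiguration config = new PropertiesConfiguration();
        config.setProperty("app.name", "demo"); // illustrative key/value pairs
        config.setProperty("app.threads", 4);

        // save(Writer) serializes the configuration in .properties format to the given writer
        StringWriter writer = new StringWriter();
        config.save(writer);
        System.out.println(writer);
    }
}

The same class also offers save(), save(String), save(File), save(URL) and save(OutputStream) overloads, which several of the examples below rely on.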

Usage

From source file: org.apache.accumulo.test.RewriteTabletDirectoriesIT.java

@Test
public void test() throws Exception {
    try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
        c.securityOperations().grantTablePermission(c.whoami(), MetadataTable.NAME, TablePermission.WRITE);
        final String tableName = getUniqueNames(1)[0];
        c.tableOperations().create(tableName);

        // Write some data to a table and add some splits
        final SortedSet<Text> splits = new TreeSet<>();
        try (BatchWriter bw = c.createBatchWriter(tableName)) {
            for (String split : "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",")) {
                splits.add(new Text(split));
                Mutation m = new Mutation(new Text(split));
                m.put(new byte[] {}, new byte[] {}, new byte[] {});
                bw.addMutation(m);
            }
        }
        c.tableOperations().addSplits(tableName, splits);

        try (BatchScanner scanner = c.createBatchScanner(MetadataTable.NAME)) {
            DIRECTORY_COLUMN.fetch(scanner);
            TableId tableId = TableId.of(c.tableOperations().tableIdMap().get(tableName));
            assertNotNull("TableID for " + tableName + " was null", tableId);
            scanner.setRanges(Collections.singletonList(TabletsSection.getRange(tableId)));
            // verify the directory entries are all on v1, make a few entries relative
            int count = 0;
            try (BatchWriter bw = c.createBatchWriter(MetadataTable.NAME)) {
                for (Entry<Key, Value> entry : scanner) {
                    assertTrue("Expected " + entry.getValue() + " to contain " + v1,
                            entry.getValue().toString().contains(v1.toString()));
                    count++;
                    if (count % 2 == 0) {
                        String[] parts = entry.getValue().toString().split("/");
                        Key key = entry.getKey();
                        Mutation m = new Mutation(key.getRow());
                        m.put(key.getColumnFamily(), key.getColumnQualifier(),
                                new Value((Path.SEPARATOR + parts[parts.length - 1]).getBytes()));
                        bw.addMutation(m);
                    }
                }
            }
            assertEquals(splits.size() + 1, count);

            // This should fail: only one volume
            assertEquals(1,
                    cluster.exec(RandomizeVolumes.class, "-c", cluster.getClientPropsPath(), "-t", tableName)
                            .getProcess().waitFor());

            cluster.stop();

            // add the 2nd volume
            PropertiesConfiguration conf = new PropertiesConfiguration();
            conf.load(cluster.getAccumuloPropertiesPath());
            conf.setProperty(Property.INSTANCE_VOLUMES.getKey(), v1 + "," + v2);
            conf.save(cluster.getAccumuloPropertiesPath());

            // initialize volume
            assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").getProcess().waitFor());
            cluster.start();

            // change the directory entries
            assertEquals(0,
                    cluster.exec(Admin.class, "randomizeVolumes", "-t", tableName).getProcess().waitFor());

            // verify a more equal sharing
            int v1Count = 0, v2Count = 0;
            for (Entry<Key, Value> entry : scanner) {
                if (entry.getValue().toString().contains(v1.toString())) {
                    v1Count++;
                }
                if (entry.getValue().toString().contains(v2.toString())) {
                    v2Count++;
                }
            }

            log.info("Count for volume1: {}", v1Count);
            log.info("Count for volume2: {}", v2Count);

            assertEquals(splits.size() + 1, v1Count + v2Count);
            // a fair chooser will differ by less than count(volumes)
            assertTrue("Expected the number of files to differ between volumes by less than 10. " + v1Count
                    + " " + v2Count, Math.abs(v1Count - v2Count) < 2);
            // verify we can read the old data
            count = 0;
            for (Entry<Key, Value> entry : c.createScanner(tableName, Authorizations.EMPTY)) {
                assertTrue("Found unexpected entry in table: " + entry,
                        splits.contains(entry.getKey().getRow()));
                count++;
            }
            assertEquals(splits.size(), count);
        }
    }
}
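
The Accumulo test above updates accumulo.properties on disk by loading it, changing one property and writing it back. Stripped of the test scaffolding, that round trip looks roughly like the following sketch (the file path and the new volume URIs are placeholders, not part of the Accumulo code):

import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;

public class UpdatePropertiesFileSketch {
    public static void main(String[] args) throws ConfigurationException {
        String path = "/tmp/accumulo.properties"; // placeholder path

        // load the existing file, change one value, then write the file back in place
        PropertiesConfiguration conf = new PropertiesConfiguration();
        conf.load(path);
        conf.setProperty("instance.volumes", "hdfs://nn1/accumulo,hdfs://nn2/accumulo");
        conf.save(path); // save(String) overload writes to the given file name
    }
}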

From source file: org.apache.accumulo.test.VolumeIT.java

@Test
public void testAddVolumes() throws Exception {
    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
        String[] tableNames = getUniqueNames(2);

        PropertiesConfiguration conf = new PropertiesConfiguration();

        String uuid = verifyAndShutdownCluster(client, conf, tableNames[0]);

        conf.setProperty(Property.INSTANCE_VOLUMES.getKey(), v1 + "," + v2 + "," + v3);
        conf.save(cluster.getAccumuloPropertiesPath());

        // initialize volume
        assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").getProcess().waitFor());

        checkVolumesInitialized(Arrays.asList(v1, v2, v3), uuid);

        // start cluster and verify that new volume is used
        cluster.start();

        verifyVolumesUsed(client, tableNames[1], false, v1, v2, v3);
    }
}

From source file: org.apache.accumulo.test.VolumeIT.java

@Test
public void testNonConfiguredVolumes() throws Exception {

    String[] tableNames = getUniqueNames(2);
    PropertiesConfiguration conf = new PropertiesConfiguration();

    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
        String uuid = verifyAndShutdownCluster(client, conf, tableNames[0]);

        conf.setProperty(Property.INSTANCE_VOLUMES.getKey(), v2 + "," + v3);
        conf.save(cluster.getAccumuloPropertiesPath());

        // initialize volume
        assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").getProcess().waitFor());

        checkVolumesInitialized(Arrays.asList(v1, v2, v3), uuid);

        // start cluster and verify that new volume is used
        cluster.start();

        // verify we can still read the tables (tableNames[0] is likely to have a file still on v1)
        verifyData(expected, client.createScanner(tableNames[0], Authorizations.EMPTY));

        // v1 should not have any data for tableNames[1]
        verifyVolumesUsed(client, tableNames[1], false, v2, v3);
    }
}

From source file: org.apache.accumulo.test.VolumeIT.java

@Test
public void testRemoveVolumes() throws Exception {
    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
        String[] tableNames = getUniqueNames(2);

        verifyVolumesUsed(client, tableNames[0], false, v1, v2);

        assertEquals(0, cluster.exec(Admin.class, "stopAll").getProcess().waitFor());
        cluster.stop();

        PropertiesConfiguration conf = new PropertiesConfiguration();
        conf.load(cluster.getAccumuloPropertiesPath());
        conf.setProperty(Property.INSTANCE_VOLUMES.getKey(), v2.toString());
        conf.save(cluster.getAccumuloPropertiesPath());

        // start cluster and verify that volume was decommissioned
        cluster.start();

        client.tableOperations().compact(tableNames[0], null, null, true, true);

        verifyVolumesUsed(client, tableNames[0], true, v2);

        // check that root tablet is not on volume 1
        ZooReader zreader = new ZooReader(cluster.getZooKeepers(), 30000);
        String zpath = ZooUtil.getRoot(client.instanceOperations().getInstanceID())
                + RootTable.ZROOT_TABLET_PATH;
        String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
        assertTrue(rootTabletDir.startsWith(v2.toString()));

        client.tableOperations().clone(tableNames[0], tableNames[1], true, new HashMap<>(), new HashSet<>());

        client.tableOperations().flush(MetadataTable.NAME, null, null, true);
        client.tableOperations().flush(RootTable.NAME, null, null, true);

        verifyVolumesUsed(client, tableNames[0], true, v2);
        verifyVolumesUsed(client, tableNames[1], true, v2);
    }
}

From source file: org.apache.accumulo.test.VolumeIT.java

@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "paths provided by test")
private void testReplaceVolume(AccumuloClient client, boolean cleanShutdown) throws Exception {
    String[] tableNames = getUniqueNames(3);

    verifyVolumesUsed(client, tableNames[0], false, v1, v2);

    // write to 2nd table, but do not flush data to disk before shutdown
    writeData(tableNames[1], cluster.createAccumuloClient("root", new PasswordToken(ROOT_PASSWORD)));

    if (cleanShutdown)
        assertEquals(0, cluster.exec(Admin.class, "stopAll").getProcess().waitFor());

    cluster.stop();

    File v1f = new File(v1.toUri());
    File v8f = new File(new File(v1.getParent().toUri()), "v8");
    assertTrue("Failed to rename " + v1f + " to " + v8f, v1f.renameTo(v8f));
    Path v8 = new Path(v8f.toURI());

    File v2f = new File(v2.toUri());
    File v9f = new File(new File(v2.getParent().toUri()), "v9");
    assertTrue("Failed to rename " + v2f + " to " + v9f, v2f.renameTo(v9f));
    Path v9 = new Path(v9f.toURI());

    PropertiesConfiguration conf = new PropertiesConfiguration();
    conf.load(cluster.getAccumuloPropertiesPath());
    conf.setProperty(Property.INSTANCE_VOLUMES.getKey(), v8 + "," + v9);
    conf.setProperty(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey(), v1 + " " + v8 + "," + v2 + " " + v9);
    conf.save(cluster.getAccumuloPropertiesPath());

    // start cluster and verify that volumes were replaced
    cluster.start();

    verifyVolumesUsed(client, tableNames[0], true, v8, v9);
    verifyVolumesUsed(client, tableNames[1], true, v8, v9);

    // verify writes to new dir
    client.tableOperations().compact(tableNames[0], null, null, true, true);
    client.tableOperations().compact(tableNames[1], null, null, true, true);

    verifyVolumesUsed(client, tableNames[0], true, v8, v9);
    verifyVolumesUsed(client, tableNames[1], true, v8, v9);

    // check that root tablet is not on volume 1 or 2
    ZooReader zreader = new ZooReader(cluster.getZooKeepers(), 30000);
    String zpath = ZooUtil.getRoot(client.instanceOperations().getInstanceID()) + RootTable.ZROOT_TABLET_PATH;
    String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
    assertTrue(rootTabletDir.startsWith(v8.toString()) || rootTabletDir.startsWith(v9.toString()));

    client.tableOperations().clone(tableNames[1], tableNames[2], true, new HashMap<>(), new HashSet<>());

    client.tableOperations().flush(MetadataTable.NAME, null, null, true);
    client.tableOperations().flush(RootTable.NAME, null, null, true);

    verifyVolumesUsed(client, tableNames[0], true, v8, v9);
    verifyVolumesUsed(client, tableNames[1], true, v8, v9);
    verifyVolumesUsed(client, tableNames[2], true, v8, v9);
}

From source file: org.apache.atlas.web.security.BaseSecurityTest.java

protected void generateTestProperties(Properties props) throws ConfigurationException, IOException {
    PropertiesConfiguration config = new PropertiesConfiguration(
            System.getProperty("user.dir") + "/../src/conf/" + ApplicationProperties.APPLICATION_PROPERTIES);
    for (String propName : props.stringPropertyNames()) {
        config.setProperty(propName, props.getProperty(propName));
    }
    File file = new File(System.getProperty("user.dir"), ApplicationProperties.APPLICATION_PROPERTIES);
    file.deleteOnExit();
    Writer fileWriter = new FileWriter(file);
    config.save(fileWriter);
}

From source file: org.apache.atlas.web.TestUtils.java

public static void writeConfiguration(PropertiesConfiguration configuration, String fileName) throws Exception {
    LOG.debug("Storing configuration in file {}", fileName);
    File file = new File(fileName);
    File parentFile = file.getParentFile();
    if (!parentFile.exists() && !parentFile.mkdirs()) {
        throw new Exception("Failed to create dir " + parentFile.getAbsolutePath());
    }
    file.createNewFile();
    configuration.save(new FileWriter(file));
}
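
Note that the FileWriter above is never closed. A variant of the same save call that closes the writer via try-with-resources is sketched below; it mirrors the Atlas helper but is not part of the project's code:

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;

import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;

public final class ConfigWriterSketch {
    public static void writeConfiguration(PropertiesConfiguration configuration, String fileName)
            throws ConfigurationException, IOException {
        File file = new File(fileName);
        // try-with-resources flushes and closes the writer even if save() throws
        try (Writer writer = new FileWriter(file)) {
            configuration.save(writer);
        }
    }
}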

From source file: org.apache.fluo.api.config.SimpleConfiguration.java

public void save(File file) {
    PropertiesConfiguration pconf = new PropertiesConfiguration();
    pconf.append(internalConfig);
    try {
        pconf.save(file);
    } catch (ConfigurationException e) {
        throw new FluoException(e);
    }
}

From source file: org.apache.fluo.api.config.SimpleConfiguration.java

public void save(OutputStream out) {
    PropertiesConfiguration pconf = new PropertiesConfiguration();
    pconf.append(internalConfig);
    try {
        pconf.save(out);
    } catch (ConfigurationException e) {
        throw new FluoException(e);
    }
}

From source file: org.apache.kylin.rest.service.AdminService.java

/**
 * Get Java Env info as string
 */
@PreAuthorize(Constant.ACCESS_HAS_ROLE_ADMIN)
public String getEnv() throws ConfigurationException {
    PropertiesConfiguration tempConfig = new PropertiesConfiguration();
    OrderedProperties orderedProperties = new OrderedProperties(new TreeMap<String, String>());
    // Add Java Env

    String content = "";
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // env
    Map<String, String> env = System.getenv();

    for (Map.Entry<String, String> entry : env.entrySet()) {
        orderedProperties.setProperty(entry.getKey(), entry.getValue());
    }

    // properties
    Properties properties = System.getProperties();

    for (Map.Entry<Object, Object> entry : properties.entrySet()) {
        orderedProperties.setProperty((String) entry.getKey(), (String) entry.getValue());
    }

    for (Map.Entry<String, String> entry : orderedProperties.entrySet()) {
        tempConfig.addProperty(entry.getKey(), entry.getValue());
    }

    // do save
    tempConfig.save(baos);
    content = baos.toString();
    return content;
}