Example usage for org.apache.commons.configuration PropertiesConfiguration load

List of usage examples for org.apache.commons.configuration PropertiesConfiguration load

Introduction

On this page you can find an example usage for org.apache.commons.configuration PropertiesConfiguration load.

Prototype

public synchronized void load(Reader in) throws ConfigurationException 

Source Link

Document

Load the properties from the given reader.

Usage

From source file:org.apache.accumulo.core.client.ClientConfiguration.java

/**
 * Reconstructs a {@link ClientConfiguration} from its serialized properties-format text.
 *
 * @param serializedConfig properties-format text previously produced by serialization
 * @return a ClientConfiguration backed by the parsed properties
 * @throws IllegalArgumentException if the text cannot be parsed
 */
public static ClientConfiguration deserialize(String serializedConfig) {
    PropertiesConfiguration props = new PropertiesConfiguration();
    // disable list splitting so property values are kept verbatim
    props.setListDelimiter('\0');
    try {
        props.load(new StringReader(serializedConfig));
    } catch (ConfigurationException e) {
        throw new IllegalArgumentException("Error deserializing client configuration: " + serializedConfig, e);
    }
    return new ClientConfiguration(props);
}

From source file:org.apache.accumulo.core.conf.SiteConfiguration.java

@SuppressFBWarnings(value = "URLCONNECTION_SSRF_FD", justification = "location of props is specified by an admin")
/**
 * Builds the effective site configuration map: properties loaded from the given
 * location, overlaid with explicit overrides, plus any sensitive properties
 * resolvable through the Hadoop credential provider.
 *
 * @param accumuloPropsLocation location of accumulo.properties, may be null
 * @param overrides explicit property overrides that take precedence over the file
 * @return an immutable snapshot of the merged configuration
 * @throws IllegalArgumentException if the properties file cannot be read or parsed
 */
@SuppressFBWarnings(value = "URLCONNECTION_SSRF_FD", justification = "location of props is specified by an admin")
private static ImmutableMap<String, String> createMap(URL accumuloPropsLocation,
        Map<String, String> overrides) {
    CompositeConfiguration config = new CompositeConfiguration();
    config.setThrowExceptionOnMissing(false);
    config.setDelimiterParsingDisabled(true);
    PropertiesConfiguration propsConfig = new PropertiesConfiguration();
    propsConfig.setDelimiterParsingDisabled(true);
    if (accumuloPropsLocation != null) {
        // try-with-resources: the stream returned by openStream() was previously never closed
        try (java.io.InputStream in = accumuloPropsLocation.openStream()) {
            propsConfig.load(in);
        } catch (IOException | ConfigurationException e) {
            throw new IllegalArgumentException(e);
        }
    }
    config.addConfiguration(propsConfig);

    // Add all properties in config file
    Map<String, String> result = new HashMap<>();
    config.getKeys().forEachRemaining(key -> result.put(key, config.getString(key)));

    // Add all overrides (take precedence over file contents)
    result.putAll(overrides);

    // Add sensitive properties from credential provider (if set)
    String credProvider = result.get(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey());
    if (credProvider != null) {
        org.apache.hadoop.conf.Configuration hadoopConf = new org.apache.hadoop.conf.Configuration();
        hadoopConf.set(CredentialProviderFactoryShim.CREDENTIAL_PROVIDER_PATH, credProvider);
        for (Property property : Property.values()) {
            if (property.isSensitive()) {
                char[] value = CredentialProviderFactoryShim.getValueFromCredentialProvider(hadoopConf,
                        property.getKey());
                if (value != null) {
                    result.put(property.getKey(), new String(value));
                }
            }
        }
    }
    return ImmutableMap.copyOf(result);
}

From source file:org.apache.accumulo.minicluster.MiniAccumuloInstance.java

/**
 * Loads the client configuration the mini cluster wrote under {@code conf/client.conf}
 * in the given directory.
 *
 * @param directory the mini cluster's base directory
 * @return the parsed client configuration
 * @throws IllegalArgumentException if the file cannot be parsed (not expected,
 *         since the cluster wrote it itself)
 */
public static PropertiesConfiguration getConfigProperties(File directory) {
    File clientConf = new File(new File(directory, "conf"), "client.conf");
    PropertiesConfiguration config = new PropertiesConfiguration();
    // keep property values verbatim; do not split on commas
    config.setListDelimiter('\0');
    try {
        config.load(clientConf);
        return config;
    } catch (ConfigurationException e) {
        // this should never happen since we wrote the config file ourselves
        throw new IllegalArgumentException(e);
    }
}

From source file:org.apache.accumulo.start.classloader.AccumuloClassLoader.java

/**
 * Returns value of property in accumulo.properties file, otherwise default value
 *
 * @param propertyName/*  w ww .ja  v a  2s  . c  o m*/
 *          Name of the property to pull
 * @param defaultValue
 *          Value to default to if not found.
 * @return value of property or default
 */
public static String getAccumuloProperty(String propertyName, String defaultValue) {
    if (accumuloConfigUrl == null) {
        log.warn("Using default value '{}' for '{}' as there is no Accumulo configuration on classpath",
                defaultValue, propertyName);
        return defaultValue;
    }
    try {
        PropertiesConfiguration config = new PropertiesConfiguration();
        config.setDelimiterParsingDisabled(true);
        config.setThrowExceptionOnMissing(false);
        config.load(accumuloConfigUrl);
        String value = config.getString(propertyName);
        if (value != null)
            return value;
        return defaultValue;
    } catch (Exception e) {
        throw new IllegalStateException(
                "Failed to look up property " + propertyName + " in " + accumuloConfigUrl.getFile(), e);
    }
}

From source file:org.apache.accumulo.test.RewriteTabletDirectoriesIT.java

@Test
public void test() throws Exception {
    try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
        c.securityOperations().grantTablePermission(c.whoami(), MetadataTable.NAME, TablePermission.WRITE);
        final String tableName = getUniqueNames(1)[0];
        c.tableOperations().create(tableName);

        // Write some data to a table and add some splits
        final SortedSet<Text> splits = new TreeSet<>();
        try (BatchWriter bw = c.createBatchWriter(tableName)) {
            for (String split : "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",")) {
                splits.add(new Text(split));
                Mutation m = new Mutation(new Text(split));
                m.put(new byte[] {}, new byte[] {}, new byte[] {});
                bw.addMutation(m);
            }
        }
        c.tableOperations().addSplits(tableName, splits);

        try (BatchScanner scanner = c.createBatchScanner(MetadataTable.NAME)) {
            DIRECTORY_COLUMN.fetch(scanner);
            TableId tableId = TableId.of(c.tableOperations().tableIdMap().get(tableName));
            assertNotNull("TableID for " + tableName + " was null", tableId);
            scanner.setRanges(Collections.singletonList(TabletsSection.getRange(tableId)));
            // verify the directory entries are all on v1, make a few entries relative
            int count = 0;
            try (BatchWriter bw = c.createBatchWriter(MetadataTable.NAME)) {
                for (Entry<Key, Value> entry : scanner) {
                    assertTrue("Expected " + entry.getValue() + " to contain " + v1,
                            entry.getValue().toString().contains(v1.toString()));
                    count++;
                    if (count % 2 == 0) {
                        // rewrite every other directory entry as a relative path
                        String[] parts = entry.getValue().toString().split("/");
                        Key key = entry.getKey();
                        Mutation m = new Mutation(key.getRow());
                        m.put(key.getColumnFamily(), key.getColumnQualifier(),
                                new Value((Path.SEPARATOR + parts[parts.length - 1]).getBytes()));
                        bw.addMutation(m);
                    }
                }
            }
            assertEquals(splits.size() + 1, count);

            // This should fail: only one volume
            assertEquals(1,
                    cluster.exec(RandomizeVolumes.class, "-c", cluster.getClientPropsPath(), "-t", tableName)
                            .getProcess().waitFor());

            cluster.stop();

            // add the 2nd volume
            PropertiesConfiguration conf = new PropertiesConfiguration();
            conf.load(cluster.getAccumuloPropertiesPath());
            conf.setProperty(Property.INSTANCE_VOLUMES.getKey(), v1 + "," + v2);
            conf.save(cluster.getAccumuloPropertiesPath());

            // initialize volume
            assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").getProcess().waitFor());
            cluster.start();

            // change the directory entries
            assertEquals(0,
                    cluster.exec(Admin.class, "randomizeVolumes", "-t", tableName).getProcess().waitFor());

            // verify a more equal sharing
            int v1Count = 0, v2Count = 0;
            for (Entry<Key, Value> entry : scanner) {
                if (entry.getValue().toString().contains(v1.toString())) {
                    v1Count++;
                }
                if (entry.getValue().toString().contains(v2.toString())) {
                    v2Count++;
                }
            }

            log.info("Count for volume1: {}", v1Count);
            log.info("Count for volume2: {}", v2Count);

            assertEquals(splits.size() + 1, v1Count + v2Count);
            // a fair chooser will differ by less than count(volumes)
            // (message fixed: it previously said "less than 10" while the check is < 2)
            assertTrue("Expected the number of files to differ between volumes by less than 2. " + v1Count
                    + " " + v2Count, Math.abs(v1Count - v2Count) < 2);
            // verify we can read the old data
            count = 0;
            for (Entry<Key, Value> entry : c.createScanner(tableName, Authorizations.EMPTY)) {
                assertTrue("Found unexpected entry in table: " + entry,
                        splits.contains(entry.getKey().getRow()));
                count++;
            }
            assertEquals(splits.size(), count);
        }
    }
}

From source file:org.apache.accumulo.test.VolumeIT.java

/**
 * Verifies both volumes are in use for the given table, stops the cluster
 * cleanly, and re-reads its accumulo.properties into {@code conf}.
 *
 * @return the instance id captured before shutdown
 */
private String verifyAndShutdownCluster(AccumuloClient c, PropertiesConfiguration conf, String tableName)
        throws Exception {
    String instanceId = c.instanceOperations().getInstanceID();

    verifyVolumesUsed(c, tableName, false, v1, v2);

    assertEquals(0, cluster.exec(Admin.class, "stopAll").getProcess().waitFor());
    cluster.stop();

    conf.load(cluster.getAccumuloPropertiesPath());
    return instanceId;
}

From source file:org.apache.accumulo.test.VolumeIT.java

@Test
public void testRemoveVolumes() throws Exception {
    try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
        String[] tables = getUniqueNames(2);

        verifyVolumesUsed(c, tables[0], false, v1, v2);

        // shut the cluster down cleanly before editing its site configuration
        assertEquals(0, cluster.exec(Admin.class, "stopAll").getProcess().waitFor());
        cluster.stop();

        // drop v1 from the instance volumes, leaving only v2
        PropertiesConfiguration siteConf = new PropertiesConfiguration();
        siteConf.load(cluster.getAccumuloPropertiesPath());
        siteConf.setProperty(Property.INSTANCE_VOLUMES.getKey(), v2.toString());
        siteConf.save(cluster.getAccumuloPropertiesPath());

        // start cluster and verify that volume was decommissioned
        cluster.start();

        c.tableOperations().compact(tables[0], null, null, true, true);

        verifyVolumesUsed(c, tables[0], true, v2);

        // check that root tablet is not on volume 1
        ZooReader zk = new ZooReader(cluster.getZooKeepers(), 30000);
        String rootTabletZPath = ZooUtil.getRoot(c.instanceOperations().getInstanceID())
                + RootTable.ZROOT_TABLET_PATH;
        String rootTabletDir = new String(zk.getData(rootTabletZPath, false, null), UTF_8);
        assertTrue(rootTabletDir.startsWith(v2.toString()));

        c.tableOperations().clone(tables[0], tables[1], true, new HashMap<>(), new HashSet<>());

        c.tableOperations().flush(MetadataTable.NAME, null, null, true);
        c.tableOperations().flush(RootTable.NAME, null, null, true);

        verifyVolumesUsed(c, tables[0], true, v2);
        verifyVolumesUsed(c, tables[1], true, v2);
    }
}

From source file:org.apache.accumulo.test.VolumeIT.java

/** Renames the directory backing {@code volume} to {@code newName} under the same parent; returns the new path. */
private static Path renameVolumeDir(Path volume, String newName) {
    File oldDir = new File(volume.toUri());
    File newDir = new File(new File(volume.getParent().toUri()), newName);
    assertTrue("Failed to rename " + oldDir + " to " + newDir, oldDir.renameTo(newDir));
    return new Path(newDir.toURI());
}

/**
 * Renames both volume directories on disk, points the cluster at the new names via
 * INSTANCE_VOLUMES / INSTANCE_VOLUMES_REPLACEMENTS, and verifies existing and new
 * data land on the replacement volumes.
 *
 * @param cleanShutdown whether to run "stopAll" before stopping the cluster,
 *        exercising both clean- and hard-shutdown recovery paths
 */
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "paths provided by test")
private void testReplaceVolume(AccumuloClient client, boolean cleanShutdown) throws Exception {
    String[] tableNames = getUniqueNames(3);

    verifyVolumesUsed(client, tableNames[0], false, v1, v2);

    // write to 2nd table, but do not flush data to disk before shutdown
    writeData(tableNames[1], cluster.createAccumuloClient("root", new PasswordToken(ROOT_PASSWORD)));

    if (cleanShutdown)
        assertEquals(0, cluster.exec(Admin.class, "stopAll").getProcess().waitFor());

    cluster.stop();

    // rename the volume directories on disk (was duplicated inline; extracted to helper)
    Path v8 = renameVolumeDir(v1, "v8");
    Path v9 = renameVolumeDir(v2, "v9");

    PropertiesConfiguration conf = new PropertiesConfiguration();
    conf.load(cluster.getAccumuloPropertiesPath());
    conf.setProperty(Property.INSTANCE_VOLUMES.getKey(), v8 + "," + v9);
    conf.setProperty(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey(), v1 + " " + v8 + "," + v2 + " " + v9);
    conf.save(cluster.getAccumuloPropertiesPath());

    // start cluster and verify that volumes were replaced
    cluster.start();

    verifyVolumesUsed(client, tableNames[0], true, v8, v9);
    verifyVolumesUsed(client, tableNames[1], true, v8, v9);

    // verify writes to new dir
    client.tableOperations().compact(tableNames[0], null, null, true, true);
    client.tableOperations().compact(tableNames[1], null, null, true, true);

    verifyVolumesUsed(client, tableNames[0], true, v8, v9);
    verifyVolumesUsed(client, tableNames[1], true, v8, v9);

    // check that root tablet is not on volume 1 or 2
    ZooReader zreader = new ZooReader(cluster.getZooKeepers(), 30000);
    String zpath = ZooUtil.getRoot(client.instanceOperations().getInstanceID()) + RootTable.ZROOT_TABLET_PATH;
    String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
    assertTrue(rootTabletDir.startsWith(v8.toString()) || rootTabletDir.startsWith(v9.toString()));

    client.tableOperations().clone(tableNames[1], tableNames[2], true, new HashMap<>(), new HashSet<>());

    client.tableOperations().flush(MetadataTable.NAME, null, null, true);
    client.tableOperations().flush(RootTable.NAME, null, null, true);

    verifyVolumesUsed(client, tableNames[0], true, v8, v9);
    verifyVolumesUsed(client, tableNames[1], true, v8, v9);
    verifyVolumesUsed(client, tableNames[2], true, v8, v9);
}

From source file:org.apache.atlas.web.security.BaseSecurityTest.java

/**
 * Merges {@code configuration} over the application properties found via the
 * "atlas.conf" system property (or the classpath), enables file-based
 * authentication rooted in a temp directory, persists the result, and forces
 * {@link ApplicationProperties} to reload.
 *
 * @return the temp directory the merged configuration was persisted to
 */
public static String writeConfiguration(final PropertiesConfiguration configuration) throws Exception {
    String confLocation = System.getProperty("atlas.conf");
    URL url = confLocation == null
            ? BaseSecurityTest.class.getResource("/" + ApplicationProperties.APPLICATION_PROPERTIES)
            : new File(confLocation, ApplicationProperties.APPLICATION_PROPERTIES).toURI().toURL();

    PropertiesConfiguration merged = new PropertiesConfiguration();
    merged.load(url);
    // overlay the caller's settings on top of the loaded base properties
    merged.copy(configuration);

    String persistDir = TestUtils.getTempDirectory();
    merged.setProperty("atlas.authentication.method.file", "true");
    merged.setProperty("atlas.authentication.method.file.filename",
            persistDir + "/users-credentials");
    merged.setProperty("atlas.auth.policy.file", persistDir + "/policy-store.txt");
    TestUtils.writeConfiguration(merged,
            persistDir + File.separator + ApplicationProperties.APPLICATION_PROPERTIES);
    setupUserCredential(persistDir);
    setUpPolicyStore(persistDir);
    ApplicationProperties.forceReload();
    return persistDir;
}

From source file:org.apache.atlas.web.security.NegativeSSLAndKerberosTest.java

/**
 * Prepares a secure (SSL + Kerberos) embedded server environment: credentials,
 * KDC principals, merged SSL/Kerberos/file-auth configuration persisted to a
 * temp conf directory, and a started TestSecureEmbeddedServer.
 */
@BeforeClass
public void setUp() throws Exception {
    jksPath = new Path(Files.createTempDirectory("tempproviders").toString(), "test.jks");
    providerUrl = JavaKeyStoreProvider.SCHEME_NAME + "://file/" + jksPath.toUri();

    String persistDir = TestUtils.getTempDirectory();

    setupKDCAndPrincipals();
    setupCredentials();

    // client will actually only leverage subset of these properties
    final PropertiesConfiguration configuration = getSSLConfiguration(providerUrl);

    persistSSLClientConfiguration(configuration);

    TestUtils.writeConfiguration(configuration,
            persistDir + File.separator + ApplicationProperties.APPLICATION_PROPERTIES);

    String confLocation = System.getProperty("atlas.conf");
    URL url;
    if (confLocation == null) {
        url = NegativeSSLAndKerberosTest.class.getResource("/" + ApplicationProperties.APPLICATION_PROPERTIES);
    } else {
        url = new File(confLocation, ApplicationProperties.APPLICATION_PROPERTIES).toURI().toURL();
    }
    configuration.load(url);

    configuration.setProperty(TLS_ENABLED, true);
    // NOTE: redundant duplicate assignments removed — "atlas.authentication.method.kerberos"
    // was previously set to "true" twice and "atlas.authentication.method.file" was set to
    // "false" only to be overwritten with "true" below; the effective values are unchanged.
    configuration.setProperty("atlas.authentication.method.kerberos", "true");
    configuration.setProperty("atlas.authentication.keytab", userKeytabFile.getAbsolutePath());
    configuration.setProperty("atlas.authentication.principal", "dgi/localhost@" + kdc.getRealm());

    configuration.setProperty("atlas.authentication.method.kerberos.principal",
            "HTTP/localhost@" + kdc.getRealm());
    configuration.setProperty("atlas.authentication.method.kerberos.keytab", httpKeytabFile.getAbsolutePath());
    configuration.setProperty("atlas.authentication.method.kerberos.name.rules",
            "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nDEFAULT");

    configuration.setProperty("atlas.authentication.method.file", "true");
    configuration.setProperty("atlas.authentication.method.file.filename", persistDir + "/users-credentials");
    configuration.setProperty("atlas.auth.policy.file", persistDir + "/policy-store.txt");

    TestUtils.writeConfiguration(configuration,
            persistDir + File.separator + ApplicationProperties.APPLICATION_PROPERTIES);

    setupUserCredential(persistDir);
    setUpPolicyStore(persistDir);

    // save original setting
    originalConf = System.getProperty("atlas.conf");
    System.setProperty("atlas.conf", persistDir);

    dgiClient = new AtlasClient(configuration, DGI_URL);

    secureEmbeddedServer = new TestSecureEmbeddedServer(21443, getWarPath()) {
        @Override
        public Configuration getConfiguration() {
            return configuration;
        }
    };
    secureEmbeddedServer.getServer().start();
}