Example usage for org.apache.commons.configuration Configuration clearProperty

Introduction

This page collects example usages of org.apache.commons.configuration Configuration.clearProperty from open source projects.

Prototype

void clearProperty(String key);

Document

Remove a property from the configuration.
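
A minimal, self-contained sketch of the method's effect (the key name "db.timeout" is invented for illustration):

import org.apache.commons.configuration.BaseConfiguration;
import org.apache.commons.configuration.Configuration;

Configuration config = new BaseConfiguration();
config.setProperty("db.timeout", 30);             // add an example property
config.clearProperty("db.timeout");               // remove it again
assert !config.containsKey("db.timeout");         // the key is now absent
assert config.getInt("db.timeout", 60) == 60;     // lookups fall back to the caller's default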

Usage

From source file: org.apache.tinkerpop.gremlin.spark.structure.io.gryo.GryoSerializerIntegrateTest.java

@Test
public void shouldHaveAllRegisteredGryoSerializerClasses() throws Exception {
    // this is a stress test ensuring that when data is spilled to disk, persisted to an RDD, etc., the correct classes are registered with GryoSerializer.
    final TinkerGraph randomGraph = TinkerGraph.open();
    int totalVertices = 200000;
    TestHelper.createRandomGraph(randomGraph, totalVertices, 100);
    final String inputLocation = TestHelper.makeTestDataDirectory(GryoSerializerIntegrateTest.class,
            UUID.randomUUID().toString()) + "/random-graph.kryo";
    randomGraph.io(IoCore.gryo()).writeGraph(inputLocation);
    randomGraph.clear();
    randomGraph.close();

    final String outputLocation = TestHelper.makeTestDataDirectory(GryoSerializerIntegrateTest.class,
            UUID.randomUUID().toString());
    Configuration configuration = getBaseConfiguration();
    configuration.clearProperty(Constants.SPARK_SERIALIZER); // ensure proper default to GryoSerializer
    configuration.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, inputLocation);
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_INPUT_FORMAT,
            GryoInputFormat.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_FORMAT,
            GryoOutputFormat.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, outputLocation);
    configuration.setProperty(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, false);
    Graph graph = GraphFactory.open(configuration);
    final GraphTraversal.Admin<Vertex, Map<Vertex, Collection<Vertex>>> traversal = graph.traversal()
            .withComputer(SparkGraphComputer.class).V().group("m").<Map<Vertex, Collection<Vertex>>>cap("m")
            .asAdmin();
    assertTrue(traversal.hasNext());
    assertTrue(traversal.next() == traversal.getSideEffects().get("m"));
    assertFalse(traversal.hasNext());
    assertTrue(traversal.getSideEffects().exists("m"));
    assertTrue(traversal.getSideEffects().get("m") instanceof Map);
    assertEquals(totalVertices, traversal.getSideEffects().<Map>get("m").size());

    configuration = getBaseConfiguration();
    configuration.clearProperty(Constants.SPARK_SERIALIZER); // ensure proper default to GryoSerializer
    configuration.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, inputLocation);
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_INPUT_FORMAT,
            GryoInputFormat.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_SPARK_GRAPH_OUTPUT_RDD,
            PersistedOutputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_SPARK_PERSIST_STORAGE_LEVEL, "DISK_ONLY");
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, "persisted-rdd");
    configuration.setProperty(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, true);
    graph = GraphFactory.open(configuration);
    assertEquals(totalVertices,
            graph.compute(SparkGraphComputer.class)
                    .program(PageRankVertexProgram.build().iterations(2).create(graph)).submit().get().graph()
                    .traversal().V().count().next().longValue());

    configuration = getBaseConfiguration();
    configuration.clearProperty(Constants.SPARK_SERIALIZER); // ensure proper default to GryoSerializer
    configuration.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, "persisted-rdd");
    configuration.setProperty(Constants.GREMLIN_SPARK_GRAPH_INPUT_RDD,
            PersistedInputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_FORMAT,
            GryoOutputFormat.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, outputLocation);
    configuration.setProperty(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, true);
    graph = GraphFactory.open(configuration);
    assertEquals(totalVertices,
            graph.traversal().withComputer(SparkGraphComputer.class).V().count().next().longValue());

    configuration = getBaseConfiguration();
    configuration.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, "persisted-rdd");
    configuration.setProperty(Constants.GREMLIN_SPARK_GRAPH_INPUT_RDD,
            PersistedInputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_SPARK_GRAPH_OUTPUT_RDD,
            PersistedOutputRDD.class.getCanonicalName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, outputLocation);
    configuration.setProperty(Constants.GREMLIN_SPARK_GRAPH_STORAGE_LEVEL, "MEMORY_ONLY"); // this should be ignored as you can't change the persistence level once created
    configuration.setProperty(Constants.GREMLIN_SPARK_PERSIST_STORAGE_LEVEL, "MEMORY_AND_DISK");
    configuration.setProperty(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, true);
    graph = GraphFactory.open(configuration);
    assertEquals(totalVertices,
            graph.traversal().withComputer(SparkGraphComputer.class).V().count().next().longValue());
}
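
In the configurations above, clearProperty(Constants.SPARK_SERIALIZER) removes any serializer set by the base test configuration so that the library's default (GryoSerializer) takes effect. The fall-back behaviour can be seen in isolation; in this sketch the key and default value are illustrative stand-ins:

import org.apache.commons.configuration.BaseConfiguration;
import org.apache.commons.configuration.Configuration;

Configuration conf = new BaseConfiguration();
conf.setProperty("spark.serializer", "SomeOtherSerializer"); // an override set elsewhere
conf.clearProperty("spark.serializer");                      // drop the override
// code that reads the key with a default now sees the default again
String serializer = conf.getString("spark.serializer", "DefaultSerializer");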

From source file: org.eclipse.winery.repository.backend.BackendUtils.java

/**
 * Updates the given property in the given configuration. Currently always
 * returns "no content", because the underlying class does not report any
 * errors during updating. <br />
 * 
 * If null or "" is passed as the value, the property is cleared.
 * 
 * @return Status.NO_CONTENT
 */
public static Response updateProperty(Configuration configuration, String property, String val) {
    if (StringUtils.isBlank(val)) {
        configuration.clearProperty(property);
    } else {
        configuration.setProperty(property, val);
    }
    return Response.noContent().build();
}
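
A hypothetical caller of this helper (BaseConfiguration and the "hostname" key are stand-ins for whatever Winery actually uses):

Configuration repositoryConfig = new BaseConfiguration();
updateProperty(repositoryConfig, "hostname", "localhost"); // non-blank value: property is set
updateProperty(repositoryConfig, "hostname", "");          // blank value: property is cleared
assert !repositoryConfig.containsKey("hostname");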

From source file: org.janusgraph.hadoop.MapReduceIndexManagement.java

/**
 * Updates the provided index according to the given {@link SchemaAction}.
 * Only {@link SchemaAction#REINDEX} and {@link SchemaAction#REMOVE_INDEX} are supported.
 *
 * @param index the index to process
 * @param updateAction either {@code REINDEX} or {@code REMOVE_INDEX}
 * @return an already-completed future: this method blocks until the
 *         Hadoop MapReduce job finishes before returning
 */
// TODO make this future actually async and update javadoc @return accordingly
public JanusGraphManagement.IndexJobFuture updateIndex(Index index, SchemaAction updateAction)
        throws BackendException {

    Preconditions.checkNotNull(index, "Index parameter must not be null");
    Preconditions.checkNotNull(updateAction, "%s parameter must not be null",
            SchemaAction.class.getSimpleName());
    Preconditions.checkArgument(SUPPORTED_ACTIONS.contains(updateAction),
            "Only these %s parameters are supported: %s (was given %s)", SchemaAction.class.getSimpleName(),
            SUPPORTED_ACTIONS_STRING, updateAction);
    Preconditions.checkArgument(
            RelationTypeIndex.class.isAssignableFrom(index.getClass())
                    || JanusGraphIndex.class.isAssignableFrom(index.getClass()),
            "Index %s has class %s: must be a %s or %s (or subtype)", index.getClass(),
            RelationTypeIndex.class.getSimpleName(), JanusGraphIndex.class.getSimpleName());

    org.apache.hadoop.conf.Configuration hadoopConf = new org.apache.hadoop.conf.Configuration();
    ModifiableHadoopConfiguration janusgraphmrConf = ModifiableHadoopConfiguration
            .of(JanusGraphHadoopConfiguration.MAPRED_NS, hadoopConf);

    // The job we'll execute to either REINDEX or REMOVE_INDEX
    final Class<? extends IndexUpdateJob> indexJobClass;
    final Class<? extends Mapper> mapperClass;

    // The class of the IndexUpdateJob and the Mapper that will be used to run it (VertexScanJob vs ScanJob)
    if (updateAction.equals(SchemaAction.REINDEX)) {
        indexJobClass = IndexRepairJob.class;
        mapperClass = HadoopVertexScanMapper.class;
    } else if (updateAction.equals(SchemaAction.REMOVE_INDEX)) {
        indexJobClass = IndexRemoveJob.class;
        mapperClass = HadoopScanMapper.class;
    } else {
        // Shouldn't get here -- if this exception is ever thrown, update SUPPORTED_ACTIONS
        throw new IllegalStateException(
                "Unrecognized " + SchemaAction.class.getSimpleName() + ": " + updateAction);
    }

    // The column family that serves as input to the IndexUpdateJob
    final String readCF;
    if (RelationTypeIndex.class.isAssignableFrom(index.getClass())) {
        readCF = Backend.EDGESTORE_NAME;
    } else {
        JanusGraphIndex gindex = (JanusGraphIndex) index;
        if (gindex.isMixedIndex() && !updateAction.equals(SchemaAction.REINDEX))
            throw new UnsupportedOperationException(
                    "External mixed indexes must be removed in the indexing system directly.");

        Preconditions.checkState(JanusGraphIndex.class.isAssignableFrom(index.getClass()));
        if (updateAction.equals(SchemaAction.REMOVE_INDEX))
            readCF = Backend.INDEXSTORE_NAME;
        else
            readCF = Backend.EDGESTORE_NAME;
    }
    janusgraphmrConf.set(JanusGraphHadoopConfiguration.COLUMN_FAMILY_NAME, readCF);

    // The MapReduce InputFormat class based on the open graph's store manager
    final Class<? extends InputFormat> inputFormat;
    final Class<? extends KeyColumnValueStoreManager> storeManagerClass = graph.getBackend()
            .getStoreManagerClass();
    if (CASSANDRA_STORE_MANAGER_CLASSES.contains(storeManagerClass)) {
        inputFormat = CassandraBinaryInputFormat.class;
        // Set the partitioner
        IPartitioner part = ((AbstractCassandraStoreManager) graph.getBackend().getStoreManager())
                .getCassandraPartitioner();
        hadoopConf.set("cassandra.input.partitioner.class", part.getClass().getName());
    } else if (HBASE_STORE_MANAGER_CLASSES.contains(storeManagerClass)) {
        inputFormat = HBaseBinaryInputFormat.class;
    } else {
        throw new IllegalArgumentException("Store manager class " + storeManagerClass + " is not supported");
    }

    // The index name and relation type name (if the latter is applicable)
    final String indexName = index.name();
    final String relationTypeName = RelationTypeIndex.class.isAssignableFrom(index.getClass())
            ? ((RelationTypeIndex) index).getType().name()
            : "";
    Preconditions.checkNotNull(indexName);

    // Set the class of the IndexUpdateJob
    janusgraphmrConf.set(JanusGraphHadoopConfiguration.SCAN_JOB_CLASS, indexJobClass.getName());
    // Set the configuration of the IndexUpdateJob
    copyIndexJobKeys(hadoopConf, indexName, relationTypeName);
    janusgraphmrConf.set(JanusGraphHadoopConfiguration.SCAN_JOB_CONFIG_ROOT,
            GraphDatabaseConfiguration.class.getName() + "#JOB_NS");
    // Copy the StandardJanusGraph configuration under JanusGraphHadoopConfiguration.GRAPH_CONFIG_KEYS
    org.apache.commons.configuration.Configuration localbc = graph.getConfiguration().getLocalConfiguration();
    localbc.clearProperty(Graph.GRAPH);
    copyInputKeys(hadoopConf, localbc);

    String jobName = HadoopScanMapper.class.getSimpleName() + "[" + indexJobClass.getSimpleName() + "]";

    try {
        return new CompletedJobFuture(HadoopScanRunner.runJob(hadoopConf, inputFormat, jobName, mapperClass));
    } catch (Exception e) {
        return new FailedJobFuture(e);
    }
}
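
The localbc.clearProperty(Graph.GRAPH) call near the end strips the graph-factory key before the remaining local configuration is copied into the Hadoop job. A stripped-down sketch of that idiom, with illustrative key names and a simplified stand-in for copyInputKeys:

org.apache.commons.configuration.Configuration local = new BaseConfiguration();
local.setProperty("gremlin.graph", "org.janusgraph.core.JanusGraphFactory");
local.setProperty("storage.backend", "cql");
local.clearProperty("gremlin.graph"); // this key must not leak into the MapReduce job

org.apache.hadoop.conf.Configuration hadoopConf = new org.apache.hadoop.conf.Configuration();
Iterator<String> keys = local.getKeys();
while (keys.hasNext()) {
    String key = keys.next();
    hadoopConf.set(key, local.getString(key)); // copy the surviving keys
}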

From source file: org.lable.oss.dynamicconfig.core.commonsconfiguration.ConcurrentConfigurationTest.java

@Test(expected = UnsupportedOperationException.class)
public void clearPropertyTest() {
    CombinedConfiguration mockConfiguration = mock(CombinedConfiguration.class);
    Configuration concurrentConfiguration = new ConcurrentConfiguration(mockConfiguration, null);
    concurrentConfiguration.clearProperty("test");
}
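
The decorator under test treats the configuration as read-only. A hypothetical wrapper with the same contract for clearProperty could look like this (the class name is invented; AbstractConfiguration's clearProperty is overridable):

import org.apache.commons.configuration.BaseConfiguration;

public class ReadOnlyConfiguration extends BaseConfiguration {
    @Override
    public void clearProperty(String key) {
        // mirror ConcurrentConfiguration's behaviour: removal is not allowed
        throw new UnsupportedOperationException("clearProperty is not supported for " + key);
    }
}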

From source file: org.neo4j.server.configuration.validation.WebadminConfigurationRule.java

@Override
public void validate(Configuration configuration) throws RuleFailedException {
    String managementApi = validateConfigurationContainsKey(configuration,
            Configurator.MANAGEMENT_PATH_PROPERTY_KEY);
    String restApi = validateConfigurationContainsKey(configuration, Configurator.REST_API_PATH_PROPERTY_KEY);

    // Check URIs are ok
    URI managementUri = validateAndNormalizeUri(managementApi, Configurator.MANAGEMENT_PATH_PROPERTY_KEY);
    URI restUri = validateAndNormalizeUri(restApi, Configurator.REST_API_PATH_PROPERTY_KEY);

    // Overwrite the properties with the new normalised URIs
    configuration.clearProperty(Configurator.MANAGEMENT_PATH_PROPERTY_KEY);
    configuration.addProperty(Configurator.MANAGEMENT_PATH_PROPERTY_KEY, managementUri.toString());

    configuration.clearProperty(Configurator.REST_API_PATH_PROPERTY_KEY);
    configuration.addProperty(Configurator.REST_API_PATH_PROPERTY_KEY, restUri.toString());
}