Example usage for org.apache.commons.configuration BaseConfiguration setDelimiterParsingDisabled

Introduction

This page collects real-world usage examples for org.apache.commons.configuration BaseConfiguration.setDelimiterParsingDisabled.

Prototype

public void setDelimiterParsingDisabled(boolean delimiterParsingDisabled) 

Document

Sets whether this configuration should use delimiters when parsing property values to convert them into lists of values.
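
Note that every example below sets the flag immediately after constructing the BaseConfiguration and before adding any properties, since it only affects how subsequently stored values are parsed. (In Commons Configuration 2.x this method was removed in favor of setListDelimiterHandler, e.g. with DisabledListDelimiterHandler.) A minimal sketch of the effect, using a made-up property key:

import org.apache.commons.configuration.BaseConfiguration;

public class DelimiterParsingDemo {
    public static void main(String[] args) {
        // Default behavior: "a,b,c" is split at the list delimiter (',')
        // and stored as a list of three values.
        BaseConfiguration parsed = new BaseConfiguration();
        parsed.setProperty("hosts", "a,b,c");
        System.out.println(parsed.getProperty("hosts")); // prints [a, b, c]

        // With delimiter parsing disabled, the value is stored verbatim.
        BaseConfiguration raw = new BaseConfiguration();
        raw.setDelimiterParsingDisabled(true);
        raw.setProperty("hosts", "a,b,c");
        System.out.println(raw.getProperty("hosts")); // prints a,b,c
    }
}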

Usage

From source file:com.intel.cosbench.config.common.KVConfigParser.java
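
COSBench parses a semicolon-separated key-value string into a configuration; delimiter parsing is disabled so that values containing commas are stored intact rather than split into lists.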

public static Config parse(String str) {
    BaseConfiguration config = new BaseConfiguration();
    config.setDelimiterParsingDisabled(true);
    str = StringUtils.trimToEmpty(str);
    String[] entries = StringUtils.split(str, ';');
    for (String entry : entries) {
        addConfigEntry(entry, config);
    }
    return new COSBConfigApator(config);
}

From source file:org.apache.tinkerpop.gremlin.giraph.process.computer.GiraphGraphComputer.java
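
TinkerPop's Giraph graph computer serializes the vertex program's state into a fresh Apache configuration (with delimiter parsing disabled so stored values are not split) and then merges it into the Hadoop/Giraph configuration.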

@Override
public GraphComputer program(final VertexProgram vertexProgram) {
    super.program(vertexProgram);
    this.memory.addVertexProgramMemoryComputeKeys(this.vertexProgram);
    final BaseConfiguration apacheConfiguration = new BaseConfiguration();
    apacheConfiguration.setDelimiterParsingDisabled(true);
    vertexProgram.storeState(apacheConfiguration);
    ConfUtil.mergeApacheIntoHadoopConfiguration(apacheConfiguration, this.giraphConfiguration);
    this.vertexProgram.getMessageCombiner().ifPresent(
            combiner -> this.giraphConfiguration.setMessageCombinerClass(GiraphMessageCombiner.class));
    return this;
}

From source file:org.apache.tinkerpop.gremlin.giraph.process.computer.GiraphGraphComputer.java
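
The same pattern appears when the Giraph job is launched: the graph filter is stored through a fresh Apache configuration before the vertex-centric and MapReduce jobs run.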

@Override
public int run(final String[] args) {
    final Storage storage = FileSystemStorage.open(this.giraphConfiguration);
    storage.rm(this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION));
    this.giraphConfiguration.setBoolean(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_FORMAT_HAS_EDGES,
            this.persist.equals(Persist.EDGES));
    try {
        // store vertex and edge filters (will propagate down to native InputFormat or else GiraphVertexInputFormat will process)
        final BaseConfiguration apacheConfiguration = new BaseConfiguration();
        apacheConfiguration.setDelimiterParsingDisabled(true);
        GraphFilterAware.storeGraphFilter(apacheConfiguration, this.giraphConfiguration, this.graphFilter);

        // it is possible to run graph computer without a vertex program (and thus, only map reduce jobs if they exist)
        if (null != this.vertexProgram) {
            // a way to verify in Giraph whether the traversal will go over the wire or not
            try {
                VertexProgram.createVertexProgram(this.hadoopGraph,
                        ConfUtil.makeApacheConfiguration(this.giraphConfiguration));
            } catch (final IllegalStateException e) {
                if (e.getCause() instanceof NumberFormatException)
                    throw new NotSerializableException(
                            "The provided traversal is not serializable and thus, can not be distributed across the cluster");
            }
            // remove historic combiners in configuration propagation (this occurs when job chaining)
            if (!this.vertexProgram.getMessageCombiner().isPresent())
                this.giraphConfiguration.unset(GiraphConstants.MESSAGE_COMBINER_CLASS.getKey());
            // split required workers across system (open map slots + max threads per machine = total amount of TinkerPop workers)
            if (!this.useWorkerThreadsInConfiguration) {
                final Cluster cluster = new Cluster(GiraphGraphComputer.this.giraphConfiguration);
                int totalMappers = cluster.getClusterStatus().getMapSlotCapacity() - 1; // 1 is needed for master
                cluster.close();
                if (this.workers <= totalMappers) {
                    this.giraphConfiguration.setWorkerConfiguration(this.workers, this.workers, 100.0F);
                    this.giraphConfiguration.setNumComputeThreads(1);
                } else {
                    if (totalMappers == 0)
                        totalMappers = 1; // happens in local mode
                    int threadsPerMapper = Long
                            .valueOf(Math.round((double) this.workers / (double) totalMappers)).intValue(); // TODO: need to find least common denominator
                    this.giraphConfiguration.setWorkerConfiguration(totalMappers, totalMappers, 100.0F);
                    this.giraphConfiguration.setNumComputeThreads(threadsPerMapper);
                }
            }
            // prepare the giraph vertex-centric computing job
            final GiraphJob job = new GiraphJob(this.giraphConfiguration,
                    Constants.GREMLIN_HADOOP_GIRAPH_JOB_PREFIX + this.vertexProgram);
            job.getInternalJob().setJarByClass(GiraphGraphComputer.class);
            this.logger.info(Constants.GREMLIN_HADOOP_GIRAPH_JOB_PREFIX + this.vertexProgram);
            // handle input paths (if any)
            String inputLocation = this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_INPUT_LOCATION, null);
            if (null != inputLocation && FileInputFormat.class.isAssignableFrom(this.giraphConfiguration
                    .getClass(Constants.GREMLIN_HADOOP_GRAPH_INPUT_FORMAT, InputFormat.class))) {
                inputLocation = Constants.getSearchGraphLocation(inputLocation, storage)
                        .orElse(this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_INPUT_LOCATION));
                FileInputFormat.setInputPaths(job.getInternalJob(), new Path(inputLocation));
            }
            // handle output paths (if any)
            String outputLocation = this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION,
                    null);
            if (null != outputLocation && FileOutputFormat.class.isAssignableFrom(this.giraphConfiguration
                    .getClass(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_FORMAT, OutputFormat.class))) {
                outputLocation = Constants.getGraphLocation(
                        this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION));
                FileOutputFormat.setOutputPath(job.getInternalJob(), new Path(outputLocation));
            }
            // execute the job and wait until it completes (if it fails, throw an exception)
            if (!job.run(true))
                throw new IllegalStateException(
                        "The GiraphGraphComputer job failed -- aborting all subsequent MapReduce jobs: "
                                + job.getInternalJob().getStatus().getFailureInfo());
            // add vertex program memory values to the return memory
            for (final MemoryComputeKey memoryComputeKey : this.vertexProgram.getMemoryComputeKeys()) {
                if (!memoryComputeKey.isTransient() && storage.exists(Constants.getMemoryLocation(
                        this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION),
                        memoryComputeKey.getKey()))) {
                    final ObjectWritableIterator iterator = new ObjectWritableIterator(this.giraphConfiguration,
                            new Path(Constants.getMemoryLocation(
                                    this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION),
                                    memoryComputeKey.getKey())));
                    if (iterator.hasNext()) {
                        this.memory.set(memoryComputeKey.getKey(), iterator.next().getValue());
                    }
                    // vertex program memory items are not stored on disk
                    storage.rm(Constants.getMemoryLocation(
                            this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION),
                            memoryComputeKey.getKey()));
                }
            }
            final Path path = new Path(Constants.getMemoryLocation(
                    this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION),
                    Constants.HIDDEN_ITERATION));
            this.memory.setIteration(
                    (Integer) new ObjectWritableIterator(this.giraphConfiguration, path).next().getValue());
            storage.rm(Constants.getMemoryLocation(
                    this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION),
                    Constants.HIDDEN_ITERATION));
        }
        // do map reduce jobs
        this.giraphConfiguration.setBoolean(Constants.GREMLIN_HADOOP_GRAPH_INPUT_FORMAT_HAS_EDGES,
                this.giraphConfiguration.getBoolean(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_FORMAT_HAS_EDGES,
                        true));
        for (final MapReduce mapReduce : this.mapReducers) {
            this.memory.addMapReduceMemoryKey(mapReduce);
            MapReduceHelper.executeMapReduceJob(mapReduce, this.memory, this.giraphConfiguration);
        }

        // if no persistence, delete the graph and memory output
        if (this.persist.equals(Persist.NOTHING))
            storage.rm(this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION));
    } catch (final Exception e) {
        throw new IllegalStateException(e.getMessage(), e);
    }
    return 0;
}

From source file:org.apache.tinkerpop.gremlin.hadoop.process.computer.util.MapReduceHelper.java
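
Before submitting a Hadoop MapReduce job, the helper stores the MapReduce object's state in an Apache configuration and merges it into the Hadoop configuration.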

public static void executeMapReduceJob(final MapReduce mapReduce, final Memory.Admin memory,
        final Configuration configuration) throws IOException, ClassNotFoundException, InterruptedException {
    final Configuration newConfiguration = new Configuration(configuration);
    final boolean vertexProgramExists = newConfiguration.get(VertexProgram.VERTEX_PROGRAM, null) != null;
    if (vertexProgramExists) {
        newConfiguration.set(Constants.GREMLIN_HADOOP_GRAPH_INPUT_FORMAT,
                InputOutputHelper
                        .getInputFormat((Class) newConfiguration
                                .getClass(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_FORMAT, OutputFormat.class))
                        .getCanonicalName());
        newConfiguration.unset(Constants.GREMLIN_HADOOP_GRAPH_FILTER);
    }
    final BaseConfiguration apacheConfiguration = new BaseConfiguration();
    apacheConfiguration.setDelimiterParsingDisabled(true);
    mapReduce.storeState(apacheConfiguration);
    ConfUtil.mergeApacheIntoHadoopConfiguration(apacheConfiguration, newConfiguration);

    final Optional<Comparator<?>> mapSort = mapReduce.getMapKeySort();
    final Optional<Comparator<?>> reduceSort = mapReduce.getReduceKeySort();
    newConfiguration.setClass(Constants.GREMLIN_HADOOP_MAP_REDUCE_CLASS, mapReduce.getClass(), MapReduce.class);
    final Job job = Job.getInstance(newConfiguration, mapReduce.toString());
    HadoopGraph.LOGGER.info(Constants.GREMLIN_HADOOP_JOB_PREFIX + mapReduce.toString());
    job.setJarByClass(HadoopGraph.class);
    if (mapSort.isPresent())
        job.setSortComparatorClass(ObjectWritableComparator.ObjectWritableMapComparator.class);
    job.setMapperClass(HadoopMap.class);
    if (mapReduce.doStage(MapReduce.Stage.REDUCE)) {
        if (mapReduce.doStage(MapReduce.Stage.COMBINE))
            job.setCombinerClass(HadoopCombine.class);
        job.setReducerClass(HadoopReduce.class);
    } else {
        if (mapSort.isPresent()) {
            job.setReducerClass(Reducer.class);
            job.setNumReduceTasks(1); // todo: is this necessary to ensure sorted order?
        } else {
            job.setNumReduceTasks(0);
        }
    }
    job.setMapOutputKeyClass(ObjectWritable.class);
    job.setMapOutputValueClass(ObjectWritable.class);
    job.setOutputKeyClass(ObjectWritable.class);
    job.setOutputValueClass(ObjectWritable.class);
    job.setInputFormatClass(GraphFilterInputFormat.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);
    // if there is no vertex program, then grab the graph from the input location
    final Path graphPath;
    if (vertexProgramExists) {
        graphPath = new Path(
                Constants.getGraphLocation(newConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION)));
    } else {
        graphPath = new Path(newConfiguration.get(Constants.GREMLIN_HADOOP_INPUT_LOCATION));
    }

    Path memoryPath = new Path(
            Constants.getMemoryLocation(newConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION),
                    (reduceSort.isPresent() ? mapReduce.getMemoryKey() + "-temp" : mapReduce.getMemoryKey())));
    if (FileSystem.get(newConfiguration).exists(memoryPath)) {
        FileSystem.get(newConfiguration).delete(memoryPath, true);
    }
    FileInputFormat.setInputPaths(job, graphPath);
    FileOutputFormat.setOutputPath(job, memoryPath);
    job.waitForCompletion(true);

    // if there is a reduce sort, we need to run another identity MapReduce job
    if (reduceSort.isPresent()) {
        final Job reduceSortJob = Job.getInstance(newConfiguration, "ReduceKeySort");
        reduceSortJob.setSortComparatorClass(ObjectWritableComparator.ObjectWritableReduceComparator.class);
        reduceSortJob.setMapperClass(Mapper.class);
        reduceSortJob.setReducerClass(Reducer.class);
        reduceSortJob.setMapOutputKeyClass(ObjectWritable.class);
        reduceSortJob.setMapOutputValueClass(ObjectWritable.class);
        reduceSortJob.setOutputKeyClass(ObjectWritable.class);
        reduceSortJob.setOutputValueClass(ObjectWritable.class);
        reduceSortJob.setInputFormatClass(SequenceFileInputFormat.class);
        reduceSortJob.setOutputFormatClass(SequenceFileOutputFormat.class);
        reduceSortJob.setNumReduceTasks(1); // todo: is this necessary to ensure sorted order?
        FileInputFormat.setInputPaths(reduceSortJob, memoryPath);
        final Path sortedMemoryPath = new Path(Constants.getMemoryLocation(
                newConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION), mapReduce.getMemoryKey()));
        FileOutputFormat.setOutputPath(reduceSortJob, sortedMemoryPath);
        reduceSortJob.waitForCompletion(true);
        FileSystem.get(newConfiguration).delete(memoryPath, true); // delete the temporary memory path
        memoryPath = sortedMemoryPath;
    }
    mapReduce.addResultToMemory(memory, new ObjectWritableIterator(newConfiguration, memoryPath));
}

From source file:org.apache.tinkerpop.gremlin.hadoop.structure.util.ConfUtil.java
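
A utility that converts a Hadoop configuration into an Apache Commons configuration, copying every entry verbatim.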

public static org.apache.commons.configuration.Configuration makeApacheConfiguration(
        final Configuration hadoopConfiguration) {
    final BaseConfiguration apacheConfiguration = new BaseConfiguration();
    apacheConfiguration.setDelimiterParsingDisabled(true);
    hadoopConfiguration.iterator()
            .forEachRemaining(e -> apacheConfiguration.setProperty(e.getKey(), e.getValue()));
    return apacheConfiguration;
}

From source file:org.apache.tinkerpop.gremlin.spark.AbstractSparkTest.java
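
A base configuration for Spark tests; delimiter parsing is disabled so that property values are stored verbatim rather than split into lists.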

protected Configuration getBaseConfiguration() {
    final BaseConfiguration configuration = new BaseConfiguration();
    configuration.setDelimiterParsingDisabled(true);
    configuration.setProperty("spark.master", "local[4]");
    configuration.setProperty(Constants.SPARK_SERIALIZER, GryoSerializer.class.getCanonicalName());
    configuration.setProperty("spark.kryo.registrationRequired", true);
    configuration.setProperty(Graph.GRAPH, HadoopGraph.class.getName());
    configuration.setProperty(Constants.GREMLIN_HADOOP_JARS_IN_DISTRIBUTED_CACHE, false);
    return configuration;
}

From source file:org.apache.tinkerpop.gremlin.spark.structure.io.gryo.GryoSerializer.java
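
Converts a SparkConf into an Apache configuration so that Spark property values survive unchanged.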

private static Configuration makeApacheConfiguration(final SparkConf sparkConfiguration) {
    final BaseConfiguration apacheConfiguration = new BaseConfiguration();
    apacheConfiguration.setDelimiterParsingDisabled(true);
    for (final Tuple2<String, String> tuple : sparkConfiguration.getAll()) {
        apacheConfiguration.setProperty(tuple._1(), tuple._2());
    }
    return apacheConfiguration;
}

From source file:org.apache.tinkerpop.gremlin.util.SystemUtil.java
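
Builds a configuration from the JVM system properties, keeping only keys that start with the given prefix and optionally trimming that prefix.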

/**
 * Generates a {@link Configuration} from {@link System#getProperties()}.
 * Only properties whose keys start with the given prefix are included.
 * If {@code trimPrefix} is {@code true}, the prefix and the following "." are stripped from each key.
 *
 * @param prefix     the prefix of the keys to include in the configuration
 * @param trimPrefix whether to strip the prefix and the following "." from each key
 * @return a configuration generated from the system properties
 */
public static Configuration getSystemPropertiesConfiguration(final String prefix, final boolean trimPrefix) {
    final BaseConfiguration apacheConfiguration = new BaseConfiguration();
    apacheConfiguration.setDelimiterParsingDisabled(true);
    for (final Map.Entry<Object, Object> entry : System.getProperties().entrySet()) {
        final String key = entry.getKey().toString();
        final Object value = entry.getValue();
        if (key.startsWith(prefix + "."))
            apacheConfiguration.setProperty(trimPrefix ? key.substring(prefix.length() + 1) : key, value);
    }
    return apacheConfiguration;
}

From source file:org.xwiki.configuration.internal.SystemEnvironmentConfigurationSource.java
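
XWiki exposes environment variables as a configuration source; disabling delimiter parsing keeps variable values containing commas intact.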

@Override
public void initialize() throws InitializationException {
    BaseConfiguration configuration = new BaseConfiguration();
    configuration.setDelimiterParsingDisabled(true);

    Map<String, String> environment = System.getenv();
    for (String key : environment.keySet()) {
        configuration.setProperty(key, environment.get(key));
    }

    setConfiguration(configuration);
}

From source file:org.xwiki.configuration.internal.SystemPropertiesConfigurationSource.java
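
The companion XWiki source does the same for JVM system properties.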

@Override
public void initialize() throws InitializationException {
    BaseConfiguration configuration = new BaseConfiguration();
    configuration.setDelimiterParsingDisabled(true);

    Properties properties = System.getProperties();
    for (String key : properties.stringPropertyNames()) {
        configuration.setProperty(key, properties.get(key));
    }

    setConfiguration(configuration);
}