Usage examples for org.apache.commons.configuration.ConfigurationUtils.copy
public static void copy(Configuration source, Configuration target)
Copies all properties from the source configuration into the target configuration. Properties already present in the target are replaced by the values with the same key in the source.
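Before the longer real-world examples below, here is a minimal, self-contained sketch of the call itself (Commons Configuration 1.x; the property names are illustrative, not taken from any of the source files):

import org.apache.commons.configuration.BaseConfiguration;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationUtils;

public class CopyExample {
    public static void main(String[] args) {
        Configuration source = new BaseConfiguration();
        source.setProperty("db.host", "localhost");
        source.setProperty("db.port", 5432);

        Configuration target = new BaseConfiguration();
        target.setProperty("db.host", "db.example.com"); // replaced by the copy

        // Copy every property from source into target.
        ConfigurationUtils.copy(source, target);

        System.out.println(target.getString("db.host")); // localhost
        System.out.println(target.getInt("db.port"));    // 5432
    }
}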
From source file:net.sf.mpaxs.spi.server.DrmaaComputeHostLauncher.java
/**
 * Submits a new ComputeHost to the GridEngine.
 * Settings from the Settings class are used and converted to <code>Configuration</code>.
 *
 * @param cfg the configuration to use
 * @see net.sf.mpaxs.spi.computeHost.Settings
 */
@Override
public void startComputeHost(Configuration cfg) {
    String drmaaImplementation = SessionFactory.getFactory().getSession().getDrmaaImplementation();
    System.out.println("Drmaa Implementation: " + drmaaImplementation);
    File configLocation = new File(cfg.getString(ConfigurationKeys.KEY_COMPUTE_HOST_WORKING_DIR),
            "computeHost.properties");
    try {
        // Persist the incoming configuration so the launched ComputeHost can read it from disk.
        PropertiesConfiguration pc = new PropertiesConfiguration(configLocation);
        ConfigurationUtils.copy(cfg, pc);
        pc.save();
    } catch (ConfigurationException ex) {
        Logger.getLogger(DrmaaComputeHostLauncher.class.getName()).log(Level.SEVERE, null, ex);
    }
    List<String> arguments = new ArrayList<String>();
    arguments.add("-cp");
    arguments.add(cfg.getString(ConfigurationKeys.KEY_PATH_TO_COMPUTEHOST_JAR));
    arguments.add(cfg.getString(ConfigurationKeys.KEY_COMPUTE_HOST_MAIN_CLASS));
    arguments.add("-c");
    try {
        arguments.add(configLocation.toURI().toURL().toString());
    } catch (MalformedURLException ex) {
        Logger.getLogger(DrmaaComputeHostLauncher.class.getName()).log(Level.SEVERE, null, ex);
    }
    Logger.getLogger(this.getClass().getName()).log(Level.INFO, "ComputeHost configuration: {0}",
            ConfigurationUtils.toString(cfg));
    try {
        SessionFactory factory = SessionFactory.getFactory();
        Session session = factory.getSession();
        Logger.getLogger(this.getClass().getName()).log(Level.INFO,
                "DRM System: {0} Implementation: {1} Version: {2}",
                new Object[] { session.getDrmSystem(), session.getDrmaaImplementation(), session.getVersion() });
        session.init("");
        JobTemplate jt = session.createJobTemplate();
        Logger.getLogger(this.getClass().getName()).log(Level.INFO, "Remote command: {0}",
                cfg.getString(ConfigurationKeys.KEY_PATH_TO_JAVA));
        jt.setRemoteCommand(cfg.getString(ConfigurationKeys.KEY_PATH_TO_JAVA));
        Logger.getLogger(this.getClass().getName()).log(Level.INFO, "Working dir: {0}",
                cfg.getString(ConfigurationKeys.KEY_COMPUTE_HOST_WORKING_DIR));
        jt.setWorkingDirectory(cfg.getString(ConfigurationKeys.KEY_COMPUTE_HOST_WORKING_DIR));
        Logger.getLogger(this.getClass().getName()).log(Level.INFO, "Arguments: {0}", arguments);
        jt.setArgs(arguments);
        Logger.getLogger(this.getClass().getName()).log(Level.INFO, "Error path: {0}",
                cfg.getString(ConfigurationKeys.KEY_ERROR_FILE));
        jt.setErrorPath(":" + cfg.getString(ConfigurationKeys.KEY_ERROR_FILE));
        Logger.getLogger(this.getClass().getName()).log(Level.INFO, "Output path: {0}",
                cfg.getString(ConfigurationKeys.KEY_OUTPUT_FILE));
        jt.setOutputPath(":" + cfg.getString(ConfigurationKeys.KEY_OUTPUT_FILE));
        jt.setNativeSpecification(cfg.getString(ConfigurationKeys.KEY_NATIVE_SPEC, ""));
        jt.setJobName("mpaxs-chost");
        session.runJob(jt);
        session.deleteJobTemplate(jt);
        session.exit();
        Logger.getLogger(this.getClass().getName()).log(Level.INFO, "Session started!");
    } catch (DrmaaException ex) {
        Logger.getLogger(this.getClass().getName()).log(Level.SEVERE, null, ex);
    }
}
From source file:net.sf.mpaxs.spi.server.MpaxsImpl.java
@Override
public void startMasterServer(Configuration config, Container c) {
    if (master != null) {
        throw new IllegalStateException("Master server was already started!");
    }
    if (config == null) {
        System.out.println("Configuration is null, starting master with default parameters!");
        startMasterServer();
        return;
    }
    try {
        File f = File.createTempFile(UUID.randomUUID().toString(), ".properties");
        PropertiesConfiguration pc;
        try {
            pc = new PropertiesConfiguration(f);
            // Dump the caller's configuration into a temporary .properties file
            // so that StartUp can be pointed at a file path.
            ConfigurationUtils.copy(config, pc);
            pc.save(f);
            System.out.println(ConfigurationUtils.toString(pc));
            master = StartUp.start(f.getAbsolutePath(), c);
        } catch (ConfigurationException ex) {
            Logger.getLogger(MpaxsImpl.class.getName()).log(Level.SEVERE, null, ex);
        }
    } catch (IOException ex) {
        Logger.getLogger(MpaxsImpl.class.getName()).log(Level.SEVERE, null, ex);
    }
}
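The two mpaxs examples above share one pattern worth isolating: copying an arbitrary Configuration into a PropertiesConfiguration purely so it can be saved as a .properties file. A condensed sketch of just that step (the class and method names here are illustrative, not part of either source file):

import java.io.File;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.ConfigurationUtils;
import org.apache.commons.configuration.PropertiesConfiguration;

public final class ConfigPersistence {
    /** Persists any Configuration to a .properties file (Commons Configuration 1.x). */
    public static void persist(Configuration cfg, File destination) throws ConfigurationException {
        PropertiesConfiguration pc = new PropertiesConfiguration(destination);
        ConfigurationUtils.copy(cfg, pc);
        pc.save();
    }
}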
From source file:maltcms.ui.fileHandles.properties.tools.SceneExporter.java
/**
 * Layout:
 *   NAME/
 *     NAME.properties
 *     NAME-general.properties (optional)
 *     fragmentCommands/
 *       00_CLASSNAME/
 *         CLASSNAME.properties
 *       01_CLASSNAME/
 *         CLASSNAME.properties
 *
 * @param pipeline
 * @param general
 */
private void createConfigFiles(List<PipelineElementWidget> pipeline, PipelineGeneralConfigWidget general) {
    try {
        // create base config
        FileObject baseConfigFo = this.file.getFileObject(this.name + ".mpl");
        File f = FileUtil.toFile(baseConfigFo);
        PropertiesConfiguration baseConfig = new PropertiesConfiguration();
        File subDir = new File(f.getParent(), "xml");
        FileUtil.createFolder(subDir);
        // retrieve general configuration
        Configuration generalConfig = general.getProperties();
        // only create and link, if non-empty
        if (!generalConfig.isEmpty()) {
            ConfigurationUtils.copy(generalConfig, baseConfig);
        }
        // String list for pipeline elements
        List<String> pipelineElements = new LinkedList<>();
        File pipelineXml = new File(subDir, this.name + ".xml");
        for (PipelineElementWidget pw : pipeline) {
            // add full class name to pipeline elements
            pipelineElements.add(pw.getClassName());
            // write configuration to that file
            PropertiesConfiguration pc = new PropertiesConfiguration();
        }
        // set pipeline property
        baseConfig.setProperty("pipeline", pipelineElements);
        // set pipeline.properties property
        String pipelineXmlString = "file:${config.basedir}/xml/bipace.xml";
        baseConfig.setProperty("pipeline.xml", pipelineXmlString);
        FileObject fo = FileUtil.toFileObject(f);
        try {
            baseConfig.save(new PrintStream(fo.getOutputStream()));
        } catch (ConfigurationException ex) {
            Exceptions.printStackTrace(ex);
        }
    } catch (FileNotFoundException ex) {
        Exceptions.printStackTrace(ex);
    } catch (IOException ex) {
        Exceptions.printStackTrace(ex);
    }
}
From source file:ai.grakn.graph.internal.computer.GraknSparkComputer.java
public GraknSparkComputer(final HadoopGraph hadoopGraph) {
    super(hadoopGraph);
    this.sparkConfiguration = new HadoopConfiguration();
    ConfigurationUtils.copy(this.hadoopGraph.configuration(), this.sparkConfiguration);
    this.apacheConfiguration = new HadoopConfiguration(this.sparkConfiguration);
    apacheConfiguration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_FORMAT_HAS_EDGES, false);
    hadoopConfiguration = ConfUtil.makeHadoopConfiguration(apacheConfiguration);
    if (hadoopConfiguration.get(Constants.GREMLIN_SPARK_GRAPH_INPUT_RDD, null) == null
            && hadoopConfiguration.get(Constants.GREMLIN_HADOOP_GRAPH_INPUT_FORMAT, null) != null
            && FileInputFormat.class.isAssignableFrom(hadoopConfiguration
                    .getClass(Constants.GREMLIN_HADOOP_GRAPH_INPUT_FORMAT, InputFormat.class))) {
        try {
            final String inputLocation = FileSystem.get(hadoopConfiguration)
                    .getFileStatus(new Path(hadoopConfiguration.get(Constants.GREMLIN_HADOOP_INPUT_LOCATION)))
                    .getPath().toString();
            apacheConfiguration.setProperty(Constants.MAPREDUCE_INPUT_FILEINPUTFORMAT_INPUTDIR, inputLocation);
            hadoopConfiguration.set(Constants.MAPREDUCE_INPUT_FILEINPUTFORMAT_INPUTDIR, inputLocation);
        } catch (final IOException e) {
            throw new IllegalStateException(e.getMessage(), e);
        }
    }
}
From source file:ai.grakn.kb.internal.computer.GraknSparkComputer.java
public GraknSparkComputer(final HadoopGraph hadoopGraph) {
    super(hadoopGraph);
    this.sparkConfiguration = new HadoopConfiguration();
    ConfigurationUtils.copy(this.hadoopGraph.configuration(), this.sparkConfiguration);
}
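Both GraknSparkComputer constructors take a defensive copy of the graph's configuration before mutating it; TinkerPop's HadoopConfiguration implements the Commons Configuration interface, which is what makes ConfigurationUtils.copy applicable here. A hedged sketch of the same isolation idea using plain BaseConfiguration (the property keys are illustrative):

import org.apache.commons.configuration.BaseConfiguration;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationUtils;

public class SnapshotExample {
    public static void main(String[] args) {
        // Stand-in for the graph's own configuration.
        Configuration graphConfig = new BaseConfiguration();
        graphConfig.setProperty("gremlin.hadoop.inputLocation", "data/graph.kryo");

        // Snapshot it, as the constructors above do, so later mutations
        // on the computer's copy never leak back into the graph.
        Configuration computerConfig = new BaseConfiguration();
        ConfigurationUtils.copy(graphConfig, computerConfig);
        computerConfig.setProperty("output.hasEdges", false);

        System.out.println(graphConfig.containsKey("output.hasEdges")); // false
    }
}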
From source file:com.datastax.openflights.OpenflightsBulkLoaderVertexProgram.java
@Override
public void loadState(final Graph graph, final Configuration config) {
    configuration = new BaseConfiguration();
    if (config != null) {
        ConfigurationUtils.copy(config, configuration);
    }
    intermediateBatchSize = configuration.getLong(INTERMEDIATE_BATCH_SIZE_CFG_KEY, 0L);
    elementComputeKeys
            .add(configuration.getString(BULK_LOADER_VERTEX_ID_CFG_KEY, DEFAULT_BULK_LOADER_VERTEX_ID));
    bulkLoader = createBulkLoader();
}
From source file:com.datastax.openflights.OpenflightsBulkLoaderVertexProgram.java
@Override
public void storeState(final Configuration config) {
    VertexProgram.super.storeState(config);
    if (configuration != null) {
        ConfigurationUtils.copy(configuration, config);
    }
}
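Taken together, the loadState and storeState methods above form a round trip: configuration is defensively copied in on load and copied back out on store. A minimal sketch of that pattern outside of TinkerPop (the class and field names are illustrative):

import org.apache.commons.configuration.BaseConfiguration;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationUtils;

public class StatefulComponent {
    private Configuration configuration;

    /** Copies the caller's config into a private instance; a null config yields empty state. */
    public void loadState(Configuration config) {
        configuration = new BaseConfiguration();
        if (config != null) {
            ConfigurationUtils.copy(config, configuration);
        }
    }

    /** Copies the private state back out into the caller-supplied config. */
    public void storeState(Configuration config) {
        if (configuration != null) {
            ConfigurationUtils.copy(configuration, config);
        }
    }
}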
From source file:ai.grakn.graph.internal.computer.GraknSparkComputer.java
private Future<ComputerResult> submitWithExecutor(Executor exec) {
    getGraphRDD(this);
    jobGroupId = Integer.toString(ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE));
    String jobDescription = this.vertexProgram == null ? this.mapReducers.toString()
            : this.vertexProgram + "+" + this.mapReducers;
    this.sparkConfiguration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION,
            this.sparkConfiguration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION) + "/" + jobGroupId);
    this.apacheConfiguration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION,
            this.sparkConfiguration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION));
    this.hadoopConfiguration.set(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION,
            this.sparkConfiguration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION));
    // create the completable future
    return CompletableFuture.supplyAsync(() -> {
        graknGraphRDD.sparkContext.setJobGroup(jobGroupId, jobDescription);
        final long startTime = System.currentTimeMillis();
        GraknSparkMemory memory = null;
        JavaPairRDD<Object, VertexWritable> computedGraphRDD = null;
        JavaPairRDD<Object, ViewIncomingPayload<Object>> viewIncomingRDD = null;
        ////////////////////////////////
        // process the vertex program //
        ////////////////////////////////
        if (null != this.vertexProgram) {
            // set up the vertex program and wire up configurations
            this.mapReducers.addAll(this.vertexProgram.getMapReducers());
            memory = new GraknSparkMemory(this.vertexProgram, this.mapReducers, graknGraphRDD.sparkContext);
            this.vertexProgram.setup(memory);
            memory.broadcastMemory(graknGraphRDD.sparkContext);
            final HadoopConfiguration vertexProgramConfiguration = new HadoopConfiguration();
            this.vertexProgram.storeState(vertexProgramConfiguration);
            ConfigurationUtils.copy(vertexProgramConfiguration, apacheConfiguration);
            ConfUtil.mergeApacheIntoHadoopConfiguration(vertexProgramConfiguration, hadoopConfiguration);
            // execute the vertex program
            while (true) {
                memory.setInTask(true);
                viewIncomingRDD = GraknSparkExecutor.executeVertexProgramIteration(graknGraphRDD.loadedGraphRDD,
                        viewIncomingRDD, memory, vertexProgramConfiguration);
                memory.setInTask(false);
                if (this.vertexProgram.terminate(memory)) {
                    break;
                } else {
                    memory.incrIteration();
                    memory.broadcastMemory(graknGraphRDD.sparkContext);
                }
            }
            // write the computed graph to the respective output (rdd or output format)
            final String[] elementComputeKeys = this.vertexProgram.getElementComputeKeys()
                    .toArray(new String[this.vertexProgram.getElementComputeKeys().size()]);
            computedGraphRDD = GraknSparkExecutor.prepareFinalGraphRDD(graknGraphRDD.loadedGraphRDD,
                    viewIncomingRDD, elementComputeKeys);
            if ((hadoopConfiguration.get(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_FORMAT, null) != null
                    || hadoopConfiguration.get(Constants.GREMLIN_SPARK_GRAPH_OUTPUT_RDD, null) != null)
                    && !this.persist.equals(Persist.NOTHING)) {
                try {
                    hadoopConfiguration
                            .getClass(Constants.GREMLIN_SPARK_GRAPH_OUTPUT_RDD, OutputFormatRDD.class,
                                    OutputRDD.class)
                            .newInstance().writeGraphRDD(apacheConfiguration, computedGraphRDD);
                } catch (final InstantiationException | IllegalAccessException e) {
                    throw new IllegalStateException(e.getMessage(), e);
                }
            }
        }
        final boolean computedGraphCreated = computedGraphRDD != null;
        if (!computedGraphCreated) {
            computedGraphRDD = graknGraphRDD.loadedGraphRDD;
        }
        final Memory.Admin finalMemory = null == memory ? new MapMemory() : new MapMemory(memory);
        //////////////////////////////
        // process the map reducers //
        //////////////////////////////
        if (!this.mapReducers.isEmpty()) {
            for (final MapReduce mapReduce : this.mapReducers) {
                // execute the map reduce job
                final HadoopConfiguration newApacheConfiguration = new HadoopConfiguration(apacheConfiguration);
                mapReduce.storeState(newApacheConfiguration);
                // map
                final JavaPairRDD mapRDD = GraknSparkExecutor.executeMap(computedGraphRDD, mapReduce,
                        newApacheConfiguration);
                // combine
                final JavaPairRDD combineRDD = mapReduce.doStage(MapReduce.Stage.COMBINE)
                        ? GraknSparkExecutor.executeCombine(mapRDD, newApacheConfiguration)
                        : mapRDD;
                // reduce
                final JavaPairRDD reduceRDD = mapReduce.doStage(MapReduce.Stage.REDUCE)
                        ? GraknSparkExecutor.executeReduce(combineRDD, mapReduce, newApacheConfiguration)
                        : combineRDD;
                // write the map reduce output back to disk and computer result memory
                try {
                    mapReduce.addResultToMemory(finalMemory, hadoopConfiguration
                            .getClass(Constants.GREMLIN_SPARK_GRAPH_OUTPUT_RDD, OutputFormatRDD.class,
                                    OutputRDD.class)
                            .newInstance()
                            .writeMemoryRDD(apacheConfiguration, mapReduce.getMemoryKey(), reduceRDD));
                } catch (final InstantiationException | IllegalAccessException e) {
                    throw new IllegalStateException(e.getMessage(), e);
                }
            }
        }
        // unpersist the computed graph if it will not be used again (no PersistedOutputRDD)
        if (!graknGraphRDD.outputToSpark || this.persist.equals(GraphComputer.Persist.NOTHING)) {
            computedGraphRDD.unpersist();
        }
        // delete any file system or rdd data if persist nothing
        String outputPath = sparkConfiguration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION);
        if (null != outputPath && this.persist.equals(GraphComputer.Persist.NOTHING)) {
            if (graknGraphRDD.outputToHDFS) {
                graknGraphRDD.fileSystemStorage.rm(outputPath);
            }
            if (graknGraphRDD.outputToSpark) {
                graknGraphRDD.sparkContextStorage.rm(outputPath);
            }
        }
        // update runtime and return the newly computed graph
        finalMemory.setRuntime(System.currentTimeMillis() - startTime);
        return new DefaultComputerResult(
                InputOutputHelper.getOutputGraph(apacheConfiguration, this.resultGraph, this.persist),
                finalMemory.asImmutable());
    }, exec);
}
From source file:org.apache.tinkerpop.gremlin.hadoop.process.computer.spark.SparkGraphComputer.java
@Override
public Future<ComputerResult> submit() {
    super.validateStatePriorToExecution();
    // apache and hadoop configurations that are used throughout the graph computer computation
    final org.apache.commons.configuration.Configuration apacheConfiguration = new HadoopConfiguration(
            this.hadoopGraph.configuration());
    apacheConfiguration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_FORMAT_HAS_EDGES,
            this.persist.equals(Persist.EDGES));
    final Configuration hadoopConfiguration = ConfUtil.makeHadoopConfiguration(apacheConfiguration);
    if (FileInputFormat.class.isAssignableFrom(
            hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_INPUT_FORMAT, InputFormat.class))) {
        try {
            final String inputLocation = FileSystem.get(hadoopConfiguration)
                    .getFileStatus(new Path(hadoopConfiguration.get(Constants.GREMLIN_HADOOP_INPUT_LOCATION)))
                    .getPath().toString();
            apacheConfiguration.setProperty(Constants.MAPRED_INPUT_DIR, inputLocation);
            hadoopConfiguration.set(Constants.MAPRED_INPUT_DIR, inputLocation);
        } catch (final IOException e) {
            throw new IllegalStateException(e.getMessage(), e);
        }
    }
    // create the completable future
    return CompletableFuture.<ComputerResult>supplyAsync(() -> {
        final long startTime = System.currentTimeMillis();
        SparkMemory memory = null;
        // delete output location
        final String outputLocation = hadoopConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, null);
        if (null != outputLocation) {
            try {
                FileSystem.get(hadoopConfiguration).delete(new Path(outputLocation), true);
            } catch (final IOException e) {
                throw new IllegalStateException(e.getMessage(), e);
            }
        }
        // wire up a spark context
        final SparkConf sparkConfiguration = new SparkConf();
        sparkConfiguration.setAppName(Constants.GREMLIN_HADOOP_SPARK_JOB_PREFIX
                + (null == this.vertexProgram ? "No VertexProgram" : this.vertexProgram) + "["
                + this.mapReducers + "]");
        /*final List<Class> classes = new ArrayList<>();
        classes.addAll(IOClasses.getGryoClasses(GryoMapper.build().create()));
        classes.addAll(IOClasses.getSharedHadoopClasses());
        classes.add(ViewPayload.class);
        classes.add(MessagePayload.class);
        classes.add(ViewIncomingPayload.class);
        classes.add(ViewOutgoingPayload.class);
        sparkConfiguration.registerKryoClasses(classes.toArray(new Class[classes.size()]));*/
        // TODO: fix for user submitted jars in Spark 1.3.0
        // create the spark configuration from the graph computer configuration
        hadoopConfiguration.forEach(entry -> sparkConfiguration.set(entry.getKey(), entry.getValue()));
        // execute the vertex program and map reducers and if there is a failure, auto-close the spark context
        try (final JavaSparkContext sparkContext = new JavaSparkContext(sparkConfiguration)) {
            // add the project jars to the cluster
            this.loadJars(sparkContext, hadoopConfiguration);
            // create a message-passing friendly rdd from the input rdd
            final JavaPairRDD<Object, VertexWritable> graphRDD;
            try {
                graphRDD = hadoopConfiguration
                        .getClass(Constants.GREMLIN_HADOOP_GRAPH_INPUT_RDD, InputFormatRDD.class, InputRDD.class)
                        .newInstance().readGraphRDD(apacheConfiguration, sparkContext).setName("graphRDD")
                        .cache();
            } catch (final InstantiationException | IllegalAccessException e) {
                throw new IllegalStateException(e.getMessage(), e);
            }
            JavaPairRDD<Object, ViewIncomingPayload<Object>> viewIncomingRDD = null;
            ////////////////////////////////
            // process the vertex program //
            ////////////////////////////////
            if (null != this.vertexProgram) {
                // set up the vertex program and wire up configurations
                memory = new SparkMemory(this.vertexProgram, this.mapReducers, sparkContext);
                this.vertexProgram.setup(memory);
                memory.broadcastMemory(sparkContext);
                final HadoopConfiguration vertexProgramConfiguration = new HadoopConfiguration();
                this.vertexProgram.storeState(vertexProgramConfiguration);
                ConfigurationUtils.copy(vertexProgramConfiguration, apacheConfiguration);
                ConfUtil.mergeApacheIntoHadoopConfiguration(vertexProgramConfiguration, hadoopConfiguration);
                // execute the vertex program
                while (true) {
                    memory.setInTask(true);
                    viewIncomingRDD = SparkExecutor.executeVertexProgramIteration(graphRDD, viewIncomingRDD,
                            memory, vertexProgramConfiguration);
                    memory.setInTask(false);
                    if (this.vertexProgram.terminate(memory)) {
                        break;
                    } else {
                        memory.incrIteration();
                        memory.broadcastMemory(sparkContext);
                    }
                }
                // write the graph rdd using the output rdd
                if (!this.persist.equals(Persist.NOTHING)) {
                    try {
                        hadoopConfiguration
                                .getClass(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_RDD, OutputFormatRDD.class,
                                        OutputRDD.class)
                                .newInstance().writeGraphRDD(apacheConfiguration, graphRDD);
                    } catch (final InstantiationException | IllegalAccessException e) {
                        throw new IllegalStateException(e.getMessage(), e);
                    }
                }
            }
            final Memory.Admin finalMemory = null == memory ? new MapMemory() : new MapMemory(memory);
            //////////////////////////////
            // process the map reducers //
            //////////////////////////////
            if (!this.mapReducers.isEmpty()) {
                final String[] elementComputeKeys = this.vertexProgram == null ? new String[0]
                        : this.vertexProgram.getElementComputeKeys()
                                .toArray(new String[this.vertexProgram.getElementComputeKeys().size()]);
                final JavaPairRDD<Object, VertexWritable> mapReduceGraphRDD = SparkExecutor
                        .prepareGraphRDDForMapReduce(graphRDD, viewIncomingRDD, elementComputeKeys)
                        .setName("mapReduceGraphRDD").cache();
                for (final MapReduce mapReduce : this.mapReducers) {
                    // execute the map reduce job
                    final HadoopConfiguration newApacheConfiguration = new HadoopConfiguration(
                            apacheConfiguration);
                    mapReduce.storeState(newApacheConfiguration);
                    // map
                    final JavaPairRDD mapRDD = SparkExecutor
                            .executeMap((JavaPairRDD) mapReduceGraphRDD, mapReduce, newApacheConfiguration)
                            .setName("mapRDD");
                    // combine TODO: is this really needed
                    // reduce
                    final JavaPairRDD reduceRDD = (mapReduce.doStage(MapReduce.Stage.REDUCE)) ? SparkExecutor
                            .executeReduce(mapRDD, mapReduce, newApacheConfiguration).setName("reduceRDD")
                            : null;
                    // write the map reduce output back to disk (memory)
                    SparkExecutor.saveMapReduceRDD(null == reduceRDD ? mapRDD : reduceRDD, mapReduce,
                            finalMemory, hadoopConfiguration);
                }
            }
            // update runtime and return the newly computed graph
            finalMemory.setRuntime(System.currentTimeMillis() - startTime);
            return new DefaultComputerResult(
                    HadoopHelper.getOutputGraph(this.hadoopGraph, this.resultGraph, this.persist),
                    finalMemory.asImmutable());
        }
    });
}
From source file:org.apache.tinkerpop.gremlin.process.computer.bulkloading.BulkLoaderVertexProgram.java
@Override
public void loadState(final Graph graph, final Configuration config) {
    configuration = new BaseConfiguration();
    if (config != null) {
        ConfigurationUtils.copy(config, configuration);
    }
    intermediateBatchSize = configuration.getLong(INTERMEDIATE_BATCH_SIZE_CFG_KEY, 0L);
    elementComputeKeys.add(VertexComputeKey.of(DEFAULT_BULK_LOADER_VERTEX_ID, true));
    bulkLoader = createBulkLoader();
}