// Test-support base class: boots an in-process Hadoop mini DFS/MR cluster.
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.jena.tdbloader3;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.MiniMRCluster;

/**
 * Base class for tests that need an in-process Hadoop mini cluster.
 *
 * <p>{@link #startCluster()} boots a {@code numNodes}-node mini DFS and
 * mini MR cluster, writes the generated client configuration to
 * {@link #config} (with {@code dfs.permissions} forced to {@code false}),
 * and copies the local {@code src/test/resources} tree onto HDFS.
 * {@link #stopCluster()} tears both clusters down; it is idempotent.
 */
public abstract class AbstractMiniMRClusterTest {

    protected static MiniDFSCluster dfsCluster;
    protected static MiniMRCluster mrCluster;
    protected static FileSystem fs;
    protected static final int numNodes = 2;
    protected static final String config = "target/hadoop-localhost-test.xml";

    /**
     * Starts the mini DFS + MR clusters and prepares test data on HDFS.
     *
     * @throws IOException if the cluster cannot be started, the generated
     *         configuration cannot be written, or the test data cannot be
     *         copied onto HDFS
     */
    public static void startCluster() throws IOException {
        FileUtils.deleteDirectory(new File("build/test"));
        Configuration configuration = new Configuration();
        // NOTE(review): earlier attempts to relax HDFS permissions through the
        // Configuration object (dfs.permissions=false,
        // dfs.datanode.data.dir.perm=755, dfs.umask=022) did not take effect;
        // instead the generated XML is patched textually below.
        System.setProperty("hadoop.log.dir", "build/test/logs");
        dfsCluster = new MiniDFSCluster(configuration, numNodes, true, null);
        mrCluster = new MiniMRCluster(numNodes, dfsCluster.getFileSystem().getUri().toString(), 1);

        // Generate the Hadoop client configuration, forcing dfs.permissions
        // off so tests are not tripped up by local directory permissions.
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        mrCluster.createJobConf().writeXml(baos);
        // writeXml emits UTF-8; decode/encode explicitly instead of relying
        // on the platform default charset.
        String cfg = baos.toString("UTF-8");
        cfg = cfg.replace("<name>dfs.permissions</name><value>true</value>",
                          "<name>dfs.permissions</name><value>false</value>");
        // try-with-resources: the original leaked the stream if write() threw.
        try (FileOutputStream out = new FileOutputStream(config)) {
            out.write(cfg.getBytes("UTF-8"));
        }

        // Copy testing data onto (H)DFS
        fs = dfsCluster.getFileSystem();
        fs.copyFromLocalFile(new Path("src/test/resources"), new Path("src/test/resources"));
    }

    /**
     * Shuts down the MR and DFS clusters if running. Safe to call more than
     * once: each reference is nulled after shutdown.
     *
     * @throws IOException declared for API compatibility with callers
     */
    public static void stopCluster() throws IOException {
        if (dfsCluster != null) {
            dfsCluster.shutdown();
            dfsCluster = null;
        }
        if (mrCluster != null) {
            mrCluster.shutdown();
            mrCluster = null;
        }
    }
}