List of usage examples for the deprecated org.apache.hadoop.hdfs.MiniDFSCluster constructor
@Deprecated
public MiniDFSCluster(int nameNodePort,
                      Configuration conf,
                      int numDataNodes,
                      boolean format,
                      boolean manageNameDfsDirs,
                      boolean manageDataDfsDirs,
                      StartupOption operation,
                      String[] racks,
                      String hosts[],
                      long[] simulatedCapacities) throws IOException
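Before the per-project examples below, a minimal, self-contained sketch of how this deprecated constructor is typically driven from a test: start a single-DataNode cluster on an ephemeral NameNode port, point the configuration at the resulting filesystem, and shut everything down afterwards. The MiniDFSCluster, waitActive(), getFileSystem(), and shutdown() calls are standard Hadoop test API; the class name and directory paths here are illustrative only and do not come from the source files listed below.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical example class; not taken from any of the projects below.
public class MiniDfsExample {
    public static void main(String[] args) throws IOException {
        // MiniDFSCluster homes itself under "test.build.data" (default "build/test/data").
        System.setProperty("test.build.data", new File("target/minidfs").getAbsolutePath());

        Configuration conf = new Configuration();
        // nameNodePort = 0 -> pick a free port; 1 DataNode; format the cluster;
        // let the cluster manage NameNode and DataNode dirs; no racks/hosts/capacities.
        MiniDFSCluster cluster = new MiniDFSCluster(0, conf, 1, true, true, true, null, null, null, null);
        try {
            cluster.waitActive();
            FileSystem fs = cluster.getFileSystem();
            conf.set("fs.defaultFS", fs.getUri().toString());
            fs.mkdirs(new Path("/example"));
        } finally {
            cluster.shutdown();
        }
    }
}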
From source file:com.trendmicro.hdfs.webdav.test.MiniClusterTestUtil.java
License:Apache License
public MiniDFSCluster startMiniDFSCluster(int servers, final File dir, final String hosts[]) throws Exception {
    if (dir == null) {
        clusterTestBuildDir = setupClusterTestBuildDir();
    } else {
        clusterTestBuildDir = dir;
    }
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestBuildDir.toString());
    System.setProperty("test.cache.data", clusterTestBuildDir.toString());

    Configuration conf = getConfiguration();
    dfsCluster = new MiniDFSCluster(0, conf, servers, true, true, true, null, null, hosts, null);

    FileSystem fs = dfsCluster.getFileSystem();
    conf.set("fs.defaultFS", fs.getUri().toString());
    conf.set("fs.default.name", fs.getUri().toString());
    return dfsCluster;
}
From source file:org.apache.commons.vfs2.provider.hdfs.test.HdfsFileProviderTest.java
License:Apache License
@BeforeClass
public static void setUp() throws Exception {
    Logger.getRootLogger().setLevel(Level.ERROR);

    // Put the MiniDFSCluster directory in the target directory
    File data = new File("target/test/hdfstestdata").getAbsoluteFile();
    data.mkdirs();
    System.setProperty("test.build.data", data.toString());
    FileUtils.cleanDirectory(data);

    // Setup HDFS
    conf = new Configuration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, HDFS_URI);
    conf.set("hadoop.security.token.service.use_ip", "true");
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024 * 1024); // 1M blocksize

    setUmask(conf);

    cluster = new MiniDFSCluster(PORT, conf, 1, true, true, true, null, null, null, null);
    cluster.waitActive();

    // Set up the VFS
    manager = new DefaultFileSystemManager();
    manager.addProvider("hdfs", new HdfsFileProvider());
    manager.init();
    hdfs = cluster.getFileSystem();
}
From source file:org.apache.hama.HamaClusterTestCase.java
License:Apache License
@Override
protected void setUp() throws Exception {
    try {
        if (this.startDfs) {
            // This spews a bunch of warnings about missing scheme. TODO: fix.
            this.dfsCluster = new MiniDFSCluster(0, this.conf, 2, true, true, true, null, null, null, null);

            // mangle the conf so that the fs parameter points to the minidfs we
            // just started up
            FileSystem filesystem = dfsCluster.getFileSystem();
            conf.set("fs.defaultFS", filesystem.getUri().toString());
            Path parentdir = filesystem.getHomeDirectory();
            filesystem.mkdirs(parentdir);
        }

        // do the super setup now. if we had done it first, then we would have
        // gotten our conf all mangled and a local fs started up.
        super.setUp();

        // start the instance
        hamaClusterSetup();
    } catch (Exception e) {
        if (zooKeeperCluster != null) {
            zooKeeperCluster.shutdown();
        }
        if (dfsCluster != null) {
            shutdownDfs(dfsCluster);
        }
        throw e;
    }
}
From source file:org.lilyproject.hadooptestfw.fork.HBaseTestingUtility.java
License:Apache License
/**
 * Start a minidfscluster.
 * Can only create one.
 *
 * @param servers How many DNs to start.
 * @param hosts hostnames DNs to run on.
 * @return The mini dfs cluster created.
 * @see #shutdownMiniDFSCluster()
 */
public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[]) throws Exception {
    // Check that there is not already a cluster running
    isRunningCluster();

    // Initialize the local directory used by the MiniDFS
    if (clusterTestDir == null) {
        setupClusterTestDir();
    }

    // We have to set this property as it is used by MiniCluster
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.toString());

    // Some tests also do this:
    //  System.getProperty("test.cache.data", "build/test/cache");
    // It's also deprecated
    System.setProperty("test.cache.data", this.clusterTestDir.toString());

    // Ok, now we can start
    // Lily change: first argument changed from 0 to 8020
    // Lily change: let the formatting of NameNode and DataNodes depend on whether the dir is empty
    boolean format = this.clusterTestDir.list().length == 0;
    this.dfsCluster = new MiniDFSCluster(8020 /* Lily change */, this.conf, servers,
            format /* Lily change */, true, true, null, null, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    this.conf.set("fs.defaultFS", fs.getUri().toString());
    // Do old style too just to be safe.
    // Lily change: commented this out to avoid warnings
    // this.conf.set("fs.default.name", fs.getUri().toString());

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    return this.dfsCluster;
}
From source file:org.lilyproject.testfw.HadoopLauncher.java
License:Apache License
public MiniDFSCluster startMiniDFSCluster(int servers, final File dir) throws Exception {
    // This does the following to home the minidfscluster
    //  base_dir = new File(System.getProperty("test.build.data", "build/test/data"), "dfs/");
    // Some tests also do this:
    //  System.getProperty("test.cache.data", "build/test/cache");
    if (dir == null)
        this.clusterTestBuildDir = setupClusterTestBuildDir();
    else
        this.clusterTestBuildDir = dir;

    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.toString());
    System.setProperty("test.cache.data", this.clusterTestBuildDir.toString());

    this.dfsCluster = new MiniDFSCluster(9000, this.conf, servers, true, true, true, null, null, null, null);
    return this.dfsCluster;
}
From source file:tajo.TajoTestingUtility.java
License:Apache License
/**
 * Start a minidfscluster.
 * Can only create one.
 *
 * @param servers How many DNs to start.
 * @param dir Where to home your dfs cluster.
 * @param hosts hostnames DNs to run on.
 * @return The mini dfs cluster created.
 * @throws IOException
 * @see #shutdownMiniDFSCluster()
 */
public MiniDFSCluster startMiniDFSCluster(int servers, final File dir, final String hosts[])
        throws IOException {
    // This does the following to home the minidfscluster
    //  base_dir = new File(System.getProperty("test.build.data", "build/test/data"), "dfs/");
    // Some tests also do this:
    //  System.getProperty("test.cache.data", "build/test/cache");
    if (dir == null) {
        this.clusterTestBuildDir = setupClusterTestBuildDir();
    } else {
        this.clusterTestBuildDir = dir;
    }
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.toString());
    System.setProperty("test.cache.data", this.clusterTestBuildDir.toString());

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true, true, null, null, hosts, null);

    // Set this just-started cluster as our filesystem.
    this.defaultFS = this.dfsCluster.getFileSystem();
    this.conf.set("fs.defaultFS", defaultFS.getUri().toString());
    // Do old style too just to be safe.
    this.conf.set("fs.default.name", defaultFS.getUri().toString());
    return this.dfsCluster;
}
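Several of the javadocs above point at a companion shutdownMiniDFSCluster() method that is not shown on this page. A minimal sketch of what such a teardown usually looks like, assuming the same dfsCluster/defaultFS fields as the TajoTestingUtility example above (the actual implementation in each project may differ):

public void shutdownMiniDFSCluster() throws Exception {
    if (this.dfsCluster != null) {
        // Stops the NameNode and all DataNodes and releases their ports.
        this.dfsCluster.shutdown();
        this.dfsCluster = null;
        this.defaultFS = null;
    }
}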