List of usage examples for the org.apache.hadoop.hdfs.MiniDFSCluster constructor
@Deprecated public MiniDFSCluster(Configuration conf, int numDataNodes, boolean format, String[] racks) throws IOException
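This four-argument form (configuration, DataNode count, whether to format the NameNode storage, and per-DataNode rack assignments) is deprecated in favor of MiniDFSCluster.Builder, but it is still the variant used throughout the examples below. A minimal sketch of a complete start/use/stop cycle; the try/finally teardown is an assumption added here, since most of the examples omit it:

    Configuration conf = new Configuration();
    // one DataNode, format the storage directories, default rack placement
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    try {
        FileSystem fs = cluster.getFileSystem();
        fs.mkdirs(new Path("/example"));
    } finally {
        cluster.shutdown();
    }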
From source file:edu.uci.ics.pregelix.example.dataload.DataLoadTest.java
License:Apache License
private void startHDFS() throws IOException {
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/core-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/mapred-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/hdfs-site.xml"));
    FileSystem lfs = FileSystem.getLocal(new Configuration());
    lfs.delete(new Path("build"), true);
    System.setProperty("hadoop.log.dir", "logs");
    dfsCluster = new MiniDFSCluster(conf, numberOfNC, true, null);
}
From source file:edu.uci.ics.pregelix.example.jobrun.RunJobTestSuite.java
License:Apache License
private void startHDFS() throws IOException {
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/core-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/mapred-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/hdfs-site.xml"));
    FileSystem lfs = FileSystem.getLocal(new Configuration());
    lfs.delete(new Path("build"), true);
    System.setProperty("hadoop.log.dir", "logs");
    dfsCluster = new MiniDFSCluster(conf, numberOfNC, true, null);
    FileSystem dfs = FileSystem.get(conf);
    Path src = new Path(DATA_PATH);
    Path dest = new Path(HDFS_PATH);
    dfs.mkdirs(dest);
    dfs.copyFromLocalFile(src, dest);
    src = new Path(DATA_PATH2);
    dest = new Path(HDFS_PATH2);
    dfs.mkdirs(dest);
    dfs.copyFromLocalFile(src, dest);
    src = new Path(DATA_PATH3);
    dest = new Path(HDFS_PATH3);
    dfs.mkdirs(dest);
    dfs.copyFromLocalFile(src, dest);
    DataOutputStream confOutput = new DataOutputStream(new FileOutputStream(new File(HADOOP_CONF_PATH)));
    conf.writeXml(confOutput);
    confOutput.flush();
    confOutput.close();
}
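None of the Pregelix suites shown here include the matching teardown; a hedged sketch, reusing the dfsCluster field and HADOOP_CONF_PATH constant from the snippet above (the method name cleanupHDFS is an assumption):

    private void cleanupHDFS() throws Exception {
        // stop the DataNodes and the NameNode
        dfsCluster.shutdown();
        // remove the configuration file serialized by startHDFS()
        new File(HADOOP_CONF_PATH).delete();
    }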
From source file:edu.uci.ics.pregelix.example.util.TestCluster.java
License:Apache License
private void startHDFS() throws IOException {
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/core-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/mapred-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/hdfs-site.xml"));
    FileSystem lfs = FileSystem.getLocal(new Configuration());
    lfs.delete(new Path("build"), true);
    System.setProperty("hadoop.log.dir", "logs");
    dfsCluster = new MiniDFSCluster(conf, numberOfNC, true, null);
    FileSystem dfs = FileSystem.get(conf);
    Path src = new Path(DATA_PATH);
    Path dest = new Path(HDFS_PATH);
    dfs.mkdirs(dest);
    dfs.copyFromLocalFile(src, dest);
    src = new Path(DATA_PATH2);
    dest = new Path(HDFS_PATH2);
    dfs.mkdirs(dest);
    dfs.copyFromLocalFile(src, dest);
    src = new Path(DATA_PATH3);
    dest = new Path(HDFS_PATH3);
    dfs.mkdirs(dest);
    dfs.copyFromLocalFile(src, dest);
    src = new Path(DATA_PATH4);
    dest = new Path(HDFS_PATH4);
    dfs.mkdirs(dest);
    dfs.copyFromLocalFile(src, dest);
    src = new Path(DATA_PATH5);
    dest = new Path(HDFS_PATH5);
    dfs.mkdirs(dest);
    dfs.copyFromLocalFile(src, dest);
    DataOutputStream confOutput = new DataOutputStream(new FileOutputStream(new File(HADOOP_CONF_PATH)));
    conf.writeXml(confOutput);
    confOutput.flush();
    confOutput.close();
}
From source file:hadoop.example.hdfs.ShowFileStatusTestCase.java
License:Open Source License
@BeforeClass
public static void setUp() throws IOException {
    Configuration conf = new Configuration();
    if (System.getProperty("test.build.data") == null) {
        System.setProperty("test.build.data", "/tmp");
    }
    cluster = new MiniDFSCluster(conf, 1, true, null);
    fs = cluster.getFileSystem();
    OutputStream out = fs.create(new Path("/dir/file"));
    out.write("content".getBytes("UTF-8"));
    out.close();
}
From source file:io.aos.hdfs.CoherencyModelTest.java
License:Apache License
@Before
public void setUp() throws IOException {
    Configuration conf = new Configuration();
    if (System.getProperty("test.build.data") == null) {
        System.setProperty("test.build.data", "/tmp");
    }
    cluster = new MiniDFSCluster(conf, 1, true, null);
    fs = cluster.getFileSystem();
}
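CoherencyModelTest exercises HDFS's read visibility guarantees. A sketch of the kind of check such a test makes, assuming Hadoop 2.x where FSDataOutputStream exposes hflush() (older releases used sync()); the path is a placeholder, not taken from the source file:

    Path p = new Path("coherency-check");
    FSDataOutputStream out = fs.create(p);
    out.write("content".getBytes("UTF-8"));
    out.flush();  // flush() alone does not make the data visible to new readers
    out.hflush(); // hflush() pushes the data to every DataNode in the write pipeline
    // after hflush(), a fresh reader sees the written length
    long visible = fs.getFileStatus(p).getLen();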
From source file:io.aos.hdfs.ShowFileStatusTest.java
License:Apache License
@Before
public void setUp() throws IOException {
    Configuration conf = new Configuration();
    if (System.getProperty("test.build.data") == null) {
        System.setProperty("test.build.data", "/tmp");
    }
    cluster = new MiniDFSCluster(conf, 1, true, null);
    fs = cluster.getFileSystem();
    OutputStream out = fs.create(new Path("/dir/file"));
    out.write("content".getBytes("UTF-8"));
    out.close();
}
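With this fixture in place, the test typically reads the status back and asserts on it; a hedged sketch using Hamcrest-style assertions (not taken from the source file):

    FileStatus stat = fs.getFileStatus(new Path("/dir/file"));
    assertThat(stat.getPath().toUri().getPath(), is("/dir/file"));
    assertThat(stat.isDir(), is(false));
    assertThat(stat.getLen(), is(7L)); // "content" is 7 bytes in UTF-8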
From source file:io.aos.t4f.hadoop.mapred.WordCountTest.java
License:Apache License
@Before
public void init() throws IOException {
    new File(TARGET_FOLDER).mkdirs();
    LOCAL_FILE.createNewFile();
    FileUtils.deleteDirectory(new File(MINI_DFS_CLUSTER_FOLDER));
    new File(DATA_FOLDER).mkdirs();
    new File(LOG_FOLDER).mkdirs();
    configuration = new Configuration();
    dfsCluster = new MiniDFSCluster(configuration, 1, true, null);
    System.setProperty("hadoop.log.dir", LOG_FOLDER);
    mrCluster = new MiniMRCluster(1, getFileSystem().getUri().toString(), 1);
}
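When a MiniMRCluster is layered on top of the DFS cluster like this, both need shutting down after the test, MapReduce first since it depends on HDFS; a sketch under the assumption that the fields above are still in scope (the method name is hypothetical):

    @After
    public void destroy() throws IOException {
        if (mrCluster != null) mrCluster.shutdown();
        if (dfsCluster != null) dfsCluster.shutdown();
    }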
From source file:org.apache.accumulo.minicluster.MiniAccumuloCluster.java
License:Apache License
/**
 * @param config initial configuration
 */
public MiniAccumuloCluster(MiniAccumuloConfig config) throws IOException {
    this.config = config.initialize();

    config.getConfDir().mkdirs();
    config.getAccumuloDir().mkdirs();
    config.getZooKeeperDir().mkdirs();
    config.getLogDir().mkdirs();
    config.getWalogDir().mkdirs();
    config.getLibDir().mkdirs();

    if (config.useMiniDFS()) {
        File nn = new File(config.getAccumuloDir(), "nn");
        nn.mkdirs();
        File dn = new File(config.getAccumuloDir(), "dn");
        dn.mkdirs();
        File dfs = new File(config.getAccumuloDir(), "dfs");
        dfs.mkdirs();

        Configuration conf = new Configuration();
        conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
        conf.set("dfs.support.append", "true");
        conf.set("dfs.datanode.synconclose", "true");
        conf.set("dfs.datanode.data.dir.perm", MiniDFSUtil.computeDatanodeDirectoryPermission());

        String oldTestBuildData = System.setProperty("test.build.data", dfs.getAbsolutePath());
        miniDFS = new MiniDFSCluster(conf, 1, true, null);
        if (oldTestBuildData == null)
            System.clearProperty("test.build.data");
        else
            System.setProperty("test.build.data", oldTestBuildData);
        miniDFS.waitClusterUp();

        InetSocketAddress dfsAddress = miniDFS.getNameNode().getNameNodeAddress();
        dfsUri = "hdfs://" + dfsAddress.getHostName() + ":" + dfsAddress.getPort();
        File coreFile = new File(config.getConfDir(), "core-site.xml");
        writeConfig(coreFile, Collections.singletonMap("fs.default.name", dfsUri).entrySet());
        File hdfsFile = new File(config.getConfDir(), "hdfs-site.xml");
        writeConfig(hdfsFile, conf);

        Map<String, String> siteConfig = config.getSiteConfig();
        siteConfig.put(Property.INSTANCE_DFS_URI.getKey(), dfsUri);
        siteConfig.put(Property.INSTANCE_DFS_DIR.getKey(), "/accumulo");
        config.setSiteConfig(siteConfig);
    } else {
        dfsUri = "file://";
    }

    File siteFile = new File(config.getConfDir(), "accumulo-site.xml");
    writeConfig(siteFile, config.getSiteConfig().entrySet());

    FileWriter fileWriter = new FileWriter(siteFile);
    fileWriter.append("<configuration>\n");
    for (Entry<String, String> entry : config.getSiteConfig().entrySet())
        fileWriter.append(
                "<property><name>" + entry.getKey() + "</name><value>" + entry.getValue() + "</value></property>\n");
    fileWriter.append("</configuration>\n");
    fileWriter.close();

    zooCfgFile = new File(config.getConfDir(), "zoo.cfg");
    fileWriter = new FileWriter(zooCfgFile);

    // zookeeper uses Properties to read its config, so use that to write in order to properly escape things like Windows paths
    Properties zooCfg = new Properties();
    zooCfg.setProperty("tickTime", "2000");
    zooCfg.setProperty("initLimit", "10");
    zooCfg.setProperty("syncLimit", "5");
    zooCfg.setProperty("clientPort", config.getZooKeeperPort() + "");
    zooCfg.setProperty("maxClientCnxns", "1000");
    zooCfg.setProperty("dataDir", config.getZooKeeperDir().getAbsolutePath());
    zooCfg.store(fileWriter, null);
    fileWriter.close();

    File nativeMap = new File(config.getLibDir().getAbsolutePath() + "/native/map");
    nativeMap.mkdirs();
    File testRoot = new File(
            new File(new File(System.getProperty("user.dir")).getParent() + "/server/src/main/c++/nativeMap")
                    .getAbsolutePath());
    if (testRoot.exists()) {
        for (String file : testRoot.list()) {
            File src = new File(testRoot, file);
            if (src.isFile() && file.startsWith("libNativeMap"))
                FileUtils.copyFile(src, new File(nativeMap, file));
        }
    }
}
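A hedged sketch of driving this constructor from a test (the directory and root password are placeholders; start() and stop() are MiniAccumuloCluster's documented lifecycle methods, and both may block while processes come up or go down):

    MiniAccumuloConfig macConfig = new MiniAccumuloConfig(new File("/tmp/mac-test"), "secret");
    MiniAccumuloCluster mac = new MiniAccumuloCluster(macConfig);
    mac.start();
    // connect with mac.getInstanceName() and mac.getZooKeepers() ...
    mac.stop();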
From source file:org.apache.accumulo.test.AccumuloDFSBase.java
License:Apache License
@BeforeClass
public static void miniDfsClusterSetup() {
    System.setProperty("java.io.tmpdir", System.getProperty("user.dir") + "/target");
    // System.setProperty("org.apache.commons.logging.Log", "org.apache.commons.logging.impl.NoOpLog");
    // Logger.getRootLogger().setLevel(Level.ERROR);

    // Put the MiniDFSCluster directory in the target directory
    System.setProperty("test.build.data", "target/build/test/data");

    // Setup HDFS
    conf = new Configuration();
    conf.set("hadoop.security.token.service.use_ip", "true");
    conf.set("dfs.datanode.data.dir.perm", MiniDFSUtil.computeDatanodeDirectoryPermission());
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024 * 1024); // 1M blocksize

    try {
        cluster = new MiniDFSCluster(conf, 1, true, null);
        cluster.waitClusterUp();
        // We can't assume that the hostname of "localhost" will still be "localhost" after
        // starting up the NameNode. We may get mapped into a FQDN via settings in /etc/hosts.
        HDFS_URI = cluster.getFileSystem().getUri();
    } catch (IOException e) {
        throw new RuntimeException("Error setting up mini cluster", e);
    }

    // Set up the VFS
    vfs = new DefaultFileSystemManager();
    try {
        vfs.setFilesCache(new DefaultFilesCache());
        vfs.addProvider("res", new org.apache.commons.vfs2.provider.res.ResourceFileProvider());
        vfs.addProvider("zip", new org.apache.commons.vfs2.provider.zip.ZipFileProvider());
        vfs.addProvider("gz", new org.apache.commons.vfs2.provider.gzip.GzipFileProvider());
        vfs.addProvider("ram", new org.apache.commons.vfs2.provider.ram.RamFileProvider());
        vfs.addProvider("file", new org.apache.commons.vfs2.provider.local.DefaultLocalFileProvider());
        vfs.addProvider("jar", new org.apache.commons.vfs2.provider.jar.JarFileProvider());
        vfs.addProvider("http", new org.apache.commons.vfs2.provider.http.HttpFileProvider());
        vfs.addProvider("https", new org.apache.commons.vfs2.provider.https.HttpsFileProvider());
        vfs.addProvider("ftp", new org.apache.commons.vfs2.provider.ftp.FtpFileProvider());
        vfs.addProvider("ftps", new org.apache.commons.vfs2.provider.ftps.FtpsFileProvider());
        vfs.addProvider("war", new org.apache.commons.vfs2.provider.jar.JarFileProvider());
        vfs.addProvider("par", new org.apache.commons.vfs2.provider.jar.JarFileProvider());
        vfs.addProvider("ear", new org.apache.commons.vfs2.provider.jar.JarFileProvider());
        vfs.addProvider("sar", new org.apache.commons.vfs2.provider.jar.JarFileProvider());
        vfs.addProvider("ejb3", new org.apache.commons.vfs2.provider.jar.JarFileProvider());
        vfs.addProvider("tmp", new org.apache.commons.vfs2.provider.temp.TemporaryFileProvider());
        vfs.addProvider("tar", new org.apache.commons.vfs2.provider.tar.TarFileProvider());
        vfs.addProvider("tbz2", new org.apache.commons.vfs2.provider.tar.TarFileProvider());
        vfs.addProvider("tgz", new org.apache.commons.vfs2.provider.tar.TarFileProvider());
        vfs.addProvider("bz2", new org.apache.commons.vfs2.provider.bzip2.Bzip2FileProvider());
        vfs.addProvider("hdfs", new HdfsFileProvider());
        vfs.addExtensionMap("jar", "jar");
        vfs.addExtensionMap("zip", "zip");
        vfs.addExtensionMap("gz", "gz");
        vfs.addExtensionMap("tar", "tar");
        vfs.addExtensionMap("tbz2", "tar");
        vfs.addExtensionMap("tgz", "tar");
        vfs.addExtensionMap("bz2", "bz2");
        vfs.addMimeTypeMap("application/x-tar", "tar");
        vfs.addMimeTypeMap("application/x-gzip", "gz");
        vfs.addMimeTypeMap("application/zip", "zip");
        vfs.setFileContentInfoFactory(new FileContentInfoFilenameFactory());
        vfs.setFilesCache(new SoftRefFilesCache());
        vfs.setReplicator(new DefaultFileReplicator(new File(System.getProperty("java.io.tmpdir"),
                "accumulo-vfs-cache-" + System.getProperty("user.name", "nouser"))));
        vfs.setCacheStrategy(CacheStrategy.ON_RESOLVE);
        vfs.init();
    } catch (FileSystemException e) {
        throw new RuntimeException("Error setting up VFS", e);
    }
}
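With the hdfs scheme registered, test code can resolve paths on the mini cluster through VFS; a minimal sketch, assuming something already exists at the given path:

    FileObject remote = vfs.resolveFile(HDFS_URI.toString() + "/some/existing/file");
    System.out.println(remote.exists() + " bytes=" + remote.getContent().getSize());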
From source file:org.apache.ambari.servicemonitor.utils.DFSUtils.java
License:Apache License
public static MiniDFSCluster createCluster(Configuration conf, String dataDirPath, int numDataNodes)
        throws IOException {
    File testDataDir = new File(dataDirPath);
    System.setProperty("test.build.data", testDataDir.getAbsolutePath());
    return new MiniDFSCluster(conf, numDataNodes, true, null);
}
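A caller sketch for this helper (the data directory and node count are placeholders):

    MiniDFSCluster cluster = DFSUtils.createCluster(new Configuration(), "/tmp/dfs-test", 2);
    try {
        FileSystem fs = cluster.getFileSystem();
        // ... exercise the two-DataNode cluster ...
    } finally {
        cluster.shutdown();
    }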