List of usage examples for org.apache.hadoop.hdfs MiniDFSCluster getNameNodePort
public int getNameNodePort()
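getNameNodePort() returns the RPC port the cluster's NameNode actually bound, which is how callers discover the address of a MiniDFSCluster started on an ephemeral port. A minimal sketch of the common pattern (the class name MiniDfsPortExample is illustrative, not taken from any of the sources below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsPortExample {
    public static void main(String[] args) throws Exception {
        // With no explicit nameNodePort(...), the NameNode binds an
        // ephemeral port chosen at startup.
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
            // The port is only known after build() completes, so the
            // cluster URI has to be assembled afterwards.
            conf.set("fs.defaultFS", "hdfs://localhost:" + cluster.getNameNodePort());
            System.out.println(conf.get("fs.defaultFS"));
        } finally {
            cluster.shutdown();
        }
    }
}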
From source file:com.github.cbismuth.spark.utils.cluster.HadoopFactory.java
License:Open Source License
public MiniDFSCluster cluster(final Configuration config) throws IOException {
    final MiniDFSCluster cluster = new Builder(config).build();
    config.set("fs.defaultFS", format("hdfs://localhost:%d", cluster.getNameNodePort()));
    return cluster;
}
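A caller might use the factory like this (a hedged sketch; it assumes HadoopFactory has a no-argument constructor, which the excerpt does not show):

// Build the mini cluster, then resolve a FileSystem through the
// fs.defaultFS entry the factory just wrote into the configuration.
Configuration config = new Configuration();
MiniDFSCluster cluster = new HadoopFactory().cluster(config); // constructor assumed
FileSystem fs = FileSystem.get(config);
try {
    fs.mkdirs(new Path("/tmp/example"));
} finally {
    cluster.shutdown();
}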
From source file:com.github.stephenc.javaisotools.loopfs.iso9660.Iso9660FileSystemTest.java
License:Open Source License
@Test
public void hdfsSmokes() throws Exception {
    assumeTrue(isNotWindows());
    // Creating a Mini DFS Cluster as the default File System does not return a Seekable Stream
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(new Configuration());
    MiniDFSCluster hdfsCluster = builder.build();
    String hdfsTestFile = "hdfs://127.0.0.1:" + hdfsCluster.getNameNodePort() + "/test/" + filePath;
    hdfsCluster.getFileSystem().copyFromLocalFile(new Path(filePath), new Path(hdfsTestFile));
    InputStream is = hdfsCluster.getFileSystem().open(new Path(hdfsTestFile));
    Iso9660FileSystem image = new Iso9660FileSystem(new SeekableInputFileHadoop(is), true);
    this.runCheck(image);
    hdfsCluster.shutdown();
}
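Note that hdfsCluster.shutdown() only runs when every preceding call succeeds; wrapping the body in try/finally (or moving shutdown into a JUnit @After method) would guarantee the mini cluster is torn down even when the test fails.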
From source file:edu.uci.ics.asterix.aoya.test.AsterixYARNInstanceUtil.java
License:Apache License
public YarnConfiguration setUp() throws Exception {
    File asterixProjectDir = new File(System.getProperty("user.dir"));
    File installerTargetDir = new File(asterixProjectDir, "target");
    String[] dirsInTarget = installerTargetDir.list(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return new File(dir, name).isDirectory() && name.startsWith("asterix-yarn")
                    && name.endsWith("binary-assembly");
        }
    });
    if (dirsInTarget.length != 1) {
        throw new IllegalStateException("Could not find binary to run YARN integration test with");
    }
    aoyaHome = installerTargetDir.getAbsolutePath() + File.separator + dirsInTarget[0];
    File asterixServerInstallerDir = new File(aoyaHome, "asterix");
    String[] zipsInFolder = asterixServerInstallerDir.list(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return name.startsWith("asterix-server") && name.endsWith("binary-assembly.zip");
        }
    });
    if (zipsInFolder.length != 1) {
        throw new IllegalStateException("Could not find server binary to run YARN integration test with");
    }
    aoyaServerPath = asterixServerInstallerDir.getAbsolutePath() + File.separator + zipsInFolder[0];
    configPath = aoyaHome + File.separator + "configs" + File.separator + "local.xml";
    parameterPath = aoyaHome + File.separator + "conf" + File.separator + "base-asterix-configuration.xml";
    YARNCluster.getInstance().setup();
    appConf = new YarnConfiguration();
    File baseDir = new File("./target/hdfs/").getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    appConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(appConf);
    MiniDFSCluster hdfsCluster = builder.build();
    miniCluster = YARNCluster.getInstance().getCluster();
    appConf.set("fs.defaultFS", "hdfs://localhost:" + hdfsCluster.getNameNodePort());
    miniCluster.init(appConf);
    Cluster defaultConfig = Utils.parseYarnClusterConfig(configPath);
    for (Node n : defaultConfig.getNode()) {
        n.setClusterIp(MiniYARNCluster.getHostname());
    }
    defaultConfig.getMasterNode().setClusterIp(MiniYARNCluster.getHostname());
    configPath = "target" + File.separator + "localized-aoya-config.xml";
    Utils.writeYarnClusterConfig(configPath, defaultConfig);
    miniCluster.start();
    appConf = new YarnConfiguration(miniCluster.getConfig());
    appConf.set("fs.defaultFS", "hdfs://localhost:" + hdfsCluster.getNameNodePort());
    // TODO: why must I do this!? what is not being passed properly via environment variables???
    appConf.writeXml(new FileOutputStream("target" + File.separator + "yarn-site.xml"));
    // once the cluster is created, you can get its configuration
    // with the binding details to the cluster added from the minicluster
    FileSystem fs = FileSystem.get(appConf);
    Path instanceState = new Path(fs.getHomeDirectory(), AsterixYARNClient.CONF_DIR_REL + INSTANCE_NAME + "/");
    fs.delete(instanceState, true);
    Assert.assertFalse(fs.exists(instanceState));
    File outdir = new File(PATH_ACTUAL);
    outdir.mkdirs();
    return appConf;
}
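The double assignment of fs.defaultFS is deliberate: after miniCluster.start(), appConf is rebuilt from miniCluster.getConfig(), which discards the earlier setting, so the HDFS URI (still derived from hdfsCluster.getNameNodePort()) has to be reapplied before the configuration is written out to yarn-site.xml.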
From source file:hdfs.MiniHDFS.java
License:Apache License
public static void main(String[] args) throws Exception {
    if (args.length != 1) {
        throw new IllegalArgumentException("MiniHDFS <baseDirectory>");
    }
    // configure Paths
    Path baseDir = Paths.get(args[0]);
    // hadoop-home/, so logs will not complain
    if (System.getenv("HADOOP_HOME") == null) {
        Path hadoopHome = baseDir.resolve("hadoop-home");
        Files.createDirectories(hadoopHome);
        System.setProperty("hadoop.home.dir", hadoopHome.toAbsolutePath().toString());
    }
    // hdfs-data/, where any data is going
    Path hdfsHome = baseDir.resolve("hdfs-data");
    // start cluster
    Configuration cfg = new Configuration();
    cfg.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsHome.toAbsolutePath().toString());
    // lower default permission: TODO: needed?
    cfg.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "766");
    // TODO: remove hardcoded port!
    MiniDFSCluster dfs = new MiniDFSCluster.Builder(cfg).nameNodePort(9999).build();
    // write our PID file
    Path tmp = Files.createTempFile(baseDir, null, null);
    String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0];
    Files.write(tmp, pid.getBytes(StandardCharsets.UTF_8));
    Files.move(tmp, baseDir.resolve(PID_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);
    // write our port file
    tmp = Files.createTempFile(baseDir, null, null);
    Files.write(tmp, Integer.toString(dfs.getNameNodePort()).getBytes(StandardCharsets.UTF_8));
    Files.move(tmp, baseDir.resolve(PORT_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);
}
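The hardcoded-port TODO above has a straightforward fix: pass 0 (the Builder's default), letting the OS pick a free port; the existing port file then records whatever getNameNodePort() reports. A minimal sketch of the change:

// Bind an ephemeral port instead of hardcoding 9999; the OS picks a free one
// and getNameNodePort() still reports the real value for the port file.
MiniDFSCluster dfs = new MiniDFSCluster.Builder(cfg).nameNodePort(0).build();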