List of usage examples for `org.apache.hadoop.hdfs.server.namenode.NameNode.format`
public static void format(Configuration conf) throws IOException
From source file:com.griddynamics.jagger.storage.fs.hdfs.HDFSNamenodeServer.java
License:Open Source License
/**
 * Formats the HDFS name-node storage using the configured startup properties.
 *
 * <p>Any {@link IOException} raised while formatting is wrapped in a
 * {@code TechnicalException}.
 */
public void formatStorage() {
    log.info("Format Storage...");
    try {
        NameNode.format(HadoopUtils.toConfiguration(startupProperties));
    } catch (IOException e) {
        throw new TechnicalException(e);
    }
    log.info("Storage formatted");
}
From source file:hudson.gridmaven.gridlayer.NameNodeStartTask.java
License:Open Source License
public Void call() throws IOException { File hadoopRoot = new File(hudsonRoot, "hadoop"); if (hadoopRoot.mkdirs()) format = true;//from www . j a va 2 s . c om final Configuration conf = new Configuration(); // location of the name node conf.set("fs.default.name", hdfsUrl); conf.set("dfs.http.address", "0.0.0.0:" + HTTP_PORT); // namespace node stores information here File namedir = new File(hadoopRoot, "namedir"); if (namedir.mkdirs()) format = true; conf.set("dfs.name.dir", namedir.getPath()); // dfs node stores information here File datadir = new File(hadoopRoot, "datadir"); conf.set("dfs.namenode.logging.level", "ALL"); if (datadir.mkdirs()) format = true; conf.set("dfs.data.dir", datadir.getPath()); conf.setInt("dfs.replication", 1); conf.set("dfs.safemode.extension", "1"); conf.set("dfs.block.size", "1048576"); //if(format) { // This will provide format HDFS with every start System.out.println("Formatting HDFS"); NameNode.format(conf); //} System.out.println("Starting namenode"); NameNode.createNameNode(new String[0], conf); return null; }
From source file:hudson.gridmaven.gridlayer.PluginImpl.java
License:Open Source License
/** * Namenode initialization.//w ww .jav a 2 s . c o m */ public void postInit() throws IOException, InterruptedException { masterHostName = getMasterHostName(); // File root = Hudson.getInstance().getRootDir(); File hadoopRoot = new File(root, "hadoop"); String hdfsUrl = getHdfsUrl(); if (hadoopRoot.mkdirs()) format = true; Hudson.getInstance().getActions().add(page); nameConf = new Configuration(); // location of the name node nameConf.set("fs.default.name", hdfsUrl); nameConf.set("dfs.http.address", "0.0.0.0:" + HTTP_PORT); // namespace node stores information here File namedir = new File(hadoopRoot, "namedir"); if (namedir.mkdirs()) format = true; nameConf.set("dfs.name.dir", namedir.getPath()); // dfs node stores information here File datadir = new File(hadoopRoot, "datadir"); if (datadir.mkdirs()) format = true; nameConf.set("dfs.data.dir", datadir.getPath()); nameConf.setInt("dfs.replication", 1); //nameConf.set("fs.hdfs.impl","org.apache.hadoop.hdfs.DistributedFileSystem"); if (format) { System.out.println("Formatting HDFS"); NameNode.format(nameConf); } // Hadoop adds all project files recursively to storage when job starts //String projectRoot = getWorkspace().getRemote() + File.separator + root.getRelativePath(); //hadoop.quickAdd(projectRoot); //hadoop.listFiles("/"); }
From source file:hudson.plugins.hadoop.NameNodeStartTask.java
License:Open Source License
public Void call() throws IOException { File hadoopRoot = new File(hudsonRoot, "hadoop"); if (hadoopRoot.mkdirs()) format = true;//from w w w.j av a2 s .com final Configuration conf = new Configuration(); // location of the name node conf.set("fs.default.name", hdfsUrl); conf.set("dfs.http.address", "0.0.0.0:" + HTTP_PORT); // namespace node stores information here File namedir = new File(hadoopRoot, "namedir"); if (namedir.mkdirs()) format = true; conf.set("dfs.name.dir", namedir.getPath()); // dfs node stores information here File datadir = new File(hadoopRoot, "datadir"); if (datadir.mkdirs()) format = true; conf.set("dfs.data.dir", datadir.getPath()); conf.setInt("dfs.replication", 1); if (format) { System.out.println("Formatting HDFS"); NameNode.format(conf); } System.out.println("Starting namenode"); NameNode.createNameNode(new String[0], conf); return null; }
From source file:io.fabric8.hadoop.commands.NameNodeFormat.java
License:Apache License
@Override protected void doExecute(Configuration conf) throws Exception { Collection<File> dirsToFormat = FSNamesystem.getNamespaceDirs(conf); for (Iterator<File> it = dirsToFormat.iterator(); it.hasNext();) { File curDir = it.next();//from w ww.jav a 2s . c o m if (!curDir.exists()) { continue; } if (!force) { System.err.print("Re-format filesystem in " + curDir + " ? (Y or N) "); System.err.flush(); if (!(System.in.read() == 'Y')) { System.err.println("Format aborted in " + curDir); return; } while (System.in.read() != '\n') ; // discard the enter-key } } NameNode.format(conf); }
From source file:io.fabric8.hadoop.hdfs.NameNodeFactory.java
License:Apache License
@Override protected NameNode doCreate(Dictionary properties) throws Exception { Configuration conf = new Configuration(); for (Enumeration e = properties.keys(); e.hasMoreElements();) { Object key = e.nextElement(); Object val = properties.get(key); conf.set(key.toString(), val.toString()); }//from w ww . ja va 2 s .c om boolean exists = false; for (File file : FSNamesystem.getNamespaceDirs(conf)) { exists |= file.exists(); } if (!exists) { NameNode.format(conf); } NameNode nameNode = NameNode.createNameNode(null, conf); return nameNode; }