Usage examples for the org.apache.hadoop.hdfs.MiniDFSCluster constructor

@Deprecated
public MiniDFSCluster(Configuration conf, int numDataNodes, boolean format, String[] racks) throws IOException

Starts an in-process HDFS cluster for testing: conf supplies the cluster configuration, numDataNodes is the number of DataNodes to launch, format controls whether the NameNode and DataNodes are formatted on startup, and racks optionally names the rack for each DataNode (null for default rack assignment). This constructor is deprecated in favor of MiniDFSCluster.Builder.
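Before the examples, a minimal self-contained sketch of the pattern they all follow (the /example path is illustrative, not taken from any of the sources below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDFSClusterExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = null;
        try {
            // One DataNode, format fresh storage, default rack assignment (null racks).
            cluster = new MiniDFSCluster(conf, 1, true, null);
            cluster.waitActive();                       // block until NameNode and DataNode are up
            FileSystem fs = cluster.getFileSystem();    // FileSystem bound to the in-process HDFS
            fs.mkdirs(new Path("/example"));            // illustrative path
        } finally {
            if (cluster != null) {
                cluster.shutdown();                     // release ports and storage directories
            }
        }
    }
}

On current Hadoop releases the equivalent non-deprecated form is new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build().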
From source file: com.inmobi.conduit.distcp.tools.mapred.TestCopyMapper.java
License: Apache License

@BeforeClass
public static void setup() throws Exception {
    configuration = getConfigurationForCluster();
    cluster = new MiniDFSCluster(configuration, 1, true, null);
}
From source file: com.inmobi.conduit.distcp.tools.mapred.TestUniformSizeInputFormat.java
License: Apache License

@BeforeClass
public static void setup() throws Exception {
    Configuration conf = new Configuration();
    conf.set("dfs.datanode.max.xcievers", "512");
    cluster = new MiniDFSCluster(conf, 1, true, null);
    totalFileSize = 0;
    for (int i = 0; i < N_FILES; ++i) {
        totalFileSize += createFile("/tmp/source/" + String.valueOf(i), SIZEOF_EACH_FILE);
    }
}
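Note: the property name dfs.datanode.max.xcievers is misspelled in Hadoop itself; later releases rename it to dfs.datanode.max.transfer.threads, so the spelling above is intentional.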
From source file: com.inmobi.conduit.distcp.tools.TestCopyListing.java
License: Apache License

@BeforeClass
public static void create() throws IOException {
    cluster = new MiniDFSCluster(config, 1, true, null);
}
From source file: com.inmobi.conduit.distcp.tools.TestDistCp.java
License: Apache License

@BeforeClass
public static void setup() throws Exception {
    configuration = getConfigurationForCluster();
    cluster = new MiniDFSCluster(configuration, 1, true, null);
    // Point the TaskTracker and Hadoop logs at a scratch directory.
    System.setProperty("org.apache.hadoop.mapred.TaskTracker", "target/tmp");
    configuration.set("org.apache.hadoop.mapred.TaskTracker", "target/tmp");
    System.setProperty("hadoop.log.dir", "target/tmp");
    configuration.set("hadoop.log.dir", "target/tmp");
    // Start a one-node MapReduce cluster on top of the mini DFS, then copy its
    // JobTracker coordinates into the shared configuration.
    mrCluster = new MiniMRCluster(1, configuration.get("fs.default.name"), 1);
    Configuration mrConf = mrCluster.createJobConf();
    final String mrJobTracker = mrConf.get("mapred.job.tracker");
    configuration.set("mapred.job.tracker", mrJobTracker);
    final String mrJobTrackerAddress = mrConf.get("mapred.job.tracker.http.address");
    configuration.set("mapred.job.tracker.http.address", mrJobTrackerAddress);
}
From source file: com.inmobi.conduit.distcp.tools.TestFileBasedCopyListing.java
License: Apache License

@BeforeClass
public static void create() throws IOException {
    cluster = new MiniDFSCluster(config, 1, true, null);
    fs = cluster.getFileSystem();
    buildExpectedValuesMap();
}
From source file: com.inmobi.conduit.distcp.tools.TestGlobbedCopyListing.java
License: Apache License

@BeforeClass
public static void setup() throws Exception {
    cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
    createSourceData();
}
From source file: com.inmobi.messaging.consumer.util.MiniClusterUtil.java
License: Apache License

public static synchronized MiniDFSCluster getDFSCluster(Configuration conf) throws IOException {
    if (dfsCluster == null) {
        // Wipe any leftover state from a previous run before starting the cluster.
        lfs = FileSystem.getLocal(conf);
        lfs.delete(new Path(MiniClusterUtil.getBaseDirectory().toString()), true);
        dfsCluster = new MiniDFSCluster(conf, 1, true, null);
    }
    // Reference-count callers so the cluster can be shared across tests.
    numAccess++;
    return dfsCluster;
}
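The numAccess counter implies a matching release path; a minimal sketch of what that companion method could look like (shutDownDFSCluster is a hypothetical name, not taken from the source):

public static synchronized void shutDownDFSCluster() {
    // Hypothetical counterpart to getDFSCluster: shut the shared cluster
    // down only when the last caller releases its reference.
    numAccess--;
    if (numAccess == 0 && dfsCluster != null) {
        dfsCluster.shutdown();
        dfsCluster = null;
    }
}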
From source file: com.linkedin.haivvreo.TestHaivvreoUtils.java
License: Apache License

@Test
public void determineSchemaCanReadSchemaFromHDFS() throws IOException, HaivvreoException {
    // TODO: Make this an integration test, mock out hdfs for the actual unit test.
    String schemaString = TestAvroObjectInspectorGenerator.RECORD_SCHEMA;
    MiniDFSCluster miniDfs = null;
    try {
        // MiniDFSCluster litters files and folders all over the place.
        System.setProperty("test.build.data", "target/test-intermediate-stuff-data/");
        miniDfs = new MiniDFSCluster(new Configuration(), 1, true, null);
        miniDfs.getFileSystem().mkdirs(new Path("/path/to/schema"));
        FSDataOutputStream out = miniDfs.getFileSystem().create(new Path("/path/to/schema/schema.avsc"));
        out.writeBytes(schemaString);
        out.close();
        String onHDFS = miniDfs.getFileSystem().getUri() + "/path/to/schema/schema.avsc";
        Schema schemaFromHDFS = HaivvreoUtils.getSchemaFromHDFS(onHDFS, miniDfs.getFileSystem().getConf());
        Schema expectedSchema = Schema.parse(schemaString);
        assertEquals(expectedSchema, schemaFromHDFS);
    } finally {
        if (miniDfs != null) {
            miniDfs.shutdown();
        }
    }
}
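Setting test.build.data before construction matters because MiniDFSCluster derives its base data directory from that system property (defaulting to build/test/data), so the temporary NameNode and DataNode storage stays inside the build tree.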
From source file: com.shopzilla.hadoop.mapreduce.MiniMRClusterContext.java
License: Apache License

@PostConstruct
public void start() {
    try {
        this.hdfsRoot = new Path(localRoot.getFile().getName());
        System.setProperty("hadoop.log.dir", logDirectory.getFilename());
        System.setProperty("javax.xml.parsers.SAXParserFactory",
                "com.sun.org.apache.xerces.internal.jaxp.SAXParserFactoryImpl");
        miniDFSCluster = new MiniDFSCluster(configuration, 2, true, null);
        miniMrCluster = new MiniMRCluster(2, miniDFSCluster.getFileSystem().getUri().toString(), 1);
        // Write the live cluster settings to a hadoop-site.xml that spawned
        // processes (e.g. Pig) can pick up.
        File confFile = new File("/tmp/hadoop-site.xml");
        configuration.setInt("mapred.submit.replication", 1);
        configuration.set("dfs.datanode.address", "0.0.0.0:0");
        configuration.set("dfs.datanode.http.address", "0.0.0.0:0");
        configuration.writeXml(new FileOutputStream(confFile));
        System.setProperty("cluster", configuration.get("mapred.job.tracker"));
        System.setProperty("namenode", configuration.get("fs.default.name"));
        System.setProperty("junit.hadoop.conf", confFile.getPath());
        pigServer = new PigServer(ExecType.MAPREDUCE, ConfigurationUtil.toProperties(configuration));
        // Hive support is stubbed out in the original source:
        /*
        hiveServer = createHiveServer();
        new Thread(new Runnable() {
            @Override
            public void run() {
                hiveServer.serve();
            }
        }).start();
        hiveClient = createHiveClient();
        */
        // Walk up from the DataNode storage directory to locate the build and
        // project directories, then seed HDFS with the local fixture data.
        buildDirectory = new File(miniDFSCluster.getDataDirectory()).getParentFile().getParentFile()
                .getParentFile().getParentFile();
        projectDirectory = buildDirectory.getParentFile();
        importHDFSDirectory(localRoot.getFile());
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}
From source file: com.shopzilla.hadoop.testing.hdfs.DFSCluster.java
License: Apache License

@PostConstruct
public DFSCluster start() {
    try {
        miniDFSCluster = new MiniDFSCluster(configuration, numberOfDataNodes, true, null);
        buildDirectory = new File(System.getProperty("user.dir"), "build");
        projectDirectory = buildDirectory.getParentFile();
        if (localRoot != null) {
            importHDFSDirectory(new Path(localRoot.getName()), localRoot);
        }
        return this;
    } catch (final IOException ex) {
        throw new RuntimeException(ex);
    }
}
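Both lifecycle beans above start the cluster in @PostConstruct, but the excerpts omit teardown; a minimal sketch of the matching hook (the method name stop is an assumption, not taken from the source):

@PreDestroy
public void stop() {
    // Hypothetical teardown counterpart: release ports and storage
    // directories when the container context closes.
    if (miniDFSCluster != null) {
        miniDFSCluster.shutdown();
        miniDFSCluster = null;
    }
}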