Example usage for org.apache.hadoop.hdfs MiniDFSCluster HDFS_MINIDFS_BASEDIR

Introduction

This page lists example usages of org.apache.hadoop.hdfs MiniDFSCluster HDFS_MINIDFS_BASEDIR.

Prototype

String HDFS_MINIDFS_BASEDIR

Document

Configuration option to set the data dir:
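
All of the examples below follow the same pattern: set HDFS_MINIDFS_BASEDIR on a Configuration before building the MiniDFSCluster, so the NameNode and DataNode storage lands under a test-owned directory instead of the default build/test/data. A minimal, self-contained sketch of that pattern, assuming hadoop-hdfs and its test artifact are on the classpath (the class name and the ./target/test-hdfs directory are illustrative choices, not taken from the examples):

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsBaseDirExample {
    public static void main(String[] args) throws Exception {
        // Redirect the mini cluster's NameNode/DataNode storage to a
        // throwaway directory under the build output.
        File baseDir = new File("./target/test-hdfs").getAbsoluteFile();

        Configuration conf = new Configuration();
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
            cluster.waitActive(); // block until the cluster is up
            FileSystem fs = cluster.getFileSystem();
            System.out.println("Mini DFS running at " + fs.getUri());
        } finally {
            cluster.shutdown(); // release ports and storage directories
        }
    }
}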

Usage

From source file:jadoop.HadoopGridJobWithClusterTest.java

License:Open Source License

@BeforeClass
public static void startCluster() throws Exception {

    // Get the simple name of this test class.
    String testName = Thread.currentThread().getStackTrace()[1].getClassName();
    testName = testName.substring(testName.lastIndexOf('.') + 1);

    conf = new HdfsConfiguration();

    baseDir = new File("./target/hdfs/" + testName).getAbsoluteFile();

    File f = new File("./target");
    removeDirectory(f);

    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    cluster = builder.build();

    fs = FileSystem.get(conf);
}

From source file:org.apache.ambari.view.filebrowser.FilebrowserTest.java

License:Apache License

@Before
public void setUp() throws Exception {
    handler = createNiceMock(ViewResourceHandler.class);
    context = createNiceMock(ViewContext.class);
    httpHeaders = createNiceMock(HttpHeaders.class);
    uriInfo = createNiceMock(UriInfo.class);

    properties = new HashMap<String, String>();
    File baseDir = new File("./target/hdfs/" + "FilebrowserTest").getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");

    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();
    String hdfsURI = hdfsCluster.getURI() + "/";
    properties.put("webhdfs.url", hdfsURI);
    expect(context.getProperties()).andReturn(properties).anyTimes();
    expect(context.getUsername()).andReturn(System.getProperty("user.name")).anyTimes();
    replay(handler, context, httpHeaders, uriInfo);
    fileBrowserService = getService(FileBrowserService.class, handler, context);

    FileOperationService.MkdirRequest request = new FileOperationService.MkdirRequest();
    request.path = "/tmp";
    fileBrowserService.fileOps().mkdir(request);
}

From source file:org.apache.ambari.view.hive.HDFSTest.java

License:Apache License

@BeforeClass
public static void startUp() throws Exception {
    BaseHiveTest.startUp(); // super
    File hdfsDir = new File("./target/HiveTest/hdfs/").getAbsoluteFile();
    FileUtil.fullyDelete(hdfsDir);

    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsDir.getAbsolutePath());
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");

    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();
    hdfsURI = hdfsCluster.getURI().toString();
}

From source file:org.apache.ambari.view.pig.HDFSTest.java

License:Apache License

@BeforeClass
public static void startUp() throws Exception {
    BasePigTest.startUp(); // super
    File hdfsDir = new File("./target/PigTest/hdfs/").getAbsoluteFile();
    FileUtil.fullyDelete(hdfsDir);

    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsDir.getAbsolutePath());
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");

    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();
    hdfsURI = hdfsCluster.getURI().toString();
    hdfsCluster.getFileSystem().mkdir(new Path("/tmp"), FsPermission.getDefault());
    hdfsCluster.getFileSystem().setPermission(new Path("/tmp"),
            new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
}

From source file:org.apache.asterix.aoya.test.YARNCluster.java

License:Apache License

/**
 * Instantiates the (Mini) DFS Cluster with the configured number of datanodes.
 * Post instantiation, data is loaded to HDFS.
 * Called prior to running the Runtime test suite.
 */
public void setup() throws Exception {
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/core-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/mapred-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/hdfs-site.xml"));
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "target/integrationts/data");
    cleanupLocal();
    // this constructor is deprecated in Hadoop 2.x
    //dfsCluster = new MiniDFSCluster(nameNodePort, conf, numDataNodes, true, true, StartupOption.REGULAR, null);
    miniCluster = new MiniYARNCluster("Asterix_testing", numDataNodes, 1, 1);
    miniCluster.init(conf);
}

From source file:org.apache.beam.sdk.io.hdfs.HadoopFileSystemRegistrarTest.java

License:Apache License

@Before
public void setUp() throws Exception {
    configuration = new Configuration();
    configuration.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tmpFolder.getRoot().getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(configuration);
    hdfsCluster = builder.build();
    hdfsClusterBaseUri = new URI(configuration.get("fs.defaultFS") + "/");
}

From source file:org.apache.beam.sdk.io.hdfs.HadoopFileSystemTest.java

License:Apache License

@Before
public void setUp() throws Exception {
    Configuration configuration = new Configuration();
    configuration.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tmpFolder.getRoot().getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(configuration);
    hdfsCluster = builder.build();
    hdfsClusterBaseUri = new URI(configuration.get("fs.defaultFS") + "/");
    fileSystem = new HadoopFileSystem(Objects.requireNonNull(hdfsClusterBaseUri).getScheme(), configuration);
}

From source file:org.apache.beam.sdk.io.hdfs.HadoopResourceIdTest.java

License:Apache License

@Before
public void setUp() throws Exception {
    Configuration configuration = new Configuration();
    configuration.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tmpFolder.getRoot().getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(configuration);
    hdfsCluster = builder.build();
    hdfsClusterBaseUri = new URI(configuration.get("fs.defaultFS") + "/");

    // Register HadoopFileSystem for this test.
    HadoopFileSystemOptions options = PipelineOptionsFactory.as(HadoopFileSystemOptions.class);
    options.setHdfsConfiguration(Collections.singletonList(configuration));
    FileSystems.setDefaultPipelineOptions(options);
}

From source file:org.apache.coheigea.bigdata.hdfs.HDFSAccessControlEnforcerTest.java

License:Apache License

@org.junit.BeforeClass
public static void setup() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    conf.set("dfs.namenode.inode.attributes.provider.class", CustomINodeAttributeProvider.class.getName());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();
    defaultFs = conf.get("fs.defaultFS");
}

From source file:org.apache.coheigea.bigdata.hdfs.HDFSTest.java

License:Apache License

@org.junit.BeforeClass
public static void setup() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();
    defaultFs = conf.get("fs.defaultFS");
}