Example usage for org.apache.hadoop.hdfs MiniDFSCluster getFileSystem

Introduction

On this page you can find example usage for org.apache.hadoop.hdfs MiniDFSCluster.getFileSystem().

Prototype

public DistributedFileSystem getFileSystem() throws IOException 

Document

Get a client handle to the DFS cluster with a single namenode.
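
A minimal, self-contained sketch of the call (not taken from the examples below; the class name and the /example path are illustrative, and MiniDFSCluster requires the hadoop-hdfs test artifact on the classpath):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class GetFileSystemSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Start a single-namenode mini cluster backed by local directories
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
            cluster.waitActive();
            // getFileSystem() returns a client handle to the cluster's DFS
            DistributedFileSystem fs = cluster.getFileSystem();
            fs.mkdirs(new Path("/example"));
        } finally {
            cluster.shutdown();
        }
    }
}

The handle is only valid while the cluster is running, which is why the examples below shut the cluster down in a finally block.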

Usage

From source file: PerformanceEvaluation.java

License: Apache License

private void runTest(final Class<? extends Test> cmd)
        throws IOException, InterruptedException, ClassNotFoundException {
    MiniHBaseCluster hbaseMiniCluster = null;
    MiniDFSCluster dfsCluster = null;
    MiniZooKeeperCluster zooKeeperCluster = null;
    if (this.miniCluster) {
        dfsCluster = new MiniDFSCluster(conf, 2, true, (String[]) null);
        zooKeeperCluster = new MiniZooKeeperCluster();
        int zooKeeperPort = zooKeeperCluster.startup(new File(System.getProperty("java.io.tmpdir")));

        // mangle the conf so that the fs parameter points to the minidfs we
        // just started up
        FileSystem fs = dfsCluster.getFileSystem();
        conf.set("fs.default.name", fs.getUri().toString());
        conf.set("hbase.zookeeper.property.clientPort", Integer.toString(zooKeeperPort));
        Path parentdir = fs.getHomeDirectory();
        conf.set(HConstants.HBASE_DIR, parentdir.toString());
        fs.mkdirs(parentdir);
        FSUtils.setVersion(fs, parentdir);
        hbaseMiniCluster = new MiniHBaseCluster(this.conf, N);
    }

    try {
        if (N == 1) {
            // If there is only one client and one HRegionServer, we assume nothing
            // has been set up at all.
            runNIsOne(cmd);
        } else {
            // Else, run
            runNIsMoreThanOne(cmd);
        }
    } finally {
        if (this.miniCluster) {
            if (hbaseMiniCluster != null)
                hbaseMiniCluster.shutdown();
            if (zooKeeperCluster != null)
                zooKeeperCluster.shutdown();
            HBaseTestCase.shutdownDfs(dfsCluster);
        }
    }
}

From source file: a.TestConcatExample.java

License: Apache License

@Test
public void concatIsPermissive() throws IOException, URISyntaxException {
    MiniDFSCluster cluster = null;
    final Configuration conf = WebHdfsTestUtil.createConf();
    conf.set("dfs.namenode.fs-limits.min-block-size", "1000"); // Allow tiny blocks for the test
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        cluster.waitActive();
        final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
        final FileSystem dfs = cluster.getFileSystem();

        final FileSystem fs = dfs; // WebHDFS has a bug in getLocatedBlocks

        Path root = new Path("/dir");
        fs.mkdirs(root);

        short origRep = 3;
        short secondRep = (short) (origRep - 1);
        Path f1 = new Path("/dir/f1");
        long size1 = writeFile(fs, f1, /* blocksize */ 4096, origRep, 5);
        long f1NumBlocks = fs.getFileBlockLocations(f1, 0, size1).length;
        assertEquals(5, f1NumBlocks);

        Path f2 = new Path("/dir/f2");
        long size2 = writeFile(fs, f2, /* blocksize (must be a multiple of 512 for the checksum) */ 4096 - 512, secondRep, 4);
        long f2NumBlocks = fs.getFileBlockLocations(f2, 0, size2).length;
        assertEquals(5, f2NumBlocks);

        fs.concat(f1, new Path[] { f2 });
        FileStatus[] fileStatuses = fs.listStatus(root);

        // Only one file should remain
        assertEquals(1, fileStatuses.length);
        FileStatus fileStatus = fileStatuses[0];

        // And it should be named after the first file
        assertEquals("f1", fileStatus.getPath().getName());

        // The entire file takes the replication of the first argument
        assertEquals(origRep, fileStatus.getReplication());

        // As expected, the new concatenated file is the combined length of the two previous files
        assertEquals(size1 + size2, fileStatus.getLen());

        // And we should have the same number of blocks
        assertEquals(f1NumBlocks + f2NumBlocks,
                fs.getFileBlockLocations(fileStatus.getPath(), 0, size1 + size2).length);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

From source file: backup.integration.MiniClusterTestBase.java

License: Apache License

@Test
public void testIntegrationBasic() throws Exception {
    File hdfsDir = setupHdfsLocalDir();
    Configuration conf = setupConfig(hdfsDir);

    MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(conf).build();
    Thread thread = null;
    try {
        DistributedFileSystem fileSystem = hdfsCluster.getFileSystem();
        Path path = new Path("/testing.txt");
        writeFile(fileSystem, path);
        Thread.sleep(TimeUnit.SECONDS.toMillis(5));
        AtomicBoolean success = new AtomicBoolean(false);
        thread = new Thread(new Runnable() {
            @Override
            public void run() {
                boolean beginTest = true;
                while (true) {
                    try {
                        try (ByteArrayOutputStream output = new ByteArrayOutputStream()) {
                            try (FSDataInputStream inputStream = fileSystem.open(path)) {
                                IOUtils.copy(inputStream, output);
                            }
                            if (beginTest) {
                                hdfsCluster.startDataNodes(conf, 1, true, null, null);
                                hdfsCluster.stopDataNode(0);
                                beginTest = false;
                            } else {
                                LOG.info("Missing block restored.");
                                success.set(true);
                                return;
                            }
                        }
                    } catch (IOException e) {
                        LOG.error(e.getMessage());
                    }
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException e) {
                        return;
                    }
                }
            }
        });
        thread.start();
        thread.join(TimeUnit.MINUTES.toMillis(2));
        if (!success.get()) {
            fail();
        }
    } finally {
        if (thread != null) {
            thread.interrupt();
        }
        hdfsCluster.shutdown();
        destroyBackupStore(conf);
    }
}

From source file: backup.integration.MiniClusterTestBase.java

License: Apache License

@Test
public void testIntegrationBasicFullRestoreFromShutdown() throws Exception {
    File hdfsDir = setupHdfsLocalDir();
    Configuration conf = setupConfig(hdfsDir);
    {
        MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(conf).build();
        try {
            DistributedFileSystem fileSystem = hdfsCluster.getFileSystem();
            for (int i = 0; i < 5; i++) {
                Path path = new Path("/testing." + i + ".txt");
                System.out.println("Adding path " + path);
                writeFile(fileSystem, path);
            }

            Thread.sleep(TimeUnit.SECONDS.toMillis(3));

            hdfsCluster.stopDataNode(0);

            // Remove data
            FileUtils.deleteDirectory(new File(hdfsDir, "data"));

            hdfsCluster.startDataNodes(conf, 1, true, null, null);

            NameNode nameNode = hdfsCluster.getNameNode();
            for (int i = 0; i < 90; i++) {
                if (!nameNode.isInSafeMode()) {
                    return;
                }
                System.out.println(nameNode.getState() + " " + nameNode.isInSafeMode());
                Thread.sleep(1000);
            }
            fail();
        } finally {
            hdfsCluster.shutdown();
            destroyBackupStore(conf);
        }
    }
}

From source file: backup.integration.MiniClusterTestBase.java

License: Apache License

@Test
public void testIntegrationBlockCheckWhenAllBackupStoreBlocksMissing() throws Exception {
    File hdfsDir = setupHdfsLocalDir();
    Configuration conf = setupConfig(hdfsDir);

    MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(conf).build();
    Thread thread = null;
    try (BackupStore backupStore = BackupStore.create(BackupUtil.convert(conf))) {
        DistributedFileSystem fileSystem = hdfsCluster.getFileSystem();
        Path path = new Path("/testing.txt");
        writeFile(fileSystem, path);
        Thread.sleep(TimeUnit.SECONDS.toMillis(10));

        Set<ExtendedBlock> original = getLastGeneration(toSet(backupStore.getExtendedBlocks()));
        destroyBackupStoreBlocks(backupStore);

        NameNode nameNode = hdfsCluster.getNameNode();
        NameNodeRestoreProcessor processor = SingletonManager.getManager(NameNodeRestoreProcessor.class)
                .getInstance(nameNode);
        processor.runBlockCheck();

        Thread.sleep(TimeUnit.SECONDS.toMillis(5));

        Set<ExtendedBlock> current = toSet(backupStore.getExtendedBlocks());

        assertEquals(original, current);

    } finally {
        if (thread != null) {
            thread.interrupt();
        }
        hdfsCluster.shutdown();
        destroyBackupStore(conf);
    }
}

From source file: backup.integration.MiniClusterTestBase.java

License: Apache License

@Test
public void testIntegrationBlockCheckWhenSomeBackupStoreBlocksMissing() throws Exception {
    File hdfsDir = setupHdfsLocalDir();
    Configuration conf = setupConfig(hdfsDir);

    MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(conf).build();
    Thread thread = null;
    try (BackupStore backupStore = BackupStore.create(BackupUtil.convert(conf))) {
        DistributedFileSystem fileSystem = hdfsCluster.getFileSystem();
        writeFile(fileSystem, new Path("/testing1.txt"));
        writeFile(fileSystem, new Path("/testing2.txt"));
        writeFile(fileSystem, new Path("/testing3.txt"));
        Thread.sleep(TimeUnit.SECONDS.toMillis(10));

        Set<ExtendedBlock> original = getLastGeneration(toSet(backupStore.getExtendedBlocks()));
        destroyOneBackupStoreBlock(backupStore);

        NameNode nameNode = hdfsCluster.getNameNode();

        NameNodeRestoreProcessor processor = SingletonManager.getManager(NameNodeRestoreProcessor.class)
                .getInstance(nameNode);
        processor.runBlockCheck();

        Thread.sleep(TimeUnit.SECONDS.toMillis(5));

        Set<ExtendedBlock> current = toSet(backupStore.getExtendedBlocks());

        for (ExtendedBlock eb : original) {
            System.out.println("ORIGINAL=" + eb);
        }

        for (ExtendedBlock eb : current) {
            System.out.println("CURRENT=" + eb);
        }

        assertEquals(original, current);

    } finally {
        if (thread != null) {
            thread.interrupt();
        }
        hdfsCluster.shutdown();
        destroyBackupStore(conf);
    }
}

From source file: co.cask.cdap.common.logging.LogCollectorTest.java

License: Apache License

@Test
public void testCollectionDFS() throws IOException {

    MiniDFSCluster dfsCluster = null;

    try {
        File dfsPath = tempFolder.newFolder();
        System.setProperty("test.build.data", dfsPath.toString());
        System.setProperty("test.cache.data", dfsPath.toString());

        System.err.println("Starting up Mini HDFS cluster...");
        Configuration hConf = new Configuration();
        CConfiguration conf = CConfiguration.create();
        //conf.setInt("dfs.block.size", 1024*1024);
        dfsCluster = new MiniDFSCluster.Builder(hConf).nameNodePort(0).numDataNodes(1).format(true)
                .manageDataDfsDirs(true).manageNameDfsDirs(true).build();
        dfsCluster.waitClusterUp();
        System.err.println("Mini HDFS is started.");

        // Add HDFS info to conf
        hConf.set("fs.defaultFS", dfsCluster.getFileSystem().getUri().toString());
        // set a root directory for log collection
        conf.set(Constants.CFG_LOG_COLLECTION_ROOT, "/logtemp");

        testCollection(conf, hConf);
    } finally {
        if (dfsCluster != null) {
            System.err.println("Shutting down Mini HDFS cluster...");
            dfsCluster.shutdown();
            System.err.println("Mini HDFS is shut down.");
        }
    }
}

From source file: co.nubetech.hiho.mapreduce.lib.input.TestFileStreamInputFormat.java

License: Apache License

@Test
public void testNumInputs() throws Exception {
    Configuration conf = new Configuration();
    JobConf job = new JobConf(conf);
    MiniDFSCluster dfs = newDFSCluster(job);
    FileSystem fs = dfs.getFileSystem();
    System.out.println("FileSystem " + fs.getUri());
    Path inputDir = new Path("/foo/");
    final int numFiles = 10;
    String fileNameBase = "part-0000";

}

From source file: com.github.cbismuth.spark.utils.cluster.HadoopFactory.java

License: Open Source License

public FileSystem fileSystem(final MiniDFSCluster cluster) throws IOException {
    return cluster.getFileSystem();
}

From source file: com.github.stephenc.javaisotools.loopfs.iso9660.Iso9660FileSystemTest.java

License: Open Source License

@Test
public void hdfsSmokes() throws Exception {
    assumeTrue(isNotWindows());
    // Create a MiniDFSCluster, since the default local file system does not return a seekable stream
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(new Configuration());
    MiniDFSCluster hdfsCluster = builder.build();
    String hdfsTestFile = "hdfs://127.0.0.1:" + hdfsCluster.getNameNodePort() + "/test/" + filePath;
    hdfsCluster.getFileSystem().copyFromLocalFile(new Path(filePath), new Path(hdfsTestFile));
    InputStream is = hdfsCluster.getFileSystem().open(new Path(hdfsTestFile));
    Iso9660FileSystem image = new Iso9660FileSystem(new SeekableInputFileHadoop(is), true);
    this.runCheck(image);
    hdfsCluster.shutdown();
}