Example usage for org.apache.hadoop.hdfs MiniDFSCluster getFileSystem

Introduction

On this page you can find example usage for org.apache.hadoop.hdfs MiniDFSCluster getFileSystem.

Prototype

public DistributedFileSystem getFileSystem() throws IOException 

Document

Get a client handle to the DFS cluster with a single namenode.
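
Before the full examples below, here is a minimal, self-contained sketch of the typical pattern (not taken from any of the source files below; the class name and the /example path are illustrative): build a single-namenode cluster, call getFileSystem() to obtain the DistributedFileSystem client handle, and shut the cluster down in a finally block.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsGetFileSystemSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Start an in-process HDFS cluster with one namenode and one datanode
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
            cluster.waitActive();
            // Client handle to the single-namenode DFS cluster
            DistributedFileSystem fs = cluster.getFileSystem();
            fs.mkdirs(new Path("/example"));
            System.out.println("/example exists: " + fs.exists(new Path("/example")));
        } finally {
            cluster.shutdown();
        }
    }
}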

Usage

From source file: io.hops.transaction.lock.TestInodeLock.java

License: Apache License

@Test
public void testInodeLockWithWrongPath() throws IOException {
    final Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        final MiniDFSCluster clusterFinal = cluster;
        final DistributedFileSystem hdfs = cluster.getFileSystem();

        hdfs.mkdirs(new Path("/tmp"));
        DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);

        new HopsTransactionalRequestHandler(HDFSOperationType.TEST) {
            @Override
            public void acquireLock(TransactionLocks locks) throws IOException {
                LockFactory lf = LockFactory.getInstance();
                INodeLock il = lf.getINodeLock(TransactionLockTypes.INodeLockType.READ_COMMITTED,
                        TransactionLockTypes.INodeResolveType.PATH, new String[] { "/tmp/f1", "/tmp/f2" })
                        .setNameNodeID(clusterFinal.getNameNode().getId())
                        .setActiveNameNodes(clusterFinal.getNameNode().getActiveNameNodes().getActiveNodes())
                        .skipReadingQuotaAttr(true);
                locks.add(il);

            }

            @Override
            public Object performTask() throws IOException {
                return null;
            }
        }.handle();

    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

From source file: org.apache.accumulo.core.conf.CredentialProviderFactoryShimTest.java

License: Apache License

@Test
public void extractFromHdfs() throws Exception {
    File target = new File(System.getProperty("user.dir"), "target");
    String prevValue = System.setProperty("test.build.data",
            new File(target, this.getClass().getName() + "_minidfs").toString());
    MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(new Configuration()).build();
    try {
        if (null != prevValue) {
            System.setProperty("test.build.data", prevValue);
        } else {
            System.clearProperty("test.build.data");
        }

        // One namenode, One configuration
        Configuration dfsConfiguration = dfsCluster.getConfiguration(0);
        Path destPath = new Path("/accumulo.jceks");
        FileSystem dfs = dfsCluster.getFileSystem();
        // Put the populated keystore in hdfs
        dfs.copyFromLocalFile(new Path(populatedKeyStore.toURI()), destPath);

        Configuration cpConf = CredentialProviderFactoryShim.getConfiguration(dfsConfiguration,
                "jceks://hdfs/accumulo.jceks");

        // The values in the keystore
        Map<String, String> expectations = new HashMap<>();
        expectations.put("key1", "value1");
        expectations.put("key2", "value2");

        checkCredentialProviders(cpConf, expectations);
    } finally {
        dfsCluster.shutdown();
    }
}

From source file: org.apache.blur.HdfsMiniClusterUtil.java

License: Apache License

public static void shutdownDfs(MiniDFSCluster cluster) {
    if (cluster != null) {
        LOG.info("Shutting down Mini DFS ");
        try {
            cluster.shutdown();
        } catch (Exception e) {
            // A java.lang.reflect.UndeclaredThrowableException can be thrown
            // here because of an InterruptedException. Don't let exceptions
            // here cause a test failure.
        }
        try {
            FileSystem fs = cluster.getFileSystem();
            if (fs != null) {
                LOG.info("Shutting down FileSystem");
                fs.close();
            }
            FileSystem.closeAll();
        } catch (IOException e) {
            LOG.error("error closing file system", e);
        }

        // This has got to be one of the worst hacks I have ever had to do.
        // This is needed to shutdown 2 thread pools that are not shutdown by
        // themselves.
        ThreadGroup threadGroup = Thread.currentThread().getThreadGroup();
        Thread[] threads = new Thread[100];
        int enumerate = threadGroup.enumerate(threads);
        for (int i = 0; i < enumerate; i++) {
            Thread thread = threads[i];
            if (thread.getName().startsWith("pool")) {
                if (thread.isAlive()) {
                    thread.interrupt();
                    LOG.info("Stopping ThreadPoolExecutor [" + thread.getName() + "]");
                    Object target = getField(Thread.class, thread, "target");
                    if (target != null) {
                        ThreadPoolExecutor e = (ThreadPoolExecutor) getField(ThreadPoolExecutor.class, target,
                                "this$0");
                        if (e != null) {
                            e.shutdownNow();
                        }
                    }
                    try {
                        LOG.info("Waiting for thread pool to exit [" + thread.getName() + "]");
                        thread.join();
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        }
    }
}

From source file: org.apache.flume.sink.kite.TestDatasetSink.java

License: Apache License

@Test
public void testMiniClusterStore() throws EventDeliveryException, IOException {
    // setup a minicluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()).build();
    DatasetRepository hdfsRepo = null;
    try {
        FileSystem dfs = cluster.getFileSystem();
        Configuration conf = dfs.getConf();
        String repoURI = "repo:" + conf.get("fs.defaultFS") + "/tmp/repo";

        // create a repository and dataset in HDFS
        hdfsRepo = DatasetRepositories.open(repoURI);
        hdfsRepo.create(DATASET_NAME, DESCRIPTOR);

        // update the config to use the HDFS repository
        config.put(DatasetSinkConstants.CONFIG_KITE_REPO_URI, repoURI);

        DatasetSink sink = sink(in, config);

        // run the sink
        sink.start();
        sink.process();
        sink.stop();

        Assert.assertEquals(Sets.newHashSet(expected), read(hdfsRepo.<GenericData.Record>load(DATASET_NAME)));
        Assert.assertEquals("Should have committed", 0, remaining(in));

    } finally {
        if (hdfsRepo != null && hdfsRepo.exists(DATASET_NAME)) {
            hdfsRepo.delete(DATASET_NAME);
        }
        cluster.shutdown();
    }
}

From source file: org.apache.hama.HamaTestCase.java

License: Apache License

/**
 * Common method to close down a MiniDFSCluster and the associated file system
 *
 * @param cluster
 */
public static void shutdownDfs(MiniDFSCluster cluster) {
    if (cluster != null) {
        LOG.info("Shutting down Mini DFS ");
        try {
            cluster.shutdown();
        } catch (Exception e) {
            // A java.lang.reflect.UndeclaredThrowableException can be thrown
            // here because of an InterruptedException. Don't let exceptions
            // here cause a test failure.
        }
        try {
            FileSystem fs = cluster.getFileSystem();
            if (fs != null) {
                LOG.info("Shutting down FileSystem");
                fs.close();
            }
            FileSystem.closeAll();
        } catch (IOException e) {
            LOG.error("error closing file system", e);
        }
    }
}

From source file: org.apache.hive.service.server.TestHS2ClearDanglingScratchDir.java

License: Apache License

@Test
public void testScratchDirCleared() throws Exception {
    MiniDFSCluster m_dfs = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).format(true).build();
    HiveConf conf = new HiveConf();
    conf.addResource(m_dfs.getConfiguration(0));
    if (Shell.WINDOWS) {
        WindowsPathUtil.convertPathsFromWindowsToHdfs(conf);
    }
    conf.set(HiveConf.ConfVars.HIVE_SCRATCH_DIR_LOCK.toString(), "true");
    conf.set(HiveConf.ConfVars.HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR.toString(), "true");

    Path scratchDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR));
    m_dfs.getFileSystem().mkdirs(scratchDir);
    m_dfs.getFileSystem().setPermission(scratchDir, new FsPermission("777"));

    // Fake two live sessions
    SessionState.start(conf);
    conf.setVar(HiveConf.ConfVars.HIVESESSIONID, UUID.randomUUID().toString());
    SessionState.start(conf);

    // Fake a dead session
    Path fakeSessionPath = new Path(new Path(scratchDir, Utils.getUGI().getShortUserName()),
            UUID.randomUUID().toString());
    m_dfs.getFileSystem().mkdirs(fakeSessionPath);
    m_dfs.getFileSystem().create(new Path(fakeSessionPath, "inuse.lck")).close();

    FileStatus[] scratchDirs = m_dfs.getFileSystem()
            .listStatus(new Path(scratchDir, Utils.getUGI().getShortUserName()));

    Assert.assertEquals(scratchDirs.length, 3);

    HiveServer2.scheduleClearDanglingScratchDir(conf, 0);

    // Check dead session get cleared
    long start = System.currentTimeMillis();
    long end;
    do {
        Thread.sleep(200);
        end = System.currentTimeMillis();
        if (end - start > 5000) {
            Assert.fail("timeout, scratch dir has not been cleared");
        }
        scratchDirs = m_dfs.getFileSystem().listStatus(new Path(scratchDir, Utils.getUGI().getShortUserName()));
    } while (scratchDirs.length != 2);
}

From source file: org.apache.sentry.binding.solr.TestSolrAuthzBinding.java

License: Apache License

/**
 * Test that when the resource is put on HDFS and the scheme of the resource is not set,
 * the resource can be found if fs.defaultFS is specified.
 */
@Test
public void testResourceWithSchemeNotSet() throws Exception {
    SolrAuthzConf solrAuthzConf = new SolrAuthzConf(
            Collections.singletonList(Resources.getResource("sentry-site.xml")));
    setUsableAuthzConf(solrAuthzConf);

    MiniDFSCluster dfsCluster = HdfsTestUtil.setupClass(new File(Files.createTempDir(),
            TestSolrAuthzBinding.class.getName() + "_" + System.currentTimeMillis()).getAbsolutePath());
    String resourceOnHDFS = "/hdfs" + File.separator + UUID.randomUUID() + File.separator
            + "test-authz-provider.ini";
    SolrAuthzBinding binding = null;
    try {
        // Copy resource to HDFS
        dfsCluster.getFileSystem().copyFromLocalFile(false, new Path(baseDir.getPath(), RESOURCE_PATH),
                new Path(resourceOnHDFS));
        solrAuthzConf.set(AuthzConfVars.AUTHZ_PROVIDER_RESOURCE.getVar(), resourceOnHDFS);
        // set HDFS as the defaultFS so the resource will be found
        solrAuthzConf.set("fs.defaultFS", dfsCluster.getFileSystem().getConf().get("fs.defaultFS"));
        binding = new SolrAuthzBinding(solrAuthzConf);
    } finally {
        if (binding != null) {
            binding.close();
        }
        if (dfsCluster != null) {
            HdfsTestUtil.teardownClass(dfsCluster);
        }
    }
}

From source file: org.apache.tajo.storage.TestByteBufLineReader.java

License: Apache License

@Test
public void testReaderWithDFS() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitClusterUp();

    TajoConf tajoConf = new TajoConf(conf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    Path tablePath = new Path("/testReaderWithDFS");
    Path filePath = new Path(tablePath, "data.dat");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();
        FSDataOutputStream out = fs.create(filePath, true);
        out.write(LINE.getBytes(Charset.defaultCharset()));
        out.write('\n');
        out.close();

        assertTrue(fs.exists(filePath));
        FSDataInputStream inputStream = fs.open(filePath);
        assertTrue(inputStream.getWrappedStream() instanceof ByteBufferReadable);

        ByteBufLineReader lineReader = new ByteBufLineReader(new FSDataInputChannel(inputStream));
        assertEquals(LINE, lineReader.readLine());
        lineReader.seek(0);
        assertEquals(LINE, lineReader.readLine());
        assertNull(lineReader.readLine());

        lineReader.close();
        fs.close();
    } finally {
        cluster.shutdown(true);
    }
}

From source file: org.apache.tajo.storage.TestFileStorageManager.java

License: Apache License

@Test
public void testGetSplit() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, false);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitClusterUp();
    TajoConf tajoConf = new TajoConf(conf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    int testCount = 10;
    Path tablePath = new Path("/testGetSplit");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test partitions
        List<Path> partitions = Lists.newArrayList();
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, String.valueOf(i));
            DFSTestUtil.createFile(fs, new Path(tmpFile, "tmpfile.dat"), 10, (short) 2, 0xDEADDEADl);
            partitions.add(tmpFile);
        }

        assertTrue(fs.exists(tablePath));
        FileStorageManager sm = (FileStorageManager) StorageManager.getFileStorageManager(tajoConf);
        assertEquals(fs.getUri(), sm.getFileSystem().getUri());

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

        List<Fragment> splits = Lists.newArrayList();
        // Get FileFragments in partition batch
        splits.addAll(sm.getSplits("data", meta, schema, partitions.toArray(new Path[partitions.size()])));
        assertEquals(testCount, splits.size());
        // -1 is unknown volumeId
        assertEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);

        splits.clear();
        splits.addAll(sm.getSplits("data", meta, schema,
                partitions.subList(0, partitions.size() / 2).toArray(new Path[partitions.size() / 2])));
        assertEquals(testCount / 2, splits.size());
        assertEquals(1, splits.get(0).getHosts().length);
        assertEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown(true);
    }
}

From source file: org.apache.tajo.storage.TestFileStorageManager.java

License: Apache License

@Test
public void testGetSplitWithBlockStorageLocationsBatching() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitClusterUp();

    TajoConf tajoConf = new TajoConf(conf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    int testCount = 10;
    Path tablePath = new Path("/testGetSplitWithBlockStorageLocationsBatching");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test files
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, "tmpfile" + i + ".dat");
            DFSTestUtil.createFile(fs, tmpFile, 10, (short) 2, 0xDEADDEADl);
        }
        assertTrue(fs.exists(tablePath));
        FileStorageManager sm = (FileStorageManager) StorageManager.getFileStorageManager(tajoConf);
        assertEquals(fs.getUri(), sm.getFileSystem().getUri());

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

        List<Fragment> splits = Lists.newArrayList();
        splits.addAll(sm.getSplits("data", meta, schema, tablePath));

        assertEquals(testCount, splits.size());
        assertEquals(2, splits.get(0).getHosts().length);
        assertEquals(2, ((FileFragment) splits.get(0)).getDiskIds().length);
        assertNotEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown(true);
    }
}