Example usage for org.apache.hadoop.hdfs DistributedFileSystem mkdirs

Introduction

On this page you can find example usages of org.apache.hadoop.hdfs.DistributedFileSystem#mkdirs.

Prototype

public boolean mkdirs(Path f) throws IOException 

Document

Call #mkdirs(Path, FsPermission) with default permission.
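Before the real-world examples, here is a minimal, self-contained sketch of the call. It is hypothetical: the NameNode URI and the paths below are placeholders, and the cast assumes fs.defaultFS points at an HDFS cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class MkdirsExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:8020"); // placeholder NameNode address
        try (FileSystem raw = FileSystem.get(conf)) {
            // The cast only succeeds when the default filesystem is HDFS.
            DistributedFileSystem dfs = (DistributedFileSystem) raw;
            // Creates /example/dir and any missing parents; returns true on success.
            boolean created = dfs.mkdirs(new Path("/example/dir"));
            // Equivalent, with the permission passed explicitly rather than defaulted.
            dfs.mkdirs(new Path("/example/dir2"), new FsPermission((short) 0755));
            System.out.println("created: " + created);
        }
    }
}

All of the examples below boil down to this pattern: obtain a DistributedFileSystem (usually from a mini-cluster in tests) and pass a Path to mkdirs.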

Usage

From source file:alluxio.underfs.hdfs.LocalMiniDFSCluster.java

License:Apache License

/**
 * Tests the local minidfscluster only.
 */
public static void main(String[] args) throws Exception {
    LocalMiniDFSCluster cluster = null;
    try {
        cluster = new LocalMiniDFSCluster("/tmp/dfs", 1, 54321);
        cluster.start();
        System.out.println("Address of local minidfscluster: " + cluster.getUnderFilesystemAddress());
        Thread.sleep(10);
        DistributedFileSystem dfs = cluster.getDFSClient();
        dfs.mkdirs(new Path("/1"));
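        // Note: the bare mkdirs(String) call below presumably resolves to a static
        // helper on LocalMiniDFSCluster itself, not to DistributedFileSystem#mkdirs.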
        mkdirs(cluster.getUnderFilesystemAddress() + "/1/2");
        FileStatus[] fs = dfs.listStatus(new Path(AlluxioURI.SEPARATOR));
        assert fs.length != 0;
        System.out.println(fs[0].getPath().toUri());
        dfs.close();

        cluster.shutdown();

        cluster = new LocalMiniDFSCluster("/tmp/dfs", 3);
        cluster.start();
        System.out.println("Address of local minidfscluster: " + cluster.getUnderFilesystemAddress());

        dfs = cluster.getDFSClient();
        dfs.mkdirs(new Path("/1"));

        UnderFileSystemUtils
                .touch(cluster.getUnderFilesystemAddress() + "/1" + "/_format_" + System.currentTimeMillis());
        fs = dfs.listStatus(new Path("/1"));
        assert fs.length != 0;
        System.out.println(fs[0].getPath().toUri());
        dfs.close();

        cluster.shutdown();
    } finally {
        if (cluster != null && cluster.isStarted()) {
            cluster.shutdown();
        }
    }
}

From source file:com.trace.hadoop.TestDFSRename.java

License:Apache License

public void testRenameWithQuota() throws Exception {
    DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
    Path src1 = new Path(dir, "testRenameWithQuota/srcdir/src1");
    Path src2 = new Path(dir, "testRenameWithQuota/srcdir/src2");
    Path dst1 = new Path(dir, "testRenameWithQuota/dstdir/dst1");
    Path dst2 = new Path(dir, "testRenameWithQuota/dstdir/dst2");
    createFile(fs, src1);
    createFile(fs, src2);
    fs.setQuota(src1.getParent(), FSConstants.QUOTA_DONT_SET, FSConstants.QUOTA_DONT_SET);
    fs.mkdirs(dst1.getParent());
    fs.setQuota(dst1.getParent(), FSConstants.QUOTA_DONT_SET, FSConstants.QUOTA_DONT_SET);

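    // rename(src, dst, ...) is presumably a test helper defined elsewhere in this
    // class; the two booleans encode the expected outcome (success vs. quota exception).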
    // Test1: src does not exceed quota and dst has quota to accommodate rename
    rename(src1, dst1, true, false);

    // Test2: src does not exceed quota and dst has *no* quota to accommodate
    // rename
    fs.setQuota(dst1.getParent(), 1, FSConstants.QUOTA_DONT_SET);
    rename(src2, dst2, false, true);

    // Test3: src exceeds quota and dst has *no* quota to accommodate rename
    fs.setQuota(src1.getParent(), 1, FSConstants.QUOTA_DONT_SET);
    rename(dst1, src1, false, true);
}

From source file:com.trace.hadoop.TestDFSRename.java

License:Apache License

/**
 * Performs operations such as setting quotas, deleting files, and renaming,
 * and ensures the system can apply the edits log during startup.
 */
public void testEditsLog() throws Exception {
    DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
    Path src1 = new Path(dir, "testEditsLog/srcdir/src1");
    Path dst1 = new Path(dir, "testEditsLog/dstdir/dst1");
    createFile(fs, src1);
    fs.mkdirs(dst1.getParent());
    createFile(fs, dst1);

    // Set a namespace quota so that dst1's parent cannot accept new files/directories under it
    fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET);
    // Free up quota for a subsequent rename
    fs.delete(dst1, true);
    rename(src1, dst1, true, false);

    // Restart the cluster and ensure the above operations can be
    // loaded from the edits log
    restartCluster();
    fs = (DistributedFileSystem) cluster.getFileSystem();
    assertFalse(fs.exists(src1)); // ensure src1 is already renamed
    assertTrue(fs.exists(dst1)); // ensure rename dst exists
}

From source file:io.hops.transaction.lock.TestInodeLock.java

License:Apache License

@Test
public void testInodeLockWithWrongPath() throws IOException {
    final Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        final MiniDFSCluster clusterFinal = cluster;
        final DistributedFileSystem hdfs = cluster.getFileSystem();

        hdfs.mkdirs(new Path("/tmp"));
        DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);

        new HopsTransactionalRequestHandler(HDFSOperationType.TEST) {
            @Override
            public void acquireLock(TransactionLocks locks) throws IOException {
                LockFactory lf = LockFactory.getInstance();
                INodeLock il = lf.getINodeLock(TransactionLockTypes.INodeLockType.READ_COMMITTED,
                        TransactionLockTypes.INodeResolveType.PATH, new String[] { "/tmp/f1", "/tmp/f2" })
                        .setNameNodeID(clusterFinal.getNameNode().getId())
                        .setActiveNameNodes(clusterFinal.getNameNode().getActiveNameNodes().getActiveNodes())
                        .skipReadingQuotaAttr(true);
                locks.add(il);

            }

            @Override
            public Object performTask() throws IOException {
                return null;
            }
        }.handle();

    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

From source file:org.dkpro.bigdata.io.hadoop.HdfsResourceLoaderLocatorTest.java

License:Apache License

@Test
public void testDKProResourceLoading() throws Exception {
    String hdfsURI = "hdfs://localhost:" + hdfsCluster.getNameNodePort() + "/";

    DistributedFileSystem fs = hdfsCluster.getFileSystem();
    fs.mkdirs(new Path("/user/test"));
    fs.copyFromLocalFile(new Path("src/test/resources/hdfsLocator/one.data"), new Path("/user/test/"));
    fs.copyFromLocalFile(new Path("src/test/resources/hdfsLocator/two.data"), new Path("/user/test/"));

    ExternalResourceDescription hdfsResource = ExternalResourceFactory.createExternalResourceDescription(
            HdfsResourceLoaderLocator.class, HdfsResourceLoaderLocator.PARAM_FILESYSTEM, hdfsURI);

    CollectionReader reader = CollectionReaderFactory.createReader(TextReader.class,
            TextReader.KEY_RESOURCE_RESOLVER, hdfsResource, TextReader.PARAM_SOURCE_LOCATION, "hdfs:/user/test",
            TextReader.PARAM_PATTERNS, "*.data");

    List<String> documents = readDocuments(reader);

    assertEquals(2, documents.size());
    assertEquals("Text of file one.", documents.get(0));
    assertEquals("Text of file two.", documents.get(1));

}

From source file:se.sics.gvod.stream.system.hops.SetupExperiment.java

License:Open Source License

public static void main(String[] args) throws IOException, HashUtil.HashBuilderException {
    String hopsURL = "bbc1.sics.se:26801";
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", hopsURL);
    DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(conf);

    String path = "/experiment";
    if (!fs.isDirectory(new Path(path))) {
        fs.mkdirs(new Path(path));
    } else {
        fs.delete(new Path(path), true);
        fs.mkdirs(new Path(path));
    }
    String uploadDirPath = path + "/upload";
    fs.mkdirs(new Path(uploadDirPath));
    String downloadDirPath = path + "/download";
    fs.mkdirs(new Path(downloadDirPath));

    String dataFile = uploadDirPath + "/file";
    Random rand = new Random(1234);
    try (FSDataOutputStream out = fs.create(new Path(dataFile))) {
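        // fileSize and pieceSize are class-level constants defined elsewhere in
        // SetupExperiment (not shown in this snippet).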
        for (int i = 0; i < fileSize / pieceSize; i++) {
            byte[] data = new byte[1024];
            rand.nextBytes(data);
            out.write(data);
            out.flush();
        }
        System.err.println("created file - expected:" + fileSize + " created:" + out.size());
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    }
    fs.close();
}

From source file:tachyon.LocalMiniDFSCluster.java

License:Apache License

/**
 * Test the local minidfscluster only
 */
public static void main(String[] args) throws Exception {
    LocalMiniDFSCluster cluster = null;
    TachyonConf tachyonConf = new TachyonConf();
    try {
        cluster = new LocalMiniDFSCluster("/tmp/dfs", 1, 54321, tachyonConf);
        cluster.start();
        System.out.println("Address of local minidfscluster: " + cluster.getUnderFilesystemAddress());
        Thread.sleep(10);
        DistributedFileSystem dfs = cluster.getDFSClient();
        dfs.mkdirs(new Path("/1"));
        mkdirs(cluster.getUnderFilesystemAddress() + "/1/2", tachyonConf);
        FileStatus[] fs = dfs.listStatus(new Path(TachyonURI.SEPARATOR));
        assert fs.length != 0;
        System.out.println(fs[0].getPath().toUri());
        dfs.close();

        cluster.shutdown();

        cluster = new LocalMiniDFSCluster("/tmp/dfs", 3, tachyonConf);
        cluster.start();
        System.out.println("Address of local minidfscluster: " + cluster.getUnderFilesystemAddress());

        dfs = cluster.getDFSClient();
        dfs.mkdirs(new Path("/1"));

        CommonUtils.touch(cluster.getUnderFilesystemAddress() + "/1" + "/_format_" + System.currentTimeMillis(),
                tachyonConf);
        fs = dfs.listStatus(new Path("/1"));
        assert fs.length != 0;
        System.out.println(fs[0].getPath().toUri());
        dfs.close();

        cluster.shutdown();
    } finally {
        if (cluster != null && cluster.isStarted()) {
            cluster.shutdown();
        }
    }
}

From source file:tachyon.underfs.hdfs.LocalMiniDFSCluster.java

License:Apache License

/**
 * Test the local minidfscluster only
 */
public static void main(String[] args) throws Exception {
    LocalMiniDFSCluster cluster = null;
    TachyonConf tachyonConf = new TachyonConf();
    try {
        cluster = new LocalMiniDFSCluster("/tmp/dfs", 1, 54321, tachyonConf);
        cluster.start();
        System.out.println("Address of local minidfscluster: " + cluster.getUnderFilesystemAddress());
        Thread.sleep(10);
        DistributedFileSystem dfs = cluster.getDFSClient();
        dfs.mkdirs(new Path("/1"));
        mkdirs(cluster.getUnderFilesystemAddress() + "/1/2", tachyonConf);
        FileStatus[] fs = dfs.listStatus(new Path(TachyonURI.SEPARATOR));
        assert fs.length != 0;
        System.out.println(fs[0].getPath().toUri());
        dfs.close();

        cluster.shutdown();

        cluster = new LocalMiniDFSCluster("/tmp/dfs", 3, tachyonConf);
        cluster.start();
        System.out.println("Address of local minidfscluster: " + cluster.getUnderFilesystemAddress());

        dfs = cluster.getDFSClient();
        dfs.mkdirs(new Path("/1"));

        UnderFileSystemUtils.touch(
                cluster.getUnderFilesystemAddress() + "/1" + "/_format_" + System.currentTimeMillis(),
                tachyonConf);
        fs = dfs.listStatus(new Path("/1"));
        assert fs.length != 0;
        System.out.println(fs[0].getPath().toUri());
        dfs.close();

        cluster.shutdown();
    } finally {
        if (cluster != null && cluster.isStarted()) {
            cluster.shutdown();
        }
    }
}