Example usage for org.apache.hadoop.hdfs MiniDFSCluster waitActive

Introduction

This page collects example usages of org.apache.hadoop.hdfs.MiniDFSCluster#waitActive.

Prototype

public void waitActive() throws IOException 

Document

Wait until the cluster is active and running.
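
A minimal usage sketch before the full examples (class name and path are illustrative assumptions, not taken from the sources below): build a MiniDFSCluster, call waitActive() to block until the NameNode and DataNodes are up, then shut the cluster down in a finally block, as every example below does.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class WaitActiveSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = null;
        try {
            cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
            // Block until the NameNode is up and the DataNodes have registered.
            cluster.waitActive();
            FileSystem fs = cluster.getFileSystem();
            fs.mkdirs(new Path("/example")); // illustrative path only
        } finally {
            if (cluster != null) {
                cluster.shutdown();
            }
        }
    }
}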

Usage

From source file:a.TestConcatExample.java

License:Apache License

@Test
public void concatIsPermissive() throws IOException, URISyntaxException {
    MiniDFSCluster cluster = null;
    final Configuration conf = WebHdfsTestUtil.createConf();
    conf.set("dfs.namenode.fs-limits.min-block-size", "1000"); // Allow tiny blocks for the test
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        cluster.waitActive();
        final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
        final FileSystem dfs = cluster.getFileSystem();

        final FileSystem fs = dfs; // WebHDFS has a bug in getLocatedBlocks

        Path root = new Path("/dir");
        fs.mkdirs(root);

        short origRep = 3;
        short secondRep = (short) (origRep - 1);
        Path f1 = new Path("/dir/f1");
        long size1 = writeFile(fs, f1, /* blocksize */ 4096, origRep, 5);
        long f1NumBlocks = fs.getFileBlockLocations(f1, 0, size1).length;
        assertEquals(5, f1NumBlocks);

        Path f2 = new Path("/dir/f2");
        long size2 = writeFile(fs, f2, /* blocksize (must be a multiple of 512 for checksum) */ 4096 - 512, secondRep, 4);
        long f2NumBlocks = fs.getFileBlockLocations(f2, 0, size2).length;
        assertEquals(5, f2NumBlocks);

        fs.concat(f1, new Path[] { f2 });
        FileStatus[] fileStatuses = fs.listStatus(root);

        // Only one file should remain
        assertEquals(1, fileStatuses.length);
        FileStatus fileStatus = fileStatuses[0];

        // And it should be named after the first file
        assertEquals("f1", fileStatus.getPath().getName());

        // The entire file takes the replication of the first argument
        assertEquals(origRep, fileStatus.getReplication());

        // As expected, the new concatenated file's length is the sum of the two previous files
        assertEquals(size1 + size2, fileStatus.getLen());

        // And we should have the same number of blocks
        assertEquals(f1NumBlocks + f2NumBlocks,
                fs.getFileBlockLocations(fileStatus.getPath(), 0, size1 + size2).length);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }

    }
}

From source file:com.mellanox.r4h.TestReadWhileWriting.java

License:Apache License

/** Test reading while writing. */
@Test
public void pipeline_02_03() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

    // create cluster
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    try {
        //change the lease limits.
        cluster.setLeasePeriod(SOFT_LEASE_LIMIT, HARD_LEASE_LIMIT);

        //wait for the cluster
        cluster.waitActive();
        final FileSystem fs = cluster.getFileSystem();
        final Path p = new Path(DIR, "file1");
        final int half = BLOCK_SIZE / 2;

        //a. On Machine M1, Create file. Write half block of data.
        //   Invoke DFSOutputStream.hflush() on the dfs file handle.
        //   Do not close file yet.
        {
            final FSDataOutputStream out = fs.create(p, true,
                    fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) 3,
                    BLOCK_SIZE);
            write(out, 0, half);

            //hflush
            ((DFSOutputStream) out.getWrappedStream()).hflush();
        }

        //b. On another machine M2, open file and verify that the half-block
        //   of data can be read successfully.
        checkFile(p, half, conf);
        MiniDFSClusterBridge.getAppendTestUtilLOG().info("leasechecker.interruptAndJoin()");
        ((DistributedFileSystem) fs).dfs.getLeaseRenewer().interruptAndJoin();

        //c. On M1, append another half block of data.  Close file on M1.
        {
            //sleep to let the lease expire.
            Thread.sleep(2 * SOFT_LEASE_LIMIT);

            final UserGroupInformation current = UserGroupInformation.getCurrentUser();
            final UserGroupInformation ugi = UserGroupInformation
                    .createUserForTesting(current.getShortUserName() + "x", new String[] { "supergroup" });
            final DistributedFileSystem dfs = ugi.doAs(new PrivilegedExceptionAction<DistributedFileSystem>() {
                @Override
                public DistributedFileSystem run() throws Exception {
                    return (DistributedFileSystem) FileSystem.newInstance(conf);
                }
            });
            final FSDataOutputStream out = append(dfs, p);
            write(out, 0, half);
            out.close();
        }

        //d. On M2, open file and read 1 block of data from it. Close file.
        checkFile(p, 2 * half, conf);
    } finally {
        cluster.shutdown();
    }
}

From source file:com.mycompany.app.TestStagingDirectoryPermissions.java

License:Apache License

@Test
public void perms() throws IOException, InterruptedException {
    MiniDFSCluster minidfs = null;
    FileSystem fs = null;
    MiniMRClientCluster minimr = null;
    try {
        Configuration conf = new Configuration(true);
        conf.set("fs.permission.umask-mode", "0077");
        minidfs = new MiniDFSCluster.Builder(conf).build();
        minidfs.waitActive();

        fs = minidfs.getFileSystem();
        conf.set(FileSystem.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
        Path p = path("/in");
        fs.mkdirs(p);

        FSDataOutputStream os = fs.create(new Path(p, "input.txt"));
        os.write("hello!".getBytes("UTF-8"));
        os.close();

        String user = UserGroupInformation.getCurrentUser().getUserName();
        Path home = new Path("/User/" + user);
        fs.mkdirs(home);
        minimr = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);
        JobConf job = new JobConf(minimr.getConfig());

        job.setJobName("PermsTest");
        JobClient client = new JobClient(job);
        FileInputFormat.addInputPath(job, p);
        FileOutputFormat.setOutputPath(job, path("/out"));
        job.setInputFormat(TextInputFormat.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        job.setMapperClass(MySleepMapper.class);

        job.setNumReduceTasks(1);
        RunningJob submittedJob = client.submitJob(job);

        // Sleep for a bit to let localization finish
        System.out.println("Sleeping...");
        Thread.sleep(3 * 1000L);
        System.out.println("Done sleeping...");
        assertFalse(UserGroupInformation.isSecurityEnabled());

        Path stagingRoot = path("/tmp/hadoop-yarn/staging/" + user + "/.staging/");
        assertTrue(fs.exists(stagingRoot));
        assertEquals(1, fs.listStatus(stagingRoot).length);
        Path staging = fs.listStatus(stagingRoot)[0].getPath();
        Path jobXml = path(staging + "/job.xml");

        assertTrue(fs.exists(jobXml));

        FileStatus fileStatus = fs.getFileStatus(jobXml);
        System.out.println("job.xml permission = " + fileStatus.getPermission());
        assertTrue(fileStatus.getPermission().getOtherAction().implies(FsAction.READ));
        assertTrue(fileStatus.getPermission().getGroupAction().implies(FsAction.READ));

        submittedJob.waitForCompletion();
    } finally {
        if (minimr != null) {
            minimr.stop();
        }
        if (fs != null) {
            fs.close();
        }
        if (minidfs != null) {
            minidfs.shutdown(true);
        }
    }
}

From source file:hdfs.jsr203.TestAttributes.java

License:Apache License

private static MiniDFSCluster startMini(String testName) throws IOException {
    File baseDir = new File("./target/hdfs/" + testName).getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    MiniDFSCluster hdfsCluster = builder.clusterId(testName).build();
    hdfsCluster.waitActive();
    return hdfsCluster;
}

From source file:io.confluent.connect.hdfs.TestWithMiniDFSCluster.java

License:Apache License

private MiniDFSCluster createDFSCluster(Configuration conf) throws IOException {
    MiniDFSCluster cluster;
    String[] hosts = { "localhost", "localhost", "localhost" };
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    builder.hosts(hosts).nameNodePort(9001).numDataNodes(3);
    cluster = builder.build();
    cluster.waitActive();
    return cluster;
}

From source file:io.hops.security.TestUsersGroups.java

License:Apache License

@Test
public void testGroupMappingsRefresh() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();

    cluster.getNameNode().getRpcServer().refreshUserToGroupsMappings();

    UsersGroups.addUserToGroupsTx("user", new String[] { "group1", "group2" });

    int userId = UsersGroups.getUserID("user");
    assertNotSame(0, userId);
    assertEquals(UsersGroups.getUser(userId), "user");

    int groupId = UsersGroups.getGroupID("group1");
    assertNotSame(0, groupId);
    assertEquals(UsersGroups.getGroup(groupId), "group1");

    assertEquals(UsersGroups.getGroups("user"), Arrays.asList("group1", "group2"));

    removeUser(userId);

    userId = UsersGroups.getUserID("user");
    assertNotSame(0, userId);

    cluster.getNameNode().getRpcServer().refreshUserToGroupsMappings();

    userId = UsersGroups.getUserID("user");
    assertEquals(0, userId);
    assertNull(UsersGroups.getGroups("user"));

    UsersGroups.addUserToGroupsTx("user", new String[] { "group1", "group2" });

    userId = UsersGroups.getUserID("user");
    assertNotSame(0, userId);

    assertEquals(Arrays.asList("group1", "group2"), UsersGroups.getGroups("user"));

    removeUser(userId);

    UsersGroups.addUserToGroupsTx("user", new String[] { "group3" });

    int newUserId = UsersGroups.getUserID("user");
    assertNotSame(0, userId);
    assertEquals(userId, newUserId);

    UsersGroups.addUserToGroupsTx("user", new String[] { "group1", "group2" });

    assertEquals(Arrays.asList("group3", "group1", "group2"), UsersGroups.getGroups("user"));
}

From source file:io.hops.transaction.lock.TestInodeLock.java

License:Apache License

@Test
public void testInodeLockWithWrongPath() throws IOException {
    final Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        final MiniDFSCluster clusterFinal = cluster;
        final DistributedFileSystem hdfs = cluster.getFileSystem();

        hdfs.mkdirs(new Path("/tmp"));
        DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);

        new HopsTransactionalRequestHandler(HDFSOperationType.TEST) {
            @Override
            public void acquireLock(TransactionLocks locks) throws IOException {
                LockFactory lf = LockFactory.getInstance();
                INodeLock il = lf.getINodeLock(TransactionLockTypes.INodeLockType.READ_COMMITTED,
                        TransactionLockTypes.INodeResolveType.PATH, new String[] { "/tmp/f1", "/tmp/f2" })
                        .setNameNodeID(clusterFinal.getNameNode().getId())
                        .setActiveNameNodes(clusterFinal.getNameNode().getActiveNameNodes().getActiveNodes())
                        .skipReadingQuotaAttr(true);
                locks.add(il);

            }

            @Override
            public Object performTask() throws IOException {
                return null;
            }
        }.handle();

    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

From source file:org.apache.blur.HdfsMiniClusterUtil.java

License:Apache License

public static MiniDFSCluster startDfs(Configuration conf, boolean format, String path) {
    String perm;
    Path p = new Path(new File("./target").getAbsolutePath());
    try {
        FileSystem fileSystem = p.getFileSystem(conf);
        FileStatus fileStatus = fileSystem.getFileStatus(p);
        FsPermission permission = fileStatus.getPermission();
        perm = permission.getUserAction().ordinal() + "" + permission.getGroupAction().ordinal() + ""
                + permission.getOtherAction().ordinal();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    LOG.info("dfs.datanode.data.dir.perm=" + perm);
    conf.set("dfs.datanode.data.dir.perm", perm);
    System.setProperty("test.build.data", path);
    try {
        MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, (String[]) null);
        cluster.waitActive();
        return cluster;
    } catch (Exception e) {
        LOG.error("error opening file system", e);
        throw new RuntimeException(e);
    }
}

From source file:pack.block.blockstore.hdfs.HdfsMiniClusterUtil.java

License:Apache License

public static MiniDFSCluster startDfs(Configuration conf, boolean format, String path) {
    String perm;
    Path p = new Path(new File("./target").getAbsolutePath());
    try {
        FileSystem fileSystem = p.getFileSystem(conf);
        FileStatus fileStatus = fileSystem.getFileStatus(p);
        FsPermission permission = fileStatus.getPermission();
        perm = permission.getUserAction().ordinal() + "" + permission.getGroupAction().ordinal() + ""
                + permission.getOtherAction().ordinal();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    LOGGER.info("dfs.datanode.data.dir.perm={}", perm);
    conf.set("dfs.datanode.data.dir.perm", perm);
    System.setProperty("test.build.data", path);
    try {
        // MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, (String[])
        // null);
        Builder builder = new MiniDFSCluster.Builder(conf);
        MiniDFSCluster cluster = builder.build();
        cluster.waitActive();
        return cluster;
    } catch (Exception e) {
        LOGGER.error("error opening file system", e);
        throw new RuntimeException(e);
    }
}