Example usage for org.apache.hadoop.hdfs DistributedFileSystem getUri

Introduction

This page collects real-world usage examples for org.apache.hadoop.hdfs.DistributedFileSystem#getUri(), drawn from open-source Apache projects.

Prototype

@Override
public URI getUri()
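
A minimal, self-contained sketch of calling getUri() is shown below. The fs.defaultFS address and the /tmp path are illustrative placeholders, not values taken from the projects that follow.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class GetUriExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder NameNode address; point this at a real cluster.
        conf.set("fs.defaultFS", "hdfs://localhost:9000");

        FileSystem fs = FileSystem.get(conf);
        if (fs instanceof DistributedFileSystem) {
            // getUri() returns the scheme and authority this filesystem is bound to,
            // e.g. hdfs://localhost:9000. The examples below use it for log messages,
            // configuration values, and building fully qualified paths.
            URI uri = fs.getUri();
            System.out.println("Filesystem URI: " + uri);
            System.out.println("Qualified: " + new Path("/tmp").makeQualified(uri, fs.getWorkingDirectory()));
        }
        fs.close();
    }
}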

Usage

From source file: org.apache.ambari.servicemonitor.probes.DfsSafeModeProbe.java

License: Apache License
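
This health probe from Ambari's service monitor checks whether HDFS is in safe mode; getUri() identifies the filesystem in the resulting status message.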

@Override
public ProbeStatus ping(boolean livePing) {
    ProbeStatus status = new ProbeStatus();
    try {
        DistributedFileSystem hdfs = DFSUtils.createUncachedDFS(conf);
        inSafeMode = hdfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
        boolean live = !inSafeMode;
        if (inSafeMode) {
            if (ignoreSafeModeManuallyTriggered) {
                live = safeModeExitedOnce;
            }
        } else {
            // it's easier to always set this on a live NN than it is to track
            // whether there's just been a state transition.
            safeModeExitedOnce = true;
        }
        status.succeed(this);
        status.setSuccess(live);
        status.setMessage(hdfs.getUri() + " up - safe mode state " + inSafeMode);
    } catch (IOException e) {
        status.fail(this, e);
    }
    return status;
}

From source file: org.apache.falcon.snapshots.retention.HdfsSnapshotEvictor.java

License: Apache License
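
Falcon's snapshot evictor logs getUri() to record which filesystem the retention policy is applied to before deleting aged snapshots.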

protected static void evictSnapshots(DistributedFileSystem fs, String dirName, String ageLimit,
        int numSnapshots) throws FalconException {
    try {
        LOG.info("Started evicting snapshots on dir {}{} using policy {}, agelimit {}, numSnapshot {}",
                fs.getUri(), dirName, ageLimit, numSnapshots);

        long evictionTime = System.currentTimeMillis() - EvictionHelper.evalExpressionToMilliSeconds(ageLimit);

        dirName = StringUtils.removeEnd(dirName, Path.SEPARATOR);
        String snapshotDir = dirName + Path.SEPARATOR + HdfsSnapshotUtil.SNAPSHOT_DIR_PREFIX + Path.SEPARATOR;
        FileStatus[] snapshots = fs.listStatus(new Path(snapshotDir));
        if (snapshots.length <= numSnapshots) {
            // no eviction needed
            return;
        }

        // Sort by last modified time, ascending order.
        Arrays.sort(snapshots, new Comparator<FileStatus>() {
            @Override
            public int compare(FileStatus f1, FileStatus f2) {
                return Long.compare(f1.getModificationTime(), f2.getModificationTime());
            }
        });

        for (int i = 0; i < (snapshots.length - numSnapshots); i++) {
            // delete if older than ageLimit while retaining numSnapshots
            if (snapshots[i].getModificationTime() < evictionTime) {
                fs.deleteSnapshot(new Path(dirName), snapshots[i].getPath().getName());
            }
        }

    } catch (ELException ele) {
        LOG.warn("Unable to parse retention age limit {} {}", ageLimit, ele.getMessage());
        throw new FalconException("Unable to parse retention age limit " + ageLimit, ele);
    } catch (IOException ioe) {
        LOG.warn("Unable to evict snapshots from dir {} {}", dirName, ioe);
        throw new FalconException("Unable to evict snapshots from dir " + dirName, ioe);
    }

}

From source file: org.apache.tajo.storage.TestFileStorageManager.java

License: Apache License
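
This Tajo test starts a MiniDFSCluster and uses getUri() twice: to point Tajo's root directory at the test cluster, and to assert that the storage manager resolved the same filesystem.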

@Test
public void testGetSplit() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, false);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitClusterUp();
    TajoConf tajoConf = new TajoConf(conf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    int testCount = 10;
    Path tablePath = new Path("/testGetSplit");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test partitions
        List<Path> partitions = Lists.newArrayList();
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, String.valueOf(i));
            DFSTestUtil.createFile(fs, new Path(tmpFile, "tmpfile.dat"), 10, (short) 2, 0xDEADDEADL);
            partitions.add(tmpFile);
        }

        assertTrue(fs.exists(tablePath));
        FileStorageManager sm = (FileStorageManager) StorageManager.getFileStorageManager(tajoConf);
        assertEquals(fs.getUri(), sm.getFileSystem().getUri());

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

        List<Fragment> splits = Lists.newArrayList();
        // Get FileFragments in partition batch
        splits.addAll(sm.getSplits("data", meta, schema, partitions.toArray(new Path[partitions.size()])));
        assertEquals(testCount, splits.size());
        // -1 is unknown volumeId
        assertEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);

        splits.clear();
        splits.addAll(sm.getSplits("data", meta, schema,
                partitions.subList(0, partitions.size() / 2).toArray(new Path[partitions.size() / 2])));
        assertEquals(testCount / 2, splits.size());
        assertEquals(1, splits.get(0).getHosts().length);
        assertEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown(true);
    }
}

From source file: org.apache.tajo.storage.TestFileStorageManager.java

License: Apache License
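
A variant of the previous test with block-storage-location metadata enabled; getUri() again ties the Tajo configuration and assertions to the MiniDFSCluster's filesystem.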

@Test
public void testGetSplitWithBlockStorageLocationsBatching() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitClusterUp();

    TajoConf tajoConf = new TajoConf(conf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    int testCount = 10;
    Path tablePath = new Path("/testGetSplitWithBlockStorageLocationsBatching");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test files
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, "tmpfile" + i + ".dat");
            DFSTestUtil.createFile(fs, tmpFile, 10, (short) 2, 0xDEADDEADL);
        }
        assertTrue(fs.exists(tablePath));
        FileStorageManager sm = (FileStorageManager) StorageManager.getFileStorageManager(tajoConf);
        assertEquals(fs.getUri(), sm.getFileSystem().getUri());

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

        List<Fragment> splits = Lists.newArrayList();
        splits.addAll(sm.getSplits("data", meta, schema, tablePath));

        assertEquals(testCount, splits.size());
        assertEquals(2, splits.get(0).getHosts().length);
        assertEquals(2, ((FileFragment) splits.get(0)).getDiskIds().length);
        assertNotEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown(true);
    }
}

From source file: org.apache.tajo.storage.TestFileTablespace.java

License: Apache License
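
The FileTablespace version of the split test: getUri() supplies the URI for constructing the tablespace and for verifying that the tablespace is bound to the test cluster.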

@Test
public void testGetSplit() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, false);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitClusterUp();
    TajoConf tajoConf = new TajoConf(conf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    int testCount = 10;
    Path tablePath = new Path("/testGetSplit");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test partitions
        List<Path> partitions = Lists.newArrayList();
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, String.valueOf(i));
            DFSTestUtil.createFile(fs, new Path(tmpFile, "tmpfile.dat"), 10, (short) 2, 0xDEADDEADL);
            partitions.add(tmpFile);
        }

        assertTrue(fs.exists(tablePath));
        FileTablespace space = new FileTablespace("testGetSplit", fs.getUri());
        space.init(new TajoConf(conf));
        assertEquals(fs.getUri(), space.getUri());

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta("TEXT");

        List<Fragment> splits = Lists.newArrayList();
        // Get FileFragments in partition batch
        splits.addAll(space.getSplits("data", meta, schema, partitions.toArray(new Path[partitions.size()])));
        assertEquals(testCount, splits.size());
        // -1 is unknown volumeId
        assertEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);

        splits.clear();
        splits.addAll(space.getSplits("data", meta, schema,
                partitions.subList(0, partitions.size() / 2).toArray(new Path[partitions.size() / 2])));
        assertEquals(testCount / 2, splits.size());
        assertEquals(1, splits.get(0).getHosts().length);
        assertEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown(true);
    }
}

From source file: org.apache.tajo.storage.TestFileTablespace.java

License: Apache License
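
As above, but with block-storage-location batching enabled; getUri() names the filesystem backing the tablespace under test.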

@Test
public void testGetSplitWithBlockStorageLocationsBatching() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitClusterUp();

    TajoConf tajoConf = new TajoConf(conf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    int testCount = 10;
    Path tablePath = new Path("/testGetSplitWithBlockStorageLocationsBatching");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test files
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, "tmpfile" + i + ".dat");
            DFSTestUtil.createFile(fs, tmpFile, 10, (short) 2, 0xDEADDEADL);
        }
        assertTrue(fs.exists(tablePath));

        FileTablespace sm = new FileTablespace("testGetSplitWithBlockStorageLocationsBatching", fs.getUri());
        sm.init(new TajoConf(conf));

        assertEquals(fs.getUri(), sm.getUri());

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta("TEXT");

        List<Fragment> splits = Lists.newArrayList();
        splits.addAll(sm.getSplits("data", meta, schema, tablePath));

        assertEquals(testCount, splits.size());
        assertEquals(2, splits.get(0).getHosts().length);
        assertEquals(2, ((FileFragment) splits.get(0)).getDiskIds().length);
        assertNotEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown(true);
    }
}

From source file: org.apache.tez.client.TestTezClientUtils.java

License: Apache License
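
This Tez test builds paths from getUri().toString() on a remote MiniDFS filesystem to verify how LocalResource visibility is derived from directory and file permissions.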

public static void testLocalResourceVisibility(DistributedFileSystem remoteFs, Configuration conf)
        throws Exception {

    Path topLevelDir = null;
    try {
        FsPermission publicDirPerms = new FsPermission((short) 0755); // rwxr-xr-x
        FsPermission privateDirPerms = new FsPermission((short) 0754); // rwxr-xr--
        FsPermission publicFilePerms = new FsPermission((short) 0554); // r-xr-xr--
        FsPermission privateFilePerms = new FsPermission((short) 0550); // r-xr-x---

        String fsURI = remoteFs.getUri().toString();

        topLevelDir = new Path(fsURI, "/testLRVisibility");
        Assert.assertTrue(remoteFs.mkdirs(topLevelDir, publicDirPerms));

        Path publicSubDir = new Path(topLevelDir, "public_sub_dir");
        Assert.assertTrue(remoteFs.mkdirs(publicSubDir, publicDirPerms));

        Path privateSubDir = new Path(topLevelDir, "private_sub_dir");
        Assert.assertTrue(remoteFs.mkdirs(privateSubDir, privateDirPerms));

        Path publicFile = new Path(publicSubDir, "public_file");
        Assert.assertTrue(remoteFs.createNewFile(publicFile));
        remoteFs.setPermission(publicFile, publicFilePerms);

        Path privateFile = new Path(publicSubDir, "private_file");
        Assert.assertTrue(remoteFs.createNewFile(privateFile));
        remoteFs.setPermission(privateFile, privateFilePerms);

        Path publicFileInPrivateSubdir = new Path(privateSubDir, "public_file_in_private_subdir");
        Assert.assertTrue(remoteFs.createNewFile(publicFileInPrivateSubdir));
        remoteFs.setPermission(publicFileInPrivateSubdir, publicFilePerms);

        TezConfiguration tezConf = new TezConfiguration(conf);
        String tmpTezLibUris = String.format("%s,%s,%s,%s", topLevelDir, publicSubDir, privateSubDir,
                conf.get(TezConfiguration.TEZ_LIB_URIS, ""));
        tezConf.set(TezConfiguration.TEZ_LIB_URIS, tmpTezLibUris);

        Map<String, LocalResource> lrMap = new HashMap<String, LocalResource>();
        TezClientUtils.setupTezJarsLocalResources(tezConf, new Credentials(), lrMap);

        Assert.assertEquals(publicFile.getName(), LocalResourceVisibility.PUBLIC,
                lrMap.get(publicFile.getName()).getVisibility());

        Assert.assertEquals(privateFile.getName(), LocalResourceVisibility.PRIVATE,
                lrMap.get(privateFile.getName()).getVisibility());

        Assert.assertEquals(publicFileInPrivateSubdir.getName(), LocalResourceVisibility.PRIVATE,
                lrMap.get(publicFileInPrivateSubdir.getName()).getVisibility());

        // test tar.gz
        tezConf = new TezConfiguration(conf);
        Path tarFile = new Path(topLevelDir, "foo.tar.gz");
        Assert.assertTrue(remoteFs.createNewFile(tarFile));

        //public
        remoteFs.setPermission(tarFile, publicFilePerms);
        tezConf.set(TezConfiguration.TEZ_LIB_URIS, tarFile.toString());
        lrMap.clear();
        Assert.assertTrue(TezClientUtils.setupTezJarsLocalResources(tezConf, new Credentials(), lrMap));

        Assert.assertEquals(LocalResourceVisibility.PUBLIC,
                lrMap.get(TezConstants.TEZ_TAR_LR_NAME).getVisibility());

        //private
        remoteFs.setPermission(tarFile, privateFilePerms);
        lrMap.clear();
        TezClientUtils.setupTezJarsLocalResources(tezConf, new Credentials(), lrMap);
        Assert.assertEquals(LocalResourceVisibility.PRIVATE,
                lrMap.get(TezConstants.TEZ_TAR_LR_NAME).getVisibility());

    } finally {
        if (topLevelDir != null) {
            remoteFs.delete(topLevelDir, true);
        }
    }
}