Example usage for org.apache.hadoop.hdfs DFSConfigKeys DFS_NAMENODE_ACLS_ENABLED_KEY

List of usage examples for org.apache.hadoop.hdfs DFSConfigKeys DFS_NAMENODE_ACLS_ENABLED_KEY

Introduction

On this page you can find example usages of org.apache.hadoop.hdfs DFSConfigKeys DFS_NAMENODE_ACLS_ENABLED_KEY.

Prototype

String DFS_NAMENODE_ACLS_ENABLED_KEY

To view the source code for org.apache.hadoop.hdfs DFSConfigKeys DFS_NAMENODE_ACLS_ENABLED_KEY, use the Source Link below.

Click Source Link

Usage

From source file:org.apache.sentry.hdfs.SentryAuthorizationProvider.java

License:Apache License

/**
 * Starts the provider.
 *
 * <p>Verifies that HDFS ACLs are enabled (required by Sentry), starts the
 * wrapped {@code DefaultAuthorizationProvider}, reads Sentry settings
 * (hdfs user/group, synthetic permission, whether original HDFS authz is
 * exposed as ACL entries) from hdfs-sentry.xml layered over the NameNode
 * configuration, and finally starts the {@code SentryAuthorizationInfo}
 * refresher.
 *
 * @throws IllegalStateException if the provider was already started
 * @throws RuntimeException if ACLs are disabled or any startup step fails
 */
@Override
public synchronized void start() {
    if (started) {
        throw new IllegalStateException("Provider already started");
    }
    started = true;
    try {
        if (!conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false)) {
            throw new RuntimeException("HDFS ACLs must be enabled");
        }

        defaultAuthzProvider = new DefaultAuthorizationProvider();
        defaultAuthzProvider.start();
        // Configuration is read from hdfs-sentry.xml and NN configuration, in
        // that order of precedence. Use a distinct local name so the 'conf'
        // field is not shadowed (the original local was also named 'conf').
        Configuration sentryConf = new Configuration(this.conf);
        sentryConf.addResource(SentryAuthorizationConstants.CONFIG_FILE);
        user = sentryConf.get(SentryAuthorizationConstants.HDFS_USER_KEY,
                SentryAuthorizationConstants.HDFS_USER_DEFAULT);
        group = sentryConf.get(SentryAuthorizationConstants.HDFS_GROUP_KEY,
                SentryAuthorizationConstants.HDFS_GROUP_DEFAULT);
        permission = FsPermission
                .createImmutable((short) sentryConf.getLong(SentryAuthorizationConstants.HDFS_PERMISSION_KEY,
                        SentryAuthorizationConstants.HDFS_PERMISSION_DEFAULT));
        originalAuthzAsAcl = sentryConf.getBoolean(SentryAuthorizationConstants.INCLUDE_HDFS_AUTHZ_AS_ACL_KEY,
                SentryAuthorizationConstants.INCLUDE_HDFS_AUTHZ_AS_ACL_DEFAULT);

        LOG.info("Starting");
        // SLF4J takes varargs directly; no need to allocate an Object[].
        LOG.info("Config: hdfs-user[{}] hdfs-group[{}] hdfs-permission[{}] " + "include-hdfs-authz-as-acl[{}]",
                user, group, permission, originalAuthzAsAcl);

        if (authzInfo == null) {
            authzInfo = new SentryAuthorizationInfo(sentryConf);
        }
        authzInfo.start();
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}

From source file:org.apache.sentry.hdfs.SentryINodeAttributesProvider.java

License:Apache License

/**
 * Starts the provider.
 *
 * <p>Verifies that HDFS ACLs are enabled (required by Sentry), reads Sentry
 * settings (hdfs user/group, synthetic permission, whether original HDFS
 * authz is exposed as ACL entries) from hdfs-sentry.xml layered over the
 * NameNode configuration, and starts the {@code SentryAuthorizationInfo}
 * refresher.
 *
 * @throws IllegalStateException if the provider was already started
 * @throws RuntimeException if ACLs are disabled or any startup step fails
 */
@Override
public void start() {
    if (started) {
        throw new IllegalStateException("Provider already started");
    }
    started = true;
    try {
        if (!conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false)) {
            throw new RuntimeException("HDFS ACLs must be enabled");
        }
        // hdfs-sentry.xml takes precedence over the NN configuration. Use a
        // distinct local name so the 'conf' field is not shadowed (the
        // original local was also named 'conf').
        Configuration sentryConf = new Configuration(this.conf);
        sentryConf.addResource(SentryAuthorizationConstants.CONFIG_FILE, true);
        user = sentryConf.get(SentryAuthorizationConstants.HDFS_USER_KEY,
                SentryAuthorizationConstants.HDFS_USER_DEFAULT);
        group = sentryConf.get(SentryAuthorizationConstants.HDFS_GROUP_KEY,
                SentryAuthorizationConstants.HDFS_GROUP_DEFAULT);
        permission = FsPermission
                .createImmutable((short) sentryConf.getLong(SentryAuthorizationConstants.HDFS_PERMISSION_KEY,
                        SentryAuthorizationConstants.HDFS_PERMISSION_DEFAULT));
        originalAuthzAsAcl = sentryConf.getBoolean(SentryAuthorizationConstants.INCLUDE_HDFS_AUTHZ_AS_ACL_KEY,
                SentryAuthorizationConstants.INCLUDE_HDFS_AUTHZ_AS_ACL_DEFAULT);

        LOG.info("Starting");
        // SLF4J takes varargs directly; no need to allocate an Object[].
        LOG.info("Config: hdfs-user[{}] hdfs-group[{}] hdfs-permission[{}] " + "include-hdfs-authz-as-acl[{}]",
                user, group, permission, originalAuthzAsAcl);

        if (authzInfo == null) {
            authzInfo = new SentryAuthorizationInfo(sentryConf);
        }
        authzInfo.start();
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}

From source file:org.apache.sentry.hdfs.TestSentryAuthorizationProvider.java

License:Apache License

/**
 * Spins up a single-node MiniDFSCluster, run as a testing superuser, with the
 * mock Sentry authorization provider installed and HDFS ACLs enabled.
 */
@Before
public void setUp() throws Exception {
    admin = UserGroupInformation.createUserForTesting(System.getProperty("user.name"),
            new String[] { "supergroup" });
    admin.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");

            // NameNode configuration for the test cluster.
            Configuration hdfsConf = new HdfsConfiguration();
            hdfsConf.setBoolean("sentry.authorization-provider.include-hdfs-authz-as-acl", true);
            hdfsConf.set(DFSConfigKeys.DFS_NAMENODE_AUTHORIZATION_PROVIDER_KEY,
                    MockSentryAuthorizationProvider.class.getName());
            hdfsConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);

            // Skipping edit-log fsync speeds up the test run considerably.
            EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
            miniDFS = new MiniDFSCluster.Builder(hdfsConf).build();
            return null;
        }
    });
}

From source file:org.apache.sentry.hdfs.TestSentryINodeAttributesProvider.java

License:Apache License

/**
 * Spins up a single-node MiniDFSCluster, run as a testing superuser, with the
 * mock Sentry INode-attributes provider installed and HDFS ACLs enabled.
 */
@Before
public void setUp() throws Exception {
    admin = UserGroupInformation.createUserForTesting(System.getProperty("user.name"),
            new String[] { "supergroup" });
    admin.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");

            // NameNode configuration for the test cluster.
            Configuration hdfsConf = new HdfsConfiguration();
            hdfsConf.setBoolean("sentry.authorization-provider.include-hdfs-authz-as-acl", true);
            hdfsConf.set(DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY,
                    MockSentryINodeAttributesProvider.class.getName());
            hdfsConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);

            // Skipping edit-log fsync speeds up the test run considerably.
            EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
            miniDFS = new MiniDFSCluster.Builder(hdfsConf).build();
            return null;
        }
    });
}

From source file:org.apache.sentry.tests.e2e.hdfs.TestHDFSIntegration.java

License:Apache License

/**
 * Starts the MiniDFSCluster for the HDFS integration test, wired to the
 * Sentry INode-attributes provider, creates /tmp and the Hive warehouse
 * directories with appropriate ownership, and waits for the NameNode to
 * leave safe mode.
 *
 * <p>Fix: the original safe-mode wait loop performed all 30 checks
 * back-to-back with no delay, so it never actually waited; a sleep between
 * attempts is now included.
 *
 * @throws IOException if cluster or filesystem setup fails
 * @throws InterruptedException if the calling thread is interrupted
 */
private static void startDFSandYARN() throws IOException, InterruptedException {
    adminUgi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
            hadoopConf = new HdfsConfiguration();
            hadoopConf.set(DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY,
                    SentryINodeAttributesProvider.class.getName());
            hadoopConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
            hadoopConf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
            File dfsDir = assertCreateDir(new File(baseDir, "dfs"));
            hadoopConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath());
            hadoopConf.set("hadoop.security.group.mapping", MiniDFS.PseudoGroupMappingService.class.getName());
            Configuration.addDefaultResource("test.xml");

            // Sentry authorization-provider settings: managed path prefixes and
            // cache refresh/staleness tuning.
            hadoopConf.set("sentry.authorization-provider.hdfs-path-prefixes", MANAGED_PREFIXES);
            hadoopConf.set("sentry.authorization-provider.cache-refresh-retry-wait.ms", "5000");
            hadoopConf.set("sentry.authorization-provider.cache-refresh-interval.ms",
                    String.valueOf(CACHE_REFRESH));

            hadoopConf.set("sentry.authorization-provider.cache-stale-threshold.ms",
                    String.valueOf(STALE_THRESHOLD));

            // Sentry HDFS service client (unsecured, pointed at the local test port).
            hadoopConf.set("sentry.hdfs.service.security.mode", "none");
            hadoopConf.set("sentry.hdfs.service.client.server.rpc-addresses", "localhost");
            hadoopConf.set("sentry.hdfs.service.client.server.rpc-port", String.valueOf(sentryPort));
            EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
            miniDFS = new MiniDFSCluster.Builder(hadoopConf).build();
            Path tmpPath = new Path("/tmp");
            Path hivePath = new Path("/user/hive");
            Path warehousePath = new Path(hivePath, "warehouse");
            miniDFS.getFileSystem().mkdirs(warehousePath);
            boolean directory = miniDFS.getFileSystem().isDirectory(warehousePath);
            LOGGER.info("\n\n Is dir :" + directory + "\n\n");
            LOGGER.info("\n\n DefaultFS :" + miniDFS.getFileSystem().getUri() + "\n\n");
            fsURI = miniDFS.getFileSystem().getUri().toString();
            hadoopConf.set("fs.defaultFS", fsURI);

            // Create Yarn cluster
            // miniMR = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);

            miniDFS.getFileSystem().mkdirs(tmpPath);
            miniDFS.getFileSystem().setPermission(tmpPath, FsPermission.valueOf("drwxrwxrwx"));
            miniDFS.getFileSystem().setOwner(hivePath, "hive", "hive");
            miniDFS.getFileSystem().setOwner(warehousePath, "hive", "hive");
            LOGGER.info("\n\n Owner :" + miniDFS.getFileSystem().getFileStatus(warehousePath).getOwner() + ", "
                    + miniDFS.getFileSystem().getFileStatus(warehousePath).getGroup() + "\n\n");
            LOGGER.info("\n\n Owner tmp :" + miniDFS.getFileSystem().getFileStatus(tmpPath).getOwner() + ", "
                    + miniDFS.getFileSystem().getFileStatus(tmpPath).getGroup() + ", "
                    + miniDFS.getFileSystem().getFileStatus(tmpPath).getPermission() + ", " + "\n\n");

            int dfsSafeCheckRetry = 30;
            boolean hasStarted = false;
            for (int i = dfsSafeCheckRetry; i > 0; i--) {
                if (!miniDFS.getFileSystem().isInSafeMode()) {
                    hasStarted = true;
                    LOGGER.info("HDFS safemode check num times : " + (31 - i));
                    break;
                }
                // BUG FIX: the original loop had no delay, so all 30 checks ran
                // essentially instantaneously. Wait between attempts so this
                // actually gives the NameNode up to ~30s to leave safe mode.
                Thread.sleep(1000);
            }
            if (!hasStarted) {
                throw new RuntimeException("HDFS hasnt exited safe mode yet..");
            }

            return null;
        }
    });
}

From source file:org.trustedanalytics.auth.gateway.hdfs.integration.config.LocalConfiguration.java

License:Apache License

/**
 * Boots a MiniDFSCluster (with permissions and ACLs enabled) under
 * ./target/hdfs, registers the "cf" and "super" test users, and exposes the
 * resulting cluster configuration as a Spring bean.
 *
 * @return the configuration of the running test cluster
 */
@Bean
@Qualifier(Qualifiers.CONFIGURATION)
public org.apache.hadoop.conf.Configuration initializeHdfsCluster()
        throws IOException, InterruptedException, URISyntaxException {
    // Start from a clean base directory so stale data never leaks between runs.
    File baseDir = new File("./target/hdfs/" + "testName").getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);

    org.apache.hadoop.conf.Configuration clusterConf = new org.apache.hadoop.conf.Configuration(false);
    clusterConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    clusterConf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
    clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(clusterConf).build();

    UserGroupInformation.createUserForTesting("cf", new String[] { "cf" });
    UserGroupInformation.createUserForTesting("super", new String[] { "supergroup" });

    return cluster.getConfiguration(0);
}

From source file:org.trustedanalytics.cfbroker.store.hdfs.service.SimpleHdfsClientTest.java

License:Apache License

/**
 * One-time fixture setup: wipes any previous test data, then boots a
 * MiniDFSCluster with permissions and ACLs enabled and waits for it to be up.
 */
@BeforeClass
public static void initialize() throws IOException {
    // Start from a clean base directory so stale data never leaks between runs.
    File baseDir = new File("./target/hdfs/" + "testName").getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);

    Configuration clusterConf = new Configuration(false);
    clusterConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    clusterConf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
    clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);

    cluster = new MiniDFSCluster.Builder(clusterConf).build();
    cluster.waitClusterUp();
}