Example usage for org.apache.hadoop.fs FileSystem setPermission

Introduction

This page collects usage examples for the setPermission method of org.apache.hadoop.fs.FileSystem.

Prototype

public void setPermission(Path p, FsPermission permission) throws IOException 

Document

Set permission of a path.
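
Before the project examples below, here is a minimal, self-contained sketch of a setPermission call. The class name and path are hypothetical, and a default Configuration is assumed (which resolves to the local file system unless fs.defaultFS points elsewhere):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class SetPermissionExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Hypothetical path; it must exist before its permission can be set.
        Path dir = new Path("/tmp/setpermission-example");
        fs.mkdirs(dir);

        // rwxr-x---: full access for the owner, read/execute for the group, nothing for others.
        fs.setPermission(dir, new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE));
    }
}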

Usage

From source file:org.apache.oozie.service.TestAuthorizationService.java

License:Apache License

private void _testAuthorizationService(boolean useDefaultGroup) throws Exception {
    init(useDefaultGroup, true);
    Reader reader = IOUtils.getResourceAsReader("wf-ext-schema-valid.xml", -1);
    Writer writer = new FileWriter(new File(getTestCaseDir(), "workflow.xml"));
    IOUtils.copyCharStream(reader, writer);

    final DagEngine engine = new DagEngine(getTestUser());
    Configuration jobConf = new XConfiguration();
    jobConf.set(OozieClient.APP_PATH, getTestCaseFileUri("workflow.xml"));
    jobConf.set(OozieClient.USER_NAME, getTestUser());
    if (useDefaultGroup) {
        jobConf.set(OozieClient.GROUP_NAME, getTestGroup());
    } else {
        jobConf.set(OozieClient.GROUP_NAME, getTestGroup() + ",foo");
    }

    jobConf.set(OozieClient.LOG_TOKEN, "t");

    jobConf.set("external-status", "ok");
    jobConf.set("signal-value", "based_on_action_status");

    final String jobId = engine.submitJob(jobConf, true);

    HadoopAccessorService has = Services.get().get(HadoopAccessorService.class);
    URI uri = getFileSystem().getUri();
    Configuration fsConf = has.createJobConf(uri.getAuthority());
    FileSystem fileSystem = has.createFileSystem(getTestUser(), uri, fsConf);

    Path path = new Path(fileSystem.getWorkingDirectory(), UUID.randomUUID().toString());
    Path fsTestDir = fileSystem.makeQualified(path);
    System.out.println(XLog.format("Setting FS testcase work dir[{0}]", fsTestDir));
    fileSystem.delete(fsTestDir, true);
    if (!fileSystem.mkdirs(path)) {
        throw new IOException(XLog.format("Could not create FS testcase dir [{0}]", fsTestDir));
    }

    String appPath = fsTestDir.toString() + "/app";

    Path jobXmlPath = new Path(appPath, "workflow.xml");
    fileSystem.create(jobXmlPath).close();
    fileSystem.setOwner(jobXmlPath, getTestUser(), getTestGroup());

    FsPermission permissions = new FsPermission(FsAction.READ_WRITE, FsAction.READ, FsAction.NONE);
    fileSystem.setPermission(jobXmlPath, permissions);

    AuthorizationService as = services.get(AuthorizationService.class);
    assertNotNull(as);
    as.authorizeForGroup(getTestUser(), getTestGroup());
    assertNotNull(as.getDefaultGroup(getTestUser()));

    as.authorizeForApp(getTestUser2(), getTestGroup(), appPath, jobConf);

    try {
        as.authorizeForApp(getTestUser3(), getTestGroup(), appPath, jobConf);
        fail();
    } catch (AuthorizationException ex) {
        // expected
    }

    as.authorizeForJob(getTestUser(), jobId, false);
    as.authorizeForJob(getTestUser(), jobId, true);
    if (!useDefaultGroup) {
        as.authorizeForJob("foo", jobId, true);
    }
    try {
        as.authorizeForJob("bar", jobId, true);
        fail();
    } catch (AuthorizationException ex) {
        // expected
    }
}

From source file:org.apache.oozie.service.TestAuthorizationService.java

License:Apache License

public void testErrors() throws Exception {
    init(false, true);
    services.setService(ForTestAuthorizationService.class);
    AuthorizationService as = services.get(AuthorizationService.class);

    Configuration conf = new Configuration();

    HadoopAccessorService has = Services.get().get(HadoopAccessorService.class);
    URI uri = getFileSystem().getUri();
    Configuration fsConf = has.createJobConf(uri.getAuthority());
    FileSystem fileSystem = has.createFileSystem(getTestUser(), uri, fsConf);

    try {
        as.authorizeForGroup(getTestUser3(), getTestGroup());
        fail();
    } catch (AuthorizationException ex) {
        assertEquals(ErrorCode.E0502, ex.getErrorCode());
    }
    try {
        as.authorizeForAdmin(getTestUser(), true);
        fail();
    } catch (AuthorizationException ex) {
        assertEquals(ErrorCode.E0503, ex.getErrorCode());
    }
    try {
        Path app = new Path(getFsTestCaseDir(), "w");
        as.authorizeForApp(getTestUser(), getTestGroup(), app.toString(), conf);
        fail();
    } catch (AuthorizationException ex) {
        assertEquals(ErrorCode.E0504, ex.getErrorCode());
    }
    try {
        Path app = new Path(getFsTestCaseDir(), "w");
        fileSystem.mkdirs(app);
        as.authorizeForApp(getTestUser(), getTestGroup(), app.toString(), conf);
        fail();
    } catch (AuthorizationException ex) {
        assertEquals(ErrorCode.E0505, ex.getErrorCode());
    }
    try {
        Path app = new Path(getFsTestCaseDir(), "w");
        Path wf = new Path(app, "workflow.xml");
        fileSystem.mkdirs(wf);
        as.authorizeForApp(getTestUser(), getTestGroup(), app.toString(), conf);
        fail();
    } catch (AuthorizationException ex) {
        assertEquals(ErrorCode.E0506, ex.getErrorCode());
    }
    try {
        Path app = new Path(getFsTestCaseDir(), "ww");
        fileSystem.mkdirs(app);
        Path wf = new Path(app, "workflow.xml");
        fileSystem.create(wf).close();
        FsPermission fsPermission = new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE);
        fileSystem.setPermission(app, fsPermission);

        as.authorizeForApp(getTestUser2(), getTestGroup() + "-invalid", app.toString(), conf);
        fail();
    } catch (AuthorizationException ex) {
        assertEquals(ErrorCode.E0507, ex.getErrorCode());
    }

    try {
        as.authorizeForJob(getTestUser(), "1", true);
        fail();
    } catch (AuthorizationException ex) {
        assertEquals(ErrorCode.E0604, ex.getErrorCode());
    }

    WorkflowJobBean job = this.addRecordToWfJobTable(WorkflowJob.Status.PREP, WorkflowInstance.Status.PREP);
    try {
        as.authorizeForJob(getTestUser3(), job.getId(), true);
        fail();
    } catch (AuthorizationException ex) {
        assertEquals(ErrorCode.E0508, ex.getErrorCode());
    }
}

From source file:org.apache.oozie.test.XTestCase.java

License:Apache License

private void setUpEmbeddedHadoop(String testCaseDir) throws Exception {
    if (dfsCluster == null && mrCluster == null) {
        if (System.getProperty("hadoop.log.dir") == null) {
            System.setProperty("hadoop.log.dir", testCaseDir);
        }
        int taskTrackers = 2;
        int dataNodes = 2;
        String oozieUser = getOozieUser();
        JobConf conf = createDFSConfig();
        String[] userGroups = new String[] { getTestGroup(), getTestGroup2() };
        UserGroupInformation.createUserForTesting(oozieUser, userGroups);
        UserGroupInformation.createUserForTesting(getTestUser(), userGroups);
        UserGroupInformation.createUserForTesting(getTestUser2(), userGroups);
        UserGroupInformation.createUserForTesting(getTestUser3(), new String[] { "users" });

        try {
            dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
            FileSystem fileSystem = dfsCluster.getFileSystem();
            fileSystem.mkdirs(new Path("target/test-data"));
            fileSystem.mkdirs(new Path("target/test-data" + "/minicluster/mapred"));
            fileSystem.mkdirs(new Path("/user"));
            fileSystem.mkdirs(new Path("/tmp"));
            fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
            fileSystem.setPermission(new Path("target/test-data"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("target/test-data" + "/minicluster"),
                    FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("target/test-data" + "/minicluster/mapred"),
                    FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
            String nnURI = fileSystem.getUri().toString();
            int numDirs = 1;
            String[] racks = null;
            String[] hosts = null;
            mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
            JobConf jobConf = mrCluster.createJobConf();
            System.setProperty(OOZIE_TEST_JOB_TRACKER, jobConf.get("mapred.job.tracker"));
            String rmAddress = jobConf.get("yarn.resourcemanager.address");
            if (rmAddress != null) {
                System.setProperty(OOZIE_TEST_JOB_TRACKER, rmAddress);
            }
            System.setProperty(OOZIE_TEST_NAME_NODE, jobConf.get("fs.default.name"));
            ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
        } catch (Exception ex) {
            shutdownMiniCluster();
            throw ex;
        }
        new MiniClusterShutdownMonitor().start();
    }
}
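
The example above builds its permissions with FsPermission.valueOf, which parses a 10-character unix symbolic string: a file-type character ('-' here) followed by rwx triples for owner, group, and other. A hypothetical helper illustrating equivalent ways to grant rwxrwxrwx, assuming fs and path are a live FileSystem and an existing Path:

private static void makeWorldWritable(FileSystem fs, Path path) throws IOException {
    fs.setPermission(path, FsPermission.valueOf("-rwxrwxrwx")); // symbolic string form
    fs.setPermission(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)); // FsAction triple
    fs.setPermission(path, new FsPermission((short) 0777)); // octal short
}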

From source file:org.apache.oozie.test.XTestCase.java

License:Apache License

private void setUpEmbeddedHadoop2() throws Exception {
    if (dfsCluster != null && dfsCluster2 == null) {
        // Trick the DFS location for MiniDFSCluster, since it doesn't accept the location as input
        String testBuildDataSaved = System.getProperty("test.build.data", "build/test/data");
        try {
            System.setProperty("test.build.data", FilenameUtils.concat(testBuildDataSaved, "2"));
            // Only DFS cluster is created based upon current need
            dfsCluster2 = new MiniDFSCluster(createDFSConfig(), 2, true, null);
            FileSystem fileSystem = dfsCluster2.getFileSystem();
            fileSystem.mkdirs(new Path("target/test-data"));
            fileSystem.mkdirs(new Path("/user"));
            fileSystem.mkdirs(new Path("/tmp"));
            fileSystem.setPermission(new Path("target/test-data"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
            System.setProperty(OOZIE_TEST_NAME_NODE2, fileSystem.getConf().get("fs.default.name"));
        } catch (Exception ex) {
            shutdownMiniCluster2();
            throw ex;
        } finally {
            // Restore the previous value
            System.setProperty("test.build.data", testBuildDataSaved);
        }
    }
}

From source file:org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler.java

License:Apache License

private static Path getCacheStagingDir(Configuration conf) throws IOException {
    String pigTempDir = conf.get(PigConfiguration.PIG_USER_CACHE_LOCATION,
            conf.get(PigConfiguration.PIG_TEMP_DIR, "/tmp"));
    String currentUser = System.getProperty("user.name");
    Path stagingDir = new Path(pigTempDir + "/" + currentUser + "/", ".pigcache");
    FileSystem fs = FileSystem.get(conf);
    fs.mkdirs(stagingDir);
    fs.setPermission(stagingDir, FileLocalizer.OWNER_ONLY_PERMS);
    return stagingDir;
}

From source file:org.apache.ranger.services.hdfs.HDFSRangerTest.java

License:Apache License

@org.junit.Test
public void readTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    final Path file = new Path("/tmp/tmpdir/data-file2");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Change permissions to read-only
    fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));

    // Now try to read the file as "bob" - this should be allowed (by the policy - user)
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("bob", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Read the file
            FSDataInputStream in = fs.open(file);
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(in, output);
            String content = new String(output.toByteArray());
            Assert.assertTrue(content.startsWith("data0"));

            fs.close();
            return null;
        }
    });

    // Now try to read the file as "alice" - this should be allowed (by the policy - group)
    ugi = UserGroupInformation.createUserForTesting("alice", new String[] { "IT" });
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Read the file
            FSDataInputStream in = fs.open(file);
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(in, output);
            String content = new String(output.toByteArray());
            Assert.assertTrue(content.startsWith("data0"));

            fs.close();
            return null;
        }
    });

    // Now try to read the file as unknown user "eve" - this should not be allowed
    ugi = UserGroupInformation.createUserForTesting("eve", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Try to read the file - this should fail
            try {
                fs.open(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });
}
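
To confirm what a test like this actually applied, the mode can be read back through getFileStatus; a short sketch under the same assumptions (fileSystem and file as in the test above):

// Read back the permission set earlier; FsPermission.toString() renders the rwx triples, e.g. "r--------".
FsPermission actual = fileSystem.getFileStatus(file).getPermission();
Assert.assertEquals(new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE), actual);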

From source file:org.apache.ranger.services.hdfs.HDFSRangerTest.java

License:Apache License

@org.junit.Test
public void executeTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    final Path file = new Path("/tmp/tmpdir3/data-file2");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Change permissions to read-only
    fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));

    // Change the parent directory permissions to be execute only for the owner
    Path parentDir = new Path("/tmp/tmpdir3");
    fileSystem.setPermission(parentDir, new FsPermission(FsAction.EXECUTE, FsAction.NONE, FsAction.NONE));

    // Try to read the directory as "bob" - this should be allowed (by the policy - user)
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("bob", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
            Assert.assertTrue(iter.hasNext());

            fs.close();
            return null;
        }
    });

    // Try to read the directory as "alice" - this should be allowed (by the policy - group)
    ugi = UserGroupInformation.createUserForTesting("alice", new String[] { "IT" });
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
            Assert.assertTrue(iter.hasNext());

            fs.close();
            return null;
        }
    });

    // Now try to read the directory as unknown user "eve" - this should not be allowed
    ugi = UserGroupInformation.createUserForTesting("eve", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Try to list the directory - this should fail
            try {
                RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
                Assert.assertTrue(iter.hasNext());
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });

}

From source file:org.apache.rya.accumulo.mr.merge.CopyTool.java

License:Apache License

private void setupSplitsFile(final Job job, final TableOperations parentTableOperations,
        final String parentTableName, final String childTableName) throws Exception {
    final FileSystem fs = FileSystem.get(conf);
    fs.setPermission(getPath(baseOutputDir, childTableName),
            new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
    final Path splitsPath = getPath(baseOutputDir, childTableName, "splits.txt");
    final Collection<Text> splits = parentTableOperations.listSplits(parentTableName, 100);
    log.info("Creating splits file at: " + splitsPath);
    try (PrintStream out = new PrintStream(new BufferedOutputStream(fs.create(splitsPath)), false,
            StandardCharsets.UTF_8.name())) {
        for (final Text split : splits) {
            final String encoded = new String(Base64.encodeBase64(TextUtil.getBytes(split)),
                    StandardCharsets.UTF_8);
            out.println(encoded);
        }
    }
    fs.setPermission(splitsPath, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));

    final String userDir = System.getProperty("user.dir");
    // A symlink to the splits file gets created in the user directory for some reason.
    // Copying the entire file might be better on Windows, but it doesn't seem to matter
    // if the user-directory symlink is broken.
    java.nio.file.Files.deleteIfExists(new File(userDir, "splits.txt").toPath());
    //Files.copy(new File(splitsPath.toString()), new File(userDir, "splits.txt"));
    job.setPartitionerClass(KeyRangePartitioner.class);
    KeyRangePartitioner.setSplitFile(job, splitsPath.toString());
    job.setNumReduceTasks(splits.size() + 1);
}

From source file:org.apache.rya.accumulo.mr.merge.CopyTool.java

License:Apache License

@Override
protected void setupAccumuloOutput(final Job job, final String outputTable) throws AccumuloSecurityException {
    AccumuloOutputFormat.setConnectorInfo(job, childUserName, new PasswordToken(childPwd));
    AccumuloOutputFormat.setCreateTables(job, true);
    AccumuloOutputFormat.setDefaultTableName(job, outputTable);
    if (!childMock) {
        AccumuloOutputFormat.setZooKeeperInstance(job,
                new ClientConfiguration().withInstance(childInstance).withZkHosts(childZk));
    } else {
        AccumuloOutputFormat.setMockInstance(job, childInstance);
    }
    if (useCopyFileOutput) {
        log.info("Using file output format mode.");
        if (StringUtils.isNotBlank(baseOutputDir)) {
            Path baseOutputPath;
            Path filesOutputPath;
            if (StringUtils.isNotBlank(outputTable)) {
                filesOutputPath = getPath(baseOutputDir, outputTable, "files");
                baseOutputPath = filesOutputPath.getParent();
                job.setOutputFormatClass(AccumuloFileOutputFormat.class);
            } else {
                // If table name is not given, configure output for one level higher:
                // it's up to the job to handle subdirectories. Make sure the parent
                // exists.
                filesOutputPath = getPath(baseOutputDir);
                baseOutputPath = filesOutputPath;
                LazyOutputFormat.setOutputFormatClass(job, AccumuloFileOutputFormat.class);
                MultipleOutputs.setCountersEnabled(job, true);
            }
            log.info("File output destination: " + filesOutputPath);
            if (useCopyFileOutputDirectoryClear) {
                try {
                    clearOutputDir(baseOutputPath);
                } catch (final IOException e) {
                    log.error("Error clearing out output path.", e);
                }
            }
            try {
                final FileSystem fs = FileSystem.get(conf);
                fs.mkdirs(filesOutputPath.getParent());
                fs.setPermission(filesOutputPath.getParent(),
                        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
            } catch (final IOException e) {
                log.error("Failed to set permission for output path.", e);
            }
            FileOutputFormat.setOutputPath(job, filesOutputPath);

            if (StringUtils.isNotBlank(compressionType)) {
                if (isValidCompressionType(compressionType)) {
                    log.info("File compression type: " + compressionType);
                    AccumuloFileOutputFormat.setCompressionType(job, compressionType);
                } else {
                    log.warn("Invalid compression type: " + compressionType);
                }
            }
        }
    } else {
        log.info("Using accumulo output format mode.");
        job.setOutputFormatClass(AccumuloOutputFormat.class);
    }
}

From source file:org.apache.rya.accumulo.mr.merge.MergeTool.java

License:Apache License

/**
 * Imports the child files that hold the table data into the parent instance as a temporary table.
 * @param childTableName the name of the child table to import into a temporary parent table.
 * @throws Exception
 */
public void importChildFilesToTempParentTable(final String childTableName) throws Exception {
    // Create a temporary table in the parent instance to import the child files to.  Then run the merge process on the parent table and temp child table.
    final String tempChildTable = childTableName + TEMP_SUFFIX;

    createTempTableIfNeeded(tempChildTable);

    final AccumuloRdfConfiguration parentAccumuloRdfConfiguration = new AccumuloRdfConfiguration(conf);
    parentAccumuloRdfConfiguration.setTablePrefix(childTablePrefix);
    final Connector parentConnector = AccumuloRyaUtils.setupConnector(parentAccumuloRdfConfiguration);
    final TableOperations parentTableOperations = parentConnector.tableOperations();

    final Path localWorkDir = CopyTool.getPath(localMergeFileImportDir, childTableName);
    final Path hdfsBaseWorkDir = CopyTool.getPath(baseImportDir, childTableName);

    CopyTool.copyLocalToHdfs(localWorkDir, hdfsBaseWorkDir, conf);

    final Path files = CopyTool.getPath(hdfsBaseWorkDir.toString(), "files");
    final Path failures = CopyTool.getPath(hdfsBaseWorkDir.toString(), "failures");
    final FileSystem fs = FileSystem.get(conf);
    // With HDFS permissions on, we need to make sure the Accumulo user can read/move the files
    fs.setPermission(hdfsBaseWorkDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
    if (fs.exists(failures)) {
        fs.delete(failures, true);
    }
    fs.mkdirs(failures);

    parentTableOperations.importDirectory(tempChildTable, files.toString(), failures.toString(), false);

    AccumuloRyaUtils.printTablePretty(tempChildTable, conf);
}