Example usage for org.apache.hadoop.fs.permission AclEntry parseAclSpec

List of usage examples for org.apache.hadoop.fs.permission AclEntry parseAclSpec

Introduction

On this page you can find example usage for org.apache.hadoop.fs.permission AclEntry parseAclSpec.

Prototype

public static List<AclEntry> parseAclSpec(String aclSpec, boolean includePermission) 

Source Link

Document

Parses a string representation of an ACL spec into a list of AclEntry objects.

Usage

From source file:com.streamsets.pipeline.stage.destination.hdfs.metadataexecutor.HdfsMetadataExecutor.java

License:Apache License

@Override
public void write(Batch batch) throws StageException {
    // Applies the configured HDFS metadata operations (create, move/rename,
    // ownership, permissions, ACLs, delete) to one file per record, executing
    // as the target user and emitting an event for the final file path.
    final ELVars variables = getContext().createELVars();
    final FileSystem fs = hdfsConnection.getFs();

    Iterator<Record> it = batch.getRecords();
    while (it.hasNext()) {
        Record record = it.next();
        RecordEL.setRecordInContext(variables, record);

        // Execute all configured HDFS metadata operations as target user
        try {
            hdfsConnection.getUGI().doAs((PrivilegedExceptionAction<Void>) () -> {
                Path workingFile = new Path(evaluate(variables, "filePath", actions.filePath));
                LOG.info("Working on file: " + workingFile);

                // Create empty file if configured; createNewFile() returns false
                // when the file already exists, which is surfaced as an error.
                if (actions.taskType == TaskType.CREATE_EMPTY_FILE) {
                    ensureDirectoryExists(fs, workingFile.getParent());
                    if (!fs.createNewFile(workingFile)) {
                        throw new IOException("Can't create file (probably already exists): " + workingFile);
                    }
                }

                // Move and/or rename are both expressed as one fs.rename() to the
                // combined destination path.
                if (actions.taskType == TaskType.CHANGE_EXISTING_FILE
                        && (actions.shouldMoveFile || actions.shouldRename)) {
                    Path newPath = workingFile.getParent();
                    String newName = workingFile.getName();
                    if (actions.shouldMoveFile) {
                        newPath = new Path(evaluate(variables, "newLocation", actions.newLocation));
                    }
                    if (actions.shouldRename) {
                        newName = evaluate(variables, "newName", actions.newName);
                    }

                    Path destinationFile = new Path(newPath, newName);
                    ensureDirectoryExists(fs, newPath);

                    LOG.debug("Renaming to: {}", destinationFile);
                    if (!fs.rename(workingFile, destinationFile)) {
                        // Fixed message: previously ended with a doubled quote ("'{}''").
                        throw new IOException(
                                Utils.format("Can't rename '{}' to '{}'", workingFile, destinationFile));
                    }
                    workingFile = destinationFile;
                }

                // Metadata changes apply to both newly created and pre-existing files.
                if (actions.taskType.isOneOf(TaskType.CHANGE_EXISTING_FILE, TaskType.CREATE_EMPTY_FILE)) {
                    if (actions.shouldChangeOwnership) {
                        String newOwner = evaluate(variables, "newOwner", actions.newOwner);
                        String newGroup = evaluate(variables, "newGroup", actions.newGroup);
                        LOG.debug("Applying ownership: user={} and group={}", newOwner, newGroup);
                        fs.setOwner(workingFile, newOwner, newGroup);
                    }

                    if (actions.shouldSetPermissions) {
                        String stringPerms = evaluate(variables, "newPermissions", actions.newPermissions);
                        FsPermission fsPerms = HdfsUtils.parseFsPermission(stringPerms);
                        LOG.debug("Applying permissions: {} loaded from value '{}'", fsPerms, stringPerms);
                        fs.setPermission(workingFile, fsPerms);
                    }

                    if (actions.shouldSetAcls) {
                        String stringAcls = evaluate(variables, "newAcls", actions.newAcls);
                        // includePermission=true: each ACL entry must carry permission bits.
                        List<AclEntry> acls = AclEntry.parseAclSpec(stringAcls, true);
                        LOG.debug("Applying ACLs: {}", stringAcls);
                        fs.setAcl(workingFile, acls);
                    }
                }

                if (actions.taskType == TaskType.REMOVE_FILE) {
                    fs.delete(workingFile, true);
                }

                // Issue event with the final file name (e.g. the renamed one if applicable)
                actions.taskType.getEventCreator().create(getContext()).with("filepath", workingFile.toString())
                        .with("filename", workingFile.getName()).createAndSend();

                LOG.debug("Done changing metadata on file: {}", workingFile);
                return null;
            });
        } catch (Throwable e) {
            // Hadoop libraries will wrap any non InterruptedException, RuntimeException, Error or IOException to UndeclaredThrowableException,
            // so we manually unwrap it here and properly propagate it to user.
            // Guard against a null cause so we never lose the exception entirely.
            if (e instanceof UndeclaredThrowableException && e.getCause() != null) {
                e = e.getCause();
            }
            LOG.error("Failure when applying metadata changes to HDFS", e);
            errorRecordHandler.onError(
                    new OnRecordErrorException(record, HdfsMetadataErrors.HDFS_METADATA_000, e.getMessage()));
        }
    }
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.metadataexecutor.HdfsMetadataExecutorIT.java

License:Apache License

@Test
public void testSetAcls() throws Exception {
    // Configure the executor to read the target path and the ACL spec from
    // record fields, then run one record through it.
    final HdfsConnectionConfig connConfig = new HdfsConnectionConfig();
    connConfig.hdfsConfDir = confDir;

    final HdfsActionsConfig actionsConfig = new HdfsActionsConfig();
    actionsConfig.filePath = "${record:value('/path')}";
    actionsConfig.shouldSetAcls = true;
    actionsConfig.newAcls = "${record:value('/acls')}";

    final HdfsMetadataExecutor metadataExecutor = new HdfsMetadataExecutor(connConfig, actionsConfig);

    final TargetRunner targetRunner = new TargetRunner.Builder(HdfsMetadataDExecutor.class, metadataExecutor)
            .setOnRecordError(OnRecordError.STOP_PIPELINE).build();
    targetRunner.runInit();

    targetRunner.runWrite(ImmutableList.of(getTestRecord()));
    assertEvent(targetRunner.getEventRecords(), inputPath);
    targetRunner.runDestroy();

    // Content and permissions must be untouched; only the ACLs change.
    assertFile(inputPath, "CONTENT");
    assertPermissions(inputPath, "760");
    // For some reason HDFS includes the group entry in the ACL listing.
    assertAcls(inputPath, AclEntry.parseAclSpec("user:sith:rw-,group::r--", true));
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.metadataxecutor.HdfsMetadataExecutor.java

License:Apache License

@Override
public void write(Batch batch) throws StageException {
    // Applies the configured HDFS metadata operations (move, ownership,
    // permissions, ACLs) to one file per record, executing as the target user.
    final ELVars variables = getContext().createELVars();
    final FileSystem fs = hdfsConnection.getFs();

    Iterator<Record> it = batch.getRecords();
    while (it.hasNext()) {
        Record record = it.next();
        RecordEL.setRecordInContext(variables, record);

        // Execute all configured HDFS metadata operations as target user
        try {
            hdfsConnection.getUGI().doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    // The target file path is evaluated per record via EL.
                    Path workingFile = new Path(evaluate(variables, "filePath", actions.filePath));
                    LOG.info("Working on file: " + workingFile);

                    if (actions.shouldMoveFile) {
                        Path destinationFile = new Path(
                                evaluate(variables, "newLocation", actions.newLocation));

                        // Create the destination's parent directory on demand.
                        Path destinationParent = destinationFile.getParent();
                        if (!fs.exists(destinationParent)) {
                            LOG.debug("Creating parent directory for destination file: {}", destinationParent);
                            if (!fs.mkdirs(destinationParent)) {
                                throw new IOException("Can't create directory: " + destinationParent);
                            }
                        }

                        LOG.debug("Renaming to: {}", destinationFile);
                        if (!fs.rename(workingFile, destinationFile)) {
                            throw new IOException("Can't rename file to: " + destinationFile);
                        }
                        // All subsequent operations target the moved file.
                        workingFile = destinationFile;
                    }

                    if (actions.shouldChangeOwnership) {
                        String newOwner = evaluate(variables, "newOwner", actions.newOwner);
                        String newGroup = evaluate(variables, "newGroup", actions.newGroup);
                        LOG.debug("Applying ownership: user={} and group={}", newOwner, newGroup);
                        fs.setOwner(workingFile, newOwner, newGroup);
                    }

                    if (actions.shouldSetPermissions) {
                        String stringPerms = evaluate(variables, "newPermissions", actions.newPermissions);
                        // NOTE(review): uses the FsPermission(String) constructor here, while the
                        // sibling executor goes through HdfsUtils.parseFsPermission — confirm both
                        // accept the same permission formats.
                        FsPermission fsPerms = new FsPermission(stringPerms);
                        LOG.debug("Applying permissions: {} loaded from value '{}'", fsPerms, stringPerms);
                        fs.setPermission(workingFile, fsPerms);
                    }

                    if (actions.shouldSetAcls) {
                        String stringAcls = evaluate(variables, "newAcls", actions.newAcls);
                        // includePermission=true: each ACL entry must carry permission bits.
                        List<AclEntry> acls = AclEntry.parseAclSpec(stringAcls, true);
                        LOG.debug("Applying ACLs: {}", stringAcls);
                        fs.setAcl(workingFile, acls);
                    }

                    // Issue event with the final file name (e.g. the renamed one if applicable)
                    EventRecord event = getContext().createEventRecord("file-changed", 1);
                    event.set(Field.create(Field.Type.MAP, new ImmutableMap.Builder<String, Field>()
                            .put("filepath", Field.create(Field.Type.STRING, workingFile.toString())).build()));
                    getContext().toEvent(event);

                    LOG.debug("Done changing metadata on file: {}", workingFile);
                    return null;
                }
            });
        } catch (Exception e) {
            // Any failure is routed to the stage's error-record handling.
            LOG.error("Failure when applying metadata changes to HDFS", e);
            errorRecordHandler.onError(
                    new OnRecordErrorException(record, HdfsMetadataErrors.HDFS_METADATA_000, e.getMessage()));
        }
    }
}

From source file:org.apache.oozie.action.hadoop.TestGitActionExecutor.java

License:Apache License

public void testAccessKeyACLsSecure() throws Exception {
    final GitActionExecutor executor = new GitActionExecutor();

    // Create a key file readable only by its owner, with an ACL that grants
    // user "foo" read/write and denies all access to others.
    final Path keyPath = new Path(getAppPath().toString() + "/test_key");
    createTestFile(keyPath);
    final FileSystem fileSystem = getFileSystem();
    fileSystem.setPermission(keyPath, FsPermission.valueOf("-r--------"));
    fileSystem.setAcl(keyPath, AclEntry.parseAclSpec("user::rwx,user:foo:rw-,group::r--,other::---", true));

    // Build the git action definition pointing at that key.
    final String gitUri = "https://github.com/apache/oozie";
    final String keyLocation = keyPath.toString();
    final String destinationDir = "repoDir";
    final String gitBranch = "myBranch";
    final Element actionXml = XmlUtils.parseXml("<git>" + "<resource-manager>" + getJobTrackerUri()
            + "</resource-manager>" + "<name-node>" + getNameNodeUri() + "</name-node>" + "<git-uri>" + gitUri
            + "</git-uri>" + "<branch>" + gitBranch + "</branch>" + "<key-path>" + keyLocation + "</key-path>"
            + "<destination-uri>" + destinationDir + "</destination-uri>" + "</git>");

    final XConfiguration protoConf = new XConfiguration();
    protoConf.set(WorkflowAppService.HADOOP_USER, getTestUser());

    final WorkflowJobBean workflow = createBaseWorkflow(protoConf, GitActionExecutor.GIT_ACTION_TYPE + "-action");
    final WorkflowActionBean workflowAction = (WorkflowActionBean) workflow.getActions().get(0);
    workflowAction.setType(executor.getType());

    // A securely-permissioned key must pass the executor's ACL check.
    final Context context = new Context(workflow, workflowAction);
    final Configuration conf = executor.createBaseHadoopConf(context, actionXml);
    try {
        executor.setupActionConf(conf, context, actionXml, getFsTestCaseDir());
    } catch (final ActionExecutorException e) {
        fail("Unexpected exception, could not check ACLs: " + e.getMessage());
    }
}

From source file:org.apache.oozie.action.hadoop.TestGitActionExecutor.java

License:Apache License

public void testAccessKeyACLsInsecure() throws Exception {
    final GitActionExecutor executor = new GitActionExecutor();

    // Create a key file whose base permissions are owner-read-only, but whose
    // ACL leaves it world-readable (other::r--) — i.e. an insecure setup.
    final Path keyPath = new Path(getAppPath().toString() + "/test_key");
    createTestFile(keyPath);
    final FileSystem fileSystem = getFileSystem();
    fileSystem.setPermission(keyPath, FsPermission.valueOf("-r--------"));
    fileSystem.setAcl(keyPath, AclEntry.parseAclSpec("user::rwx,user:foo:rw-,group::r--,other::r--", true));

    // Build the git action definition pointing at that key.
    final String gitUri = "https://github.com/apache/oozie";
    final String keyLocation = keyPath.toString();
    final String destinationDir = "repoDir";
    final String gitBranch = "myBranch";
    final Element actionXml = XmlUtils.parseXml("<git>" + "<resource-manager>" + getJobTrackerUri()
            + "</resource-manager>" + "<name-node>" + getNameNodeUri() + "</name-node>" + "<git-uri>" + gitUri
            + "</git-uri>" + "<branch>" + gitBranch + "</branch>" + "<key-path>" + keyLocation + "</key-path>"
            + "<destination-uri>" + destinationDir + "</destination-uri>" + "</git>");

    final XConfiguration protoConf = new XConfiguration();
    protoConf.set(WorkflowAppService.HADOOP_USER, getTestUser());

    final WorkflowJobBean workflow = createBaseWorkflow(protoConf, GitActionExecutor.GIT_ACTION_TYPE + "-action");
    final WorkflowActionBean workflowAction = (WorkflowActionBean) workflow.getActions().get(0);
    workflowAction.setType(executor.getType());

    // Building the Hadoop configuration itself must not fail even for an
    // insecurely-permissioned key.
    final Context context = new Context(workflow, workflowAction);
    try {
        executor.createBaseHadoopConf(context, actionXml);
    } catch (final Exception e) {
        fail("Unexpected exception, could not create Hadoop configuration with insecure setup: "
                + e.getMessage());
    }
}

From source file:org.trafodion.sql.HBaseAccess.HBaseClient.java

License:Apache License

/**
 * Recursively grants the given HBase user an rwx ACL entry on every file and
 * directory in {@code entries}. ACL failures on individual entries are logged
 * and skipped (best effort); the method always returns {@code true}.
 *
 * @param entries   directory listing to process; {@code null} is a no-op
 * @param hbaseUser user to grant rwx access to
 * @param fs        filesystem the entries live on
 * @return always {@code true}
 * @throws IOException if listing a subdirectory fails
 */
private boolean updatePermissionForEntries(FileStatus[] entries, String hbaseUser, FileSystem fs)
        throws IOException {
    if (entries == null) {
        return true;
    }

    // The ACL spec is the same for every entry, so parse it once up front
    // instead of re-parsing inside the loop (was parsed per child).
    List<AclEntry> lacl = AclEntry.parseAclSpec("user:" + hbaseUser + ":rwx", true);
    for (FileStatus child : entries) {
        Path path = child.getPath();
        try {
            fs.modifyAclEntries(path, lacl);
        } catch (IOException e) {
            //if failure just log exception and continue
            if (logger.isTraceEnabled())
                logger.trace("[Snapshot Scan] SnapshotScanHelper.updatePermissionForEntries() exception. " + e);
        }
        // NOTE(review): FileStatus.isDir() is deprecated in newer Hadoop in favor of
        // isDirectory(); kept as-is to match the API version this file targets.
        if (child.isDir()) {
            FileStatus[] files = FSUtils.listStatus(fs, path);
            updatePermissionForEntries(files, hbaseUser, fs);
        }
    }
    return true;
}

From source file:org.trafodion.sql.HBaseAccess.HBaseClient.java

License:Apache License

/**
 * Grants the HBase superuser (owner of the HBase root directory) rwx ACL
 * access to the archive directory of the given table and, recursively, to
 * everything underneath it. ACL failures are logged and ignored (best effort).
 *
 * @param tabName table whose archive directory should be opened up
 * @return always {@code true}
 */
public boolean setArchivePermissions(String tabName) throws IOException, ServiceException {
    if (logger.isTraceEnabled())
        logger.trace("[Snapshot Scan] SnapshotScanHelper.setArchivePermissions() called. ");

    // Determine the HBase superuser from the owner of the HBase root directory.
    Path hbaseRoot = FSUtils.getRootDir(config);
    FileSystem fileSys = FileSystem.get(hbaseRoot.toUri(), config);
    String hbaseUser = fileSys.getFileStatus(hbaseRoot).getOwner();
    assert (hbaseUser != null && hbaseUser.length() != 0);

    // Nothing to do when the table has no archive directory.
    Path archivePath = HFileArchiveUtil.getTableArchivePath(config, TableName.valueOf(tabName));
    if (archivePath == null) {
        return true;
    }

    List<AclEntry> aclEntries = AclEntry.parseAclSpec("user:" + hbaseUser + ":rwx", true);
    try {
        fileSys.modifyAclEntries(archivePath, aclEntries);
    } catch (IOException e) {
        // Best effort: on failure just log the exception and continue.
        if (logger.isTraceEnabled())
            logger.trace("[Snapshot Scan] SnapshotScanHelper.setArchivePermissions() exception. " + e);
    }

    // Recurse into the archive directory's contents.
    updatePermissionForEntries(FSUtils.listStatus(fileSys, archivePath), hbaseUser, fileSys);
    return true;
}