Example usage for org.apache.hadoop.fs FileSystem setOwner

Introduction

On this page you can find usage examples for the org.apache.hadoop.fs.FileSystem#setOwner method, collected from open-source projects.

Prototype

public void setOwner(Path p, String username, String groupname) throws IOException 

Document

Set owner of a path (i.e. a file or a directory). The parameters username and groupname cannot both be null.
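
Before the collected examples, here is a minimal, self-contained sketch of a setOwner call. The path, owner, and group below are illustrative placeholders. Note that on HDFS, changing a file's owner normally requires superuser privileges, while changing its group requires being the file's owner and a member of the target group.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetOwnerSketch {
    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path path = new Path("/tmp/example"); // illustrative path
        // Either argument may be null to leave that field unchanged,
        // but username and groupname must not both be null.
        fs.setOwner(path, "hdfs", "supergroup"); // illustrative owner and group
        fs.close();
    }
}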

Usage

From source file: org.apache.falcon.resource.EntityManagerJerseyTest.java

License: Apache License

private List<Path> createTestData() throws Exception {
    List<Path> list = new ArrayList<Path>();
    FileSystem fs = cluster.getFileSystem();
    fs.mkdirs(new Path("/user/guest"));
    fs.setOwner(new Path("/user/guest"), REMOTE_USER, "users");

    DateFormat formatter = new SimpleDateFormat("yyyy/MM/dd/HH/mm");
    formatter.setTimeZone(TimeZone.getTimeZone("UTC"));
    Date date = new Date(System.currentTimeMillis() + 3 * 3600000);
    Path path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    fs.create(path).close();
    date = new Date(date.getTime() - 3600000);
    path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    fs.create(path).close();
    date = new Date(date.getTime() - 3600000);
    path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    fs.create(path).close();
    date = new Date(date.getTime() - 3600000);
    path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    list.add(path);
    fs.create(path).close();
    date = new Date(date.getTime() - 3600000);
    path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    list.add(path);
    fs.create(path).close();
    date = new Date(date.getTime() - 3600000);
    path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    list.add(path);
    fs.create(path).close();
    date = new Date(date.getTime() - 3600000);
    path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    list.add(path);
    fs.create(path).close();
    date = new Date(date.getTime() - 3600000);
    path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    list.add(path);
    fs.create(path).close();
    date = new Date(date.getTime() - 3600000);
    path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    list.add(path);
    fs.create(path).close();
    date = new Date(date.getTime() - 3600000);
    path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    list.add(path);
    fs.create(path).close();
    new FsShell(cluster.getConf())
            .run(new String[] { "-chown", "-R", "guest:users", "/examples/input-data/rawLogs" });
    return list;
}
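
A note on the FsShell call at the end: FileSystem.setOwner applies only to the single path it is given, so the test shells out to -chown -R to cover the whole tree. A hedged sketch of doing that recursion directly through the FileSystem API (the helper name is mine):

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Apply setOwner to a path and, if it is a directory, to everything below it.
public static void chownRecursive(FileSystem fs, Path root, String user, String group)
        throws IOException {
    fs.setOwner(root, user, group);
    if (fs.getFileStatus(root).isDirectory()) {
        for (FileStatus child : fs.listStatus(root)) {
            chownRecursive(fs, child.getPath(), user, group);
        }
    }
}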

From source file: org.apache.gobblin.data.management.conversion.hive.task.HiveConverterUtils.java

License: Apache License

/**
 * Creates a staging directory with the permission as in source directory.
 * @param fs filesystem object
 * @param destination staging directory location
 * @param conversionEntity conversion entity used to get source directory permissions
 * @param workUnit workunit
 */
public static void createStagingDirectory(FileSystem fs, String destination,
        HiveProcessingEntity conversionEntity, WorkUnitState workUnit) {
    /*
     * Create staging data location with the same permissions as source data location
     *
     * Note that Hive can also create the missing directories automatically, but it does
     * not seem to create them with the desired permissions.
     * According to the Hive docs, permissions for newly created directories/files can be
     * controlled with a umask, e.g.:
     *
     * SET hive.warehouse.subdir.inherit.perms=false;
     * SET fs.permissions.umask-mode=022;
     *
     * Upon testing, this did not work.
     */
    Path destinationPath = new Path(destination);
    try {
        FsPermission permission;
        String group = null;
        if (conversionEntity.getTable().getDataLocation() != null) {
            FileStatus sourceDataFileStatus = fs.getFileStatus(conversionEntity.getTable().getDataLocation());
            permission = sourceDataFileStatus.getPermission();
            group = sourceDataFileStatus.getGroup();
        } else {
            permission = FsPermission.getDefault();
        }

        if (!fs.mkdirs(destinationPath, permission)) {
            throw new RuntimeException(
                    String.format("Failed to create path %s with permissions %s", destinationPath, permission));
        } else {
            fs.setPermission(destinationPath, permission);
            // Set the same group as source
            if (group != null && !workUnit.getPropAsBoolean(HIVE_DATASET_DESTINATION_SKIP_SETGROUP,
                    DEFAULT_HIVE_DATASET_DESTINATION_SKIP_SETGROUP)) {
                fs.setOwner(destinationPath, null, group);
            }
            log.info(String.format("Created %s with permissions %s and group %s", destinationPath, permission,
                    group));
        }
    } catch (IOException e) {
        Throwables.propagate(e);
    }
}
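
The helper above passes null as the username, which changes only the group and leaves the owner untouched. A minimal sketch of that pattern in isolation (the method name is mine):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// A null username changes only the group; the owner stays as-is.
static void setGroupOnly(FileSystem fs, Path path, String group) throws IOException {
    fs.setOwner(path, null, group);
}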

From source file: org.apache.hcatalog.cli.HCatDriver.java

License: Apache License

private int setFSPermsNGrp(SessionState ss) {

    Configuration conf = ss.getConf();

    String tblName = conf.get(HCatConstants.HCAT_CREATE_TBL_NAME, "");
    if (tblName.isEmpty()) {
        tblName = conf.get("import.destination.table", "");
        conf.set("import.destination.table", "");
    }
    String dbName = conf.get(HCatConstants.HCAT_CREATE_DB_NAME, "");
    String grp = conf.get(HCatConstants.HCAT_GROUP, null);
    String permsStr = conf.get(HCatConstants.HCAT_PERMS, null);

    if (tblName.isEmpty() && dbName.isEmpty()) {
        // it wasn't a create db/table operation
        return 0;
    }

    if (null == grp && null == permsStr) {
        // there were no grp and perms to begin with.
        return 0;
    }

    FsPermission perms = FsPermission.valueOf(permsStr);

    if (!tblName.isEmpty()) {
        Hive db = null;
        try {
            db = Hive.get();
            Table tbl = db.getTable(tblName);
            Path tblPath = tbl.getPath();

            FileSystem fs = tblPath.getFileSystem(conf);
            if (null != perms) {
                fs.setPermission(tblPath, perms);
            }
            if (null != grp) {
                fs.setOwner(tblPath, null, grp);
            }
            return 0;

        } catch (Exception e) {
            ss.err.println(String.format("Failed to set permissions/groups on TABLE: <%s> %s", tblName,
                    e.getMessage()));
            try { // We need to drop the table.
                if (null != db) {
                    db.dropTable(tblName);
                }
            } catch (HiveException he) {
                ss.err.println(String.format(
                        "Failed to drop TABLE <%s> after failing to set permissions/groups on it. %s", tblName,
                        e.getMessage()));
            }
            return 1;
        }
    } else {
        // looks like a db operation
        if (dbName.isEmpty() || dbName.equals(MetaStoreUtils.DEFAULT_DATABASE_NAME)) {
            // We don't set perms or groups for the default dir.
            return 0;
        } else {
            try {
                Hive db = Hive.get();
                Path dbPath = new Warehouse(conf).getDatabasePath(db.getDatabase(dbName));
                FileSystem fs = dbPath.getFileSystem(conf);
                if (perms != null) {
                    fs.setPermission(dbPath, perms);
                }
                if (null != grp) {
                    fs.setOwner(dbPath, null, grp);
                }
                return 0;
            } catch (Exception e) {
                ss.err.println(String.format("Failed to set permissions and/or group on DB: <%s> %s", dbName,
                        e.getMessage()));
                try {
                    Hive.get().dropDatabase(dbName);
                } catch (Exception e1) {
                    ss.err.println(String.format(
                            "Failed to drop DB <%s> after failing to set permissions/group on it. %s", dbName,
                            e1.getMessage()));
                }
                return 1;
            }
        }
    }
}

From source file: org.apache.ignite.internal.processors.hadoop.impl.delegate.HadoopIgfsSecondaryFileSystemDelegateImpl.java

License: Apache License

/** {@inheritDoc} */
@Nullable
@Override
public IgfsFile update(IgfsPath path, Map<String, String> props) {
    HadoopIgfsProperties props0 = new HadoopIgfsProperties(props);

    final FileSystem fileSys = fileSystemForUser();

    Path hadoopPath = convert(path);

    try {
        if (!fileSys.exists(hadoopPath))
            return null;

        if (props0.userName() != null || props0.groupName() != null)
            fileSys.setOwner(hadoopPath, props0.userName(), props0.groupName());

        if (props0.permission() != null)
            fileSys.setPermission(hadoopPath, props0.permission());
    } catch (IOException e) {
        throw handleSecondaryFsError(e, "Failed to update file properties [path=" + path + "]");
    }

    return info(path);
}
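
The delegate checks for existence first and then applies only the fields that were actually supplied. A hedged sketch of the same null-tolerant update as a standalone helper (the method name is mine):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

// Apply optional ownership and permission updates, skipping missing paths.
static boolean updateMetadata(FileSystem fs, Path path, String user, String group,
        FsPermission perm) throws IOException {
    if (!fs.exists(path)) {
        return false;
    }
    if (user != null || group != null) {
        fs.setOwner(path, user, group);
    }
    if (perm != null) {
        fs.setPermission(path, perm);
    }
    return true;
}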

From source file: org.apache.ivory.resource.EntityManagerJerseyTest.java

License: Apache License

private List<Path> createTestData() throws Exception {
    List<Path> list = new ArrayList<Path>();
    Configuration conf = new Configuration();
    conf.set("fs.default.name", "hdfs://localhost:8020");
    FileSystem fs = FileSystem.get(conf);
    fs.mkdirs(new Path("/user/guest"));
    fs.setOwner(new Path("/user/guest"), "guest", "users");

    DateFormat formatter = new SimpleDateFormat("yyyy/MM/dd/HH/mm");
    formatter.setTimeZone(TimeZone.getTimeZone("UTC"));
    Date date = new Date(System.currentTimeMillis() + 3 * 3600000);
    Path path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    fs.create(path).close();
    date = new Date(date.getTime() - 3600000);
    path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    fs.create(path).close();
    date = new Date(date.getTime() - 3600000);
    path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    fs.create(path).close();
    date = new Date(date.getTime() - 3600000);
    path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    list.add(path);
    fs.create(path).close();
    date = new Date(date.getTime() - 3600000);
    path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    list.add(path);
    fs.create(path).close();
    date = new Date(date.getTime() - 3600000);
    path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    list.add(path);
    fs.create(path).close();
    date = new Date(date.getTime() - 3600000);
    path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    list.add(path);
    fs.create(path).close();
    date = new Date(date.getTime() - 3600000);
    path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    list.add(path);
    fs.create(path).close();
    date = new Date(date.getTime() - 3600000);
    path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    list.add(path);
    fs.create(path).close();
    date = new Date(date.getTime() - 3600000);
    path = new Path("/examples/input-data/rawLogs/" + formatter.format(date) + "/file");
    list.add(path);
    fs.create(path).close();
    new FsShell(conf).run(new String[] { "-chown", "-R", "guest:users", "/examples/input-data/rawLogs" });
    return list;
}

From source file: org.apache.nifi.processors.hadoop.AbstractPutHDFSRecord.java

License: Apache License

/**
 * Changes the ownership of the given file.
 *
 * @param fileSystem the filesystem where the file exists
 * @param path the file to change ownership on
 * @param remoteOwner the new owner for the file
 * @param remoteGroup the new group for the file
 */
protected void changeOwner(final FileSystem fileSystem, final Path path, final String remoteOwner,
        final String remoteGroup) {
    try {
        // Change owner and group of file if configured to do so
        if (remoteOwner != null || remoteGroup != null) {
            fileSystem.setOwner(path, remoteOwner, remoteGroup);
        }
    } catch (Exception e) {
        getLogger().warn("Could not change owner or group of {} on due to {}", new Object[] { path, e });
    }
}

From source file: org.apache.nifi.processors.hadoop.MoveHDFS.java

License: Apache License

protected void changeOwner(final ProcessContext context, final FileSystem hdfs, final Path name) {
    try {
        // Change owner and group of file if configured to do so
        String owner = context.getProperty(REMOTE_OWNER).getValue();
        String group = context.getProperty(REMOTE_GROUP).getValue();
        if (owner != null || group != null) {
            hdfs.setOwner(name, owner, group);
        }
    } catch (Exception e) {
        getLogger().warn("Could not change owner or group of {} on HDFS due to {}",
                new Object[] { name, e.getMessage() }, e);
    }
}

From source file: org.apache.nifi.processors.hadoop.PutHDFS.java

License: Apache License

protected void changeOwner(final ProcessContext context, final FileSystem hdfs, final Path name,
        final FlowFile flowFile) {
    try {
        // Change owner and group of file if configured to do so
        String owner = context.getProperty(REMOTE_OWNER).evaluateAttributeExpressions(flowFile).getValue();
        String group = context.getProperty(REMOTE_GROUP).evaluateAttributeExpressions(flowFile).getValue();

        owner = owner == null || owner.isEmpty() ? null : owner;
        group = group == null || group.isEmpty() ? null : group;

        if (owner != null || group != null) {
            hdfs.setOwner(name, owner, group);
        }
    } catch (Exception e) {
        getLogger().warn("Could not change owner or group of {} on HDFS due to {}", new Object[] { name, e });
    }
}
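
PutHDFS additionally normalizes empty strings to null, so a blank processor property leaves the corresponding field unchanged. A sketch of that guard as a reusable helper (the method name is an assumption, not NiFi API):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Treat a null or empty owner/group as "leave unchanged"; call setOwner only
// when at least one field is actually being set.
static void changeOwnerIfSet(FileSystem fs, Path path, String owner, String group)
        throws IOException {
    owner = (owner == null || owner.isEmpty()) ? null : owner;
    group = (group == null || group.isEmpty()) ? null : group;
    if (owner != null || group != null) {
        fs.setOwner(path, owner, group);
    }
}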

From source file: org.apache.oozie.action.hadoop.FsActionExecutor.java

License: Apache License

private void doFsOperation(String op, FileSystem fs, Path p, Map<String, String> argsMap)
        throws ActionExecutorException, IOException {
    if (op.equals("chmod")) {
        String permissions = argsMap.get("permissions");
        FsPermission newFsPermission = createShortPermission(permissions, p);
        fs.setPermission(p, newFsPermission);
    } else if (op.equals("chgrp")) {
        String user = argsMap.get("user");
        String group = argsMap.get("group");
        fs.setOwner(p, user, group);
    }
}

From source file: org.apache.oozie.action.hadoop.TestFsActionExecutor.java

License: Apache License

public void testChgrp() throws Exception {
    FsActionExecutor ae = new FsActionExecutor();
    FileSystem fs = getFileSystem();

    Path path = new Path(getFsTestCaseDir(), "dir");
    Path child = new Path(path, "child");
    Path grandchild = new Path(child, "grandchild");
    fs.mkdirs(grandchild);
    String testUser = getTestUser();
    String testGroup = getTestGroup();
    String testGroup2 = getTestGroup2();

    fs.setOwner(path, testUser, testGroup);
    fs.setOwner(child, testUser, testGroup);
    fs.setOwner(grandchild, testUser, testGroup);
    assertEquals(testGroup, fs.getFileStatus(path).getGroup().toString());
    assertEquals(testGroup, fs.getFileStatus(child).getGroup().toString());
    assertEquals(testGroup, fs.getFileStatus(grandchild).getGroup().toString());

    Context context = createContext("<fs/>");

    // Test case where dir-files=false, recursive=false
    ae.chgrp(context, null, null, path, testUser, testGroup2, false, false);
    assertEquals(testGroup2, fs.getFileStatus(path).getGroup().toString());
    assertEquals(testGroup, fs.getFileStatus(child).getGroup().toString());
    assertEquals(testGroup, fs.getFileStatus(grandchild).getGroup().toString());

    // Test case where dir-files=true, recursive=false

    fs.setOwner(path, testUser, testGroup);// revert to testgroup
    fs.setOwner(child, testUser, testGroup);// revert to testgroup
    fs.setOwner(grandchild, testUser, testGroup);// revert to testgroup

    ae.chgrp(context, null, null, path, testUser, testGroup2, true, false);
    assertEquals(testGroup2, fs.getFileStatus(path).getGroup().toString());
    assertEquals(testGroup2, fs.getFileStatus(child).getGroup().toString());
    assertEquals(testGroup, fs.getFileStatus(grandchild).getGroup().toString());

    // Test case where dir-files=true, recursive=true

    fs.setOwner(path, testUser, testGroup);// revert to testgroup
    fs.setOwner(child, testUser, testGroup);// revert to testgroup
    fs.setOwner(grandchild, testUser, testGroup);// revert to testgroup

    ae.chgrp(context, null, null, path, testUser, testGroup2, true, true);
    assertEquals(testGroup2, fs.getFileStatus(path).getGroup().toString());
    assertEquals(testGroup2, fs.getFileStatus(child).getGroup().toString());
    assertEquals(testGroup2, fs.getFileStatus(grandchild).getGroup().toString());
}
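
The assertions above read the effect of setOwner back through getFileStatus. A minimal sketch of that read-back on its own (the method name is mine; note that getOwner() and getGroup() already return String, so the toString() calls in the test are redundant):

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Read ownership back after a setOwner call.
static void printOwnership(FileSystem fs, Path path) throws IOException {
    FileStatus status = fs.getFileStatus(path);
    System.out.println("owner=" + status.getOwner() + ", group=" + status.getGroup());
}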