Example usage for org.apache.hadoop.fs FileStatus getOwner

List of usage examples for org.apache.hadoop.fs FileStatus getOwner

Introduction

On this page you can find usage examples for org.apache.hadoop.fs.FileStatus#getOwner().

Prototype

public String getOwner() 

Document

Get the owner of the file.
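
For orientation, here is a minimal self-contained sketch of calling getOwner(). The path "/tmp/example.txt" and the default FileSystem are placeholders for illustration, not taken from any of the sources below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetOwnerExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // "/tmp/example.txt" is a placeholder; point this at a real file.
        FileStatus status = fs.getFileStatus(new Path("/tmp/example.txt"));
        // getOwner() returns the user name that owns the file, e.g. "hdfs".
        System.out.println("Owner: " + status.getOwner());
    }
}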

Usage

From source file:io.aos.hdfs.ShowFileStatusTest.java

License:Apache License

@Test
public void fileStatusForDirectory() throws IOException {
    Path dir = new Path("/dir");
    FileStatus stat = fs.getFileStatus(dir);
    assertThat(stat.getPath().toUri().getPath(), is("/dir"));
    assertThat(stat.isDir(), is(true));
    assertThat(stat.getLen(), is(0L));
    assertThat(stat.getModificationTime(), is(lessThanOrEqualTo(System.currentTimeMillis())));
    assertThat(stat.getReplication(), is((short) 0));
    assertThat(stat.getBlockSize(), is(0L));
    assertThat(stat.getOwner(), is("tom"));
    assertThat(stat.getGroup(), is("supergroup"));
    assertThat(stat.getPermission().toString(), is("rwxr-xr-x"));
}

From source file:io.apigee.lembos.node.types.DistributedCacheWrap.java

License:Apache License

/**
 * Java wrapper for {@link DistributedCache#getFileStatus(Configuration, URI)}.
 *
 * @param ctx the JavaScript context
 * @param thisObj the 'this' object
 * @param args the function arguments
 * @param func the function being called
 *
 * @return object with the file status properties
 */
@JSStaticFunction
public static Object getFileStatus(final Context ctx, final Scriptable thisObj, final Object[] args,
        final Function func) {
    final Object arg0 = args.length >= 1 ? args[0] : Undefined.instance;
    final Object arg1 = args.length >= 2 ? args[1] : Undefined.instance;

    if (args.length < 2) {
        throw Utils.makeError(ctx, thisObj, LembosMessages.TWO_ARGS_EXPECTED);
    } else if (!JavaScriptUtils.isDefined(arg0)) {
        throw Utils.makeError(ctx, thisObj, LembosMessages.FIRST_ARG_REQUIRED);
    } else if (!JavaScriptUtils.isDefined(arg1)) {
        throw Utils.makeError(ctx, thisObj, LembosMessages.SECOND_ARG_REQUIRED);
    } else if (!(arg0 instanceof ConfigurationWrap)) {
        throw Utils.makeError(ctx, thisObj, LembosMessages.FIRST_ARG_MUST_BE_CONF);
    }

    final URI hdfsUri = URI.create(arg1.toString());
    FileStatus status;

    try {
        status = DistributedCache.getFileStatus(((ConfigurationWrap) arg0).getConf(), hdfsUri);
    } catch (IOException e) {
        throw Utils.makeError(ctx, thisObj, e.getMessage());
    }

    if (status == null) {
        throw Utils.makeError(ctx, thisObj, "Unable to get file status for HDFS uri: " + hdfsUri.toString());
    }

    final Scriptable jsStatus = ctx.newObject(thisObj);

    ScriptableObject.defineProperty(jsStatus, "accessTime", status.getAccessTime(), ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "blockSize", status.getBlockSize(), ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "group", status.getGroup(), ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "len", status.getLen(), ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "modificationTime", status.getModificationTime(),
            ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "owner", status.getOwner(), ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "path", status.getPath().toString(), ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "permission", status.getPermission().toString(),
            ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "replication", status.getReplication(),
            ScriptableObject.READONLY);

    return jsStatus;
}

From source file:it.crs4.pydoop.mapreduce.pipes.TaskLog.java

License:Apache License

/**
 * Obtain the owner of the log dir. This is
 * determined by checking the job's log directory.
 */
static String obtainLogDirOwner(TaskAttemptID taskid) throws IOException {
    Configuration conf = new Configuration();
    FileSystem raw = FileSystem.getLocal(conf).getRaw();
    Path jobLogDir = new Path(getJobDir(taskid.getJobID()).getAbsolutePath());
    FileStatus jobStat = raw.getFileStatus(jobLogDir);
    return jobStat.getOwner();
}
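
A hedged usage sketch for the method above (the attempt ID string is a made-up placeholder, and obtainLogDirOwner is package-private, so a real caller would live in the same package):

// Hypothetical caller: look up the owner of a task attempt's log directory.
TaskAttemptID taskid = TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
String owner = TaskLog.obtainLogDirOwner(taskid);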

From source file:org.apache.accumulo.server.util.ChangeSecret.java

License:Apache License

private static void checkHdfsAccessPermissions(FileStatus stat, FsAction mode) throws Exception {
    FsPermission perm = stat.getPermission();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    String user = ugi.getShortUserName();
    List<String> groups = Arrays.asList(ugi.getGroupNames());
    if (user.equals(stat.getOwner())) {
        if (perm.getUserAction().implies(mode)) {
            return;
        }
    } else if (groups.contains(stat.getGroup())) {
        if (perm.getGroupAction().implies(mode)) {
            return;
        }
    } else {
        if (perm.getOtherAction().implies(mode)) {
            return;
        }
    }
    throw new Exception(String.format("Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user,
            stat.getPath(), stat.getOwner(), stat.getGroup(), stat.isDirectory() ? "d" : "-", perm));
}
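
A sketch of how a caller might drive this check. The method is private to ChangeSecret, so this would run inside that class; the path and action are illustrative assumptions:

// Illustrative: verify the current user can write to an HDFS directory,
// letting checkHdfsAccessPermissions throw on denial.
FileSystem fs = FileSystem.get(new Configuration());
FileStatus stat = fs.getFileStatus(new Path("/accumulo"));
checkHdfsAccessPermissions(stat, FsAction.WRITE);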

From source file:org.apache.ambari.view.filebrowser.HdfsApi.java

License:Apache License

/**
 * Converts a Hadoop <code>FileStatus</code> object into a JSON object.
 * The <code>SCHEME://HOST:PORT</code> portion of the path is stripped.
 * <p/>
 *
 * @param status
 *          Hadoop file status.
 * @return The JSON representation of the file status.
 */

public Map<String, Object> fileStatusToJSON(FileStatus status) {
    Map<String, Object> json = new LinkedHashMap<String, Object>();
    json.put("path", Path.getPathWithoutSchemeAndAuthority(status.getPath()).toString());
    json.put("replication", status.getReplication());
    json.put("isDirectory", status.isDirectory());
    json.put("len", status.getLen());
    json.put("owner", status.getOwner());
    json.put("group", status.getGroup());
    json.put("permission", permissionToString(status.getPermission()));
    json.put("accessTime", status.getAccessTime());
    json.put("modificationTime", status.getModificationTime());
    json.put("blockSize", status.getBlockSize());
    json.put("replication", status.getReplication());
    json.put("readAccess", checkAccessPermissions(status, FsAction.READ, ugi));
    json.put("writeAccess", checkAccessPermissions(status, FsAction.WRITE, ugi));
    json.put("executeAccess", checkAccessPermissions(status, FsAction.EXECUTE, ugi));
    return json;
}

From source file:org.apache.ambari.view.filebrowser.HdfsApi.java

License:Apache License

public static boolean checkAccessPermissions(FileStatus stat, FsAction mode, UserGroupInformation ugi) {
    FsPermission perm = stat.getPermission();
    String user = ugi.getShortUserName();
    List<String> groups = Arrays.asList(ugi.getGroupNames());
    if (user.equals(stat.getOwner())) {
        if (perm.getUserAction().implies(mode)) {
            return true;
        }
    } else if (groups.contains(stat.getGroup())) {
        if (perm.getGroupAction().implies(mode)) {
            return true;
        }
    } else {
        if (perm.getOtherAction().implies(mode)) {
            return true;
        }
    }
    return false;
}

From source file:org.apache.ambari.view.hive.utils.HdfsApi.java

License:Apache License

/**
 * Converts a Hadoop <code>FileStatus</code> object into a JSON object.
 * <p/>
 *
 * @param status
 *          Hadoop file status.
 * @return The JSON representation of the file status.
 */

public static Map<String, Object> fileStatusToJSON(FileStatus status) {
    Map<String, Object> json = new LinkedHashMap<String, Object>();
    json.put("path", status.getPath().toString());
    json.put("isDirectory", status.isDirectory());
    json.put("len", status.getLen());
    json.put("owner", status.getOwner());
    json.put("group", status.getGroup());
    json.put("permission", permissionToString(status.getPermission()));
    json.put("accessTime", status.getAccessTime());
    json.put("modificationTime", status.getModificationTime());
    json.put("blockSize", status.getBlockSize());
    json.put("replication", status.getReplication());
    return json;
}
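
A sketch of feeding a directory listing through the static converter above (the path is an illustrative assumption):

FileSystem fs = FileSystem.get(new Configuration());
for (FileStatus status : fs.listStatus(new Path("/user/admin"))) {
    Map<String, Object> json = HdfsApi.fileStatusToJSON(status);
    System.out.println(json.get("path") + " owner=" + json.get("owner"));
}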

From source file:org.apache.ambari.view.utils.hdfs.HdfsApi.java

License:Apache License

/**
 * Converts a Hadoop <code>FileStatus</code> object into a JSON object.
 * The <code>SCHEME://HOST:PORT</code> portion of the path is stripped.
 * <p/>
 *
 * @param status
 *          Hadoop file status.
 * @return The JSON representation of the file status.
 */
public Map<String, Object> fileStatusToJSON(FileStatus status) {
    Map<String, Object> json = new LinkedHashMap<String, Object>();
    json.put("path", Path.getPathWithoutSchemeAndAuthority(status.getPath()).toString());
    json.put("replication", status.getReplication());
    json.put("isDirectory", status.isDirectory());
    json.put("len", status.getLen());
    json.put("owner", status.getOwner());
    json.put("group", status.getGroup());
    json.put("permission", permissionToString(status.getPermission()));
    json.put("accessTime", status.getAccessTime());
    json.put("modificationTime", status.getModificationTime());
    json.put("blockSize", status.getBlockSize());
    json.put("replication", status.getReplication());
    json.put("readAccess", checkAccessPermissions(status, FsAction.READ, ugi));
    json.put("writeAccess", checkAccessPermissions(status, FsAction.WRITE, ugi));
    json.put("executeAccess", checkAccessPermissions(status, FsAction.EXECUTE, ugi));
    return json;
}

From source file:org.apache.camel.component.hdfs2.HdfsConsumer.java

License:Apache License

protected int doPoll() throws Exception {
    class ExcludePathFilter implements PathFilter {
        public boolean accept(Path path) {
            return !(path.toString().endsWith(config.getOpenedSuffix())
                    || path.toString().endsWith(config.getReadSuffix()));
        }
    }

    int numMessages = 0;

    HdfsInfo info = setupHdfs(false);
    FileStatus[] fileStatuses;
    if (info.getFileSystem().isFile(info.getPath())) {
        fileStatuses = info.getFileSystem().globStatus(info.getPath());
    } else {
        Path pattern = info.getPath().suffix("/" + this.config.getPattern());
        fileStatuses = info.getFileSystem().globStatus(pattern, new ExcludePathFilter());
    }

    for (FileStatus status : fileStatuses) {

        if (normalFileIsDirectoryNoSuccessFile(status, info)) {
            continue;
        }

        if (config.getOwner() != null) {
            // must match owner
            if (!config.getOwner().equals(status.getOwner())) {
                if (log.isDebugEnabled()) {
                    log.debug("Skipping file: {} as not matching owner: {}", status.getPath().toString(),
                            config.getOwner());
                }
                continue;
            }
        }

        try {
            this.rwlock.writeLock().lock();
            this.istream = HdfsInputStream.createInputStream(status.getPath().toString(), this.config);
        } finally {
            this.rwlock.writeLock().unlock();
        }

        try {
            Holder<Object> key = new Holder<Object>();
            Holder<Object> value = new Holder<Object>();
            while (this.istream.next(key, value) != 0) {
                Exchange exchange = this.getEndpoint().createExchange();
                Message message = new DefaultMessage();
                String fileName = StringUtils.substringAfterLast(status.getPath().toString(), "/");
                message.setHeader(Exchange.FILE_NAME, fileName);
                if (key.value != null) {
                    message.setHeader(HdfsHeader.KEY.name(), key.value);
                }
                message.setBody(value.value);
                exchange.setIn(message);

                log.debug("Processing file {}", fileName);
                try {
                    processor.process(exchange);
                } catch (Exception e) {
                    exchange.setException(e);
                }

                // in case of unhandled exceptions then let the exception handler handle them
                if (exchange.getException() != null) {
                    getExceptionHandler().handleException(exchange.getException());
                }

                numMessages++;
            }
        } finally {
            IOHelper.close(istream, "input stream", log);
        }
    }

    return numMessages;
}
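
The getOwner() comparison above backs the endpoint's owner option; a hedged route sketch (host, port, and path are placeholders):

import org.apache.camel.builder.RouteBuilder;

public class HdfsOwnerRoute extends RouteBuilder {
    @Override
    public void configure() {
        // owner=hadoop: files whose HDFS owner differs are skipped by the consumer.
        from("hdfs2://namenode:8020/data/input?owner=hadoop")
            .to("log:consumed");
    }
}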

From source file:org.apache.drill.exec.expr.fn.registry.RemoteFunctionRegistry.java

License:Apache License

/**
 * Concatenates udf area with root directory.
 * Creates the udf area if it does not exist.
 * Checks that the area exists, is a directory, and is writable for the current user;
 * throws {@link DrillRuntimeException} otherwise.
 *
 * @param fs file system where area should be created or checked
 * @param root root directory
 * @param directory directory path
 * @return path to area
 */
private Path createArea(FileSystem fs, String root, String directory) {
    Path path = new Path(new File(root, directory).toURI().getPath());
    String fullPath = path.toUri().getPath();
    try {
        fs.mkdirs(path);
        Preconditions.checkState(fs.exists(path), "Area [%s] must exist", fullPath);
        FileStatus fileStatus = fs.getFileStatus(path);
        Preconditions.checkState(fileStatus.isDirectory(), "Area [%s] must be a directory", fullPath);
        FsPermission permission = fileStatus.getPermission();
        // It is considered that process user has write rights on directory if:
        // 1. process user is owner of the directory and has write rights
        // 2. process user is in group that has write rights
        // 3. any user has write rights
        Preconditions.checkState(
                (ImpersonationUtil.getProcessUserName().equals(fileStatus.getOwner())
                        && permission.getUserAction().implies(FsAction.WRITE))
                        || (Sets.newHashSet(ImpersonationUtil.getProcessUserGroupNames()).contains(
                                fileStatus.getGroup()) && permission.getGroupAction().implies(FsAction.WRITE))
                        || permission.getOtherAction().implies(FsAction.WRITE),
                "Area [%s] must be writable and executable for application user", fullPath);
    } catch (Exception e) {
        if (e instanceof DrillRuntimeException) {
            throw (DrillRuntimeException) e;
        }
        // format(...) always throws, wrapping the original cause in a DrillRuntimeException
        DrillRuntimeException.format(e, "Error during udf area creation [%s] on file system [%s]", fullPath,
                fs.getUri());
    }
    logger.info("Created remote udf area [{}] on file system [{}]", fullPath, fs.getUri());
    return path;
}