Example usage for org.apache.hadoop.fs FileStatus getOwner

Introduction

This page lists usage examples for the org.apache.hadoop.fs FileStatus.getOwner() method.

Prototype

public String getOwner() 

Document

Get the owner of the file.
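
Before the fuller examples below, a minimal sketch of the call itself; the Configuration-based FileSystem lookup is standard Hadoop API, while the class name OwnerExample and the path /tmp/example.txt are hypothetical, used only for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class OwnerExample {
    public static void main(String[] args) throws Exception {
        // Obtain the FileSystem from the default configuration (e.g. core-site.xml)
        FileSystem fs = FileSystem.get(new Configuration());
        // Hypothetical path, used only for illustration
        FileStatus status = fs.getFileStatus(new Path("/tmp/example.txt"));
        System.out.println("owner: " + status.getOwner());
    }
}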

Usage

From source file:com.redsqirl.workflow.server.connect.HDFSInterface.java

License:Open Source License

protected Map<String, String> getProperties(String path, FileStatus stat) throws RemoteException {
    Map<String, String> prop = new LinkedHashMap<String, String>();
    try {
        if (stat == null) {
            logger.debug("File status not available for " + path);
            return null;
        } else {
            if (stat.isDir()) {
                prop.put(key_type, "directory");
                prop.put(key_children, "true");
            } else {
                prop.put(key_type, "file");
                prop.put(key_children, "false");
                // File length in bytes, scaled below to a human-readable unit
                double res = stat.getLen();
                boolean end = res < 1024;
                int pow = 0;
                while (!end) {
                    res /= 1024;
                    ++pow;
                    end = res < 1024;
                }
                DecimalFormat df = new DecimalFormat();
                df.setMaximumFractionDigits(1);
                String size = df.format(res);
                if (pow == 1) {
                    size += "K";
                } else if (pow == 2) {
                    size += "M";
                } else if (pow == 3) {
                    size += "G";
                } else if (pow == 4) {
                    size += "T";
                } else if (pow == 5) {
                    size += "P";
                } else if (pow == 6) {
                    size += "E";
                } else if (pow == 7) {
                    size += "Z";
                } else if (pow == 8) {
                    size += "Y";
                }

                prop.put(key_size, size);
            }
        }
        prop.put(key_owner, stat.getOwner());
        prop.put(key_group, stat.getGroup());
        prop.put(key_permission, stat.getPermission().toString());
    } catch (Exception e) {
        logger.error("Unexpected exception: " + e);
        logger.error(e.getMessage());
    }
    logger.debug("Properties of " + path + ": " + prop.toString());
    return prop;
}
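
The suffix ladder above can be collapsed into a lookup table; a compact equivalent, assuming the same 1024 base and K..Y suffix set (humanReadableSize is a hypothetical helper, not part of HDFSInterface):

private static String humanReadableSize(long bytes) {
    // Scale the byte count by powers of 1024 and pick the matching suffix
    final String[] suffixes = { "", "K", "M", "G", "T", "P", "E", "Z", "Y" };
    double res = bytes;
    int pow = 0;
    while (res >= 1024 && pow < suffixes.length - 1) {
        res /= 1024;
        ++pow;
    }
    java.text.DecimalFormat df = new java.text.DecimalFormat();
    df.setMaximumFractionDigits(1);
    return df.format(res) + suffixes[pow];
}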

From source file:com.redsqirl.workflow.server.connect.HDFSInterface.java

License:Open Source License

/**
 * Change the ownership of a path.
 * 
 * @param path the path whose ownership is changed
 * @param owner the new owner
 * @param group the new group
 * @param recursive whether to also change contained files and directories
 * @return an error message, or null on success
 */
protected String changeOwnership(Path path, String owner, String group, boolean recursive) {
    String error = null;
    try {
        FileSystem fs = NameNodeVar.getFS();
        FileStatus stat = fs.getFileStatus(path);
        if (stat.getOwner().equals(System.getProperty("user.name"))) {
            if (recursive) {
                FileStatus[] fsA = fs.listStatus(path);

                for (int i = 0; i < fsA.length && error == null; ++i) {
                    error = changeOwnership(fs, fsA[i].getPath(), owner, group, recursive);
                }
            }
            if (error == null) {
                fs.setOwner(path, owner, group);
            }
        } else {
            error = LanguageManagerWF.getText("HdfsInterface.changeprop.ownererror",
                    new Object[] { path.toString() });
        }
    } catch (IOException e) {
        logger.error("Cannot operate on the file or directory: " + path.toString());
        logger.error(e.getMessage());
        error = LanguageManagerWF.getText("HdfsInterface.changeprop.fileaccess", new Object[] { path });
    }

    if (error != null) {
        logger.debug(error);
    }
    return error;
}

From source file:com.redsqirl.workflow.server.connect.HDFSInterface.java

License:Open Source License

/**
 * Change the ownership of a path, reusing an open FileSystem.
 * 
 * @param fs the file system to operate on
 * @param path the path whose ownership is changed
 * @param owner the new owner
 * @param group the new group
 * @param recursive whether to also change contained files and directories
 * @return an error message, or null on success
 */
protected String changeOwnership(FileSystem fs, Path path, String owner, String group, boolean recursive) {
    String error = null;
    try {
        FileStatus stat = fs.getFileStatus(path);
        if (stat.getOwner().equals(System.getProperty("user.name"))) {

            if (recursive) {
                FileStatus[] fsA = fs.listStatus(path);

                for (int i = 0; i < fsA.length && error == null; ++i) {
                    error = changeOwnership(fs, fsA[i].getPath(), owner, group, recursive);
                }
            }
            if (error == null) {
                fs.setOwner(path, owner, group);
            }
        } else {
            error = LanguageManagerWF.getText("HdfsInterface.changeprop.ownererror",
                    new Object[] { path.toString() });
        }
    } catch (IOException e) {
        logger.error("Cannot operate on the file or directory: " + path.toString());
        logger.error(e.getMessage());
        error = LanguageManagerWF.getText("HdfsInterface.changeprop.fileaccess", new Object[] { path });
    }
    if (error != null) {
        logger.debug(error);
    }
    return error;
}

From source file:com.redsqirl.workflow.server.connect.HDFSInterface.java

License:Open Source License

/**
 * Change the permissions of a path.
 * 
 * @param fs the file system to operate on
 * @param path the path whose permissions are changed
 * @param permission the new permission string (e.g. "755")
 * @param recursive whether to also change contained files and directories
 * @return an error message, or null on success
 */
protected String changePermission(FileSystem fs, Path path, String permission, boolean recursive) {
    String error = null;
    try {
        FileStatus stat = fs.getFileStatus(path);
        if (stat.getOwner().equals(System.getProperty("user.name"))) {
            if (recursive) {
                FileStatus[] child = fs.listStatus(path);
                for (int i = 0; i < child.length && error == null; ++i) {
                    error = changePermission(fs, child[i].getPath(), permission, recursive);
                }
            }
            if (error == null) {
                logger.debug("1 ----- path " + path.getName() + " new perms " + permission);
                fs.setPermission(path, new FsPermission(permission));
            }
        } else {
            error = LanguageManagerWF.getText("HdfsInterface.changeprop.ownererror",
                    new Object[] { path.toString() });
        }
    } catch (IOException e) {
        logger.error("Cannot operate on the file or directory: " + path.toString());
        logger.error(e.getMessage());
        error = LanguageManagerWF.getText("HdfsInterface.changeprop.fileaccess", new Object[] { path });
    }
    if (error != null) {
        logger.debug(error);
    }
    return error;
}

From source file:com.redsqirl.workflow.server.connect.HDFSInterface.java

License:Open Source License

/**
 * Change the permissions of a path.
 * 
 * @param path the path whose permissions are changed
 * @param permission the new permission string (e.g. "755")
 * @param recursive whether to also change contained files and directories
 * @return an error message, or null on success
 */
protected String changePermission(Path path, String permission, boolean recursive) {
    String error = null;
    try {
        logger.debug("1 " + path.getName());
        FileSystem fs = NameNodeVar.getFS();
        FileStatus stat = fs.getFileStatus(path);
        if (stat.getOwner().equals(System.getProperty("user.name"))) {
            if (recursive) {
                FileStatus[] child = fs.listStatus(path);
                logger.debug("children : " + child.length);
                for (int i = 0; i < child.length && error == null; ++i) {
                    error = changePermission(fs, child[i].getPath(), permission, recursive);
                }
            }
            if (error == null) {
                logger.debug("set permissions  : " + path.toString() + " , "
                        + new FsPermission(permission).toString());
                fs.setPermission(path, new FsPermission(permission));
                logger.debug(getProperties(path.getName()));
            }
        } else {
            error = LanguageManagerWF.getText("HdfsInterface.changeprop.ownererror",
                    new Object[] { path.toString() });
        }
    } catch (IOException e) {
        logger.error("Cannot operate on the file or directory: " + path.toString());
        logger.error(e.getMessage());
        error = LanguageManagerWF.getText("HdfsInterface.changeprop.fileaccess", new Object[] { path });
    }
    if (error != null) {
        logger.debug(error);
    }
    return error;
}

From source file:com.revolutionanalytics.hadoop.hdfs.FileUtils.java

License:Apache License

private static void ls__(FileSystem srcFS, String path, ArrayList<String> lsco, boolean dorecurse)
        throws IOException, FileNotFoundException {
    Path spath = new Path(path);
    FileStatus[] srcs = srcFS.globStatus(spath);
    if (srcs == null || srcs.length == 0) {
        throw new FileNotFoundException("Cannot access " + path + ": No such file or directory.");
    }
    if (srcs.length == 1 && srcs[0].isDir())
        srcs = srcFS.listStatus(srcs[0].getPath());
    for (FileStatus status : srcs) {
        StringBuilder sb = new StringBuilder();
        boolean idir = status.isDir();
        String x = idir ? "d" : "-";
        if (dorecurse && idir)
            ls__(srcFS, status.getPath().toUri().getPath(), lsco, dorecurse);
        else {
            sb.append(x);
            sb.append(status.getPermission().toString());
            sb.append(fsep);

            sb.append(status.getOwner());
            sb.append(fsep);

            sb.append(status.getGroup());
            sb.append(fsep);

            sb.append(status.getLen());
            sb.append(fsep);

            Date d = new Date(status.getModificationTime());
            sb.append(formatter.format(d));
            sb.append(fsep);

            sb.append(status.getPath().toUri().getPath());
            lsco.add(sb.toString());
        }
    }
}

From source file:com.ruizhan.hadoop.hdfs.Ls.java

License:Apache License

@Override
protected void processPath(PathData item) throws IOException {
    FileStatus stat = item.stat;
    String line = String.format(lineFormat, (stat.isDirectory() ? "d" : "-"), stat.getPermission(),
            (stat.isFile() ? stat.getReplication() : "-"), stat.getOwner(), stat.getGroup(),
            formatSize(stat.getLen()), dateFormat.format(new Date(stat.getModificationTime())), item);
    out.println(line);
}

From source file:com.ruizhan.hadoop.hdfs.Ls.java

License:Apache License

/**
 * Compute column widths and rebuild the format string.
 * @param items used to find the max field width for each column
 */
private void adjustColumnWidths(PathData items[]) {
    for (PathData item : items) {
        FileStatus stat = item.stat;
        maxRepl = maxLength(maxRepl, stat.getReplication());
        maxLen = maxLength(maxLen, stat.getLen());
        maxOwner = maxLength(maxOwner, stat.getOwner());
        maxGroup = maxLength(maxGroup, stat.getGroup());
    }

    StringBuilder fmt = new StringBuilder();
    fmt.append("%s%s "); // permission string
    fmt.append("%" + maxRepl + "s ");
    // Do not use '%-0s' as a formatting conversion, since it will throw a
    // MissingFormatWidthException if it is used in String.format().
    // http://docs.oracle.com/javase/1.5.0/docs/api/java/util/Formatter.html#intFlags
    fmt.append((maxOwner > 0) ? "%-" + maxOwner + "s " : "%s");
    fmt.append((maxGroup > 0) ? "%-" + maxGroup + "s " : "%s");
    fmt.append("%" + maxLen + "s ");
    fmt.append("%s %s"); // mod time & path
    lineFormat = fmt.toString();
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.metadataexecutor.HdfsMetadataExecutorIT.java

License:Apache License

/**
 * Validate that the target path has the expected ownership.
 */
private void assertOwnership(Path path, String user, String group) throws IOException {
    Assert.assertTrue("File doesn't exists: " + path, fs.exists(path));
    Assert.assertTrue("Not a file: " + path, fs.isFile(path));

    FileStatus[] statuses = fs.listStatus(path);
    Assert.assertEquals(1, statuses.length);

    FileStatus status = statuses[0];
    Assert.assertNotNull(status);
    Assert.assertEquals(user, status.getOwner());
    Assert.assertEquals(group, status.getGroup());
}

From source file:com.streamsets.pipeline.stage.origin.hdfs.spooler.HdfsFile.java

License:Apache License

@SuppressWarnings("unchecked")
public Map<String, Object> getFileMetadata() throws IOException {
    FileStatus file = fs.getFileStatus(filePath);
    Map<String, Object> metadata = new HashMap<>();
    metadata.put(HeaderAttributeConstants.FILE_NAME, file.getPath().getName());
    metadata.put(HeaderAttributeConstants.FILE, file.getPath().toUri().getPath());
    metadata.put(HeaderAttributeConstants.LAST_MODIFIED_TIME, file.getModificationTime());
    metadata.put(HeaderAttributeConstants.LAST_ACCESS_TIME, file.getAccessTime());
    metadata.put(HeaderAttributeConstants.IS_DIRECTORY, file.isDirectory());
    metadata.put(HeaderAttributeConstants.IS_SYMBOLIC_LINK, file.isSymlink());
    metadata.put(HeaderAttributeConstants.SIZE, file.getLen());
    metadata.put(HeaderAttributeConstants.OWNER, file.getOwner());
    metadata.put(HeaderAttributeConstants.GROUP, file.getGroup());
    metadata.put(HeaderAttributeConstants.BLOCK_SIZE, file.getBlockSize());
    metadata.put(HeaderAttributeConstants.REPLICATION, file.getReplication());
    metadata.put(HeaderAttributeConstants.IS_ENCRYPTED, file.isEncrypted());

    FsPermission permission = file.getPermission();
    if (permission != null) {
        metadata.put(PERMISSIONS, permission.toString());
    }

    return metadata;
}