Example usage for org.apache.hadoop.fs FileStatus isDirectory

List of usage examples for org.apache.hadoop.fs FileStatus isDirectory

Introduction

On this page you can find usage examples for org.apache.hadoop.fs FileStatus isDirectory, collected from open-source projects.

Prototype

public boolean isDirectory() 

Document

Is this a directory?
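
Before the project examples, here is a minimal standalone sketch of the call pattern. It is an illustration, not taken from the examples below: the default Configuration and the /tmp path are placeholders you would replace with your own.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsDirectoryExample {
    public static void main(String[] args) throws Exception {
        // Placeholder path; substitute one that exists on your file system.
        Path target = new Path("/tmp");

        FileSystem fs = FileSystem.get(new Configuration());
        FileStatus status = fs.getFileStatus(target);

        if (status.isDirectory()) {
            System.out.println(target + " is a directory");
        } else {
            // Not a directory: a regular file or, on some file systems, a symlink.
            System.out.println(target + " is not a directory");
        }
    }
}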

Usage

From source file: org.apache.accumulo.master.tableOps.bulkVer1.BulkImport.java

License: Apache License

private String prepareBulkImport(Master master, final VolumeManager fs, String dir, Table.ID tableId)
        throws Exception {
    final Path bulkDir = createNewBulkDir(fs, tableId);

    MetadataTableUtil.addBulkLoadInProgressFlag(master,
            "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());

    Path dirPath = new Path(dir);
    FileStatus[] mapFiles = fs.listStatus(dirPath);

    final UniqueNameAllocator namer = UniqueNameAllocator.getInstance();

    int workerCount = master.getConfiguration().getCount(Property.MASTER_BULK_RENAME_THREADS);
    SimpleThreadPool workers = new SimpleThreadPool(workerCount, "bulk move");
    List<Future<Exception>> results = new ArrayList<>();

    for (FileStatus file : mapFiles) {
        final FileStatus fileStatus = file;
        results.add(workers.submit(() -> {
            try {
String[] sa = fileStatus.getPath().getName().split("\\.");
                String extension = "";
                if (sa.length > 1) {
                    extension = sa[sa.length - 1];

                    if (!FileOperations.getValidExtensions().contains(extension)) {
                        log.warn("{} does not have a valid extension, ignoring", fileStatus.getPath());
                        return null;
                    }
                } else {
                    // assume it is a map file
                    extension = Constants.MAPFILE_EXTENSION;
                }

                if (extension.equals(Constants.MAPFILE_EXTENSION)) {
                    if (!fileStatus.isDirectory()) {
                        log.warn("{} is not a map file, ignoring", fileStatus.getPath());
                        return null;
                    }

                    if (fileStatus.getPath().getName().equals("_logs")) {
                        log.info("{} is probably a log directory from a map/reduce task, skipping",
                                fileStatus.getPath());
                        return null;
                    }
                    try {
                        FileStatus dataStatus = fs
                                .getFileStatus(new Path(fileStatus.getPath(), MapFile.DATA_FILE_NAME));
                        if (dataStatus.isDirectory()) {
                            log.warn("{} is not a map file, ignoring", fileStatus.getPath());
                            return null;
                        }
                    } catch (FileNotFoundException fnfe) {
                        log.warn("{} is not a map file, ignoring", fileStatus.getPath());
                        return null;
                    }
                }

                String newName = "I" + namer.getNextName() + "." + extension;
                Path newPath = new Path(bulkDir, newName);
                try {
                    fs.rename(fileStatus.getPath(), newPath);
                    log.debug("Moved {} to {}", fileStatus.getPath(), newPath);
                } catch (IOException e) {
                    log.error("Could not move: {} {}", fileStatus.getPath(), e.getMessage());
                }

            } catch (Exception ex) {
                return ex;
            }
            return null;
        }));
    }
    workers.shutdown();
    // Block until every queued rename task has completed.
    while (!workers.awaitTermination(1000L, TimeUnit.MILLISECONDS)) {}

    for (Future<Exception> ex : results) {
        if (ex.get() != null) {
            throw ex.get();
        }
    }
    return bulkDir.toString();
}

From source file: org.apache.accumulo.master.tableOps.CleanUp.java

License: Apache License

protected void merge(VolumeManager fs, Path src, Path dest) throws IOException {
    for (FileStatus child : fs.listStatus(src)) {
        final String childName = child.getPath().getName();
        final Path childInSrc = new Path(src, childName), childInDest = new Path(dest, childName);

        if (child.isFile()) {
            if (fs.exists(childInDest)) {
                log.warn("File already exists in archive, ignoring. " + childInDest);
            } else {
                fs.rename(childInSrc, childInDest);
            }
        } else if (child.isDirectory()) {
            if (fs.exists(childInDest)) {
                // Recurse
                merge(fs, childInSrc, childInDest);
            } else {
                fs.rename(childInSrc, childInDest);
            }
        } else {
            // Symlinks shouldn't exist in table directories.
            log.warn("Ignoring archiving of non file/directory: " + child);
        }
    }
}

From source file: org.apache.accumulo.server.fs.VolumeUtil.java

License: Apache License

private static HashSet<String> getFileNames(FileStatus[] filesStatuses) {
    HashSet<String> names = new HashSet<>();
    for (FileStatus fileStatus : filesStatuses)
        if (fileStatus.isDirectory())
            throw new IllegalArgumentException("expected " + fileStatus.getPath() + " to be a file");
        else
            names.add(fileStatus.getPath().getName());
    return names;
}

From source file: org.apache.accumulo.server.init.Initialize.java

License: Apache License

private static void createDirectories(VolumeManager fs, String... dirs) throws IOException {
    for (String s : dirs) {
        Path dir = new Path(s);
        try {
            FileStatus fstat = fs.getFileStatus(dir);
            if (!fstat.isDirectory()) {
                log.error("FATAL: location " + dir + " exists but is not a directory");
                return;
            }
        } catch (FileNotFoundException fnfe) {
            // attempt to create directory, since it doesn't exist
            if (!fs.mkdirs(dir)) {
                log.error("FATAL: unable to create directory " + dir);
                return;
            }
        }
    }
}

From source file: org.apache.accumulo.server.util.ChangeSecret.java

License: Apache License

private static void checkHdfsAccessPermissions(FileStatus stat, FsAction mode) throws Exception {
    FsPermission perm = stat.getPermission();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    String user = ugi.getShortUserName();
    List<String> groups = Arrays.asList(ugi.getGroupNames());
    if (user.equals(stat.getOwner())) {
        if (perm.getUserAction().implies(mode)) {
            return;
        }
    } else if (groups.contains(stat.getGroup())) {
        if (perm.getGroupAction().implies(mode)) {
            return;
        }
    } else {
        if (perm.getOtherAction().implies(mode)) {
            return;
        }
    }
    throw new Exception(String.format("Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user,
            stat.getPath(), stat.getOwner(), stat.getGroup(), stat.isDirectory() ? "d" : "-", perm));
}

From source file: org.apache.accumulo.test.performance.scan.CollectTabletStats.java

License: Apache License

private static void reportHdfsBlockLocations(List<FileRef> files) throws Exception {
    VolumeManager fs = VolumeManagerImpl.get();

    System.out.println("\t\tFile block report : ");
    for (FileRef file : files) {
        FileStatus status = fs.getFileStatus(file.path());

        if (status.isDirectory()) {
            // assume it is a map file
            status = fs.getFileStatus(new Path(file + "/data"));
        }
        FileSystem ns = fs.getVolumeByPath(file.path()).getFileSystem();
        BlockLocation[] locs = ns.getFileBlockLocations(status, 0, status.getLen());

        System.out.println("\t\t\tBlocks for : " + file);

        for (BlockLocation blockLocation : locs) {
            System.out.printf("\t\t\t\t offset : %,13d  hosts :", blockLocation.getOffset());
            for (String host : blockLocation.getHosts()) {
                System.out.print(" " + host);
            }
            System.out.println();
        }
    }

    System.out.println();

}

From source file: org.apache.ambari.view.filebrowser.DownloadService.java

License: Apache License

/**
 * Download ZIP of passed file list
 * @param request download request
 * @return response with zip
 */
@POST
@Path("/zip")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_OCTET_STREAM)
public Response downloadGZip(final DownloadRequest request) {
    try {
        StreamingOutput result = new StreamingOutput() {
            public void write(OutputStream output) throws IOException, ServiceFormattedException {
                ZipOutputStream zip = new ZipOutputStream(output);
                try {
                    HdfsApi api = getApi(context);
                    Queue<String> files = new LinkedList<String>();
                    for (String file : request.entries) {
                        files.add(file);
                    }
                    while (!files.isEmpty()) {
                        String path = files.poll();
                        FileStatus status = api.getFileStatus(path);
                        if (status.isDirectory()) {
                            FileStatus[] subdir;
                            try {
                                subdir = api.listdir(path);
                            } catch (AccessControlException ex) {
                                logger.error("Error zipping directory " + path.substring(1)
                                        + "/ (directory ignored)" + ": " + ex.getMessage());
                                continue;
                            }
                            for (FileStatus file : subdir) {
                                files.add(org.apache.hadoop.fs.Path
                                        .getPathWithoutSchemeAndAuthority(file.getPath()).toString());
                            }
                            zipDirectory(zip, path);
                        } else {
                            zipFile(zip, path);
                        }
                    }
                } catch (Exception ex) {
                    logger.error("Error occurred: " + ex.getMessage());
                    throw new ServiceFormattedException(ex.getMessage(), ex);
                } finally {
                    zip.close();
                }
            }
        };
        return Response.ok(result).header("Content-Disposition", "inline; filename=\"hdfs.zip\"").build();
    } catch (WebApplicationException ex) {
        throw ex;
    } catch (Exception ex) {
        throw new ServiceFormattedException(ex.getMessage(), ex);
    }
}

From source file: org.apache.ambari.view.filebrowser.HdfsApi.java

License: Apache License

/**
 * Converts a Hadoop <code>FileStatus</code> object into a JSON object.
 * It strips the <code>SCHEME://HOST:PORT</code> prefix from the path.
 * <p/>
 *
 * @param status
 *          Hadoop file status.
 * @return The JSON representation of the file status.
 */

public Map<String, Object> fileStatusToJSON(FileStatus status) {
    Map<String, Object> json = new LinkedHashMap<String, Object>();
    json.put("path", Path.getPathWithoutSchemeAndAuthority(status.getPath()).toString());
    json.put("replication", status.getReplication());
    json.put("isDirectory", status.isDirectory());
    json.put("len", status.getLen());
    json.put("owner", status.getOwner());
    json.put("group", status.getGroup());
    json.put("permission", permissionToString(status.getPermission()));
    json.put("accessTime", status.getAccessTime());
    json.put("modificationTime", status.getModificationTime());
    json.put("blockSize", status.getBlockSize());
    json.put("replication", status.getReplication());
    json.put("readAccess", checkAccessPermissions(status, FsAction.READ, ugi));
    json.put("writeAccess", checkAccessPermissions(status, FsAction.WRITE, ugi));
    json.put("executeAccess", checkAccessPermissions(status, FsAction.EXECUTE, ugi));
    return json;
}
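
A usage sketch for the method above, assuming the usual java.util imports; the fs handle, the api instance, and the /tmp path are illustrative assumptions, not part of the view's code:

// Hypothetical caller: serialize a directory listing. Each entry's
// "isDirectory" key comes from FileStatus.isDirectory() via fileStatusToJSON.
FileStatus[] entries = fs.listStatus(new Path("/tmp"));
List<Map<String, Object>> listing = new ArrayList<Map<String, Object>>();
for (FileStatus entry : entries) {
    listing.add(api.fileStatusToJSON(entry));
}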

From source file: org.apache.ambari.view.hive.utils.HdfsApi.java

License: Apache License

/**
 * Converts a Hadoop <code>FileStatus</code> object into a JSON object.
 * This variant keeps the full path, including any <code>SCHEME://HOST:PORT</code>
 * prefix.
 * <p/>
 *
 * @param status
 *          Hadoop file status.
 * @return The JSON representation of the file status.
 */

public static Map<String, Object> fileStatusToJSON(FileStatus status) {
    Map<String, Object> json = new LinkedHashMap<String, Object>();
    json.put("path", status.getPath().toString());
    json.put("isDirectory", status.isDirectory());
    json.put("len", status.getLen());
    json.put("owner", status.getOwner());
    json.put("group", status.getGroup());
    json.put("permission", permissionToString(status.getPermission()));
    json.put("accessTime", status.getAccessTime());
    json.put("modificationTime", status.getModificationTime());
    json.put("blockSize", status.getBlockSize());
    json.put("replication", status.getReplication());
    return json;
}

From source file: org.apache.ambari.view.utils.hdfs.HdfsApi.java

License: Apache License

/**
 * Converts a Hadoop <code>FileStatus</code> object into a JSON object.
 * It strips the <code>SCHEME://HOST:PORT</code> prefix from the path.
 * <p/>
 *
 * @param status
 *          Hadoop file status.
 * @return The JSON representation of the file status.
 */
public Map<String, Object> fileStatusToJSON(FileStatus status) {
    Map<String, Object> json = new LinkedHashMap<String, Object>();
    json.put("path", Path.getPathWithoutSchemeAndAuthority(status.getPath()).toString());
    json.put("replication", status.getReplication());
    json.put("isDirectory", status.isDirectory());
    json.put("len", status.getLen());
    json.put("owner", status.getOwner());
    json.put("group", status.getGroup());
    json.put("permission", permissionToString(status.getPermission()));
    json.put("accessTime", status.getAccessTime());
    json.put("modificationTime", status.getModificationTime());
    json.put("blockSize", status.getBlockSize());
    json.put("replication", status.getReplication());
    json.put("readAccess", checkAccessPermissions(status, FsAction.READ, ugi));
    json.put("writeAccess", checkAccessPermissions(status, FsAction.WRITE, ugi));
    json.put("executeAccess", checkAccessPermissions(status, FsAction.EXECUTE, ugi));
    return json;
}