Example usage for org.apache.hadoop.fs FileStatus isDirectory

List of usage examples for org.apache.hadoop.fs FileStatus isDirectory

Introduction

This page collects usage examples for the org.apache.hadoop.fs.FileStatus method isDirectory().

Prototype

public boolean isDirectory() 

Document

Is this a directory?
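
Before the project examples, here is a minimal self-contained sketch of the call itself; the path /tmp/example and the default Configuration are illustrative assumptions, not taken from the sources below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsDirectoryExample {
    public static void main(String[] args) throws Exception {
        // Obtain a FileSystem for the default (possibly local) configuration.
        FileSystem fs = FileSystem.get(new Configuration());
        // getFileStatus() throws FileNotFoundException if the path is absent.
        FileStatus status = fs.getFileStatus(new Path("/tmp/example"));
        if (status.isDirectory()) {
            System.out.println(status.getPath() + " is a directory");
        } else {
            System.out.println(status.getPath() + " is a file");
        }
    }
}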

Usage

From source file:gobblin.util.AvroUtils.java

License:Apache License

// Recursively collect every file under 'dir' whose name ends with AVRO_SUFFIX.
private static void getAllNestedAvroFiles(FileStatus dir, List<FileStatus> files, FileSystem fs)
        throws IOException {
    if (dir.isDirectory()) {
        FileStatus[] filesInDir = fs.listStatus(dir.getPath());
        if (filesInDir != null) {
            for (FileStatus f : filesInDir) {
                getAllNestedAvroFiles(f, files, fs);
            }
        }
    } else if (dir.getPath().getName().endsWith(AVRO_SUFFIX)) {
        files.add(dir);
    }
}

From source file:gobblin.util.FileListUtils.java

License:Apache License

// Recursively walk fileStatus, adding files accepted by fileFilter; the filter
// is also applied to directories when applyFilterToDirectories is set.
private static List<FileStatus> listFilesRecursivelyHelper(FileSystem fs, List<FileStatus> files,
        FileStatus fileStatus, PathFilter fileFilter, boolean applyFilterToDirectories)
        throws FileNotFoundException, IOException {
    if (fileStatus.isDirectory()) {
        for (FileStatus status : fs.listStatus(fileStatus.getPath(),
                applyFilterToDirectories ? fileFilter : NO_OP_PATH_FILTER)) {
            if (status.isDirectory()) {
                listFilesRecursivelyHelper(fs, files, status, fileFilter, applyFilterToDirectories);
            } else {
                files.add(status);
            }
        }
    } else if (fileFilter.accept(fileStatus.getPath())) {
        files.add(fileStatus);
    }
    return files;
}
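
For comparison, Hadoop 2.x and later can perform this walk natively via FileSystem.listFiles(path, true), which returns only plain files, so no explicit isDirectory() check is needed. A hedged sketch (applying the filter per returned file is an assumption that mirrors the helpers above):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;

public class ListFilesSketch {
    // Recursive listing built on FileSystem.listFiles; directories are never
    // returned by the iterator, only files.
    static List<LocatedFileStatus> listRecursively(FileSystem fs, Path root, PathFilter filter)
            throws IOException {
        List<LocatedFileStatus> files = new ArrayList<>();
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(root, true);
        while (it.hasNext()) {
            LocatedFileStatus status = it.next();
            if (filter.accept(status.getPath())) {
                files.add(status);
            }
        }
        return files;
    }
}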

From source file:gobblin.util.FileListUtils.java

License:Apache License

// Collect the most deeply nested paths: empty directories are kept as-is, and
// plain files are kept when they pass fileFilter.
private static List<FileStatus> listMostNestedPathRecursivelyHelper(FileSystem fs, List<FileStatus> files,
        FileStatus fileStatus, PathFilter fileFilter) throws IOException {
    if (fileStatus.isDirectory()) {
        FileStatus[] curFileStatus = fs.listStatus(fileStatus.getPath());
        if (ArrayUtils.isEmpty(curFileStatus)) {
            files.add(fileStatus);
        } else {
            for (FileStatus status : curFileStatus) {
                listMostNestedPathRecursivelyHelper(fs, files, status, fileFilter);
            }
        }
    } else if (fileFilter.accept(fileStatus.getPath())) {
        files.add(fileStatus);
    }
    return files;
}

From source file:gobblin.util.FileListUtils.java

License:Apache License

// Add every path (file or directory) accepted by fileFilter, descending into
// directories; listing failures are logged and skipped.
private static List<FileStatus> listPathsRecursivelyHelper(FileSystem fs, List<FileStatus> files,
        FileStatus fileStatus, PathFilter fileFilter) {
    if (fileFilter.accept(fileStatus.getPath())) {
        files.add(fileStatus);
    }
    if (fileStatus.isDirectory()) {
        try {
            for (FileStatus status : fs.listStatus(fileStatus.getPath())) {
                listPathsRecursivelyHelper(fs, files, status, fileFilter);
            }
        } catch (IOException ioe) {
            LOG.error("Could not list contents of path " + fileStatus.getPath(), ioe);
        }
    }
    return files;
}

From source file:gobblin.util.HadoopUtils.java

License:Apache License

// Depth-first walk that accumulates only plain files under path.
private static void walk(List<FileStatus> results, FileSystem fileSystem, Path path) throws IOException {
    for (FileStatus status : fileSystem.listStatus(path)) {
        if (!status.isDirectory()) {
            results.add(status);
        } else {
            walk(results, fileSystem, status.getPath());
        }
    }
}

From source file:gobblin.util.PullFileLoader.java

License:Apache License

// Recursively load Java-properties and HOCON pull files under path, resolving
// each against the (optionally directory-scoped) fallback config.
private Collection<Config> loadPullFilesRecursivelyHelper(Path path, Config fallback,
        boolean loadGlobalProperties) {
    List<Config> pullFiles = Lists.newArrayList();

    try {
        if (loadGlobalProperties) {
            fallback = findAndLoadGlobalConfigInDirectory(path, fallback);
        }

        FileStatus[] statuses = this.fs.listStatus(path);
        if (statuses == null) {
            log.error("Path does not exist: " + path);
            return pullFiles;
        }

        for (FileStatus status : statuses) {
            try {
                if (status.isDirectory()) {
                    pullFiles.addAll(
                            loadPullFilesRecursivelyHelper(status.getPath(), fallback, loadGlobalProperties));
                } else if (this.javaPropsPullFileFilter.accept(status.getPath())) {
                    pullFiles.add(loadJavaPropsWithFallback(status.getPath(), fallback).resolve());
                } else if (this.hoconPullFileFilter.accept(status.getPath())) {
                    pullFiles.add(loadHoconConfigAtPath(status.getPath()).withFallback(fallback).resolve());
                }
            } catch (IOException ioe) {
                // Failed to load specific subpath, try with the other subpaths in this directory
                log.error(String.format("Failed to load %s. Skipping.", status.getPath()), ioe);
            }
        }

        return pullFiles;
    } catch (IOException ioe) {
        log.error("Could not load properties at path: " + path, ioe);
        return Lists.newArrayList();
    }
}

From source file:hdfs.jsr203.attribute.HadoopFileAttributeView.java

License:Apache License

// Map a requested attribute id to the corresponding FileStatus getter.
Object attribute(AttrID id, FileStatus hfas) {
    switch (id) {
    case accessTime:
        return hfas.getAccessTime();
    case blockSize:
        return hfas.getBlockSize();
    case group:
        return hfas.getGroup();
    case len:
        return hfas.getLen();
    case modificationTime:
        return hfas.getModificationTime();
    case owner:
        return hfas.getOwner();
    case replication:
        return hfas.getReplication();
    case isDirectory:
        return hfas.isDirectory();
    // TODO enable encryption
    //case isEncrypted:
    //    return hfas.isEncrypted();
    case isFile:
        return hfas.isFile();
    case isSymLink:
        return hfas.isSymlink();
    }
    return null;
}

From source file:hdfs.jsr203.HadoopDirectoryStream.java

License:Apache License

HadoopDirectoryStream(HadoopPath hadoopPath, DirectoryStream.Filter<? super java.nio.file.Path> filter)
        throws IOException {
    this.hadoopfs = hadoopPath.getFileSystem();
    this.path = hadoopPath;
    this.filter = filter;

    // Sanity check: the underlying path must resolve to a directory.
    FileStatus stat = hadoopPath.getFileSystem().getHDFS().getFileStatus(hadoopPath.getRawResolvedPath());
    if (!stat.isDirectory())
        throw new NotDirectoryException(hadoopPath.toString());
}

From source file:hdfs.jsr203.HadoopFileSystem.java

License:Apache License

public void deleteFile(org.apache.hadoop.fs.Path hadoopPath, boolean failIfNotExists) throws IOException {
    checkWritable();

    // If the path does not exist, fail only when asked to
    if (!this.fs.exists(hadoopPath)) {
        if (failIfNotExists)
            throw new NoSuchFileException(hadoopPath.toString());
    } else {
        FileStatus stat = this.fs.getFileStatus(hadoopPath);
        if (stat.isDirectory()) {
            FileStatus[] stats = this.fs.listStatus(hadoopPath);
            if (stats.length > 0)
                throw new DirectoryNotEmptyException(hadoopPath.toString());
        }
        // Try to delete with no recursion
        this.fs.delete(hadoopPath, false);
    }

}

From source file:hdfs.jsr203.HadoopFileSystem.java

License:Apache License

SeekableByteChannel newByteChannel(org.apache.hadoop.fs.Path path, Set<? extends OpenOption> options,
        FileAttribute<?>... attrs) throws IOException {

    checkOptions(options);

    // Fail if CREATE_NEW was requested and the file already exists
    if (options.contains(CREATE_NEW) && this.fs.exists(path)) {
        throw new FileAlreadyExistsException(path.toString());
    }

    if (options.contains(WRITE) || options.contains(APPEND)) {
        checkWritable();
        beginRead();
        try {
            final WritableByteChannel wbc = Channels.newChannel(newOutputStream(path, options, attrs));
            long leftover = 0;
            if (options.contains(APPEND)) {
                throw new IOException("APPEND NOT IMPLEMENTED");
            }
            final long offset = leftover;
            return new SeekableByteChannel() {
                long written = offset;

                public boolean isOpen() {
                    return wbc.isOpen();
                }

                public long position() throws IOException {
                    return written;
                }

                public SeekableByteChannel position(long pos) throws IOException {
                    throw new UnsupportedOperationException();
                }

                public int read(ByteBuffer dst) throws IOException {
                    throw new UnsupportedOperationException();
                }

                public SeekableByteChannel truncate(long size) throws IOException {
                    throw new UnsupportedOperationException();
                }

                public int write(ByteBuffer src) throws IOException {
                    int n = wbc.write(src);
                    written += n;
                    return n;
                }

                public long size() throws IOException {
                    return written;
                }

                public void close() throws IOException {
                    wbc.close();
                }
            };
        } finally {
            endRead();
        }
    } else {
        beginRead();
        try {
            ensureOpen();
            FileStatus e = this.fs.getFileStatus(path);
            if (e == null || e.isDirectory())
                throw new NoSuchFileException(path.toString());
            final FSDataInputStream inputStream = getInputStream(path);
            final ReadableByteChannel rbc = Channels.newChannel(inputStream);
            final long size = e.getLen();
            return new SeekableByteChannel() {
                long read = 0;

                public boolean isOpen() {
                    return rbc.isOpen();
                }

                public long position() throws IOException {
                    return read;
                }

                public SeekableByteChannel position(long pos) throws IOException {
                    // ReadableByteChannel is not buffered, so it reads through
                    inputStream.seek(pos);
                    read = pos;
                    return this;
                }

                public int read(ByteBuffer dst) throws IOException {
                    int n = rbc.read(dst);
                    if (n > 0) {
                        read += n;
                    }
                    return n;
                }

                public SeekableByteChannel truncate(long size) throws IOException {
                    throw new NonWritableChannelException();
                }

                public int write(ByteBuffer src) throws IOException {
                    throw new NonWritableChannelException();
                }

                public long size() throws IOException {
                    return size;
                }

                public void close() throws IOException {
                    rbc.close();
                }
            };
        } finally {
            endRead();
        }
    }
}