Example usage for org.apache.hadoop.fs FileStatus isFile

Introduction

This page presents example usages of org.apache.hadoop.fs.FileStatus.isFile(), collected from open source projects.

Prototype

public boolean isFile() 

Document

Is this a file?
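
Before the project examples below, here is a minimal, self-contained sketch of the call. The default Configuration and the sample path /tmp/example.txt are illustrative assumptions, not part of the original examples.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf); // default (local or configured) file system
        Path path = new Path("/tmp/example.txt"); // sample path, assumed for illustration

        // getFileStatus throws FileNotFoundException if the path does not exist
        FileStatus status = fs.getFileStatus(path);
        if (status.isFile()) {
            System.out.println(path + " is a regular file of " + status.getLen() + " bytes");
        } else if (status.isDirectory()) {
            System.out.println(path + " is a directory");
        }
    }
}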

Usage

From source file: hdfs.jsr203.attribute.HadoopFileAttributeView.java

License: Apache License

Object attribute(AttrID id, FileStatus hfas) {
    switch (id) {
    case accessTime:
        return hfas.getAccessTime();
    case blockSize:
        return hfas.getBlockSize();
    case group:
        return hfas.getGroup();
    case len:
        return hfas.getLen();
    case modificationTime:
        return hfas.getModificationTime();
    case owner:
        return hfas.getOwner();
    case replication:
        return hfas.getReplication();
    case isDirectory:
        return hfas.isDirectory();
    // TODO enable encryption
    //case isEncrypted:
    //    return hfas.isEncrypted();
    case isFile:
        return hfas.isFile();
    case isSymLink:
        return hfas.isSymlink();
    }
    return null;
}

From source file: husky.client.HuskyYarnClient.java

License: Apache License

private Pair<String, LocalResource> constructLocalResource(String name, String path, LocalResourceType type)
        throws IOException {
    LOG.info("Copying " + name + " (" + path + ") from the local file system");

    Path resourcePath = new Path(path);
    if (path.startsWith("hdfs://")) {
        FileStatus fileStatus = mFileSystem.getFileStatus(resourcePath);
        if (!fileStatus.isFile()) {
            throw new RuntimeException("Only files can be provided as local resources.");
        }
    } else {
        File file = new File(path);
        if (!file.exists()) {
            throw new RuntimeException("File does not exist: " + path);
        }
        if (!file.isFile()) {
            throw new RuntimeException("Only files can be provided as local resources.");
        }
    }

    // if the file is not on hdfs, upload it to hdfs first.
    if (!path.startsWith("hdfs://")) {
        Path src = resourcePath;
        String newPath = mLocalResourceHDFSPaths + '/' + mAppName + '/' + mAppId + '/' + name;
        resourcePath = new Path(newPath);
        mFileSystem.copyFromLocalFile(false, true, src, resourcePath);
        LOG.info("Upload " + path + " to " + newPath);
        path = newPath;
    }

    FileStatus fileStatus = mFileSystem.getFileStatus(resourcePath);

    LocalResource resource = Records.newRecord(LocalResource.class);
    resource.setType(type);
    resource.setVisibility(LocalResourceVisibility.APPLICATION);
    resource.setResource(ConverterUtils.getYarnUrlFromPath(resourcePath));
    resource.setTimestamp(fileStatus.getModificationTime());
    resource.setSize(fileStatus.getLen());

    return new Pair<String, LocalResource>(path, resource);
}
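
A caller typically collects the returned name/resource pairs into a map and attaches them to the container launch context. A brief sketch of that step; the method name, the "app.jar" resource, and the accessor on Pair are illustrative assumptions:

private ContainerLaunchContext buildLaunchContext(String appJarPath) throws IOException {
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    Pair<String, LocalResource> jar = constructLocalResource("app.jar", appJarPath, LocalResourceType.FILE);
    // accessor name assumed; adapt to the Pair type actually in use
    localResources.put("app.jar", jar.getSecond());
    ContainerLaunchContext context = Records.newRecord(ContainerLaunchContext.class);
    context.setLocalResources(localResources);
    return context;
}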

From source file: io.druid.storage.hdfs.HdfsFileTimestampVersionFinder.java

License: Apache License

private URI mostRecentInDir(final Path dir, final Pattern pattern) throws IOException {
    final PathFilter filter = new PathFilter() {
        @Override
        public boolean accept(Path path) {
            return pattern == null || pattern.matcher(path.getName()).matches();
        }
    };
    long modifiedTime = Long.MIN_VALUE;
    URI mostRecentURI = null;
    final FileSystem fs = dir.getFileSystem(config);
    for (FileStatus status : fs.listStatus(dir, filter)) {
        if (status.isFile()) {
            final long thisModifiedTime = status.getModificationTime();
            if (thisModifiedTime >= modifiedTime) {
                modifiedTime = thisModifiedTime;
                mostRecentURI = status.getPath().toUri();
            }
        }
    }

    return mostRecentURI;
}

From source file: io.github.thammegowda.Local2SeqFile.java

License: Apache License

private boolean filter(FileStatus file) {
    return file.isFile() && file.getLen() >= minFileSize && file.getLen() <= maxFileSize;
}

From source file: io.prestosql.plugin.hive.AbstractTestHiveClient.java

License: Apache License

protected Set<String> listAllDataFiles(HdfsContext context, Path path) throws IOException {
    Set<String> result = new HashSet<>();
    FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, path);
    if (fileSystem.exists(path)) {
        for (FileStatus fileStatus : fileSystem.listStatus(path)) {
            if (fileStatus.getPath().getName().startsWith(".presto")) {
                // skip hidden files
            } else if (fileStatus.isFile()) {
                result.add(fileStatus.getPath().toString());
            } else if (fileStatus.isDirectory()) {
                result.addAll(listAllDataFiles(context, fileStatus.getPath()));
            }
        }
    }
    return result;
}
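
For comparison, Hadoop's built-in FileSystem.listFiles(path, true) walks the tree recursively and yields only files, so the same listing can be written without explicit recursion. A sketch under the same hidden-file convention (the method name is illustrative):

private Set<String> listAllDataFilesIteratively(FileSystem fileSystem, Path path) throws IOException {
    Set<String> result = new HashSet<>();
    if (!fileSystem.exists(path)) { // listFiles throws FileNotFoundException otherwise
        return result;
    }
    RemoteIterator<LocatedFileStatus> files = fileSystem.listFiles(path, true);
    while (files.hasNext()) {
        LocatedFileStatus status = files.next();
        // note: unlike the recursive version above, this checks only the file's
        // own name, not whether it sits under a hidden .presto directory
        if (!status.getPath().getName().startsWith(".presto")) {
            result.add(status.getPath().toString());
        }
    }
    return result;
}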

From source file: io.prestosql.plugin.hive.metastore.SemiTransactionalHiveMetastore.java

License: Apache License

private static RecursiveDeleteResult doRecursiveDeleteFiles(FileSystem fileSystem, Path directory,
        List<String> filePrefixes, boolean deleteEmptyDirectories) {
    // don't delete hidden presto directories
    if (directory.getName().startsWith(".presto")) {
        return new RecursiveDeleteResult(false, ImmutableList.of());
    }

    FileStatus[] allFiles;
    try {
        allFiles = fileSystem.listStatus(directory);
    } catch (IOException e) {
        ImmutableList.Builder<String> notDeletedItems = ImmutableList.builder();
        notDeletedItems.add(directory.toString() + "/**");
        return new RecursiveDeleteResult(false, notDeletedItems.build());
    }

    boolean allDescendentsDeleted = true;
    ImmutableList.Builder<String> notDeletedEligibleItems = ImmutableList.builder();
    for (FileStatus fileStatus : allFiles) {
        if (fileStatus.isFile()) {
            Path filePath = fileStatus.getPath();
            String fileName = filePath.getName();
            boolean eligible = false;
            // never delete presto dot files
            if (!fileName.startsWith(".presto")) {
                eligible = filePrefixes.stream().anyMatch(fileName::startsWith);
            }
            if (eligible) {
                if (!deleteIfExists(fileSystem, filePath, false)) {
                    allDescendentsDeleted = false;
                    notDeletedEligibleItems.add(filePath.toString());
                }
            } else {
                allDescendentsDeleted = false;
            }
        } else if (fileStatus.isDirectory()) {
            RecursiveDeleteResult subResult = doRecursiveDeleteFiles(fileSystem, fileStatus.getPath(),
                    filePrefixes, deleteEmptyDirectories);
            if (!subResult.isDirectoryNoLongerExists()) {
                allDescendentsDeleted = false;
            }
            if (!subResult.getNotDeletedEligibleItems().isEmpty()) {
                notDeletedEligibleItems.addAll(subResult.getNotDeletedEligibleItems());
            }
        } else {
            allDescendentsDeleted = false;
            notDeletedEligibleItems.add(fileStatus.getPath().toString());
        }
    }
    if (allDescendentsDeleted && deleteEmptyDirectories) {
        verify(notDeletedEligibleItems.build().isEmpty());
        if (!deleteIfExists(fileSystem, directory, false)) {
            return new RecursiveDeleteResult(false, ImmutableList.of(directory.toString() + "/"));
        }
        return new RecursiveDeleteResult(true, ImmutableList.of());
    }
    return new RecursiveDeleteResult(false, notDeletedEligibleItems.build());
}

From source file: kogiri.mapreduce.preprocess.common.helpers.KmerIndexHelper.java

License: Open Source License

public static Path[] getAllKmerIndexIndexFilePath(Configuration conf, Path[] inputPaths) throws IOException {
    List<Path> inputFiles = new ArrayList<Path>();
    KmerIndexIndexPathFilter filter = new KmerIndexIndexPathFilter();

    for (Path path : inputPaths) {
        FileSystem fs = path.getFileSystem(conf);
        if (fs.exists(path)) {
            FileStatus status = fs.getFileStatus(path);
            if (status.isDirectory()) {
                // check child
                FileStatus[] entries = fs.listStatus(path);
                for (FileStatus entry : entries) {
                    if (entry.isFile()) {
                        if (filter.accept(entry.getPath())) {
                            inputFiles.add(entry.getPath());
                        }
                    }
                }
            } else if (status.isFile()) {
                if (filter.accept(status.getPath())) {
                    inputFiles.add(status.getPath());
                }
            }
        }
    }

    Path[] files = inputFiles.toArray(new Path[0]);
    return files;
}

From source file: net.sf.jfilesync.plugins.net.items.THdfs_plugin.java

License: Apache License

@Override
public boolean isFile(String path) throws IOException {
    Path file = new Path(path);
    FileStatus stat = fs.getFileStatus(file);
    return stat.isFile();
}
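
FileSystem.getFileStatus throws FileNotFoundException for a missing path, so the wrapper above propagates an exception rather than answering false. When a missing path should simply read as "not a file", a hedged variant (the method name is illustrative):

// Illustrative variant: report false for a missing path instead of throwing.
public boolean isExistingFile(String path) throws IOException {
    try {
        return fs.getFileStatus(new Path(path)).isFile();
    } catch (java.io.FileNotFoundException e) {
        return false;
    }
}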

From source file: nl.surfsara.newsreader.loader.ReadNewsreaderDocs.java

License: Apache License

@Override
public Long run() {
    long numfilesread = 0;
    Path sPath = new Path(source);
    File destDir = new File(dest);
    // accept an existing destination directory, or create it if missing
    if (destDir.isDirectory() || destDir.mkdirs()) {
        try {
            FileSystem fileSystem = FileSystem.get(conf);
            FileStatus[] globStatus = fileSystem.globStatus(sPath);
            for (FileStatus fss : globStatus) {
                if (fss.isFile()) {
                    Option optPath = SequenceFile.Reader.file(fss.getPath());
                    SequenceFile.Reader r = new SequenceFile.Reader(conf, optPath);

                    Text key = new Text();
                    Text val = new Text();

                    while (r.next(key, val)) {
                        File outputFile = new File(destDir, key.toString());
                        FileOutputStream fos = new FileOutputStream(outputFile);
                        InputStream is = IOUtils.toInputStream(val.toString());
                        IOUtils.copy(is, fos);
                        fos.flush();
                        fos.close();
                        numfilesread++;
                    }
                    r.close();
                }

            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    } else {
        System.out.println("Destination should be a directory.");
    }
    return numfilesread;
}

From source file: org.apache.accumulo.master.tableOps.CleanUp.java

License: Apache License

protected void merge(VolumeManager fs, Path src, Path dest) throws IOException {
    for (FileStatus child : fs.listStatus(src)) {
        final String childName = child.getPath().getName();
        final Path childInSrc = new Path(src, childName), childInDest = new Path(dest, childName);

        if (child.isFile()) {
            if (fs.exists(childInDest)) {
                log.warn("File already exists in archive, ignoring. " + childInDest);
            } else {
                fs.rename(childInSrc, childInDest);
            }
        } else if (child.isDirectory()) {
            if (fs.exists(childInDest)) {
                // Recurse
                merge(fs, childInSrc, childInDest);
            } else {
                fs.rename(childInSrc, childInDest);
            }
        } else {
            // Symlinks shouldn't exist in table directories.
            log.warn("Ignoring archiving of non file/directory: " + child);
        }
    }
}