Example usage for org.apache.hadoop.fs FileStatus isDirectory

Introduction

This page collects example usages of the org.apache.hadoop.fs.FileStatus method isDirectory().

Prototype

public boolean isDirectory() 

Document

Is this a directory?
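
For orientation, here is a minimal, self-contained sketch of the call. The default configuration and the /tmp/data path are placeholders for illustration, not values taken from the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsDirectoryExample {
    public static void main(String[] args) throws Exception {
        // Connect to the file system named in the default configuration.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // /tmp/data is a placeholder path; getFileStatus() throws
        // FileNotFoundException if the path does not exist.
        FileStatus status = fs.getFileStatus(new Path("/tmp/data"));
        if (status.isDirectory()) {
            System.out.println(status.getPath() + " is a directory");
        } else {
            System.out.println(status.getPath() + " is a regular file");
        }
    }
}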

Usage

From source file:com.run.mapred.hbase2tsv.HFileInputFormat_mr2.java

License:Apache License

@Override
protected FileStatus[] listStatus(JobConf job) throws IOException {
    List<FileStatus> result = new ArrayList<FileStatus>();

    // Explode out directories that match the original FileInputFormat
    // filters since HFiles are written to directories where the
    // directory name is the column name
    for (FileStatus status : super.listStatus(job)) {
        if (status.isDirectory()) {
            FileSystem fs = status.getPath().getFileSystem(job);
            for (FileStatus match : fs.listStatus(status.getPath(), HIDDEN_FILE_FILTER)) {
                result.add(match);
            }
        } else {
            result.add(status);
        }
    }

    // toArray() with no argument returns Object[]; use a typed array instead.
    return result.toArray(new FileStatus[result.size()]);
}

From source file:com.splicemachine.storage.HNIOFileSystem.java

License:Apache License

@Override
public boolean createDirectory(Path path, boolean errorIfExists) throws IOException {
    org.apache.hadoop.fs.Path f = toHPath(path);
    if (LOG.isTraceEnabled())
        SpliceLogUtils.trace(LOG, "createDirectory(): path=%s", f);
    try {
        FileStatus fileStatus = fs.getFileStatus(f);
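        // The path already exists; report success only when errorIfExists is
        // false and the existing path is a directory.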
        return !errorIfExists && fileStatus.isDirectory();
    } catch (FileNotFoundException fnfe) {
        return fs.mkdirs(f);
    }
}

From source file:com.splicemachine.storage.HNIOFileSystem.java

License:Apache License

@Override
public boolean createDirectory(String fullPath, boolean errorIfExists) throws IOException {
    boolean isTrace = LOG.isTraceEnabled();
    if (isTrace)
        SpliceLogUtils.trace(LOG, "createDirectory(): path string=%s", fullPath);
    org.apache.hadoop.fs.Path f = new org.apache.hadoop.fs.Path(fullPath);
    if (isTrace)
        SpliceLogUtils.trace(LOG, "createDirectory(): hdfs path=%s", f);
    try {
        FileStatus fileStatus = fs.getFileStatus(f);
        if (isTrace)
            SpliceLogUtils.trace(LOG, "createDirectory(): file status=%s", fileStatus);
        return !errorIfExists && fileStatus.isDirectory();
    } catch (FileNotFoundException fnfe) {
        if (isTrace)
            SpliceLogUtils.trace(LOG, "createDirectory(): directory not found so we will create it: %s", f);
        boolean created = fs.mkdirs(f);
        if (isTrace)
            SpliceLogUtils.trace(LOG, "createDirectory(): created=%s", created);
        return created;
    }
}

From source file:com.streamsets.pipeline.stage.origin.hdfs.spooler.HdfsFile.java

License:Apache License

@SuppressWarnings("unchecked")
public Map<String, Object> getFileMetadata() throws IOException {
    FileStatus file = fs.getFileStatus(filePath);
    Map<String, Object> metadata = new HashMap<>();
    metadata.put(HeaderAttributeConstants.FILE_NAME, file.getPath().getName());
    metadata.put(HeaderAttributeConstants.FILE, file.getPath().toUri().getPath());
    metadata.put(HeaderAttributeConstants.LAST_MODIFIED_TIME, file.getModificationTime());
    metadata.put(HeaderAttributeConstants.LAST_ACCESS_TIME, file.getAccessTime());
    metadata.put(HeaderAttributeConstants.IS_DIRECTORY, file.isDirectory());
    metadata.put(HeaderAttributeConstants.IS_SYMBOLIC_LINK, file.isSymlink());
    metadata.put(HeaderAttributeConstants.SIZE, file.getLen());
    metadata.put(HeaderAttributeConstants.OWNER, file.getOwner());
    metadata.put(HeaderAttributeConstants.GROUP, file.getGroup());
    metadata.put(HeaderAttributeConstants.BLOCK_SIZE, file.getBlockSize());
    metadata.put(HeaderAttributeConstants.REPLICATION, file.getReplication());
    metadata.put(HeaderAttributeConstants.IS_ENCRYPTED, file.isEncrypted());

    FsPermission permission = file.getPermission();
    if (permission != null) {
        metadata.put(PERMISSIONS, permission.toString());
    }

    return metadata;
}

From source file:com.streamsets.pipeline.stage.origin.hdfs.spooler.HdfsFileSystem.java

License:Apache License

public void addFiles(WrappedFile dirFile, WrappedFile startingFile, List<WrappedFile> toProcess,
        boolean includeStartingFile, boolean useLastModified) throws IOException {
    final long scanTime = System.currentTimeMillis();

    PathFilter pathFilter = new PathFilter() {
        @Override
        public boolean accept(Path entry) {
            try {
                FileStatus fileStatus = fs.getFileStatus(entry);
                if (fileStatus.isDirectory()) {
                    return false;
                }

                if (!patternMatches(entry.getName())) {
                    return false;
                }

                HdfsFile hdfsFile = new HdfsFile(fs, entry);
                // SDC-3551: Pick up only files with mtime strictly less than scan time.
                if (fileStatus.getModificationTime() < scanTime) {
                    if (startingFile == null || startingFile.toString().isEmpty()) {
                        toProcess.add(hdfsFile);
                    } else {
                        int compares = compare(hdfsFile, startingFile, useLastModified);
                        if (includeStartingFile) {
                            if (compares >= 0) {
                                toProcess.add(hdfsFile);
                            }
                        } else {
                            if (compares > 0) {
                                toProcess.add(hdfsFile);
                            }
                        }
                    }
                }
            } catch (IOException ex) {
                LOG.error("Failed to open file {}", entry.toString(), ex);
            }
            return false;
        }
    };

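    // The filter above always returns false, so globStatus() itself returns
    // no matches; it is invoked only so that accept() visits each entry and
    // adds qualifying files to toProcess as a side effect.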
    fs.globStatus(new Path(dirFile.getAbsolutePath(), "*"), pathFilter);
}

From source file:com.streamsets.pipeline.stage.origin.hdfs.spooler.HdfsFileSystem.java

License:Apache License

public void addDirectory(WrappedFile dirPath, List<WrappedFile> directories) throws Exception {
    PathFilter pathFilter = new PathFilter() {
        @Override
        public boolean accept(Path entry) {
            try {
                FileStatus fileStatus = fs.getFileStatus(entry);
                if (fileStatus.isDirectory()) {
                    if (processSubdirectories) {
                        directories.add(new HdfsFile(fs, entry));
                    }
                    return false;
                }
            } catch (IOException ex) {
                LOG.error("Failed to open file {}", entry.toString(), ex);
            }
            return false;
        }
    };

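    // As in addFiles(), globStatus() is called only for the filter's side
    // effect of collecting subdirectories into the directories list.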
    fs.globStatus(new Path(dirPath.getAbsolutePath(), "*"), pathFilter);
}

From source file:com.thinkbiganalytics.datalake.authorization.hdfs.HDFSUtil.java

License:Apache License

private void listAllDirAndFlushPolicy(FileSystem fileSystem, Path path)
        throws FileNotFoundException, IOException {

    FileStatus[] fileStatus = fileSystem.listStatus(path);

    for (FileStatus status : fileStatus) {

        // Apply ACL recursively on each file/directory.
        if (status.isDirectory()) {

            // Flush ACL before creating new one.
            flushAcl(fileSystem, status.getPath());

            listAllDirAndFlushPolicy(fileSystem, status.getPath());
        } else {

            // Flush ACL before creating new one.
            flushAcl(fileSystem, status.getPath());
        }
    }
}

From source file:com.thinkbiganalytics.datalake.authorization.hdfs.HDFSUtil.java

License:Apache License

/**
 * @param fileSystem : HDFS fileSystem object
 * @param path       : Path on which ACL needs to be created
 * @param groups     : List of groups to which permission needs to be granted.
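 * @param hdfsPermission : permission string, translated into an FsAction by getFinalPermission()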
 */

public void listAllDirAndApplyPolicy(FileSystem fileSystem, Path path, String groups, String hdfsPermission)
        throws FileNotFoundException, IOException {
    FsAction fsActionObject = getFinalPermission(hdfsPermission);
    FileStatus[] fileStatus = fileSystem.listStatus(path);

    for (FileStatus status : fileStatus) {

        // Flush ACL before creating new one.
        flushAcl(fileSystem, status.getPath());

        // Apply ACL recursively on each file/directory.
        if (status.isDirectory()) {
            String[] groupListForPermission = groups.split(",");
            for (int groupCounter = 0; groupCounter < groupListForPermission.length; groupCounter++) {

                // Create HDFS ACL entries for this group on the current path
                AclEntry aclEntryOwner = new AclEntry.Builder().setName(groupListForPermission[groupCounter])
                        .setPermission(fsActionObject).setScope(AclEntryScope.ACCESS)
                        .setType(AclEntryType.GROUP).build();

                AclEntry aclEntryOther = new AclEntry.Builder().setPermission(FsAction.NONE)
                        .setScope(AclEntryScope.ACCESS).setType(AclEntryType.OTHER).build();

                // Apply ACL on Path
                applyAcl(fileSystem, status.getPath(), aclEntryOwner);
                applyAcl(fileSystem, status.getPath(), aclEntryOther);

            }

            // Recurse to apply the ACL on each subdirectory
            listAllDirAndApplyPolicy(fileSystem, status.getPath(), groups, hdfsPermission);
        } else {
            String[] groupListForPermission = groups.split(",");
            for (int groupCounter = 0; groupCounter < groupListForPermission.length; groupCounter++) {

                // Create HDFS ACL entries for this group on the current path
                AclEntry aclEntryOwner = new AclEntry.Builder().setName(groupListForPermission[groupCounter])
                        .setPermission(fsActionObject).setScope(AclEntryScope.ACCESS)
                        .setType(AclEntryType.GROUP).build();

                AclEntry aclEntryOther = new AclEntry.Builder().setPermission(FsAction.NONE)
                        .setScope(AclEntryScope.ACCESS).setType(AclEntryType.OTHER).build();

                // Apply ACL on Path
                applyAcl(fileSystem, status.getPath(), aclEntryOwner);
                applyAcl(fileSystem, status.getPath(), aclEntryOther);

            }
        }
    }
}

From source file:com.thinkbiganalytics.kylo.catalog.spark.sources.spark.HighWaterMarkInputFormat.java

License:Apache License

@Nonnull
@Override
public List<FileStatus> listStatus(@Nonnull final JobContext job) throws IOException {
    // Get job configuration
    long highWaterMark = Math.max(lastHighWaterMark, getHighWaterMark(job));
    final long maxAge = HighWaterMarkInputFormat.getMaxFileAge(job);
    final long minAge = HighWaterMarkInputFormat.getMinFileAge(job);

    if (minAge > maxAge) {
        throw new IOException(MIN_FILE_AGE + " cannot be greater than " + MAX_FILE_AGE);
    }

    // List and filter files
    final List<FileStatus> allFiles = super.listStatus(job);
    final List<FileStatus> jobFiles = new ArrayList<>(allFiles.size());
    final long currentTime = currentTimeMillis();

    for (final FileStatus file : allFiles) {
        final long fileTime = file.getModificationTime();
        final long fileAge = currentTime - fileTime;

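        // Keep regular files whose age falls within [minAge, maxAge] and
        // whose modification time is newer than the high-water mark.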
        if (!file.isDirectory() && fileAge >= minAge && fileAge <= maxAge && fileTime > highWaterMark) {
            jobFiles.add(file);

            if (fileTime > lastHighWaterMark) {
                lastHighWaterMark = fileTime;
            }
        }
    }

    lastHighWaterMark = Math.max(lastHighWaterMark, highWaterMark);
    return jobFiles;
}

From source file:com.turn.camino.Camino.java

License:Open Source License

/**
 * Materialize path
 *
 * Converts a path or path pattern into zero or more actual paths
 *
 * @param value rendered value of path
 * @param fileSystem file system
 * @return list of path details for each matched path
 * @throws IOException if an I/O error occurs while accessing the file system
 */
protected List<PathDetail> materializePath(String value, FileSystem fileSystem) throws IOException {

    // using value to find path
    FileStatus[] fss = fileSystem.globStatus(new org.apache.hadoop.fs.Path(value));

    // path doesn't exist
    if (fss == null || fss.length == 0) {
        return Collections.emptyList();
    }

    // found match(es)
    List<PathDetail> pathDetails = Lists.newArrayListWithExpectedSize(fss.length);
    for (FileStatus fs : fss) {
        PathDetail pathDetail = new PathDetail(fs.getPath().toString(), fs.isDirectory(), fs.getLen(),
                fs.getModificationTime());
        pathDetails.add(pathDetail);
    }

    // return path details
    return pathDetails;
}