Example usage for org.apache.hadoop.fs FileStatus isDirectory

List of usage examples for org.apache.hadoop.fs FileStatus isDirectory

Introduction

This page lists example usages of org.apache.hadoop.fs.FileStatus.isDirectory(), collected from open-source projects.

Prototype

public boolean isDirectory() 

Document

Is this a directory?
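
A minimal, self-contained sketch of the prototype in use follows; the path /tmp/data, the class name IsDirectoryExample, and the default Configuration are placeholders for illustration, not taken from the project examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsDirectoryExample {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path path = new Path("/tmp/data"); // placeholder path

        // getFileStatus throws FileNotFoundException if the path does not exist
        FileStatus status = fs.getFileStatus(path);
        if (status.isDirectory()) {
            // List the immediate children of the directory
            for (FileStatus child : fs.listStatus(path)) {
                System.out.println(child.getPath() + (child.isDirectory() ? " (dir)" : " (file)"));
            }
        } else {
            System.out.println(path + " is a file of " + status.getLen() + " bytes");
        }
    }
}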

Usage

From source file:org.opencloudengine.garuda.backend.hdfs.HdfsServiceImpl.java

License:Open Source License

@Override
public HdfsListInfo list(String path, int start, int end, final String filter) throws Exception {
    HdfsListInfo hdfsListInfo = new HdfsListInfo();

    this.indexCheck(start, end);
    this.mustExists(path);

    FileSystem fs = fileSystemFactory.getFileSystem();
    Path fsPath = new Path(path);

    FileStatus fileStatus = fs.getFileStatus(fsPath);
    if (!fileStatus.isDirectory()) {
        this.notDirectoryException(fsPath.toString());
    }

    List<HdfsFileInfo> listStatus = new ArrayList<>();
    int count = 0;
    FileStatus fileStatuses = null;
    LocatedFileStatus next = null;
    RemoteIterator<LocatedFileStatus> remoteIterator = fs.listLocatedStatus(fsPath);
    while (remoteIterator.hasNext()) {
        next = remoteIterator.next();
        if (!StringUtils.isEmpty(filter)) {
            if (next.getPath().getName().contains(filter)) {
                count++;
                if (count >= start && count <= end) {
                    fileStatuses = fs.getFileStatus(next.getPath());
                    listStatus
                            .add(new HdfsFileInfo(fileStatuses, fs.getContentSummary(fileStatuses.getPath())));
                }
            }
        } else {
            count++;
            if (count >= start && count <= end) {
                fileStatuses = fs.getFileStatus(next.getPath());
                listStatus.add(new HdfsFileInfo(fileStatuses, fs.getContentSummary(fileStatuses.getPath())));
            }
        }
    }

    hdfsListInfo.setFileInfoList(listStatus);
    hdfsListInfo.setCount(count);
    return hdfsListInfo;
}

From source file:org.opencloudengine.garuda.backend.hdfs.HdfsServiceImpl.java

License:Open Source License

@Override
public boolean setOwner(String path, String owner, String group, boolean recursive) {
    try {
        this.rootCheck(path);
        this.mustExists(path);

        FileSystem fs = fileSystemFactory.getFileSystem();
        Path fsPath = new Path(path);

        FileStatus fileStatus = fs.getFileStatus(fsPath);
        if (fileStatus.isDirectory()) {
            this.runChown(recursive, owner, group, path);
        } else {
            this._setOwner(path, owner, group);
        }
        return true;

    } catch (Exception ex) {
        ex.printStackTrace();
        throw new ServiceException("Failed to set owner.", ex);
    }
}

From source file:org.opencloudengine.garuda.backend.hdfs.HdfsServiceImpl.java

License:Open Source License

@Override
public boolean setPermission(String path, String permission, boolean recursive) {
    try {
        this.rootCheck(path);
        this.mustExists(path);

        FileSystem fs = fileSystemFactory.getFileSystem();
        Path fsPath = new Path(path);

        FileStatus fileStatus = fs.getFileStatus(fsPath);
        if (fileStatus.isDirectory()) {
            this.runChmod(recursive, permission, path);
        } else {
            this._setPermission(path, permission);
        }
        return true;

    } catch (Exception ex) {
        ex.printStackTrace();
        throw new ServiceException("Failed to set permission.", ex);
    }
}

From source file:org.opencloudengine.garuda.model.HdfsFileInfo.java

License:Open Source License

public HdfsFileInfo(FileStatus fileStatus, ContentSummary contentSummary) {
    this.fullyQualifiedPath = fileStatus.getPath().toUri().getPath();
    this.filename = isEmpty(getFilename(fullyQualifiedPath)) ? getDirectoryName(fullyQualifiedPath)
            : getFilename(fullyQualifiedPath);
    this.length = fileStatus.getLen();
    this.path = getPath(fullyQualifiedPath);
    this.directory = fileStatus.isDirectory();
    this.file = !fileStatus.isDirectory();
    this.owner = fileStatus.getOwner();
    this.group = fileStatus.getGroup();
    this.blockSize = fileStatus.getBlockSize();
    this.replication = fileStatus.getReplication();
    this.modificationTime = fileStatus.getModificationTime();
    if (contentSummary != null) {
        this.spaceConsumed = contentSummary.getSpaceConsumed();
        this.quota = contentSummary.getQuota();
        this.spaceQuota = contentSummary.getSpaceQuota();
        this.directoryCount = contentSummary.getDirectoryCount();
        this.fileCount = contentSummary.getFileCount();
    }
    this.accessTime = fileStatus.getAccessTime();
    this.permission = fileStatus.getPermission().toString();
}

From source file:org.shaf.core.io.Location.java

License:Apache License

/**
 * Returns directories selected in this I/O location.
 *
 * @param recursive
 *            the flag indicating whether directories should be scanned recursively.
 * @return the selected directories.
 * @throws IOException
 *             if an I/O error occurs.
 * @throws FileNotFoundException
 *             if a file is not found.
 */
public final Path[] getDirectories(final boolean recursive) throws FileNotFoundException, IOException {
    Set<Path> content = new TreeSet<>();

    Stack<Path> lookup = new Stack<>();
    lookup.push(this.getAbsolutePath());

    while (!lookup.empty()) {
        for (FileStatus status : this.fs.listStatus(lookup.pop())) {
            if (status.isDirectory()) {
                content.add(new Path(IOUtils.normalizePath(status.getPath()).toString()
                        .substring(this.getAbsolutePath().toString().length())));

                if (recursive) {
                    lookup.push(status.getPath());
                }
            }
        }
    }

    return content.toArray(new Path[content.size()]);
}

From source file:org.shaf.core.io.Location.java

License:Apache License

public final Location[] getContent() throws FileNotFoundException, IOException {
    Set<Path> paths = new TreeSet<>();

    if (this.isDirectory()) {
        Stack<Path> lookup = new Stack<>();
        lookup.push(this.getAbsolutePath());

        boolean isRoot = true;
        while (!lookup.empty()) {
            FileStatus[] statuses = this.fs.listStatus(lookup.pop());
            if (statuses != null) {
                for (FileStatus status : statuses) {

                    if (isRoot && !this.filter.accept(status.getPath())) {
                        continue;
                    }

                    paths.add(new Path(IOUtils.normalizePath(status.getPath()).toString()
                            .substring(this.getAbsolutePath().toString().length() + 1)));

                    if (status.isDirectory()) {
                        lookup.push(status.getPath());
                    }
                }
            }
            isRoot = false;
        }
    } else {
        paths.add(this.path);
    }

    Location[] content = new Location[0];
    for (Path path : paths) {
        content = ObjectArrays.concat(content, this.resolve(path));
    }

    return content;
}

From source file:org.springframework.xd.integration.test.JdbcHdfsTest.java

License:Apache License

/**
 * Asserts that the jdbcHdfs job has written the test data from a JDBC source table to the HDFS file system.
 *
 */
@Test
public void testIncrementalJdbcHdfsJobWithColumnsAndTablePartitionDifferentFromCheck() {
    // Deploy stream and job.
    jdbcSink.getJdbcTemplate().getDataSource();
    IncrementalJdbcHdfsJob job = new IncrementalJdbcHdfsJob(IncrementalJdbcHdfsJob.DEFAULT_DIRECTORY,
            IncrementalJdbcHdfsJob.DEFAULT_FILE_NAME, IncrementalJdbcHdfsJob.DEFAULT_TABLE,
            "payload,checkColumn", "payload", 3, "checkColumn", -1);
    // Use a trigger to send data to JDBC
    jdbcSink.columns("payload,checkColumn");
    stream("dataSender", sources.http() + XD_DELIMITER + jdbcSink);

    sources.httpSource("dataSender").postData("{\"payload\": 1, \"checkColumn\": 1}");
    sources.httpSource("dataSender").postData("{\"payload\": 2, \"checkColumn\": 1}");
    sources.httpSource("dataSender").postData("{\"payload\": 3, \"checkColumn\": 1}");

    job(jobName, job.toDSL(), true);
    jobLaunch(jobName);
    waitForJobToComplete(jobName);

    // Evaluate the results of the test.
    String dir = JdbcHdfsJob.DEFAULT_DIRECTORY + "/";
    String path0 = JdbcHdfsJob.DEFAULT_DIRECTORY + "/" + JdbcHdfsJob.DEFAULT_FILE_NAME + "-p0" + "-0.csv";
    String path1 = JdbcHdfsJob.DEFAULT_DIRECTORY + "/" + JdbcHdfsJob.DEFAULT_FILE_NAME + "-p1" + "-0.csv";
    String path2 = JdbcHdfsJob.DEFAULT_DIRECTORY + "/" + JdbcHdfsJob.DEFAULT_FILE_NAME + "-p2" + "-0.csv";
    assertPathsExists(path0, path1, path2);
    Collection<FileStatus> fileStatuses = hadoopUtil.listDir(dir);
    assertEquals("The number of files in list result should only be 4. The directory itself and 3 files. ", 4,
            fileStatuses.size());
    for (FileStatus fileStatus : fileStatuses) {
        if (!fileStatus.isDirectory()) {
            assertTrue("The file should be of reasonable size",
                    fileStatus.getLen() > 2 && fileStatus.getLen() < 10);
        }
    }

    sources.httpSource("dataSender").postData("{\"payload\": 4, \"checkColumn\": 2}");
    sources.httpSource("dataSender").postData("{\"payload\": 5, \"checkColumn\": 2}");
    sources.httpSource("dataSender").postData("{\"payload\": 6, \"checkColumn\": 2}");

    jobLaunch(jobName);
    waitForJobToComplete(jobName, 2);

    // Evaluate the results of the test.
    String path3 = JdbcHdfsJob.DEFAULT_DIRECTORY + "/" + JdbcHdfsJob.DEFAULT_FILE_NAME + "-p0" + "-1.csv";
    String path4 = JdbcHdfsJob.DEFAULT_DIRECTORY + "/" + JdbcHdfsJob.DEFAULT_FILE_NAME + "-p1" + "-1.csv";
    String path5 = JdbcHdfsJob.DEFAULT_DIRECTORY + "/" + JdbcHdfsJob.DEFAULT_FILE_NAME + "-p2" + "-1.csv";
    assertPathsExists(path3, path4, path5);
    fileStatuses = hadoopUtil.listDir(dir);
    assertEquals("The number of files in list result should only be 7. The directory itself and 5 files. ", 7,
            fileStatuses.size());
    for (FileStatus fileStatus : fileStatuses) {
        if (!fileStatus.isDirectory()) {
            assertTrue("The file should be of reasonable size",
                    fileStatus.getLen() > 2 && fileStatus.getLen() < 10);
        }
    }
}

From source file:org.springframework.xd.integration.test.JdbcHdfsTest.java

License:Apache License

/**
 * Asserts that the jdbcHdfs job has written the test data from a JDBC source table to the HDFS file system.
 *
 */
@Test
public void testPartitionedJdbcHdfsJobWithColumnsTable() {
    // Deploy stream and job.
    jdbcSink.columns(PartitionedJdbcHdfsJob.DEFAULT_COLUMN_NAMES);
    String data0 = "{\"id\":1,\"name\":\"Sven\"}";
    String data1 = "{\"id\":2,\"name\":\"Anna\"}";
    String data2 = "{\"id\":3,\"name\":\"Nisse\"}";
    jdbcSink.getJdbcTemplate().getDataSource();
    PartitionedJdbcHdfsJob job = jobs.partitionedJdbcHdfsJob();
    // Use a trigger to send data to JDBC
    stream("dataSender", sources.http() + XD_DELIMITER + jdbcSink);
    sources.httpSource("dataSender").postData(data0);
    sources.httpSource("dataSender").postData(data1);
    sources.httpSource("dataSender").postData(data2);

    job(jobName, job.toDSL(), true);
    jobLaunch(jobName);
    waitForJobToComplete(jobName);
    // Evaluate the results of the test.
    String dir = JdbcHdfsJob.DEFAULT_DIRECTORY + "/";
    String path0 = JdbcHdfsJob.DEFAULT_DIRECTORY + "/" + JdbcHdfsJob.DEFAULT_FILE_NAME + "-p0" + "-0.csv";
    String path1 = JdbcHdfsJob.DEFAULT_DIRECTORY + "/" + JdbcHdfsJob.DEFAULT_FILE_NAME + "-p1" + "-0.csv";
    String path2 = JdbcHdfsJob.DEFAULT_DIRECTORY + "/" + JdbcHdfsJob.DEFAULT_FILE_NAME + "-p2" + "-0.csv";
    assertPathsExists(path0, path1, path2);
    Collection<FileStatus> fileStatuses = hadoopUtil.listDir(dir);
    assertEquals("The number of files should be 4. The directory and 3 files. ", 4, fileStatuses.size());
    for (FileStatus fileStatus : fileStatuses) {
        if (!fileStatus.isDirectory()) {
            assertTrue("The file should be of reasonable size",
                    fileStatus.getLen() > 5 && fileStatus.getLen() < 10);
        }
    }
}

From source file:org.springframework.xd.shell.hadoop.FsShellCommands.java

License:Apache License

@CliCommand(value = PREFIX + "rm", help = "Remove files in the HDFS")
public void rm(@CliOption(key = { "",
        PATH }, mandatory = false, unspecifiedDefaultValue = ".", help = "path to be deleted") final String path,
        @CliOption(key = {
                "skipTrash" }, mandatory = false, specifiedDefaultValue = TRUE, unspecifiedDefaultValue = FALSE, help = "whether to skip trash") final boolean skipTrash,
        @CliOption(key = {
                RECURSIVE }, mandatory = false, specifiedDefaultValue = TRUE, unspecifiedDefaultValue = FALSE, help = "whether to recurse") final boolean recursive) {
    try {
        Path file = new Path(path);
        FileSystem fs = file.getFileSystem(getHadoopConfiguration());
        for (Path p : FileUtil.stat2Paths(fs.globStatus(file), file)) {
            FileStatus status = fs.getFileStatus(p);
            if (status.isDirectory() && !recursive) {
                LOG.error("To remove directory, please use 'fs rm </path/to/dir> --recursive' instead");
                return;
            }
            if (!skipTrash) {
                Trash trash = new Trash(fs, getHadoopConfiguration());
                trash.moveToTrash(p);
            }
            fs.delete(p, recursive);
        }
    } catch (Exception t) {
        LOG.error("Exception: run HDFS shell failed. Message is: " + t.getMessage());
    } catch (Error t) {
        LOG.error("Error: run HDFS shell failed. Message is: " + t.getMessage());
    }
}

From source file:org.trustedanalytics.auth.gateway.hdfs.HdfsClient.java

License:Apache License

/**
 * Gets the list of children for the given path.
 * @param path Path to directory
 * @param recursive Optional recursive parameter
 * @return the list of file statuses found under the path
 * @throws IOException
 */
private List<FileStatus> getAllChildens(Path path, boolean recursive) throws IOException {
    List<FileStatus> files = new ArrayList<>();
    FileStatus[] statuses = fileSystem.listStatus(path);

    for (FileStatus status : statuses) {
        files.add(status);
        if (status.isDirectory() && recursive)
            files.addAll(getAllChildens(status.getPath(), recursive));
    }

    return files;
}
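
As a usage note on the recursive examples above: when only regular files are needed, Hadoop's built-in recursive iterator can replace the manual directory walk. Below is a minimal sketch, assuming a configured FileSystem and an existing root path (both placeholders); note that FileSystem.listFiles(path, true) traverses directories but reports files only, so a manual walk is still required when directory entries themselves must be collected.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListFilesRecursively {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path root = new Path("/tmp/data"); // placeholder path

        // Recursively iterate over all regular files below root
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(root, true);
        while (it.hasNext()) {
            LocatedFileStatus status = it.next();
            System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
        }
    }
}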