Example usage for org.apache.hadoop.hdfs.protocol DirectoryListing hasMore

Introduction

On this page you can find example usages of org.apache.hadoop.hdfs.protocol.DirectoryListing.hasMore().

Prototype

public boolean hasMore() 

Document

Checks whether there are more entries left to be listed.
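
Before the examples, here is a minimal sketch of the pagination idiom that hasMore() enables, built only from the calls that appear in the examples below (listPaths, getPartialListing, getLastName, and HdfsFileStatus.EMPTY_NAME). The method name listAll and the dfsClient/dirPath parameters are hypothetical; the sketch assumes the DFSClient is already connected.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class ListAllExample {
    // Collects every entry of a directory, batch by batch.
    static List<HdfsFileStatus> listAll(DFSClient dfsClient, String dirPath) throws IOException {
        List<HdfsFileStatus> entries = new ArrayList<HdfsFileStatus>();
        // An empty start-after name requests the first batch.
        DirectoryListing listing = dfsClient.listPaths(dirPath, HdfsFileStatus.EMPTY_NAME);
        while (listing != null) {
            for (HdfsFileStatus status : listing.getPartialListing()) {
                entries.add(status);
            }
            if (!listing.hasMore()) { // the server has returned the final batch
                break;
            }
            // Resume after the last name of the previous batch.
            listing = dfsClient.listPaths(dirPath, listing.getLastName());
        }
        return entries;
    }
}

Each example below is a variation of this loop: hasMore() decides whether another listPaths call is needed, and getLastName() supplies the continuation token.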

Usage

From source file: MStress_Client.java

License: Open Source License

private static int listDFSPaths() {
    Date alpha = new Date();
    int inodeCount = 0;

    String basePath = TEST_BASE_DIR + "/" + hostName_ + "_" + processName_;
    Queue<String> pending = new LinkedList<String>();
    pending.add(basePath);

    while (!pending.isEmpty()) {
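        // Each queued directory may require several listPaths calls to enumerate fully.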
        String parent = pending.remove();
        DirectoryListing thisListing;
        try {
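            // An empty start-after name (EMPTY_NAME) requests the first batch of entries.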
            thisListing = dfsClient_.listPaths(parent, HdfsFileStatus.EMPTY_NAME);
            if (thisListing == null || thisListing.getPartialListing().length == 0) {
                //System.out.println("Empty directory");
                continue;
            }
            do {
                HdfsFileStatus[] children = thisListing.getPartialListing();
                for (int i = 0; i < children.length; i++) {
                    String localName = children[i].getLocalName();
                    //System.out.printf("Readdir going through [%s/%s]\n", parent, localName);
                    if (localName.equals(".") || localName.equals("..")) {
                        continue;
                    }
                    inodeCount++;
                    if (inodeCount % COUNT_INCR == 0) {
                        System.out.printf("Readdir paths so far: %d\n", inodeCount);
                    }
                    if (children[i].isDir()) {
                        pending.add(parent + "/" + localName);
                    }
                }
                if (!thisListing.hasMore()) {
                    break;
                } else {
                    //System.out.println("Remaining entries " + Integer.toString(thisListing.getRemainingEntries()));
                }
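                // Fetch the next batch, resuming after the last name already returned.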
                thisListing = dfsClient_.listPaths(parent, thisListing.getLastName());
            } while (thisListing != null);
        } catch (IOException e) {
            e.printStackTrace();
            return -1;
        }
    }

    Date zigma = new Date();
    System.out.printf("Client: Directory walk done over %d inodes in %d msec\n", inodeCount,
            timeDiffMilliSec(alpha, zigma));
    return 0;
}

From source file: com.mellanox.r4h.DistributedFileSystem.java

License: Apache License

private FileStatus[] listStatusInternal(Path p) throws IOException {
    String src = getPathName(p);

    // fetch the first batch of entries in the directory
    DirectoryListing thisListing = dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME);

    if (thisListing == null) { // the directory does not exist
        throw new FileNotFoundException("File " + p + " does not exist.");
    }

    HdfsFileStatus[] partialListing = thisListing.getPartialListing();
    if (!thisListing.hasMore()) { // got all entries of the directory
        FileStatus[] stats = new FileStatus[partialListing.length];
        for (int i = 0; i < partialListing.length; i++) {
            stats[i] = partialListing[i].makeQualified(getUri(), p);
        }
        statistics.incrementReadOps(1);
        return stats;
    }

    // The directory is too big to list in a single batch;
    // estimate the total number of entries in the directory.
    int totalNumEntries = partialListing.length + thisListing.getRemainingEntries();
    ArrayList<FileStatus> listing = new ArrayList<FileStatus>(totalNumEntries);
    // add the first batch of entries to the array list
    for (HdfsFileStatus fileStatus : partialListing) {
        listing.add(fileStatus.makeQualified(getUri(), p));
    }
    statistics.incrementLargeReadOps(1);

    // now fetch more entries
    do {
        thisListing = dfs.listPaths(src, thisListing.getLastName());

        if (thisListing == null) { // the directory is deleted
            throw new FileNotFoundException("File " + p + " does not exist.");
        }

        partialListing = thisListing.getPartialListing();
        for (HdfsFileStatus fileStatus : partialListing) {
            listing.add(fileStatus.makeQualified(getUri(), p));
        }
        statistics.incrementLargeReadOps(1);
    } while (thisListing.hasMore());

    return listing.toArray(new FileStatus[listing.size()]);
}

From source file: com.pinterest.terrapin.TerrapinUtil.java

License: Apache License

/**
 * Retrieves the list of files under {@code hdfsDir} using {@code hdfsClient}.
 */
public static List<HdfsFileStatus> getHdfsFileList(DFSClient hdfsClient, String hdfsDir) throws IOException {
    List<HdfsFileStatus> fileList = Lists.newArrayList();
    // Build a list of files.
    DirectoryListing listing = null;
    String continuation = "";
    while (true) {
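        // An empty continuation token requests the first batch.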
        listing = hdfsClient.listPaths(hdfsDir, continuation.getBytes());
        for (HdfsFileStatus fileStatus : listing.getPartialListing()) {
            fileList.add(fileStatus);
        }
        // Stop when the server reports no more entries; otherwise remember where to resume.
        if (!listing.hasMore()) {
            break;
        } else {
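            // Note: round-tripping the raw byte[] name through the platform default
            // charset may mangle non-ASCII names; keeping the byte[] returned by
            // getLastName() would avoid that conversion.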
            continuation = new String(listing.getLastName());
        }
    }
    return fileList;
}