Example usage for org.apache.hadoop.fs FileStatus getReplication

Introduction

On this page you can find example usage for org.apache.hadoop.fs FileStatus getReplication.

Prototype

public short getReplication() 

Document

Get the replication factor of a file.
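
Before the sourced examples, here is a minimal, self-contained sketch of the call (the path /tmp/example.txt and the default-constructed Configuration are illustrative assumptions, not taken from any example below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetReplicationExample {
    public static void main(String[] args) throws Exception {
        // Loads core-site.xml/hdfs-site.xml from the classpath, if present.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // "/tmp/example.txt" is a hypothetical path; substitute any existing file.
        FileStatus status = fs.getFileStatus(new Path("/tmp/example.txt"));

        // On HDFS, a directory's FileStatus reports a replication factor of 0.
        short replication = status.getReplication();
        System.out.println("Replication factor: " + replication);

        fs.close();
    }
}

Note that because directories carry no replication factor, callers often guard with a directory check, as the FsShell example below does with (!stat.isDir() ? stat.getReplication() : "-").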

Usage

From source file:org.opencloudengine.garuda.model.HdfsFileInfo.java

License:Open Source License

public HdfsFileInfo(FileStatus fileStatus, ContentSummary contentSummary) {
    this.fullyQualifiedPath = fileStatus.getPath().toUri().getPath();
    this.filename = isEmpty(getFilename(fullyQualifiedPath)) ? getDirectoryName(fullyQualifiedPath)
            : getFilename(fullyQualifiedPath);
    this.length = fileStatus.getLen();
    this.path = getPath(fullyQualifiedPath);
    this.directory = fileStatus.isDirectory();
    this.file = !fileStatus.isDirectory();
    this.owner = fileStatus.getOwner();
    this.group = fileStatus.getGroup();
    this.blockSize = fileStatus.getBlockSize();
    this.replication = fileStatus.getReplication();
    this.modificationTime = fileStatus.getModificationTime();
    if (contentSummary != null) {
        this.spaceConsumed = contentSummary.getSpaceConsumed();
        this.quota = contentSummary.getQuota();
        this.spaceQuota = contentSummary.getSpaceQuota();
        this.directoryCount = contentSummary.getDirectoryCount();
        this.fileCount = contentSummary.getFileCount();
    }
    this.accessTime = fileStatus.getAccessTime();
    this.permission = fileStatus.getPermission().toString();
}

From source file:org.openflamingo.fs.hdfs.HdfsFileInfo.java

License:Apache License

/**
 * Constructs file information from an HDFS {@link FileStatus}.
 *
 * @param fileStatus HDFS File Status
 */
public HdfsFileInfo(FileStatus fileStatus) {
    this.fullyQualifiedPath = fileStatus.getPath().toUri().getPath();
    this.filename = StringUtils.isEmpty(FileUtils.getFilename(fullyQualifiedPath))
            ? FileUtils.getDirectoryName(fullyQualifiedPath)
            : FileUtils.getFilename(fullyQualifiedPath);
    this.length = fileStatus.getLen();
    this.path = FileUtils.getPath(fullyQualifiedPath);
    this.directory = fileStatus.isDir();
    this.file = !fileStatus.isDir();
    this.owner = fileStatus.getOwner();
    this.group = fileStatus.getGroup();
    this.blockSize = fileStatus.getBlockSize();
    this.replication = fileStatus.getReplication();
    this.modificationTime = fileStatus.getModificationTime();
    this.accessTime = fileStatus.getAccessTime();
    this.setText(this.filename);
    this.setLeaf(file);
    this.setCls(directory ? "folder" : "file");
    this.setId(fullyQualifiedPath);
    this.permission = fileStatus.getPermission().toString();
}

From source file:org.openflamingo.fs.hdfs.HdfsFileSystemProvider.java

License:Apache License

@Override
public FileInfo getFileInfo(String path) {
    try {
        FileStatus fileStatus = fs.getFileStatus(new Path(path));
        HdfsFileInfo hdfsFileInfo = new HdfsFileInfo(fileStatus);

        ContentSummary summary = fs.getContentSummary(new Path(path));
        hdfsFileInfo.setBlockSize(fileStatus.getBlockSize());
        hdfsFileInfo.setReplication(fileStatus.getReplication());
        hdfsFileInfo.setDirectoryCount(summary.getDirectoryCount());
        hdfsFileInfo.setFileCount(summary.getFileCount());
        hdfsFileInfo.setQuota(summary.getQuota());
        hdfsFileInfo.setSpaceQuota(summary.getSpaceQuota());
        hdfsFileInfo.setSpaceConsumed(StringUtils.byteDesc(summary.getSpaceConsumed()));
        hdfsFileInfo.setLength(summary.getLength());

        return hdfsFileInfo;
    } catch (Exception ex) {
        throw new FileSystemException(bundle.message("S_FS", "CANNOT_GET_FILE_INFO", path), ex);
    }
}

From source file:org.smartfrog.services.hadoop.operations.dfs.DfsListDirImpl.java

License:Open Source License

/**
 * do the work
 *
 * @param fileSystem the filesystem; this is closed afterwards
 * @param conf       the configuration driving this operation
 * @throws Exception on any failure
 */
@Override
protected void performDfsOperation(FileSystem fileSystem, ManagedConfiguration conf) throws Exception {
    Path path = getPath();
    if (path == null) {
        throw new SmartFrogLivenessException("No path for the DfsListDir operation", this);
    }
    int minFileCount = sfResolve(ATTR_MIN_FILE_COUNT, 0, true);
    int maxFileCount = sfResolve(ATTR_MAX_FILE_COUNT, 0, true);
    long minTotalFileSize = sfResolve(ATTR_MIN_TOTAL_FILE_SIZE, 0L, true);
    long maxTotalFileSize = sfResolve(ATTR_MAX_TOTAL_FILE_SIZE, 0L, true);
    try {
        long size = 0;
        FileStatus[] stats = fileSystem.listStatus(path);
        if (stats == null) {
            throw new SmartFrogLivenessException("Path not found in the remote filesystem: " + path, this);
        }
        StringBuilder builder = new StringBuilder();
        builder.append("Listing of ").append(path).append("/\n");
        for (FileStatus file : stats) {
            size += file.getLen();
            builder.append(file.getPath().getName());
            builder.append("\n  size=").append(file.getLen());
            builder.append("\n  replication=").append(file.getReplication());
            builder.append("\n  last modified=").append(new Date(file.getModificationTime()).toString());
            builder.append("\n  owner=").append(file.getOwner());
            builder.append("\n  group=").append(file.getGroup());
            builder.append("\n  permissions=").append(file.getPermission()).append('\n');
        }
        String listing = builder.toString();
        sfLog().info(listing);
        int count = stats.length;
        sfLog().info("Files: " + count + "  total size=" + size);
        if (count < minFileCount) {
            throw new SmartFrogLivenessException("File count " + count + " is below the minFileCount value of "
                    + minFileCount + "\n" + listing, this);
        }
        if (maxFileCount > -1 && count > maxFileCount) {
            throw new SmartFrogLivenessException("File count " + count + " is above the maxFileCount value of "
                    + maxFileCount + "\n" + listing, this);
        }
        if (size < minTotalFileSize) {
            throw new SmartFrogLivenessException("File size " + size
                    + " is below the minTotalFileSize value of " + minTotalFileSize + "\n" + listing, this);
        }
        if (maxTotalFileSize > -1 && size > maxTotalFileSize) {
            throw new SmartFrogLivenessException("File size " + size
                    + " is above the maxTotalFileSize value of " + maxTotalFileSize + "\n" + listing, this);
        }

    } catch (IOException e) {
        if (isIdempotent()) {
            sfLog().info("Failed to stat " + path, e);
        } else {
            throw e;
        }
    }
}

From source file:org.smartfrog.services.hadoop.operations.dfs.DfsPathExistsImpl.java

License:Open Source License

/**
 * check that a path exists
 *
 * @throws SmartFrogLivenessException if it does not, or it is the wrong type/size
 */
private void checkPathExists() throws SmartFrogLivenessException {
    String filename = getPathName() + " in " + dfs.toString();
    try {
        if (!doesPathExist()) {
            throw new SmartFrogLivenessException("Missing path " + filename);
        }
        FileStatus status = dfs.getFileStatus(getPath());
        if (verbose) {
            sfLog().info("Path " + getPath() + " size " + status.getLen() + " last modified:"
                    + status.getModificationTime());
        }
        if (status.isDir()) {
            //it is a directory. Run the directory checks

            FileStatus[] statuses = dfs.listStatus(getPath());
            if (statuses == null) {
                throw new SmartFrogLivenessException("Unable to list the status of " + filename);
            }
            int fileCount = statuses.length;
            StringBuilder filenames = new StringBuilder();

            long totalFileSize = 0;
            for (FileStatus fstat : statuses) {
                totalFileSize += fstat.getLen();
                filenames.append(fstat.getPath() + "\t").append('\t').append(fstat.getBlockSize()).append("\n");
                filenames.append('\n');
                if (verbose) {
                    sfLog().info(fstat.getPath() + "\t" + fstat.getBlockSize() + "\n");
                }
            }

            if (!canBeDir) {
                throw new SmartFrogLivenessException("Expected a file, got a directory: " + filename
                        + " containing " + fileCount + " file(s):\n" + filenames);
            }
            if (fileCount < minFileCount) {
                throw new SmartFrogLivenessException("Not enough files under " + filename + " required "
                        + minFileCount + " found " + fileCount + " :\n" + filenames);
            }
            if (maxFileCount >= 0 && fileCount > maxFileCount) {
                throw new SmartFrogLivenessException("Too many files under " + filename + " maximum "
                        + maxFileCount + " found " + fileCount + " :\n" + filenames);
            }
            if (totalFileSize < minTotalFileSize) {
                throw new SmartFrogLivenessException("not enough file content " + filename + " required "
                        + minTotalFileSize + " found " + totalFileSize + " :\n" + filenames);
            }
            if (maxTotalFileSize >= 0 && totalFileSize > maxTotalFileSize) {
                throw new SmartFrogLivenessException("too much enough file content " + filename + " maximum "
                        + minTotalFileSize + " found " + totalFileSize + " :\n" + filenames);
            }
        } else {
            if (!canBeFile) {
                throw new SmartFrogLivenessException("Not allowed to be a file: " + filename);
            }
            long size = status.getLen();
            if (size < minFileSize) {
                throw new SmartFrogLivenessException("File " + filename + " is too small at " + size
                        + " bytes for the minimum size " + minFileSize);
            }
            if (maxFileSize >= 0 && size > maxFileSize) {
                throw new SmartFrogLivenessException("File " + filename + " is too big at " + size
                        + " bytes for the maximum size " + maxFileSize);
            }
            short replication = status.getReplication();
            if (replication < minReplication) {
                throw new SmartFrogLivenessException("File  " + filename + " has a replication factor of"
                        + replication + " which is less than the minimum value of " + minReplication);
            }
            if (maxReplication >= 0 && replication > maxReplication) {
                throw new SmartFrogLivenessException("File  " + filename + " has a replication factor of"
                        + replication + " which is less than the maximum value of " + maxReplication);
            }
        }
    } catch (IOException e) {
        throw new SmartFrogLivenessException("Missing path " + filename, e);
    }
}

From source file:org.springframework.data.hadoop.fs.FsShell.java

License:Apache License

public Collection<FileStatus> ls(boolean recursive, String... match) {

    Collection<FileStatus> results = new PrettyPrintList<FileStatus>(new ListPrinter<FileStatus>() {
        @Override
        public String toString(FileStatus stat) throws Exception {
            final SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm");
            int maxReplication = 3, maxLen = 10, maxOwner = 10, maxGroup = 10;

            StringBuilder sb = new StringBuilder();
            sb.append((stat.isDir() ? "d" : "-") + stat.getPermission() + " ");
            sb.append(
                    String.format("%" + maxReplication + "s ", (!stat.isDir() ? stat.getReplication() : "-")));
            sb.append(String.format("%-" + maxOwner + "s ", stat.getOwner()));
            sb.append(String.format("%-" + maxGroup + "s ", stat.getGroup()));
            sb.append(String.format("%" + maxLen + "d ", stat.getLen()));
            sb.append(df.format(new Date(stat.getModificationTime())) + " ");
            sb.append(stat.getPath().toUri().getPath());
            return sb.toString();
        }
    });

    try {
        for (String src : match) {
            Path srcPath = new Path(src);

            FileSystem srcFs = getFS(srcPath);
            FileStatus[] srcs = srcFs.globStatus(srcPath);
            if (!ObjectUtils.isEmpty(srcs)) {
                for (FileStatus status : srcs) {
                    ls(status, srcFs, recursive, results);
                }
            } else {
                throw new IllegalArgumentException("Cannot access " + srcPath + ": No such file or directory.");
            }
        }

        return Collections.unmodifiableCollection(results);

    } catch (IOException ex) {
        throw new HadoopException("Cannot list resources " + ex.getMessage(), ex);
    }
}

From source file:tachyon.hadoop.HadoopUtils.java

License:Apache License

/**
 * Returns a string representation of a Hadoop {@link FileStatus}.
 *
 * @param fs Hadoop {@link FileStatus}
 * @return its string representation
 */
public static String toStringHadoopFileStatus(FileStatus fs) {
    StringBuilder sb = new StringBuilder();
    sb.append("HadoopFileStatus: Path: ").append(fs.getPath());
    sb.append(" , Length: ").append(fs.getLen());
    sb.append(" , IsDir: ").append(fs.isDir());
    sb.append(" , BlockReplication: ").append(fs.getReplication());
    sb.append(" , BlockSize: ").append(fs.getBlockSize());
    sb.append(" , ModificationTime: ").append(fs.getModificationTime());
    sb.append(" , AccessTime: ").append(fs.getAccessTime());
    sb.append(" , Permission: ").append(fs.getPermission());
    sb.append(" , Owner: ").append(fs.getOwner());
    sb.append(" , Group: ").append(fs.getGroup());
    return sb.toString();
}