Example usage for org.apache.hadoop.fs FileStatus getReplication

Introduction

This page collects example usages of org.apache.hadoop.fs.FileStatus#getReplication from open-source projects.

Prototype

public short getReplication() 

Document

Get the replication factor of a file.
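
A minimal, self-contained sketch of calling this method directly. The path below is a placeholder, and the file system is resolved from whatever Hadoop configuration is on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetReplicationExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path file = new Path("/tmp/example.txt"); // placeholder path
        FileSystem fs = file.getFileSystem(conf);
        FileStatus status = fs.getFileStatus(file);
        // The replication factor is returned as a short; directories
        // typically report 0.
        short replication = status.getReplication();
        System.out.println(file + ": replication = " + replication);
    }
}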

Usage

From source file: org.apache.hive.common.util.MockFileSystem.java

License: Apache License

public void touch(MockFile file) {
    if (fileStatusMap.containsKey(file)) {
        FileStatus fileStatus = fileStatusMap.get(file);
        // Rebuild the status with the modification time advanced by one
        // millisecond; all other attributes, including the replication
        // factor, are copied over unchanged.
        FileStatus fileStatusNew = new FileStatus(fileStatus.getLen(), fileStatus.isDirectory(),
                fileStatus.getReplication(), fileStatus.getBlockSize(), fileStatus.getModificationTime() + 1,
                fileStatus.getAccessTime(), fileStatus.getPermission(), fileStatus.getOwner(),
                fileStatus.getGroup(), fileStatus.getPath());
        fileStatusMap.put(file, fileStatusNew);
    }
}

From source file: org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.java

License: Apache License

/**
 * Convert a file status obtained from the secondary file system to a status of the primary file system.
 *
 * @param status Secondary file system status.
 * @return Primary file system status.
 */
@SuppressWarnings("deprecation")
private FileStatus toPrimary(FileStatus status) {
    return status != null
            ? new FileStatus(status.getLen(), status.isDir(), status.getReplication(), status.getBlockSize(),
                    status.getModificationTime(), status.getAccessTime(), status.getPermission(),
                    status.getOwner(), status.getGroup(), toPrimary(status.getPath()))
            : null;
}

From source file: org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem.java

License: Apache License

/**
 * Convert a file status obtained from the secondary file system to a status of the primary file system.
 *
 * @param status Secondary file system status.
 * @return Primary file system status.
 */
private FileStatus toPrimary(FileStatus status) {
    return status != null
            ? new FileStatus(status.getLen(), status.isDirectory(), status.getReplication(),
                    status.getBlockSize(), status.getModificationTime(), status.getAccessTime(),
                    status.getPermission(), status.getOwner(), status.getGroup(), toPrimary(status.getPath()))
            : null;
}
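
Note: the only substantive difference between the v1 and v2 variants above is the directory check. The v1 variant calls the deprecated FileStatus.isDir() (hence the @SuppressWarnings("deprecation")), while v2 uses its replacement, isDirectory(); the replication factor is carried over unchanged in both.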

From source file: org.apache.nifi.processors.hadoop.ListHDFS.java

License: Apache License

private Map<String, String> createAttributes(final FileStatus status) {
    final Map<String, String> attributes = new HashMap<>();
    attributes.put(CoreAttributes.FILENAME.key(), status.getPath().getName());
    attributes.put(CoreAttributes.PATH.key(), getAbsolutePath(status.getPath().getParent()));

    attributes.put("hdfs.owner", status.getOwner());
    attributes.put("hdfs.group", status.getGroup());
    attributes.put("hdfs.lastModified", String.valueOf(status.getModificationTime()));
    attributes.put("hdfs.length", String.valueOf(status.getLen()));
    attributes.put("hdfs.replication", String.valueOf(status.getReplication()));

    final FsPermission permission = status.getPermission();
    final String perms = getPerms(permission.getUserAction()) + getPerms(permission.getGroupAction())
            + getPerms(permission.getOtherAction());
    attributes.put("hdfs.permissions", perms);
    return attributes;
}

From source file: org.apache.orc.tools.FileDump.java

License: Apache License

private static void recoverFile(final Path corruptPath, final FileSystem fs, final Configuration conf,
        final List<Long> footerOffsets, final String backup) throws IOException {

    // first recover the file to .recovered file and then once successful rename it to actual file
    Path recoveredPath = getRecoveryFile(corruptPath);

    // make sure that file does not exist
    if (fs.exists(recoveredPath)) {
        fs.delete(recoveredPath, false);
    }

    // if there are no valid footers, the file should still be readable so create an empty orc file
    if (footerOffsets == null || footerOffsets.isEmpty()) {
        System.err.println("No readable footers found. Creating empty orc file.");
        TypeDescription schema = TypeDescription.createStruct();
        Writer writer = OrcFile.createWriter(recoveredPath, OrcFile.writerOptions(conf).setSchema(schema));
        writer.close();
    } else {
        FSDataInputStream fdis = fs.open(corruptPath);
        FileStatus fileStatus = fs.getFileStatus(corruptPath);
        // read corrupt file and copy it to recovered file until last valid footer
        FSDataOutputStream fdos = fs.create(recoveredPath, true, conf.getInt("io.file.buffer.size", 4096),
                fileStatus.getReplication(), fileStatus.getBlockSize());
        try {
            long fileLen = footerOffsets.get(footerOffsets.size() - 1);
            long remaining = fileLen;

            while (remaining > 0) {
                int toRead = (int) Math.min(DEFAULT_BLOCK_SIZE, remaining);
                byte[] data = new byte[toRead];
                long startPos = fileLen - remaining;
                fdis.readFully(startPos, data, 0, toRead);
                fdos.write(data);
                System.err.println("Copying data to recovery file - startPos: " + startPos + " toRead: "
                        + toRead + " remaining: " + remaining);
                remaining = remaining - toRead;
            }
        } catch (Exception e) {
            fs.delete(recoveredPath, false);
            throw new IOException(e);
        } finally {
            fdis.close();
            fdos.close();
        }
    }

    // validate the recovered file once again and start moving corrupt files to backup folder
    if (isReadable(recoveredPath, conf, Long.MAX_VALUE)) {
        Path backupDataPath;
        String scheme = corruptPath.toUri().getScheme();
        String authority = corruptPath.toUri().getAuthority();
        String filePath = corruptPath.toUri().getPath();

        // use the same filesystem as corrupt file if backup-path is not explicitly specified
        if (backup.equals(DEFAULT_BACKUP_PATH)) {
            backupDataPath = new Path(scheme, authority, DEFAULT_BACKUP_PATH + filePath);
        } else {
            backupDataPath = Path.mergePaths(new Path(backup), corruptPath);
        }

        // Move data file to backup path
        moveFiles(fs, corruptPath, backupDataPath);

        // Move side file to backup path
        Path sideFilePath = OrcAcidUtils.getSideFile(corruptPath);
        Path backupSideFilePath = new Path(backupDataPath.getParent(), sideFilePath.getName());
        moveFiles(fs, sideFilePath, backupSideFilePath);

        // finally move recovered file to actual file
        moveFiles(fs, recoveredPath, corruptPath);

        // we are done recovering, backing up and validating
        System.err.println("Validation of recovered file successful!");
    }
}

From source file: org.apache.pig.backend.hadoop.datastorage.HPath.java

License: Apache License

public Map<String, Object> getStatistics() throws IOException {
    HashMap<String, Object> props = new HashMap<String, Object>();

    FileStatus fileStatus = fs.getHFS().getFileStatus(path);

    props.put(BLOCK_SIZE_KEY, fileStatus.getBlockSize());
    props.put(BLOCK_REPLICATION_KEY, fileStatus.getReplication());
    props.put(LENGTH_KEY, fileStatus.getLen());
    props.put(MODIFICATION_TIME_KEY, fileStatus.getModificationTime());

    return props;
}

From source file: org.elasticsearch.hadoop.mr.NTFSLocalFileSystem.java

License: Apache License

@Override
public FileStatus getFileStatus(Path f) throws IOException {
    // it's the RawFS in place which messes things up as it dynamically returns the permissions...
    // workaround by doing a copy
    FileStatus fs = super.getFileStatus(f);

    // work-around for Hive 0.14
    if (SCRATCH_DIR.equals(f.toString())) {
        System.out.println("Faking scratch dir permissions on Windows...");

        return new FileStatus(fs.getLen(), fs.isDir(), fs.getReplication(), fs.getBlockSize(),
                fs.getModificationTime(), fs.getAccessTime(), SCRATCH_DIR_PERMS, fs.getOwner(), fs.getGroup(),
                fs.getPath());
        // this doesn't work since the RawFS impl has its own algo that does the lookup dynamically
        //fs.getPermission().fromShort((short) 777);
    }
    return fs;
}

From source file: org.exem.flamingo.agent.nn.hdfs.HdfsFileInfo.java

License: Apache License

public HdfsFileInfo(FileStatus fileStatus, ContentSummary contentSummary) {
    this.fullyQualifiedPath = fileStatus.getPath().toUri().getPath();
    this.filename = isEmpty(getFilename(fullyQualifiedPath)) ? getDirectoryName(fullyQualifiedPath)
            : getFilename(fullyQualifiedPath);
    this.length = fileStatus.isFile() ? fileStatus.getLen() : contentSummary.getLength();
    this.path = getPath(fullyQualifiedPath);
    this.directory = fileStatus.isDirectory();
    this.file = !fileStatus.isDirectory();
    this.owner = fileStatus.getOwner();
    this.group = fileStatus.getGroup();
    this.blockSize = fileStatus.getBlockSize();
    this.replication = fileStatus.getReplication();
    this.modificationTime = fileStatus.getModificationTime();
    if (contentSummary != null) {
        this.spaceConsumed = contentSummary.getSpaceConsumed();
        this.spaceQuota = contentSummary.getSpaceQuota();
        this.quota = contentSummary.getQuota();
        this.directoryCount = contentSummary.getDirectoryCount();
        this.fileCount = contentSummary.getFileCount();
    }
    this.accessTime = fileStatus.getAccessTime();
    this.permission = fileStatus.getPermission().toString();
}

From source file: org.exem.flamingo.agent.nn.hdfs.HdfsFileOnlyInfo.java

License: Apache License

public HdfsFileOnlyInfo(FileStatus fileStatus, ContentSummary contentSummary) {
    String qualifiedPath = fileStatus.getPath().toUri().getPath();
    this.filename = isEmpty(getFilename(qualifiedPath)) ? getDirectoryName(qualifiedPath)
            : getFilename(qualifiedPath);
    this.length = fileStatus.getLen();
    this.path = getPath(qualifiedPath);
    this.directory = fileStatus.isDirectory();
    this.modificationTime = fileStatus.getModificationTime();
    this.file = !fileStatus.isDirectory();
    this.replication = fileStatus.getReplication();
    this.owner = fileStatus.getOwner();
    this.group = fileStatus.getGroup();
    this.permission = fileStatus.getPermission().toString();
    if (contentSummary != null) {
        this.spaceConsumed = contentSummary.getSpaceConsumed();
        this.spaceQuota = contentSummary.getSpaceQuota();
    }
}

From source file: org.mrgeo.cmd.mrsimageinfo.MrsImageInfo.java

License: Apache License

private void printFileInfo(final Path pfile, PrintStream out) throws IOException {
    // TODO: The following is HDFS-specific; needs to be refactored
    final FileSystem fs = pfile.getFileSystem(config);
    final FileStatus stat = fs.getFileStatus(pfile);

    out.print("    date: " + DateTimeFormat.shortDateTime().print(stat.getModificationTime()));
    out.println("  size: " + human(stat.getLen()));

    final FsPermission p = stat.getPermission();

    if (debug) {
        out.print("    ");
        out.print(stat.isDir() ? "d" : "f");
        out.print(" u: " + stat.getOwner() + " (" + p.getUserAction().toString().toLowerCase() + ")");
        out.print(" g: " + stat.getGroup() + " (" + p.getGroupAction().toString().toLowerCase() + ")");
        out.print(" o: " + "(" + p.getOtherAction().toString().toLowerCase() + ")");

        out.print(" blk: " + human(stat.getBlockSize()));
        out.println(" repl: " + stat.getReplication());
    }
}