Example usage for org.apache.hadoop.hdfs.protocol HdfsFileStatus getReplication

List of usage examples for org.apache.hadoop.hdfs.protocol HdfsFileStatus getReplication

Introduction

On this page you can find example usage for org.apache.hadoop.hdfs.protocol HdfsFileStatus getReplication.

Prototype

short getReplication();

Document

See FileStatus#getReplication().
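
A minimal, self-contained sketch of calling this accessor, assuming the status is fetched through DFSClient#getFileInfo; the namenode address and the path are placeholders, not taken from the examples below.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

// Sketch only: the namenode URI and the path are assumed values.
public class GetReplicationSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DFSClient client = new DFSClient(URI.create("hdfs://namenode:8020"), conf);
        try {
            HdfsFileStatus status = client.getFileInfo("/tmp/example.txt");
            if (status != null) {
                // getReplication() returns the file's replication factor as a short.
                short replication = status.getReplication();
                System.out.println("replication = " + replication);
            }
        } finally {
            client.close();
        }
    }
}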

Usage

From source file: com.bigstep.datalake.DLFileSystem.java

License: Apache License

private FileStatus makeQualified(HdfsFileStatus f, Path parent) {

    return new FileStatus(f.getLen(), f.isDir(), f.getReplication(), f.getBlockSize(), f.getModificationTime(),
            f.getAccessTime(), f.getPermission(), f.getOwner(), f.getGroup(),
            f.isSymlink() ? new Path(f.getSymlink()) : null,
            makeQualified(f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory())));
}
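
In DLFileSystem this conversion backs the public FileSystem view of a file: each field of the wire-level HdfsFileStatus, including the replication factor from getReplication(), is copied into the FileStatus returned to callers, while the path is re-qualified against the filesystem URI and working directory.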

From source file: com.bigstep.datalake.JsonUtil.java

License: Apache License

/**
 * Convert a HdfsFileStatus object to a Json string.
 * @param status input status
 * @param includeType type to use
 * @return the json
 */
public static String toJsonString(final HdfsFileStatus status, boolean includeType) {
    if (status == null) {
        return null;
    }
    final Map<String, Object> m = new TreeMap<String, Object>();
    m.put("pathSuffix", status.getLocalName());
    m.put("type", PathType.valueOf(status));
    if (status.isSymlink()) {
        m.put("symlink", status.getSymlink());
    }

    m.put("length", status.getLen());
    m.put("owner", status.getOwner());
    m.put("group", status.getGroup());
    FsPermission perm = status.getPermission();
    m.put("permission", toString(perm));
    if (perm.getAclBit()) {
        m.put("aclBit", true);
    }
    if (perm.getEncryptedBit()) {
        m.put("encBit", true);
    }
    m.put("accessTime", status.getAccessTime());
    m.put("modificationTime", status.getModificationTime());
    m.put("blockSize", status.getBlockSize());
    m.put("replication", status.getReplication());
    m.put("fileId", status.getFileId());
    m.put("childrenNum", status.getChildrenNum());
    m.put("storagePolicy", status.getStoragePolicy());

    Gson gson = new Gson();

    return includeType ? toJsonString(FileStatus.class, m) : gson.toJson(m);

}
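
A hedged usage sketch for the helper above, reusing a placeholder namenode address and path; the driver class name is illustrative only.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

import com.bigstep.datalake.JsonUtil;

// Sketch only: the namenode URI and the path are assumed values.
public class JsonUtilReplicationSketch {
    public static void main(String[] args) throws Exception {
        DFSClient client = new DFSClient(URI.create("hdfs://namenode:8020"), new Configuration());
        try {
            HdfsFileStatus status = client.getFileInfo("/tmp/example.txt");
            if (status != null) {
                // includeType=false returns the plain Gson rendering of the map built above,
                // so "replication" appears alongside blockSize, owner, group and the other keys.
                System.out.println(JsonUtil.toJsonString(status, false));
            }
        } finally {
            client.close();
        }
    }
}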

From source file: com.mellanox.r4h.DFSOutputStream.java

License: Apache License

/**
 * @return the object for computing checksum.
 *         The type is NULL if checksum is not computed.
 */
private static DataChecksum getChecksum4Compute(DataChecksum checksum, HdfsFileStatus stat) {
    if (isLazyPersist(stat) && stat.getReplication() == 1) {
        // do not compute checksum for writing to single replica to memory
        return DataChecksum.newDataChecksum(Type.NULL, checksum.getBytesPerChecksum());
    }
    return checksum;
}
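
The replication factor drives the decision here: for a lazy-persist file written with a single replica, the method returns a NULL-type checksum (keeping the original bytes-per-checksum), so no checksum is computed for that write.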

From source file: com.mellanox.r4h.DFSOutputStream.java

License: Apache License

protected DFSOutputStream(DFSClient dfsClient, String src, Progressable progress, HdfsFileStatus stat,
        DataChecksum checksum) throws IOException {
    super(getChecksum4Compute(checksum, stat));
    this.dfsClient = dfsClient;
    this.src = src;
    this.fileId = stat.getFileId();
    this.blockSize = stat.getBlockSize();
    this.blockReplication = stat.getReplication();
    this.fileEncryptionInfo = stat.getFileEncryptionInfo();
    this.progress = progress;
    this.cachingStrategy = new AtomicReference<CachingStrategy>(dfsClient.getDefaultWriteCachingStrategy());
    this.jxioResource = dfsClient.getJXIOResource();
    this.usingJxioClientResource = true;
    this.eventQHandler = jxioResource.getEqh();
    this.msgPool = jxioResource.getMsgPool();
    this.headerAckTimeoutUsec = dfsClient.getConf().getHeaderAckTimeoutUsec();

    if ((progress != null) && DFSOutputStream.LOG.isDebugEnabled()) {
        DFSOutputStream.LOG.debug("Set non-null progress callback on DFSOutputStream " + src);
    }

    this.bytesPerChecksum = checksum.getBytesPerChecksum();
    if (bytesPerChecksum < 1 || blockSize % bytesPerChecksum != 0) {
        throw new IOException("io.bytes.per.checksum(" + bytesPerChecksum + ") and blockSize(" + blockSize
                + ") do not match. " + "blockSize should be a " + "multiple of io.bytes.per.checksum");

    }
    this.name = String.format("[hash=%X] ", DFSOutputStream.this.hashCode());
    this.checksum = checksum;

    if (toPrintBreakdown) {
        long now = System.nanoTime();
        DFSOutputStream.LOG.info(String.format("%.3f", (float) now / 1000000000.) + ", "
                + (now - lastOperationTS) / 1000000000. + " : DFSOutputStream constructed successfully.");
        synchronized (lastOperationTSLock) {
            lastOperationTS = now;
        }
        // } else {
        // LOG.info("Mellanox RDMA-accelerated DFSOutputStream constructed successfully.");
    }
}
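
The replication factor is read once from the HdfsFileStatus and cached in blockReplication, alongside the block size, file id, and encryption info taken from the same status object, when the output stream is constructed.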