Example usage for org.apache.hadoop.fs FsStatus getUsed

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FsStatus.getUsed().

Prototype

public long getUsed() 

Document

Returns the number of bytes used on the file system.
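
A minimal, self-contained sketch of calling getUsed() against the default file system (the class name and printed labels are illustrative; it assumes the Hadoop client jars and, optionally, a core-site.xml on the classpath):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsStatus;

public class FsStatusUsedDemo {
    public static void main(String[] args) throws Exception {
        // Reads fs.defaultFS from core-site.xml if present; otherwise this
        // falls back to the local file system.
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
            FsStatus status = fs.getStatus(); // one snapshot of the space counters
            System.out.println("Used bytes:      " + status.getUsed());
            System.out.println("Capacity bytes:  " + status.getCapacity());
            System.out.println("Remaining bytes: " + status.getRemaining());
        }
    }
}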

Usage

From source file: co.cask.cdap.operations.hdfs.HDFSStorage.java

License: Apache License

@Override
public synchronized void collect() throws IOException {
    try (DistributedFileSystem dfs = createDFS()) {
        if (dfs == null) {
            return;
        }
        FsStatus status = dfs.getStatus();
        this.totalBytes = status.getCapacity();
        this.availableBytes = status.getRemaining();
        this.usedBytes = status.getUsed();
        this.missingBlocks = dfs.getMissingBlocksCount();
        this.underReplicatedBlocks = dfs.getUnderReplicatedBlocksCount();
        this.corruptBlocks = dfs.getCorruptBlocksCount();
    }
}

From source file: fuse4j.hadoopfs.HdfsClientImpl.java

License: Apache License

@Override
public FuseStatfs getStatus(int uid) {
    FileSystem dfs = null;
    try {
        dfs = getDfs(uid);
        FsStatus status = dfs.getStatus();
        long cap = status.getCapacity();
        long bsize = dfs.getDefaultBlockSize();
        long used = status.getUsed();

        FuseStatfs statFS = new FuseStatfs();
        statFS.blockSize = (int) bsize;
        statFS.blocks = (int) (cap / bsize);
        statFS.blocksFree = (int) ((cap - used) / bsize);
        statFS.blocksAvail = (int) ((cap - used) / bsize);
        statFS.files = 1000;
        statFS.filesFree = 500;
        statFS.namelen = 1023;
        return statFS;
    } catch (Exception e) {
        e.printStackTrace();
        return null;
    }
}

From source file: hdfs.jsr203.HadoopFileStore.java

License: Apache License

@Override
public long getUnallocatedSpace() throws IOException {
    FsStatus status = this.system.getHDFS().getStatus();
    return status.getCapacity() - status.getUsed();
}
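
The other java.nio.file.FileStore space queries map onto the remaining FsStatus accessors in the same way. A sketch, assuming the same this.system.getHDFS() accessor as the example above (these overrides are illustrative, not quoted from the project):

@Override
public long getTotalSpace() throws IOException {
    // Configured capacity of the file system, in bytes.
    return this.system.getHDFS().getStatus().getCapacity();
}

@Override
public long getUsableSpace() throws IOException {
    // Bytes the file system reports as still free for new data.
    return this.system.getHDFS().getStatus().getRemaining();
}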

From source file: org.apache.crunchts.CrunchTSApp.java

License: Apache License

/**
 * Gives a report for a time series bucket.
 *
 * @exception IOException if the tsbucket does not exist.
 */
public void report(String tsbFilePath) throws IOException {

    System.out.println("Time-Series-Bucket report (TSBr) is comming soon ...");

    System.out.println("> path: " + tsbFilePath);

    if (fs instanceof DistributedFileSystem) {

        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        FsStatus ds = dfs.getStatus();

        long capacity = ds.getCapacity();
        long used = ds.getUsed();
        long remaining = ds.getRemaining();
        long presentCapacity = used + remaining;

        System.out.println("FS Configured Capacity: " + capacity + " (" + StringUtils.byteDesc(capacity) + ")");
        System.out.println(
                "FS Present Capacity: " + presentCapacity + " (" + StringUtils.byteDesc(presentCapacity) + ")");
        System.out.println("DFS Remaining: " + remaining + " (" + StringUtils.byteDesc(remaining) + ")");
        System.out.println("DFS Used: " + used + " (" + StringUtils.byteDesc(used) + ")");
        System.out.println(
                "DFS Used%: " + StringUtils.limitDecimalTo2(((1.0 * used) / presentCapacity) * 100) + "%");
    }
}
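
Note that presentCapacity is derived as used + remaining rather than read from getCapacity(): the difference between the two is datanode space consumed by non-DFS data, and this derivation matches how hdfs dfsadmin -report computes its "Present Capacity" figure.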

From source file: org.apache.ignite.igfs.HadoopIgfs20FileSystemAbstractSelfTest.java

License: Apache License

/** @throws Exception If failed. */
public void testStatus() throws Exception {

    try (FSDataOutputStream file = fs.create(new Path("/file1"), EnumSet.noneOf(CreateFlag.class),
            Options.CreateOpts.perms(FsPermission.getDefault()))) {
        file.write(new byte[1024 * 1024]);
    }

    FsStatus status = fs.getFsStatus();

    assertEquals(4, grid(0).cluster().nodes().size());

    long used = 0, max = 0;

    for (int i = 0; i < 4; i++) {
        IgniteFileSystem igfs = grid(i).fileSystem("igfs");

        IgfsMetrics metrics = igfs.metrics();

        used += metrics.localSpaceSize();
        max += metrics.maxSpaceSize();
    }

    assertEquals(used, status.getUsed());
    assertEquals(max, status.getCapacity());
}

From source file: org.apache.ignite.igfs.IgfsHadoop20FileSystemAbstractSelfTest.java

License: Apache License

/** @throws Exception If failed. */
public void testStatus() throws Exception {

    try (FSDataOutputStream file = fs.create(new Path("/file1"), EnumSet.noneOf(CreateFlag.class),
            Options.CreateOpts.perms(FsPermission.getDefault()))) {
        file.write(new byte[1024 * 1024]);
    }

    FsStatus status = fs.getFsStatus();

    assertEquals(4, grid(0).nodes().size());

    long used = 0, max = 0;

    for (int i = 0; i < 4; i++) {
        IgniteFs igfs = grid(i).fileSystem("igfs");

        IgfsMetrics metrics = igfs.metrics();

        used += metrics.localSpaceSize();
        max += metrics.maxSpaceSize();
    }

    assertEquals(used, status.getUsed());
    assertEquals(max, status.getCapacity());
}

From source file: org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfs20FileSystemAbstractSelfTest.java

License: Apache License

/** @throws Exception If failed. */
public void testStatus() throws Exception {
    Path file1 = new Path("/file1");

    try (FSDataOutputStream file = fs.create(file1, EnumSet.noneOf(CreateFlag.class),
            Options.CreateOpts.perms(FsPermission.getDefault()))) {
        file.write(new byte[1024 * 1024]);
    }

    FsStatus status = fs.getFsStatus();

    assertEquals(getClientFsUser(), fs.getFileStatus(file1).getOwner());

    assertEquals(4, grid(0).cluster().nodes().size());

    long used = 0, max = 0;

    for (int i = 0; i < 4; i++) {
        IgniteFileSystem igfs = grid(i).fileSystem("igfs");

        IgfsMetrics metrics = igfs.metrics();

        used += metrics.localSpaceSize();
        max += metrics.maxSpaceSize();
    }

    assertEquals(used, status.getUsed());
    assertEquals(max, status.getCapacity());
}

From source file: org.gridgain.grid.ggfs.GridGgfsHadoop20FileSystemAbstractSelfTest.java

License: Open Source License

/** @throws Exception If failed. */
public void testStatus() throws Exception {

    try (FSDataOutputStream file = fs.create(new Path("/file1"), EnumSet.noneOf(CreateFlag.class),
            Options.CreateOpts.perms(FsPermission.getDefault()))) {
        file.write(new byte[1024 * 1024]);
    }

    FsStatus status = fs.getFsStatus();

    assertEquals(4, grid(0).nodes().size());

    long used = 0, max = 0;

    for (int i = 0; i < 4; i++) {
        GridGgfs ggfs = grid(i).ggfs("ggfs");

        GridGgfsMetrics metrics = ggfs.metrics();

        used += metrics.localSpaceSize();
        max += metrics.maxSpaceSize();
    }

    assertEquals(used, status.getUsed());
    assertEquals(max, status.getCapacity());
}