Example usage for org.apache.hadoop.fs.permission FsPermission FsPermission

List of usage examples for org.apache.hadoop.fs.permission FsPermission FsPermission

Introduction

In this page you can find the example usage for org.apache.hadoop.fs.permission FsPermission FsPermission.

Prototype

public FsPermission(String mode) 

Source Link

Document

Construct by given mode, either in octal or symbolic format.

Usage

From source file:alluxio.client.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

/**
 * Tests that directory permissions set through the Hadoop API propagate to the UFS.
 */
@Test
public void directoryPermissionForUfs() throws IOException {
    // Skip non-local and non-HDFS UFSs.
    Assume.assumeTrue(UnderFileSystemUtils.isLocal(sUfs) || UnderFileSystemUtils.isHdfs(sUfs));

    Path dir = new Path("/root/directoryPermissionForUfsDir");
    sTFS.mkdirs(dir);

    FileStatus fs = sTFS.getFileStatus(dir);
    String defaultOwner = fs.getOwner();
    // Primitive shorts: the boxed Short locals in the previous version forced
    // needless autoboxing before the (int) casts below.
    short dirMode = fs.getPermission().toShort();
    FileStatus parentFs = sTFS.getFileStatus(dir.getParent());
    short parentMode = parentFs.getPermission().toShort();

    // The new directory and its parent must show the same owner/mode in the UFS.
    UfsStatus ufsStatus = sUfs.getDirectoryStatus(PathUtils.concatPath(sUfsRoot, dir));
    Assert.assertEquals(defaultOwner, ufsStatus.getOwner());
    Assert.assertEquals((int) dirMode, (int) ufsStatus.getMode());
    Assert.assertEquals((int) parentMode,
            (int) sUfs.getDirectoryStatus(PathUtils.concatPath(sUfsRoot, dir.getParent())).getMode());

    // Changing the permission via setPermission should also propagate to the UFS.
    short newMode = (short) 0755;
    FsPermission newPermission = new FsPermission(newMode);
    sTFS.setPermission(dir, newPermission);

    Assert.assertEquals((int) newMode,
            (int) sUfs.getDirectoryStatus(PathUtils.concatPath(sUfsRoot, dir)).getMode());
}

From source file:alluxio.client.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

/**
 * Tests that the parent directory permission is preserved in the UFS when creating a
 * file under it, and that the permission survives a rename of the parent.
 */
@Test
public void parentDirectoryPermissionForUfs() throws IOException {
    // Skip non-local and non-HDFS UFSs.
    Assume.assumeTrue(UnderFileSystemUtils.isLocal(sUfs) || UnderFileSystemUtils.isHdfs(sUfs));

    Path fileA = new Path("/root/parentDirectoryPermissionForUfsDir/parentDirectoryPermissionForUfsFile");
    Path dirA = fileA.getParent();
    sTFS.mkdirs(dirA);
    short parentMode = (short) 0700;
    FsPermission newPermission = new FsPermission(parentMode);
    sTFS.setPermission(dirA, newPermission);

    create(sTFS, fileA);

    Assert.assertEquals((int) parentMode,
            (int) sUfs.getDirectoryStatus(PathUtils.concatPath(sUfsRoot, dirA)).getMode());

    // Rename from dirA to dirB, file and its parent permission should be in sync with the source
    // dirA.
    Path fileB = new Path("/root/dirB/fileB");
    Path dirB = fileB.getParent();
    sTFS.rename(dirA, dirB);
    // Reuse dirB here instead of recomputing fileB.getParent() for consistency.
    Assert.assertEquals((int) parentMode,
            (int) sUfs.getDirectoryStatus(PathUtils.concatPath(sUfsRoot, dirB)).getMode());
}

From source file:alluxio.hadoop.AbstractFileSystem.java

License: Apache License

/**
 * {@inheritDoc}
 *
 * Returns the Hadoop {@link FileStatus} for the given path, translated from Alluxio's
 * {@code URIStatus}.
 *
 * NOTE(review): an earlier comment claimed "if the file does not exist in Alluxio,
 * query it from HDFS", but the visible code performs no such fallback — a missing
 * file is surfaced as {@link FileNotFoundException}. Confirm against the full class.
 */
@Override
public FileStatus getFileStatus(Path path) throws IOException {
    LOG.info("getFileStatus({})", path);

    // Count this call as a read op for Hadoop FileSystem statistics.
    if (mStatistics != null) {
        mStatistics.incrementReadOps(1);
    }
    AlluxioURI uri = new AlluxioURI(HadoopUtils.getPathWithoutScheme(path));
    URIStatus fileStatus;
    try {
        fileStatus = sFileSystem.getStatus(uri);
    } catch (FileDoesNotExistException e) {
        // Hadoop callers expect FileNotFoundException for missing paths.
        throw new FileNotFoundException(e.getMessage());
    } catch (AlluxioException e) {
        throw new IOException(e);
    }

    // Creation time is passed for both modification and access time — presumably no
    // separate timestamps are available here; TODO confirm.
    return new FileStatus(fileStatus.getLength(), fileStatus.isFolder(), BLOCK_REPLICATION_CONSTANT,
            fileStatus.getBlockSizeBytes(), fileStatus.getCreationTimeMs(), fileStatus.getCreationTimeMs(),
            new FsPermission((short) fileStatus.getMode()), fileStatus.getOwner(), fileStatus.getGroup(),
            new Path(mAlluxioHeader + uri));
}

From source file:alluxio.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

/**
 * Tests that directory permissions set through the Hadoop API propagate to the UFS.
 */
@Test
public void directoryPermissionForUfs() throws IOException {
    if (!(sUfs instanceof LocalUnderFileSystem) && !(sUfs instanceof HdfsUnderFileSystem)) {
        // Skip non-local and non-HDFS UFSs.
        return;
    }
    Path dir = new Path("/root/dir/");
    sTFS.mkdirs(dir);

    FileStatus fs = sTFS.getFileStatus(dir);
    String defaultOwner = fs.getOwner();
    // Primitive shorts: the boxed Short locals in the previous version forced
    // needless autoboxing before the (int) casts below.
    short dirMode = fs.getPermission().toShort();
    FileStatus parentFs = sTFS.getFileStatus(dir.getParent());
    short parentMode = parentFs.getPermission().toShort();

    // The new directory and its parent must show the same owner/mode in the UFS.
    Assert.assertEquals(defaultOwner, sUfs.getOwner(PathUtils.concatPath(sUfsRoot, dir)));
    Assert.assertEquals((int) dirMode, (int) sUfs.getMode(PathUtils.concatPath(sUfsRoot, dir)));
    Assert.assertEquals((int) parentMode, (int) sUfs.getMode(PathUtils.concatPath(sUfsRoot, dir.getParent())));

    // Changing the permission via setPermission should also propagate to the UFS.
    short newMode = (short) 0755;
    FsPermission newPermission = new FsPermission(newMode);
    sTFS.setPermission(dir, newPermission);

    Assert.assertEquals((int) newMode, (int) sUfs.getMode(PathUtils.concatPath(sUfsRoot, dir)));
}

From source file:alluxio.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

/**
 * Tests that the parent directory permission is preserved in the UFS when creating a
 * file under it, and that the permission survives a rename of the parent.
 */
@Test
public void parentDirectoryPermissionForUfs() throws IOException {
    if (!(sUfs instanceof LocalUnderFileSystem) && !(sUfs instanceof HdfsUnderFileSystem)) {
        // Skip non-local and non-HDFS UFSs.
        return;
    }
    Path fileA = new Path("/root/dirA/fileA");
    Path dirA = fileA.getParent();
    sTFS.mkdirs(dirA);
    short parentMode = (short) 0700;
    FsPermission newPermission = new FsPermission(parentMode);
    sTFS.setPermission(dirA, newPermission);

    create(sTFS, fileA);

    Assert.assertEquals((int) parentMode, (int) sUfs.getMode(PathUtils.concatPath(sUfsRoot, dirA)));

    // Rename from dirA to dirB, file and its parent permission should be in sync with the source
    // dirA.
    Path fileB = new Path("/root/dirB/fileB");
    Path dirB = fileB.getParent();
    sTFS.rename(dirA, dirB);
    // Reuse dirB here instead of recomputing fileB.getParent() for consistency.
    Assert.assertEquals((int) parentMode,
            (int) sUfs.getMode(PathUtils.concatPath(sUfsRoot, dirB)));
}

From source file:alluxio.underfs.hdfs.HdfsUnderFileSystem.java

License: Apache License

@Override
public FSDataOutputStream create(String path, CreateOptions options) throws IOException {
    // Retry the HDFS create up to MAX_TRY times; remember the last failure so it can
    // be rethrown once all attempts are exhausted.
    Permission perm = options.getPermission();
    RetryPolicy retry = new CountingRetry(MAX_TRY);
    IOException lastFailure = null;
    while (retry.attemptRetry()) {
        try {
            LOG.debug("Creating HDFS file at {} with perm {}", path, perm.toString());
            // TODO(chaomin): support creating HDFS files with specified block size and replication.
            FsPermission hdfsPerm = new FsPermission(perm.getMode().toShort());
            return FileSystem.create(mFileSystem, new Path(path), hdfsPerm);
        } catch (IOException e) {
            LOG.error("Retry count {} : {} ", retry.getRetryCount(), e.getMessage(), e);
            lastFailure = e;
        }
    }
    throw lastFailure;
}

From source file:alluxio.underfs.hdfs.HdfsUnderFileSystem.java

License: Apache License

/**
 * Creates the directory at {@code path}, including any missing ancestors, applying the
 * permission from {@code options} to every directory created. The whole operation is
 * retried up to MAX_TRY times on IOException.
 *
 * @param path the directory to create
 * @param options carries the permission applied to each created directory
 * @return true if all directories were created; false if the path already exists or an
 *         underlying mkdirs call reports failure
 * @throws IOException the last failure, if every retry attempt throws
 */
@Override
public boolean mkdirs(String path, MkdirsOptions options) throws IOException {
    IOException te = null;
    RetryPolicy retryPolicy = new CountingRetry(MAX_TRY);
    while (retryPolicy.attemptRetry()) {
        try {
            Path hdfsPath = new Path(path);
            if (mFileSystem.exists(hdfsPath)) {
                LOG.debug("Trying to create existing directory at {}", path);
                return false;
            }
            // Create directories one by one with explicit permissions to ensure no umask is applied,
            // using mkdirs will apply the permission only to the last directory
            Stack<Path> dirsToMake = new Stack<>();
            dirsToMake.push(hdfsPath);
            Path parent = hdfsPath.getParent();
            // Walk up until an existing ancestor is found; the stack then holds the
            // missing directories from deepest to shallowest.
            while (!mFileSystem.exists(parent)) {
                dirsToMake.push(parent);
                parent = parent.getParent();
            }
            // Pop shallowest-first so each parent exists before its child is made.
            while (!dirsToMake.empty()) {
                if (!FileSystem.mkdirs(mFileSystem, dirsToMake.pop(),
                        new FsPermission(options.getPermission().getMode().toShort()))) {
                    return false;
                }
            }
            return true;
        } catch (IOException e) {
            LOG.error("{} try to make directory for {} : {}", retryPolicy.getRetryCount(), path, e.getMessage(),
                    e);
            te = e;
        }
    }
    throw te;
}

From source file:alluxio.underfs.hdfs.HdfsUnderFileSystem.java

License: Apache License

@Override
public void setMode(String path, short mode) throws IOException {
    // Look up the current status first so the old permission can be logged next to
    // the new mode before applying the change.
    try {
        FileStatus status = mFileSystem.getFileStatus(new Path(path));
        Path resolved = status.getPath();
        LOG.info("Changing file '{}' permissions from: {} to {}", resolved,
                status.getPermission(), mode);
        mFileSystem.setPermission(resolved, new FsPermission(mode));
    } catch (IOException e) {
        LOG.error("Fail to set permission for {} with perm {}", path, mode, e);
        throw e;
    }
}

From source file:com.alibaba.jstorm.hdfs.blobstore.HdfsBlobStoreFile.java

License: Apache License

/**
 * Opens an output stream for writing this blob file. If the first create attempt fails
 * (typically because the parent directory is missing), the parent is created with the
 * blobstore directory permission and the create is retried once.
 *
 * @return an output stream for the newly created file
 * @throws IOException if the file cannot be created even after creating the parent dir
 */
@Override
public OutputStream getOutputStream() throws IOException {
    checkIsNotTmp();
    // Hoist the repeated replication-factor cast used by both create paths.
    short replication = (short) this.getMetadata().get_replication_factor();
    FsPermission fileperms = new FsPermission(BLOBSTORE_FILE_PERMISSION);
    OutputStream out = null;
    try {
        out = _fs.create(_path, replication);
        _fs.setPermission(_path, fileperms);
        _fs.setReplication(_path, replication);
    } catch (IOException e) {
        //Try to create the parent directory, may not work
        FsPermission dirperms = new FsPermission(HdfsBlobStoreImpl.BLOBSTORE_DIR_PERMISSION);
        if (!_fs.mkdirs(_path.getParent(), dirperms)) {
            LOG.warn("error creating parent dir: " + _path.getParent());
        }
        out = _fs.create(_path, replication);
        // BUG FIX: the original applied dirperms (the directory permission) to the
        // file here; the file must get the blobstore FILE permission on both paths.
        _fs.setPermission(_path, fileperms);
        _fs.setReplication(_path, replication);
    }
    if (out == null) {
        throw new IOException("Error in creating: " + _path);
    }
    return out;
}

From source file:com.alibaba.jstorm.hdfs.blobstore.HdfsBlobStoreImpl.java

License: Apache License

/**
 * Creates an HDFS-backed blob store rooted at {@code path}: resolves the HDFS
 * defaultFS, copies "blobstore.hdfs.*" conf entries into the Hadoop configuration,
 * ensures the root directory exists, and optionally schedules a periodic cleanup task.
 *
 * @param path root path of the blob store in HDFS
 * @param conf JStorm configuration map
 * @param hconf Hadoop configuration; mutated in place (fs.defaultFS and copied keys)
 * @throws IOException if the blob store root directory cannot be created
 */
public HdfsBlobStoreImpl(Path path, Map<String, Object> conf, Configuration hconf) throws IOException {
    LOG.info("Blob store based in {}", path);
    _fullPath = path;
    _hadoopConf = hconf;

    // Either an explicit <hostname, port> pair or a full defaultFS URI must be given.
    String hdfsHostName = (String) conf.get(Config.BLOBSTORE_HDFS_HOSTNAME);
    Integer hdfsPort = JStormUtils.parseInt(conf.get(Config.BLOBSTORE_HDFS_PORT));
    String defaultFS = (String) conf.get(Config.BLOBSTORE_HDFS_DEFAULT_FS);
    if ((hdfsHostName == null || hdfsPort == null) && defaultFS == null) {
        throw new RuntimeException(
                "<blobstore.hdfs.hostname, blobstore.hdfs.port> and blobstore.hdfs.defaultFS "
                        + "is empty. You must specify an HDFS location! ");
    }
    if (defaultFS == null) {
        defaultFS = String.format("hdfs://%s:%d", hdfsHostName, hdfsPort);
    }
    LOG.info("HDFS blob store, using defaultFS: {}", defaultFS);
    _hadoopConf.set("fs.defaultFS", defaultFS);

    // Copy every "blobstore.hdfs.*" entry into the Hadoop conf with the prefix stripped.
    String keyPrefix = "blobstore.hdfs.";
    for (Map.Entry<String, Object> confEntry : conf.entrySet()) {
        String key = confEntry.getKey();
        Object value = confEntry.getValue();
        if (key.startsWith(keyPrefix) && value != null) {
            key = key.substring(keyPrefix.length(), key.length());
            LOG.info("adding \"{}={}\" to hadoop conf", key, value);
            _hadoopConf.set(key, value.toString());
        }
    }

    _fs = path.getFileSystem(_hadoopConf);

    // Ensure the blob store root exists, creating it with the blobstore dir permission.
    if (!_fs.exists(_fullPath)) {
        FsPermission perms = new FsPermission(BLOBSTORE_DIR_PERMISSION);
        boolean success = false;
        try {
            success = _fs.mkdirs(_fullPath, perms);
        } catch (IOException e) {
            LOG.error("fs mkdir ", e);
        }
        if (!success) {
            throw new IOException("Error creating blobstore directory: " + _fullPath);
        }
    }

    // Optionally run fullCleanup every FULL_CLEANUP_FREQ ms — presumably pruning
    // stale/expired blobs; verify against fullCleanup's implementation.
    Object shouldCleanup = conf.get(Config.BLOBSTORE_CLEANUP_ENABLE);
    if (JStormUtils.parseBoolean(shouldCleanup, false)) {
        LOG.debug("Starting hdfs blobstore cleaner");
        _cleanup = new TimerTask() {
            @Override
            public void run() {
                try {
                    fullCleanup(FULL_CLEANUP_FREQ);
                } catch (IOException e) {
                    LOG.error("Error trying to cleanup", e);
                }
            }
        };
        timer.scheduleAtFixedRate(_cleanup, 0, FULL_CLEANUP_FREQ);
    }
}