Example usage for org.apache.hadoop.fs.permission PermissionStatus PermissionStatus

List of usage examples for org.apache.hadoop.fs.permission PermissionStatus PermissionStatus

Introduction

In this page you can find the example usage for org.apache.hadoop.fs.permission PermissionStatus PermissionStatus.

Prototype

public PermissionStatus(String user, String group, FsPermission permission) 

Source Link

Document

Constructor

Usage

From source file:common.NameNode.java

License: Apache License

/** {@inheritDoc} */
public void create(String src, FsPermission masked, String clientName, EnumSetWritable<CreateFlag> flag,
        boolean createParent, short replication, long blockSize) throws IOException {
    String clientMachine = getClientMachine();
    if (stateChangeLog.isDebugEnabled()) {
        stateChangeLog.debug(
                "*DIR* NameNode.create: file " + src + " for " + clientName + " at " + clientMachine);
    }
    // Reject over-long / over-deep paths before touching the namesystem.
    if (!checkPathLength(src)) {
        throw new IOException("create: Pathname too long.  Limit " + MAX_PATH_LENGTH + " characters, "
                + MAX_PATH_DEPTH + " levels.");
    }
    // Owner is the caller's short user name; the group is passed as null here.
    PermissionStatus perm =
            new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(), null, masked);
    namesystem.startFile(src, perm, clientName, clientMachine, flag.get(), createParent, replication,
            blockSize);
    myMetrics.numFilesCreated.inc();
    myMetrics.numCreateFileOps.inc();
}

From source file:common.NameNode.java

License: Apache License

/** {@inheritDoc} */
public boolean mkdirs(String src, FsPermission masked, boolean createParent) throws IOException {
    stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src);
    // Reject over-long / over-deep paths before touching the namesystem.
    if (!checkPathLength(src)) {
        throw new IOException("mkdirs: Pathname too long.  Limit " + MAX_PATH_LENGTH + " characters, "
                + MAX_PATH_DEPTH + " levels.");
    }
    // Owner is the caller's short user name; the group is passed as null here.
    PermissionStatus perm =
            new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(), null, masked);
    return namesystem.mkdirs(src, perm, createParent);
}

From source file:common.NameNode.java

License: Apache License

/** {@inheritDoc} */
public void createSymlink(String target, String link, FsPermission dirPerms, boolean createParent)
        throws IOException {
    myMetrics.numcreateSymlinkOps.inc();
    /* The MAX_PATH_LENGTH limit is enforced even though a symlink target
     * URI may refer to a non-HDFS file system. */
    if (!checkPathLength(link)) {
        throw new IOException("Symlink path exceeds " + MAX_PATH_LENGTH + " character limit");
    }
    if ("".equals(target)) {
        throw new IOException("Invalid symlink target");
    }
    // NOTE(review): this uses the caller's full user name, while create/mkdirs
    // in this class use the short name — confirm the asymmetry is intentional.
    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    namesystem.createSymlink(target, link, new PermissionStatus(ugi.getUserName(), null, dirPerms),
            createParent);
}

From source file:io.hops.metadata.lock.TestNDBSizer.java

License: Apache License

/**
 * Populates the test database with inodes, block infos, and replicas,
 * flushing fixed-size batches through a {@code LightWeightRequestHandler}
 * and printing a progress bar after each flush.
 *
 * <p>All three counts are exact multiples of {@code BATCH_SIZE}, so every
 * entity is flushed inside its loop; no trailing partial batch is left behind.
 *
 * @throws StorageException if a batch cannot be persisted
 * @throws IOException      if the underlying request handler fails
 */
private void insertData() throws StorageException, IOException {
    System.out.println("Building the data...");

    final int NUM_INODES = 500000;
    final int NUM_BLOCKS = 1200000;
    final int NUM_REPLICAS = 3600000;
    final int BATCH_SIZE = 5000;

    // 100-character filler prepended to each inode's local name.
    StringBuilder pad = new StringBuilder(100);
    for (int i = 0; i < 100; i++) {
        pad.append('-');
    }
    final String filename = pad.toString();

    // ---- INodes ----------------------------------------------------------
    final List<INode> newFiles = new LinkedList<INode>();
    for (int i = 0; i < NUM_INODES; i++) {
        INodeDirectory dir = new INodeDirectoryWithQuota("",
                new PermissionStatus("salman", "usr", new FsPermission((short) 0777)));
        dir.setIdNoPersistance(i);
        dir.setLocalNameNoPersistance(filename + Integer.toString(i));
        dir.setParentIdNoPersistance(i);
        newFiles.add(dir);
        if (newFiles.size() >= BATCH_SIZE) {
            final int j = i; // captured by the anonymous handler below
            new LightWeightRequestHandler(HDFSOperationType.TEST) {
                @Override
                public Object performTask() throws StorageException, IOException {
                    INodeDataAccess da = (INodeDataAccess) HdfsStorageFactory
                            .getDataAccess(INodeDataAccess.class);
                    da.prepare(new LinkedList<INode>(), newFiles, new LinkedList<INode>());
                    newFiles.clear();
                    showProgressBar("INodes", j, NUM_INODES);
                    return null;
                }
            }.handle();
        }
    }

    System.out.println();

    // ---- Blocks ----------------------------------------------------------
    final List<BlockInfo> newBlocks = new LinkedList<BlockInfo>();
    for (int i = 0; i < NUM_BLOCKS; i++) {
        BlockInfo block = new BlockInfo();
        block.setINodeIdNoPersistance(i);
        block.setBlockIdNoPersistance(i);
        newBlocks.add(block);
        if (newBlocks.size() >= BATCH_SIZE) {
            final int j = i; // captured by the anonymous handler below
            new LightWeightRequestHandler(HDFSOperationType.TEST) {
                @Override
                public Object performTask() throws StorageException, IOException {
                    BlockInfoDataAccess bda = (BlockInfoDataAccess) HdfsStorageFactory
                            .getDataAccess(BlockInfoDataAccess.class);
                    bda.prepare(new LinkedList<BlockInfo>(), newBlocks, new LinkedList<BlockInfo>());
                    newBlocks.clear();
                    showProgressBar("Blocks", j, NUM_BLOCKS);
                    return null;
                }
            }.handle();
        }
    }

    System.out.println();

    // ---- Replicas --------------------------------------------------------
    final List<IndexedReplica> replicas = new LinkedList<IndexedReplica>();
    for (int i = 0; i < NUM_REPLICAS; i++) {
        replicas.add(new IndexedReplica(i, i, i, i));
        if (replicas.size() >= BATCH_SIZE) {
            final int j = i; // captured by the anonymous handler below
            new LightWeightRequestHandler(HDFSOperationType.TEST) {
                @Override
                public Object performTask() throws StorageException, IOException {
                    ReplicaDataAccess rda = (ReplicaDataAccess) HdfsStorageFactory
                            .getDataAccess(ReplicaDataAccess.class);
                    rda.prepare(new LinkedList<IndexedReplica>(), replicas, new LinkedList<IndexedReplica>());
                    replicas.clear();
                    showProgressBar("Replicas", j, NUM_REPLICAS);
                    return null;
                }
            }.handle();
        }
    }
}