Example usage for org.apache.hadoop.fs FileStatus getOwner

List of usage examples for org.apache.hadoop.fs FileStatus getOwner

Introduction

On this page you can find example usage for org.apache.hadoop.fs FileStatus getOwner.

Prototype

public String getOwner() 

Document

Get the owner of the file.
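
A minimal sketch of calling getOwner(), assuming a configured Hadoop client on the classpath; the path /tmp/example.txt is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetOwnerExample {
    public static void main(String[] args) throws Exception {
        // Load core-site.xml/hdfs-site.xml from the classpath.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Look up the status of a file; the path here is only an example.
        FileStatus status = fs.getFileStatus(new Path("/tmp/example.txt"));

        // getOwner() returns the owning user name; getGroup() the owning group.
        System.out.println("Owner: " + status.getOwner());
        System.out.println("Group: " + status.getGroup());
    }
}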

Usage

From source file: org.wso2.carbon.hdfs.mgt.HDFSAdmin.java

License: Open Source License

/**
 * Mgt service that returns the file and folder list of the given HDFS path.
 *
 * @param fsObjectPath
 *            file system path for which the user needs information about files and folders
 * @return list of files and folders in the given path
 * @throws HDFSServerManagementException
 */
public FolderInformation[] getCurrentUserFSObjects(String fsObjectPath) throws HDFSServerManagementException {

    boolean isCurrentUserSuperTenant = false;
    // Checks whether the current user has a role assigned; throws an error otherwise.
    try {
        checkCurrentTenantUserHasRole();
        isCurrentUserSuperTenant = hdfsAdminHelperInstance.isCurrentUserSuperTenant();

    } catch (HDFSServerManagementException e) {
        throw e;
    } catch (UserStoreException e) {
        handleException(" User store exception", e);
    }
    FileSystem hdfsFS = null;

    // The folder path is filtered so that only items under the /user/ directory are returned.
    if (fsObjectPath == null
            || (!isCurrentUserSuperTenant && fsObjectPath.equals(HDFSConstants.HDFS_ROOT_FOLDER))) {
        fsObjectPath = HDFSConstants.HDFS_USER_ROOT;
    }

    try {
        hdfsFS = hdfsAdminHelperInstance.getFSforUser();
    } catch (IOException e1) {
        String msg = "Error occurred while trying to get File system instance";
        handleException(msg, e1);
    }
    FileStatus[] fileStatusList = null;
    List<FolderInformation> folderInfo = new ArrayList<FolderInformation>();
    try {
        if (hdfsFS != null && hdfsFS.exists(new Path(fsObjectPath))) {
            if (hdfsAdminHelperInstance.isCurrentUserSuperTenant()) {
                fileStatusList = hdfsFS.listStatus(new Path(fsObjectPath));
            } else {
                fileStatusList = hdfsFS.listStatus(new Path(fsObjectPath), new PathFilter() {

                    // The filter applied when retrieving file paths.
                    @Override
                    public boolean accept(Path path) {
                        String filter = null;
                        CarbonContext carbonContext = CarbonContext.getThreadLocalCarbonContext();
                        if (hdfsAdminHelperInstance.isCurrentUserTenantAdmin()) {
                            filter = carbonContext.getTenantDomain();
                        } else {
                            filter = carbonContext.getTenantDomain() + HDFSConstants.UNDERSCORE
                                    + carbonContext.getUsername();
                        }
                        return path.toString().contains(filter);
                    }
                });
            }
            //List the statuses of the files/directories in the given path if the path is a directory.
            if (fileStatusList != null) {
                for (FileStatus fileStatus : fileStatusList) {
                    FolderInformation folder = new FolderInformation();
                    folder.setFolder(fileStatus.isDir());
                    folder.setName(fileStatus.getPath().getName());
                    folder.setFolderPath(fileStatus.getPath().toUri().getPath());
                    folder.setOwner(fileStatus.getOwner());
                    folder.setGroup(fileStatus.getGroup());
                    folder.setPermissions(fileStatus.getPermission().toString());
                    folderInfo.add(folder);
                }
                return folderInfo.toArray(new FolderInformation[folderInfo.size()]);
            }
        }
    } catch (Exception e) {
        String msg = "Error occurred while retrieving folder information";
        handleException(msg, e);
    }
    return null;

}
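
The calls relevant to getOwner() in the method above reduce to a short sketch: list a directory with a PathFilter, then read the owner from each FileStatus. Given a FileSystem named hdfsFS as above; the /user path and filter string below are hypothetical:

FileStatus[] statuses = hdfsFS.listStatus(new Path("/user"), new PathFilter() {
    @Override
    public boolean accept(Path path) {
        // Keep only paths that belong to the current tenant (hypothetical filter value).
        return path.toString().contains("tenant.example.com");
    }
});
for (FileStatus status : statuses) {
    System.out.println(status.getPath().getName() + " owned by " + status.getOwner());
}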

From source file: ras.test.hadoop.fs.InMemoryFileSystemUnitTest.java

License: Apache License

@Test
public void testSetOwnerAndGroup() throws IOException {
    String owner = "me";
    String group = "mygroup";
    Path dir = new Path("/mydir");
    inMemoryFileSystem.mkdirs(dir);
    inMemoryFileSystem.setOwner(dir, owner, group);
    FileStatus fs = inMemoryFileSystem.getFileStatus(dir);
    assertThat("Wrong owner for directory", fs.getOwner(), is(equalTo(owner)));
    assertThat("Wrong group for directory", fs.getGroup(), is(equalTo(group)));

    // Test relative file path...
    inMemoryFileSystem.setWorkingDirectory(dir);
    Path file = new Path("myfile.txt");
    inMemoryFileSystem.create(file);
    inMemoryFileSystem.setOwner(file, owner, group);
    fs = inMemoryFileSystem.getFileStatus(file);
    assertThat("Wrong owner for file", fs.getOwner(), is(equalTo(owner)));
    assertThat("Wrong group for file", fs.getGroup(), is(equalTo(group)));
}

From source file: ras.test.hadoop.fs.InMemoryFileSystemUnitTest.java

License: Apache License

@Test
public void testSetUserNoGroups() throws IOException {
    String user = "george";
    inMemoryFileSystem.setUser(user);

    assertThat("Wrong user", inMemoryFileSystem.getUser(), is(equalTo(user)));
    assertThat("Wrong userGroups", inMemoryFileSystem.getUserGroups(),
            is(equalTo(InMemoryFileSystem.DEFAULT_USERS_GROUPS)));

    Path dir = new Path("/mydir");
    inMemoryFileSystem.mkdirs(dir);
    FileStatus fs = inMemoryFileSystem.getFileStatus(dir);
    assertThat("Wrong owner for directory", fs.getOwner(), is(equalTo(user)));
    Path file = new Path("myfile.txt");
    inMemoryFileSystem.create(file);
    fs = inMemoryFileSystem.getFileStatus(file);
    assertThat("Wrong owner for file", fs.getOwner(), is(equalTo(user)));
    assertThat("Wrong group for directory", fs.getGroup(), is(equalTo(InMemoryFileSystem.DEFAULT_GROUP)));
}

From source file: ras.test.hadoop.fs.InMemoryFileSystemUnitTest.java

License: Apache License

@Test
public void testSetUserAndGroups() throws IOException {
    String user = "bob";
    String group = "myteam";
    inMemoryFileSystem.setUser(user, group);

    assertThat("Wrong user", inMemoryFileSystem.getUser(), is(equalTo(user)));
    assertThat("Wrong userGroups", inMemoryFileSystem.getUserGroups(),
            is(equalTo((Set<String>) new HashSet<String>(Arrays.asList(group)))));
    FsPermission permission = new FsPermission((short) 0700);
    Path dir = new Path("/mydir");
    inMemoryFileSystem.mkdirs(dir, permission);
    inMemoryFileSystem.setOwner(dir, user, group);
    FileStatus fs = inMemoryFileSystem.getFileStatus(dir);
    assertThat("Wrong owner for directory", fs.getOwner(), is(equalTo(user)));
    assertThat("Wrong group for directory", fs.getGroup(), is(equalTo(group)));

    Path file = new Path("myfile.txt");
    inMemoryFileSystem.create(file);
    inMemoryFileSystem.setOwner(file, user, group);
    fs = inMemoryFileSystem.getFileStatus(file);
    assertThat("Wrong owner for file", fs.getOwner(), is(equalTo(user)));
    assertThat("Wrong group for directory", fs.getGroup(), is(equalTo(group)));
}

From source file: ras.test.hadoop.fs.InMemoryFileSystemUnitTest.java

License: Apache License

@Test
public void testMkdirsNotOwnerInGroupWithWrite() throws IOException {
    Path parentDir = new Path("/parentDir");
    inMemoryFileSystem.mkdirs(parentDir, new FsPermission((short) 0660));
    inMemoryFileSystem.setUser("bill"); // default user groups
    inMemoryFileSystem.setWorkingDirectory(parentDir);

    Path subDir = new Path("subDir");
    inMemoryFileSystem.mkdirs(subDir);
    FileStatus fStatus = inMemoryFileSystem.getFileStatus(subDir);
    assertThat("Wrong owner", fStatus.getOwner(), is(equalTo("bill")));
}

From source file: skewtune.mapreduce.STJobTracker.java

License: Apache License

@SuppressWarnings("unchecked")
STJobTracker(final JobConf conf, String jobtrackerIdentifier) throws IOException, InterruptedException {
    // find the owner of the process
    // get the desired principal to load
    String keytabFilename = conf.get(JTConfig.JT_KEYTAB_FILE);
    UserGroupInformation.setConfiguration(conf);
    if (keytabFilename != null) {
        String desiredUser = conf.get(JTConfig.JT_USER_NAME, System.getProperty("user.name"));
        UserGroupInformation.loginUserFromKeytab(desiredUser, keytabFilename);
        mrOwner = UserGroupInformation.getLoginUser();
    } else {
        mrOwner = UserGroupInformation.getCurrentUser();
    }

    supergroup = conf.get(MR_SUPERGROUP, "supergroup");
    LOG.info("Starting jobtracker with owner as " + mrOwner.getShortUserName() + " and supergroup as "
            + supergroup);

    long secretKeyInterval = conf.getLong(MRConfig.DELEGATION_KEY_UPDATE_INTERVAL_KEY,
            MRConfig.DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT);
    long tokenMaxLifetime = conf.getLong(MRConfig.DELEGATION_TOKEN_MAX_LIFETIME_KEY,
            MRConfig.DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT);
    long tokenRenewInterval = conf.getLong(MRConfig.DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
            MRConfig.DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
    secretManager = new DelegationTokenSecretManager(secretKeyInterval, tokenMaxLifetime, tokenRenewInterval,
            DELEGATION_TOKEN_GC_INTERVAL);
    secretManager.startThreads();

    //
    // Grab some static constants
    //

    NUM_HEARTBEATS_IN_SECOND = conf.getInt(JT_HEARTBEATS_IN_SECOND, DEFAULT_NUM_HEARTBEATS_IN_SECOND);
    if (NUM_HEARTBEATS_IN_SECOND < MIN_NUM_HEARTBEATS_IN_SECOND) {
        NUM_HEARTBEATS_IN_SECOND = DEFAULT_NUM_HEARTBEATS_IN_SECOND;
    }

    HEARTBEATS_SCALING_FACTOR = conf.getFloat(JT_HEARTBEATS_SCALING_FACTOR, DEFAULT_HEARTBEATS_SCALING_FACTOR);
    if (HEARTBEATS_SCALING_FACTOR < MIN_HEARTBEATS_SCALING_FACTOR) {
        HEARTBEATS_SCALING_FACTOR = DEFAULT_HEARTBEATS_SCALING_FACTOR;
    }

    // whether to dump or not every heartbeat message even when DEBUG is enabled
    dumpHeartbeat = conf.getBoolean(JT_HEARTBEATS_DUMP, false);

    // This is a directory of temporary submission files. We delete it
    // on startup, and can delete any files that we're done with
    this.conf = conf;
    JobConf jobConf = new JobConf(conf);

    // Set ports, start RPC servers, setup security policy etc.
    InetSocketAddress addr = getAddress(conf);
    this.localMachine = addr.getHostName();
    this.port = addr.getPort();

    int handlerCount = conf.getInt(JT_IPC_HANDLER_COUNT, 10);
    this.interTrackerServer = RPC.getServer(SkewTuneClientProtocol.class, this, addr.getHostName(),
            addr.getPort(), handlerCount, false, conf, secretManager);
    if (LOG.isDebugEnabled()) {
        Properties p = System.getProperties();
        for (Iterator it = p.keySet().iterator(); it.hasNext();) {
            String key = (String) it.next();
            String val = p.getProperty(key);
            LOG.debug("Property '" + key + "' is " + val);
        }
    }

    InetSocketAddress infoSocAddr = NetUtils
            .createSocketAddr(conf.get(JT_HTTP_ADDRESS, String.format("%s:0", this.localMachine)));
    String infoBindAddress = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.startTime = System.currentTimeMillis();
    infoServer = new HttpServer("job", infoBindAddress, tmpInfoPort, tmpInfoPort == 0, conf);
    infoServer.setAttribute("job.tracker", this);
    infoServer.addServlet("jobcompletion", "/completion", JobCompletionServlet.class);
    infoServer.addServlet("taskspeculation", "/speculation", SpeculationEventServlet.class);
    infoServer.addServlet("skewreport", "/skew", SkewReportServlet.class);
    infoServer.addServlet("tasksplit", "/split/*", SplitTaskServlet.class);
    infoServer.addServlet("tasksplitV2", "/splitV2/*", SplitTaskV2Servlet.class);
    infoServer.start();

    this.trackerIdentifier = jobtrackerIdentifier;

    // The rpc/web-server ports can be ephemeral ports...
    // ... ensure we have the correct info
    this.port = interTrackerServer.getListenerAddress().getPort();
    this.conf.set(JT_IPC_ADDRESS, (this.localMachine + ":" + this.port));
    LOG.info("JobTracker up at: " + this.port);
    this.infoPort = this.infoServer.getPort();
    this.conf.set(JT_HTTP_ADDRESS, infoBindAddress + ":" + this.infoPort);
    LOG.info("JobTracker webserver: " + this.infoServer.getPort());
    this.defaultNotificationUrl = String.format("http://%s:%d/completion?jobid=$jobId&status=$jobStatus",
            infoBindAddress, this.infoPort);
    LOG.info("JobTracker completion URI: " + defaultNotificationUrl);
    //        this.defaultSpeculationEventUrl = String.format("http://%s:%d/speculation?taskid=$taskId&remainTime=$taskRemainTime",infoBindAddress,this.infoPort);
    this.defaultSpeculationEventUrl = String.format("http://%s:%d/speculation?jobid=$jobId", infoBindAddress,
            this.infoPort);
    LOG.info("JobTracker speculation event URI: " + defaultSpeculationEventUrl);
    this.defaultSkewReportUrl = String.format("http://%s:%d/skew", infoBindAddress, this.infoPort);
    LOG.info("JobTracker skew report event URI: " + defaultSkewReportUrl);
    this.trackerHttp = String.format("http://%s:%d", infoBindAddress, this.infoPort);

    while (!Thread.currentThread().isInterrupted()) {
        try {
            // if we haven't contacted the namenode go ahead and do it
            if (fs == null) {
                fs = mrOwner.doAs(new PrivilegedExceptionAction<FileSystem>() {
                    @Override
                    public FileSystem run() throws IOException {
                        return FileSystem.get(conf);
                    }
                });
            }

            // clean up the system dir, which will only work if hdfs is out
            // of safe mode
            if (systemDir == null) {
                systemDir = new Path(getSystemDir());
            }
            try {
                FileStatus systemDirStatus = fs.getFileStatus(systemDir);
                if (!systemDirStatus.getOwner().equals(mrOwner.getShortUserName())) {
                    throw new AccessControlException(
                            "The systemdir " + systemDir + " is not owned by " + mrOwner.getShortUserName());
                }
                if (!systemDirStatus.getPermission().equals(SYSTEM_DIR_PERMISSION)) {
                    LOG.warn("Incorrect permissions on " + systemDir + ". Setting it to "
                            + SYSTEM_DIR_PERMISSION);
                    fs.setPermission(systemDir, new FsPermission(SYSTEM_DIR_PERMISSION));
                } else {
                    break;
                }
            } catch (FileNotFoundException fnf) {
                // ignore: the system directory does not exist yet
            }
        } catch (AccessControlException ace) {
            LOG.warn("Failed to operate on " + JTConfig.JT_SYSTEM_DIR + "(" + systemDir
                    + ") because of permissions.");
            LOG.warn("Manually delete the " + JTConfig.JT_SYSTEM_DIR + "(" + systemDir
                    + ") and then start the JobTracker.");
            LOG.warn("Bailing out ... ");
            throw ace;
        } catch (IOException ie) {
            LOG.info("problem cleaning system directory: " + systemDir, ie);
        }
        Thread.sleep(FS_ACCESS_RETRY_PERIOD);
    }

    if (Thread.currentThread().isInterrupted()) {
        throw new InterruptedException();
    }

    // initialize cluster variable
    cluster = new Cluster(this.conf);

    // now create a job client proxy
    jtClient = (ClientProtocol) RPC.getProxy(ClientProtocol.class, ClientProtocol.versionID,
            JobTracker.getAddress(conf), mrOwner, this.conf,
            NetUtils.getSocketFactory(conf, ClientProtocol.class));

    new SpeculativeScheduler().start();

    // initialize task event fetcher
    new TaskCompletionEventFetcher().start();

    // Same with 'localDir' except it's always on the local disk.
    asyncDiskService = new MRAsyncDiskService(FileSystem.getLocal(conf), conf.getLocalDirs());
    asyncDiskService.moveAndDeleteFromEachVolume(SUBDIR);

    // keep at least one asynchronous worker per CPU core
    int numProcs = Runtime.getRuntime().availableProcessors();
    LOG.info("# of available processors = " + numProcs);
    int maxFactor = conf.getInt(JT_MAX_ASYNC_WORKER_FACTOR, 2);
    asyncWorkers = new ThreadPoolExecutor(numProcs, numProcs * maxFactor, 30, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(true), new ThreadPoolExecutor.CallerRunsPolicy());

    speculativeSplit = conf.getBoolean(JT_SPECULATIVE_SPLIT, false);
}
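
The ownership check buried in the middle of this constructor is the part that exercises getOwner(): compare the owner of a directory against the current user before trusting it. A minimal sketch of that pattern, with a hypothetical system-directory path:

FileStatus dirStatus = fs.getFileStatus(new Path("/tmp/system"));
String currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
if (!dirStatus.getOwner().equals(currentUser)) {
    throw new AccessControlException(
            "The directory is owned by " + dirStatus.getOwner() + ", not " + currentUser);
}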

From source file: tachyon.hadoop.HadoopUtils.java

License: Apache License

/**
 * Returns a string representation of a Hadoop {@link FileStatus}.
 *
 * @param fs Hadoop {@link FileStatus}
 * @return its string representation
 */
public static String toStringHadoopFileStatus(FileStatus fs) {
    StringBuilder sb = new StringBuilder();
    sb.append("HadoopFileStatus: Path: ").append(fs.getPath());
    sb.append(" , Length: ").append(fs.getLen());
    sb.append(" , IsDir: ").append(fs.isDir());
    sb.append(" , BlockReplication: ").append(fs.getReplication());
    sb.append(" , BlockSize: ").append(fs.getBlockSize());
    sb.append(" , ModificationTime: ").append(fs.getModificationTime());
    sb.append(" , AccessTime: ").append(fs.getAccessTime());
    sb.append(" , Permission: ").append(fs.getPermission());
    sb.append(" , Owner: ").append(fs.getOwner());
    sb.append(" , Group: ").append(fs.getGroup());
    return sb.toString();
}
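
A typical call site for this helper, assuming an existing FileSystem instance and logger (both names below are hypothetical):

FileStatus status = fileSystem.getFileStatus(new Path("/tmp/data.bin"));
LOG.debug(HadoopUtils.toStringHadoopFileStatus(status));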

From source file: tachyon.hadoop.TFSAclIntegrationTest.java

License: Apache License

/**
 * Test for {@link TFS#setOwner(Path, String, String)}. It tests changing only the owner of a
 * file using TFS.
 */
@Test
public void changeOwnerTest() throws Exception {
    Path fileA = new Path("/chownfileA");
    final String newOwner = "test-user1";
    final String newGroup = "test-group1";

    create(sTFS, fileA);

    FileStatus fs = sTFS.getFileStatus(fileA);
    String defaultOwner = fs.getOwner();
    String defaultGroup = fs.getGroup();

    Assert.assertNotEquals(defaultOwner, newOwner);
    Assert.assertNotEquals(defaultGroup, newGroup);

    sTFS.setOwner(fileA, newOwner, null);

    fs = sTFS.getFileStatus(fileA);
    Assert.assertEquals(newOwner, fs.getOwner());
    Assert.assertEquals(defaultGroup, fs.getGroup());
}

From source file: tachyon.hadoop.TFSAclIntegrationTest.java

License: Apache License

/**
 * Test for {@link TFS#setOwner(Path, String, String)}. It tests changing only the group of a
 * file using TFS.
 */
@Test
public void changeGroupTest() throws Exception {
    Path fileB = new Path("/chownfileB");
    final String newOwner = "test-user1";
    final String newGroup = "test-group1";

    create(sTFS, fileB);

    FileStatus fs = sTFS.getFileStatus(fileB);
    String defaultOwner = fs.getOwner();
    String defaultGroup = fs.getGroup();

    Assert.assertNotEquals(defaultOwner, newOwner);
    Assert.assertNotEquals(defaultGroup, newGroup);

    sTFS.setOwner(fileB, null, newGroup);

    fs = sTFS.getFileStatus(fileB);
    Assert.assertEquals(defaultOwner, fs.getOwner());
    Assert.assertEquals(newGroup, fs.getGroup());
}

From source file: tachyon.hadoop.TFSAclIntegrationTest.java

License: Apache License

/**
 * Test for {@link TFS#setOwner(Path, String, String)}. It tests changing both the owner and
 * group of a file using TFS.
 */
@Test
public void changeOwnerAndGroupTest() throws Exception {
    Path fileC = new Path("/chownfileC");
    final String newOwner = "test-user1";
    final String newGroup = "test-group1";

    create(sTFS, fileC);

    FileStatus fs = sTFS.getFileStatus(fileC);
    String defaultOwner = fs.getOwner();
    String defaultGroup = fs.getGroup();

    Assert.assertNotEquals(defaultOwner, newOwner);
    Assert.assertNotEquals(defaultGroup, newGroup);

    sTFS.setOwner(fileC, newOwner, newGroup);

    fs = sTFS.getFileStatus(fileC);
    Assert.assertEquals(newOwner, fs.getOwner());
    Assert.assertEquals(newGroup, fs.getGroup());
}
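
Across these three tests the pattern is the same: passing null for either argument of setOwner leaves that attribute unchanged, which the assertions on getOwner() and getGroup() verify. A condensed sketch of that semantics, with a hypothetical path variable:

sTFS.setOwner(path, "new-user", null);          // owner changes, group unchanged
sTFS.setOwner(path, null, "new-group");         // group changes, owner unchanged
sTFS.setOwner(path, "new-user", "new-group");   // both change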