Example usage for org.apache.hadoop.security UserGroupInformation getCurrentUser

Introduction

This page collects usage examples for org.apache.hadoop.security.UserGroupInformation.getCurrentUser.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation getCurrentUser() throws IOException 

Document

Return the current user, including any doAs in the current stack.
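
A quick way to see the "including any doAs" behavior is to compare getCurrentUser with getLoginUser inside and outside a doAs block. The sketch below is illustrative and not taken from the sources listed under Usage; the user name "someuser" is made up.

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class CurrentUserDemo {
    public static void main(String[] args) throws IOException, InterruptedException {
        // Outside doAs, the current user matches the login user.
        System.out.println("login user:   " + UserGroupInformation.getLoginUser().getUserName());
        System.out.println("current user: " + UserGroupInformation.getCurrentUser().getUserName());

        // Inside doAs, getCurrentUser() reflects the impersonated identity,
        // while getLoginUser() still returns the original process user.
        UserGroupInformation remote = UserGroupInformation.createRemoteUser("someuser");
        remote.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws IOException {
                System.out.println("inside doAs:  " + UserGroupInformation.getCurrentUser().getUserName());
                return null;
            }
        });
    }
}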

Usage

From source file: sharedsidefunctions.DiamondAlignment.java

License: Apache License

public static void align(String diamond, String localDB, String key, String[] arguments, Configuration conf)
        throws IOException, InterruptedException {
    ArrayList<String> argumentsList = new ArrayList<String>(Arrays.asList(arguments));
    //        String qda[] = {"-q", "/tmp/" + key, "-d", localDB, "-a", "/tmp/" + key};
    String[] qda = { "-q", "/tmp/" + key, "-d", localDB };
    argumentsList.add(0, diamond);
    argumentsList.addAll(new ArrayList<String>(Arrays.asList(qda)));
    //        Process p1 = Runtime.getRuntime().exec(argumentsList.toArray(new String[argumentsList.size()]));
    //        p1.waitFor();
    String hadoopUser = UserGroupInformation.getCurrentUser().getUserName();
    Process p = Runtime.getRuntime().exec(argumentsList.toArray(new String[argumentsList.size()]));
    FileSystem fs = FileSystem.get(conf);
    //process stream copied to HDFS stream
    InputStream in = p.getInputStream();
    FSDataOutputStream out = fs.create(new Path("/user/" + hadoopUser + "/Hamond/output/" + key + ".out"));
    IOUtils.copyBytes(in, out, 4096, true);
    p.waitFor();

}

From source file: sharedsidefunctions.DiamondView.java

License: Apache License

public static void view(String diamond, String key, Configuration conf)
        throws IOException, InterruptedException {
    String hadoopUser = UserGroupInformation.getCurrentUser().getUserName();
    String[] view = { diamond, "view", "-a", "/tmp/" + key };
    Process p2 = Runtime.getRuntime().exec(view);
    FileSystem fs = FileSystem.get(conf);
    //process stream copied to HDFS stream
    InputStream in = p2.getInputStream();
    FSDataOutputStream out = fs.create(new Path("/user/" + hadoopUser + "/Hamond/output/" + key + ".out"));
    IOUtils.copyBytes(in, out, 4096, true);
    p2.waitFor();
}
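
Both helpers follow the same pattern: launch the external diamond binary with Runtime.exec, then stream its stdout straight into a per-user HDFS output file; the final true argument to IOUtils.copyBytes closes both streams once the copy completes.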

From source file: sharedsidefunctions.HadoopUser.java

License: Apache License

public static String getHadoopUser() throws IOException {
    return UserGroupInformation.getCurrentUser().getUserName();
}

From source file: skewtune.mapreduce.STJobTracker.java

License: Apache License

@SuppressWarnings("unchecked")
STJobTracker(final JobConf conf, String jobtrackerIdentifier) throws IOException, InterruptedException {
    // find the owner of the process
    // get the desired principal to load
    String keytabFilename = conf.get(JTConfig.JT_KEYTAB_FILE);
    UserGroupInformation.setConfiguration(conf);
    if (keytabFilename != null) {
        String desiredUser = conf.get(JTConfig.JT_USER_NAME, System.getProperty("user.name"));
        UserGroupInformation.loginUserFromKeytab(desiredUser, keytabFilename);
        mrOwner = UserGroupInformation.getLoginUser();
    } else {
        mrOwner = UserGroupInformation.getCurrentUser();
    }

    supergroup = conf.get(MR_SUPERGROUP, "supergroup");
    LOG.info("Starting jobtracker with owner as " + mrOwner.getShortUserName() + " and supergroup as "
            + supergroup);

    long secretKeyInterval = conf.getLong(MRConfig.DELEGATION_KEY_UPDATE_INTERVAL_KEY,
            MRConfig.DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT);
    long tokenMaxLifetime = conf.getLong(MRConfig.DELEGATION_TOKEN_MAX_LIFETIME_KEY,
            MRConfig.DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT);
    long tokenRenewInterval = conf.getLong(MRConfig.DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
            MRConfig.DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
    secretManager = new DelegationTokenSecretManager(secretKeyInterval, tokenMaxLifetime, tokenRenewInterval,
            DELEGATION_TOKEN_GC_INTERVAL);
    secretManager.startThreads();

    //
    // Grab some static constants
    //

    NUM_HEARTBEATS_IN_SECOND = conf.getInt(JT_HEARTBEATS_IN_SECOND, DEFAULT_NUM_HEARTBEATS_IN_SECOND);
    if (NUM_HEARTBEATS_IN_SECOND < MIN_NUM_HEARTBEATS_IN_SECOND) {
        NUM_HEARTBEATS_IN_SECOND = DEFAULT_NUM_HEARTBEATS_IN_SECOND;
    }

    HEARTBEATS_SCALING_FACTOR = conf.getFloat(JT_HEARTBEATS_SCALING_FACTOR, DEFAULT_HEARTBEATS_SCALING_FACTOR);
    if (HEARTBEATS_SCALING_FACTOR < MIN_HEARTBEATS_SCALING_FACTOR) {
        HEARTBEATS_SCALING_FACTOR = DEFAULT_HEARTBEATS_SCALING_FACTOR;
    }

    // whether to dump every heartbeat message even when DEBUG is enabled
    dumpHeartbeat = conf.getBoolean(JT_HEARTBEATS_DUMP, false);

    // This is a directory of temporary submission files. We delete it
    // on startup, and can delete any files that we're done with
    this.conf = conf;
    JobConf jobConf = new JobConf(conf);

    // Set ports, start RPC servers, setup security policy etc.
    InetSocketAddress addr = getAddress(conf);
    this.localMachine = addr.getHostName();
    this.port = addr.getPort();

    int handlerCount = conf.getInt(JT_IPC_HANDLER_COUNT, 10);
    this.interTrackerServer = RPC.getServer(SkewTuneClientProtocol.class, this, addr.getHostName(),
            addr.getPort(), handlerCount, false, conf, secretManager);
    if (LOG.isDebugEnabled()) {
        Properties p = System.getProperties();
        for (Iterator it = p.keySet().iterator(); it.hasNext();) {
            String key = (String) it.next();
            String val = p.getProperty(key);
            LOG.debug("Property '" + key + "' is " + val);
        }
    }

    InetSocketAddress infoSocAddr = NetUtils
            .createSocketAddr(conf.get(JT_HTTP_ADDRESS, String.format("%s:0", this.localMachine)));
    String infoBindAddress = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.startTime = System.currentTimeMillis();
    infoServer = new HttpServer("job", infoBindAddress, tmpInfoPort, tmpInfoPort == 0, conf);
    infoServer.setAttribute("job.tracker", this);
    infoServer.addServlet("jobcompletion", "/completion", JobCompletionServlet.class);
    infoServer.addServlet("taskspeculation", "/speculation", SpeculationEventServlet.class);
    infoServer.addServlet("skewreport", "/skew", SkewReportServlet.class);
    infoServer.addServlet("tasksplit", "/split/*", SplitTaskServlet.class);
    infoServer.addServlet("tasksplitV2", "/splitV2/*", SplitTaskV2Servlet.class);
    infoServer.start();

    this.trackerIdentifier = jobtrackerIdentifier;

    // The rpc/web-server ports can be ephemeral ports...
    // ... ensure we have the correct info
    this.port = interTrackerServer.getListenerAddress().getPort();
    this.conf.set(JT_IPC_ADDRESS, (this.localMachine + ":" + this.port));
    LOG.info("JobTracker up at: " + this.port);
    this.infoPort = this.infoServer.getPort();
    this.conf.set(JT_HTTP_ADDRESS, infoBindAddress + ":" + this.infoPort);
    LOG.info("JobTracker webserver: " + this.infoServer.getPort());
    this.defaultNotificationUrl = String.format("http://%s:%d/completion?jobid=$jobId&status=$jobStatus",
            infoBindAddress, this.infoPort);
    LOG.info("JobTracker completion URI: " + defaultNotificationUrl);
    //        this.defaultSpeculationEventUrl = String.format("http://%s:%d/speculation?taskid=$taskId&remainTime=$taskRemainTime",infoBindAddress,this.infoPort);
    this.defaultSpeculationEventUrl = String.format("http://%s:%d/speculation?jobid=$jobId", infoBindAddress,
            this.infoPort);
    LOG.info("JobTracker speculation event URI: " + defaultSpeculationEventUrl);
    this.defaultSkewReportUrl = String.format("http://%s:%d/skew", infoBindAddress, this.infoPort);
    LOG.info("JobTracker skew report event URI: " + defaultSkewReportUrl);
    this.trackerHttp = String.format("http://%s:%d", infoBindAddress, this.infoPort);

    while (!Thread.currentThread().isInterrupted()) {
        try {
            // if we haven't contacted the namenode go ahead and do it
            if (fs == null) {
                fs = mrOwner.doAs(new PrivilegedExceptionAction<FileSystem>() {
                    @Override
                    public FileSystem run() throws IOException {
                        return FileSystem.get(conf);
                    }
                });
            }

            // clean up the system dir, which will only work if hdfs is out
            // of safe mode
            if (systemDir == null) {
                systemDir = new Path(getSystemDir());
            }
            try {
                FileStatus systemDirStatus = fs.getFileStatus(systemDir);
                if (!systemDirStatus.getOwner().equals(mrOwner.getShortUserName())) {
                    throw new AccessControlException(
                            "The systemdir " + systemDir + " is not owned by " + mrOwner.getShortUserName());
                }
                if (!systemDirStatus.getPermission().equals(SYSTEM_DIR_PERMISSION)) {
                    LOG.warn("Incorrect permissions on " + systemDir + ". Setting it to "
                            + SYSTEM_DIR_PERMISSION);
                    fs.setPermission(systemDir, new FsPermission(SYSTEM_DIR_PERMISSION));
                } else {
                    break;
                }
            } catch (FileNotFoundException fnf) {
                // ignore: the system directory does not exist yet
            }
        } catch (AccessControlException ace) {
            LOG.warn("Failed to operate on " + JTConfig.JT_SYSTEM_DIR + "(" + systemDir
                    + ") because of permissions.");
            LOG.warn("Manually delete the " + JTConfig.JT_SYSTEM_DIR + "(" + systemDir
                    + ") and then start the JobTracker.");
            LOG.warn("Bailing out ... ");
            throw ace;
        } catch (IOException ie) {
            LOG.info("problem cleaning system directory: " + systemDir, ie);
        }
        Thread.sleep(FS_ACCESS_RETRY_PERIOD);
    }

    if (Thread.currentThread().isInterrupted()) {
        throw new InterruptedException();
    }

    // initialize cluster variable
    cluster = new Cluster(this.conf);

    // now create a job client proxy
    jtClient = (ClientProtocol) RPC.getProxy(ClientProtocol.class, ClientProtocol.versionID,
            JobTracker.getAddress(conf), mrOwner, this.conf,
            NetUtils.getSocketFactory(conf, ClientProtocol.class));

    new SpeculativeScheduler().start();

    // initialize task event fetcher
    new TaskCompletionEventFetcher().start();

    // Same with 'localDir' except it's always on the local disk.
    asyncDiskService = new MRAsyncDiskService(FileSystem.getLocal(conf), conf.getLocalDirs());
    asyncDiskService.moveAndDeleteFromEachVolume(SUBDIR);

    // keep at least one asynchronous worker per CPU core
    int numProcs = Runtime.getRuntime().availableProcessors();
    LOG.info("# of available processors = " + numProcs);
    int maxFactor = conf.getInt(JT_MAX_ASYNC_WORKER_FACTOR, 2);
    asyncWorkers = new ThreadPoolExecutor(numProcs, numProcs * maxFactor, 30, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(true), new ThreadPoolExecutor.CallerRunsPolicy());

    speculativeSplit = conf.getBoolean(JT_SPECULATIVE_SPLIT, false);
}

From source file: skewtune.mapreduce.STJobTracker.java

License: Apache License

/**
 * @throws IOException
 * @see org.apache.hadoop.mapreduce.protocol.ClientProtocol#getStagingAreaDir()
 */
public String getStagingAreaDir() throws IOException {
    try {
        final String user = UserGroupInformation.getCurrentUser().getShortUserName();
        return mrOwner.doAs(new PrivilegedExceptionAction<String>() {
            @Override
            public String run() throws Exception {
                Path stagingRootDir = new Path(
                        conf.get(JTConfig.JT_STAGING_AREA_ROOT, "/tmp/hadoop/mapred/staging"));
                FileSystem fs = stagingRootDir.getFileSystem(conf);
                return fs.makeQualified(new Path(stagingRootDir, user + "/.staging")).toString();
            }
        });
    } catch (InterruptedException ie) {
        throw new IOException(ie);
    }
}
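
Note the ordering here: the calling user's short name is captured via getCurrentUser() before entering doAs, and the filesystem work then runs as mrOwner. The tracker thus resolves the caller's staging directory while using its own privileges to query the filesystem.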

From source file: skewtune.mapreduce.STJobTracker.java

License: Apache License

@Override
public void refreshUserToGroupsMappings(Configuration conf) throws IOException {
    LOG.info("Refreshing all user-to-groups mappings. Requested by user: "
            + UserGroupInformation.getCurrentUser().getShortUserName());

    Groups.getUserToGroupsMappingService(conf).refresh();
}

From source file: skewtune.mapreduce.STJobTracker.java

License: Apache License

/**
 * Discard a current delegation token.
 */
@Override
public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
        throws IOException, InterruptedException {
    String user = UserGroupInformation.getCurrentUser().getUserName();
    secretManager.cancelToken(token, user);
}

From source file: skewtune.mapreduce.STJobTracker.java

License: Apache License

/**
 * Get a new delegation token.
 */
@Override
public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
        throws IOException, InterruptedException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    Text owner = new Text(ugi.getUserName());
    Text realUser = null;
    if (ugi.getRealUser() != null) {
        realUser = new Text(ugi.getRealUser().getUserName());
    }
    DelegationTokenIdentifier ident = new DelegationTokenIdentifier(owner, renewer, realUser);
    return new Token<DelegationTokenIdentifier>(ident, secretManager);
}
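
getRealUser() is non-null only when the current UGI is a proxy user (i.e., the caller is being impersonated), so in that case the token identifier records both the effective and the real identity.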

From source file: skewtune.mapreduce.STJobTracker.java

License: Apache License

/**
 * Renew a delegation token to extend its lifetime.
 */
@Override
public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
        throws IOException, InterruptedException {
    String user = UserGroupInformation.getCurrentUser().getUserName();
    return secretManager.renewToken(token, user);
}

From source file: stroom.pipeline.server.writer.HDFSFileAppender.java

License: Apache License

public static UserGroupInformation buildRemoteUser(final Optional<String> runAsUser) {
    final String user = runAsUser.orElseGet(() -> {
        try {
            return UserGroupInformation.getCurrentUser().getUserName();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    });

    // userGroupInformation =
    // UserGroupInformation.createProxyUser(runAsUser,
    // UserGroupInformation.getLoginUser());
    return UserGroupInformation.createRemoteUser(user);

}
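
Since createRemoteUser yields a UGI without credentials, this suits simple-auth clusters; on a Kerberized cluster the returned user cannot authenticate by itself. Below is a hedged usage sketch; the run-as name "etl-user" and the HDFS calls are illustrative assumptions, not part of the Stroom source.

import java.security.PrivilegedExceptionAction;
import java.util.Optional;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

import stroom.pipeline.server.writer.HDFSFileAppender;

public class BuildRemoteUserDemo {
    public static void main(String[] args) throws Exception {
        // Build a UGI for the configured run-as user ("etl-user" is hypothetical);
        // an empty Optional would fall back to getCurrentUser(), as shown above.
        UserGroupInformation ugi = HDFSFileAppender.buildRemoteUser(Optional.of("etl-user"));
        ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
            // FileSystem calls in this block execute as "etl-user".
            FileSystem fs = FileSystem.get(new Configuration());
            fs.mkdirs(new Path("/tmp/example")); // hypothetical path
            return null;
        });
    }
}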