List of usage examples for org.apache.hadoop.security.UserGroupInformation.getCurrentUser()
@InterfaceAudience.Public @InterfaceStability.Evolving public static UserGroupInformation getCurrentUser() throws IOException
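Before the project examples below, here is a minimal standalone sketch of the call pattern they all share: get the UGI for the current security context, then read the login name or check whether Kerberos security is enabled. The class name CurrentUserExample is illustrative only and does not come from any of the projects listed.

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

// Illustrative sketch only: basic getCurrentUser() usage as seen in the snippets below.
public class CurrentUserExample {
    public static void main(String[] args) throws IOException {
        // UGI of the user associated with the current access-control context
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

        // Full and short forms of the login name
        System.out.println("user name:        " + ugi.getUserName());
        System.out.println("short user name:  " + ugi.getShortUserName());

        // Whether this Hadoop configuration has Kerberos security enabled
        System.out.println("security enabled: " + UserGroupInformation.isSecurityEnabled());
    }
}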
From source file:org.apache.hoya.yarn.client.HoyaClient.java
License:Apache License
public String getUsername() throws IOException {
    return UserGroupInformation.getCurrentUser().getShortUserName();
}
From source file:org.apache.hoya.yarn.client.HoyaClient.java
License:Apache License
/**
 * Implement the list action: list all nodes
 * @return exit code of 0 if a list was created
 */
@VisibleForTesting
public int actionList(String clustername) throws IOException, YarnException {
    verifyManagerSet();

    String user = UserGroupInformation.getCurrentUser().getUserName();
    List<ApplicationReport> instances = listHoyaInstances(user);

    if (clustername == null || clustername.isEmpty()) {
        log.info("Hoya instances for {}: {}", (user != null ? user : "all users"), instances.size());
        for (ApplicationReport report : instances) {
            logAppReport(report);
        }
        return EXIT_SUCCESS;
    } else {
        HoyaUtils.validateClusterName(clustername);
        log.debug("Listing cluster named {}", clustername);
        ApplicationReport report = findClusterInInstanceList(instances, clustername);
        if (report != null) {
            logAppReport(report);
            return EXIT_SUCCESS;
        } else {
            throw unknownClusterException(clustername);
        }
    }
}
From source file:org.apache.hoya.yarn.client.HoyaYarnClientImpl.java
License:Apache License
private String getUsername() throws IOException {
    return UserGroupInformation.getCurrentUser().getShortUserName();
}
From source file:org.apache.ignite.client.hadoop.GridHadoopClientProtocol.java
License:Apache License
/** {@inheritDoc} */
@Override
public String getStagingAreaDir() throws IOException, InterruptedException {
    String usr = UserGroupInformation.getCurrentUser().getShortUserName();

    return GridHadoopUtils.stagingAreaDir(conf, usr).toString();
}
From source file:org.apache.ignite.internal.processors.hadoop.impl.proto.HadoopClientProtocol.java
License:Apache License
/** {@inheritDoc} */
@Override
public String getStagingAreaDir() throws IOException, InterruptedException {
    String usr = UserGroupInformation.getCurrentUser().getShortUserName();

    return HadoopUtils.stagingAreaDir(conf, usr).toString();
}
From source file:org.apache.ignite.internal.processors.hadoop.impl.v2.HadoopV2TaskContext.java
License:Apache License
/** {@inheritDoc} */
@Override
public <T> T runAsJobOwner(final Callable<T> c) throws IgniteCheckedException {
    String user = job.info().user();

    user = IgfsUtils.fixUserName(user);

    assert user != null;

    String ugiUser;

    try {
        UserGroupInformation currUser = UserGroupInformation.getCurrentUser();

        assert currUser != null;

        ugiUser = currUser.getShortUserName();
    } catch (IOException ioe) {
        throw new IgniteCheckedException(ioe);
    }

    try {
        if (F.eq(user, ugiUser))
            // if current UGI context user is the same, do direct call:
            return c.call();
        else {
            UserGroupInformation ugi = UserGroupInformation.getBestUGI(null, user);

            return ugi.doAs(new PrivilegedExceptionAction<T>() {
                @Override public T run() throws Exception {
                    return c.call();
                }
            });
        }
    } catch (Exception e) {
        throw new IgniteCheckedException(e);
    }
}
From source file:org.apache.ignite.yarn.ApplicationMaster.java
License:Apache License
/**
 * @throws IOException
 */
public void init() throws IOException {
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials cred = UserGroupInformation.getCurrentUser().getCredentials();

        allTokens = IgniteYarnUtils.createTokenBuffer(cred);
    }

    fs = FileSystem.get(conf);

    nmClient = NMClient.createNMClient();
    nmClient.init(conf);
    nmClient.start();

    // Create async application master.
    rmClient = AMRMClientAsync.createAMRMClientAsync(300, this);
    rmClient.init(conf);
    rmClient.start();

    if (props.igniteCfg() == null || props.igniteCfg().isEmpty()) {
        InputStream input = Thread.currentThread().getContextClassLoader()
                .getResourceAsStream(IgniteYarnUtils.DEFAULT_IGNITE_CONFIG);

        cfgPath = new Path(props.igniteWorkDir() + File.separator + IgniteYarnUtils.DEFAULT_IGNITE_CONFIG);

        // Create file. Override by default.
        FSDataOutputStream outputStream = fs.create(cfgPath, true);

        IOUtils.copy(input, outputStream);

        IOUtils.closeQuietly(input);
        IOUtils.closeQuietly(outputStream);
    } else
        cfgPath = new Path(props.igniteCfg());
}
From source file:org.apache.impala.util.FsPermissionChecker.java
License:Apache License
private FsPermissionChecker() throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    groups_.addAll(Arrays.asList(ugi.getGroupNames()));
    supergroup_ = CONF.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY, DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
    user_ = ugi.getShortUserName();
}
From source file:org.apache.kylin.storage.hbase.util.PingHBaseCLI.java
License:Apache License
public static void main(String[] args) throws IOException {
    String hbaseTable = args[0];

    System.out.println("Hello friend.");

    Configuration hconf = HBaseConnection.getCurrentHBaseConfiguration();
    if (User.isHBaseSecurityEnabled(hconf)) {
        try {
            System.out.println("--------------Getting kerberos credential for user "
                    + UserGroupInformation.getCurrentUser().getUserName());
            TokenUtil.obtainAndCacheToken(hconf, UserGroupInformation.getCurrentUser());
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            System.out.println("--------------Error while getting kerberos credential for user "
                    + UserGroupInformation.getCurrentUser().getUserName());
        }
    }

    Scan scan = new Scan();
    int limit = 20;

    Connection conn = null;
    Table table = null;
    ResultScanner scanner = null;
    try {
        conn = ConnectionFactory.createConnection(hconf);
        table = conn.getTable(TableName.valueOf(hbaseTable));
        scanner = table.getScanner(scan);
        int count = 0;
        for (Result r : scanner) {
            byte[] rowkey = r.getRow();
            System.out.println(Bytes.toStringBinary(rowkey));
            count++;
            if (count == limit)
                break;
        }
    } finally {
        IOUtils.closeQuietly(scanner);
        IOUtils.closeQuietly(table);
        IOUtils.closeQuietly(conn);
    }
}
From source file:org.apache.metron.maas.service.ApplicationMaster.java
License:Apache License
/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws YarnException, IOException, InterruptedException {
    LOG.info("Starting ApplicationMaster");

    // Note: Credentials, Token, UserGroupInformation, DataOutputBuffer class
    // are marked as LimitedPrivate
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    allTokens = YarnUtils.INSTANCE.tokensFromCredentials(credentials);

    // Create appSubmitterUgi and add original tokens to it
    appSubmitterUgi = YarnUtils.INSTANCE.createUserGroup(credentials);

    startTimelineClient(conf);
    if (timelineClient != null) {
        YarnUtils.INSTANCE.publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(),
                ContainerEvents.APP_ATTEMPT_START, domainId, appSubmitterUgi);
    }
    int minSize = getMinContainerMemoryIncrement(conf);
    listener = new ContainerRequestListener(timelineClient, appSubmitterUgi, domainId, minSize);
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, listener);
    amRMClient.init(conf);
    amRMClient.start();

    nmClientAsync = new NMClientAsyncImpl(listener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    // Setup local RPC Server to accept status requests directly from clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);

    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capabililty of resources in this cluster " + maxVCores);

    maasHandler = new MaaSHandler(zkQuorum, zkRoot);
    try {
        maasHandler.start();
        maasHandler.getDiscoverer().resetState();
        listener.initialize(amRMClient, nmClientAsync, maasHandler.getDiscoverer());
    } catch (Exception e) {
        throw new IllegalStateException("Unable to find zookeeper", e);
    }
    EnumMap<Resources, Integer> maxResources = Resources.toResourceMap(Resources.MEMORY.of(maxMem),
            Resources.V_CORE.of(maxVCores));
    requestQueue = maasHandler.getConfig()
            .createQueue(ImmutableMap.of(ZKQueue.ZK_CLIENT, maasHandler.getClient()));
    LOG.info("Ready to accept requests...");
    while (true) {
        ModelRequest request = requestQueue.dequeue();
        if (request == null) {
            LOG.error("Received a null request...");
            continue;
        }
        LOG.info("[" + request.getAction() + "]: Received request for model " + request.getName() + ":"
                + request.getVersion() + "x" + request.getNumInstances() + " containers of size "
                + request.getMemory() + "M at path " + request.getPath());
        EnumMap<Resources, Integer> resourceRequest = Resources
                .toResourceMap(Resources.MEMORY.of(request.getMemory()), Resources.V_CORE.of(1));
        EnumMap<Resources, Integer> resources = Resources.getRealisticResourceRequest(maxResources,
                Resources.toResource(resourceRequest));
        Resource resource = Resources.toResource(resources);
        Path appMasterJar = getAppMasterJar();
        if (request.getAction() == Action.ADD) {
            listener.requestContainers(request.getNumInstances(), resource);
            for (int i = 0; i < request.getNumInstances(); ++i) {
                Container container = listener.getContainers(resource).take();
                LOG.info("Found container id of " + container.getId().getContainerId());
                executor.execute(new LaunchContainer(conf, zkQuorum, zkRoot, nmClientAsync, request, container,
                        allTokens, appMasterJar));
                listener.getContainerState().registerRequest(container, request);
            }
        } else if (request.getAction() == Action.REMOVE) {
            listener.removeContainers(request.getNumInstances(), request);
        }
    }
}