List of usage examples for org.apache.hadoop.security.UserGroupInformation#isSecurityEnabled
public static boolean isSecurityEnabled()
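Almost every example below follows the same guard pattern: call isSecurityEnabled() and perform Kerberos-specific work (keytab login, delegation-token acquisition, SASL settings) only when it returns true. As a minimal sketch of that pattern, assuming a freshly loaded Configuration (the principal and keytab path are placeholders, not values taken from any example on this page):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class IsSecurityEnabledSketch {
    public static void main(String[] args) throws IOException {
        // isSecurityEnabled() reports whether Kerberos authentication is enabled in the
        // configuration UserGroupInformation was initialized with.
        UserGroupInformation.setConfiguration(new Configuration());

        if (UserGroupInformation.isSecurityEnabled()) {
            // Secure cluster: log in from a keytab before touching HDFS or submitting work.
            // The principal and keytab path below are placeholders.
            UserGroupInformation.loginUserFromKeytab("user@EXAMPLE.COM",
                "/etc/security/keytabs/user.keytab");
        }

        System.out.println("security enabled: " + UserGroupInformation.isSecurityEnabled()
            + ", current user: " + UserGroupInformation.getCurrentUser());
    }
}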
From source file: co.cask.hydrator.common.batch.JobUtils.java
License: Apache License
/**
 * Creates a new instance of {@link Job}. Note that the job created is not meant for actual MR
 * submission. It's just for setting up configurations.
 */
public static Job createInstance() throws IOException {
    Job job = Job.getInstance();
    Configuration conf = job.getConfiguration();
    conf.clear();

    if (UserGroupInformation.isSecurityEnabled()) {
        // If this runs in a secure cluster, this program runner is running in a YARN container,
        // hence it is not able to get authenticated with the history server.
        conf.unset("mapreduce.jobhistory.address");
        conf.setBoolean(Job.JOB_AM_ACCESS_DISABLED, false);

        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        job.getCredentials().addAll(credentials);
    }

    return job;
}
From source file: co.cask.hydrator.plugin.batch.source.HiveBatchSource.java
License: Apache License
@Override
public void prepareRun(BatchSourceContext context) throws Exception {
    // This line loads the VersionInfo class here to make it available in the HCatInputFormat.setInput
    // call. This is needed to support CDAP 3.2, where we were only exposing the classes of the plugin
    // jar and not the resources.
    LOG.trace("Hadoop version: {}", VersionInfo.getVersion());

    Job job = JobUtils.createInstance();
    Configuration conf = job.getConfiguration();
    conf.set(HiveConf.ConfVars.METASTOREURIS.varname, config.metaStoreURI);

    if (UserGroupInformation.isSecurityEnabled()) {
        conf.set(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname, "true");
        conf.set("hive.metastore.token.signature", HiveAuthFactory.HS2_CLIENT_TOKEN);
    }

    // Use the current thread's classloader to ensure that when setInput is called it can access the
    // VersionInfo class loaded above. This is needed to support CDAP 3.2, where we were only exposing
    // classes to plugin jars and not resources.
    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        HCatInputFormat.setInput(conf, config.dbName, config.tableName, config.partitions);
    } finally {
        Thread.currentThread().setContextClassLoader(classLoader);
    }

    HCatSchema hCatSchema = HCatInputFormat.getTableSchema(conf);
    if (config.schema != null) {
        // If the user provided a schema then we should use that schema to read the table. This will
        // allow the user to drop non-primitive types and read the table.
        hCatSchema = HiveSchemaConverter.toHiveSchema(Schema.parseJson(config.schema), hCatSchema);
        HCatInputFormat.setOutputSchema(job, hCatSchema);
    }
    HiveSchemaStore.storeHiveSchema(context, config.dbName, config.tableName, hCatSchema);

    context.setInput(
        Input.of(config.referenceName, new SourceInputFormatProvider(HCatInputFormat.class, conf)));
}
From source file: co.cask.tephra.persist.HDFSTransactionStateStorage.java
License: Apache License
@Override
protected void startUp() throws Exception {
    Preconditions.checkState(configuredSnapshotDir != null,
        "Snapshot directory is not configured. Please set " + TxConstants.Manager.CFG_TX_SNAPSHOT_DIR
            + " in configuration.");
    String hdfsUser = hConf.get(TxConstants.Manager.CFG_TX_HDFS_USER);
    if (hdfsUser == null || UserGroupInformation.isSecurityEnabled()) {
        if (hdfsUser != null && LOG.isDebugEnabled()) {
            LOG.debug("Ignoring configuration {}={}, running on secure Hadoop",
                TxConstants.Manager.CFG_TX_HDFS_USER, hdfsUser);
        }
        // NOTE: this storage can be started multiple times. As HDFS uses a per-JVM cache, we want to
        // create a new fs instance instead of getting a closed one.
        fs = FileSystem.newInstance(FileSystem.getDefaultUri(hConf), hConf);
    } else {
        fs = FileSystem.newInstance(FileSystem.getDefaultUri(hConf), hConf, hdfsUser);
    }
    snapshotDir = new Path(configuredSnapshotDir);
    LOG.info("Using snapshot dir " + snapshotDir);
}
From source file: com.alibaba.jstorm.hdfs.common.security.AutoHDFS.java
License: Apache License
@SuppressWarnings("unchecked")
protected byte[] getHadoopCredentials(Map conf) {
    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            final Configuration configuration = new Configuration();
            login(configuration);
            final String topologySubmitterUser = (String) conf.get(Config.TOPOLOGY_SUBMITTER_PRINCIPAL);
            final URI nameNodeURI = conf.containsKey(TOPOLOGY_HDFS_URI)
                ? new URI(conf.get(TOPOLOGY_HDFS_URI).toString())
                : FileSystem.getDefaultUri(configuration);

            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            final UserGroupInformation proxyUser =
                UserGroupInformation.createProxyUser(topologySubmitterUser, ugi);

            Credentials creds = (Credentials) proxyUser.doAs(new PrivilegedAction<Object>() {
                @Override
                public Object run() {
                    try {
                        FileSystem fileSystem = FileSystem.get(nameNodeURI, configuration);
                        Credentials credential = proxyUser.getCredentials();
                        fileSystem.addDelegationTokens(hdfsPrincipal, credential);
                        LOG.info("Delegation tokens acquired for user {}", topologySubmitterUser);
                        return credential;
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                }
            });

            ByteArrayOutputStream bao = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bao);
            creds.write(out);
            out.flush();
            out.close();
            return bao.toByteArray();
        } else {
            throw new RuntimeException("Security is not enabled for HDFS");
        }
    } catch (Exception ex) {
        throw new RuntimeException("Failed to get delegation tokens.", ex);
    }
}
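The byte array produced above is just a Writable-serialized Credentials object wrapped in an ObjectOutputStream, so a consumer on the worker side can rebuild it with the mirror-image read path. The helper below is an illustrative sketch of that read path; the class and method names are invented for this example and are not part of AutoHDFS:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.ObjectInputStream;

import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

public final class CredentialsBytes {
    private CredentialsBytes() {
    }

    // Rebuilds a Credentials object from the byte[] produced by creds.write(ObjectOutputStream).
    // Credentials is a Writable, and ObjectInputStream implements DataInput, mirroring the
    // ObjectOutputStream used on the serializing side.
    public static Credentials fromBytes(byte[] serialized) throws IOException {
        Credentials credentials = new Credentials();
        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(serialized))) {
            credentials.readFields(in);
        }
        return credentials;
    }

    // Attaches the deserialized delegation tokens to the current user so that subsequent
    // FileSystem calls made as that user can authenticate with them.
    public static void addToCurrentUser(byte[] serialized) throws IOException {
        UserGroupInformation.getCurrentUser().addCredentials(fromBytes(serialized));
    }
}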
From source file: com.alibaba.jstorm.hdfs.common.security.HdfsSecurityUtil.java
License: Apache License
public static void login(Map conf, Configuration hdfsConfig) throws IOException {
    // If AutoHDFS is specified, do not attempt to login using keytabs; only kept for backward
    // compatibility.
    if (conf.get(TOPOLOGY_AUTO_CREDENTIALS) == null
            || (!(((List) conf.get(TOPOLOGY_AUTO_CREDENTIALS)).contains(AutoHDFS.class.getName()))
                && !(((List) conf.get(TOPOLOGY_AUTO_CREDENTIALS)).contains(AutoTGT.class.getName())))) {
        if (UserGroupInformation.isSecurityEnabled()) {
            // compareAndSet added because of https://issues.apache.org/jira/browse/STORM-1535
            if (isLoggedIn.compareAndSet(false, true)) {
                LOG.info("Logging in using keytab as AutoHDFS is not specified for "
                    + TOPOLOGY_AUTO_CREDENTIALS);
                String keytab = (String) conf.get(STORM_KEYTAB_FILE_KEY);
                if (keytab != null) {
                    hdfsConfig.set(STORM_KEYTAB_FILE_KEY, keytab);
                }
                String userName = (String) conf.get(STORM_USER_NAME_KEY);
                if (userName != null) {
                    hdfsConfig.set(STORM_USER_NAME_KEY, userName);
                }
                SecurityUtil.login(hdfsConfig, STORM_KEYTAB_FILE_KEY, STORM_USER_NAME_KEY);
            }
        }
    }
}
From source file: com.asakusafw.m3bp.workaround.hadoop.WakeUpUserGroupInformation.java
License: Apache License
@Override
public InterruptibleIo install(ProcessorContext context, Editor editor)
        throws IOException, InterruptedException {
    LOG.info("workaround: eager wake up UserGroupInformation");
    UserGroupInformation.isSecurityEnabled();
    return null;
}
From source file: com.bigjob.Client.java
License: Apache License
/**
 * Main run function for the client.
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {
    LOG.info("Running Client");
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM (RM)" + ", numNodeManagers="
        + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
            + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
            + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
        + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
        + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
        + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value."
            + ", specified=" + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capability of resources in this cluster " + maxVCores);
    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value."
            + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    // if (dfsUrl != null && dfsUrl.equals("") == false) {
    //     conf.set("fs.defaultFS", dfsUrl);
    // }
    FileSystem fs = FileSystem.get(conf);
    addToLocalResources(fs, appMasterJar, appMasterJarPath, appId.getId(), localResources, null);

    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        addToLocalResources(fs, log4jPropFile, log4jPath, appId.getId(), localResources, null);
    }

    // The shell script has to be made available on the final container(s)
    // where it will be executed.
    // To do this, we need to first copy into the filesystem that is visible
    // to the yarn framework.
    // We do not need to set this as a local resource for the application
    // master as the application master does not need it.
    String hdfsShellScriptLocation = "";
    long hdfsShellScriptLen = 0;
    long hdfsShellScriptTimestamp = 0;
    if (!shellScriptPath.isEmpty()) {
        Path shellSrc = new Path(shellScriptPath);
        String shellPathSuffix = appName + "/" + appId.getId() + "/"
            + (Shell.WINDOWS ? windowBatPath : linuxShellPath);
        Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix);
        fs.copyFromLocalFile(false, true, shellSrc, shellDst);
        hdfsShellScriptLocation = shellDst.toUri().toString();
        FileStatus shellFileStatus = fs.getFileStatus(shellDst);
        hdfsShellScriptLen = shellFileStatus.getLen();
        hdfsShellScriptTimestamp = shellFileStatus.getModificationTime();
    }

    if (!shellCommand.isEmpty()) {
        addToLocalResources(fs, null, shellCommandPath, appId.getId(), localResources, shellCommand);
    }

    if (shellArgs.length > 0) {
        addToLocalResources(fs, null, shellArgsPath, appId.getId(), localResources,
            StringUtils.join(shellArgs, " "));
    }

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    // amContainer.setContainerTokens(containerToken);

    // Set the env variables to be setup in the env where the application master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // put location of shell script into env
    // using the env info, the application master will create the correct local resource for the
    // eventual containers that will be launched to execute the shell scripts
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION, hdfsShellScriptLocation);
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP, Long.toString(hdfsShellScriptTimestamp));
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN, Long.toString(hdfsShellScriptLen));

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$())
        .append(File.pathSeparatorChar).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--container_vcores " + String.valueOf(containerVirtualCores));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    vargs.add("--priority " + String.valueOf(shellCmdPriority));

    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }
    if (debugFlag) {
        vargs.add("--debug");
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements
    // For now, both memory and vcores are supported, so we set memory and
    // vcores requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    capability.setVirtualCores(amVCores);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on success
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    // TODO
    // Try submitting the same request again
    // app submission failure?

    // Monitor the application
    // return monitorApplication(appId);
    System.out.println("ApplicationId:" + appId);
    return true;
}
From source file: com.blackberry.bdp.kaboom.Authenticator.java
License: Apache License
private boolean authenticate(String proxyUserName) {
    UserGroupInformation proxyTicket;

    // logic for kerberos login
    boolean useSecurity = UserGroupInformation.isSecurityEnabled();

    LOG.info("Hadoop Security enabled: " + useSecurity);

    if (useSecurity) {
        // sanity checking
        if (kerbConfPrincipal.isEmpty()) {
            LOG.error("Hadoop running in secure mode, but Flume config doesn't "
                + "specify a principal to use for Kerberos auth.");
            return false;
        }
        if (kerbKeytab.isEmpty()) {
            LOG.error("Hadoop running in secure mode, but Flume config doesn't "
                + "specify a keytab to use for Kerberos auth.");
            return false;
        }

        String principal;
        try {
            // resolves _HOST pattern using standard Hadoop search/replace
            // via DNS lookup when 2nd argument is empty
            principal = SecurityUtil.getServerPrincipal(kerbConfPrincipal, "");
        } catch (IOException e) {
            LOG.error("Host lookup error resolving kerberos principal (" + kerbConfPrincipal
                + "). Exception follows.", e);
            return false;
        }

        Preconditions.checkNotNull(principal, "Principal must not be null");
        KerberosUser prevUser = staticLogin.get();
        KerberosUser newUser = new KerberosUser(principal, kerbKeytab);

        // be cruel and unusual when user tries to login as multiple principals
        // this isn't really valid with a reconfigure but this should be rare
        // enough to warrant a restart of the agent JVM
        // TODO: find a way to interrogate the entire current config state,
        // since we don't have to be unnecessarily protective if they switch all
        // HDFS sinks to use a different principal all at once.
        Preconditions.checkState(prevUser == null || prevUser.equals(newUser),
            "Cannot use multiple kerberos principals in the same agent. "
                + " Must restart agent to use new principal or keytab. "
                + "Previous = %s, New = %s", prevUser, newUser);

        // attempt to use cached credential if the user is the same
        // this is polite and should avoid flooding the KDC with auth requests
        UserGroupInformation curUser = null;
        if (prevUser != null && prevUser.equals(newUser)) {
            try {
                LOG.info("Attempting login as {} with cached credentials", prevUser.getPrincipal());
                curUser = UserGroupInformation.getLoginUser();
            } catch (IOException e) {
                LOG.warn("User unexpectedly had no active login. Continuing with "
                    + "authentication", e);
            }
        }

        if (curUser == null || !curUser.getUserName().equals(principal)) {
            try {
                // static login
                curUser = kerberosLogin(this, principal, kerbKeytab);
                LOG.info("Current user obtained from Kerberos login {}", curUser.getUserName());
            } catch (IOException e) {
                LOG.error("Authentication or file read error while attempting to "
                    + "login as kerberos principal (" + principal + ") using "
                    + "keytab (" + kerbKeytab + "). Exception follows.", e);
                return false;
            }
        } else {
            LOG.debug("{}: Using existing principal login: {}", this, curUser);
        }

        try {
            if (UserGroupInformation.getLoginUser().isFromKeytab() == false) {
                LOG.warn("Using a keytab for authentication is {}",
                    UserGroupInformation.getLoginUser().isFromKeytab());
                LOG.warn("curUser.isFromKeytab(): {}", curUser.isFromKeytab());
                LOG.warn("UserGroupInformation.getCurrentUser().isLoginKeytabBased(): {}",
                    UserGroupInformation.getCurrentUser().isLoginKeytabBased());
                LOG.warn("UserGroupInformation.isLoginKeytabBased(): {}",
                    UserGroupInformation.isLoginKeytabBased());
                LOG.warn("curUser.getAuthenticationMethod(): {}", curUser.getAuthenticationMethod());
                // System.exit(1);
            }
        } catch (IOException e) {
            LOG.error("Failed to get login user.", e);
            System.exit(1);
        }

        // we supposedly got through this unscathed... so store the static user
        staticLogin.set(newUser);
    }

    // hadoop impersonation works with or without kerberos security
    proxyTicket = null;
    if (!proxyUserName.isEmpty()) {
        try {
            proxyTicket = UserGroupInformation.createProxyUser(proxyUserName,
                UserGroupInformation.getLoginUser());
        } catch (IOException e) {
            LOG.error("Unable to login as proxy user. Exception follows.", e);
            return false;
        }
    }

    UserGroupInformation ugi = null;
    if (proxyTicket != null) {
        ugi = proxyTicket;
    } else if (useSecurity) {
        try {
            ugi = UserGroupInformation.getLoginUser();
        } catch (IOException e) {
            LOG.error("Unexpected error: Unable to get authenticated user after "
                + "apparent successful login! Exception follows.", e);
            return false;
        }
    }

    if (ugi != null) {
        // dump login information
        AuthenticationMethod authMethod = ugi.getAuthenticationMethod();
        LOG.info("Auth method: {}", authMethod);
        LOG.info(" User name: {}", ugi.getUserName());
        LOG.info(" Using keytab: {}", ugi.isFromKeytab());
        if (authMethod == AuthenticationMethod.PROXY) {
            UserGroupInformation superUser;
            try {
                superUser = UserGroupInformation.getLoginUser();
                LOG.info(" Superuser auth: {}", superUser.getAuthenticationMethod());
                LOG.info(" Superuser name: {}", superUser.getUserName());
                LOG.info(" Superuser using keytab: {}", superUser.isFromKeytab());
            } catch (IOException e) {
                LOG.error("Unexpected error: unknown superuser impersonating proxy.", e);
                return false;
            }
        }

        LOG.info("Logged in as user {}", ugi.getUserName());

        UGIState state = new UGIState();
        state.ugi = proxyTicket;
        state.lastAuthenticated = System.currentTimeMillis();
        proxyUserMap.put(proxyUserName, state);

        return true;
    }

    return true;
}
From source file: com.cloudera.beeswax.BeeswaxServiceImpl.java
License: Apache License
private <T> T doWithState(RunningQueryState state, PrivilegedExceptionAction<T> action)
        throws BeeswaxException {
    try {
        UserGroupInformation ugi;
        if (UserGroupInformation.isSecurityEnabled()) {
            ugi = UserGroupInformation.createProxyUser(state.query.hadoop_user,
                UserGroupInformation.getLoginUser());
        } else {
            ugi = UserGroupInformation.createRemoteUser(state.query.hadoop_user);
        }
        return ugi.doAs(action);
    } catch (UndeclaredThrowableException e) {
        if (e.getUndeclaredThrowable() instanceof PrivilegedActionException) {
            Throwable bwe = e.getUndeclaredThrowable().getCause();
            if (bwe instanceof BeeswaxException) {
                LOG.error("Caught BeeswaxException", (BeeswaxException) bwe);
                throw (BeeswaxException) bwe;
            }
        }
        LOG.error("Caught unexpected exception.", e);
        throw new BeeswaxException(e.getMessage(), state.handle.log_context, state.handle);
    } catch (IOException e) {
        LOG.error("Caught IOException", e);
        throw new BeeswaxException(e.getMessage(), state.handle.log_context, state.handle);
    } catch (InterruptedException e) {
        LOG.error("Caught InterruptedException", e);
        throw new BeeswaxException(e.getMessage(), state.handle.log_context, state.handle);
    }
}
From source file: com.cloudera.hadoop.hdfs.nfs.nfs4.NFS4Handler.java
License: Apache License
/**
 * Process a CompoundRequest and return a CompoundResponse.
 */
public CompoundResponse process(final RPCRequest rpcRequest, final CompoundRequest compoundRequest,
        final InetAddress clientAddress, final String sessionID) {
    Credentials creds = (Credentials) compoundRequest.getCredentials();
    // FIXME below is a hack regarding CredentialsUnix
    if (creds == null || !(creds instanceof AuthenticatedCredentials)) {
        CompoundResponse response = new CompoundResponse();
        response.setStatus(NFS4ERR_WRONGSEC);
        return response;
    }
    try {
        UserGroupInformation sudoUgi;
        String username = creds.getUsername(mConfiguration);
        if (UserGroupInformation.isSecurityEnabled()) {
            sudoUgi = UserGroupInformation.createProxyUser(username,
                UserGroupInformation.getCurrentUser());
        } else {
            sudoUgi = UserGroupInformation.createRemoteUser(username);
        }
        final NFS4Handler server = this;
        final Session session = new Session(rpcRequest.getXid(), compoundRequest, mConfiguration,
            clientAddress, sessionID);
        return sudoUgi.doAs(new PrivilegedExceptionAction<CompoundResponse>() {
            public CompoundResponse run() throws Exception {
                String username = UserGroupInformation.getCurrentUser().getShortUserName();
                int lastStatus = NFS4_OK;
                List<OperationResponse> responses = Lists.newArrayList();
                for (OperationRequest request : compoundRequest.getOperations()) {
                    if (LOGGER.isDebugEnabled()) {
                        LOGGER.debug(sessionID + " " + request.getClass().getSimpleName() + " for "
                            + username);
                    }
                    OperationRequestHandler<OperationRequest, OperationResponse> handler =
                        OperationFactory.getHandler(request.getID());
                    OperationResponse response = handler.handle(server, session, request);
                    responses.add(response);
                    lastStatus = response.getStatus();
                    if (lastStatus != NFS4_OK) {
                        LOGGER.warn(sessionID + " Quitting due to " + lastStatus + " on "
                            + request.getClass().getSimpleName() + " for " + username);
                        break;
                    }
                    server.incrementMetric("NFS_" + request.getClass().getSimpleName(), 1);
                    server.incrementMetric("NFS_OPERATIONS", 1);
                }
                CompoundResponse response = new CompoundResponse();
                response.setStatus(lastStatus);
                response.setOperations(responses);
                server.incrementMetric("NFS_COMMANDS", 1);
                return response;
            }
        });
    } catch (Exception ex) {
        if (ex instanceof UndeclaredThrowableException && ex.getCause() != null) {
            Throwable throwable = ex.getCause();
            if (throwable instanceof Exception) {
                ex = (Exception) throwable;
            } else if (throwable instanceof Error) {
                // something really bad happened
                LOGGER.error(sessionID + " Unhandled Error", throwable);
                throw (Error) throwable;
            } else {
                LOGGER.error(sessionID + " Unhandled Throwable", throwable);
                throw new RuntimeException(throwable);
            }
        }
        LOGGER.warn(sessionID + " Unhandled Exception", ex);
        CompoundResponse response = new CompoundResponse();
        if (ex instanceof NFS4Exception) {
            response.setStatus(((NFS4Exception) ex).getError());
        } else if (ex instanceof UnsupportedOperationException) {
            response.setStatus(NFS4ERR_NOTSUPP);
        } else {
            LOGGER.warn(sessionID + " Setting SERVERFAULT for " + clientAddress + " for "
                + compoundRequest.getOperations());
            response.setStatus(NFS4ERR_SERVERFAULT);
        }
        return response;
    }
}