Example usage for org.apache.hadoop.security UserGroupInformation getLoginUser


Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.getLoginUser().

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation getLoginUser() throws IOException 


Document

Get the currently logged in user.
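
For orientation before the examples, here is a minimal, self-contained sketch of the pattern most of them share: getLoginUser() returns the identity established when the process logged in (via keytab, kinit, or the local OS account), and the Spring for Apache Hadoop examples below pass it to createProxyUser() and run the real work inside doAs(). The class name, the "someUser" proxy name, and the file-system call inside run() are illustrative placeholders, not taken from any example on this page.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

public class GetLoginUserSketch {

    public static void main(String[] args) throws Exception {
        // The login user is the identity the JVM authenticated as at startup.
        UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
        System.out.println("Login user: " + loginUser.getUserName());

        // Recurring pattern in the examples below: impersonate another user
        // ("someUser" is a placeholder) while the login user supplies the real credentials.
        UserGroupInformation proxy = UserGroupInformation.createProxyUser("someUser", loginUser);
        proxy.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                // Any Hadoop call made here executes as the proxy user.
                FileSystem fs = FileSystem.get(new Configuration());
                System.out.println("Working directory: " + fs.getWorkingDirectory());
                return null;
            }
        });
    }
}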

Usage

From source file:org.springframework.data.hadoop.fs.DistCp.java

License:Apache License

/**
 * DistCopy using a command-line style (arguments are specified as {@link String}s).
 * @param arguments copy arguments
 */
public void copy(String... arguments) {
    Assert.notEmpty(arguments, "invalid number of arguments");
    // sanitize the arguments
    final List<String> parsedArguments = new ArrayList<String>();
    for (String arg : arguments) {
        parsedArguments.addAll(Arrays.asList(StringUtils.tokenizeToStringArray(arg, " ")));
    }

    try {
        if (StringUtils.hasText(user)) {
            UserGroupInformation ugi = UserGroupInformation.createProxyUser(user,
                    UserGroupInformation.getLoginUser());
            ugi.doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    invokeCopy(configuration, parsedArguments.toArray(new String[parsedArguments.size()]));
                    return null;
                }
            });
        } else {
            invokeCopy(configuration, parsedArguments.toArray(new String[parsedArguments.size()]));
        }
    } catch (Exception ex) {
        throw new IllegalStateException("Cannot run distCp impersonated as '" + user + "'", ex);
    }
}

From source file:org.springframework.data.hadoop.mapreduce.HadoopCodeExecutor.java

License:Apache License

protected int runCode() throws Exception {
    // merge configuration options
    final Configuration cfg = resolveConfiguration();

    // resolve target object
    final Class<T> type = resolveTargetClass(cfg);
    final T target = resolveTargetObject(type);

    // setup the invocation context
    Thread th = Thread.currentThread();
    ClassLoader oldTccl = th.getContextClassLoader();

    log.info("Invoking [" + (target != null ? target : type) + "] "
            + (jar != null ? "from jar [" + jar.getURI() + "]" : "") + " with args ["
            + Arrays.toString(arguments) + "]");

    ClassLoader newCL = cfg.getClassLoader();
    boolean isJarCL = newCL instanceof ParentLastURLClassLoader;
    try {
        ExecutionUtils.disableSystemExitCall();
        if (isJarCL) {
            ExecutionUtils.preventHadoopLeaks(beanClassLoader);
        }

        //ExecutionUtils.earlyLeaseDaemonInit(cfg);

        th.setContextClassLoader(newCL);

        if (StringUtils.hasText(user)) {
            UserGroupInformation ugi = UserGroupInformation.createProxyUser(user,
                    UserGroupInformation.getLoginUser());

            return ugi.doAs(new PrivilegedExceptionAction<Integer>() {
                @Override
                public Integer run() throws Exception {
                    return invokeTarget(cfg, target, type, arguments);
                }
            });
        } else {
            return invokeTarget(cfg, target, type, arguments);
        }
    } finally {
        ExecutionUtils.enableSystemExitCall();
        th.setContextClassLoader(oldTccl);

        if (isJarCL) {
            if (closeFs) {
                ExecutionUtils.shutdownFileSystem(cfg);
            }
            ExecutionUtils.patchLeakedClassLoader(newCL, oldTccl);
        }
    }
}

From source file:org.springframework.data.hadoop.mapreduce.JobFactoryBean.java

License:Apache License

@SuppressWarnings("rawtypes")
public void afterPropertiesSet() throws Exception {
    final Configuration cfg = ConfigurationUtils.createFrom(configuration, properties);

    buildGenericOptions(cfg);

    if (StringUtils.hasText(user)) {
        UserGroupInformation ugi = UserGroupInformation.createProxyUser(user,
                UserGroupInformation.getLoginUser());
        ugi.doAs(new PrivilegedExceptionAction<Void>() {

            @Override
            public Void run() throws Exception {
                job = new Job(cfg);
                return null;
            }
        });
    } else {
        job = new Job(cfg);
    }

    ClassLoader loader = (beanClassLoader != null ? beanClassLoader
            : org.springframework.util.ClassUtils.getDefaultClassLoader());

    if (jar != null) {
        JobConf conf = (JobConf) job.getConfiguration();
        conf.setJar(jar.getURI().toString());
        loader = ExecutionUtils.createParentLastClassLoader(jar, beanClassLoader, cfg);
        conf.setClassLoader(loader);
    }

    // set first to enable auto-detection of K/V to skip the key/value types to be specified
    if (mapper != null) {
        Class<? extends Mapper> mapperClass = resolveClass(mapper, loader, Mapper.class);
        job.setMapperClass(mapperClass);
        configureMapperTypesIfPossible(job, mapperClass);
    }

    if (reducer != null) {
        Class<? extends Reducer> reducerClass = resolveClass(reducer, loader, Reducer.class);
        job.setReducerClass(reducerClass);
        configureReducerTypesIfPossible(job, reducerClass);
    }

    if (StringUtils.hasText(name)) {
        job.setJobName(name);
    }
    if (combiner != null) {
        job.setCombinerClass(resolveClass(combiner, loader, Reducer.class));
    }
    if (groupingComparator != null) {
        job.setGroupingComparatorClass(resolveClass(groupingComparator, loader, RawComparator.class));
    }
    if (inputFormat != null) {
        job.setInputFormatClass(resolveClass(inputFormat, loader, InputFormat.class));
    }
    if (mapKey != null) {
        job.setMapOutputKeyClass(resolveClass(mapKey, loader, Object.class));
    }
    if (mapValue != null) {
        job.setMapOutputValueClass(resolveClass(mapValue, loader, Object.class));
    }
    if (numReduceTasks != null) {
        job.setNumReduceTasks(numReduceTasks);
    }
    if (key != null) {
        job.setOutputKeyClass(resolveClass(key, loader, Object.class));
    }
    if (value != null) {
        job.setOutputValueClass(resolveClass(value, loader, Object.class));
    }
    if (outputFormat != null) {
        job.setOutputFormatClass(resolveClass(outputFormat, loader, OutputFormat.class));
    }
    if (partitioner != null) {
        job.setPartitionerClass(resolveClass(partitioner, loader, Partitioner.class));
    }
    if (sortComparator != null) {
        job.setSortComparatorClass(resolveClass(sortComparator, loader, RawComparator.class));
    }
    if (StringUtils.hasText(workingDir)) {
        job.setWorkingDirectory(new Path(workingDir));
    }
    if (jarClass != null) {
        job.setJarByClass(jarClass);
    }

    if (!CollectionUtils.isEmpty(inputPaths)) {
        for (String path : inputPaths) {
            FileInputFormat.addInputPath(job, new Path(path));
        }
    }

    if (StringUtils.hasText(outputPath)) {
        FileOutputFormat.setOutputPath(job, new Path(outputPath));
    }

    if (compressOutput != null) {
        FileOutputFormat.setCompressOutput(job, compressOutput);
    }

    if (codecClass != null) {
        FileOutputFormat.setOutputCompressorClass(job,
                resolveClass(codecClass, loader, CompressionCodec.class));
    }

    processJob(job);
}

From source file:org.springframework.data.hadoop.mapreduce.StreamJobFactoryBean.java

License:Apache License

public void afterPropertiesSet() throws Exception {
    Assert.isTrue(!ObjectUtils.isEmpty(input), "at least one input required");
    Assert.hasText(output, "the output is required");

    final Configuration cfg = ConfigurationUtils.createFrom(configuration, properties);

    buildGenericOptions(cfg);

    Map<String, String> args = new LinkedHashMap<String, String>();

    // add unique arguments
    addArgument(output, "-output", args);
    addArgument(mapper, "-mapper", args);
    addArgument(reducer, "-reducer", args);
    addArgument(combiner, "-combiner", args);
    addArgument(partitioner, "-partitioner", args);
    addArgument(inputFormat, "-inputformat", args);
    addArgument(outputFormat, "-outputformat", args);

    if (numReduceTasks != null)
        addArgument(numReduceTasks.toString(), "-numReduceTasks", args);

    // translate map to list
    final List<String> argsList = new ArrayList<String>(args.size() * 2 + 16);

    for (Map.Entry<String, String> entry : args.entrySet()) {
        argsList.add(entry.getKey());
        argsList.add(entry.getValue());
    }

    // add -cmdEnv (to the list not the map to avoid key collision)
    if (cmdEnv != null) {
        Enumeration<?> props = cmdEnv.propertyNames();
        while (props.hasMoreElements()) {
            String key = props.nextElement().toString();
            argsList.add("-cmdenv");
            argsList.add(key + "=" + cmdEnv.getProperty(key));
        }
    }

    // add recurring arguments
    addArgument(input, "-input", argsList);

    if (StringUtils.hasText(user)) {
        UserGroupInformation ugi = UserGroupInformation.createProxyUser(user,
                UserGroupInformation.getLoginUser());
        ugi.doAs(new PrivilegedExceptionAction<Void>() {

            @Override
            public Void run() throws Exception {
                job = new Job(createStreamJob(cfg, argsList.toArray(new String[argsList.size()])));
                return null;
            }
        });
    } else {
        job = new Job(createStreamJob(cfg, argsList.toArray(new String[argsList.size()])));
    }

    job.setJobName(name);
}

From source file:org.springframework.data.hadoop.pig.PigServerFactoryBean.java

License:Apache License

protected PigServer createPigInstance() throws Exception {
    final PigContext ctx = (pigContext != null ? pigContext : new PigContext());

    // apparently if not connected, pig can cause all kind of errors
    PigServer pigServer = null;

    try {
        if (StringUtils.hasText(user)) {
            UserGroupInformation ugi = UserGroupInformation.createProxyUser(user,
                    UserGroupInformation.getLoginUser());
            pigServer = ugi.doAs(new PrivilegedExceptionAction<PigServer>() {
                @Override
                public PigServer run() throws Exception {
                    return new PigServer(ctx, true);
                }
            });
        } else {
            pigServer = new PigServer(ctx, true);
        }
    } catch (ExecException ex) {
        throw PigUtils.convert(ex);
    }

    if (!CollectionUtils.isEmpty(pathToSkip)) {
        for (String path : pathToSkip) {
            pigServer.addPathToSkip(path);
        }
    }

    if (parallelism != null) {
        pigServer.setDefaultParallel(parallelism);
    }

    if (StringUtils.hasText(jobName)) {
        pigServer.setJobName(jobName);
    } else {
        if (StringUtils.hasText(beanName)) {
            pigServer.setJobName(beanName);
        }
    }

    if (StringUtils.hasText(jobPriority)) {
        pigServer.setJobPriority(jobPriority);
    }

    if (validateEachStatement != null) {
        PigUtils.validateEachStatement(pigServer, validateEachStatement);
    }

    if (!CollectionUtils.isEmpty(scripts)) {
        PigUtils.runWithConversion(pigServer, scripts, false);
    }

    return pigServer;
}

From source file:org.trpr.dataaccess.hbase.auth.kerberos.KerberosAuthenticationProvider.java

License:Apache License

/**
 * Interface method implementation. Initializes the specified HBase configuration with Kerberos authentication properties
 * @see org.trpr.dataaccess.hbase.auth.AuthenticationProvider#authenticatePrincipal(org.apache.hadoop.conf.Configuration)
 */
public void authenticatePrincipal(Configuration configuration) throws SecurityException {
    for (Object key : this.kerberosAuthProperties.keySet()) {
        configuration.set(key.toString(), this.kerberosAuthProperties.getProperty(key.toString()));
    }
    System.setProperty(KerberosAuthenticationProvider.KERBEROS_CONFIG_SYSTEM_VARIABLE,
            this.kerberosConfigLocation);
    try {
        UserGroupInformation.setConfiguration(configuration);
        UserGroupInformation.loginUserFromKeytab(this.kerberosPrincipal, this.kerberosKeytabLocation);
        UserGroupInformation loggedInUser = UserGroupInformation.getLoginUser();
        LOGGER.info("Currently logged in Kerberos principal : " + loggedInUser);
        new TGTRenewalThread(configuration, loggedInUser);
    } catch (Exception e) {
        throw new SecurityException("Error authenticating Kerberos Principal : " + this.kerberosPrincipal
                + " .Error message : " + e.getMessage(), e);
    }
}

From source file:org.trustedanalytics.servicebroker.h2oprovisioner.cdhclients.DeprovisionerYarnClientProviderTest.java

License:Apache License

@Test
public void getClient_SetProperUgiLoginUser() throws IOException {
    DeprovisionerYarnClientProvider sut = new DeprovisionerYarnClientProvider();
    sut.getClient("someUser", new Configuration());

    assertEquals("someUser", UserGroupInformation.getLoginUser().getUserName());
}

From source file:oz.hadoop.yarn.test.cluster.InJvmContainerExecutor.java

License:Apache License

/**
 * Builds a {@link UserGroupInformation} for launching the given container,
 * populated with the credentials stored in the container's token file.
 *
 * @param container the container about to be launched
 * @param containerWorkDir the container's working directory
 * @return a UGI carrying the container's tokens
 */
private UserGroupInformation buildUgiForContainerLaunching(Container container, final Path containerWorkDir) {
    UserGroupInformation ugi;
    try {
        ugi = UserGroupInformation.createRemoteUser(UserGroupInformation.getLoginUser().getUserName());
        ugi.setAuthenticationMethod(AuthMethod.TOKEN);
        String filePath = new Path(containerWorkDir, ContainerLaunch.FINAL_CONTAINER_TOKENS_FILE).toString();
        Credentials credentials = Credentials.readTokenStorageFile(new File(filePath), this.getConf());
        Collection<Token<? extends TokenIdentifier>> tokens = credentials.getAllTokens();
        for (Token<? extends TokenIdentifier> token : tokens) {
            ugi.addToken(token);
        }
    } catch (Exception e) {
        throw new IllegalArgumentException(
                "Failed to build UserGroupInformation to launch container " + container, e);
    }
    return ugi;
}

From source file:skewtune.mapreduce.STJobTracker.java

License:Apache License

@SuppressWarnings("unchecked")
STJobTracker(final JobConf conf, String jobtrackerIndentifier) throws IOException, InterruptedException {
    // find the owner of the process
    // get the desired principal to load
    String keytabFilename = conf.get(JTConfig.JT_KEYTAB_FILE);
    UserGroupInformation.setConfiguration(conf);
    if (keytabFilename != null) {
        String desiredUser = conf.get(JTConfig.JT_USER_NAME, System.getProperty("user.name"));
        UserGroupInformation.loginUserFromKeytab(desiredUser, keytabFilename);
        mrOwner = UserGroupInformation.getLoginUser();
    } else {
        mrOwner = UserGroupInformation.getCurrentUser();
    }

    supergroup = conf.get(MR_SUPERGROUP, "supergroup");
    LOG.info("Starting jobtracker with owner as " + mrOwner.getShortUserName() + " and supergroup as "
            + supergroup);

    long secretKeyInterval = conf.getLong(MRConfig.DELEGATION_KEY_UPDATE_INTERVAL_KEY,
            MRConfig.DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT);
    long tokenMaxLifetime = conf.getLong(MRConfig.DELEGATION_TOKEN_MAX_LIFETIME_KEY,
            MRConfig.DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT);
    long tokenRenewInterval = conf.getLong(MRConfig.DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
            MRConfig.DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
    secretManager = new DelegationTokenSecretManager(secretKeyInterval, tokenMaxLifetime, tokenRenewInterval,
            DELEGATION_TOKEN_GC_INTERVAL);
    secretManager.startThreads();

    //
    // Grab some static constants
    //

    NUM_HEARTBEATS_IN_SECOND = conf.getInt(JT_HEARTBEATS_IN_SECOND, DEFAULT_NUM_HEARTBEATS_IN_SECOND);
    if (NUM_HEARTBEATS_IN_SECOND < MIN_NUM_HEARTBEATS_IN_SECOND) {
        NUM_HEARTBEATS_IN_SECOND = DEFAULT_NUM_HEARTBEATS_IN_SECOND;
    }

    HEARTBEATS_SCALING_FACTOR = conf.getFloat(JT_HEARTBEATS_SCALING_FACTOR, DEFAULT_HEARTBEATS_SCALING_FACTOR);
    if (HEARTBEATS_SCALING_FACTOR < MIN_HEARTBEATS_SCALING_FACTOR) {
        HEARTBEATS_SCALING_FACTOR = DEFAULT_HEARTBEATS_SCALING_FACTOR;
    }

    // whether to dump or not every heartbeat message even when DEBUG is enabled
    dumpHeartbeat = conf.getBoolean(JT_HEARTBEATS_DUMP, false);

    // This is a directory of temporary submission files. We delete it
    // on startup, and can delete any files that we're done with
    this.conf = conf;
    JobConf jobConf = new JobConf(conf);

    // Set ports, start RPC servers, setup security policy etc.
    InetSocketAddress addr = getAddress(conf);
    this.localMachine = addr.getHostName();
    this.port = addr.getPort();

    int handlerCount = conf.getInt(JT_IPC_HANDLER_COUNT, 10);
    this.interTrackerServer = RPC.getServer(SkewTuneClientProtocol.class, this, addr.getHostName(),
            addr.getPort(), handlerCount, false, conf, secretManager);
    if (LOG.isDebugEnabled()) {
        Properties p = System.getProperties();
        for (Iterator it = p.keySet().iterator(); it.hasNext();) {
            String key = (String) it.next();
            String val = p.getProperty(key);
            LOG.debug("Property '" + key + "' is " + val);
        }
    }

    InetSocketAddress infoSocAddr = NetUtils
            .createSocketAddr(conf.get(JT_HTTP_ADDRESS, String.format("%s:0", this.localMachine)));
    String infoBindAddress = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.startTime = System.currentTimeMillis();
    infoServer = new HttpServer("job", infoBindAddress, tmpInfoPort, tmpInfoPort == 0, conf);
    infoServer.setAttribute("job.tracker", this);
    infoServer.addServlet("jobcompletion", "/completion", JobCompletionServlet.class);
    infoServer.addServlet("taskspeculation", "/speculation", SpeculationEventServlet.class);
    infoServer.addServlet("skewreport", "/skew", SkewReportServlet.class);
    infoServer.addServlet("tasksplit", "/split/*", SplitTaskServlet.class);
    infoServer.addServlet("tasksplitV2", "/splitV2/*", SplitTaskV2Servlet.class);
    infoServer.start();

    this.trackerIdentifier = jobtrackerIndentifier;

    // The rpc/web-server ports can be ephemeral ports...
    // ... ensure we have the correct info
    this.port = interTrackerServer.getListenerAddress().getPort();
    this.conf.set(JT_IPC_ADDRESS, (this.localMachine + ":" + this.port));
    LOG.info("JobTracker up at: " + this.port);
    this.infoPort = this.infoServer.getPort();
    this.conf.set(JT_HTTP_ADDRESS, infoBindAddress + ":" + this.infoPort);
    LOG.info("JobTracker webserver: " + this.infoServer.getPort());
    this.defaultNotificationUrl = String.format("http://%s:%d/completion?jobid=$jobId&status=$jobStatus",
            infoBindAddress, this.infoPort);
    LOG.info("JobTracker completion URI: " + defaultNotificationUrl);
    //        this.defaultSpeculationEventUrl = String.format("http://%s:%d/speculation?taskid=$taskId&remainTime=$taskRemainTime",infoBindAddress,this.infoPort);
    this.defaultSpeculationEventUrl = String.format("http://%s:%d/speculation?jobid=$jobId", infoBindAddress,
            this.infoPort);
    LOG.info("JobTracker speculation event URI: " + defaultSpeculationEventUrl);
    this.defaultSkewReportUrl = String.format("http://%s:%d/skew", infoBindAddress, this.infoPort);
    LOG.info("JobTracker skew report event URI: " + defaultSkewReportUrl);
    this.trackerHttp = String.format("http://%s:%d", infoBindAddress, this.infoPort);

    while (!Thread.currentThread().isInterrupted()) {
        try {
            // if we haven't contacted the namenode go ahead and do it
            if (fs == null) {
                fs = mrOwner.doAs(new PrivilegedExceptionAction<FileSystem>() {
                    @Override
                    public FileSystem run() throws IOException {
                        return FileSystem.get(conf);
                    }
                });
            }

            // clean up the system dir, which will only work if hdfs is out
            // of safe mode
            if (systemDir == null) {
                systemDir = new Path(getSystemDir());
            }
            try {
                FileStatus systemDirStatus = fs.getFileStatus(systemDir);
                if (!systemDirStatus.getOwner().equals(mrOwner.getShortUserName())) {
                    throw new AccessControlException(
                            "The systemdir " + systemDir + " is not owned by " + mrOwner.getShortUserName());
                }
                if (!systemDirStatus.getPermission().equals(SYSTEM_DIR_PERMISSION)) {
                    LOG.warn("Incorrect permissions on " + systemDir + ". Setting it to "
                            + SYSTEM_DIR_PERMISSION);
                    fs.setPermission(systemDir, new FsPermission(SYSTEM_DIR_PERMISSION));
                } else {
                    break;
                }
            } catch (FileNotFoundException fnf) {
                // ignore: the system directory does not exist yet
            }
        } catch (AccessControlException ace) {
            LOG.warn("Failed to operate on " + JTConfig.JT_SYSTEM_DIR + "(" + systemDir
                    + ") because of permissions.");
            LOG.warn("Manually delete the " + JTConfig.JT_SYSTEM_DIR + "(" + systemDir
                    + ") and then start the JobTracker.");
            LOG.warn("Bailing out ... ");
            throw ace;
        } catch (IOException ie) {
            LOG.info("problem cleaning system directory: " + systemDir, ie);
        }
        Thread.sleep(FS_ACCESS_RETRY_PERIOD);
    }

    if (Thread.currentThread().isInterrupted()) {
        throw new InterruptedException();
    }

    // initialize cluster variable
    cluster = new Cluster(this.conf);

    // now create a job client proxy
    jtClient = (ClientProtocol) RPC.getProxy(ClientProtocol.class, ClientProtocol.versionID,
            JobTracker.getAddress(conf), mrOwner, this.conf,
            NetUtils.getSocketFactory(conf, ClientProtocol.class));

    new SpeculativeScheduler().start();

    // initialize task event fetcher
    new TaskCompletionEventFetcher().start();

    // Same with 'localDir' except it's always on the local disk.
    asyncDiskService = new MRAsyncDiskService(FileSystem.getLocal(conf), conf.getLocalDirs());
    asyncDiskService.moveAndDeleteFromEachVolume(SUBDIR);

    // keep at least one asynchronous worker per CPU core
    int numProcs = Runtime.getRuntime().availableProcessors();
    LOG.info("# of available processors = " + numProcs);
    int maxFactor = conf.getInt(JT_MAX_ASYNC_WORKER_FACTOR, 2);
    asyncWorkers = new ThreadPoolExecutor(numProcs, numProcs * maxFactor, 30, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(true), new ThreadPoolExecutor.CallerRunsPolicy());

    speculativeSplit = conf.getBoolean(JT_SPECULATIVE_SPLIT, false);
}

From source file:uk.ac.gla.terrier.probos.controller.ControllerServer.java

License:Open Source License

protected boolean storeJobScript(final JobInformation ji, final String requestorUserName, final byte[] source)
        throws IOException {
    final String jobFolderName = String.valueOf(Math.abs(random.nextInt()));

    final Path jobFolder = new Path(probosFolder, jobFolderName);
    final Path script = new Path(probosFolder, jobFolderName + ".SC");
    PrivilegedExceptionAction<Path> submitAction = new PrivilegedExceptionAction<Path>() {
        public Path run() throws Exception {
            FileSystem fs = FileSystem.get(yConf);
            fs.mkdirs(jobFolder);
            OutputStream os = fs.create(script);
            os.write(source);
            os.close();
            LOG.info("Wrote " + source.length + " bytes to " + script.toString() + " as the job script for job "
                    + ji.jobId);
            return script;
        }
    };

    //setuid to the requestor's user id
    UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(requestorUserName,
            UserGroupInformation.getLoginUser());
    Path rtr = null;
    try {
        if (UserGroupInformation.isSecurityEnabled())
            rtr = proxyUser.doAs(submitAction);
        else
            rtr = submitAction.run();
        ji.proxyUser = proxyUser;
        ji.scriptLocation = rtr;
        ji.folderLocation = jobFolder;
        ji.modify();
        return true;
    } catch (Exception e) {
        LOG.error("Could not store job file!", e);
        return false;
    }
}