Example usage for org.apache.hadoop.security UserGroupInformation getCurrentUser

List of usage examples for org.apache.hadoop.security UserGroupInformation getCurrentUser

Introduction

On this page you can find example usages of org.apache.hadoop.security UserGroupInformation getCurrentUser.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation getCurrentUser() throws IOException 

Document

Return the current user, including any doAs in the current stack.
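
Because the UGI is taken from the calling context, getCurrentUser() reports the impersonated user when called inside a doAs block, and the login user otherwise. The following is a minimal, hypothetical sketch of that behavior (the "proxyUser" name and the surrounding class are illustrative only):

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class CurrentUserDemo {
    public static void main(String[] args) throws Exception {
        // Outside doAs: the login user (OS account, Kerberos ticket, or HADOOP_USER_NAME).
        System.out.println("login user: " + UserGroupInformation.getCurrentUser().getShortUserName());

        // Inside doAs: the proxy user created for the call.
        UserGroupInformation proxy = UserGroupInformation.createProxyUser("proxyUser",
                UserGroupInformation.getLoginUser());
        String effective = proxy.doAs((PrivilegedExceptionAction<String>) () ->
                UserGroupInformation.getCurrentUser().getShortUserName());
        System.out.println("user inside doAs: " + effective); // prints "proxyUser"
    }
}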

Usage

From source file:org.deeplearning4j.iterativereduce.runtime.yarn.ResourceManagerHandler.java

License:Apache License

public void submitApplication(ApplicationId appId, String appName, Map<String, String> env,
        Map<String, LocalResource> localResources, List<String> commands, int memory)
        throws URISyntaxException, IOException {

    if (clientResourceManager == null)
        throw new IllegalStateException("Cannot submit an application without connecting to resource manager!");

    ApplicationSubmissionContext appCtx = Records.newRecord(ApplicationSubmissionContext.class);
    appCtx.setApplicationId(appId);
    appCtx.setApplicationName(appName);
    appCtx.setQueue("default");
    appCtx.setUser(UserGroupInformation.getCurrentUser().getShortUserName());

    //System.out.println( "Based on my current user I am: " + UserGroupInformation.getCurrentUser().getShortUserName() );

    Priority prio = Records.newRecord(Priority.class);
    prio.setPriority(0);
    appCtx.setPriority(prio);

    // Launch ctx
    ContainerLaunchContext containerCtx = Records.newRecord(ContainerLaunchContext.class);
    containerCtx.setLocalResources(localResources);
    containerCtx.setCommands(commands);
    containerCtx.setEnvironment(env);
    containerCtx.setUser(UserGroupInformation.getCurrentUser().getShortUserName());

    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(memory);
    containerCtx.setResource(capability);

    appCtx.setAMContainerSpec(containerCtx);

    SubmitApplicationRequest submitReq = Records.newRecord(SubmitApplicationRequest.class);
    submitReq.setApplicationSubmissionContext(appCtx);

    LOG.info("Submitting application to ASM");
    clientResourceManager.submitApplication(submitReq);

    // Don't return anything, ASM#submit returns an empty response
}

From source file:org.dknight.app.ApplicationMaster.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public boolean run() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");

    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    // Setup local RPC Server to accept status requests directly from clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and shell script
    // executed on them ( regardless of success/failure).
    List<NodeReport> nodeReports = null;
    if (yarnClient != null) {
        try {
            nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
            for (NodeReport node : nodeReports) {
                LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                        + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                        + node.getNumContainers());
            }
        } catch (YarnException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    for (int i = 0; i < numTotalContainers; ++i) {
        ContainerRequest containerAsk = null;
        if (CollectionUtils.isNotEmpty(nodeReports)) {
            NodeReport requestNode = nodeReports.get(Math.min(i, nodeReports.size() - 1));
            String[] requestHosts = new String[] { requestNode.getNodeId().getHost() };
            LOG.info("Ask container at the host list: " + Arrays.toString(requestHosts));
            containerAsk = setupContainerAskForRM(requestHosts);
        } else {
            LOG.info("Ask container with ANY host");
            containerAsk = setupContainerAskForRM(null);
        }
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);

    while (!done && (numCompletedContainers.get() != numTotalContainers)) {
        try {
            Thread.sleep(200);
        } catch (InterruptedException ex) {
            // Interrupted while waiting; loop again and re-check the completion flags.
        }
    }
    finish();

    return success;
}

From source file:org.elasticsearch.hadoop.yarn.rpc.YarnRpc.java

License:Apache License

public void start() {
    // handle security
    if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation.setConfiguration(cfg);
    }

    try {
        endpoint = resolveEndpoint(cfg);
    } catch (IOException ex) {
        throw new EsYarnException("Cannot resolve endpoint", ex);
    }

    UserGroupInformation ugi = null;
    try {
        ugi = UserGroupInformation.getCurrentUser();
    } catch (IOException ex) {
        throw new EsYarnException("Cannot get current user", ex);
    }

    // create proxy
    proxy = ugi.doAs(new PrivilegedAction<P>() {
        @SuppressWarnings("unchecked")
        @Override
        public P run() {
            return (P) YarnRPC.create(cfg).getProxy(protocolType, endpoint, cfg);
        }
    });

}

From source file:org.elasticsearch.repositories.hdfs.HaHdfsFailoverTestSuiteIT.java

License:Apache License

public void testHAFailoverWithRepository() throws Exception {
    RestClient client = client();
    Map<String, String> emptyParams = Collections.emptyMap();
    Header contentHeader = new BasicHeader("Content-Type", "application/json");

    String esKerberosPrincipal = System.getProperty("test.krb5.principal.es");
    String hdfsKerberosPrincipal = System.getProperty("test.krb5.principal.hdfs");
    String kerberosKeytabLocation = System.getProperty("test.krb5.keytab.hdfs");
    boolean securityEnabled = hdfsKerberosPrincipal != null;

    Configuration hdfsConfiguration = new Configuration();
    hdfsConfiguration.set("dfs.nameservices", "ha-hdfs");
    hdfsConfiguration.set("dfs.ha.namenodes.ha-hdfs", "nn1,nn2");
    hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn1", "localhost:10001");
    hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn2", "localhost:10002");
    hdfsConfiguration.set("dfs.client.failover.proxy.provider.ha-hdfs",
            "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");

    AccessController.doPrivileged((PrivilegedExceptionAction<Void>) () -> {
        if (securityEnabled) {
            // ensure that keytab exists
            Path kt = PathUtils.get(kerberosKeytabLocation);
            if (Files.exists(kt) == false) {
                throw new IllegalStateException("Could not locate keytab at " + kerberosKeytabLocation);
            }
            if (Files.isReadable(kt) != true) {
                throw new IllegalStateException("Could not read keytab at " + kerberosKeytabLocation);
            }
            logger.info("Keytab Length: " + Files.readAllBytes(kt).length);

            // set principal names
            hdfsConfiguration.set("dfs.namenode.kerberos.principal", hdfsKerberosPrincipal);
            hdfsConfiguration.set("dfs.datanode.kerberos.principal", hdfsKerberosPrincipal);
            hdfsConfiguration.set("dfs.data.transfer.protection", "authentication");

            SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS,
                    hdfsConfiguration);
            UserGroupInformation.setConfiguration(hdfsConfiguration);
            UserGroupInformation.loginUserFromKeytab(hdfsKerberosPrincipal, kerberosKeytabLocation);
        } else {
            SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.SIMPLE,
                    hdfsConfiguration);
            UserGroupInformation.setConfiguration(hdfsConfiguration);
            UserGroupInformation.getCurrentUser();
        }
        return null;
    });

    // Create repository
    {
        Response response = client.performRequest("PUT", "/_snapshot/hdfs_ha_repo_read", emptyParams,
                new NStringEntity("{" + "\"type\":\"hdfs\"," + "\"settings\":{"
                        + "\"uri\": \"hdfs://ha-hdfs/\",\n"
                        + "\"path\": \"/user/elasticsearch/existing/readonly-repository\","
                        + "\"readonly\": \"true\"," + securityCredentials(securityEnabled, esKerberosPrincipal)
                        + "\"conf.dfs.nameservices\": \"ha-hdfs\","
                        + "\"conf.dfs.ha.namenodes.ha-hdfs\": \"nn1,nn2\","
                        + "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn1\": \"localhost:10001\","
                        + "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn2\": \"localhost:10002\","
                        + "\"conf.dfs.client.failover.proxy.provider.ha-hdfs\": "
                        + "\"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\"" + "}"
                        + "}", Charset.defaultCharset()),
                contentHeader);

        Assert.assertEquals(200, response.getStatusLine().getStatusCode());
    }

    // Get repository
    {
        Response response = client.performRequest("GET", "/_snapshot/hdfs_ha_repo_read/_all", emptyParams);
        Assert.assertEquals(200, response.getStatusLine().getStatusCode());
    }

    // Failover the namenode to the second.
    failoverHDFS("nn1", "nn2", hdfsConfiguration);

    // Get repository again
    {
        Response response = client.performRequest("GET", "/_snapshot/hdfs_ha_repo_read/_all", emptyParams);
        Assert.assertEquals(200, response.getStatusLine().getStatusCode());
    }
}

From source file:org.hdl.caffe.yarn.app.ApplicationMaster.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws YarnException, IOException, InterruptedException {
    LOG.info("Starting ApplicationMaster");

    // Note: Credentials, Token, UserGroupInformation, DataOutputBuffer class
    // are marked as LimitedPrivate
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Create appSubmitterUgi and add original tokens to it
    String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);

    AMRMClientAsync.AbstractCallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    appMasterHostname = System.getenv(Environment.NM_HOST.name());
    CaffeApplicationRpcServer rpcServer = new CaffeApplicationRpcServer(appMasterHostname, new RpcForClient());
    appMasterRpcPort = rpcServer.getRpcPort();
    rpcServer.startRpcServiceThread();

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);

    // Dump out information about cluster capability as seen by the
    // resource manager
    long maxMem = response.getMaximumResourceCapability().getMemorySize();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capability of resources in this cluster " + maxVCores);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    if (containerVirtualCores > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerVirtualCores + ", max=" + maxVCores);
        containerVirtualCores = maxVCores;
    }

    List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
    LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
            + " previous attempts' running containers on AM registration.");
    for (Container container : previousAMRunningContainers) {
        launchedContainers.add(container.getId());
    }
    numAllocatedContainers.addAndGet(previousAMRunningContainers.size());

    int numTotalContainersToRequest = numTotalContainers - previousAMRunningContainers.size();

    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and shell script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainersToRequest; ++i) {
        ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);

}

From source file:org.notmysock.tez.BroadcastTest.java

License:Apache License

public boolean run(Configuration conf, boolean doLocalityCheck) throws Exception {
    System.out.println("Running BroadcastTest");
    // conf and UGI
    TezConfiguration tezConf;
    if (conf != null) {
        tezConf = new TezConfiguration(conf);
    } else {
        tezConf = new TezConfiguration();
    }
    tezConf.setBoolean(TezConfiguration.TEZ_AM_CONTAINER_REUSE_ENABLED, true);
    UserGroupInformation.setConfiguration(tezConf);
    String user = UserGroupInformation.getCurrentUser().getShortUserName();

    // staging dir
    FileSystem fs = FileSystem.get(tezConf);
    String stagingDirStr = Path.SEPARATOR + "user" + Path.SEPARATOR + user + Path.SEPARATOR + ".staging"
            + Path.SEPARATOR + Long.toString(System.currentTimeMillis());
    Path stagingDir = new Path(stagingDirStr);
    tezConf.set(TezConfiguration.TEZ_AM_STAGING_DIR, stagingDirStr);
    stagingDir = fs.makeQualified(stagingDir);

    Path jobJar = new Path(stagingDir, "job.jar");
    fs.copyFromLocalFile(getCurrentJarURL(), jobJar);

    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put("job.jar", createLocalResource(fs, jobJar));

    TezClient tezSession = null;
    // needs session or else TaskScheduler does not hold onto containers
    tezSession = TezClient.create("BroadcastTest", tezConf);
    tezSession.addAppMasterLocalFiles(localResources);
    tezSession.start();

    DAGClient dagClient = null;

    try {
        DAG dag = createDAG(fs, tezConf, stagingDir, localResources);

        dag.addTaskLocalFiles(localResources);

        tezSession.waitTillReady();
        dagClient = tezSession.submitDAG(dag);

        // monitoring
        DAGStatus dagStatus = dagClient.waitForCompletionWithStatusUpdates(null);
        if (dagStatus.getState() != DAGStatus.State.SUCCEEDED) {
            System.out.println("DAG diagnostics: " + dagStatus.getDiagnostics());
            return false;
        }
        return true;
    } finally {
        fs.delete(stagingDir, true);
        tezSession.stop();
    }
}

From source file:org.openflamingo.remote.thrift.thriftfs.ThriftHandlerBase.java

License:Apache License

/**
 * The methods below should be called by all RPCs with the request context
 * passed in, whenever said RPCs are accessing Hadoop-internal methods. These
 * assume the authentication role of the requester.
 * <p/>
 * Most of the time you can just wrap the entire contents of the method with
 * these methods. If, however, your RPC needs to throw an exception not of
 * type IOException, then you may need to wrap only the portions which
 * actually touch Hadoop, and then throw your own exception(s) based on the
 * result of these calls.
 */
protected <T> T assumeUserContextAndExecute(RequestContext ctx, PrivilegedExceptionAction<T> action)
        throws IOException {
    try {
        return UserGroupInformation
                .createProxyUser(ctx.confOptions.get("effective_user"), UserGroupInformation.getCurrentUser())
                .doAs(action);
    } catch (Throwable e) {
        throw ThriftUtils.toThrift(e);
    }
}
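
A caller would typically wrap only the Hadoop-touching portion of an RPC handler in this helper. The sketch below is illustrative only; the listDirAsRequester name, the dir parameter, and the conf field are assumptions and not part of the original class:

protected FileStatus[] listDirAsRequester(RequestContext ctx, final String dir) throws IOException {
    // Runs the FileSystem call as the effective user named in the request context.
    return assumeUserContextAndExecute(ctx, new PrivilegedExceptionAction<FileStatus[]>() {
        @Override
        public FileStatus[] run() throws IOException {
            return FileSystem.get(conf).listStatus(new Path(dir));
        }
    });
}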

From source file:org.openflamingo.remote.thrift.thriftfs.ThriftHandlerBase.java

License:Apache License

protected <T> T assumeUserContextAndExecute(RequestContext ctx, PrivilegedAction<T> action) {
    try {
        return UserGroupInformation
                .createProxyUser(ctx.confOptions.get("effective_user"), UserGroupInformation.getCurrentUser())
                .doAs(action);
    } catch (java.io.IOException e) {
        // This should only be thrown in the event getLoginUser() fails.
        throw new Error(e);
    }
}

From source file:org.pentaho.big.data.impl.shim.mapreduce.PentahoMapReduceJobBuilderImpl.java

License:Apache License

@Override
protected MapReduceJobAdvanced submit(Configuration conf) throws IOException {
    cleanOutputPath(conf);

    FileSystem fs = hadoopShim.getFileSystem(conf);

    if (Boolean.parseBoolean(getProperty(conf, pmrProperties, PENTAHO_MAPREDUCE_PROPERTY_USE_DISTRIBUTED_CACHE,
            Boolean.toString(true)))) {
        String installPath = getProperty(conf, pmrProperties,
                PENTAHO_MAPREDUCE_PROPERTY_KETTLE_HDFS_INSTALL_DIR, null);
        String installId = getProperty(conf, pmrProperties, PENTAHO_MAPREDUCE_PROPERTY_KETTLE_INSTALLATION_ID,
                null);
        try {
            if (Utils.isEmpty(installPath)) {
                throw new IllegalArgumentException(BaseMessages.getString(PKG,
                        JOB_ENTRY_HADOOP_TRANS_JOB_EXECUTOR_KETTLE_HDFS_INSTALL_DIR_MISSING));
            }
            if (Utils.isEmpty(installId)) {
                installId = this.installId;
            }
            if (!installPath.endsWith(Const.FILE_SEPARATOR)) {
                installPath += Const.FILE_SEPARATOR;
            }

            Path kettleEnvInstallDir = fs.asPath(installPath, installId);
            FileObject pmrLibArchive = pmrArchiveGetter.getPmrArchive(conf);

            // Make sure the version we're attempting to use is installed
            if (hadoopShim.getDistributedCacheUtil().isKettleEnvironmentInstalledAt(fs, kettleEnvInstallDir)) {
                log.logDetailed(BaseMessages.getString(PKG,
                        "JobEntryHadoopTransJobExecutor.UsingKettleInstallationFrom",
                        kettleEnvInstallDir.toUri().getPath()));
            } else {
                // Load additional plugin folders as requested
                String additionalPluginNames = getProperty(conf, pmrProperties,
                        PENTAHO_MAPREDUCE_PROPERTY_ADDITIONAL_PLUGINS, null);
                if (pmrLibArchive == null) {
                    throw new KettleException(BaseMessages.getString(PKG,
                            JOB_ENTRY_HADOOP_TRANS_JOB_EXECUTOR_UNABLE_TO_LOCATE_ARCHIVE,
                            pmrArchiveGetter.getVfsFilename(conf)));
                }

                log.logBasic(BaseMessages.getString(PKG, "JobEntryHadoopTransJobExecutor.InstallingKettleAt",
                        kettleEnvInstallDir));

                FileObject bigDataPluginFolder = vfsPluginDirectory;
                hadoopShim.getDistributedCacheUtil().installKettleEnvironment(pmrLibArchive, fs,
                        kettleEnvInstallDir, bigDataPluginFolder, additionalPluginNames);

                log.logBasic(BaseMessages.getString(PKG,
                        "JobEntryHadoopTransJobExecutor.InstallationOfKettleSuccessful", kettleEnvInstallDir));
            }

            stageMetaStoreForHadoop(conf, fs, installPath);

            if (!hadoopShim.getDistributedCacheUtil().isKettleEnvironmentInstalledAt(fs, kettleEnvInstallDir)) {
                throw new KettleException(BaseMessages.getString(PKG,
                        JOB_ENTRY_HADOOP_TRANS_JOB_EXECUTOR_KETTLE_INSTALLATION_MISSING_FROM,
                        kettleEnvInstallDir.toUri().getPath()));
            }

            log.logBasic(BaseMessages.getString(PKG,
                    JOB_ENTRY_HADOOP_TRANS_JOB_EXECUTOR_CONFIGURING_JOB_WITH_KETTLE_AT,
                    kettleEnvInstallDir.toUri().getPath()));

            String mapreduceClasspath = conf.get(MAPREDUCE_APPLICATION_CLASSPATH,
                    DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH);
            conf.set(MAPREDUCE_APPLICATION_CLASSPATH, CLASSES + mapreduceClasspath);

            hadoopShim.getDistributedCacheUtil().configureWithKettleEnvironment(conf, fs, kettleEnvInstallDir);
            log.logBasic(MAPREDUCE_APPLICATION_CLASSPATH + ": " + conf.get(MAPREDUCE_APPLICATION_CLASSPATH));
        } catch (Exception ex) {
            throw new IOException(BaseMessages.getString(PKG,
                    JOB_ENTRY_HADOOP_TRANS_JOB_EXECUTOR_INSTALLATION_OF_KETTLE_FAILED), ex);
        }
    }
    JobConf jobConf = conf.getAsDelegateConf(JobConf.class);
    jobConf.getCredentials().addAll(UserGroupInformation.getCurrentUser().getCredentials());
    return super.submit(conf);
}

From source file:org.pentaho.hadoop.shim.common.ConfigurationProxy.java

License:Apache License

/**
 * Submit job for the current configuration provided by this implementation.
 *
 * @return RunningJob implementation
 */
@Override
public RunningJob submit() throws IOException, ClassNotFoundException, InterruptedException {
    JobClient jobClient = createJobClient();
    if (YarnQueueAclsVerifier.verify(jobClient.getQueueAclsForCurrentUser())) {
        return new RunningJobProxy(jobClient.submitJob(this));
    } else {
        throw new YarnQueueAclsException(
                BaseMessages.getString(ConfigurationProxy.class, "ConfigurationProxy.UserHasNoPermissions",
                        UserGroupInformation.getCurrentUser().getUserName()));
    }
}