Example usage for org.apache.hadoop.security.UserGroupInformation.isSecurityEnabled

Introduction

On this page you can find example usage of org.apache.hadoop.security.UserGroupInformation.isSecurityEnabled.

Prototype

public static boolean isSecurityEnabled() 

Document

Determine if UserGroupInformation is using Kerberos to determine user identities or is relying on simple authentication.
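
In practice, isSecurityEnabled() is used as a guard: callers branch between Kerberos-specific setup (keytab logins, delegation tokens, proxy users) and plain simple-auth behavior, as the examples below show. Here is a minimal sketch of that pattern; the class name, principal, and keytab path are illustrative placeholders, not values taken from the examples.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class SecurityCheckExample {
    public static void main(String[] args) throws IOException {
        // isSecurityEnabled() reflects hadoop.security.authentication in the
        // configuration handed to UserGroupInformation.setConfiguration().
        Configuration conf = new Configuration();
        UserGroupInformation.setConfiguration(conf);

        if (UserGroupInformation.isSecurityEnabled()) {
            // Kerberos is active: log in from a keytab before making RPC calls.
            // Placeholder principal/keytab - substitute your service's values.
            UserGroupInformation.loginUserFromKeytab("service@EXAMPLE.COM",
                    "/etc/security/keytabs/service.keytab");
        } else {
            // Simple authentication: the current process user is taken as-is.
            System.out.println("Running with simple auth as "
                    + UserGroupInformation.getCurrentUser().getShortUserName());
        }
    }
}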

Usage

From source file: org.apache.hive.hcatalog.templeton.tool.TempletonControllerJob.java

License: Apache License

/**
 * Enqueue the job and print out the job id for later collection.
 * @see org.apache.hive.hcatalog.templeton.CompleteDelegator
 */
@Override
public int run(String[] args) throws IOException, InterruptedException, ClassNotFoundException, TException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Preparing to submit job: " + Arrays.toString(args));
    }
    Configuration conf = getConf();

    conf.set(JAR_ARGS_NAME, TempletonUtils.encodeArray(args));
    String memoryMb = appConf.mapperMemoryMb();
    if (memoryMb != null && memoryMb.length() != 0) {
        conf.set(AppConfig.HADOOP_MAP_MEMORY_MB, memoryMb);
    }
    String amMemoryMB = appConf.amMemoryMb();
    if (amMemoryMB != null && !amMemoryMB.isEmpty()) {
        conf.set(AppConfig.HADOOP_MR_AM_MEMORY_MB, amMemoryMB);
    }
    String amJavaOpts = appConf.controllerAMChildOpts();
    if (amJavaOpts != null && !amJavaOpts.isEmpty()) {
        conf.set(AppConfig.HADOOP_MR_AM_JAVA_OPTS, amJavaOpts);
    }

    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    conf.set("user.name", user);
    Job job = new Job(conf);
    job.setJarByClass(LaunchMapper.class);
    job.setJobName(TempletonControllerJob.class.getSimpleName());
    job.setMapperClass(LaunchMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);
    job.setInputFormatClass(SingleInputFormat.class);

    NullOutputFormat<NullWritable, NullWritable> of = new NullOutputFormat<NullWritable, NullWritable>();
    job.setOutputFormatClass(of.getClass());
    job.setNumReduceTasks(0);

    JobClient jc = new JobClient(new JobConf(job.getConfiguration()));

    if (UserGroupInformation.isSecurityEnabled()) {
        Token<DelegationTokenIdentifier> mrdt = jc.getDelegationToken(new Text("mr token"));
        job.getCredentials().addToken(new Text("mr token"), mrdt);
    }
    String metastoreTokenStrForm = addHMSToken(job, user);

    job.submit();

    submittedJobId = job.getJobID();
    if (metastoreTokenStrForm != null) {
        //so that it can be cancelled later from CompleteDelegator
        DelegationTokenCache.getStringFormTokenCache().storeDelegationToken(submittedJobId.toString(),
                metastoreTokenStrForm);
        LOG.debug("Added metastore delegation token for jobId=" + submittedJobId.toString() + " user=" + user);
    }
    return 0;
}

From source file: org.apache.hive.service.auth.HiveAuthFactory.java

License: Apache License

public static void verifyProxyAccess(String realUser, String proxyUser, String ipAddress, HiveConf hiveConf)
        throws HiveSQLException {
    try {
        UserGroupInformation sessionUgi;
        if (UserGroupInformation.isSecurityEnabled()) {
            KerberosNameShim kerbName = ShimLoader.getHadoopShims().getKerberosNameShim(realUser);
            sessionUgi = UserGroupInformation.createProxyUser(kerbName.getServiceName(),
                    UserGroupInformation.getLoginUser());
        } else {
            sessionUgi = UserGroupInformation.createRemoteUser(realUser);
        }
        if (!proxyUser.equalsIgnoreCase(realUser)) {
            ProxyUsers.refreshSuperUserGroupsConfiguration(hiveConf);
            ProxyUsers.authorize(UserGroupInformation.createProxyUser(proxyUser, sessionUgi), ipAddress,
                    hiveConf);
        }
    } catch (IOException e) {
        throw new HiveSQLException("Failed to validate proxy privilege of " + realUser + " for " + proxyUser,
                "08S01", e);
    }
}

From source file: org.apache.hive.service.cli.CLIService.java

License: Apache License

@Override
public synchronized void init(HiveConf hiveConf) {
    this.hiveConf = hiveConf;
    sessionManager = new SessionManager(hiveServer2);
    addService(sessionManager);
    //  If the hadoop cluster is secure, do a kerberos login for the service from the keytab
    if (UserGroupInformation.isSecurityEnabled()) {
        try {
            HiveAuthFactory.loginFromKeytab(hiveConf);
            this.serviceUGI = Utils.getUGI();
        } catch (IOException e) {
            throw new ServiceException("Unable to login to kerberos with given principal/keytab", e);
        } catch (LoginException e) {
            throw new ServiceException("Unable to login to kerberos with given principal/keytab", e);
        }

        // Also try creating a UGI object for the SPNego principal
        String principal = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_PRINCIPAL);
        String keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_KEYTAB);
        if (principal.isEmpty() || keyTabFile.isEmpty()) {
            LOG.info(
                    "SPNego httpUGI not created, spNegoPrincipal: " + principal + ", ketabFile: " + keyTabFile);
        } else {
            try {
                this.httpUGI = HiveAuthFactory.loginFromSpnegoKeytabAndReturnUGI(hiveConf);
                LOG.info("SPNego httpUGI successfully created.");
            } catch (IOException e) {
                LOG.warn("SPNego httpUGI creation failed: ", e);
            }
        }
    }
    // creates connection to HMS and thus *must* occur after kerberos login above
    try {
        applyAuthorizationConfigPolicy(hiveConf);
    } catch (Exception e) {
        throw new RuntimeException(
                "Error applying authorization policy on hive configuration: " + e.getMessage(), e);
    }
    setupBlockedUdfs();
    super.init(hiveConf);
}

From source file: org.apache.hive.service.cli.session.HiveSessionImplwithUGI.java

License: Apache License

public void setSessionUGI(String owner) throws HiveSQLException {
    if (owner == null) {
        throw new HiveSQLException("No username provided for impersonation");
    }
    if (UserGroupInformation.isSecurityEnabled()) {
        try {
            sessionUgi = UserGroupInformation.createProxyUser(owner, UserGroupInformation.getLoginUser());
        } catch (IOException e) {
            throw new HiveSQLException("Couldn't setup proxy user", e);
        }
    } else {
        sessionUgi = UserGroupInformation.createRemoteUser(owner);
    }
}

From source file: org.apache.hoya.tools.HoyaUtils.java

License: Apache License

/**
 * Turn on security. This is setup to only run once.
 * @param conf configuration to build up security
 * @return true if security was initialized in this call
 * @throws IOException IO/Net problems
 * @throws BadConfigException the configuration and system state are inconsistent
 */
public static boolean initProcessSecurity(Configuration conf) throws IOException, BadConfigException {

    // flip the once-only flag atomically; bail out if it was already set
    if (!processSecurityAlreadyInitialized.compareAndSet(false, true)) {
        //security is already inited
        return false;
    }

    log.info("JVM initialized into secure mode with kerberos realm {}", HoyaUtils.getKerberosRealm());
    //this gets UGI to reset its previous world view (i.e. simple auth) and pick up Kerberos security
    log.debug("java.security.krb5.realm={}", System.getProperty("java.security.krb5.realm", ""));
    log.debug("java.security.krb5.kdc={}", System.getProperty("java.security.krb5.kdc", ""));
    SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation authUser = UserGroupInformation.getCurrentUser();
    log.debug("Authenticating as " + authUser.toString());
    log.debug("Login user is {}", UserGroupInformation.getLoginUser());
    if (!UserGroupInformation.isSecurityEnabled()) {
        throw new BadConfigException("Although secure mode is enabled,"
                + "the application has already set up its user as an insecure entity %s", authUser);
    }
    if (authUser.getAuthenticationMethod() == UserGroupInformation.AuthenticationMethod.SIMPLE) {
        throw new BadConfigException("Auth User is not Kerberized %s"
                + " -security has already been set up with the wrong authentication method", authUser);

    }

    HoyaUtils.verifyPrincipalSet(conf, YarnConfiguration.RM_PRINCIPAL);
    HoyaUtils.verifyPrincipalSet(conf, DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY);
    return true;
}

From source file: org.apache.hoya.tools.HoyaUtils.java

License: Apache License

/**
 * Force an early login: This catches any auth problems early rather than
 * in RPC operations
 * @throws IOException if the login fails
 */
public static void forceLogin() throws IOException {
    if (UserGroupInformation.isSecurityEnabled()) {
        if (UserGroupInformation.isLoginKeytabBased()) {
            UserGroupInformation.getLoginUser().reloginFromKeytab();
        } else {
            UserGroupInformation.getLoginUser().reloginFromTicketCache();
        }
    }
}

From source file: org.apache.hoya.yarn.appmaster.HoyaAppMaster.java

License: Apache License

/**
 * Create and run the cluster.
 * @return exit code
 * @throws Throwable on a failure
 */
private int createAndRunCluster(String clustername) throws Throwable {
    HoyaVersionInfo.loadAndPrintVersionInfo(log);

    //load the cluster description from the cd argument
    String hoyaClusterDir = serviceArgs.getHoyaClusterURI();
    URI hoyaClusterURI = new URI(hoyaClusterDir);
    Path clusterDirPath = new Path(hoyaClusterURI);
    HoyaFileSystem fs = getClusterFS();

    // build up information about the running application - this
    // will be passed down to the cluster status
    MapOperations appInformation = new MapOperations();

    AggregateConf instanceDefinition = InstanceIO.loadInstanceDefinitionUnresolved(fs, clusterDirPath);

    log.info("Deploying cluster {}:", instanceDefinition);

    //REVISIT: why is this done?
    appState.updateInstanceDefinition(instanceDefinition);
    File confDir = getLocalConfDir();
    if (!confDir.exists() || !confDir.isDirectory()) {
        log.error("Bad conf dir {}", confDir);
        File parentFile = confDir.getParentFile();
        log.error("Parent dir {}:\n{}", parentFile, HoyaUtils.listDir(parentFile));
        throw new BadCommandArgumentsException("Configuration directory %s doesn't exist", confDir);
    }

    Configuration serviceConf = getConfig();
    // Try to get the proper filtering of static resources through the yarn proxy working
    serviceConf.set("hadoop.http.filter.initializers",
            "org.apache.hadoop.yarn.server.webproxy.amfilter.AmFilterInitializer");

    conf = new YarnConfiguration(serviceConf);
    //get our provider
    MapOperations globalOptions = instanceDefinition.getInternalOperations().getGlobalOptions();
    String providerType = globalOptions.getMandatoryOption(OptionKeys.INTERNAL_PROVIDER_NAME);
    log.info("Cluster provider type is {}", providerType);
    HoyaProviderFactory factory = HoyaProviderFactory.createHoyaProviderFactory(providerType);
    providerService = factory.createServerProvider();
    // init the provider BUT DO NOT START IT YET
    providerService.init(getConfig());
    addService(providerService);

    InetSocketAddress address = HoyaUtils.getRmSchedulerAddress(conf);
    log.info("RM is at {}", address);
    yarnRPC = YarnRPC.create(conf);

    /*
     * Extract the container ID. This is then
     * turned into an (incomplete) container
     */
    appMasterContainerID = ConverterUtils.toContainerId(
            HoyaUtils.mandatoryEnvVariable(ApplicationConstants.Environment.CONTAINER_ID.name()));
    appAttemptID = appMasterContainerID.getApplicationAttemptId();

    ApplicationId appid = appAttemptID.getApplicationId();
    log.info("Hoya AM for ID {}", appid.getId());

    appInformation.put(StatusKeys.INFO_AM_CONTAINER_ID, appMasterContainerID.toString());
    appInformation.put(StatusKeys.INFO_AM_APP_ID, appid.toString());
    appInformation.put(StatusKeys.INFO_AM_ATTEMPT_ID, appAttemptID.toString());

    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    Credentials credentials = currentUser.getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    dob.close();
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        log.info("Token {}", token.getKind());
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // set up secret manager
    secretManager = new ClientToAMTokenSecretManager(appAttemptID, null);

    // if not a secure cluster, extract the username - it will be
    // propagated to workers
    if (!UserGroupInformation.isSecurityEnabled()) {
        hoyaUsername = System.getenv(HADOOP_USER_NAME);
        log.info(HADOOP_USER_NAME + "='{}'", hoyaUsername);
    }

    Map<String, String> envVars;

    /**
     * It is critical this section is synchronized, to stop async AM events
     * arriving while registering a restarting AM.
     */
    synchronized (appState) {
        int heartbeatInterval = HEARTBEAT_INTERVAL;

        //add the RM client -this brings the callbacks in
        asyncRMClient = AMRMClientAsync.createAMRMClientAsync(heartbeatInterval, this);
        addService(asyncRMClient);
        //wrap it for the app state model
        rmOperationHandler = new AsyncRMOperationHandler(asyncRMClient);
        //now bring it up
        runChildService(asyncRMClient);

        //nmclient relays callbacks back to this class
        nmClientAsync = new NMClientAsyncImpl("nmclient", this);
        runChildService(nmClientAsync);

        //bring up the Hoya RPC service
        startHoyaRPCServer();

        InetSocketAddress rpcServiceAddr = rpcService.getConnectAddress();
        appMasterHostname = rpcServiceAddr.getHostName();
        appMasterRpcPort = rpcServiceAddr.getPort();
        appMasterTrackingUrl = null;
        log.info("AM Server is listening at {}:{}", appMasterHostname, appMasterRpcPort);
        appInformation.put(StatusKeys.INFO_AM_HOSTNAME, appMasterHostname);
        appInformation.set(StatusKeys.INFO_AM_RPC_PORT, appMasterRpcPort);

        //build the role map
        List<ProviderRole> providerRoles = new ArrayList<ProviderRole>(providerService.getRoles());
        providerRoles.addAll(HoyaAMClientProvider.ROLES);

        // Start up the WebApp and track the URL for it
        webApp = new HoyaAMWebApp();
        WebApps.$for("hoyaam", WebAppApi.class, new WebAppApiImpl(this, appState, providerService), "ws")
                .with(serviceConf).start(webApp);
        appMasterTrackingUrl = "http://" + appMasterHostname + ":" + webApp.port();
        WebAppService<HoyaAMWebApp> webAppService = new WebAppService<HoyaAMWebApp>("hoya", webApp);

        webAppService.init(conf);
        webAppService.start();
        addService(webAppService);

        appInformation.put(StatusKeys.INFO_AM_WEB_URL, appMasterTrackingUrl + "/");
        appInformation.set(StatusKeys.INFO_AM_WEB_PORT, webApp.port());

        // Register self with ResourceManager
        // This will start heartbeating to the RM
        // address = HoyaUtils.getRmSchedulerAddress(asyncRMClient.getConfig());
        log.info("Connecting to RM at {},address tracking URL={}", appMasterRpcPort, appMasterTrackingUrl);
        RegisterApplicationMasterResponse response = asyncRMClient.registerApplicationMaster(appMasterHostname,
                appMasterRpcPort, appMasterTrackingUrl);
        Resource maxResources = response.getMaximumResourceCapability();
        containerMaxMemory = maxResources.getMemory();
        containerMaxCores = maxResources.getVirtualCores();
        appState.setContainerLimits(maxResources.getMemory(), maxResources.getVirtualCores());
        // set the RM-defined maximum cluster values
        appInformation.put(ResourceKeys.YARN_CORES, Integer.toString(containerMaxCores));
        appInformation.put(ResourceKeys.YARN_MEMORY, Integer.toString(containerMaxMemory));

        boolean securityEnabled = UserGroupInformation.isSecurityEnabled();
        if (securityEnabled) {
            secretManager.setMasterKey(response.getClientToAMTokenMasterKey().array());
            applicationACLs = response.getApplicationACLs();

            //tell the server what the ACLs are 
            rpcService.getServer().refreshServiceAcl(conf, new HoyaAMPolicyProvider());
        }

        // extract container list
        List<Container> liveContainers = AMRestartSupport.retrieveContainersFromPreviousAttempt(response);
        String amRestartSupported = Boolean.toString(liveContainers != null);
        appInformation.put(StatusKeys.INFO_AM_RESTART_SUPPORTED, amRestartSupported);

        //now validate the installation
        Configuration providerConf = providerService.loadProviderConfigurationInformation(confDir);

        providerService.validateApplicationConfiguration(instanceDefinition, confDir, securityEnabled);

        //determine the location for the role history data
        Path historyDir = new Path(clusterDirPath, HISTORY_DIR_NAME);

        //build the instance
        appState.buildInstance(instanceDefinition, providerConf, providerRoles, fs.getFileSystem(), historyDir,
                liveContainers, appInformation);

        // add the AM to the list of nodes in the cluster

        appState.buildAppMasterNode(appMasterContainerID, appMasterHostname, webApp.port(),
                appMasterHostname + ":" + webApp.port());

        // build up environment variables that the AM wants set in every container
        // irrespective of provider and role.
        envVars = new HashMap<String, String>();
        if (hoyaUsername != null) {
            envVars.put(HADOOP_USER_NAME, hoyaUsername);
        }
    }
    String rolesTmpSubdir = appMasterContainerID.toString() + "/roles";

    String amTmpDir = globalOptions.getMandatoryOption(OptionKeys.INTERNAL_AM_TMP_DIR);

    Path tmpDirPath = new Path(amTmpDir);
    Path launcherTmpDirPath = new Path(tmpDirPath, rolesTmpSubdir);
    fs.getFileSystem().mkdirs(launcherTmpDirPath);

    //launcher service
    launchService = new RoleLaunchService(this, providerService, fs, new Path(getGeneratedConfDir()), envVars,
            launcherTmpDirPath);

    runChildService(launchService);

    appState.noteAMLaunched();

    //Give the provider restricted access to the state
    providerService.bind(appState);

    // launch the provider; this is expected to trigger a callback that
    // brings up the service
    launchProviderService(instanceDefinition, confDir);

    try {
        //now block waiting to be told to exit the process
        waitForAMCompletionSignal();
        //shutdown time
    } finally {
        finish();
    }

    return amExitCode;
}

From source file: org.apache.hoya.yarn.appmaster.rpc.RpcBinder.java

License: Apache License

public static HoyaClusterProtocol getProxy(final Configuration conf, ApplicationReport application,
        final int rpcTimeout) throws IOException, HoyaException, InterruptedException {

    String host = application.getHost();
    int port = application.getRpcPort();
    String address = host + ":" + port;
    if (host == null || 0 == port) {
        throw new HoyaException(HoyaExitCodes.EXIT_CONNECTIVITY_PROBLEM,
                "Hoya YARN instance " + application.getName() + " isn't providing a valid address for the"
                        + " Hoya RPC protocol: " + address);
    }

    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    final UserGroupInformation newUgi = UserGroupInformation.createRemoteUser(currentUser.getUserName());
    final InetSocketAddress serviceAddr = NetUtils.createSocketAddrForHost(application.getHost(),
            application.getRpcPort());
    HoyaClusterProtocol realProxy;

    log.debug("Connecting to {}", serviceAddr);
    if (UserGroupInformation.isSecurityEnabled()) {
        org.apache.hadoop.yarn.api.records.Token clientToAMToken = application.getClientToAMToken();
        Token<ClientToAMTokenIdentifier> token = ConverterUtils.convertFromYarn(clientToAMToken, serviceAddr);
        newUgi.addToken(token);
        realProxy = newUgi.doAs(new PrivilegedExceptionAction<HoyaClusterProtocol>() {
            @Override
            public HoyaClusterProtocol run() throws IOException {
                return connectToServer(serviceAddr, newUgi, conf, rpcTimeout);
            }
        });
    } else {
        return connectToServer(serviceAddr, newUgi, conf, rpcTimeout);
    }
    return realProxy;
}

From source file: org.apache.ignite.yarn.ApplicationMaster.java

License: Apache License

/** {@inheritDoc} */
public synchronized void onContainersAllocated(List<Container> conts) {
    for (Container c : conts) {
        if (checkContainer(c)) {
            try {
                ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);

                if (UserGroupInformation.isSecurityEnabled())
                    // Set the tokens to the newly allocated container:
                    ctx.setTokens(allTokens.duplicate());

                Map<String, String> env = new HashMap<>(System.getenv());

                env.put("IGNITE_TCP_DISCOVERY_ADDRESSES", getAddress(c.getNodeId().getHost()));

                if (props.jvmOpts() != null && !props.jvmOpts().isEmpty())
                    env.put("JVM_OPTS", props.jvmOpts());

                ctx.setEnvironment(env);

                Map<String, LocalResource> resources = new HashMap<>();

                resources.put("ignite", IgniteYarnUtils.setupFile(ignitePath, fs, LocalResourceType.ARCHIVE));
                resources.put("ignite-config.xml",
                        IgniteYarnUtils.setupFile(cfgPath, fs, LocalResourceType.FILE));

                if (props.licencePath() != null)
                    resources.put("gridgain-license.xml", IgniteYarnUtils
                            .setupFile(new Path(props.licencePath()), fs, LocalResourceType.FILE));

                if (props.userLibs() != null)
                    resources.put("libs",
                            IgniteYarnUtils.setupFile(new Path(props.userLibs()), fs, LocalResourceType.FILE));

                ctx.setLocalResources(resources);

                ctx.setCommands(Collections.singletonList(
                        (props.licencePath() != null ? "cp gridgain-license.xml ./ignite/*/ || true && " : "")
                                + "cp -r ./libs/* ./ignite/*/libs/ || true && " + "./ignite/*/bin/ignite.sh "
                                + "./ignite-config.xml" + " -J-Xmx" + ((int) props.memoryPerNode()) + "m"
                                + " -J-Xms" + ((int) props.memoryPerNode()) + "m"
                                + IgniteYarnUtils.YARN_LOG_OUT));

                log.log(Level.INFO, "Launching container: {0}.", c.getId());

                nmClient.startContainer(c, ctx);

                containers.put(c.getId(), new IgniteContainer(c.getId(), c.getNodeId(),
                        c.getResource().getVirtualCores(), c.getResource().getMemory()));
            } catch (Exception ex) {
                log.log(Level.WARNING, "Error launching container " + c.getId(), ex);
            }
        } else
            rmClient.releaseAssignedContainer(c.getId());
    }
}

From source file: org.apache.ignite.yarn.ApplicationMaster.java

License: Apache License

/**
 * @throws IOException
 */
public void init() throws IOException {
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials cred = UserGroupInformation.getCurrentUser().getCredentials();

        allTokens = IgniteYarnUtils.createTokenBuffer(cred);
    }

    fs = FileSystem.get(conf);

    nmClient = NMClient.createNMClient();

    nmClient.init(conf);
    nmClient.start();

    // Create async application master.
    rmClient = AMRMClientAsync.createAMRMClientAsync(300, this);

    rmClient.init(conf);

    rmClient.start();

    if (props.igniteCfg() == null || props.igniteCfg().isEmpty()) {
        InputStream input = Thread.currentThread().getContextClassLoader()
                .getResourceAsStream(IgniteYarnUtils.DEFAULT_IGNITE_CONFIG);

        cfgPath = new Path(props.igniteWorkDir() + File.separator + IgniteYarnUtils.DEFAULT_IGNITE_CONFIG);

        // Create file. Override by default.
        FSDataOutputStream outputStream = fs.create(cfgPath, true);

        IOUtils.copy(input, outputStream);

        IOUtils.closeQuietly(input);

        IOUtils.closeQuietly(outputStream);
    } else
        cfgPath = new Path(props.igniteCfg());
}