Example usage for org.apache.hadoop.security UserGroupInformation setConfiguration

List of usage examples for org.apache.hadoop.security UserGroupInformation setConfiguration

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.setConfiguration.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void setConfiguration(Configuration conf) 

Document

Set the static configuration for UGI.
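
Before any secured FileSystem or login call, a Kerberos-enabled Configuration has to be installed as the static UGI configuration, which is what the examples below do. The following is a minimal sketch of that pattern, assuming a Kerberos-secured cluster; the principal and keytab path are placeholders, not values taken from any of the examples.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class UgiKerberosLoginSketch {

    public static void main(String[] args) throws IOException {
        // Enable Kerberos authentication in the client configuration.
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "kerberos");

        // Install the configuration as the static UGI configuration.
        UserGroupInformation.setConfiguration(conf);

        // Placeholder principal and keytab path; substitute values for your cluster.
        UserGroupInformation.loginUserFromKeytab("user@EXAMPLE.COM", "/etc/security/keytabs/user.keytab");

        System.out.println("Logged in as: " + UserGroupInformation.getLoginUser());
    }
}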

Usage

From source file:com.amintor.hdfs.client.kerberizedhdfsclient.KerberizedHDFSClient.java

/**
 * @param args the command line arguments
 */
public static void main(String[] args) {

    try {
        Configuration conf = new Configuration();
        conf.addResource(new FileInputStream(HDFS_SITE_LOCATION));
        conf.addResource(new FileInputStream(CORE_SITE_LOCATION));
        String authType = conf.get("hadoop.security.authentication");
        System.out.println("Authentication Type:" + authType);
        if (authType.trim().equalsIgnoreCase("kerberos")) {
            // Login through UGI keytab
            UserGroupInformation.setConfiguration(conf);
            UserGroupInformation.loginUserFromKeytab("vijay", "/Users/vsingh/Software/vijay.keytab");
            FileSystem hdFS = FileSystem.get(conf);
            FileStatus[] listStatus = hdFS.listStatus(new Path(args[0]));
            for (FileStatus statusFile : listStatus) {
                System.out.print("Replication:" + statusFile.getReplication() + "\t");
                System.out.print("Owner:" + statusFile.getOwner() + "\t");
                System.out.print("Group:" + statusFile.getGroup() + "\t");
                System.out.println("Path:" + statusFile.getPath() + "\t");
            }

        }
    } catch (IOException ex) {
        Logger.getLogger(KerberizedHDFSClient.class.getName()).log(Level.SEVERE, null, ex);
    }
}

From source file:com.blackberry.logtools.LogTools.java

License:Apache License

public void runPigLocal(Map<String, String> params, String out, String tmp, final boolean quiet,
        final boolean silent, Configuration conf, String queue_name, String additional_jars, File pig_tmp,
        ArrayList<String> D_options, String PIG_DIR, FileSystem fs)
        throws IllegalArgumentException, IOException {
    //Create temp file on local to hold data to sort
    final File local_tmp = Files.createTempDir();
    local_tmp.deleteOnExit();

    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                logConsole(quiet, silent, warn, "Deleting tmp files in local tmp");
                delete(local_tmp);
            } catch (IOException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
        }
    }));

    //Set input parameter for pig job
    params.put("tmpdir", local_tmp.toString() + "/" + tmp);

    //Check for an out of '-', meaning write to stdout
    String pigout;
    if (out.equals("-")) {
        params.put("out", local_tmp + "/" + tmp + "/final");
        pigout = local_tmp + "/" + tmp + "/final";
    } else {
        params.put("out", local_tmp + "/" + StringEscapeUtils.escapeJava(out));
        pigout = StringEscapeUtils.escapeJava(out);
    }

    //Copy the tmp folder from HDFS to the local tmp directory, and delete the remote folder
    fs.copyToLocalFile(true, new Path(tmp), new Path(local_tmp + "/" + tmp));

    try {
        logConsole(quiet, silent, info, "Running PIG Command");
        conf.set("mapred.job.queue.name", queue_name);
        conf.set("pig.additional.jars", additional_jars);
        conf.set("pig.exec.reducers.bytes.per.reducer", Integer.toString(100 * 1000 * 1000));
        conf.set("pig.logfile", pig_tmp.toString());
        conf.set("hadoopversion", "23");
        //PIG temp directory set to be able to delete all temp files/directories
        conf.set("pig.temp.dir", local_tmp.getAbsolutePath());

        //Setting output separator for logdriver
        String DEFAULT_OUTPUT_SEPARATOR = "\t";
        Charset UTF_8 = Charset.forName("UTF-8");
        String outputSeparator = conf.get("logdriver.output.field.separator", DEFAULT_OUTPUT_SEPARATOR);
        byte[] bytes = outputSeparator.getBytes(UTF_8);
        if (bytes.length != 1) {
            System.err.println(
                    ";******************** The output separator must be a single byte in UTF-8. ******************** ");
            System.exit(1);
        }
        conf.set("logdriver.output.field.separator", Byte.toString(bytes[0]));

        dOpts(D_options, silent, out, conf);

        PigServer pigServer = new PigServer(ExecType.LOCAL, conf);
        UserGroupInformation.setConfiguration(new Configuration(false));
        pigServer.registerScript(PIG_DIR + "/formatAndSortLocal.pg", params);
    } catch (Exception e) {
        e.printStackTrace();
        System.exit(1);
    }

    logConsole(quiet, silent, warn, "PIG Job Completed.");

    if (out.equals("-")) {
        System.out.println(";#################### DATA RESULTS ####################");
        try {
            File results = new File(pigout);
            String[] resultList = results.list();

            //Find the files in the directory, open and printout results
            for (int i = 0; i < resultList.length; i++) {
                if (resultList[i].contains("part-") && !resultList[i].contains(".crc")) {
                    BufferedReader br = new BufferedReader(
                            new FileReader(new File(pigout + "/" + resultList[i])));
                    String line;
                    line = br.readLine();
                    while (line != null) {
                        System.out.println(line);
                        line = br.readLine();
                    }
                    br.close();
                }
            }
            System.out.println(";#################### END OF RESULTS ####################");
        } catch (IOException e) {
            e.printStackTrace();
            System.exit(1);
        }
    } else {
        fs.copyFromLocalFile(new Path(local_tmp + "/" + StringEscapeUtils.escapeJava(out)), new Path(pigout));
        System.out.println(
                ";#################### Done. Search results are in " + pigout + " ####################");
    }
}

From source file:com.cip.crane.agent.utils.TaskHelper.java

License:Open Source License

public TaskHelper() {
    conf.set("hadoop.security.authentication", "kerberos");
    conf.set("hadoop.security.authorization", "true");
    conf.set("dfs.namenode.kerberos.principal", AgentEnvValue.getHdfsValue(AgentEnvValue.NAMENODE_PRINCIPAL));
    conf.set("dp.hdfsclinet.kerberos.principal", AgentEnvValue.getHdfsValue(AgentEnvValue.KERBEROS_PRINCIPAL));
    conf.set("dp.hdfsclinet.keytab.file", AgentEnvValue.getValue(AgentEnvValue.AGENT_ROOT_PATH)
            + AgentEnvValue.getHdfsValue(AgentEnvValue.KEYTAB_FILE));
    conf.set("fs.hdfs.impl.disable.cache", "true");
    UserGroupInformation.setConfiguration(conf);
    try {
        SecurityUtil.login(conf, "dp.hdfsclinet.keytab.file", "dp.hdfsclinet.kerberos.principal");
    } catch (IOException e) {
        //throw new RuntimeException(e.getMessage(), e);
    }
}

From source file:com.cloudera.lib.service.hadoop.HadoopService.java

License:Open Source License

@Override
protected void init() throws ServiceException {
    LOG.info("Using Hadoop JARs version [{}]", VersionInfo.getVersion());
    String security = getServiceConfig().get(AUTHENTICATION_TYPE, "simple").trim();
    if (security.equals("kerberos")) {
        String defaultName = getServer().getName();
        String keytab = System.getProperty("user.home") + "/" + defaultName + ".keytab";
        keytab = getServiceConfig().get(KERBEROS_KEYTAB, keytab).trim();
        if (keytab.length() == 0) {
            throw new ServiceException(HadoopException.ERROR.H01, KERBEROS_KEYTAB);
        }
        String principal = defaultName + "/localhost@LOCALHOST";
        principal = getServiceConfig().get(KERBEROS_PRINCIPAL, principal).trim();
        if (principal.length() == 0) {
            throw new ServiceException(HadoopException.ERROR.H01, KERBEROS_PRINCIPAL);
        }
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);
        try {
            UserGroupInformation.loginUserFromKeytab(principal, keytab);
        } catch (IOException ex) {
            throw new ServiceException(HadoopException.ERROR.H02, ex.getMessage(), ex);
        }
        LOG.info("Using Hadoop Kerberos authentication, principal [{}] keytab [{}]", principal, keytab);
    } else if (security.equals("simple")) {
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "simple");
        UserGroupInformation.setConfiguration(conf);
        LOG.info("Using Hadoop simple/pseudo authentication, principal [{}]", System.getProperty("user.name"));
    } else {
        throw new ServiceException(HadoopException.ERROR.H09, security);
    }

    serviceHadoopConf = new XConfiguration();
    for (Map.Entry entry : getServiceConfig()) {
        String name = (String) entry.getKey();
        if (name.startsWith(HADOOP_CONF_PREFIX)) {
            name = name.substring(HADOOP_CONF_PREFIX.length());
            String value = (String) entry.getValue();
            serviceHadoopConf.set(name, value);

        }
    }
    setRequiredServiceHadoopConf(serviceHadoopConf);

    LOG.debug("Hadoop default configuration:");
    for (Map.Entry entry : serviceHadoopConf) {
        LOG.debug("  {} = {}", entry.getKey(), entry.getValue());
    }

    jobTrackerWhitelist = toLowerCase(getServiceConfig().getTrimmedStringCollection(JOB_TRACKER_WHITELIST));
    nameNodeWhitelist = toLowerCase(getServiceConfig().getTrimmedStringCollection(NAME_NODE_WHITELIST));
}

From source file:com.cloudera.oryx.common.servcomp.Store.java

License:Open Source License

private Store() {
    try {
        Configuration conf = OryxConfiguration.get();
        if (Namespaces.isLocalData()) {
            fs = FileSystem.getLocal(conf);
        } else {
            UserGroupInformation.setConfiguration(conf);
            fs = FileSystem.get(URI.create(Namespaces.get().getPrefix()), conf);
        }
    } catch (IOException ioe) {
        log.error("Unable to configure Store", ioe);
        throw new IllegalStateException(ioe);
    }
}

From source file:com.collective.celos.ci.config.deploy.CelosCiContext.java

License:Apache License

private Configuration setupConfiguration(String username, CelosCiTarget target) throws Exception {
    JScpWorker jscpWorker = new JScpWorker(username);
    Configuration conf = new Configuration();

    conf.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
    conf.set("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class.getName());

    conf.addResource(jscpWorker.getFileObjectByUri(target.getPathToHdfsSite()).getContent().getInputStream());
    conf.addResource(jscpWorker.getFileObjectByUri(target.getPathToCoreSite()).getContent().getInputStream());

    UserGroupInformation.setConfiguration(conf);

    return conf;
}

From source file:com.dp.bigdata.taurus.agent.utils.TaskHelper.java

License:Open Source License

public TaskHelper() {
    conf.set("hadoop.security.authentication", "kerberos");
    conf.set("hadoop.security.authorization", "true");
    conf.set("dfs.namenode.kerberos.principal", AgentEnvValue.getHdfsValue(AgentEnvValue.NAMENODE_PRINCIPAL));
    conf.set("dp.hdfsclinet.kerberos.principal", AgentEnvValue.getHdfsValue(AgentEnvValue.KERBEROS_PRINCIPAL));
    conf.set("dp.hdfsclinet.keytab.file", AgentEnvValue.getValue(AgentEnvValue.AGENT_ROOT_PATH)
            + AgentEnvValue.getHdfsValue(AgentEnvValue.KEYTAB_FILE));
    conf.set("fs.hdfs.impl.disable.cache", "true");
    UserGroupInformation.setConfiguration(conf);
    try {
        SecurityUtil.login(conf, "dp.hdfsclinet.keytab.file", "dp.hdfsclinet.kerberos.principal");
    } catch (IOException e) {
        throw new RuntimeException(e.getMessage(), e);
    }
}

From source file:com.flipkart.foxtrot.core.datastore.impl.hbase.HBaseUtil.java

License:Apache License

public static Configuration create(final HbaseConfig hbaseConfig) throws IOException {
    Configuration configuration = HBaseConfiguration.create();

    if (isValidFile(hbaseConfig.getCoreSite())) {
        configuration.addResource(new File(hbaseConfig.getCoreSite()).toURI().toURL());
    }

    if (isValidFile(hbaseConfig.getHdfsSite())) {
        configuration.addResource(new File(hbaseConfig.getHdfsSite()).toURI().toURL());
    }

    if (isValidFile(hbaseConfig.getHbasePolicy())) {
        configuration.addResource(new File(hbaseConfig.getHbasePolicy()).toURI().toURL());
    }

    if (isValidFile(hbaseConfig.getHbaseSite())) {
        configuration.addResource(new File(hbaseConfig.getHbaseSite()).toURI().toURL());
    }

    if (hbaseConfig.isSecure() && isValidFile(hbaseConfig.getKeytabFileName())) {
        configuration.set("hbase.master.kerberos.principal", hbaseConfig.getAuthString());
        configuration.set("hadoop.kerberos.kinit.command", hbaseConfig.getKinitPath());
        UserGroupInformation.setConfiguration(configuration);
        System.setProperty("java.security.krb5.conf", hbaseConfig.getKerberosConfigFile());
        UserGroupInformation.loginUserFromKeytab(hbaseConfig.getAuthString(), hbaseConfig.getKeytabFileName());
        logger.info("Logged into Hbase with User: " + UserGroupInformation.getLoginUser());
    }

    if (null != hbaseConfig.getHbaseZookeeperQuorum()) {
        configuration.set("hbase.zookeeper.quorum", hbaseConfig.getHbaseZookeeperQuorum());
    }

    if (null != hbaseConfig.getHbaseZookeeperClientPort()) {
        configuration.setInt("hbase.zookeeper.property.clientPort", hbaseConfig.getHbaseZookeeperClientPort());
    }
    return configuration;
}

From source file:com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl.java

License:Apache License

private FileSystem createFileSystem(Configuration hconf, String configFile, boolean forceNew)
        throws IOException {
    FileSystem filesystem = null;

    // load hdfs client config file if specified. The path is on local file
    // system
    if (configFile != null) {
        if (logger.isDebugEnabled()) {
            logger.debug("{}Adding resource config file to hdfs configuration:" + configFile, logPrefix);
        }
        hconf.addResource(new Path(configFile));

        if (!new File(configFile).exists()) {
            logger.warn(LocalizedMessage.create(LocalizedStrings.HOPLOG_HDFS_CLIENT_CONFIG_FILE_ABSENT,
                    configFile));
        }
    }

    // This setting disables shutdown hook for file system object. Shutdown
    // hook may cause FS object to close before the cache or store and
    // unpredictable behavior. This setting is provided for GFXD like server
    // use cases where FS close is managed by a server. This setting is not
    // supported by old versions of hadoop, HADOOP-4829
    hconf.setBoolean("fs.automatic.close", false);

    // Hadoop has a configuration parameter io.serializations that is a list of serialization 
    // classes which can be used for obtaining serializers and deserializers. This parameter 
    // by default contains avro classes. When a sequence file is created, it calls 
    // SerializationFactory.getSerializer(keyclass). This internally creates objects using 
    // reflection of all the classes that were part of io.serializations. But since, there is 
    // no avro class available it throws an exception. 
    // Before creating a sequenceFile, override the io.serializations parameter and pass only the classes 
    // that are important to us. 
    hconf.setStrings("io.serializations",
            new String[] { "org.apache.hadoop.io.serializer.WritableSerialization" });
    // create writer

    SchemaMetrics.configureGlobally(hconf);

    String nameNodeURL = null;
    if ((nameNodeURL = getNameNodeURL()) == null) {
        nameNodeURL = hconf.get("fs.default.name");
    }

    URI namenodeURI = URI.create(nameNodeURL);

    //if (! GemFireCacheImpl.getExisting().isHadoopGfxdLonerMode()) {
    String authType = hconf.get("hadoop.security.authentication");

    //The following code handles Gemfire XD with secure HDFS
    //A static set is used to cache all known secure HDFS NameNode urls.
    UserGroupInformation.setConfiguration(hconf);

    //Compare authentication method ignoring case to make GFXD compliant with future versions
    //At least version 2.0.2 starts complaining if the string "kerberos" is not all lowercase.
    //However the current version of hadoop seems to accept the authType in any case
    if (authType.equalsIgnoreCase("kerberos")) {

        String principal = hconf.get(HoplogConfig.KERBEROS_PRINCIPAL);
        String keyTab = hconf.get(HoplogConfig.KERBEROS_KEYTAB_FILE);

        if (!PERFORM_SECURE_HDFS_CHECK) {
            if (logger.isDebugEnabled())
                logger.debug("{}Ignore secure hdfs check", logPrefix);
        } else {
            if (!secureNameNodes.contains(nameNodeURL)) {
                if (logger.isDebugEnabled())
                    logger.debug("{}Executing secure hdfs check", logPrefix);
                try {
                    filesystem = FileSystem.newInstance(namenodeURI, hconf);
                    //Make sure no IOExceptions are generated when accessing insecure HDFS. 
                    filesystem.listFiles(new Path("/"), false);
                    throw new HDFSIOException(
                            "Gemfire XD HDFS client and HDFS cluster security levels do not match. The configured HDFS Namenode is not secured.");
                } catch (IOException ex) {
                    secureNameNodes.add(nameNodeURL);
                } finally {
                    //Close filesystem to avoid resource leak
                    if (filesystem != null) {
                        closeFileSystemIgnoreError(filesystem);
                    }
                }
            }
        }

        // check to ensure the namenode principal is defined
        String nameNodePrincipal = hconf.get("dfs.namenode.kerberos.principal");
        if (nameNodePrincipal == null) {
            throw new IOException(LocalizedStrings.GF_KERBEROS_NAMENODE_PRINCIPAL_UNDEF.toLocalizedString());
        }

        // ok, the user specified a gfxd principal so we will try to login
        if (principal != null) {
            //If NameNode principal is the same as Gemfire XD principal, there is a 
            //potential security hole
            String regex = "[/@]";
            if (nameNodePrincipal != null) {
                String HDFSUser = nameNodePrincipal.split(regex)[0];
                String GFXDUser = principal.split(regex)[0];
                if (HDFSUser.equals(GFXDUser)) {
                    logger.warn(
                            LocalizedMessage.create(LocalizedStrings.HDFS_USER_IS_SAME_AS_GF_USER, GFXDUser));
                }
            }

            // a keytab must exist if the user specifies a principal
            if (keyTab == null) {
                throw new IOException(LocalizedStrings.GF_KERBEROS_KEYTAB_UNDEF.toLocalizedString());
            }

            // the keytab must exist as well
            File f = new File(keyTab);
            if (!f.exists()) {
                throw new FileNotFoundException(
                        LocalizedStrings.GF_KERBEROS_KEYTAB_FILE_ABSENT.toLocalizedString(f.getAbsolutePath()));
            }

            //Authenticate Gemfire XD principal to Kerberos KDC using Gemfire XD keytab file
            String principalWithValidHost = SecurityUtil.getServerPrincipal(principal, "");
            UserGroupInformation.loginUserFromKeytab(principalWithValidHost, keyTab);
        } else {
            logger.warn(LocalizedMessage.create(LocalizedStrings.GF_KERBEROS_PRINCIPAL_UNDEF));
        }
    }
    //}

    filesystem = getFileSystemFactory().create(namenodeURI, hconf, forceNew);

    if (logger.isDebugEnabled()) {
        logger.debug("{}Initialized FileSystem linked to " + filesystem.getUri() + " " + filesystem.hashCode(),
                logPrefix);
    }
    return filesystem;
}

From source file:com.github.sakserv.minicluster.impl.KdcLocalClusterHBaseIntegrationTest.java

License:Apache License

@BeforeClass
public static void setUp() throws Exception {

    //System.setProperty("sun.security.krb5.debug", "true");

    // Force clean
    FileUtils.deleteFolder(propertyParser.getProperty(ConfigVars.ZOOKEEPER_TEMP_DIR_KEY));
    FileUtils.deleteFolder(propertyParser.getProperty(ConfigVars.HBASE_ROOT_DIR_KEY));
    FileUtils.deleteFolder(propertyParser.getProperty(ConfigVars.KDC_BASEDIR_KEY));

    // KDC
    kdcLocalCluster = new KdcLocalCluster.Builder()
            .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.KDC_PORT_KEY)))
            .setHost(propertyParser.getProperty(ConfigVars.KDC_HOST_KEY))
            .setBaseDir(propertyParser.getProperty(ConfigVars.KDC_BASEDIR_KEY))
            .setOrgDomain(propertyParser.getProperty(ConfigVars.KDC_ORG_DOMAIN_KEY))
            .setOrgName(propertyParser.getProperty(ConfigVars.KDC_ORG_NAME_KEY))
            .setPrincipals(propertyParser.getProperty(ConfigVars.KDC_PRINCIPALS_KEY).split(","))
            .setKrbInstance(propertyParser.getProperty(ConfigVars.KDC_KRBINSTANCE_KEY))
            .setInstance(propertyParser.getProperty(ConfigVars.KDC_INSTANCE_KEY))
            .setTransport(propertyParser.getProperty(ConfigVars.KDC_TRANSPORT))
            .setMaxTicketLifetime(
                    Integer.parseInt(propertyParser.getProperty(ConfigVars.KDC_MAX_TICKET_LIFETIME_KEY)))
            .setMaxRenewableLifetime(
                    Integer.parseInt(propertyParser.getProperty(ConfigVars.KDC_MAX_RENEWABLE_LIFETIME)))
            .setDebug(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.KDC_DEBUG))).build();
    kdcLocalCluster.start();

    Configuration baseConf = kdcLocalCluster.getBaseConf();

    // Zookeeper
    Jaas jaas = new Jaas().addServiceEntry("Server", kdcLocalCluster.getKrbPrincipal("zookeeper"),
            kdcLocalCluster.getKeytabForPrincipal("zookeeper"), "zookeeper");
    javax.security.auth.login.Configuration.setConfiguration(jaas);

    Map<String, Object> properties = new HashMap<>();
    properties.put("authProvider.1", "org.apache.zookeeper.server.auth.SASLAuthenticationProvider");
    properties.put("requireClientAuthScheme", "sasl");
    properties.put("sasl.serverconfig", "Server");
    properties.put("kerberos.removeHostFromPrincipal", "true");
    properties.put("kerberos.removeRealmFromPrincipal", "true");

    zookeeperLocalCluster = new ZookeeperLocalCluster.Builder()
            .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY)))
            .setTempDir(propertyParser.getProperty(ConfigVars.ZOOKEEPER_TEMP_DIR_KEY))
            .setZookeeperConnectionString(
                    propertyParser.getProperty(ConfigVars.ZOOKEEPER_CONNECTION_STRING_KEY))
            .setCustomProperties(properties).build();
    zookeeperLocalCluster.start();

    // HBase
    UserGroupInformation.setConfiguration(baseConf);

    System.setProperty("zookeeper.sasl.client", "true");
    System.setProperty("zookeeper.sasl.clientconfig", "Client");
    javax.security.auth.login.Configuration.setConfiguration(new Jaas().addEntry("Client",
            kdcLocalCluster.getKrbPrincipalWithRealm("hbase"), kdcLocalCluster.getKeytabForPrincipal("hbase")));

    try (CuratorFramework client = CuratorFrameworkFactory.newClient(
            zookeeperLocalCluster.getZookeeperConnectionString(), new ExponentialBackoffRetry(1000, 3))) {
        client.start();

        List<ACL> perms = new ArrayList<>();
        perms.add(new ACL(ZooDefs.Perms.ALL, ZooDefs.Ids.AUTH_IDS));
        perms.add(new ACL(ZooDefs.Perms.READ, ZooDefs.Ids.ANYONE_ID_UNSAFE));

        client.create().withMode(CreateMode.PERSISTENT).withACL(perms)
                .forPath(propertyParser.getProperty(ConfigVars.HBASE_ZNODE_PARENT_KEY));
    }

    Jaas jaasHbaseClient = new Jaas().addEntry("Client", kdcLocalCluster.getKrbPrincipalWithRealm("hbase"),
            kdcLocalCluster.getKeytabForPrincipal("hbase"));
    javax.security.auth.login.Configuration.setConfiguration(jaasHbaseClient);
    File jaasHbaseClientFile = new File(propertyParser.getProperty(ConfigVars.KDC_BASEDIR_KEY),
            "hbase-client.jaas");
    org.apache.commons.io.FileUtils.writeStringToFile(jaasHbaseClientFile, jaasHbaseClient.toFile());

    Configuration hbaseConfig = HBaseConfiguration.create();
    hbaseConfig.addResource(baseConf);

    hbaseLocalCluster = new HbaseLocalCluster.Builder()
            .setHbaseMasterPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.HBASE_MASTER_PORT_KEY)))
            .setHbaseMasterInfoPort(
                    Integer.parseInt(propertyParser.getProperty(ConfigVars.HBASE_MASTER_INFO_PORT_KEY)))
            .setNumRegionServers(
                    Integer.parseInt(propertyParser.getProperty(ConfigVars.HBASE_NUM_REGION_SERVERS_KEY)))
            .setHbaseRootDir(propertyParser.getProperty(ConfigVars.HBASE_ROOT_DIR_KEY))
            .setZookeeperPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY)))
            .setZookeeperConnectionString(
                    propertyParser.getProperty(ConfigVars.ZOOKEEPER_CONNECTION_STRING_KEY))
            .setZookeeperZnodeParent(propertyParser.getProperty(ConfigVars.HBASE_ZNODE_PARENT_KEY))
            .setHbaseWalReplicationEnabled(Boolean
                    .parseBoolean(propertyParser.getProperty(ConfigVars.HBASE_WAL_REPLICATION_ENABLED_KEY)))
            .setHbaseConfiguration(hbaseConfig).build();
    hbaseLocalCluster.start();
}