Example usage for org.apache.hadoop.hdfs DFSConfigKeys DFS_DATANODE_DATA_DIR_KEY

Introduction

On this page you can find example usages of the org.apache.hadoop.hdfs.DFSConfigKeys constant DFS_DATANODE_DATA_DIR_KEY.

Prototype

public static final String DFS_DATANODE_DATA_DIR_KEY = "dfs.datanode.data.dir";

The constant is declared in org.apache.hadoop.hdfs.DFSConfigKeys; see the Apache Hadoop source for the full declaration.
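
Before the usage examples below, here is a minimal sketch of setting and reading the key on a plain Hadoop Configuration (the class name and directory paths are illustrative, not anything shipped with Hadoop):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class DataDirKeyExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // The key takes a comma-separated list of local storage directories.
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, "/data/1/dfs/dn,/data/2/dfs/dn");
        // Read the value back entry by entry.
        for (String dir : conf.getTrimmedStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY)) {
            System.out.println(dir);
        }
    }
}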

Usage

From source file: com.mellanox.r4h.MiniDFSCluster.java

License: Apache License

/**
 * Modify the config and start up additional DataNodes. The info port for
 * DataNodes is guaranteed to use a free port.
 *
 * Data nodes can run with the name node in the mini cluster or
 * a real name node. For example, running with a real name node is useful
 * when running simulated data nodes with a real name node.
 * If minicluster's name node is null assume that the conf has been
 * set with the right address:port of the name node.
 *
 * @param conf
 *            the base configuration to use in starting the DataNodes. This
 *            will be modified as necessary.
 * @param numDataNodes
 *            Number of DataNodes to start; may be zero
 * @param manageDfsDirs
 *            if true, the data directories for DataNodes will be
 *            created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be
 *            set in the conf
 * @param operation
 *            the operation with which to start the DataNodes. If null
 *            or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
 * @param racks
 *            array of strings indicating the rack that each DataNode is on
 * @param hosts
 *            array of strings indicating the hostnames for each DataNode
 * @param simulatedCapacities
 *            array of capacities of the simulated data nodes
 * @param setupHostsFile
 *            add new nodes to dfs hosts files
 * @param checkDataNodeAddrConfig
 *            if true, only set DataNode port addresses if not already set in config
 * @param checkDataNodeHostConfig
 *            if true, only set DataNode hostname key if not already set in config
 * @param dnConfOverlays
 *            An array of {@link Configuration} objects that will overlay the
 *            global MiniDFSCluster Configuration for the corresponding DataNode.
 * @throws IllegalStateException
 *             if NameNode has been shutdown
 */
public synchronized void startDataNodes(Configuration conf, int numDataNodes, StorageType storageType,
        boolean manageDfsDirs, StartupOption operation, String[] racks, String[] hosts,
        long[] simulatedCapacities, boolean setupHostsFile, boolean checkDataNodeAddrConfig,
        boolean checkDataNodeHostConfig, Configuration[] dnConfOverlays) throws IOException {
    if (operation == StartupOption.RECOVER) {
        return;
    }
    if (checkDataNodeHostConfig) {
        conf.setIfUnset(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    } else {
        conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    }

    int curDatanodesNum = dataNodes.size();
    // for the minicluster, the default initial delay for block reports is 0
    if (conf.get(DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
        conf.setLong(DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
    }
    // If minicluster's name node is null assume that the conf has been
    // set with the right address:port of the name node.
    //
    if (racks != null && numDataNodes > racks.length) {
        throw new IllegalArgumentException("The length of racks [" + racks.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    if (hosts != null && numDataNodes > hosts.length) {
        throw new IllegalArgumentException("The length of hosts [" + hosts.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    // Generate some hostnames if required
    if (racks != null && hosts == null) {
        hosts = new String[numDataNodes];
        for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
            hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
        }
    }

    if (simulatedCapacities != null && numDataNodes > simulatedCapacities.length) {
        throw new IllegalArgumentException("The length of simulatedCapacities [" + simulatedCapacities.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }

    if (dnConfOverlays != null && numDataNodes > dnConfOverlays.length) {
        throw new IllegalArgumentException("The length of dnConfOverlays [" + dnConfOverlays.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }

    String[] dnArgs = (operation == null || operation != StartupOption.ROLLBACK) ? null
            : new String[] { operation.getName() };

    for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
        Configuration dnConf = new HdfsConfiguration(conf);
        if (dnConfOverlays != null) {
            dnConf.addResource(dnConfOverlays[i]);
        }
        // Set up datanode address
        setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
        if (manageDfsDirs) {
            String dirs = makeDataNodeDirs(i, storageType);
            dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
            conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
        }
        if (simulatedCapacities != null) {
            SimulatedFSDataset.setFactory(dnConf);
            dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
                    simulatedCapacities[i - curDatanodesNum]);
        }
        LOG.info("Starting DataNode " + i + " with " + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": "
                + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
        if (hosts != null) {
            dnConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hosts[i - curDatanodesNum]);
            LOG.info("Starting DataNode " + i + " with hostname set to: "
                    + dnConf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
        }
        if (racks != null) {
            String name = hosts[i - curDatanodesNum];
            LOG.info("Adding node with hostname : " + name + " to rack " + racks[i - curDatanodesNum]);
            StaticMapping.addNodeToRack(name, racks[i - curDatanodesNum]);
        }
        Configuration newconf = new HdfsConfiguration(dnConf); // save config
        if (hosts != null) {
            NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
        }

        SecureResources secureResources = null;
        if (UserGroupInformation.isSecurityEnabled() && conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY) == null) {
            try {
                secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
            } catch (Exception ex) {
                ex.printStackTrace();
            }
        }
        final int maxRetriesOnSasl = conf.getInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY,
                IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT);
        int numRetries = 0;
        DataNode dn = null;
        while (true) {
            try {
                dn = DataNode.instantiateDataNode(dnArgs, dnConf, secureResources);
                break;
            } catch (IOException e) {
                // Work around issue testing security where rapidly starting multiple
                // DataNodes using the same principal gets rejected by the KDC as a
                // replay attack.
                if (UserGroupInformation.isSecurityEnabled() && numRetries < maxRetriesOnSasl) {
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        break;
                    }
                    ++numRetries;
                    continue;
                }
                throw e;
            }
        }
        if (dn == null)
            throw new IOException("Cannot start DataNode in " + dnConf.get(DFS_DATANODE_DATA_DIR_KEY));
        // since HDFS does things based on host|ip:port, we need to add the
        // mapping for the service to rackId
        String service = SecurityUtil.buildTokenService(dn.getXferAddress()).toString();
        if (racks != null) {
            LOG.info("Adding node with service : " + service + " to rack " + racks[i - curDatanodesNum]);
            StaticMapping.addNodeToRack(service, racks[i - curDatanodesNum]);
        }
        dn.runDatanodeDaemon();
        dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs, secureResources, dn.getIpcPort()));
    }
    curDatanodesNum += numDataNodes;
    this.numDataNodes += numDataNodes;
    waitActive();
}
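
Callers normally do not invoke startDataNodes directly; the cluster builder drives it, and with manageDfsDirs left at its default the data directories are created and DFS_DATANODE_DATA_DIR_KEY is filled in automatically. A minimal usage sketch, assuming the stock Hadoop MiniDFSCluster.Builder API that this class mirrors (the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniClusterUsage {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        try {
            cluster.waitActive();
            // startDataNodes has set the generated data directories on conf.
            System.out.println(conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
        } finally {
            cluster.shutdown();
        }
    }
}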

From source file: common.DataNode.java

License: Apache License

static Collection<URI> getStorageDirs(Configuration conf) {
    Collection<String> dirNames = conf.getStringCollection(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
    return Util.stringCollectionAsURIs(dirNames);
}
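
To see what getStorageDirs consumes, here is a short sketch of parsing a typical comma-separated value; the URI normalization below is a plain-Java stand-in for Util.stringCollectionAsURIs, so treat it as an approximation of that helper rather than its exact behavior:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class StorageDirsSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Bare paths and file:// URIs may be mixed in the same value.
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, "/data/1/dn,file:///data/2/dn");
        for (String dir : conf.getStringCollection(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY)) {
            // Approximation: treat scheme-less entries as local file paths.
            URI uri = dir.startsWith("file:") ? URI.create(dir) : new java.io.File(dir).toURI();
            System.out.println(uri);
        }
    }
}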

From source file: common.DataNode.java

License: Apache License

/**
 * Make an instance of DataNode after ensuring that at least one of the
 * given data directories (and their parent directories, if necessary)
 * can be created.
 * @param dataDirs List of directories, where the new DataNode instance should
 * keep its files.
 * @param conf Configuration instance to use.
 * @return DataNode instance for given list of data dirs and conf, or null if
 * no directory from this directory list can be created.
 * @throws IOException
 */
static DataNode makeInstance(Collection<URI> dataDirs, Configuration conf) throws IOException {
    LocalFileSystem localFS = FileSystem.getLocal(conf);
    FsPermission permission = new FsPermission(conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
            DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
    ArrayList<File> dirs = getDataDirsFromURIs(dataDirs, localFS, permission);

    if (dirs.size() > 0) {
        return new DataNode(conf, dirs);
    }
    LOG.error("All directories in " + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + " are invalid.");
    return null;
}
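
The permission check in makeInstance is driven by the sibling key dfs.datanode.data.dir.perm. A minimal sketch of building the same FsPermission by hand, using the key and default exactly as the method above does (the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class DataDirPermissionSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Falls back to DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT when unset.
        FsPermission permission = new FsPermission(
                conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
                        DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
        System.out.println(permission);
    }
}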

From source file: common.DataNode.java

License: Apache License

static ArrayList<File> getDataDirsFromURIs(Collection<URI> dataDirs, LocalFileSystem localFS,
        FsPermission permission) {
    ArrayList<File> dirs = new ArrayList<File>();
    for (URI dirURI : dataDirs) {
        if (!"file".equalsIgnoreCase(dirURI.getScheme())) {
            LOG.warn("Unsupported URI schema in " + dirURI + ". Ignoring ...");
            continue;
        }
        // drop any (illegal) authority in the URI for backwards compatibility
        File data = new File(dirURI.getPath());
        try {
            DiskChecker.checkDir(localFS, new Path(data.toURI()), permission);
            dirs.add(data);
        } catch (IOException e) {
            LOG.warn(
                    "Invalid directory in: " + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": " + e.getMessage());
        }
    }
    return dirs;
}

From source file: org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl.java

License: Apache License

/**
 * @param config
 *          initial configuration
 */
@SuppressWarnings("deprecation")
public MiniAccumuloClusterImpl(MiniAccumuloConfigImpl config) throws IOException {

    this.config = config.initialize();

    mkdirs(config.getConfDir());
    mkdirs(config.getLogDir());
    mkdirs(config.getLibDir());
    mkdirs(config.getLibExtDir());

    if (!config.useExistingInstance()) {
        if (!config.useExistingZooKeepers())
            mkdirs(config.getZooKeeperDir());
        mkdirs(config.getWalogDir());
        mkdirs(config.getAccumuloDir());
    }

    if (config.useMiniDFS()) {
        File nn = new File(config.getAccumuloDir(), "nn");
        mkdirs(nn);
        File dn = new File(config.getAccumuloDir(), "dn");
        mkdirs(dn);
        File dfs = new File(config.getAccumuloDir(), "dfs");
        mkdirs(dfs);
        Configuration conf = new Configuration();
        conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
        conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, "1");
        conf.set("dfs.support.append", "true");
        conf.set("dfs.datanode.synconclose", "true");
        conf.set("dfs.datanode.data.dir.perm", MiniDFSUtil.computeDatanodeDirectoryPermission());
        String oldTestBuildData = System.setProperty("test.build.data", dfs.getAbsolutePath());
        miniDFS = new MiniDFSCluster.Builder(conf).build();
        if (oldTestBuildData == null)
            System.clearProperty("test.build.data");
        else
            System.setProperty("test.build.data", oldTestBuildData);
        miniDFS.waitClusterUp();
        InetSocketAddress dfsAddress = miniDFS.getNameNode().getNameNodeAddress();
        dfsUri = "hdfs://" + dfsAddress.getHostName() + ":" + dfsAddress.getPort();
        File coreFile = new File(config.getConfDir(), "core-site.xml");
        writeConfig(coreFile, Collections.singletonMap("fs.default.name", dfsUri).entrySet());
        File hdfsFile = new File(config.getConfDir(), "hdfs-site.xml");
        writeConfig(hdfsFile, conf);

        Map<String, String> siteConfig = config.getSiteConfig();
        siteConfig.put(Property.INSTANCE_DFS_URI.getKey(), dfsUri);
        siteConfig.put(Property.INSTANCE_DFS_DIR.getKey(), "/accumulo");
        config.setSiteConfig(siteConfig);
    } else if (config.useExistingInstance()) {
        dfsUri = CachedConfiguration.getInstance().get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
    } else {
        dfsUri = "file:///";
    }

    File clientConfFile = config.getClientConfFile();
    // Write only the properties that correspond to ClientConfiguration properties
    writeConfigProperties(clientConfFile, Maps.filterEntries(config.getSiteConfig(),
            v -> ClientConfiguration.ClientProperty.getPropertyByKey(v.getKey()) != null));

    File siteFile = new File(config.getConfDir(), "accumulo-site.xml");
    writeConfig(siteFile, config.getSiteConfig().entrySet());

    if (!config.useExistingInstance() && !config.useExistingZooKeepers()) {
        zooCfgFile = new File(config.getConfDir(), "zoo.cfg");
        FileWriter fileWriter = new FileWriter(zooCfgFile);

        // zookeeper uses Properties to read its config, so use that to write in order to properly escape things like Windows paths
        Properties zooCfg = new Properties();
        zooCfg.setProperty("tickTime", "2000");
        zooCfg.setProperty("initLimit", "10");
        zooCfg.setProperty("syncLimit", "5");
        zooCfg.setProperty("clientPortAddress", "127.0.0.1");
        zooCfg.setProperty("clientPort", config.getZooKeeperPort() + "");
        zooCfg.setProperty("maxClientCnxns", "1000");
        zooCfg.setProperty("dataDir", config.getZooKeeperDir().getAbsolutePath());
        zooCfg.store(fileWriter, null);

        fileWriter.close();
    }

    // disable audit logging for mini....
    InputStream auditStream = this.getClass().getResourceAsStream("/auditLog.xml");

    if (auditStream != null) {
        FileUtils.copyInputStreamToFile(auditStream, new File(config.getConfDir(), "auditLog.xml"));
    }

    clusterControl = new MiniAccumuloClusterControl(this);
}

From source file: org.apache.accumulo.minicluster.MiniAccumuloCluster.java

License: Apache License

/**
 * @param config
 *          initial configuration
 */
public MiniAccumuloCluster(MiniAccumuloConfig config) throws IOException {

    this.config = config.initialize();

    config.getConfDir().mkdirs();
    config.getAccumuloDir().mkdirs();
    config.getZooKeeperDir().mkdirs();
    config.getLogDir().mkdirs();
    config.getWalogDir().mkdirs();
    config.getLibDir().mkdirs();

    if (config.useMiniDFS()) {
        File nn = new File(config.getAccumuloDir(), "nn");
        nn.mkdirs();
        File dn = new File(config.getAccumuloDir(), "dn");
        dn.mkdirs();
        File dfs = new File(config.getAccumuloDir(), "dfs");
        dfs.mkdirs();
        Configuration conf = new Configuration();
        conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
        conf.set("dfs.support.append", "true");
        conf.set("dfs.datanode.synconclose", "true");
        conf.set("dfs.datanode.data.dir.perm", MiniDFSUtil.computeDatanodeDirectoryPermission());
        String oldTestBuildData = System.setProperty("test.build.data", dfs.getAbsolutePath());
        miniDFS = new MiniDFSCluster(conf, 1, true, null);
        if (oldTestBuildData == null)
            System.clearProperty("test.build.data");
        else
            System.setProperty("test.build.data", oldTestBuildData);
        miniDFS.waitClusterUp();
        InetSocketAddress dfsAddress = miniDFS.getNameNode().getNameNodeAddress();
        dfsUri = "hdfs://" + dfsAddress.getHostName() + ":" + dfsAddress.getPort();
        File coreFile = new File(config.getConfDir(), "core-site.xml");
        writeConfig(coreFile, Collections.singletonMap("fs.default.name", dfsUri).entrySet());
        File hdfsFile = new File(config.getConfDir(), "hdfs-site.xml");
        writeConfig(hdfsFile, conf);

        Map<String, String> siteConfig = config.getSiteConfig();
        siteConfig.put(Property.INSTANCE_DFS_URI.getKey(), dfsUri);
        siteConfig.put(Property.INSTANCE_DFS_DIR.getKey(), "/accumulo");
        config.setSiteConfig(siteConfig);
    } else {
        dfsUri = "file://";
    }

    File siteFile = new File(config.getConfDir(), "accumulo-site.xml");
    writeConfig(siteFile, config.getSiteConfig().entrySet());

    FileWriter fileWriter = new FileWriter(siteFile);
    fileWriter.append("<configuration>\n");

    for (Entry<String, String> entry : config.getSiteConfig().entrySet())
        fileWriter.append("<property><name>" + entry.getKey() + "</name><value>" + entry.getValue()
                + "</value></property>\n");
    fileWriter.append("</configuration>\n");
    fileWriter.close();

    zooCfgFile = new File(config.getConfDir(), "zoo.cfg");
    fileWriter = new FileWriter(zooCfgFile);

    // zookeeper uses Properties to read its config, so use that to write in order to properly escape things like Windows paths
    Properties zooCfg = new Properties();
    zooCfg.setProperty("tickTime", "2000");
    zooCfg.setProperty("initLimit", "10");
    zooCfg.setProperty("syncLimit", "5");
    zooCfg.setProperty("clientPort", config.getZooKeeperPort() + "");
    zooCfg.setProperty("maxClientCnxns", "1000");
    zooCfg.setProperty("dataDir", config.getZooKeeperDir().getAbsolutePath());
    zooCfg.store(fileWriter, null);

    fileWriter.close();

    File nativeMap = new File(config.getLibDir().getAbsolutePath() + "/native/map");
    nativeMap.mkdirs();
    File testRoot = new File(
            new File(new File(System.getProperty("user.dir")).getParent() + "/server/src/main/c++/nativeMap")
                    .getAbsolutePath());

    if (testRoot.exists()) {
        for (String file : testRoot.list()) {
            File src = new File(testRoot, file);
            if (src.isFile() && file.startsWith("libNativeMap"))
                FileUtils.copyFile(src, new File(nativeMap, file));
        }
    }
}

From source file: org.apache.accumulo.minicluster.MiniAccumuloClusterTest.java

License: Apache License

@Test
public void checkDFSConstants() {
    // check for unexpected changes in static constants because these will be inlined
    // and we won't otherwise know that they won't work on a particular version
    assertEquals("dfs.namenode.name.dir", DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
    assertEquals("dfs.datanode.data.dir", DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
    assertEquals("dfs.replication", DFSConfigKeys.DFS_REPLICATION_KEY);
}

From source file: org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl.java

License: Apache License

/**
 * @param config
 *          initial configuration
 */
@SuppressWarnings("deprecation")
public MiniAccumuloClusterImpl(MiniAccumuloConfigImpl config) throws IOException {

    this.config = config.initialize();

    mkdirs(config.getConfDir());
    mkdirs(config.getLogDir());
    mkdirs(config.getLibDir());
    mkdirs(config.getLibExtDir());

    if (!config.useExistingInstance()) {
        if (!config.useExistingZooKeepers())
            mkdirs(config.getZooKeeperDir());
        mkdirs(config.getAccumuloDir());
    }

    if (config.useMiniDFS()) {
        File nn = new File(config.getAccumuloDir(), "nn");
        mkdirs(nn);
        File dn = new File(config.getAccumuloDir(), "dn");
        mkdirs(dn);
        File dfs = new File(config.getAccumuloDir(), "dfs");
        mkdirs(dfs);
        Configuration conf = new Configuration();
        conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
        conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, "1");
        conf.set("dfs.support.append", "true");
        conf.set("dfs.datanode.synconclose", "true");
        conf.set("dfs.datanode.data.dir.perm", MiniDFSUtil.computeDatanodeDirectoryPermission());
        String oldTestBuildData = System.setProperty("test.build.data", dfs.getAbsolutePath());
        miniDFS = new MiniDFSCluster.Builder(conf).build();
        if (oldTestBuildData == null)
            System.clearProperty("test.build.data");
        else
            System.setProperty("test.build.data", oldTestBuildData);
        miniDFS.waitClusterUp();
        InetSocketAddress dfsAddress = miniDFS.getNameNode().getNameNodeAddress();
        dfsUri = "hdfs://" + dfsAddress.getHostName() + ":" + dfsAddress.getPort();
        File coreFile = new File(config.getConfDir(), "core-site.xml");
        writeConfig(coreFile, Collections.singletonMap("fs.default.name", dfsUri).entrySet());
        File hdfsFile = new File(config.getConfDir(), "hdfs-site.xml");
        writeConfig(hdfsFile, conf);

        Map<String, String> siteConfig = config.getSiteConfig();
        siteConfig.put(Property.INSTANCE_DFS_URI.getKey(), dfsUri);
        siteConfig.put(Property.INSTANCE_DFS_DIR.getKey(), "/accumulo");
        config.setSiteConfig(siteConfig);
    } else if (config.useExistingInstance()) {
        dfsUri = config.getHadoopConfiguration().get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
    } else {
        dfsUri = "file:///";
    }

    File clientConfFile = config.getClientConfFile();
    // Write only the properties that correspond to ClientConfiguration properties
    writeConfigProperties(clientConfFile,
            Maps.filterEntries(config.getSiteConfig(),
                    v -> org.apache.accumulo.core.client.ClientConfiguration.ClientProperty
                            .getPropertyByKey(v.getKey()) != null));

    Map<String, String> clientProps = config.getClientProps();
    clientProps.put(ClientProperty.INSTANCE_ZOOKEEPERS.getKey(), config.getZooKeepers());
    clientProps.put(ClientProperty.INSTANCE_NAME.getKey(), config.getInstanceName());
    if (!clientProps.containsKey(ClientProperty.AUTH_TYPE.getKey())) {
        clientProps.put(ClientProperty.AUTH_TYPE.getKey(), "password");
        clientProps.put(ClientProperty.AUTH_PRINCIPAL.getKey(), config.getRootUserName());
        clientProps.put(ClientProperty.AUTH_TOKEN.getKey(), config.getRootPassword());
    }

    File clientPropsFile = config.getClientPropsFile();
    writeConfigProperties(clientPropsFile, clientProps);

    File siteFile = new File(config.getConfDir(), "accumulo.properties");
    writeConfigProperties(siteFile, config.getSiteConfig());
    siteConfig = new SiteConfiguration(siteFile);

    if (!config.useExistingInstance() && !config.useExistingZooKeepers()) {
        zooCfgFile = new File(config.getConfDir(), "zoo.cfg");
        FileWriter fileWriter = new FileWriter(zooCfgFile);

        // zookeeper uses Properties to read its config, so use that to write in order to properly
        // escape things like Windows paths
        Properties zooCfg = new Properties();
        zooCfg.setProperty("tickTime", "2000");
        zooCfg.setProperty("initLimit", "10");
        zooCfg.setProperty("syncLimit", "5");
        zooCfg.setProperty("clientPortAddress", "127.0.0.1");
        zooCfg.setProperty("clientPort", config.getZooKeeperPort() + "");
        zooCfg.setProperty("maxClientCnxns", "1000");
        zooCfg.setProperty("dataDir", config.getZooKeeperDir().getAbsolutePath());
        zooCfg.store(fileWriter, null);

        fileWriter.close();
    }
    clusterControl = new MiniAccumuloClusterControl(this);
}