Example usage for org.apache.hadoop.hdfs DFSConfigKeys DFS_REPLICATION_KEY


Introduction

This page collects example usages of the org.apache.hadoop.hdfs DFSConfigKeys constant DFS_REPLICATION_KEY, which holds the configuration key "dfs.replication".

Prototype

public static final String DFS_REPLICATION_KEY = "dfs.replication";

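Before the project examples, here is a minimal, self-contained sketch of the usual read-and-override pattern for this key. The class name ReplicationKeyExample and the override value of 2 are illustrative and not taken from any of the projects below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class ReplicationKeyExample {
    public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();

        // Read the configured replication factor, falling back to the
        // shipped default (DFS_REPLICATION_DEFAULT, i.e. 3).
        int replication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
                DFSConfigKeys.DFS_REPLICATION_DEFAULT);
        System.out.println("dfs.replication = " + replication);

        // Override the value for anything created from this Configuration.
        conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
    }
}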

Usage

From source file: com.bigstep.datalake.DLFileSystem.java

License: Apache License

@Override
public short getDefaultReplication() {
    return (short) getConf().getInt(DFSConfigKeys.DFS_REPLICATION_KEY, DFSConfigKeys.DFS_REPLICATION_DEFAULT);
}
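
Here DLFileSystem derives its default replication factor from dfs.replication, falling back to DFS_REPLICATION_DEFAULT (3) when the key is unset.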

From source file: com.kylinolap.job.hadoop.cube.CubeHFileJob.java

License: Apache License

public int run(String[] args) throws Exception {
    Options options = new Options();

    try {
        options.addOption(OPTION_JOB_NAME);
        options.addOption(OPTION_CUBE_NAME);
        options.addOption(OPTION_INPUT_PATH);
        options.addOption(OPTION_OUTPUT_PATH);
        options.addOption(OPTION_HTABLE_NAME);
        parseOptions(options, args);

        Path output = new Path(getOptionValue(OPTION_OUTPUT_PATH));
        String cubeName = getOptionValue(OPTION_CUBE_NAME).toUpperCase();

        CubeManager cubeMgr = CubeManager.getInstance(KylinConfig.getInstanceFromEnv());

        CubeInstance cube = cubeMgr.getCube(cubeName);
        job = Job.getInstance(getConf(), getOptionValue(OPTION_JOB_NAME));

        File jarFile = new File(KylinConfig.getInstanceFromEnv().getKylinJobJarPath());
        if (jarFile.exists()) {
            job.setJar(KylinConfig.getInstanceFromEnv().getKylinJobJarPath());
        } else {
            job.setJarByClass(this.getClass());
        }

        addInputDirs(getOptionValue(OPTION_INPUT_PATH), job);
        FileOutputFormat.setOutputPath(job, output);

        job.setInputFormatClass(SequenceFileInputFormat.class);
        job.setMapperClass(CubeHFileMapper.class);
        job.setReducerClass(KeyValueSortReducer.class);

        // set job configuration
        job.getConfiguration().set(BatchConstants.CFG_CUBE_NAME, cubeName);
        Configuration conf = HBaseConfiguration.create(getConf());
        // add metadata to distributed cache
        attachKylinPropsAndMetadata(cube, job.getConfiguration());

        String tableName = getOptionValue(OPTION_HTABLE_NAME).toUpperCase();
        HTable htable = new HTable(conf, tableName);

        // Automatic config!
        HFileOutputFormat.configureIncrementalLoad(job, htable);

        // set block replication to 3 for hfiles
        conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "3");

        this.deletePath(job.getConfiguration(), output);

        return waitForCompletion(job);
    } catch (Exception e) {
        printUsage(options);
        log.error(e.getLocalizedMessage(), e);
        return 2;
    }
}
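
Note that the replication override above is applied to the HBase Configuration used to open the HTable rather than to the job's own configuration; the same pattern appears again in org.apache.kylin.job.hadoop.cube.CubeHFileJob below.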

From source file: io.hops.erasure_coding.BasicClusterTestCase.java

License: Apache License

@Override
public void setUp() throws Exception {
    cluster = new MiniDFSCluster.Builder(getConf())
            .numDataNodes(
                    getConf().getInt(DFSConfigKeys.DFS_REPLICATION_KEY, DFSConfigKeys.DFS_REPLICATION_DEFAULT))
            .build();
    cluster.waitActive();

    dfs = cluster.getFileSystem();
}
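
In this test fixture the MiniDFSCluster is sized from the configured replication factor, so every block written during the test can reach full replication.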

From source file: io.hops.erasure_coding.TestLocalEncodingManagerImpl.java

License: Apache License

public TestLocalEncodingManagerImpl() {
    conf = new HdfsConfiguration();
    conf.setLong(DFS_BLOCK_SIZE_KEY, DFS_TEST_BLOCK_SIZE);
    conf.setInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
    // The second call wins: replication is forced to 1 for this test.
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    numDatanode = 16;
}

From source file: net.arp7.HdfsPerfTest.WriteFileParameters.java

License: Apache License

/**
 * Initialize some write parameters from the configuration.
 *
 * @param conf the configuration to read defaults from
 */
private void initDefaultsFromConfiguration(Configuration conf) {
    blockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);

    replication = conf.getLong(DFSConfigKeys.DFS_REPLICATION_KEY, DFSConfigKeys.DFS_REPLICATION_DEFAULT);
}
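
Note that the key is read with getLong() here; Configuration parses the stored numeric string regardless of the accessor's width, so this interoperates with code that sets the same key via setInt() or set().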

From source file: org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl.java

License: Apache License

/**
 * @param config
 *          initial configuration
 */
@SuppressWarnings("deprecation")
public MiniAccumuloClusterImpl(MiniAccumuloConfigImpl config) throws IOException {

    this.config = config.initialize();

    mkdirs(config.getConfDir());
    mkdirs(config.getLogDir());
    mkdirs(config.getLibDir());
    mkdirs(config.getLibExtDir());

    if (!config.useExistingInstance()) {
        if (!config.useExistingZooKeepers())
            mkdirs(config.getZooKeeperDir());
        mkdirs(config.getWalogDir());
        mkdirs(config.getAccumuloDir());
    }

    if (config.useMiniDFS()) {
        File nn = new File(config.getAccumuloDir(), "nn");
        mkdirs(nn);
        File dn = new File(config.getAccumuloDir(), "dn");
        mkdirs(dn);
        File dfs = new File(config.getAccumuloDir(), "dfs");
        mkdirs(dfs);
        Configuration conf = new Configuration();
        conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
        conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, "1");
        conf.set("dfs.support.append", "true");
        conf.set("dfs.datanode.synconclose", "true");
        conf.set("dfs.datanode.data.dir.perm", MiniDFSUtil.computeDatanodeDirectoryPermission());
        String oldTestBuildData = System.setProperty("test.build.data", dfs.getAbsolutePath());
        miniDFS = new MiniDFSCluster.Builder(conf).build();
        if (oldTestBuildData == null)
            System.clearProperty("test.build.data");
        else
            System.setProperty("test.build.data", oldTestBuildData);
        miniDFS.waitClusterUp();
        InetSocketAddress dfsAddress = miniDFS.getNameNode().getNameNodeAddress();
        dfsUri = "hdfs://" + dfsAddress.getHostName() + ":" + dfsAddress.getPort();
        File coreFile = new File(config.getConfDir(), "core-site.xml");
        writeConfig(coreFile, Collections.singletonMap("fs.default.name", dfsUri).entrySet());
        File hdfsFile = new File(config.getConfDir(), "hdfs-site.xml");
        writeConfig(hdfsFile, conf);

        Map<String, String> siteConfig = config.getSiteConfig();
        siteConfig.put(Property.INSTANCE_DFS_URI.getKey(), dfsUri);
        siteConfig.put(Property.INSTANCE_DFS_DIR.getKey(), "/accumulo");
        config.setSiteConfig(siteConfig);
    } else if (config.useExistingInstance()) {
        dfsUri = CachedConfiguration.getInstance().get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
    } else {
        dfsUri = "file:///";
    }

    File clientConfFile = config.getClientConfFile();
    // Write only the properties that correspond to ClientConfiguration properties
    writeConfigProperties(clientConfFile, Maps.filterEntries(config.getSiteConfig(),
            v -> ClientConfiguration.ClientProperty.getPropertyByKey(v.getKey()) != null));

    File siteFile = new File(config.getConfDir(), "accumulo-site.xml");
    writeConfig(siteFile, config.getSiteConfig().entrySet());

    if (!config.useExistingInstance() && !config.useExistingZooKeepers()) {
        zooCfgFile = new File(config.getConfDir(), "zoo.cfg");
        FileWriter fileWriter = new FileWriter(zooCfgFile);

        // zookeeper uses Properties to read its config, so use that to write in order to properly escape things like Windows paths
        Properties zooCfg = new Properties();
        zooCfg.setProperty("tickTime", "2000");
        zooCfg.setProperty("initLimit", "10");
        zooCfg.setProperty("syncLimit", "5");
        zooCfg.setProperty("clientPortAddress", "127.0.0.1");
        zooCfg.setProperty("clientPort", config.getZooKeeperPort() + "");
        zooCfg.setProperty("maxClientCnxns", "1000");
        zooCfg.setProperty("dataDir", config.getZooKeeperDir().getAbsolutePath());
        zooCfg.store(fileWriter, null);

        fileWriter.close();
    }

    // disable audit logging for mini....
    InputStream auditStream = this.getClass().getResourceAsStream("/auditLog.xml");

    if (auditStream != null) {
        FileUtils.copyInputStreamToFile(auditStream, new File(config.getConfDir(), "auditLog.xml"));
    }

    clusterControl = new MiniAccumuloClusterControl(this);
}

From source file: org.apache.accumulo.minicluster.MiniAccumuloCluster.java

License: Apache License

/**
 * @param config
 *          initial configuration
 */
public MiniAccumuloCluster(MiniAccumuloConfig config) throws IOException {

    this.config = config.initialize();

    config.getConfDir().mkdirs();
    config.getAccumuloDir().mkdirs();
    config.getZooKeeperDir().mkdirs();
    config.getLogDir().mkdirs();
    config.getWalogDir().mkdirs();
    config.getLibDir().mkdirs();

    if (config.useMiniDFS()) {
        File nn = new File(config.getAccumuloDir(), "nn");
        nn.mkdirs();
        File dn = new File(config.getAccumuloDir(), "dn");
        dn.mkdirs();
        File dfs = new File(config.getAccumuloDir(), "dfs");
        dfs.mkdirs();
        Configuration conf = new Configuration();
        conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
        conf.set("dfs.support.append", "true");
        conf.set("dfs.datanode.synconclose", "true");
        conf.set("dfs.datanode.data.dir.perm", MiniDFSUtil.computeDatanodeDirectoryPermission());
        String oldTestBuildData = System.setProperty("test.build.data", dfs.getAbsolutePath());
        miniDFS = new MiniDFSCluster(conf, 1, true, null);
        if (oldTestBuildData == null)
            System.clearProperty("test.build.data");
        else
            System.setProperty("test.build.data", oldTestBuildData);
        miniDFS.waitClusterUp();
        InetSocketAddress dfsAddress = miniDFS.getNameNode().getNameNodeAddress();
        dfsUri = "hdfs://" + dfsAddress.getHostName() + ":" + dfsAddress.getPort();
        File coreFile = new File(config.getConfDir(), "core-site.xml");
        writeConfig(coreFile, Collections.singletonMap("fs.default.name", dfsUri).entrySet());
        File hdfsFile = new File(config.getConfDir(), "hdfs-site.xml");
        writeConfig(hdfsFile, conf);

        Map<String, String> siteConfig = config.getSiteConfig();
        siteConfig.put(Property.INSTANCE_DFS_URI.getKey(), dfsUri);
        siteConfig.put(Property.INSTANCE_DFS_DIR.getKey(), "/accumulo");
        config.setSiteConfig(siteConfig);
    } else {
        dfsUri = "file://";
    }

    File siteFile = new File(config.getConfDir(), "accumulo-site.xml");
    writeConfig(siteFile, config.getSiteConfig().entrySet());

    zooCfgFile = new File(config.getConfDir(), "zoo.cfg");
    FileWriter fileWriter = new FileWriter(zooCfgFile);

    // zookeeper uses Properties to read its config, so use that to write in order to properly escape things like Windows paths
    Properties zooCfg = new Properties();
    zooCfg.setProperty("tickTime", "2000");
    zooCfg.setProperty("initLimit", "10");
    zooCfg.setProperty("syncLimit", "5");
    zooCfg.setProperty("clientPort", config.getZooKeeperPort() + "");
    zooCfg.setProperty("maxClientCnxns", "1000");
    zooCfg.setProperty("dataDir", config.getZooKeeperDir().getAbsolutePath());
    zooCfg.store(fileWriter, null);

    fileWriter.close();

    File nativeMap = new File(config.getLibDir().getAbsolutePath() + "/native/map");
    nativeMap.mkdirs();
    File testRoot = new File(
            new File(new File(System.getProperty("user.dir")).getParent() + "/server/src/main/c++/nativeMap")
                    .getAbsolutePath());

    if (testRoot.exists()) {
        for (String file : testRoot.list()) {
            File src = new File(testRoot, file);
            if (src.isFile() && file.startsWith("libNativeMap"))
                FileUtils.copyFile(src, new File(nativeMap, file));
        }
    }
}

From source file: org.apache.accumulo.minicluster.MiniAccumuloClusterTest.java

License: Apache License

@Test
public void checkDFSConstants() {
    // check for unexpected changes in static constants because these will be inlined
    // and we won't otherwise know that they won't work on a particular version
    assertEquals("dfs.namenode.name.dir", DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
    assertEquals("dfs.datanode.data.dir", DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
    assertEquals("dfs.replication", DFSConfigKeys.DFS_REPLICATION_KEY);
}
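
Because these are compile-time String constants, their values get inlined into Accumulo's own class files; the assertions catch a Hadoop release silently changing one of the literals.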

From source file: org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl.java

License: Apache License

/**
 * @param config
 *          initial configuration
 */
@SuppressWarnings("deprecation")
public MiniAccumuloClusterImpl(MiniAccumuloConfigImpl config) throws IOException {

    this.config = config.initialize();

    mkdirs(config.getConfDir());
    mkdirs(config.getLogDir());
    mkdirs(config.getLibDir());
    mkdirs(config.getLibExtDir());

    if (!config.useExistingInstance()) {
        if (!config.useExistingZooKeepers())
            mkdirs(config.getZooKeeperDir());
        mkdirs(config.getAccumuloDir());
    }

    if (config.useMiniDFS()) {
        File nn = new File(config.getAccumuloDir(), "nn");
        mkdirs(nn);
        File dn = new File(config.getAccumuloDir(), "dn");
        mkdirs(dn);
        File dfs = new File(config.getAccumuloDir(), "dfs");
        mkdirs(dfs);
        Configuration conf = new Configuration();
        conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
        conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, "1");
        conf.set("dfs.support.append", "true");
        conf.set("dfs.datanode.synconclose", "true");
        conf.set("dfs.datanode.data.dir.perm", MiniDFSUtil.computeDatanodeDirectoryPermission());
        String oldTestBuildData = System.setProperty("test.build.data", dfs.getAbsolutePath());
        miniDFS = new MiniDFSCluster.Builder(conf).build();
        if (oldTestBuildData == null)
            System.clearProperty("test.build.data");
        else
            System.setProperty("test.build.data", oldTestBuildData);
        miniDFS.waitClusterUp();
        InetSocketAddress dfsAddress = miniDFS.getNameNode().getNameNodeAddress();
        dfsUri = "hdfs://" + dfsAddress.getHostName() + ":" + dfsAddress.getPort();
        File coreFile = new File(config.getConfDir(), "core-site.xml");
        writeConfig(coreFile, Collections.singletonMap("fs.default.name", dfsUri).entrySet());
        File hdfsFile = new File(config.getConfDir(), "hdfs-site.xml");
        writeConfig(hdfsFile, conf);

        Map<String, String> siteConfig = config.getSiteConfig();
        siteConfig.put(Property.INSTANCE_DFS_URI.getKey(), dfsUri);
        siteConfig.put(Property.INSTANCE_DFS_DIR.getKey(), "/accumulo");
        config.setSiteConfig(siteConfig);
    } else if (config.useExistingInstance()) {
        dfsUri = config.getHadoopConfiguration().get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
    } else {
        dfsUri = "file:///";
    }

    File clientConfFile = config.getClientConfFile();
    // Write only the properties that correspond to ClientConfiguration properties
    writeConfigProperties(clientConfFile,
            Maps.filterEntries(config.getSiteConfig(),
                    v -> org.apache.accumulo.core.client.ClientConfiguration.ClientProperty
                            .getPropertyByKey(v.getKey()) != null));

    Map<String, String> clientProps = config.getClientProps();
    clientProps.put(ClientProperty.INSTANCE_ZOOKEEPERS.getKey(), config.getZooKeepers());
    clientProps.put(ClientProperty.INSTANCE_NAME.getKey(), config.getInstanceName());
    if (!clientProps.containsKey(ClientProperty.AUTH_TYPE.getKey())) {
        clientProps.put(ClientProperty.AUTH_TYPE.getKey(), "password");
        clientProps.put(ClientProperty.AUTH_PRINCIPAL.getKey(), config.getRootUserName());
        clientProps.put(ClientProperty.AUTH_TOKEN.getKey(), config.getRootPassword());
    }

    File clientPropsFile = config.getClientPropsFile();
    writeConfigProperties(clientPropsFile, clientProps);

    File siteFile = new File(config.getConfDir(), "accumulo.properties");
    writeConfigProperties(siteFile, config.getSiteConfig());
    siteConfig = new SiteConfiguration(siteFile);

    if (!config.useExistingInstance() && !config.useExistingZooKeepers()) {
        zooCfgFile = new File(config.getConfDir(), "zoo.cfg");
        FileWriter fileWriter = new FileWriter(zooCfgFile);

        // zookeeper uses Properties to read its config, so use that to write in order to properly
        // escape things like Windows paths
        Properties zooCfg = new Properties();
        zooCfg.setProperty("tickTime", "2000");
        zooCfg.setProperty("initLimit", "10");
        zooCfg.setProperty("syncLimit", "5");
        zooCfg.setProperty("clientPortAddress", "127.0.0.1");
        zooCfg.setProperty("clientPort", config.getZooKeeperPort() + "");
        zooCfg.setProperty("maxClientCnxns", "1000");
        zooCfg.setProperty("dataDir", config.getZooKeeperDir().getAbsolutePath());
        zooCfg.store(fileWriter, null);

        fileWriter.close();
    }
    clusterControl = new MiniAccumuloClusterControl(this);
}

From source file: org.apache.kylin.job.hadoop.cube.CubeHFileJob.java

License: Apache License

public int run(String[] args) throws Exception {
    Options options = new Options();

    try {
        options.addOption(OPTION_JOB_NAME);
        options.addOption(OPTION_CUBE_NAME);
        options.addOption(OPTION_INPUT_PATH);
        options.addOption(OPTION_OUTPUT_PATH);
        options.addOption(OPTION_HTABLE_NAME);
        parseOptions(options, args);

        Path output = new Path(getOptionValue(OPTION_OUTPUT_PATH));
        String cubeName = getOptionValue(OPTION_CUBE_NAME).toUpperCase();

        CubeManager cubeMgr = CubeManager.getInstance(KylinConfig.getInstanceFromEnv());

        CubeInstance cube = cubeMgr.getCube(cubeName);
        job = Job.getInstance(getConf(), getOptionValue(OPTION_JOB_NAME));

        setJobClasspath(job);

        addInputDirs(getOptionValue(OPTION_INPUT_PATH), job);
        FileOutputFormat.setOutputPath(job, output);

        job.setInputFormatClass(SequenceFileInputFormat.class);
        job.setMapperClass(CubeHFileMapper.class);
        job.setReducerClass(KeyValueSortReducer.class);

        // set job configuration
        job.getConfiguration().set(BatchConstants.CFG_CUBE_NAME, cubeName);
        Configuration conf = HBaseConfiguration.create(getConf());
        // add metadata to distributed cache
        attachKylinPropsAndMetadata(cube, job.getConfiguration());

        String tableName = getOptionValue(OPTION_HTABLE_NAME).toUpperCase();
        HTable htable = new HTable(conf, tableName);

        // Automatic config!
        HFileOutputFormat.configureIncrementalLoad(job, htable);

        // set block replication to 3 for hfiles
        conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "3");

        this.deletePath(job.getConfiguration(), output);

        return waitForCompletion(job);
    } catch (Exception e) {
        logger.error("error in CubeHFileJob", e);
        printUsage(options);
        throw e;
    }
}