Example usage for io.netty.util.internal ThreadLocalRandom setInitialSeedUniquifier

Introduction

On this page you can find example usage of io.netty.util.internal.ThreadLocalRandom.setInitialSeedUniquifier.

Prototype

public static void setInitialSeedUniquifier(long initialSeedUniquifier) 
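
A minimal sketch of calling this method, assuming a Netty 4.x version where the internal io.netty.util.internal.ThreadLocalRandom class exposes this hook. The call should happen before the first use of ThreadLocalRandom.current(), since the initial seed is generated lazily on first use:

import io.netty.util.internal.ThreadLocalRandom;

public class SeedUniquifierExample {
    public static void main(String[] args) {
        // Fix the seed uniquifier up front so Netty skips its
        // SecureRandom-based seed generation, which can block on
        // entropy-starved machines.
        ThreadLocalRandom.setInitialSeedUniquifier(1L);

        // The first call to current() initializes the thread-local
        // seed from the uniquifier set above.
        System.out.println(ThreadLocalRandom.current().nextLong());
    }
}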

Usage

From source file: org.apache.solr.cloud.hdfs.HdfsTestUtil.java

License: Apache License

public static MiniDFSCluster setupClass(String dir, boolean safeModeTesting, boolean haTesting)
        throws Exception {
    LuceneTestCase.assumeFalse("HDFS tests were disabled by -Dtests.disableHdfs",
            Boolean.parseBoolean(System.getProperty("tests.disableHdfs", "false")));

    savedLocale = Locale.getDefault();
    // TODO: we HACK around HADOOP-9643
    Locale.setDefault(Locale.ENGLISH);

    if (!HA_TESTING_ENABLED)
        haTesting = false;

    // keep netty from using secure random on startup: SOLR-10098
    ThreadLocalRandom.setInitialSeedUniquifier(1L);

    int dataNodes = Integer.getInteger("tests.hdfs.numdatanodes", 2);

    Configuration conf = new Configuration();
    conf.set("dfs.block.access.token.enable", "false");
    conf.set("dfs.permissions.enabled", "false");
    conf.set("hadoop.security.authentication", "simple");
    conf.set("hdfs.minidfs.basedir", dir + File.separator + "hdfsBaseDir");
    conf.set("dfs.namenode.name.dir", dir + File.separator + "nameNodeNameDir");
    conf.setBoolean("fs.hdfs.impl.disable.cache", true);

    System.setProperty("test.build.data", dir + File.separator + "hdfs" + File.separator + "build");
    System.setProperty("test.cache.data", dir + File.separator + "hdfs" + File.separator + "cache");
    System.setProperty("solr.lock.type", DirectoryFactory.LOCK_TYPE_HDFS);

    System.setProperty("solr.hdfs.blockcache.global", Boolean.toString(LuceneTestCase.random().nextBoolean()));

    final MiniDFSCluster dfsCluster;

    if (!haTesting) {
        dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
        System.setProperty("solr.hdfs.home", getDataDir(dfsCluster, "solr_hdfs_home"));
    } else {

        dfsCluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology())
                .numDataNodes(dataNodes).build();

        Configuration haConfig = getClientConfiguration(dfsCluster);

        HdfsUtil.TEST_CONF = haConfig;
        System.setProperty("solr.hdfs.home", getDataDir(dfsCluster, "solr_hdfs_home"));
    }

    dfsCluster.waitActive();

    if (haTesting)
        dfsCluster.transitionToActive(0);

    int rndMode = LuceneTestCase.random().nextInt(3);
    if (safeModeTesting && rndMode == 1) {
        NameNodeAdapter.enterSafeMode(dfsCluster.getNameNode(), false);

        int rnd = LuceneTestCase.random().nextInt(10000);
        Timer timer = new Timer();
        timers.put(dfsCluster, timer);
        timer.schedule(new TimerTask() {

            @Override
            public void run() {
                NameNodeAdapter.leaveSafeMode(dfsCluster.getNameNode());
            }
        }, rnd);

    } else if (haTesting && rndMode == 2) {
        int rnd = LuceneTestCase.random().nextInt(30000);
        Timer timer = new Timer();
        timers.put(dfsCluster, timer);
        timer.schedule(new TimerTask() {

            @Override
            public void run() {
                // TODO: randomly transition to standby
                //          try {
                //            dfsCluster.transitionToStandby(0);
                //            dfsCluster.transitionToActive(1);
                //          } catch (IOException e) {
                //            throw new RuntimeException();
                //          }

            }
        }, rnd);
    } else {

        // TODO: we could do much better at testing this
        // force a lease recovery by creating a tlog file and not closing it
        URI uri = dfsCluster.getURI();
        Path hdfsDirPath = new Path(
                uri.toString() + "/solr/collection1/core_node1/data/tlog/tlog.0000000000000000000");
        // simulate a transaction log file that is already being written and never closed
        badTlogOutStreamFs = FileSystem.get(hdfsDirPath.toUri(), conf);
        badTlogOutStream = badTlogOutStreamFs.create(hdfsDirPath);
    }

    SolrTestCaseJ4.useFactory("org.apache.solr.core.HdfsDirectoryFactory");

    return dfsCluster;
}
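
For context, this is roughly how a test class might drive the helper above. A hedged sketch: the JUnit wiring and the teardownClass counterpart are assumptions, not part of the snippet shown.

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.solr.SolrTestCaseJ4;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class ExampleHdfsTest extends SolrTestCaseJ4 {

    private static MiniDFSCluster dfsCluster;

    @BeforeClass
    public static void beforeClass() throws Exception {
        // Uses the three-argument overload above: no safe-mode or HA testing.
        dfsCluster = HdfsTestUtil.setupClass(createTempDir().toString(), false, false);
    }

    @AfterClass
    public static void afterClass() throws Exception {
        // Assumes a matching teardownClass(...) cleanup helper exists.
        HdfsTestUtil.teardownClass(dfsCluster);
        dfsCluster = null;
    }
}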