Example usage for org.apache.hadoop.util ExitUtil disableSystemExit

Introduction

On this page you can find example usages of org.apache.hadoop.util.ExitUtil#disableSystemExit, collected from open-source projects.

Prototype

public static void disableSystemExit() 

Document

Disable the use of System.exit for testing.
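
Before the project-specific examples, here is a minimal, self-contained sketch of the common pattern: disable System.exit, run code that would otherwise terminate the JVM, and inspect the ExitUtil.ExitException thrown in its place. The class name and the status value 1 below are illustrative assumptions, not taken from any project on this page.

import org.apache.hadoop.util.ExitUtil;

public class DisableSystemExitSketch {
    public static void main(String[] args) {
        // After this call, ExitUtil.terminate(...) throws ExitUtil.ExitException
        // instead of invoking System.exit(...).
        ExitUtil.disableSystemExit();

        try {
            // Illustrative stand-in for code under test that would exit the JVM.
            ExitUtil.terminate(1, "simulated fatal error");
        } catch (ExitUtil.ExitException e) {
            // The would-be process exit code is preserved on the exception.
            System.out.println("intercepted exit, status=" + e.status);
        }
    }
}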

Usage

From source file: cascading.platform.tez.Hadoop2TezPlatform.java

License: Open Source License

@Override
public synchronized void setUp() throws IOException {
    if (configuration != null)
        return;

    if (!isUseCluster()) {
        // Current usage requirements:
        // 1. Clients need to set "tez.local.mode" to true when creating a TezClient instance. (For the examples this can be done via -Dtez.local.mode=true)
        // 2. fs.defaultFS must be set to "file:///"
        // 2.1 If running examples - this must be set in tez-site.xml (so that it's picked up by the client, as well as the conf instances used to configure the Inputs / Outputs).
        // 2.2 If used programmatically (without a tez-site.xml present), all configuration instances used (to create the client / configure Inputs / Outputs) must have this property set.
        // 3. tez.runtime.optimize.local.fetch needs to be set to true (either via tez-site.xml or in all configurations used to create the job (similar to fs.defaultFS in step 2))
        // 4. tez.staging-dir must be set (either programmatically or via tez-site.xml).
        // Until TEZ-1337 goes in - the staging-dir for the job is effectively the root of the filesystem (and where inputs are read from / written to if relative paths are used).

        LOG.info("not using cluster");
        configuration = new Configuration();

        configuration.setInt(FlowRuntimeProps.GATHER_PARTITIONS, getNumGatherPartitions());
        //      configuration.setInt( FlowRuntimeProps.GATHER_PARTITIONS, 1 ); // deadlocks if larger than 1

        configuration.set(TezConfiguration.TEZ_LOCAL_MODE, "true");
        configuration.set("fs.defaultFS", "file:///");
        configuration.set("tez.runtime.optimize.local.fetch", "true");

        // hack to prevent deadlocks where downstream processors are scheduled before upstream
        configuration.setInt("tez.am.inline.task.execution.max-tasks", 3); // testHashJoinMergeIntoHashJoinAccumulatedAccumulatedMerge fails if set to 2

        configuration.set(TezConfiguration.TEZ_IGNORE_LIB_URIS, "true"); // in local mode, use local classpath
        configuration.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, -1);
        configuration.set(TezConfiguration.TEZ_GENERATE_DEBUG_ARTIFACTS, "true");

        configuration.set("tez.am.mode.session", "true"); // allows multiple TezClient instances to be used in a single jvm

        if (!Util.isEmpty(System.getProperty("hadoop.tmp.dir")))
            configuration.set("hadoop.tmp.dir", System.getProperty("hadoop.tmp.dir"));
        else
            configuration.set("hadoop.tmp.dir", "build/test/tmp");

        fileSys = FileSystem.get(configuration);
    } else {
        LOG.info("using cluster");

        if (Util.isEmpty(System.getProperty("hadoop.log.dir")))
            System.setProperty("hadoop.log.dir", "build/test/log");

        if (Util.isEmpty(System.getProperty("hadoop.tmp.dir")))
            System.setProperty("hadoop.tmp.dir", "build/test/tmp");

        new File(System.getProperty("hadoop.log.dir")).mkdirs(); // return value intentionally ignored
        new File(System.getProperty("hadoop.tmp.dir")).mkdirs(); // return value intentionally ignored

        Configuration defaultConf = new Configuration();

        defaultConf.setInt(FlowRuntimeProps.GATHER_PARTITIONS, getNumGatherPartitions());

        defaultConf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, -1);

        //      defaultConf.set( TezConfiguration.TEZ_AM_LOG_LEVEL, "DEBUG" );
        //      defaultConf.set( TezConfiguration.TEZ_TASK_LOG_LEVEL, "DEBUG" );

        defaultConf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
        defaultConf.setBoolean(TezConfiguration.TEZ_AM_NODE_BLACKLISTING_ENABLED, false);
        defaultConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, System.getProperty("hadoop.tmp.dir"));

        miniDFSCluster = new MiniDFSCluster.Builder(defaultConf).numDataNodes(4).format(true).racks(null)
                .build();

        fileSys = miniDFSCluster.getFileSystem();

        Configuration tezConf = new Configuration(defaultConf);
        tezConf.set("fs.defaultFS", fileSys.getUri().toString()); // use HDFS
        tezConf.set(MRJobConfig.MR_AM_STAGING_DIR, "/apps_staging_dir");

        // see MiniTezClusterWithTimeline as alternate
        miniTezCluster = new MiniTezCluster(getClass().getName(), 4, 1, 1); // todo: set to 4
        miniTezCluster.init(tezConf);
        miniTezCluster.start();

        configuration = miniTezCluster.getConfig();

        // stats won't work after completion unless ATS is used
        if (setTimelineStore(configuration)) // true if ATS can be loaded and configured for this Hadoop version
        {
            configuration.set(TezConfiguration.TEZ_HISTORY_LOGGING_SERVICE_CLASS,
                    ATSHistoryLoggingService.class.getName());
            configuration.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
            configuration.set(YarnConfiguration.TIMELINE_SERVICE_ADDRESS, "localhost:10200");
            configuration.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS, "localhost:8188");
            configuration.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS, "localhost:8190");

            yarnHistoryServer = new ApplicationHistoryServer();
            yarnHistoryServer.init(configuration);
            yarnHistoryServer.start();
        }
    }

    configuration.setInt(TezConfiguration.TEZ_AM_MAX_APP_ATTEMPTS, 1);
    configuration.setInt(TezConfiguration.TEZ_AM_TASK_MAX_FAILED_ATTEMPTS, 1);
    configuration.setInt(TezConfiguration.TEZ_AM_MAX_TASK_FAILURES_PER_NODE, 1);

    Map<Object, Object> globalProperties = getGlobalProperties();

    if (logger != null)
        globalProperties.put("log4j.logger", logger);

    FlowProps.setJobPollingInterval(globalProperties, 10); // should speed up tests

    Hadoop2TezPlanner.copyProperties(configuration, globalProperties); // copy any external properties

    Hadoop2TezPlanner.copyConfiguration(properties, configuration); // put all properties on the jobconf

    ExitUtil.disableSystemExit();

    //    forbidSystemExitCall();
}
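
Note that disableSystemExit() is deliberately the last step of setUp(), after both the local-mode and cluster-mode branches: from this point on, any ExitUtil.terminate(...) call made by Tez or YARN components surfaces as an ExitUtil.ExitException inside the test JVM rather than killing the process hosting the whole suite. The commented-out forbidSystemExitCall() suggests a SecurityManager-based alternative was also considered.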

From source file: com.mellanox.r4h.MiniDFSCluster.java

License: Apache License

private void initMiniDFSCluster(Configuration conf, int numDataNodes, StorageType storageType, boolean format,
        boolean manageNameDfsDirs, boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
        boolean manageDataDfsDirs, StartupOption startOpt, StartupOption dnStartOpt, String[] racks,
        String[] hosts, long[] simulatedCapacities, String clusterId, boolean waitSafeMode,
        boolean setupHostsFile, MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
        boolean checkDataNodeAddrConfig, boolean checkDataNodeHostConfig, Configuration[] dnConfOverlays)
        throws IOException {
    ExitUtil.disableSystemExit();

    // Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
    FileSystem.enableSymlinks();

    synchronized (MiniDFSCluster.class) {
        instanceId = instanceCount++;
    }

    this.conf = conf;
    base_dir = new File(determineDfsBaseDir());
    data_dir = new File(base_dir, "data");
    this.waitSafeMode = waitSafeMode;
    this.checkExitOnShutdown = checkExitOnShutdown;

    int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
    conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
    int safemodeExtension = conf.getInt(DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
    conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 seconds
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, StaticMapping.class, DNSToSwitchMapping.class);

    // In an HA cluster, in order for the StandbyNode to perform checkpoints,
    // it needs to know the HTTP port of the Active. So, if ephemeral ports
    // are chosen, disable checkpoints for the test.
    if (!nnTopology.allHttpPortsSpecified() && nnTopology.isHA()) {
        LOG.info("MiniDFSCluster disabling checkpointing in the Standby node "
                + "since no HTTP ports have been specified.");
        conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
    }
    if (!nnTopology.allIpcPortsSpecified() && nnTopology.isHA()) {
        LOG.info("MiniDFSCluster disabling log-roll triggering in the "
                + "Standby node since no IPC ports have been specified.");
        conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
    }

    federation = nnTopology.isFederated();
    try {
        createNameNodesAndSetConf(nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
                enableManagedDfsDirsRedundancy, format, startOpt, clusterId, conf);
    } catch (IOException ioe) {
        LOG.error("IOE creating namenodes. Permissions dump:\n" + createPermissionsDiagnosisString(data_dir));
        throw ioe;
    }
    if (format) {
        if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
            throw new IOException(
                    "Cannot remove data directory: " + data_dir + createPermissionsDiagnosisString(data_dir));
        }
    }

    if (startOpt == StartupOption.RECOVER) {
        return;
    }

    // Start the DataNodes
    startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs,
            dnStartOpt != null ? dnStartOpt : startOpt, racks, hosts, simulatedCapacities, setupHostsFile,
            checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
    waitClusterUp();
    // make sure ProxyUsers uses the latest conf
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
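
The call at the top of initMiniDFSCluster(...) pairs with a check on shutdown: the constructor records checkExitOnShutdown, and upstream Hadoop's MiniDFSCluster fails the test if any daemon tried to exit while the cluster was up. A hedged sketch of that complementary check, modeled on the upstream class rather than quoted from this r4h port:

    private void checkExitOnShutdownSketch() {
        if (checkExitOnShutdown && ExitUtil.terminateCalled()) {
            // Some daemon (NameNode, DataNode, ...) attempted a JVM exit during
            // the test; surface it as a failure and clear the recorded exit so
            // later tests in the same JVM start clean.
            LOG.error("Test resulted in an unexpected exit", ExitUtil.getFirstExitException());
            ExitUtil.resetFirstExitException();
            throw new AssertionError("Test resulted in an unexpected exit");
        }
    }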

From source file: it.crs4.pydoop.mapreduce.pipes.TestPipeApplication.java

License: Apache License

/**
 * test org.apache.hadoop.mapreduce.pipes.Submitter
 *
 * @throws Exception
 */
@Test
public void testSubmitter() throws Exception {

    Configuration conf = new Configuration();

    File[] psw = cleanTokenPasswordFile();

    System.setProperty("test.build.data", "target/tmp/build/TEST_SUBMITTER_MAPPER/data");
    conf.set("hadoop.log.dir", "target/tmp");

    // prepare configuration
    Submitter.setIsJavaMapper(conf, false);
    Submitter.setIsJavaReducer(conf, false);
    Submitter.setKeepCommandFile(conf, false);
    Submitter.setIsJavaRecordReader(conf, false);
    Submitter.setIsJavaRecordWriter(conf, false);
    PipesPartitioner<IntWritable, Text> partitioner = new PipesPartitioner<IntWritable, Text>();
    partitioner.configure(conf);

    Submitter.setJavaPartitioner(conf, partitioner.getClass());

    assertEquals(PipesPartitioner.class, (Submitter.getJavaPartitioner(conf)));
    // the test will invoke a main() method that calls System.exit(); save the current SecurityManager so it can be restored afterwards
    SecurityManager securityManager = System.getSecurityManager();
    // store System.out
    PrintStream oldps = System.out;
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    ExitUtil.disableSystemExit();
    // test without parameters
    try {
        System.setOut(new PrintStream(out));
        Submitter.main(new String[0]);
        fail();
    } catch (ExitUtil.ExitException e) {
        // System.exit was intercepted; verify the usage message printed to stdout
        assertTrue(out.toString().contains("bin/hadoop pipes"));
        assertTrue(out.toString().contains("[-input <path>] // Input directory"));
        assertTrue(out.toString().contains("[-output <path>] // Output directory"));
        assertTrue(out.toString().contains("[-jar <jar file> // jar filename"));
        assertTrue(out.toString().contains("[-inputformat <class>] // InputFormat class"));
        assertTrue(out.toString().contains("[-map <class>] // Java Map class"));
        assertTrue(out.toString().contains("[-partitioner <class>] // Java Partitioner"));
        assertTrue(out.toString().contains("[-reduce <class>] // Java Reduce class"));
        assertTrue(out.toString().contains("[-writer <class>] // Java RecordWriter"));
        assertTrue(out.toString().contains("[-program <executable>] // executable URI"));
        assertTrue(out.toString().contains("[-reduces <num>] // number of reduces"));
        assertTrue(out.toString().contains("[-lazyOutput <true/false>] // createOutputLazily"));

        assertTrue(out.toString()
                .contains("-conf <configuration file>     specify an application configuration file"));
        assertTrue(out.toString().contains("-D <property=value>            use value for given property"));
        assertTrue(out.toString().contains("-fs <local|namenode:port>      specify a namenode"));
        assertTrue(out.toString().contains("-jt <local|jobtracker:port>    specify a job tracker"));
        assertTrue(out.toString().contains(
                "-files <comma separated list of files>    specify comma separated files to be copied to the map reduce cluster"));
        assertTrue(out.toString().contains(
                "-libjars <comma separated list of jars>    specify comma separated jar files to include in the classpath."));
        assertTrue(out.toString().contains(
                "-archives <comma separated list of archives>    specify comma separated archives to be unarchived on the compute machines."));
    } finally {
        System.setOut(oldps);
        // restore
        System.setSecurityManager(securityManager);
        if (psw != null) {
            // remove password files
            for (File file : psw) {
                file.deleteOnExit();
            }
        }
    }
    // test calling Submitter from the command line
    try {
        File fCommand = getFileCommand(null);
        String[] args = new String[20];
        File input = new File(workSpace + File.separator + "input");
        if (!input.exists()) {
            Assert.assertTrue(input.createNewFile());
        }
        File outPut = new File(workSpace + File.separator + "output");
        FileUtil.fullyDelete(outPut);

        args[0] = "-input";
        args[1] = input.getAbsolutePath();// "input";
        args[2] = "-output";
        args[3] = outPut.getAbsolutePath();// "output";
        args[4] = "-inputformat";
        args[5] = "org.apache.hadoop.mapreduce.lib.input.TextInputFormat";
        args[6] = "-map";
        args[7] = "org.apache.hadoop.mapreduce.lib.map.InverseMapper";
        args[8] = "-partitioner";
        args[9] = "it.crs4.pydoop.mapreduce.pipes.PipesPartitioner";
        args[10] = "-reduce";
        args[11] = "org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer";
        args[12] = "-writer";
        args[13] = "org.apache.hadoop.mapreduce.lib.output.TextOutputFormat";
        args[14] = "-program";
        args[15] = fCommand.getAbsolutePath();// "program";
        args[16] = "-reduces";
        args[17] = "2";
        args[18] = "-lazyOutput";
        args[19] = "lazyOutput";
        Submitter.main(args);
        fail();
    } catch (ExitUtil.ExitException e) {
        // status should be 0
        assertEquals(0, e.status);

    } finally {
        System.setOut(oldps);
        System.setSecurityManager(securityManager);
    }

}
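
One caveat when many such tests share a JVM: ExitUtil remembers the first intercepted exit until it is explicitly cleared. A minimal cleanup sketch, assuming the JUnit 4 style already used above (the @After hook itself is an assumption; the original test only shows @Test):

    @After
    public void resetExitUtil() {
        // Clear the exit recorded by terminate() in this test so it cannot
        // leak into assertions made by the next test in the same JVM.
        if (ExitUtil.terminateCalled()) {
            ExitUtil.resetFirstExitException();
        }
    }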