Example usage for org.apache.hadoop.metrics2.lib DefaultMetricsSystem initialize

Introduction

On this page you can find example usages of org.apache.hadoop.metrics2.lib DefaultMetricsSystem initialize.

Prototype

public static MetricsSystem initialize(String prefix) 

Document

Convenience method to initialize the metrics system.
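
Before the examples, here is a minimal, self-contained sketch of the call. The class name MetricsBootstrap and the prefix "MyApp" are illustrative placeholders, not taken from any example on this page:

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

public class MetricsBootstrap {
    public static void main(String[] args) {
        // initialize() creates and starts the singleton metrics system.
        // The prefix selects the configuration to load, typically from
        // hadoop-metrics2-myapp.properties, falling back to
        // hadoop-metrics2.properties.
        MetricsSystem ms = DefaultMetricsSystem.initialize("MyApp");

        // ... register sources and sinks, run the application ...

        // Stop the metrics system cleanly before exit.
        ms.shutdown();
    }
}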

Usage

From source file:com.github.joshelser.dropwizard.metrics.hadoop.StandaloneExample.java

License:Apache License

public static void main(String[] args) throws Exception {
    final MetricRegistry metrics = new MetricRegistry();

    final HadoopMetrics2Reporter metrics2Reporter = HadoopMetrics2Reporter.forRegistry(metrics).build(
            DefaultMetricsSystem.initialize("StandaloneTest"), // The application-level name
            "Test", // Component name
            "Test", // Component description
            "Test"); // Name for each metric record
    final ConsoleReporter consoleReporter = ConsoleReporter.forRegistry(metrics).build();

    MetricsSystem metrics2 = DefaultMetricsSystem.instance();
    // Writes to stdout without a filename configuration
    // Will be invoked every 10 seconds by default
    FileSink sink = new FileSink();
    metrics2.register("filesink", "filesink", sink);
    sink.init(new SubsetConfiguration(null, null) {
        public String getString(String key) {
            if (key.equals("filename")) {
                return null;
            }
            return super.getString(key);
        }
    });

    // How often should the dropwizard reporter be invoked
    metrics2Reporter.start(500, TimeUnit.MILLISECONDS);
    // How often will the dropwizard metrics be logged to the console
    consoleReporter.start(2, TimeUnit.SECONDS);

    generateMetrics(metrics, 5000, 25, TimeUnit.MILLISECONDS, metrics2Reporter, 10);
}

From source file:com.twitter.hraven.hadoopJobMonitor.HadoopJobMonitorService.java

License:Apache License

public void init() {
    YarnConfiguration yConf = new YarnConfiguration();
    DefaultMetricsSystem.initialize("HadoopJobMonitor");
    String logDir = System.getProperty("hadoopJobMonitor.log.dir");
    if (logDir == null)
        logDir = "/tmp";
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    try {
        ObjectName name = new ObjectName(
                "com.twitter.hraven.hadoopJobMonitor.jmx:type=" + WhiteList.class.getSimpleName());
        WhiteList.init(logDir);
        WhiteList mbean = WhiteList.getInstance();
        mbs.registerMBean(mbean, name);
        LOG.error("Current whitelist is: \n" + mbean.getExpirations());
    } catch (Exception e) {
        LOG.fatal("Error in retriving white list from dir " + logDir, e);
    }

    metrics = HadoopJobMonitorMetrics.initSingleton(conf);

    rmDelegate = new ResourceMgrDelegate(yConf);
    clientCache = new ClientCache(conf, rmDelegate);
    AppConfCache.init(conf);
    ProgressCache.init(conf);
    Mail.init(conf);
    Notifier.init(conf);
    clusterCheckerExecutor = Executors
            .newSingleThreadScheduledExecutor(new ClusterStatusChecker.SimpleThreadFactory());
    int concurrentAppCheckers = conf.getInt(HadoopJobMonitorConfiguration.NEW_APP_CHECKER_CONCURRENCY,
            HadoopJobMonitorConfiguration.DEFAULT_NEW_APP_CHECKER_CONCURRENCY);
    appCheckerExecutor = new BlockingExecutor(concurrentAppCheckers,
            new AppStatusChecker.SimpleThreadFactory());
}

From source file:de.huberlin.wbi.hiway.am.HiWay.java

License:Apache License

/**
 * Parse command line options.
 * 
 * @param args
 *            Command line arguments.
 * @return Whether init successful and run should be invoked.
 * @throws ParseException
 *             ParseException
 */
public boolean init(String[] args) throws ParseException {

    DefaultMetricsSystem.initialize("ApplicationMaster");

    Options opts = new Options();
    opts.addOption("app_attempt_id", true, "App Attempt ID. Not to be used unless for testing purposes");
    opts.addOption("workflow", true, "The workflow file to be executed by the Application Master");
    opts.addOption("s", "summary", true,
            "The name of the json summary file. No file is created if this parameter is not specified.");
    opts.addOption("debug", false, "Dump out debug information");
    opts.addOption("appid", true, "Id of this Application Master.");

    opts.addOption("help", false, "Print usage");
    CommandLine cliParser = new GnuParser().parse(opts, args);

    if (args.length == 0) {
        printUsage(opts);
        throw new IllegalArgumentException("No args specified for application master to initialize");
    }

    if (!cliParser.hasOption("appid")) {
        throw new IllegalArgumentException("No id of Application Master specified");
    }

    appId = cliParser.getOptionValue("appid");
    try {
        statLog = new BufferedWriter(new FileWriter(appId + ".log"));
    } catch (IOException e) {
        e.printStackTrace();
        System.exit(-1);
    }

    if (cliParser.hasOption("help")) {
        printUsage(opts);
        return false;
    }

    if (cliParser.hasOption("debug")) {
        dumpOutDebugInfo();
    }

    if (cliParser.hasOption("summary")) {
        summaryPath = new Path(cliParser.getOptionValue("summary"));
    }

    String hdfsBaseDirectoryName = conf.get(HiWayConfiguration.HIWAY_AM_DIRECTORY_BASE,
            HiWayConfiguration.HIWAY_AM_DIRECTORY_BASE_DEFAULT);
    String hdfsSandboxDirectoryName = conf.get(HiWayConfiguration.HIWAY_AM_DIRECTORY_CACHE,
            HiWayConfiguration.HIWAY_AM_DIRECTORY_CACHE_DEFAULT);
    Path hdfsBaseDirectory = new Path(new Path(hdfs.getUri()), hdfsBaseDirectoryName);
    Data.setHdfsBaseDirectory(hdfsBaseDirectory);
    Path hdfsSandboxDirectory = new Path(hdfsBaseDirectory, hdfsSandboxDirectoryName);
    hdfsApplicationDirectory = new Path(hdfsSandboxDirectory, appId);
    Data.setHdfsApplicationDirectory(hdfsApplicationDirectory);
    Data.setHdfs(hdfs);

    Map<String, String> envs = System.getenv();

    if (!envs.containsKey(Environment.CONTAINER_ID.name())) {
        if (cliParser.hasOption("app_attempt_id")) {
            String appIdStr = cliParser.getOptionValue("app_attempt_id", "");
            appAttemptID = ConverterUtils.toApplicationAttemptId(appIdStr);
        } else {
            throw new IllegalArgumentException("Application Attempt Id not set in the environment");
        }
    } else {
        ContainerId containerId = ConverterUtils.toContainerId(envs.get(Environment.CONTAINER_ID.name()));
        appAttemptID = containerId.getApplicationAttemptId();
    }

    if (!envs.containsKey(ApplicationConstants.APP_SUBMIT_TIME_ENV)) {
        throw new RuntimeException(ApplicationConstants.APP_SUBMIT_TIME_ENV + " not set in the environment");
    }
    if (!envs.containsKey(Environment.NM_HOST.name())) {
        throw new RuntimeException(Environment.NM_HOST.name() + " not set in the environment");
    }
    if (!envs.containsKey(Environment.NM_HTTP_PORT.name())) {
        throw new RuntimeException(Environment.NM_HTTP_PORT.name() + " not set in the environment");
    }
    if (!envs.containsKey(Environment.NM_PORT.name())) {
        throw new RuntimeException(Environment.NM_PORT.name() + " not set in the environment");
    }

    System.out.println("Application master for app" + ", appId=" + appAttemptID.getApplicationId().getId()
            + ", clustertimestamp=" + appAttemptID.getApplicationId().getClusterTimestamp() + ", attemptId="
            + appAttemptID.getAttemptId());

    String[] shellEnvs = conf.getStrings(HiWayConfiguration.HIWAY_WORKER_SHELL_ENV,
            HiWayConfiguration.HIWAY_WORKER_SHELL_ENV_DEFAULT);
    for (String env : shellEnvs) {
        env = env.trim();
        int index = env.indexOf('=');
        if (index == -1) {
            shellEnv.put(env, "");
            continue;
        }
        String key = env.substring(0, index);
        String val = "";
        if (index < (env.length() - 1)) {
            val = env.substring(index + 1);
        }
        shellEnv.put(key, val);
    }

    if (!cliParser.hasOption("workflow")) {
        throw new IllegalArgumentException("No workflow file specified to be executed by application master");
    }

    workflowPath = new Path(cliParser.getOptionValue("workflow"));
    workflowFile = new Data(workflowPath);
    schedulerName = HiWayConfiguration.HIWAY_SCHEDULER_OPTS.valueOf(conf.get(HiWayConfiguration.HIWAY_SCHEDULER,
            HiWayConfiguration.HIWAY_SCHEDULER_DEFAULT.toString()));

    containerMemory = conf.getInt(HiWayConfiguration.HIWAY_WORKER_MEMORY,
            HiWayConfiguration.HIWAY_WORKER_MEMORY_DEFAULT);
    containerCores = conf.getInt(HiWayConfiguration.HIWAY_WORKER_VCORES,
            HiWayConfiguration.HIWAY_WORKER_VCORES_DEFAULT);
    requestPriority = conf.getInt(HiWayConfiguration.HIWAY_WORKER_PRIORITY,
            HiWayConfiguration.HIWAY_WORKER_PRIORITY_DEFAULT);
    return true;
}

From source file:de.huberlin.wbi.hiway.am.WorkflowDriver.java

License:Apache License

/** Parse command line arguments, initialize HDFS, manage environment variables. */
public boolean init(String[] args) throws ParseException, IOException, JSONException {

    DefaultMetricsSystem.initialize("ApplicationMaster");

    Options opts = new Options();
    opts.addOption("app_attempt_id", true, "App Attempt ID. Not to be used unless for testing purposes");
    opts.addOption("u", "summary", true,
            "The name of the json summary file. No file is created if this parameter is not specified.");
    opts.addOption("m", "memory", true,
            "The amount of memory (in MB) to be allocated per worker container. Overrides settings in hiway-site.xml.");
    opts.addOption("c", "custom", true,
            "The name of an (optional) JSON file, in which custom amounts of memory can be specified per task.");
    opts.addOption("s", "scheduler", true,
            "The scheduling policy that is to be employed. Valid arguments: "
                    + Arrays.toString(HiWayConfiguration.HIWAY_SCHEDULERS.values())
                    + ". Overrides settings in hiway-site.xml.");
    opts.addOption("d", "debug", false, "Provide additional logs and information for debugging");
    opts.addOption("v", "verbose", false, "Increase verbosity of output / reporting.");
    opts.addOption("appid", true, "Id of this Application Master.");

    opts.addOption("h", "help", false, "Print usage");
    CommandLine cliParser = new GnuParser().parse(opts, args);

    if (args.length == 0) {
        Logger.printUsage(opts);
        throw new IllegalArgumentException("No args specified for application master to initialize");
    }

    if (cliParser.getArgs().length == 0) {
        Logger.printUsage(opts);
        throw new IllegalArgumentException("No workflow file specified.");
    }

    if (!cliParser.hasOption("appid")) {
        throw new IllegalArgumentException("No id of Application Master specified");
    }

    if (cliParser.hasOption("verbose")) {
        HiWayConfiguration.verbose = true;
    }

    appId = cliParser.getOptionValue("appid");
    try {
        logger.statLog = new BufferedWriter(new FileWriter(appId + ".log"));
    } catch (IOException e) {
        e.printStackTrace(System.out);
        System.exit(-1);
    }

    if (cliParser.hasOption("help")) {
        Logger.printUsage(opts);
        return false;
    }

    if (cliParser.hasOption("debug")) {
        Logger.dumpOutDebugInfo();
        HiWayConfiguration.debug = true;
    }

    if (cliParser.hasOption("summary")) {
        summaryPath = new Path(cliParser.getOptionValue("summary"));
    }

    String hdfsBaseDirectoryName = conf.get(HiWayConfiguration.HIWAY_AM_DIRECTORY_BASE,
            HiWayConfiguration.HIWAY_AM_DIRECTORY_BASE_DEFAULT);
    String hdfsSandboxDirectoryName = conf.get(HiWayConfiguration.HIWAY_AM_DIRECTORY_CACHE,
            HiWayConfiguration.HIWAY_AM_DIRECTORY_CACHE_DEFAULT);
    Path hdfsBaseDirectory = new Path(new Path(hdfs.getUri()), hdfsBaseDirectoryName);
    Data.setHdfsBaseDirectory(hdfsBaseDirectory);
    Path hdfsSandboxDirectory = new Path(hdfsBaseDirectory, hdfsSandboxDirectoryName);
    hdfsApplicationDirectory = new Path(hdfsSandboxDirectory, appId);
    Data.setHdfsApplicationDirectory(hdfsApplicationDirectory);
    Data.setHdfs(hdfs);

    if (cliParser.hasOption("custom")) {
        Data customMemPath = new Data(cliParser.getOptionValue("custom"));
        customMemPath.stageIn();
        StringBuilder sb = new StringBuilder();
        try (BufferedReader in = new BufferedReader(new FileReader(customMemPath.getLocalPath().toString()))) {
            String line;
            while ((line = in.readLine()) != null) {
                sb.append(line);
            }
        }
        JSONObject obj = new JSONObject(sb.toString());
        Iterator<?> keys = obj.keys();
        while (keys.hasNext()) {
            String key = (String) keys.next();
            int minMem = conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
                    YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
            int desiredMem = obj.getInt(key);
            customMemoryMap.put(key,
                    (desiredMem % minMem) == 0 ? desiredMem : (desiredMem / minMem + 1) * minMem);
        }
    }

    Map<String, String> envs = System.getenv();

    /* this application's attempt id (combination of attemptId and fail count) */
    ApplicationAttemptId appAttemptID;
    if (!envs.containsKey(Environment.CONTAINER_ID.name())) {
        if (cliParser.hasOption("app_attempt_id")) {
            String appIdStr = cliParser.getOptionValue("app_attempt_id", "");
            appAttemptID = ConverterUtils.toApplicationAttemptId(appIdStr);
        } else {
            throw new IllegalArgumentException("Application Attempt Id not set in the environment");
        }
    } else {
        ContainerId containerId = ConverterUtils.toContainerId(envs.get(Environment.CONTAINER_ID.name()));
        appAttemptID = containerId.getApplicationAttemptId();
    }

    if (!envs.containsKey(ApplicationConstants.APP_SUBMIT_TIME_ENV)) {
        throw new RuntimeException(ApplicationConstants.APP_SUBMIT_TIME_ENV + " not set in the environment");
    }
    if (!envs.containsKey(Environment.NM_HOST.name())) {
        throw new RuntimeException(Environment.NM_HOST.name() + " not set in the environment");
    }
    if (!envs.containsKey(Environment.NM_HTTP_PORT.name())) {
        throw new RuntimeException(Environment.NM_HTTP_PORT.name() + " not set in the environment");
    }
    if (!envs.containsKey(Environment.NM_PORT.name())) {
        throw new RuntimeException(Environment.NM_PORT.name() + " not set in the environment");
    }

    Logger.writeToStdout("Application master for app" + ", appId=" + appAttemptID.getApplicationId().getId()
            + ", clustertimestamp=" + appAttemptID.getApplicationId().getClusterTimestamp() + ", attemptId="
            + appAttemptID.getAttemptId());

    String[] shellEnvs = conf.getStrings(HiWayConfiguration.HIWAY_WORKER_SHELL_ENV,
            HiWayConfiguration.HIWAY_WORKER_SHELL_ENV_DEFAULT);
    for (String env : shellEnvs) {
        env = env.trim();
        int index = env.indexOf('=');
        if (index == -1) {
            shellEnv.put(env, "");
            continue;
        }
        String key = env.substring(0, index);
        String val = "";
        if (index < (env.length() - 1)) {
            val = env.substring(index + 1);
        }
        shellEnv.put(key, val);
    }

    String workflowParam = cliParser.getArgs()[0];
    try {
        workflowPath = new Path(new URI(workflowParam).getPath());
    } catch (URISyntaxException e) {
        workflowPath = new Path(workflowParam);
    }

    schedulerEnumValue = HiWayConfiguration.HIWAY_SCHEDULERS.valueOf(conf
            .get(HiWayConfiguration.HIWAY_SCHEDULER, HiWayConfiguration.HIWAY_SCHEDULER_DEFAULT.toString()));
    if (cliParser.hasOption("scheduler")) {
        schedulerEnumValue = HiWayConfiguration.HIWAY_SCHEDULERS.valueOf(cliParser.getOptionValue("scheduler"));
    }

    containerMemory = conf.getInt(HiWayConfiguration.HIWAY_WORKER_MEMORY,
            HiWayConfiguration.HIWAY_WORKER_MEMORY_DEFAULT);
    if (cliParser.hasOption("memory")) {
        containerMemory = Integer.parseInt(cliParser.getOptionValue("memory"));
    }

    containerCores = conf.getInt(HiWayConfiguration.HIWAY_WORKER_VCORES,
            HiWayConfiguration.HIWAY_WORKER_VCORES_DEFAULT);
    requestPriority = conf.getInt(HiWayConfiguration.HIWAY_WORKER_PRIORITY,
            HiWayConfiguration.HIWAY_WORKER_PRIORITY_DEFAULT);

    // Create and start the timeline client
    if (conf.getBoolean("yarn.timeline-service.enabled", false)) {
        timelineClient = TimelineClient.createTimelineClient();
        timelineClient.init(conf);
        timelineClient.start();
        Logger.writeToStdout("Started TimeLineClient.");
    } else {
        Logger.writeToStdErr("TimeLineClient disabled.");
    }
    return true;
}

From source file:org.apache.kylin.common.metrics.metrics2.Metrics2Reporter.java

License:Apache License

public Metrics2Reporter(MetricRegistry registry, KylinConfig conf) {
    this.metricRegistry = registry;
    this.conf = conf;
    String applicationName = "kylin";

    reporter = HadoopMetrics2Reporter.forRegistry(metricRegistry).convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS).build(DefaultMetricsSystem.initialize(applicationName), // The application-level name
                    applicationName, // Component name
                    applicationName, // Component description
                    "General"); // Name for each metric record
}

From source file:org.apache.kylin.rest.metrics.QueryMetricsFacade.java

License:Apache License

public static void init() {
    enabled = KylinConfig.getInstanceFromEnv().getQueryMetricsEnabled();
    if (!enabled)
        return;

    DefaultMetricsSystem.initialize("Kylin");
}

From source file:org.apache.phoenix.metrics.MetricsManagerImpl.java

License:Apache License

@Override
public void initialize(String prefix) {
    this.system = DefaultMetricsSystem.initialize(prefix);
}

From source file:org.apache.phoenix.monitoring.GlobalMetricRegistriesAdapter.java

License:Apache License

private GlobalMetricRegistriesAdapter() {
    DefaultMetricsSystem.initialize("Phoenix");
    JvmMetrics.initSingleton("Phoenix", "");
}

From source file:org.apache.tez.dag.app.DAGAppMaster.java

License:Apache License

@Override
public synchronized void serviceStart() throws Exception {

    //start all the components
    startServices();
    super.serviceStart();

    if (versionMismatch) {
        // Short-circuit and return, as no DAG should be run
        this.taskSchedulerEventHandler.setShouldUnregisterFlag();
        shutdownHandler.shutdown();
        return;
    }

    // metrics system init is really init & start.
    // It's more test friendly to put it here.
    DefaultMetricsSystem.initialize("DAGAppMaster");

    this.appsStartTime = clock.getTime();
    AMStartedEvent startEvent = new AMStartedEvent(appAttemptID, appsStartTime,
            appMasterUgi.getShortUserName());
    historyEventHandler.handle(new DAGHistoryEvent(startEvent));

    this.lastDAGCompletionTime = clock.getTime();

    RecoveredDAGData recoveredDAGData;
    try {
        recoveredDAGData = recoverDAG();
    } catch (IOException e) {
        LOG.error("Error occurred when trying to recover data from previous attempt." + " Shutting down AM", e);
        this.state = DAGAppMasterState.ERROR;
        this.taskSchedulerEventHandler.setShouldUnregisterFlag();
        shutdownHandler.shutdown();
        return;
    }

    if (!isSession) {
        LOG.info("In Non-Session mode.");
    } else {
        LOG.info("In Session mode. Waiting for DAG over RPC");
        this.state = DAGAppMasterState.IDLE;
    }

    if (recoveredDAGData != null) {
        List<URL> classpathUrls = null;
        if (recoveredDAGData.cumulativeAdditionalResources != null) {
            classpathUrls = processAdditionalResources(recoveredDAGData.cumulativeAdditionalResources);
            amResources.putAll(recoveredDAGData.cumulativeAdditionalResources);
            cumulativeAdditionalResources.putAll(recoveredDAGData.cumulativeAdditionalResources);
        }

        if (recoveredDAGData.isCompleted || recoveredDAGData.nonRecoverable) {
            LOG.info("Found previous DAG in completed or non-recoverable state" + ", dagId="
                    + recoveredDAGData.recoveredDagID + ", isCompleted=" + recoveredDAGData.isCompleted
                    + ", isNonRecoverable=" + recoveredDAGData.nonRecoverable + ", state="
                    + (recoveredDAGData.dagState == null ? "null" : recoveredDAGData.dagState)
                    + ", failureReason=" + recoveredDAGData.reason);
            _updateLoggers(recoveredDAGData.recoveredDAG, "");
            if (recoveredDAGData.nonRecoverable) {
                DAGEventRecoverEvent recoverDAGEvent = new DAGEventRecoverEvent(
                        recoveredDAGData.recoveredDAG.getID(), DAGState.FAILED, classpathUrls);
                dagEventDispatcher.handle(recoverDAGEvent);
                this.state = DAGAppMasterState.RUNNING;
            } else {
                DAGEventRecoverEvent recoverDAGEvent = new DAGEventRecoverEvent(
                        recoveredDAGData.recoveredDAG.getID(), recoveredDAGData.dagState, classpathUrls);
                dagEventDispatcher.handle(recoverDAGEvent);
                this.state = DAGAppMasterState.RUNNING;
            }
        } else {
            LOG.info("Found DAG to recover, dagId=" + recoveredDAGData.recoveredDAG.getID());
            _updateLoggers(recoveredDAGData.recoveredDAG, "");
            DAGEventRecoverEvent recoverDAGEvent = new DAGEventRecoverEvent(
                    recoveredDAGData.recoveredDAG.getID(), classpathUrls);
            dagEventDispatcher.handle(recoverDAGEvent);
            this.state = DAGAppMasterState.RUNNING;
        }
    } else {
        if (!isSession) {
            // No dag recovered - in non-session, just restart the original DAG
            dagCounter.set(0);
            startDAG();
        }
    }

    if (isSession) {
        this.dagSubmissionTimer = new Timer(true);
        this.dagSubmissionTimer.scheduleAtFixedRate(new TimerTask() {
            @Override
            public void run() {
                checkAndHandleSessionTimeout();
            }
        }, sessionTimeoutInterval, sessionTimeoutInterval / 10);
    }
}

From source file:org.apache.tez.runtime.task.TezChild.java

License:Apache License

public static TezChild newTezChild(Configuration conf, String host, int port, String containerIdentifier,
        String tokenIdentifier, int attemptNumber, String[] localDirs, String workingDirectory,
        Map<String, String> serviceProviderEnvMap, @Nullable String pid, ExecutionContext executionContext,
        Credentials credentials, long memAvailable, String user)
        throws IOException, InterruptedException, TezException {

    // Pull in configuration specified for the session.
    // TODO TEZ-1233. This needs to be moved over the wire rather than localizing the file
    // for each and every task, and reading it back from disk. Also needs to be per vertex.
    Limits.setConfiguration(conf);

    // Should this be part of main - Metrics and ObjectRegistry. TezTask setup should be independent
    // of this class. Leaving it here, till there's some entity representing a running JVM.
    DefaultMetricsSystem.initialize("TezTask");

    // singleton of ObjectRegistry for this JVM
    ObjectRegistryImpl objectRegistry = new ObjectRegistryImpl();

    return new TezChild(conf, host, port, containerIdentifier, tokenIdentifier, attemptNumber, workingDirectory,
            localDirs, serviceProviderEnvMap, objectRegistry, pid, executionContext, credentials, memAvailable,
            user);
}