Example usage for java.lang Thread setDefaultUncaughtExceptionHandler

Introduction

On this page you can find example usage for java.lang Thread setDefaultUncaughtExceptionHandler.

Prototype

public static void setDefaultUncaughtExceptionHandler(UncaughtExceptionHandler eh) 

Document

Set the default handler invoked when a thread abruptly terminates due to an uncaught exception, and no other handler has been defined for that thread.
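
Before the per-project examples below, here is a minimal, self-contained sketch of registering a default handler. The class name and thread name are illustrative only and do not come from any of the source files listed on this page.

public class DefaultHandlerExample {
    public static void main(String[] args) throws InterruptedException {
        // Install a JVM-wide fallback for threads that terminate with an uncaught
        // exception and have no per-thread or ThreadGroup handler of their own.
        Thread.setDefaultUncaughtExceptionHandler((thread, throwable) ->
                System.err.println("Uncaught exception in " + thread.getName() + ": " + throwable));

        // This worker throws; the default handler above receives the exception.
        Thread worker = new Thread(() -> {
            throw new IllegalStateException("boom");
        }, "worker-1");
        worker.start();
        worker.join();
    }
}

The handler runs on the failing thread itself just before it terminates, so it cannot resurrect that thread; this is why the examples below typically either restart their work from inside the handler or exit the JVM.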

Usage

From source file:grakn.core.server.Grakn.java

public static void main(String[] args) {
    Thread.setDefaultUncaughtExceptionHandler(
            (Thread t, Throwable e) -> LOG.error(ErrorMessage.UNCAUGHT_EXCEPTION.getMessage(t.getName()), e));

    try {
        String graknPidFileProperty = Optional.ofNullable(SystemProperty.GRAKN_PID_FILE.value()).orElseThrow(
                () -> new RuntimeException(ErrorMessage.GRAKN_PIDFILE_SYSTEM_PROPERTY_UNDEFINED.getMessage()));

        Path pidfile = Paths.get(graknPidFileProperty);
        PIDManager pidManager = new PIDManager(pidfile);
        pidManager.trackGraknPid();

        // Start Server with timer
        Stopwatch timer = Stopwatch.createStarted();
        boolean benchmark = parseBenchmarkArg(args);
        Server server = ServerFactory.createServer(benchmark);
        server.start();

        LOG.info("Grakn started in {}", timer.stop());
    } catch (RuntimeException | IOException e) {
        LOG.error(ErrorMessage.UNCAUGHT_EXCEPTION.getMessage(e.getMessage()), e);
        System.err.println(ErrorMessage.UNCAUGHT_EXCEPTION.getMessage(e.getMessage()));
    }
}

From source file:sample.SampleApp.java

public static void main(String[] args) throws InterruptedException {
    final Log logger = LogFactory.getLog(SampleApp.class);

    //create AWSCloudTrailProcessingExecutor and start it
    final AWSCloudTrailProcessingExecutor executor = new AWSCloudTrailProcessingExecutor.Builder(
            new SampleEventsProcessor(), "/sample/awscloudtrailprocessinglibrary.properties")
                    .withSourceFilter(new SampleSourceFilter()).withEventFilter(new SampleEventFilter())
                    .withProgressReporter(new SampleProgressReporter())
                    .withExceptionHandler(new SampleExceptionHandler()).build();
    executor.start();

    // add shut down hook to gracefully stop executor (optional)
    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            logger.info("Shut Down Hook is called.");
            executor.stop();
        }
    });

    // register a Default Uncaught Exception Handler (optional)
    Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        @Override
        public void uncaughtException(Thread t, Throwable e) {
            logger.error("Handled by global Exception handler. " + e.getMessage() + " " + t.getName());

            //Two options here:
            //First, we can call System.exit(1); in that case the shutdown hook will be invoked.
            //Second, we can optionally create and start another executor.
            final AWSCloudTrailProcessingExecutor executor = new AWSCloudTrailProcessingExecutor.Builder(
                    new SampleEventsProcessor(), "/sample/awscloudtrailprocessinglibrary.properties")
                            .withSourceFilter(new SampleSourceFilter()).withEventFilter(new SampleEventFilter())
                            .withProgressReporter(new SampleProgressReporter())
                            .withExceptionHandler(new SampleExceptionHandler()).build();
            executor.start();
        }
    });

    //Optionally limit the running time; remove these two lines to run forever.
    Thread.sleep(24 * 60 * 60 * 1000);
    executor.stop();
}

From source file:com.arpnetworking.clusteraggregator.Main.java

/**
 * Entry point.
 *
 * @param args command line arguments
 */
public static void main(final String[] args) {
    Thread.setDefaultUncaughtExceptionHandler((thread, throwable) -> {
        LOGGER.error().setMessage("Unhandled exception!").setThrowable(throwable).log();
    });

    Thread.currentThread().setUncaughtExceptionHandler((thread, throwable) -> {
        LOGGER.error().setMessage("Unhandled exception!").setThrowable(throwable).log();
    });

    LOGGER.info().setMessage("Launching cluster-aggregator").log();

    Runtime.getRuntime().addShutdownHook(SHUTDOWN_THREAD);

    if (args.length != 1) {
        throw new RuntimeException("No configuration file specified");
    }

    LOGGER.debug().setMessage("Loading configuration from file").addData("file", args[0]).log();

    Optional<DynamicConfiguration> configuration = Optional.absent();
    Optional<Configurator<Main, ClusterAggregatorConfiguration>> configurator = Optional.absent();
    try {
        final File configurationFile = new File(args[0]);
        configurator = Optional.of(new Configurator<>(Main::new, ClusterAggregatorConfiguration.class));
        final ObjectMapper objectMapper = ClusterAggregatorConfiguration.createObjectMapper();
        configuration = Optional.of(new DynamicConfiguration.Builder().setObjectMapper(objectMapper)
                .addSourceBuilder(new JsonNodeFileSource.Builder().setObjectMapper(objectMapper)
                        .setFile(configurationFile))
                .addTrigger(new FileTrigger.Builder().setFile(configurationFile).build())
                .addListener(configurator.get()).build());

        configuration.get().launch();

        // Wait for application shutdown
        SHUTDOWN_SEMAPHORE.acquire();
    } catch (final InterruptedException e) {
        throw Throwables.propagate(e);
    } finally {
        if (configurator.isPresent()) {
            configurator.get().shutdown();
        }
        if (configuration.isPresent()) {
            configuration.get().shutdown();
        }
        // Notify the shutdown that we're done
        SHUTDOWN_SEMAPHORE.release();
    }
}

From source file:es.emergya.Main.java

/**
 * Starts the app.
 * 
 * @param args
 */
public static void main(final String[] args) {
    try {
        Thread.setDefaultUncaughtExceptionHandler(new UncaughtExceptionHandler() {

            @Override
            public void uncaughtException(final Thread t, final Throwable e) {
                if (LOG.isTraceEnabled()) {
                    // "Excepcion descontrolada en" = "Uncaught exception in"
                    LOG.trace("Excepcion descontrolada en " + t.toString(), e);
                } else {
                    LOG.error("Excepcion descontrolada en " + t.toString() + " :: " + e.toString(), e);
                }
            }
        });
    } catch (Throwable t) {
        LOG.error(t, t);
    }

    try {
        TimeZone.setDefault(TimeZone.getTimeZone("Europe/Madrid")); //$NON-NLS-1$
        new Initializer().run();
    } catch (Throwable t) {
        LOG.error("Fallo el SwingUtilities.invokeLater", t);
    }
}

From source file:com.arpnetworking.tsdaggregator.TsdAggregator.java

/**
 * Entry point for Time Series Data (TSD) Aggregator.
 *
 * @param args the command line arguments
 */
public static void main(final String[] args) {
    LOGGER.info("Launching tsd-aggregator");

    // Global initialization
    Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        @Override
        public void uncaughtException(final Thread thread, final Throwable throwable) {
            LOGGER.error("Unhandled exception!", throwable);
        }
    });

    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        @Override
        public void run() {
            final LoggerContext context = (LoggerContext) LoggerFactory.getILoggerFactory();
            context.stop();
        }
    }));

    System.setProperty("org.vertx.logger-delegate-factory-class-name",
            "org.vertx.java.core.logging.impl.SLF4JLogDelegateFactory");

    // Run the tsd aggregator
    if (args.length != 1) {
        throw new RuntimeException("No configuration file specified");
    }
    LOGGER.debug(String.format("Loading configuration from file; file=%s", args[0]));

    final File configurationFile = new File(args[0]);
    final Configurator<TsdAggregator, TsdAggregatorConfiguration> configurator = new Configurator<>(
            TsdAggregator.class, TsdAggregatorConfiguration.class);
    final ObjectMapper objectMapper = TsdAggregatorConfiguration.createObjectMapper();
    final DynamicConfiguration configuration = new DynamicConfiguration.Builder().setObjectMapper(objectMapper)
            .addSourceBuilder(
                    new JsonNodeFileSource.Builder().setObjectMapper(objectMapper).setFile(configurationFile))
            .addTrigger(new FileTrigger.Builder().setFile(configurationFile).build()).addListener(configurator)
            .build();

    configuration.launch();

    final AtomicBoolean isRunning = new AtomicBoolean(true);
    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        @Override
        public void run() {
            LOGGER.info("Stopping tsd-aggregator");
            configuration.shutdown();
            configurator.shutdown();
            isRunning.set(false);
        }
    }));

    while (isRunning.get()) {
        try {
            Thread.sleep(30000);
        } catch (final InterruptedException e) {
            break;
        }
    }

    LOGGER.info("Exiting tsd-aggregator");
}

From source file:com.spotify.heroic.HeroicShell.java

public static void main(String[] args) throws IOException {
    HeroicLogging.configure();

    Thread.setDefaultUncaughtExceptionHandler((t, e) -> {
        try {
            log.error("Uncaught exception in thread {}, exiting...", t, e);
        } finally {
            System.exit(1);
        }
    });

    final Parameters params = new Parameters();
    final CmdLineParser parser = setupParser(params);
    final ParsedArguments parsed = ParsedArguments.parse(args);

    try {
        parser.parseArgument(parsed.primary);
    } catch (CmdLineException e) {
        log.error("Argument error", e);
        System.exit(1);
        return;
    }

    if (params.help()) {
        parser.printUsage(System.out);
        System.out.println();
        HeroicModules.printAllUsage(System.out, "-P");
        System.exit(0);
        return;
    }

    final AsyncFramework async = TinyAsync.builder().executor(Executors.newSingleThreadExecutor()).build();

    if (parsed.child.isEmpty()) {
        final CoreInterface bridge;

        try {
            bridge = setupCoreBridge(params, async);
        } catch (Exception e) {
            log.error("Failed to setup core bridge", e);
            System.exit(1);
            return;
        }
        try {
            interactive(params, bridge);
        } catch (Exception e) {
            log.error("Error when running shell", e);
            System.exit(1);
        }
        System.exit(0);
        return;
    }

    final HeroicCore.Builder builder = setupBuilder(params);

    try {
        standalone(parsed.child, builder);
    } catch (Exception e) {
        log.error("Failed to run standalone task", e);
    }

    System.exit(0);
}

From source file:org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer.java

public static void main(String[] args) {
    Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
    StringUtils.startupShutdownMessage(WebAppProxyServer.class, args, LOG);
    try {
        YarnConfiguration configuration = new YarnConfiguration();
        WebAppProxyServer proxyServer = startServer(configuration);
        proxyServer.proxy.join();
    } catch (Throwable t) {
        LOG.fatal("Error starting Proxy server", t);
        System.exit(-1);
    }
}

From source file:org.apache.hadoop.mapred.YarnTezDagChild.java

public static void main(String[] args) throws Throwable {
    Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
    if (LOG.isDebugEnabled()) {
        LOG.debug("Child starting");
    }

    final Configuration defaultConf = new Configuration();
    // Security settings will be loaded based on core-site and core-default. Don't
    // depend on the jobConf for this.
    UserGroupInformation.setConfiguration(defaultConf);
    Limits.setConfiguration(defaultConf);

    assert args.length == 5;
    String host = args[0];
    int port = Integer.parseInt(args[1]);
    final InetSocketAddress address = NetUtils.createSocketAddrForHost(host, port);
    final String containerIdentifier = args[2];
    final String tokenIdentifier = args[3];
    final int attemptNumber = Integer.parseInt(args[4]);
    if (LOG.isDebugEnabled()) {
        LOG.info("Info from cmd line: AM-host: " + host + " AM-port: " + port + " containerIdentifier: "
                + containerIdentifier + " attemptNumber: " + attemptNumber + " tokenIdentifier: "
                + tokenIdentifier);
    }
    // FIXME fix initialize metrics in child runner
    DefaultMetricsSystem.initialize("VertexTask");

    // Security framework already loaded the tokens into current ugi
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();

    if (LOG.isDebugEnabled()) {
        LOG.info("Executing with tokens:");
        for (Token<?> token : credentials.getAllTokens()) {
            LOG.info(token);
        }
    }

    // Create TaskUmbilicalProtocol as actual task owner.
    UserGroupInformation taskOwner = UserGroupInformation.createRemoteUser(tokenIdentifier);

    Token<JobTokenIdentifier> jobToken = TokenCache.getJobToken(credentials);
    SecurityUtil.setTokenService(jobToken, address);
    taskOwner.addToken(jobToken);
    final TezTaskUmbilicalProtocol umbilical = taskOwner
            .doAs(new PrivilegedExceptionAction<TezTaskUmbilicalProtocol>() {
                @Override
                public TezTaskUmbilicalProtocol run() throws Exception {
                    return (TezTaskUmbilicalProtocol) RPC.getProxy(TezTaskUmbilicalProtocol.class,
                            TezTaskUmbilicalProtocol.versionID, address, defaultConf);
                }
            });

    // report non-pid to application master
    String pid = System.getenv().get("JVM_PID");
    if (LOG.isDebugEnabled()) {
        LOG.debug("PID, containerId: " + pid + ", " + containerIdentifier);
    }
    TezEngineTaskContext taskContext = null;
    ContainerTask containerTask = null;
    UserGroupInformation childUGI = null;
    TezTaskAttemptID taskAttemptId = null;
    ContainerContext containerContext = new ContainerContext(containerIdentifier, pid);
    int getTaskMaxSleepTime = defaultConf.getInt(TezConfiguration.TEZ_TASK_GET_TASK_SLEEP_INTERVAL_MS_MAX,
            TezConfiguration.TEZ_TASK_GET_TASK_SLEEP_INTERVAL_MS_MAX_DEFAULT);

    try {
        while (true) {
            // poll for new task
            for (int idle = 0; null == containerTask; ++idle) {
                long sleepTimeMilliSecs = Math.min(idle * 100, getTaskMaxSleepTime);
                LOG.info("Sleeping for " + sleepTimeMilliSecs + "ms before retrying again. Got null now.");
                MILLISECONDS.sleep(sleepTimeMilliSecs);
                containerTask = umbilical.getTask(containerContext);
            }
            LOG.info("TaskInfo: shouldDie: " + containerTask.shouldDie() + (containerTask.shouldDie() == true
                    ? ""
                    : ", taskAttemptId: " + containerTask.getTezEngineTaskContext().getTaskAttemptId()));

            if (containerTask.shouldDie()) {
                return;
            }
            taskContext = (TezEngineTaskContext) containerTask.getTezEngineTaskContext();
            if (LOG.isDebugEnabled()) {
                LOG.debug("New container task context:" + taskContext.toString());
            }
            taskAttemptId = taskContext.getTaskAttemptId();

            final Task t = createAndConfigureTezTask(taskContext, umbilical, credentials, jobToken,
                    attemptNumber);

            final Configuration conf = ((RuntimeTask) t).getConfiguration();

            // TODO Initiate Java VM metrics
            // JvmMetrics.initSingleton(containerId.toString(), job.getSessionId());
            childUGI = UserGroupInformation
                    .createRemoteUser(System.getenv(ApplicationConstants.Environment.USER.toString()));
            // Add tokens to new user so that it may execute its task correctly.
            childUGI.addCredentials(credentials);

            childUGI.doAs(new PrivilegedExceptionAction<Object>() {
                @Override
                public Object run() throws Exception {
                    runTezTask(t, umbilical, conf); // run the task
                    return null;
                }
            });
            FileSystem.closeAllForUGI(childUGI);
            containerTask = null;
        }
    } catch (FSError e) {
        LOG.fatal("FSError from child", e);
        umbilical.fsError(taskAttemptId, e.getMessage());
    } catch (Throwable throwable) {
        LOG.fatal("Error running child : " + StringUtils.stringifyException(throwable));
        if (taskAttemptId != null) {
            Throwable tCause = throwable.getCause();
            String cause = tCause == null ? throwable.getMessage() : StringUtils.stringifyException(tCause);
            umbilical.fatalError(taskAttemptId, cause);
        }
    } finally {
        RPC.stopProxy(umbilical);
        DefaultMetricsSystem.shutdown();
        // Shutting down log4j of the child-vm...
        // This assumes that on return from Task.run()
        // there is no more logging done.
        LogManager.shutdown();
    }
}

From source file:com.amazon.kinesis.streaming.agent.Agent.java

public static void main(String[] args) throws Exception {
    AgentOptions opts = AgentOptions.parse(args);
    String configFile = opts.getConfigFile();
    AgentConfiguration config = tryReadConfigurationFile(Paths.get(opts.getConfigFile()));
    Path logFile = opts.getLogFile() != null ? Paths.get(opts.getLogFile())
            : (config != null ? config.logFile() : null);
    String logLevel = opts.getLogLevel() != null ? opts.getLogLevel()
            : (config != null ? config.logLevel() : null);
    int logMaxBackupFileIndex = (config != null ? config.logMaxBackupIndex() : -1);
    long logMaxFileSize = (config != null ? config.logMaxFileSize() : -1L);
    Logging.initialize(logFile, logLevel, logMaxBackupFileIndex, logMaxFileSize);
    final Logger logger = Logging.getLogger(Agent.class);

    // Install an unhandled exception hook
    Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        @Override
        public void uncaughtException(Thread t, Throwable e) {
            if (e instanceof OutOfMemoryError) {
                // This prevents the JVM from hanging in case of an OOME
                dontShutdownOnExit = true;
            }
            String msg = "FATAL: Thread " + t.getName() + " threw an unrecoverable error. Aborting application";
            try {
                try { // We don't know if logging is still working
                    logger.error(msg, e);
                } finally {
                    System.err.println(msg);
                    e.printStackTrace();
                }
            } finally {
                System.exit(1);
            }
        }
    });

    try {
        logger.info("Reading configuration from file: {}", configFile);
        if (config == null) {
            config = readConfigurationFile(Paths.get(opts.getConfigFile()));
        }
        // Initialize and start the agent
        AgentContext agentContext = new AgentContext(config);
        if (agentContext.flows().isEmpty()) {
            throw new ConfigurationException("There are no flows configured in configuration file.");
        }
        final Agent agent = new Agent(agentContext);

        // Make sure everything terminates cleanly when process is killed
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                if (!dontShutdownOnExit && agent.isRunning()) {
                    agent.stopAsync();
                    agent.awaitTerminated();
                }
            }
        });

        agent.startAsync();
        agent.awaitRunning();
        agent.awaitTerminated();
    } catch (Exception e) {
        logger.error("Unhandled error.", e);
        System.err.println("Unhandled error.");
        e.printStackTrace();
        System.exit(1);
    }
}

From source file:org.apache.hadoop.mapred.YarnChild.java

public static void main(String[] args) throws Throwable {
    Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
    LOG.debug("Child starting");

    final JobConf job = new JobConf(MRJobConfig.JOB_CONF_FILE);
    // Initing with our JobConf allows us to avoid loading confs twice
    Limits.init(job);
    UserGroupInformation.setConfiguration(job);

    String host = args[0];
    int port = Integer.parseInt(args[1]);
    final InetSocketAddress address = NetUtils.createSocketAddrForHost(host, port);
    final TaskAttemptID firstTaskid = TaskAttemptID.forName(args[2]);
    long jvmIdLong = Long.parseLong(args[3]);
    JVMId jvmId = new JVMId(firstTaskid.getJobID(), firstTaskid.getTaskType() == TaskType.MAP, jvmIdLong);

    CallerContext.setCurrent(new CallerContext.Builder("mr_" + firstTaskid.toString()).build());

    // initialize metrics
    DefaultMetricsSystem.initialize(StringUtils.camelize(firstTaskid.getTaskType().name()) + "Task");

    // Security framework already loaded the tokens into current ugi
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    LOG.info("Executing with tokens:");
    for (Token<?> token : credentials.getAllTokens()) {
        LOG.info(token);
    }

    // Create TaskUmbilicalProtocol as actual task owner.
    UserGroupInformation taskOwner = UserGroupInformation.createRemoteUser(firstTaskid.getJobID().toString());
    Token<JobTokenIdentifier> jt = TokenCache.getJobToken(credentials);
    SecurityUtil.setTokenService(jt, address);
    taskOwner.addToken(jt);
    final TaskUmbilicalProtocol umbilical = taskOwner
            .doAs(new PrivilegedExceptionAction<TaskUmbilicalProtocol>() {
                @Override
                public TaskUmbilicalProtocol run() throws Exception {
                    return (TaskUmbilicalProtocol) RPC.getProxy(TaskUmbilicalProtocol.class,
                            TaskUmbilicalProtocol.versionID, address, job);
                }
            });

    // report non-pid to application master
    JvmContext context = new JvmContext(jvmId, "-1000");
    LOG.debug("PID: " + System.getenv().get("JVM_PID"));
    Task task = null;
    UserGroupInformation childUGI = null;
    ScheduledExecutorService logSyncer = null;

    try {
        int idleLoopCount = 0;
        JvmTask myTask = null;
        // poll for new task
        for (int idle = 0; null == myTask; ++idle) {
            long sleepTimeMilliSecs = Math.min(idle * 500, 1500);
            LOG.info("Sleeping for " + sleepTimeMilliSecs + "ms before retrying again. Got null now.");
            MILLISECONDS.sleep(sleepTimeMilliSecs);
            myTask = umbilical.getTask(context);
        }
        if (myTask.shouldDie()) {
            return;
        }

        task = myTask.getTask();
        YarnChild.taskid = task.getTaskID();

        // Create the job-conf and set credentials
        configureTask(job, task, credentials, jt);

        // log the system properties
        String systemPropsToLog = MRApps.getSystemPropertiesToLog(job);
        if (systemPropsToLog != null) {
            LOG.info(systemPropsToLog);
        }

        // Initiate Java VM metrics
        JvmMetrics.initSingleton(jvmId.toString(), job.getSessionId());
        childUGI = UserGroupInformation
                .createRemoteUser(System.getenv(ApplicationConstants.Environment.USER.toString()));
        // Add tokens to new user so that it may execute its task correctly.
        childUGI.addCredentials(credentials);

        // set job classloader if configured before invoking the task
        MRApps.setJobClassLoader(job);

        logSyncer = TaskLog.createLogSyncer();

        // Create a final reference to the task for the doAs block
        final Task taskFinal = task;
        childUGI.doAs(new PrivilegedExceptionAction<Object>() {
            @Override
            public Object run() throws Exception {
                // use job-specified working directory
                setEncryptedSpillKeyIfRequired(taskFinal);
                FileSystem.get(job).setWorkingDirectory(job.getWorkingDirectory());
                taskFinal.run(job, umbilical); // run the task
                return null;
            }
        });
    } catch (FSError e) {
        LOG.fatal("FSError from child", e);
        if (!ShutdownHookManager.get().isShutdownInProgress()) {
            umbilical.fsError(taskid, e.getMessage());
        }
    } catch (Exception exception) {
        LOG.warn("Exception running child : " + StringUtils.stringifyException(exception));
        try {
            if (task != null) {
                // do cleanup for the task
                if (childUGI == null) { // no need to go into doAs block
                    task.taskCleanup(umbilical);
                } else {
                    final Task taskFinal = task;
                    childUGI.doAs(new PrivilegedExceptionAction<Object>() {
                        @Override
                        public Object run() throws Exception {
                            taskFinal.taskCleanup(umbilical);
                            return null;
                        }
                    });
                }
            }
        } catch (Exception e) {
            LOG.info("Exception cleaning up: " + StringUtils.stringifyException(e));
        }
        // Report back any failures, for diagnostic purposes
        if (taskid != null) {
            if (!ShutdownHookManager.get().isShutdownInProgress()) {
                umbilical.fatalError(taskid, StringUtils.stringifyException(exception));
            }
        }
    } catch (Throwable throwable) {
        LOG.fatal("Error running child : " + StringUtils.stringifyException(throwable));
        if (taskid != null) {
            if (!ShutdownHookManager.get().isShutdownInProgress()) {
                Throwable tCause = throwable.getCause();
                String cause = tCause == null ? throwable.getMessage() : StringUtils.stringifyException(tCause);
                umbilical.fatalError(taskid, cause);
            }
        }
    } finally {
        RPC.stopProxy(umbilical);
        DefaultMetricsSystem.shutdown();
        TaskLog.syncLogsShutdown(logSyncer);
    }
}