Example usage for java.lang Thread setUncaughtExceptionHandler

List of usage examples for java.lang Thread setUncaughtExceptionHandler

Introduction

On this page you can find example usages of java.lang Thread setUncaughtExceptionHandler.

Prototype

public void setUncaughtExceptionHandler(UncaughtExceptionHandler eh) 

Document

Set the handler invoked when this thread abruptly terminates due to an uncaught exception.
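
Before the project examples below, here is a minimal, self-contained sketch of the call. The class name, thread name, exception, and message wording are illustrative assumptions, not taken from any of the projects listed here.

public class UncaughtHandlerExample {
    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            // simulate a failure that escapes run()
            throw new IllegalStateException("simulated failure");
        }, "worker-1");

        // the handler runs on the dying thread itself, after run() has terminated abruptly
        worker.setUncaughtExceptionHandler((thread, throwable) ->
                System.err.println("Thread " + thread.getName() + " died: " + throwable));

        worker.start();
        worker.join();
    }
}

Installing the handler before start(), as most of the examples below do, ensures it is in place before the thread has a chance to fail.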

Usage

From source file:org.apache.zeppelin.interpreter.InterpreterSetting.java

public void close() {
    LOGGER.info("Close InterpreterSetting: " + name);
    List<Thread> closeThreads = interpreterGroups.values().stream()
            .map(g -> new Thread(g::close, name + "-close"))
            .peek(t -> t
                    .setUncaughtExceptionHandler((th, e) -> LOGGER.error("InterpreterSetting close error", e)))
            .peek(Thread::start).collect(Collectors.toList());
    interpreterGroups.clear();
    for (Thread t : closeThreads) {
        try {
            t.join();
        } catch (InterruptedException e) {
            LOGGER.error("Can't wait InterpreterSetting close threads", e);
            Thread.currentThread().interrupt();
            break;
        }
    }
}

From source file:org.apache.xml.security.stax.impl.processor.input.AbstractDecryptInputProcessor.java

private XMLSecEvent processEvent(InputProcessorChain inputProcessorChain, boolean isSecurityHeaderEvent)
        throws XMLStreamException, XMLSecurityException {

    if (!tmpXmlEventList.isEmpty()) {
        return tmpXmlEventList.pollLast();
    }

    XMLSecEvent xmlSecEvent = isSecurityHeaderEvent ? inputProcessorChain.processHeaderEvent()
            : inputProcessorChain.processEvent();

    boolean encryptedHeader = false;

    if (xmlSecEvent.getEventType() == XMLStreamConstants.START_ELEMENT) {
        XMLSecStartElement xmlSecStartElement = xmlSecEvent.asStartElement();

        //buffer the events until the EncryptedData Element appears and discard it if we found the reference inside it
        //otherwise replay it
        if (xmlSecStartElement.getName().equals(XMLSecurityConstants.TAG_wsse11_EncryptedHeader)) {
            xmlSecEvent = readAndBufferEncryptedHeader(inputProcessorChain, isSecurityHeaderEvent, xmlSecEvent);
            xmlSecStartElement = xmlSecEvent.asStartElement();
            encryptedHeader = true;
        }

        //check if the current start-element has the name EncryptedData and an Id attribute
        if (xmlSecStartElement.getName().equals(XMLSecurityConstants.TAG_xenc_EncryptedData)) {
            ReferenceType referenceType = null;
            if (references != null) {
                referenceType = matchesReferenceId(xmlSecStartElement);
                if (referenceType == null) {
                    //if the events were not for us (no matching reference-id), then we have to replay the EncryptedHeader elements
                    if (!tmpXmlEventList.isEmpty()) {
                        return tmpXmlEventList.pollLast();
                    }
                    return xmlSecEvent;
                }
                //duplicate id's are forbidden
                if (processedReferences.contains(referenceType)) {
                    throw new XMLSecurityException("signature.Verification.MultipleIDs");
                }

                processedReferences.add(referenceType);
            }
            tmpXmlEventList.clear();

            //the following logic reads the encryptedData structure and doesn't pass them further
            //through the chain
            InputProcessorChain subInputProcessorChain = inputProcessorChain.createSubChain(this);

            EncryptedDataType encryptedDataType = parseEncryptedDataStructure(isSecurityHeaderEvent,
                    xmlSecEvent, subInputProcessorChain);
            if (encryptedDataType.getId() == null) {
                encryptedDataType.setId(IDGenerator.generateID(null));
            }

            InboundSecurityToken inboundSecurityToken = getSecurityToken(inputProcessorChain,
                    xmlSecStartElement, encryptedDataType);
            handleSecurityToken(inboundSecurityToken, inputProcessorChain.getSecurityContext(),
                    encryptedDataType);

            final String algorithmURI = encryptedDataType.getEncryptionMethod().getAlgorithm();
            final int ivLength = JCEAlgorithmMapper.getIVLengthFromURI(algorithmURI) / 8;
            Cipher symCipher = getCipher(algorithmURI);

            if (encryptedDataType.getCipherData().getCipherReference() != null) {
                handleCipherReference(inputProcessorChain, encryptedDataType, symCipher, inboundSecurityToken);
                subInputProcessorChain.reset();
                return isSecurityHeaderEvent ? subInputProcessorChain.processHeaderEvent()
                        : subInputProcessorChain.processEvent();
            }

            //create a new Thread for streaming decryption
            DecryptionThread decryptionThread = new DecryptionThread(subInputProcessorChain,
                    isSecurityHeaderEvent);
            Key decryptionKey = inboundSecurityToken.getSecretKey(algorithmURI, XMLSecurityConstants.Enc,
                    encryptedDataType.getId());
            decryptionKey = XMLSecurityUtils.prepareSecretKey(algorithmURI, decryptionKey.getEncoded());
            decryptionThread.setSecretKey(decryptionKey);
            decryptionThread.setSymmetricCipher(symCipher);
            decryptionThread.setIvLength(ivLength);
            XMLSecStartElement parentXMLSecStartElement = xmlSecStartElement.getParentXMLSecStartElement();
            if (encryptedHeader) {
                parentXMLSecStartElement = parentXMLSecStartElement.getParentXMLSecStartElement();
            }
            AbstractDecryptedEventReaderInputProcessor decryptedEventReaderInputProcessor = newDecryptedEventReaderInputProcessor(
                    encryptedHeader, parentXMLSecStartElement, encryptedDataType, inboundSecurityToken,
                    inputProcessorChain.getSecurityContext());

            //add the new created EventReader processor to the chain.
            inputProcessorChain.addProcessor(decryptedEventReaderInputProcessor);

            inputProcessorChain.getDocumentContext().setIsInEncryptedContent(
                    inputProcessorChain.getProcessors().indexOf(decryptedEventReaderInputProcessor),
                    decryptedEventReaderInputProcessor);

            //fire here only ContentEncryptedElementEvents
            //the other ones will be fired later, because we don't know the encrypted element name yet
            //important: this must occur after setIsInEncryptedContent!
            if (SecurePart.Modifier.Content.getModifier().equals(encryptedDataType.getType())) {
                handleEncryptedContent(inputProcessorChain, xmlSecStartElement.getParentXMLSecStartElement(),
                        inboundSecurityToken, encryptedDataType);
            }

            Thread thread = new Thread(decryptionThread);
            thread.setPriority(Thread.NORM_PRIORITY + 1);
            thread.setName("decryption thread");
            //when an exception in the decryption thread occurs, we want to forward them:
            thread.setUncaughtExceptionHandler(decryptedEventReaderInputProcessor);

            decryptedEventReaderInputProcessor.setDecryptionThread(thread);

            //we have to start the thread before we call decryptionThread.getPipedInputStream().
            //Otherwise we will end up in a deadlock, because the StAX reader already expects data.
            //@See some lines below:
            log.debug("Starting decryption thread");
            thread.start();

            InputStream prologInputStream;
            InputStream epilogInputStream;
            try {
                prologInputStream = writeWrapperStartElement(xmlSecStartElement);
                epilogInputStream = writeWrapperEndElement();
            } catch (UnsupportedEncodingException e) {
                throw new XMLSecurityException(e);
            } catch (IOException e) {
                throw new XMLSecurityException(e);
            }

            InputStream decryptInputStream = decryptionThread.getPipedInputStream();
            decryptInputStream = applyTransforms(referenceType, decryptInputStream);

            //spec says (4.2): "The cleartext octet sequence obtained in step 3 is
            //interpreted as UTF-8 encoded character data."
            XMLStreamReader xmlStreamReader = inputProcessorChain.getSecurityContext()
                    .<XMLInputFactory>get(XMLSecurityConstants.XMLINPUTFACTORY).createXMLStreamReader(
                            new MultiInputStream(prologInputStream, decryptInputStream, epilogInputStream),
                            "UTF-8");

            //forward to wrapper element
            forwardToWrapperElement(xmlStreamReader);

            decryptedEventReaderInputProcessor.setXmlStreamReader(xmlStreamReader);

            if (isSecurityHeaderEvent) {
                return decryptedEventReaderInputProcessor.processNextHeaderEvent(inputProcessorChain);
            } else {
                return decryptedEventReaderInputProcessor.processNextEvent(inputProcessorChain);
            }
        }
    }
    return xmlSecEvent;
}

From source file:org.pentaho.di.job.entries.hadoopjobexecutor.JobEntryHadoopJobExecutor.java

public Result execute(final Result result, int arg1) throws KettleException {
    result.setNrErrors(0);

    Log4jFileAppender appender = null;
    String logFileName = "pdi-" + this.getName(); //$NON-NLS-1$

    try {
        appender = LogWriter.createFileAppender(logFileName, true, false);
        LogWriter.getInstance().addAppender(appender);
        log.setLogLevel(parentJob.getLogLevel());
    } catch (Exception e) {
        logError(BaseMessages.getString(PKG, "JobEntryHadoopJobExecutor.FailedToOpenLogFile", logFileName, //$NON-NLS-1$
                e.toString()));
        logError(Const.getStackTracker(e));
    }

    try {
        URL resolvedJarUrl = resolveJarUrl(jarUrl);
        if (log.isDetailed()) {
            logDetailed(BaseMessages.getString(PKG, "JobEntryHadoopJobExecutor.ResolvedJar",
                    resolvedJarUrl.toExternalForm()));
        }
        HadoopShim shim = getHadoopConfiguration().getHadoopShim();

        if (isSimple) {
            String simpleLoggingIntervalS = environmentSubstitute(getSimpleLoggingInterval());
            int simpleLogInt = 60;
            try {
                simpleLogInt = Integer.parseInt(simpleLoggingIntervalS, 10);
            } catch (NumberFormatException e) {
                logError(BaseMessages.getString(PKG, "ErrorParsingLogInterval", simpleLoggingIntervalS,
                        simpleLogInt));
            }

            final Class<?> mainClass = locateDriverClass(resolvedJarUrl, shim);

            if (log.isDetailed()) {
                logDetailed(BaseMessages.getString(PKG, "JobEntryHadoopJobExecutor.UsingDriverClass",
                        mainClass == null ? "null" : mainClass.getName()));
                logDetailed(BaseMessages.getString(PKG, "JobEntryHadoopJobExecutor.SimpleMode"));
            }
            final AtomicInteger threads = new AtomicInteger(1);
            final NoExitSecurityManager nesm = new NoExitSecurityManager(System.getSecurityManager());
            smStack.setSecurityManager(nesm);
            try {
                Runnable r = new Runnable() {
                    public void run() {
                        try {
                            try {
                                executeMainMethod(mainClass);
                            } finally {
                                restoreSecurityManager(threads, nesm);
                            }
                        } catch (NoExitSecurityManager.NoExitSecurityException ex) {
                            // Only log if we're blocking and waiting for this to complete
                            if (simpleBlocking) {
                                logExitStatus(result, mainClass, ex);
                            }
                        } catch (InvocationTargetException ex) {
                            if (ex.getTargetException() instanceof NoExitSecurityManager.NoExitSecurityException) {
                                // Only log if we're blocking and waiting for this to complete
                                if (simpleBlocking) {
                                    logExitStatus(result, mainClass,
                                            (NoExitSecurityManager.NoExitSecurityException) ex
                                                    .getTargetException());
                                }
                            } else {
                                throw new RuntimeException(ex);
                            }
                        } catch (Exception ex) {
                            throw new RuntimeException(ex);
                        }
                    }
                };
                Thread t = new Thread(r);
                t.setDaemon(true);
                t.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
                    @Override
                    public void uncaughtException(Thread t, Throwable e) {
                        restoreSecurityManager(threads, nesm);
                        if (simpleBlocking) {
                            // Only log if we're blocking and waiting for this to complete
                            logError(BaseMessages.getString(JobEntryHadoopJobExecutor.class,
                                    "JobEntryHadoopJobExecutor.ErrorExecutingClass", mainClass.getName()), e);
                            result.setResult(false);
                        }
                    }
                });
                nesm.addBlockedThread(t);
                t.start();
                if (simpleBlocking) {
                    // wait until the thread is done
                    do {
                        logDetailed(BaseMessages.getString(JobEntryHadoopJobExecutor.class,
                                "JobEntryHadoopJobExecutor.Blocking", mainClass.getName()));
                        t.join(simpleLogInt * 1000);
                    } while (!parentJob.isStopped() && t.isAlive());
                    if (t.isAlive()) {
                        // Kill thread if it's still running. The job must have been stopped.
                        t.interrupt();
                    }
                }
            } finally {
                // If we're not performing simple blocking, spawn a watchdog thread to restore the security manager when all
                // threads are complete
                if (!simpleBlocking) {
                    Runnable threadWatchdog = new Runnable() {
                        @Override
                        public void run() {
                            while (threads.get() > 0) {
                                try {
                                    Thread.sleep(100);
                                } catch (InterruptedException e) {
                                    /* ignore */
                                }
                            }
                            restoreSecurityManager(threads, nesm);
                        }
                    };
                    Thread watchdog = new Thread(threadWatchdog);
                    watchdog.setDaemon(true);
                    watchdog.start();
                }
            }
        } else {
            if (log.isDetailed()) {
                logDetailed(BaseMessages.getString(PKG, "JobEntryHadoopJobExecutor.AdvancedMode"));
            }
            Configuration conf = shim.createConfiguration();
            FileSystem fs = shim.getFileSystem(conf);
            URL[] urls = new URL[] { resolvedJarUrl };
            URLClassLoader loader = new URLClassLoader(urls, shim.getClass().getClassLoader());
            String hadoopJobNameS = environmentSubstitute(hadoopJobName);
            conf.setJobName(hadoopJobNameS);

            String outputKeyClassS = environmentSubstitute(outputKeyClass);
            conf.setOutputKeyClass(loader.loadClass(outputKeyClassS));
            String outputValueClassS = environmentSubstitute(outputValueClass);
            conf.setOutputValueClass(loader.loadClass(outputValueClassS));

            if (mapperClass != null) {
                String mapperClassS = environmentSubstitute(mapperClass);
                Class<?> mapper = loader.loadClass(mapperClassS);
                conf.setMapperClass(mapper);
            }
            if (combinerClass != null) {
                String combinerClassS = environmentSubstitute(combinerClass);
                Class<?> combiner = loader.loadClass(combinerClassS);
                conf.setCombinerClass(combiner);
            }
            if (reducerClass != null) {
                String reducerClassS = environmentSubstitute(reducerClass);
                Class<?> reducer = loader.loadClass(reducerClassS);
                conf.setReducerClass(reducer);
            }

            if (inputFormatClass != null) {
                String inputFormatClassS = environmentSubstitute(inputFormatClass);
                Class<?> inputFormat = loader.loadClass(inputFormatClassS);
                conf.setInputFormat(inputFormat);
            }
            if (outputFormatClass != null) {
                String outputFormatClassS = environmentSubstitute(outputFormatClass);
                Class<?> outputFormat = loader.loadClass(outputFormatClassS);
                conf.setOutputFormat(outputFormat);
            }

            String hdfsHostnameS = environmentSubstitute(hdfsHostname);
            String hdfsPortS = environmentSubstitute(hdfsPort);
            String jobTrackerHostnameS = environmentSubstitute(jobTrackerHostname);
            String jobTrackerPortS = environmentSubstitute(jobTrackerPort);

            List<String> configMessages = new ArrayList<String>();
            shim.configureConnectionInformation(hdfsHostnameS, hdfsPortS, jobTrackerHostnameS, jobTrackerPortS,
                    conf, configMessages);
            for (String m : configMessages) {
                logBasic(m);
            }

            String inputPathS = environmentSubstitute(inputPath);
            String[] inputPathParts = inputPathS.split(",");
            List<Path> paths = new ArrayList<Path>();
            for (String path : inputPathParts) {
                paths.add(fs.asPath(conf.getDefaultFileSystemURL(), path));
            }
            Path[] finalPaths = paths.toArray(new Path[paths.size()]);

            conf.setInputPaths(finalPaths);
            String outputPathS = environmentSubstitute(outputPath);
            conf.setOutputPath(fs.asPath(conf.getDefaultFileSystemURL(), outputPathS));

            // process user defined values
            for (UserDefinedItem item : userDefined) {
                if (item.getName() != null && !"".equals(item.getName()) && item.getValue() != null
                        && !"".equals(item.getValue())) {
                    String nameS = environmentSubstitute(item.getName());
                    String valueS = environmentSubstitute(item.getValue());
                    conf.set(nameS, valueS);
                }
            }

            conf.setJar(environmentSubstitute(jarUrl));

            String numMapTasksS = environmentSubstitute(numMapTasks);
            String numReduceTasksS = environmentSubstitute(numReduceTasks);
            int numM = 1;
            try {
                numM = Integer.parseInt(numMapTasksS);
            } catch (NumberFormatException e) {
                logError("Can't parse number of map tasks '" + numMapTasksS + "'. Setting num"
                        + "map tasks to 1");
            }
            int numR = 1;
            try {
                numR = Integer.parseInt(numReduceTasksS);
            } catch (NumberFormatException e) {
                logError("Can't parse number of reduce tasks '" + numReduceTasksS + "'. Setting num"
                        + "reduce tasks to 1");
            }

            conf.setNumMapTasks(numM);
            conf.setNumReduceTasks(numR);

            RunningJob runningJob = shim.submitJob(conf);

            String loggingIntervalS = environmentSubstitute(getLoggingInterval());
            int logIntv = 60;
            try {
                logIntv = Integer.parseInt(loggingIntervalS);
            } catch (NumberFormatException e) {
                logError(BaseMessages.getString(PKG, "ErrorParsingLogInterval", loggingIntervalS, logIntv));
            }
            if (blocking) {
                try {
                    int taskCompletionEventIndex = 0;
                    while (!parentJob.isStopped() && !runningJob.isComplete()) {
                        if (logIntv >= 1) {
                            printJobStatus(runningJob);
                            taskCompletionEventIndex = logTaskMessages(runningJob, taskCompletionEventIndex);
                            Thread.sleep(logIntv * 1000);
                        } else {
                            Thread.sleep(60000);
                        }
                    }

                    if (parentJob.isStopped() && !runningJob.isComplete()) {
                        // We must stop the job running on Hadoop
                        runningJob.killJob();
                        // Indicate this job entry did not complete
                        result.setResult(false);
                    }

                    printJobStatus(runningJob);
                    // Log any messages we may have missed while polling
                    logTaskMessages(runningJob, taskCompletionEventIndex);
                } catch (InterruptedException ie) {
                    logError(ie.getMessage(), ie);
                }

                // Entry is successful if the MR job is successful overall
                result.setResult(runningJob.isSuccessful());
            }

        }
    } catch (Throwable t) {
        t.printStackTrace();
        result.setStopped(true);
        result.setNrErrors(1);
        result.setResult(false);
        logError(t.getMessage(), t);
    }

    if (appender != null) {
        LogWriter.getInstance().removeAppender(appender);
        appender.close();

        ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_LOG, appender.getFile(),
                parentJob.getJobname(), getName());
        result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
    }

    return result;
}

From source file:org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher.java

@Override
public PigStats launchPig(PhysicalPlan php, String grpName, PigContext pc)
        throws PlanException, VisitorException, IOException, ExecException, JobCreationException, Exception {
    long sleepTime = 500;
    aggregateWarning = Boolean.valueOf(pc.getProperties().getProperty("aggregate.warning"));
    MROperPlan mrp = compile(php, pc);

    ConfigurationValidator.validatePigProperties(pc.getProperties());
    Configuration conf = ConfigurationUtil.toConfiguration(pc.getProperties());

    MRExecutionEngine exe = (MRExecutionEngine) pc.getExecutionEngine();
    Properties defaultProperties = new Properties();
    JobConf defaultJobConf = exe.getLocalConf();
    Utils.recomputeProperties(defaultJobConf, defaultProperties);

    // This is a generic JobClient for checking progress of the jobs
    JobClient statsJobClient = new JobClient(exe.getJobConf());

    JobControlCompiler jcc = new JobControlCompiler(pc, conf,
            ConfigurationUtil.toConfiguration(defaultProperties));

    MRScriptState.get().addWorkflowAdjacenciesToConf(mrp, conf);

    // start collecting statistics
    PigStats.start(pc.getExecutionEngine().instantiatePigStats());
    MRPigStatsUtil.startCollection(pc, statsJobClient, jcc, mrp);

    // Find all the intermediate data stores. The plan will be destroyed during compile/execution
    // so this needs to be done before.
    MRIntermediateDataVisitor intermediateVisitor = new MRIntermediateDataVisitor(mrp);
    intermediateVisitor.visit();

    List<Job> failedJobs = new LinkedList<Job>();
    List<NativeMapReduceOper> failedNativeMR = new LinkedList<NativeMapReduceOper>();
    List<Job> completeFailedJobsInThisRun = new LinkedList<Job>();
    List<Job> succJobs = new LinkedList<Job>();
    int totalMRJobs = mrp.size();
    int numMRJobsCompl = 0;
    double lastProg = -1;
    long scriptSubmittedTimestamp = System.currentTimeMillis();

    //create the exception handler for the job control thread
    //and register the handler with the job control thread
    JobControlThreadExceptionHandler jctExceptionHandler = new JobControlThreadExceptionHandler();

    boolean stop_on_failure = Boolean.valueOf(pc.getProperties().getProperty("stop.on.failure", "false"));

    // jc is null only when mrp.size == 0
    while (mrp.size() != 0) {
        jc = jcc.compile(mrp, grpName);
        if (jc == null) {
            List<MapReduceOper> roots = new LinkedList<MapReduceOper>();
            roots.addAll(mrp.getRoots());

            // run the native mapreduce roots first then run the rest of the roots
            for (MapReduceOper mro : roots) {
                if (mro instanceof NativeMapReduceOper) {
                    NativeMapReduceOper natOp = (NativeMapReduceOper) mro;
                    try {
                        MRScriptState.get().emitJobsSubmittedNotification(1);
                        natOp.runJob();
                        numMRJobsCompl++;
                    } catch (IOException e) {

                        mrp.trimBelow(natOp);
                        failedNativeMR.add(natOp);

                        String msg = "Error running native mapreduce" + " operator job :" + natOp.getJobId()
                                + e.getMessage();

                        String stackTrace = Utils.getStackStraceStr(e);
                        LogUtils.writeLog(msg, stackTrace, pc.getProperties().getProperty("pig.logfile"), log);
                        log.info(msg);

                        if (stop_on_failure) {
                            int errCode = 6017;

                            throw new ExecException(msg, errCode, PigException.REMOTE_ENVIRONMENT);
                        }

                    }
                    double prog = ((double) numMRJobsCompl) / totalMRJobs;
                    notifyProgress(prog, lastProg);
                    lastProg = prog;
                    mrp.remove(natOp);
                }
            }
            continue;
        }
        // Initially, all jobs are in wait state.
        List<Job> jobsWithoutIds = jc.getWaitingJobs();
        log.info(jobsWithoutIds.size() + " map-reduce job(s) waiting for submission.");
        //notify listeners about jobs submitted
        MRScriptState.get().emitJobsSubmittedNotification(jobsWithoutIds.size());

        // update Pig stats' job DAG with just compiled jobs
        MRPigStatsUtil.updateJobMroMap(jcc.getJobMroMap());

        // determine job tracker url
        String jobTrackerLoc;
        JobConf jobConf = jobsWithoutIds.get(0).getJobConf();
        try {
            String port = jobConf.get(MRConfiguration.JOB_TRACKER_HTTP_ADDRESS);
            String jobTrackerAdd = jobConf.get(MRConfiguration.JOB_TRACKER);

            jobTrackerLoc = jobTrackerAdd.substring(0, jobTrackerAdd.indexOf(":"))
                    + port.substring(port.indexOf(":"));
        } catch (Exception e) {
            // Could not get the job tracker location, most probably we are running in local mode.
            // If it is the case, we don't print out job tracker location,
            // because it is meaningless for local mode.
            jobTrackerLoc = null;
            log.debug("Failed to get job tracker location.");
        }

        completeFailedJobsInThisRun.clear();

        // Set the thread UDFContext so registered classes are available.
        final UDFContext udfContext = UDFContext.getUDFContext();
        Thread jcThread = new Thread(jc, "JobControl") {
            @Override
            public void run() {
                UDFContext.setUdfContext(udfContext.clone()); //PIG-2576
                super.run();
            }
        };

        jcThread.setUncaughtExceptionHandler(jctExceptionHandler);

        jcThread.setContextClassLoader(PigContext.getClassLoader());

        // mark the times that the jobs were submitted so it's reflected in job history props
        for (Job job : jc.getWaitingJobs()) {
            JobConf jobConfCopy = job.getJobConf();
            jobConfCopy.set("pig.script.submitted.timestamp", Long.toString(scriptSubmittedTimestamp));
            jobConfCopy.set("pig.job.submitted.timestamp", Long.toString(System.currentTimeMillis()));
            job.setJobConf(jobConfCopy);
        }

        //All the setup done, now let's launch the jobs.
        jcThread.start();

        try {
            // a flag whether to warn failure during the loop below, so users can notice failure earlier.
            boolean warn_failure = true;

            // Now wait, till we are finished.
            while (!jc.allFinished()) {

                try {
                    jcThread.join(sleepTime);
                } catch (InterruptedException e) {
                }

                List<Job> jobsAssignedIdInThisRun = new ArrayList<Job>();

                for (Job job : jobsWithoutIds) {
                    if (job.getAssignedJobID() != null) {

                        jobsAssignedIdInThisRun.add(job);
                        log.info("HadoopJobId: " + job.getAssignedJobID());

                        // display the aliases being processed
                        MapReduceOper mro = jcc.getJobMroMap().get(job);
                        if (mro != null) {
                            String alias = MRScriptState.get().getAlias(mro);
                            log.info("Processing aliases " + alias);
                            String aliasLocation = MRScriptState.get().getAliasLocation(mro);
                            log.info("detailed locations: " + aliasLocation);
                        }

                        if (!HadoopShims.isHadoopYARN() && jobTrackerLoc != null) {
                            log.info("More information at: http://" + jobTrackerLoc + "/jobdetails.jsp?jobid="
                                    + job.getAssignedJobID());
                        }

                        // update statistics for this job so jobId is set
                        MRPigStatsUtil.addJobStats(job);
                        MRScriptState.get().emitJobStartedNotification(job.getAssignedJobID().toString());
                    } else {
                        // This job is not assigned an id yet.
                    }
                }
                jobsWithoutIds.removeAll(jobsAssignedIdInThisRun);

                double prog = (numMRJobsCompl + calculateProgress(jc)) / totalMRJobs;
                if (notifyProgress(prog, lastProg)) {
                    List<Job> runnJobs = jc.getRunningJobs();
                    if (runnJobs != null) {
                        StringBuilder msg = new StringBuilder();
                        for (Object object : runnJobs) {
                            Job j = (Job) object;
                            if (j != null) {
                                msg.append(j.getAssignedJobID()).append(",");
                            }
                        }
                        if (msg.length() > 0) {
                            msg.setCharAt(msg.length() - 1, ']');
                            log.info("Running jobs are [" + msg);
                        }
                    }
                    lastProg = prog;
                }

                // collect job stats by frequently polling of completed jobs (PIG-1829)
                MRPigStatsUtil.accumulateStats(jc);

                // if stop_on_failure is enabled, we need to stop immediately when any job has failed
                checkStopOnFailure(stop_on_failure);
                // otherwise, we just display a warning message if there's any failure
                if (warn_failure && !jc.getFailedJobs().isEmpty()) {
                    // we don't warn again for this group of jobs
                    warn_failure = false;
                    log.warn("Ooops! Some job has failed! Specify -stop_on_failure if you "
                            + "want Pig to stop immediately on failure.");
                }
            }

            //check for the jobControlException first
            //if the job controller fails before launching the jobs then there are
            //no jobs to check for failure
            if (jobControlException != null) {
                if (jobControlException instanceof PigException) {
                    if (jobControlExceptionStackTrace != null) {
                        LogUtils.writeLog("Error message from job controller", jobControlExceptionStackTrace,
                                pc.getProperties().getProperty("pig.logfile"), log);
                    }
                    throw jobControlException;
                } else {
                    int errCode = 2117;
                    String msg = "Unexpected error when launching map reduce job.";
                    throw new ExecException(msg, errCode, PigException.BUG, jobControlException);
                }
            }

            if (!jc.getFailedJobs().isEmpty()) {
                // stop if stop_on_failure is enabled
                checkStopOnFailure(stop_on_failure);

                // If we only have one store and that job fails, then we are sure
                // that the job failed completely, and we should stop dependent jobs
                for (Job job : jc.getFailedJobs()) {
                    completeFailedJobsInThisRun.add(job);
                    log.info("job " + job.getAssignedJobID() + " has failed! Stop running all dependent jobs");
                }
                failedJobs.addAll(jc.getFailedJobs());
            }

            int removedMROp = jcc.updateMROpPlan(completeFailedJobsInThisRun);

            numMRJobsCompl += removedMROp;

            List<Job> jobs = jc.getSuccessfulJobs();
            jcc.moveResults(jobs);
            succJobs.addAll(jobs);

            // collecting final statistics
            MRPigStatsUtil.accumulateStats(jc);

        } catch (Exception e) {
            throw e;
        } finally {
            jc.stop();
        }
    }

    MRScriptState.get().emitProgressUpdatedNotification(100);

    log.info("100% complete");

    boolean failed = false;

    if (failedNativeMR.size() > 0) {
        failed = true;
    }

    if (Boolean.valueOf(pc.getProperties().getProperty(PigConfiguration.PIG_DELETE_TEMP_FILE, "true"))) {
        // Clean up all the intermediate data
        for (String path : intermediateVisitor.getIntermediate()) {
            // Skip non-file system paths such as hbase, see PIG-3617
            if (HadoopShims.hasFileSystemImpl(new Path(path), conf)) {
                FileLocalizer.delete(path, pc);
            }
        }
    }

    // Look to see if any jobs failed.  If so, we need to report that.
    if (failedJobs != null && failedJobs.size() > 0) {

        Exception backendException = null;
        for (Job fj : failedJobs) {
            try {
                getStats(fj, true, pc);
            } catch (Exception e) {
                backendException = e;
            }
            List<POStore> sts = jcc.getStores(fj);
            for (POStore st : sts) {
                failureMap.put(st.getSFile(), backendException);
            }
            MRPigStatsUtil.setBackendException(fj, backendException);
        }
        failed = true;
    }

    // stats collection is done, log the results
    MRPigStatsUtil.stopCollection(true);

    // PigStatsUtil.stopCollection also computes the return code based on
    // total jobs to run, jobs successful and jobs failed
    failed = failed || !PigStats.get().isSuccessful();

    Map<Enum, Long> warningAggMap = new HashMap<Enum, Long>();

    if (succJobs != null) {
        for (Job job : succJobs) {
            List<POStore> sts = jcc.getStores(job);
            for (POStore st : sts) {
                if (Utils.isLocal(pc, job.getJobConf())) {
                    HadoopShims.storeSchemaForLocal(job, st);
                }

                if (!st.isTmpStore()) {
                    // create an "_SUCCESS" file in output location if
                    // output location is a filesystem dir
                    createSuccessFile(job, st);
                } else {
                    log.debug("Successfully stored result in: \"" + st.getSFile().getFileName() + "\"");
                }
            }

            getStats(job, false, pc);
            if (aggregateWarning) {
                computeWarningAggregate(job, warningAggMap);
            }
        }

    }

    if (aggregateWarning) {
        CompilationMessageCollector.logAggregate(warningAggMap, MessageType.Warning, log);
    }

    if (!failed) {
        log.info("Success!");
    } else {
        if (succJobs != null && succJobs.size() > 0) {
            log.info("Some jobs have failed! Stop running all dependent jobs");
        } else {
            log.info("Failed!");
        }
    }
    jcc.reset();

    int ret = failed
            ? ((succJobs != null && succJobs.size() > 0) ? ReturnCode.PARTIAL_FAILURE : ReturnCode.FAILURE)
            : ReturnCode.SUCCESS;

    PigStats pigStats = PigStatsUtil.getPigStats(ret);
    // run cleanup for all of the stores
    for (OutputStats output : pigStats.getOutputStats()) {
        POStore store = output.getPOStore();
        try {
            if (!output.isSuccessful()) {
                store.getStoreFunc().cleanupOnFailure(store.getSFile().getFileName(),
                        new org.apache.hadoop.mapreduce.Job(output.getConf()));
            } else {
                store.getStoreFunc().cleanupOnSuccess(store.getSFile().getFileName(),
                        new org.apache.hadoop.mapreduce.Job(output.getConf()));
            }
        } catch (IOException e) {
            throw new ExecException(e);
        } catch (AbstractMethodError nsme) {
            // Just swallow it.  This means we're running against an
            // older instance of a StoreFunc that doesn't implement
            // this method.
        }
    }
    return pigStats;
}

From source file:org.jshybugger.server.DebugServer.java

/**
 * Instantiates a new debug server.
 * @param debugPort the tcp listen port number
 * @throws IOException 
 */
public DebugServer(final int debugPort) throws IOException {

    Thread webServerThread = new Thread(new Runnable() {

        @Override
        public void run() {

            debugSessionsHandler = new DebugSessionsWebSocketHandler(DebugServer.this);
            webServer = WebServers.createWebServer(debugPort).add("/", getRootHandler())
                    .add("/json/version", getVersionHandler()).add("/json", getJsonHandler())
                    .add("/devtools/page/.*", debugSessionsHandler);

            webServer.connectionExceptionHandler(new Thread.UncaughtExceptionHandler() {
                @Override
                public void uncaughtException(Thread t, Throwable e) {
                    Log.e(TAG, "Debug server terminated unexpected", e);
                }
            });

            // increase max content length: the default 65k is sometimes too small
            ((NettyWebServer) webServer).maxContentLength(131072);

            Log.i(TAG, "starting debug server on port: " + debugPort);
            webServer.start();

            debugServerStarted.countDown();
        }

    });

    webServerThread.start();
    webServerThread.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        @Override
        public void uncaughtException(Thread t, Throwable e) {
            Log.e(TAG, "Bootstraping debug server terminated unexpected", e);
        }
    });
}

From source file:edu.brown.benchmark.auctionmark.AuctionMarkLoader.java

/**
 * Called by the benchmark framework to load the table data.
 */
@Override
public void runLoop() {
    final EventObservableExceptionHandler handler = new EventObservableExceptionHandler();
    final List<Thread> threads = new ArrayList<Thread>();
    for (AbstractTableGenerator generator : this.generators.values()) {
        // if (isSubGenerator(generator)) continue;
        Thread t = new Thread(generator);
        t.setUncaughtExceptionHandler(handler);
        threads.add(t);
        // Make sure we call init first before starting any thread
        generator.init();
    } // FOR
    assert (threads.size() > 0);
    handler.addObserver(new EventObserver<Pair<Thread, Throwable>>() {
        @Override
        public void update(EventObservable<Pair<Thread, Throwable>> o, Pair<Thread, Throwable> t) {
            for (Thread thread : threads)
                thread.interrupt();
        }
    });

    // Construct a new thread to load each table
    // Fire off the threads and wait for them to complete
    // If debug is set to true, then we'll execute them serially
    try {
        for (Thread t : threads) {
            t.start();
        } // FOR
        for (Thread t : threads) {
            t.join();
        } // FOR
    } catch (InterruptedException e) {
        LOG.fatal("Unexpected error", e);
    } finally {
        if (handler.hasError()) {
            throw new RuntimeException("Error while generating table data.", handler.getError());
        }
    }

    profile.saveProfile(this);
    LOG.info("Finished generating data for all tables");
    if (debug.val)
        LOG.debug("Table Sizes:\n" + this.getTableTupleCounts());
}

From source file:com.oltpbenchmark.benchmarks.auctionmark.AuctionMarkLoader.java

@Override
public void load() {
    if (LOG.isDebugEnabled())
        LOG.debug(String.format("Starting loader [scaleFactor=%.2f]", profile.getScaleFactor()));

    final EventObservableExceptionHandler handler = new EventObservableExceptionHandler();
    final List<Thread> threads = new ArrayList<Thread>();
    for (AbstractTableGenerator generator : this.generators.values()) {
        // if (isSubGenerator(generator)) continue;
        Thread t = new Thread(generator);
        t.setName(generator.getTableName());
        t.setUncaughtExceptionHandler(handler);

        // Call init() before we start!
        // This will setup non-data related dependencies
        generator.init();

        threads.add(t);
    } // FOR
    assert (threads.size() > 0);
    handler.addObserver(new EventObserver<Pair<Thread, Throwable>>() {
        @Override
        public void update(EventObservable<Pair<Thread, Throwable>> o, Pair<Thread, Throwable> t) {
            fail = true;
            for (Thread thread : threads)
                thread.interrupt();
            t.second.printStackTrace();
        }
    });

    // Construct a new thread to load each table
    // Fire off the threads and wait for them to complete
    // If debug is set to true, then we'll execute them serially
    try {
        for (Thread t : threads) {
            t.start();
        } // FOR
        for (Thread t : threads) {
            t.join();
        } // FOR
    } catch (InterruptedException e) {
        LOG.fatal("Unexpected error", e);
    } finally {
        if (handler.hasError()) {
            throw new RuntimeException("Error while generating table data.", handler.getError());
        }
    }

    // Save the benchmark profile out to disk so that we can send it
    // to all of the clients
    try {
        profile.saveProfile(this.conn);
    } catch (SQLException ex) {
        throw new RuntimeException("Failed to save profile information in database", ex);
    }
    LOG.info("Finished generating data for all tables");
}

From source file:org.apache.samza.storage.ContainerStorageManager.java

private void startSideInputs() {

    LOG.info("SideInput Restore started");

    // initialize the sideInputStorageManagers
    getSideInputStorageManagers().forEach(sideInputStorageManager -> sideInputStorageManager.init());

    // start the checkpointing thread at the commit-ms frequency
    sideInputsFlushFuture = sideInputsFlushExecutor.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            try {
                getSideInputStorageManagers()
                        .forEach(sideInputStorageManager -> sideInputStorageManager.flush());
            } catch (Exception e) {
                LOG.error("Exception during flushing side inputs", e);
                sideInputException = Optional.of(e);
            }
        }
    }, 0, new TaskConfig(config).getCommitMs(), TimeUnit.MILLISECONDS);

    // set the latch to the number of sideInput SSPs
    this.sideInputsCaughtUp = new CountDownLatch(this.sideInputStorageManagers.keySet().size());

    // register all side input SSPs with the consumers
    for (SystemStreamPartition ssp : sideInputStorageManagers.keySet()) {
        String startingOffset = sideInputStorageManagers.get(ssp).getStartingOffset(ssp);

        if (startingOffset == null) {
            throw new SamzaException("No offset defined for SideInput SystemStreamPartition : " + ssp);
        }

        // register startingOffset with the sysConsumer and register a metric for it
        sideInputSystemConsumers.register(ssp, startingOffset, null);
        taskInstanceMetrics.get(sideInputStorageManagers.get(ssp).getTaskName()).addOffsetGauge(ssp,
                ScalaJavaUtil
                        .toScalaFunction(() -> sideInputStorageManagers.get(ssp).getLastProcessedOffset(ssp)));

        SystemStreamMetadata systemStreamMetadata = streamMetadataCache
                .getSystemStreamMetadata(ssp.getSystemStream(), false);
        SystemStreamMetadata.SystemStreamPartitionMetadata sspMetadata = (systemStreamMetadata == null) ? null
                : systemStreamMetadata.getSystemStreamPartitionMetadata().get(ssp.getPartition());

        // record a copy of the sspMetadata, to later check if it's caught up
        initialSideInputSSPMetadata.put(ssp, sspMetadata);

        // check if the ssp is caught up to upcoming, even at start
        checkSideInputCaughtUp(ssp, startingOffset, SystemStreamMetadata.OffsetType.UPCOMING, false);
    }

    // start the systemConsumers for consuming input
    this.sideInputSystemConsumers.start();

    // create a thread for sideInput reads
    Thread readSideInputs = new Thread(() -> {
        while (!shutDownSideInputRead) {
            IncomingMessageEnvelope envelope = sideInputSystemConsumers.choose(true);
            if (envelope != null) {

                if (!envelope.isEndOfStream())
                    sideInputStorageManagers.get(envelope.getSystemStreamPartition()).process(envelope);

                checkSideInputCaughtUp(envelope.getSystemStreamPartition(), envelope.getOffset(),
                        SystemStreamMetadata.OffsetType.NEWEST, envelope.isEndOfStream());

            } else {
                LOG.trace("No incoming message was available");
            }
        }
    });

    readSideInputs.setDaemon(true);
    readSideInputs.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        @Override
        public void uncaughtException(Thread t, Throwable e) {
            sideInputException = Optional.of(e);
            sideInputsCaughtUp.countDown();
        }
    });

    try {
        readSideInputs.start();
        // Make the main thread wait until all sideInputs have caught up or thrown an exception
        this.sideInputsCaughtUp.await();

        if (sideInputException.isPresent()) { // Throw exception if there was an exception in catching-up sideInputs
            // TODO: SAMZA-2113 relay exception to main thread
            throw new SamzaException("Exception in restoring side inputs", sideInputException.get());
        }
    } catch (InterruptedException e) {
        sideInputException = Optional.of(e);
        throw new SamzaException("Side inputs read was interrupted", e);
    }

    LOG.info("SideInput Restore complete");
}

From source file:org.cesecore.certificates.ca.internal.CaCertificateCacheTest.java

@Test
public void test02loadCertificates() throws Exception {
    Collection<Certificate> certs = new ArrayList<Certificate>();
    X509Certificate testrootcert = CertTools.getCertfromByteArray(testroot, X509Certificate.class);
    certs.add(testrootcert);
    X509Certificate testrootnewcert = CertTools.getCertfromByteArray(testrootnew, X509Certificate.class);
    certs.add(testrootnewcert);
    X509Certificate testsubcert = CertTools.getCertfromByteArray(testsub, X509Certificate.class);
    certs.add(testsubcert);
    Certificate testcvccert = CertTools.getCertfromByteArray(testcvc, Certificate.class);
    certs.add(testcvccert);
    X509Certificate testscepcert = CertTools.getCertfromByteArray(testscepca, X509Certificate.class);
    certs.add(testscepcert);
    CaCertificateCache cache = CaCertificateCache.INSTANCE;
    cache.loadCertificates(certs);

    Thread no1 = new Thread(new CacheTester(cache, CertTools.getSubjectDN(testscepcert)), "no1"); // NOPMD we want to use thread here, it's not a JEE app
    Thread no2 = new Thread(new CacheTester(cache, CertTools.getSubjectDN(testrootcert)), "no2"); // NOPMD we want to use thread here, it's not a JEE app
    Thread no3 = new Thread(new CacheTester(cache, CertTools.getSubjectDN(testrootnewcert)), "no3"); // NOPMD we want to use thread here, it's not a JEE app
    // No4 uses CV Certificates, and it will never return anything from the cache because this cache (OCSP) only handles X.509 certificates
    Thread no4 = new Thread(new CacheTester(cache, CertTools.getSubjectDN(testcvccert)), "no4"); // NOPMD we want to use thread here, it's not a JEE app
    Thread no5 = new Thread(new CacheTester(cache, CertTools.getSubjectDN(testscepcert)), "no5"); // NOPMD we want to use thread here, it's not a JEE app
    Thread no11 = new Thread(new CacheTester(cache, CertTools.getSubjectDN(testscepcert)), "no1"); // NOPMD we want to use thread here, it's not a JEE app
    Thread no22 = new Thread(new CacheTester(cache, CertTools.getSubjectDN(testrootcert)), "no2"); // NOPMD we want to use thread here, it's not a JEE app
    Thread no33 = new Thread(new CacheTester(cache, CertTools.getSubjectDN(testrootnewcert)), "no3"); // NOPMD we want to use thread here, it's not a JEE app
    Thread no44 = new Thread(new CacheTester(cache, CertTools.getSubjectDN(testcvccert)), "no4"); // NOPMD we want to use thread here, it's not a JEE app
    Thread no55 = new Thread(new CacheTester(cache, CertTools.getSubjectDN(testscepcert)), "no5"); // NOPMD we want to use thread here, it's not a JEE app
    CacheExceptionHandler handler = new CacheExceptionHandler();
    no1.setUncaughtExceptionHandler(handler);
    no2.setUncaughtExceptionHandler(handler);
    no3.setUncaughtExceptionHandler(handler);
    no4.setUncaughtExceptionHandler(handler);
    no5.setUncaughtExceptionHandler(handler);
    no11.setUncaughtExceptionHandler(handler);
    no22.setUncaughtExceptionHandler(handler);
    no33.setUncaughtExceptionHandler(handler);
    no44.setUncaughtExceptionHandler(handler);
    no55.setUncaughtExceptionHandler(handler);
    long start = new Date().getTime();
    no1.start();
    log.info("Started no1");
    no2.start();
    log.info("Started no2");
    no3.start();
    log.info("Started no3");
    no4.start();
    log.info("Started no4");
    no5.start();
    log.info("Started no5");
    no11.start();
    log.info("Started no11");
    no22.start();
    log.info("Started no22");
    no33.start();
    log.info("Started no33");
    no44.start();
    log.info("Started no44");
    no55.start();
    log.info("Started no55");
    no1.join();
    no2.join();
    no3.join();
    no4.join();
    no5.join();
    no11.join();
    no22.join();
    no33.join();
    no44.join();
    no55.join();
    long end = new Date().getTime();
    log.info("Time consumed: " + (end - start));
    assertNull(threadException != null ? threadException.getMessage() : "null", threadException);
}

From source file:edu.brown.hstore.HStoreSite.java

/**
 * Start the MapReduceHelper Thread
 */
private void startMapReduceHelper() {
    synchronized (this.mr_helper) {
        if (this.mr_helper_started)
            return;
        if (debug.val)
            LOG.debug("Starting " + this.mr_helper.getClass().getSimpleName());

        Thread t = new Thread(this.mr_helper);
        t.setDaemon(true);
        t.setUncaughtExceptionHandler(this.exceptionHandler);
        t.start();
        this.mr_helper_started = true;
    } // SYNCH
}