List of usage examples for org.apache.commons.logging Log error
void error(Object message, Throwable t);
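Before the project-specific examples below, here is a minimal, self-contained sketch of the typical pattern. It is illustrative only and is not taken from any of the source files listed on this page; the class name, message text, and the IOException scenario are assumptions.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class ErrorLoggingExample {

    // Obtain a logger for this class; commons-logging delegates to the configured backend (e.g. Log4j or java.util.logging).
    private static final Log log = LogFactory.getLog(ErrorLoggingExample.class);

    public void loadConfiguration(String path) {
        try {
            // ... work that may fail ...
            throw new java.io.IOException("cannot read " + path);
        } catch (java.io.IOException e) {
            // Pass the exception as the second argument so the stack trace is preserved.
            log.error("Error loading configuration from " + path, e);
        }
    }
}

Passing the Throwable as a separate argument, rather than concatenating it into the message, lets the underlying logging implementation record the full stack trace.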
From source file:org.openmrs.module.reportingcompatibility.reporting.export.DataExportUtil.java
/**
 * Auto generated method comment
 *
 * @param dataExport
 * @param patientSet
 * @param functions
 * @param context
 * @throws Exception
 */
public static void generateExport(DataExportReportObject dataExport, Cohort patientSet,
        DataExportFunctions functions, EvaluationContext context) throws Exception {
    // defining log file here to attempt to reduce memory consumption
    Log log = LogFactory.getLog(DataExportUtil.class);

    VelocityEngine velocityEngine = new VelocityEngine();
    velocityEngine.setProperty(RuntimeConstants.RUNTIME_LOG_LOGSYSTEM_CLASS,
            "org.apache.velocity.runtime.log.CommonsLogLogChute");
    velocityEngine.setProperty(CommonsLogLogChute.LOGCHUTE_COMMONS_LOG_NAME, "dataexport_velocity");

    try {
        velocityEngine.init();
    } catch (Exception e) {
        log.error("Error initializing Velocity engine", e);
    }

    File file = getGeneratedFile(dataExport);
    PrintWriter report = new PrintWriter(file);

    VelocityContext velocityContext = new VelocityContext();

    // Set up list of patients if one wasn't passed into this method
    if (patientSet == null) {
        patientSet = dataExport.generatePatientSet(context);
        functions.setPatientSet(patientSet);
    }

    String sizeGp = Context.getAdministrationService()
            .getGlobalProperty(ReportingCompatibilityConstants.BATCH_SIZE_GP);
    Integer batchSize = ReportingCompatibilityConstants.BATCH_SIZE_GP_DEFAULT;
    try {
        batchSize = Integer.parseInt(sizeGp);
    } catch (Exception e) {
        // Do nothing, just use the default
    }
    functions.setBatchSize(batchSize);

    // add the error handler
    EventCartridge ec = new EventCartridge();
    ec.addEventHandler(new VelocityExceptionHandler());
    velocityContext.attachEventCartridge(ec);

    // Set up velocity utils
    Locale locale = Context.getLocale();
    velocityContext.put("locale", locale);
    velocityContext.put("fn", functions);

    /*
     * If we have any additional velocity objects that need to
     * be added, do so here.
     */
    if (dataExportKeys != null && dataExportKeys.size() != 0) {
        for (Map.Entry<String, Object> entry : dataExportKeys.entrySet()) {
            velocityContext.put(entry.getKey(), entry.getValue());
        }
    }

    velocityContext.put("patientSet", patientSet);

    String template = dataExport.generateTemplate();

    // check if some deprecated columns are being used in this export
    // warning: hacky.
    if (template.contains("fn.getPatientAttr('Patient', 'tribe')")) {
        throw new APIException("Unable to generate export: " + dataExport.getName()
                + " because it contains a reference to an outdated 'tribe' column. You must install the 'Tribe Module' into OpenMRS to continue to reference tribes in OpenMRS.");
    }

    if (log.isDebugEnabled())
        log.debug("Template: " + template.substring(0, template.length() < 3500 ? template.length() : 3500)
                + "...");

    try {
        velocityEngine.evaluate(velocityContext, report, DataExportUtil.class.getName(), template);
    } catch (Exception e) {
        log.error("Error evaluating data export " + dataExport.getReportObjectId(), e);
        log.error("Template: " + template.substring(0, template.length() < 3500 ? template.length() : 3500)
                + "...");
        report.print("\n\nError: \n" + e.toString() + "\n Stacktrace: \n");
        e.printStackTrace(report);
    } finally {
        report.close();
        velocityContext.remove("fn");
        velocityContext.remove("patientSet");
        velocityContext = null;

        // reset the ParserPool to something else now?
        // using this to get to RuntimeInstance.init();
        velocityEngine.init();
        velocityEngine = null;

        patientSet = null;
        functions.clear();
        functions = null;
        template = null;
        dataExport = null;

        log.debug("Clearing hibernate session");
        Context.clearSession();

        // clear out the excess objects
        System.gc();
        System.gc();
    }
}
From source file:org.openmrs.reporting.export.DataExportUtil.java
/**
 * Auto generated method comment
 *
 * @param dataExport
 * @param patientSet
 * @param functions
 * @param context
 * @throws Exception
 */
public static void generateExport(DataExportReportObject dataExport, Cohort patientSet,
        DataExportFunctions functions, EvaluationContext context) throws Exception {
    // defining log file here to attempt to reduce memory consumption
    Log log = LogFactory.getLog(DataExportUtil.class);

    VelocityEngine velocityEngine = new VelocityEngine();
    velocityEngine.setProperty(RuntimeConstants.RUNTIME_LOG_LOGSYSTEM_CLASS,
            "org.apache.velocity.runtime.log.CommonsLogLogChute");
    velocityEngine.setProperty(CommonsLogLogChute.LOGCHUTE_COMMONS_LOG_NAME, "dataexport_velocity");

    try {
        velocityEngine.init();
    } catch (Exception e) {
        log.error("Error initializing Velocity engine", e);
    }

    File file = getGeneratedFile(dataExport);
    PrintWriter report = new PrintWriter(file);

    VelocityContext velocityContext = new VelocityContext();

    // Set up list of patients if one wasn't passed into this method
    if (patientSet == null) {
        patientSet = dataExport.generatePatientSet(context);
        functions.setAllPatients(dataExport.isAllPatients());
    }

    // add the error handler
    EventCartridge ec = new EventCartridge();
    ec.addEventHandler(new VelocityExceptionHandler());
    velocityContext.attachEventCartridge(ec);

    // Set up velocity utils
    Locale locale = Context.getLocale();
    velocityContext.put("locale", locale);
    velocityContext.put("fn", functions);

    /*
     * If we have any additional velocity objects that need to
     * be added, do so here.
     */
    if (dataExportKeys != null && dataExportKeys.size() != 0) {
        for (Map.Entry<String, Object> entry : dataExportKeys.entrySet()) {
            velocityContext.put(entry.getKey(), entry.getValue());
        }
    }

    velocityContext.put("patientSet", patientSet);

    String template = dataExport.generateTemplate();

    // check if some deprecated columns are being used in this export
    // warning: hacky.
    if (template.contains("fn.getPatientAttr('Patient', 'tribe')")) {
        throw new APIException("Unable to generate export: " + dataExport.getName()
                + " because it contains a reference to an outdated 'tribe' column. You must install the 'Tribe Module' into OpenMRS to continue to reference tribes in OpenMRS.");
    }

    if (log.isDebugEnabled())
        log.debug("Template: " + template.substring(0, template.length() < 3500 ? template.length() : 3500)
                + "...");

    try {
        velocityEngine.evaluate(velocityContext, report, DataExportUtil.class.getName(), template);
    } catch (Exception e) {
        log.error("Error evaluating data export " + dataExport.getReportObjectId(), e);
        log.error("Template: " + template.substring(0, template.length() < 3500 ? template.length() : 3500)
                + "...");
        report.print("\n\nError: \n" + e.toString() + "\n Stacktrace: \n");
        e.printStackTrace(report);
    } finally {
        report.close();
        velocityContext.remove("fn");
        velocityContext.remove("patientSet");
        velocityContext = null;

        // reset the ParserPool to something else now?
        // using this to get to RuntimeInstance.init();
        velocityEngine.init();
        velocityEngine = null;

        patientSet = null;
        functions.clear();
        functions = null;
        template = null;
        dataExport = null;

        log.debug("Clearing hibernate session");
        Context.clearSession();

        // clear out the excess objects
        System.gc();
        System.gc();
    }
}
From source file:org.openmrs.web.filter.update.UpdateFilterModel.java
/**
 * Convenience method that reads from liquibase again to get the most recent list of changesets
 * that still need to be run.
 */
public void updateChanges() {
    Log log = LogFactory.getLog(getClass());

    try {
        changes = DatabaseUpdater.getUnrunDatabaseChanges();

        // not sure why this is necessary...
        if (changes == null && DatabaseUpdater.isLocked()) {
            changes = DatabaseUpdater.getUnrunDatabaseChanges();
        }
    } catch (Exception e) {
        log.error("Unable to get the database changes", e);
    }
}
From source file:org.openmrs.web.Listener.java
/**
 * Convenience method to empty out the dwr-modules.xml file to fix any errors that might have
 * occurred in it when loading or unloading modules.
 *
 * @param servletContext
 */
private void clearDWRFile(ServletContext servletContext) {
    Log log = LogFactory.getLog(Listener.class);

    String realPath = servletContext.getRealPath("");
    String absPath = realPath + "/WEB-INF/dwr-modules.xml";
    File dwrFile = new File(absPath.replace("/", File.separator));
    try {
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        DocumentBuilder db = dbf.newDocumentBuilder();
        db.setEntityResolver(new EntityResolver() {

            public InputSource resolveEntity(String publicId, String systemId) throws SAXException, IOException {
                // When asked to resolve external entities (such as a DTD) we return an InputSource
                // with no data at the end, causing the parser to ignore the DTD.
                return new InputSource(new StringReader(""));
            }
        });
        Document doc = db.parse(dwrFile);
        Element elem = doc.getDocumentElement();
        elem.setTextContent("");
        OpenmrsUtil.saveDocument(doc, dwrFile);
    } catch (Exception e) {
        // got here because the dwr-modules.xml file is empty for some reason. This might
        // happen because the servlet container (i.e. tomcat) crashes when first loading this file
        log.debug("Error clearing dwr-modules.xml", e);
        dwrFile.delete();
        FileWriter writer = null;
        try {
            writer = new FileWriter(dwrFile);
            writer.write(
                    "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE dwr PUBLIC \"-//GetAhead Limited//DTD Direct Web Remoting 2.0//EN\" \"http://directwebremoting.org/schema/dwr20.dtd\">\n<dwr></dwr>");
        } catch (IOException io) {
            log.error("Unable to clear out the " + dwrFile.getAbsolutePath()
                    + " file. Please redeploy the openmrs war file", io);
        } finally {
            if (writer != null) {
                try {
                    writer.close();
                } catch (IOException io) {
                    log.warn("Couldn't close Writer: " + io);
                }
            }
        }
    }
}
From source file:org.openspaces.grid.gsm.containers.ContainersSlaUtils.java
static FutureGridServiceContainer startGridServiceContainerAsync(final InternalAdmin admin,
        final InternalGridServiceAgent gsa, final GridServiceContainerConfig config, final Log logger,
        final long duration, final TimeUnit timeUnit) {

    final AtomicReference<Object> ref = new AtomicReference<Object>(null);
    final long startTimestamp = System.currentTimeMillis();
    final long end = startTimestamp + timeUnit.toMillis(duration);

    admin.scheduleAdminOperation(new Runnable() {

        public void run() {
            try {
                final OperatingSystemStatistics operatingSystemStatistics = gsa.getMachine()
                        .getOperatingSystem().getStatistics();

                // get total free system memory + cached (without sigar returns -1)
                long freeBytes = operatingSystemStatistics.getActualFreePhysicalMemorySizeInBytes();
                if (freeBytes <= 0) {
                    // fallback - no sigar. Provides a pessimistic number since does not take into
                    // account OS cache that can be allocated.
                    freeBytes = operatingSystemStatistics.getFreePhysicalMemorySizeInBytes();
                    if (freeBytes <= 0) {
                        // machine is probably going down.
                        ref.set(new AdminException("Cannot determine machine "
                                + machineToString(gsa.getMachine()) + " free memory."));
                    }
                }

                final long freeInMB = MemoryUnit.MEGABYTES.convert(freeBytes, MemoryUnit.BYTES);

                if (freeInMB < config.getMaximumJavaHeapSizeInMB()) {
                    ref.set(new AdminException("Machine " + machineToString(gsa.getMachine()) + " free memory "
                            + freeInMB + "MB is not enough to start a container with "
                            + config.getMaximumJavaHeapSizeInMB()
                            + "MB. Free machine memory or increase machine provisioning reservedMemoryPerMachine property."));
                } else {
                    ref.set(gsa.internalStartGridService(config));
                }
            } catch (AdminException e) {
                ref.set(e);
            } catch (Throwable e) {
                logger.error("Unexpected Exception " + e.getMessage(), e);
                ref.set(e);
            }
        }
    });

    FutureGridServiceContainer future = new FutureGridServiceContainer() {

        public boolean isTimedOut() {
            return System.currentTimeMillis() > end;
        }

        public ExecutionException getException() {
            Object result = ref.get();
            if (result != null && result instanceof Throwable) {
                Throwable throwable = (Throwable) result;
                return new ExecutionException(throwable.getMessage(), throwable);
            }
            return null;
        }

        public GridServiceContainer get() throws ExecutionException, IllegalStateException, TimeoutException {
            Object result = ref.get();
            if (getException() != null) {
                throw getException();
            }
            GridServiceContainer container = null;
            if (result != null) {
                int agentId = (Integer) result;
                container = getGridServiceContainerInternal(agentId);
                // container could still be null if not discovered
            }
            if (container == null) {
                if (isTimedOut()) {
                    throw new TimeoutException("Starting a new container took more than "
                            + timeUnit.toSeconds(duration) + " seconds to complete.");
                }
                throw new IllegalStateException("Async operation is not done yet.");
            }
            return container;
        }

        public boolean isDone() {
            Object result = ref.get();
            if (System.currentTimeMillis() > end) {
                return true;
            }
            if (result == null) {
                return false;
            }
            if (result instanceof Throwable) {
                return true;
            }
            GridServiceContainer container = getGridServiceContainerInternal((Integer) result);
            if (container != null) {
                return true;
            }
            return false;
        }

        public GridServiceContainer getGridServiceContainerInternal(int agentId) {
            for (GridServiceContainer container : admin.getGridServiceContainers()) {
                String agentUid = ((InternalGridServiceContainer) container).getAgentUid();
                if (agentUid != null && agentUid.equals(gsa.getUid())) {
                    if (agentId == container.getAgentId()) {
                        return container;
                    }
                }
            }
            return null;
        }

        public GridServiceAgent getGridServiceAgent() {
            return gsa;
        }

        public GridServiceContainerConfig getGridServiceContainerConfig() {
            return config;
        }

        public Date getTimestamp() {
            return new Date(startTimestamp);
        }

        @Override
        public int getAgentId() throws ExecutionException, TimeoutException {
            ExecutionException exception = getException();
            if (exception != null) {
                throw exception;
            }
            if (isTimedOut() && ref.get() == null) {
                throw new TimeoutException("Starting a new container on machine "
                        + gsa.getMachine().getHostAddress() + " took more than " + timeUnit.toSeconds(duration)
                        + " seconds to complete.");
            }
            if (ref.get() == null) {
                throw new IllegalStateException("Async operation is not done yet.");
            }
            return (Integer) ref.get();
        }

        public boolean isStarted() {
            return ref.get() != null;
        }
    };

    return future;
}
From source file:org.openspaces.grid.gsm.rebalancing.RebalancingUtils.java
static Collection<FutureStatelessProcessingUnitInstance> incrementNumberOfStatelessInstancesAsync(
        final ProcessingUnit pu, final GridServiceContainer[] containers, final Log logger,
        final long duration, final TimeUnit timeUnit) {

    if (pu.getMaxInstancesPerVM() != 1) {
        throw new IllegalArgumentException("Only one instance per VM is allowed");
    }

    List<GridServiceContainer> unusedContainers = getUnusedContainers(pu, containers);

    final Admin admin = pu.getAdmin();
    final Map<GridServiceContainer, FutureStatelessProcessingUnitInstance> futureInstances = new HashMap<GridServiceContainer, FutureStatelessProcessingUnitInstance>();
    final AtomicInteger targetNumberOfInstances = new AtomicInteger(pu.getNumberOfInstances());

    final long start = System.currentTimeMillis();
    final long end = start + timeUnit.toMillis(duration);

    for (GridServiceContainer container : unusedContainers) {
        final GridServiceContainer targetContainer = container;
        futureInstances.put(container, new FutureStatelessProcessingUnitInstance() {

            AtomicReference<Throwable> throwable = new AtomicReference<Throwable>();
            ProcessingUnitInstance newInstance;

            public boolean isTimedOut() {
                return System.currentTimeMillis() > end;
            }

            public boolean isDone() {
                end();
                return isTimedOut() || throwable.get() != null || newInstance != null;
            }

            public ProcessingUnitInstance get() throws ExecutionException, IllegalStateException, TimeoutException {
                end();
                if (getException() != null) {
                    throw getException();
                }
                if (newInstance == null) {
                    if (isTimedOut()) {
                        throw new TimeoutException("Relocation timeout");
                    }
                    throw new IllegalStateException("Async operation is not done yet.");
                }
                return newInstance;
            }

            public Date getTimestamp() {
                return new Date(start);
            }

            public ExecutionException getException() {
                end();
                Throwable t = throwable.get();
                if (t != null) {
                    return new ExecutionException(t.getMessage(), t);
                }
                return null;
            }

            public GridServiceContainer getTargetContainer() {
                return targetContainer;
            }

            public ProcessingUnit getProcessingUnit() {
                return pu;
            }

            public String getFailureMessage() throws IllegalStateException {
                if (isTimedOut()) {
                    return "deployment timeout of processing unit " + pu.getName() + " on "
                            + gscToString(targetContainer);
                }
                if (getException() != null) {
                    return getException().getMessage();
                }
                throw new IllegalStateException("Relocation has not encountered any failure.");
            }

            private void end() {
                if (!targetContainer.isDiscovered()) {
                    throwable.set(new RemovedContainerProcessingUnitDeploymentException(pu, targetContainer));
                } else if (throwable.get() != null || newInstance != null) {
                    // do nothing. idempotent method
                } else {
                    incrementInstance();
                    ProcessingUnitInstance[] instances = targetContainer.getProcessingUnitInstances(pu.getName());
                    if (instances.length > 0) {
                        newInstance = instances[0];
                    }
                }
            }

            private void incrementInstance() {
                final String uuid = "[incrementUid:" + UUID.randomUUID().toString() + "] ";

                int numberOfInstances = pu.getNumberOfInstances();
                int maxNumberOfInstances = getContainersOnMachines(pu).length;
                if (numberOfInstances < maxNumberOfInstances) {
                    if (targetNumberOfInstances.get() == numberOfInstances + 1) {
                        if (logger.isInfoEnabled()) {
                            logger.info("Waiting for pu.numberOfInstances to increment from " + numberOfInstances
                                    + " to " + targetNumberOfInstances.get() + ". "
                                    + "Number of relevant containers " + maxNumberOfInstances);
                        }
                    } else if (admin.getGridServiceManagers().getSize() > 1
                            && !((InternalProcessingUnit) pu).isBackupGsmInSync()) {
                        if (logger.isInfoEnabled()) {
                            logger.info("Waiting for backup gsm to sync with active gsm");
                        }
                    } else {
                        targetNumberOfInstances.set(numberOfInstances + 1);
                        if (logger.isInfoEnabled()) {
                            logger.info(uuid + " Planning to increment pu.numberOfInstances from "
                                    + numberOfInstances + " to " + targetNumberOfInstances.get() + ". "
                                    + "Number of relevant containers " + maxNumberOfInstances);
                        }
                        ((InternalAdmin) admin).scheduleAdminOperation(new Runnable() {

                            public void run() {
                                try {
                                    // this is an async operation
                                    // pu.getNumberOfInstances() still shows the old value.
                                    pu.incrementInstance();
                                    if (logger.isInfoEnabled()) {
                                        logger.info(uuid + " pu.incrementInstance() called");
                                    }
                                } catch (AdminException e) {
                                    throwable.set(e);
                                } catch (Throwable e) {
                                    logger.error(uuid + " Unexpected Exception: " + e.getMessage(), e);
                                    throwable.set(e);
                                }
                            }
                        });
                    }
                }
            }
        });
    }

    return futureInstances.values();
}
From source file:org.openspaces.grid.gsm.rebalancing.RebalancingUtils.java
static FutureStatefulProcessingUnitInstance relocateProcessingUnitInstanceAsync(
        final GridServiceContainer targetContainer, final ProcessingUnitInstance puInstance, final Log logger,
        final long duration, final TimeUnit timeUnit) {

    final ProcessingUnit pu = puInstance.getProcessingUnit();
    final GridServiceContainer[] replicationSourceContainers = getReplicationSourceContainers(puInstance);
    final int instanceId = puInstance.getInstanceId();
    final AtomicReference<Throwable> relocateThrowable = new AtomicReference<Throwable>();

    final Admin admin = puInstance.getAdmin();
    final int runningNumber = puInstance.getClusterInfo().getRunningNumber();
    final String puName = puInstance.getName();

    final GridServiceContainer sourceContainer = puInstance.getGridServiceContainer();
    final Set<ProcessingUnitInstance> puInstancesFromSamePartition = getOtherInstancesFromSamePartition(puInstance);

    if (logger.isDebugEnabled()) {
        logger.debug("Found instances from the same partition as "
                + RebalancingUtils.puInstanceToString(puInstance) + " : "
                + RebalancingUtils.puInstancesToString(puInstancesFromSamePartition));
    }

    if (puInstancesFromSamePartition.size() != pu.getNumberOfBackups()) {
        // total number of instances per partition = numberOfBackups + 1
        throw new IllegalStateException("puInstancesFromSamePartition has " + puInstancesFromSamePartition.size()
                + " instances instead of " + pu.getNumberOfBackups());
    }

    final long start = System.currentTimeMillis();
    final long end = start + timeUnit.toMillis(duration);

    ((InternalAdmin) admin).scheduleAdminOperation(new Runnable() {

        public void run() {
            try {
                logger.debug("Relocation of " + RebalancingUtils.puInstanceToString(puInstance) + " to "
                        + ContainersSlaUtils.gscToString(targetContainer) + " has started.");
                puInstance.relocate(targetContainer);
            } catch (AdminException e) {
                logger.error("Admin exception " + e.getMessage(), e);
                relocateThrowable.set(e);
            } catch (Throwable e) {
                logger.error("Unexpected exception " + e.getMessage(), e);
                relocateThrowable.set(e);
            }
        }
    });

    return new FutureStatefulProcessingUnitInstance() {

        Throwable throwable;
        ProcessingUnitInstance newInstance;

        public boolean isTimedOut() {
            return System.currentTimeMillis() > end;
        }

        public boolean isDone() {
            endRelocation();
            return isTimedOut() || throwable != null || newInstance != null;
        }

        public ProcessingUnitInstance get() throws ExecutionException, IllegalStateException, TimeoutException {
            endRelocation();
            ExecutionException exception = getException();
            if (exception != null) {
                throw exception;
            }
            if (newInstance == null) {
                if (isTimedOut()) {
                    throw new TimeoutException("Relocation timeout");
                }
                throw new IllegalStateException("Async operation is not done yet.");
            }
            return newInstance;
        }

        public Date getTimestamp() {
            return new Date(start);
        }

        public ExecutionException getException() {
            endRelocation();
            if (throwable != null) {
                return new ExecutionException(throwable.getMessage(), throwable);
            }
            return null;
        }

        /**
         * populates this.exception or this.newInstance if relocation is complete
         */
        private void endRelocation() {
            boolean inProgress = true;
            tryStateChange(); // this makes relocation synchronous
            if (newInstance != null || throwable != null) {
                inProgress = false;
            }
            if (inProgress) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Relocation from " + ContainersSlaUtils.gscToString(getSourceContainer())
                            + " to " + ContainersSlaUtils.gscToString(getTargetContainer()) + " is in progress.");
                }
                // do nothing. relocate() method running on another thread has not returned yet.
            }
        }

        private void tryStateChange() {
            ProcessingUnitInstance relocatedInstance = getRelocatedProcessingUnitInstance();
            if (relocatedInstance != null) {
                if (relocatedInstance.getGridServiceContainer().equals(targetContainer)) {
                    if (relocatedInstance.getSpaceInstance() != null
                            && relocatedInstance.getSpaceInstance().getMode() != SpaceMode.NONE) {
                        if (logger.isDebugEnabled()) {
                            logger.debug("Relocation from " + ContainersSlaUtils.gscToString(getSourceContainer())
                                    + " to " + ContainersSlaUtils.gscToString(getTargetContainer())
                                    + " had ended successfully.");
                        }
                        newInstance = relocatedInstance;
                    }
                } else {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Relocation from " + ContainersSlaUtils.gscToString(getSourceContainer())
                                + " to " + ContainersSlaUtils.gscToString(getTargetContainer())
                                + " has ended with an error.");
                    }
                    throwable = new WrongContainerProcessingUnitRelocationException(puInstance, targetContainer);
                }
            }
        }

        private ProcessingUnitInstance getRelocatedProcessingUnitInstance() {
            for (GridServiceContainer container : admin.getGridServiceContainers()) {
                for (ProcessingUnitInstance instance : container.getProcessingUnitInstances(puName)) {
                    if (!instance.equals(puInstance)
                            && instance.getClusterInfo().getRunningNumber() == runningNumber
                            && !puInstancesFromSamePartition.contains(instance)) {
                        return instance;
                    }
                }
            }
            return null;
        }

        private boolean isAtLeastOneInstanceValid(Set<ProcessingUnitInstance> instances) {
            boolean isValidState = false;
            for (ProcessingUnitInstance instance : instances) {
                if (instance.isDiscovered() && instance.getGridServiceContainer().isDiscovered()) {
                    isValidState = true;
                    break;
                }
            }
            return isValidState;
        }

        public String getFailureMessage() {
            if (isTimedOut()) {
                return "relocation timeout of processing unit instance " + instanceId + " from "
                        + gscToString(sourceContainer) + " to " + gscToString(targetContainer);
            }
            if (getException() != null) {
                return getException().getMessage();
            }
            throw new IllegalStateException("Relocation has not encountered any failure.");
        }

        public GridServiceContainer getTargetContainer() {
            return targetContainer;
        }

        public ProcessingUnit getProcessingUnit() {
            return pu;
        }

        public int getInstanceId() {
            return instanceId;
        }

        public GridServiceContainer getSourceContainer() {
            return sourceContainer;
        }

        public GridServiceContainer[] getReplicaitonSourceContainers() {
            return replicationSourceContainers;
        }
    };
}
From source file:org.ops4j.gaderian.impl.DefaultErrorHandler.java
public void error(Log log, String message, Location location, Throwable cause) {
    String output = location == null ? ImplMessages.unlocatedError(message)
            : ImplMessages.locatedError(location, message);

    log.error(output, cause);
}
From source file:org.ops4j.gaderian.impl.TestErrorHandler.java
public void testDefaultErrorHandlerWithLocation() {
    Log log = (Log) createMock(Log.class);

    Resource r = new ClasspathResource(getClassResolver(), "/foo/bar/Baz.module");
    Location l = new LocationImpl(r, 13);

    Throwable ex = new IllegalArgumentException();

    log.error("Error at classpath:/foo/bar/Baz.module, line 13: Bad frob value.", ex);

    replayAllRegisteredMocks();

    ErrorHandler eh = new DefaultErrorHandler();

    eh.error(log, "Bad frob value.", l, ex);

    verifyAllRegisteredMocks();
}
From source file:org.ops4j.gaderian.impl.TestErrorHandler.java
public void testDefaultErrorHandlerWithNoLocation() {
    Log log = (Log) createMock(Log.class);

    Throwable ex = new IllegalArgumentException();

    log.error("Error: Bad frob value.", ex);

    replayAllRegisteredMocks();

    ErrorHandler eh = new DefaultErrorHandler();

    eh.error(log, "Bad frob value.", null, ex);

    verifyAllRegisteredMocks();
}