Example usage for java.lang StackTraceElement toString

List of usage examples for java.lang StackTraceElement toString

Introduction

On this page you can find example usages of java.lang.StackTraceElement.toString().

Prototype

public String toString() 

Source Link

Document

Returns a string representation of this stack trace element.

Usage

From source file:de.axelfaust.alfresco.nashorn.repo.web.scripts.console.ExecutePost.java

/**
 * Recursively flattens an exception chain into human-readable callstack lines.
 * The innermost cause is rendered first with its full stack trace; every
 * wrapping exception then contributes a "Wrapped in ..." line plus only its
 * topmost stack frame.
 *
 * @param exception the throwable to render
 * @param callstackLines accumulator the formatted lines are appended to
 */
protected void collectCallstackLines(final Throwable exception, final List<String> callstackLines) {
    final Throwable cause = exception.getCause();
    if (cause == null) {
        // innermost exception: message line followed by every frame
        callstackLines.add(exception.toString());
        for (final StackTraceElement frame : exception.getStackTrace()) {
            callstackLines.add(CALLSTACK_AT_PREFIX + frame);
        }
    } else {
        // render the cause first so the deepest exception leads the output
        this.collectCallstackLines(cause, callstackLines);
        callstackLines.add("Wrapped in " + exception);
        callstackLines.add(CALLSTACK_AT_PREFIX + exception.getStackTrace()[0]);
    }
}

From source file:org.apache.tajo.master.TajoMaster.java

/**
 * Writes a plain-text dump of every live JVM thread to the given writer.
 * For each thread the dump shows its state, blocked/waited counters,
 * contention times (when thread contention monitoring is enabled), the lock
 * being waited on or blocked by, and up to 20 stack frames.
 *
 * @param writer destination for the dump; wrapped in a PrintWriter,
 *               flushing is left to the caller
 */
public void dumpThread(Writer writer) {
    final int maxDepth = 20;
    final PrintWriter out = new PrintWriter(writer);
    final boolean contentionEnabled = threadBean.isThreadContentionMonitoringEnabled();
    final long[] ids = threadBean.getAllThreadIds();

    out.println("Process Thread Dump: Tajo Worker");
    out.println(ids.length + " active threads");

    for (final long id : ids) {
        final ThreadInfo info = threadBean.getThreadInfo(id, maxDepth);
        if (info == null) {
            // thread died between getAllThreadIds() and getThreadInfo()
            out.println("  Inactive");
            continue;
        }

        out.println("Thread " + getThreadTaskName(info.getThreadId(), info.getThreadName()) + ":");

        final Thread.State state = info.getThreadState();
        out.println("  State: " + state + ", Blocked count: " + info.getBlockedCount() + ", Waited count: "
                + info.getWaitedCount());
        if (contentionEnabled) {
            out.println(
                    "  Blocked time: " + info.getBlockedTime() + ", Waited time: " + info.getWaitedTime());
        }

        switch (state) {
        case WAITING:
            out.println("  Waiting on " + info.getLockName());
            break;
        case BLOCKED:
            out.println("  Blocked on " + info.getLockName() + ", Blocked by "
                    + getThreadTaskName(info.getLockOwnerId(), info.getLockOwnerName()));
            break;
        default:
            // other states carry no lock annotation
            break;
        }

        out.println("  Stack:");
        for (final StackTraceElement frame : info.getStackTrace()) {
            out.println("    " + frame);
        }
        out.println("");
    }
}

From source file:uk.ac.diamond.scisoft.ncd.rcp.handlers.DataReductionHandler.java

/**
 * Command handler that launches the NCD data reduction pipeline for the
 * NeXus files currently selected in the Project Explorer (or the plugin's
 * file view). Obtains the IDataReductionService via OSGi, builds and
 * configures the reduction context, then runs the processing either in a
 * modal progress dialog or as a background Job depending on the
 * NCD_REDUCTION_MODAL preference.
 *
 * @param event the execution event that triggered this handler
 * @return Boolean.TRUE when the reduction ran or was scheduled (or when no
 *         selection existed); Boolean.FALSE when configuration failed, the
 *         run failed/was interrupted, or the user declined to proceed
 * @throws ExecutionException per the IHandler contract (not thrown directly here)
 */
@Override
public Object execute(ExecutionEvent event) throws ExecutionException {

    final IWorkbenchWindow window = PlatformUI.getWorkbench().getActiveWorkbenchWindow();
    IWorkbenchPage page = window.getActivePage();
    // Prefer the Project Explorer selection; fall back to the plugin's file view.
    IStructuredSelection sel = (IStructuredSelection) page.getSelection(ProjectExplorer.VIEW_ID);
    if (sel == null || sel.isEmpty()) {
        sel = (IStructuredSelection) page.getSelection(Activator.FILEVIEW_ID);
    }

    if (sel != null && !sel.isEmpty()) {

        try {
            // We get the data reduction service using OSGI
            service = (IDataReductionService) Activator.getService(IDataReductionService.class);
            // This is a workaround for DAWNSCI-858: start the Passerelle actor bundle.
            // NOTE(review): after starting the initializer the service is never
            // looked up again, so the check below still throws — confirm intent.
            if (service == null) {
                ActorBundleInitializer initer = com.isencia.passerelle.starter.Activator.getInitializer();
                if (initer != null)
                    initer.start();
            }
            if (service == null) {
                throw new RuntimeException("Cannot find IDataReductionService using activator!");
            }

            // Get data from NcdProcessingSourceProvider's and store in IDataReductionContext
            context = service.createContext();
            createData(context, window);

            // Now we configure the context, which throws exceptions if 
            // the configuration is invalid.
            createMaskAndRegion(context);
            service.configure(context);
        } catch (Exception e) {
            // Configuration failure: show one status entry per stack frame.
            String msg = "SCISOFT NCD: Error reading data reduction parameters";
            logger.error(msg, e);
            MultiStatus mStatus = new MultiStatus(Activator.PLUGIN_ID, IStatus.ERROR, msg, e);
            for (StackTraceElement ste : e.getStackTrace()) {
                mStatus.add(new Status(IStatus.ERROR, Activator.PLUGIN_ID, ste.toString()));
            }
            StatusManager.getManager().handle(mStatus, StatusManager.BLOCK | StatusManager.SHOW);
            return Boolean.FALSE;
        }

        // Warn when sector integration is requested without calibration data.
        if (context.isEnableSector() && !isCalibrationResultsBean(context)) {
            boolean proceed = MessageDialog.openConfirm(window.getShell(), "Missing NCD calibration data",
                    "IMPORTANT! NCD calibration data was not found for currently selected SAXS detector.\n"
                            + "Data reduction pipeline will look for calibration information in the input files.\n"
                            + "Proceed with data reduction anyway?");
            if (!proceed) {
                return Boolean.FALSE;
            }
        }

        selObjects = sel.toArray();

        boolean runModal = uk.ac.diamond.scisoft.ncd.core.rcp.Activator.getDefault().getPreferenceStore()
                .getBoolean(NcdPreferences.NCD_REDUCTION_MODAL);
        final NcdDataReductionJob ncdProcess = new NcdDataReductionJob();
        if (runModal) {
            // Modal run: block the UI with a progress dialog.
            try {
                ProgressMonitorDialog dlg = new ProgressMonitorDialog(window.getShell());
                dlg.run(true, true, ncdProcess);
            } catch (InvocationTargetException ex) {
                Throwable cause = ex.getCause();
                String msg = "NCD Data Reduction has failed";
                logger.error(msg, cause);
                Status status = new Status(IStatus.ERROR, Activator.PLUGIN_ID, msg, cause);
                StatusManager.getManager().handle(status, StatusManager.BLOCK | StatusManager.SHOW);
                return Boolean.FALSE;
            } catch (InterruptedException ex) {
                Throwable cause = ex.getCause();
                String msg = "NCD Data Reduction was interrupted";
                logger.error(msg, cause);
                Status status = new Status(IStatus.ERROR, Activator.PLUGIN_ID, msg, cause);
                StatusManager.getManager().handle(status, StatusManager.BLOCK | StatusManager.SHOW);
                return Boolean.FALSE;
            }
        } else {
            // Background run: schedule an Eclipse Job and return immediately.
            final Job ncdJob = new Job("Running NCD data reduction") {

                @Override
                protected IStatus run(IProgressMonitor monitor) {
                    monitor.beginTask("Running NCD data reduction",
                            context.getWorkAmount() * selObjects.length);
                    ncdProcess.run(monitor);
                    monitor.done();
                    return Status.OK_STATUS;
                }
            };

            ncdJob.setUser(true);
            ncdJob.schedule();
        }
    } else {
        String msg = "Please select NeXus files to process in Project Explorer view before running NCD Data Reduction.";
        Status status = new Status(IStatus.CANCEL, Activator.PLUGIN_ID, msg);
        StatusManager.getManager().handle(status, StatusManager.BLOCK | StatusManager.SHOW);
    }
    return Boolean.TRUE;
}

From source file:fullThreadDump.java

/**
 * Formats a single thread's information in a jstack-like text layout:
 * header line, thread state, stack frames prefixed with "at", and
 * per-frame lock annotations (waiting / parking / blocked / locked monitors).
 *
 * @param ti the thread snapshot to render
 * @return the formatted multi-line dump for this thread, ending with a newline
 */
private String printThreadInfo(ThreadInfo ti) {
    // print stack trace with locks
    StackTraceElement[] stacktrace = ti.getStackTrace();
    MonitorInfo[] monitors = ti.getLockedMonitors();
    currentThreadInfo result = new currentThreadInfo(ti);
    StringBuilder threadOutput = new StringBuilder(result.getThreadName());
    threadOutput.append(result.getThreadStateDesc());
    if (stats) { // was 'stats == true'; compare booleans directly
        threadOutput.append(result.getThreadStats());
    }
    for (int i = 0; i < stacktrace.length; i++) {
        // cache the rendered frame: it was re-computed up to four times per iteration
        String frame = stacktrace[i].toString();
        if (i == 0) {
            // top frame carries the state line plus any wait/park/block annotation
            threadOutput.append("\n    java.lang.Thread.State: ").append(result.getThreadState());
            threadOutput.append("\n").append(INDENT).append("at ").append(frame);
            if (frame.contains("java.lang.Object.wait(Native Method)") && result.getLockName() != null) {
                threadOutput.append("\n").append(INDENT).append("- waiting on ").append(result.getLockName());
            }
            if (frame.contains("sun.misc.Unsafe.park(Native Method)") && result.getLockName() != null) {
                threadOutput.append("\n").append(INDENT).append("- parking to wait for ")
                        .append(result.getLockName());
            }
            if (result.getThreadStateDesc().contains("BLOCKED") && result.getLockName() != null) {
                threadOutput.append("\n").append(INDENT).append("- waiting to lock ")
                        .append(result.getLockName());
            }
        } else {
            threadOutput.append("\n").append(INDENT).append("at ").append(frame);
        }
        // annotate monitors acquired at this stack depth
        for (MonitorInfo mi : monitors) {
            if (mi.getLockedStackDepth() == i) {
                threadOutput.append("\n").append(INDENT).append(" - locked ").append(mi);
            }
        }
    }
    threadOutput.append("\n");
    return threadOutput.toString();
}

From source file:com.fluidops.iwb.api.ProviderServiceImpl.java

/**
 * Note: the history repository is only used in the fiwbcom version of the
 * method, i.e. when overriding the method in class ProviderServiceImplCom.
 * No historic data management is done at all in the fiwb version.
 * /*  w w w.jav  a2s.  c  o m*/
 * @param provider
 * @param data
 *            Externally passed data
 * @param repository
 * @param historyRepository
 */
private void load(AbstractFlexProvider provider, URI parameter, List<Statement> data, Repository repository,
        Repository historyRepository) throws Exception {
    logger.info("Starting provider with ID " + provider.providerID.stringValue());
    // fetch new data from provider
    List<Statement> newStmts = new LinkedList<Statement>();
    long start = System.currentTimeMillis();
    provider.running = true;
    try {
        if (data != null)
            newStmts = data; // data provided externally, there is no
        // need to run the provider
        else {
            if (parameter != null && provider instanceof LookupProvider) {
                ((LookupProvider) provider).gather(newStmts, parameter);
            } else
                provider.gather(newStmts);

        }

        provider.error = null;

        long now = System.currentTimeMillis();
        processProviderData(provider, newStmts, parameter, repository, historyRepository, now);
        provider.size = newStmts.size();
    } catch (Throwable t) {
        logger.error("Provider load error: ", t);
        StringBuilder error = new StringBuilder();
        error.append(t.getMessage());
        for (StackTraceElement tl : t.getStackTrace())
            error.append("\n").append(tl.toString());
        provider.error = error.toString();
    }

    // update statistics
    provider.lastUpdate = new Date();
    provider.lastDuration = System.currentTimeMillis() - start;
    provider.running = false;

    logger.info("Provider run of provider with ID " + provider.providerID + " finished");
}

From source file:bkampfbot.Instance.java

/**
 * Sammelt Informationen ber Benutzer/*from  w w  w  .j  a  v  a2  s. co  m*/
 * 
 * @param login
 *            gibt an, ob es der Aufruf nach dem Login ist
 */
private final String getCharacter(boolean login) throws FatalError {
    HttpGet httpget = new HttpGet(Config.getHost() + "characters");
    httpget.addHeader("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8");
    httpget.addHeader("Accept-Language", "de-de,de;q=0.8,en-us;q=0.5,en;q=0.3");
    // httpget.addHeader("Accept-Encoding","deflate");
    httpget.addHeader("Accept-Charset", "ISO-8859-1,utf-8;q=0.7,*;q=0.7");
    httpget.addHeader("Keep-Alive", "300");

    if (login) {
        httpget.addHeader("Referer", Config.getHost() + "signups/login");
    }

    String s = "";

    try {
        // Create a response handler
        HttpResponse response = this.httpclient.execute(httpget);
        HttpEntity entity = response.getEntity();
        if (entity != null) {
            s = EntityUtils.toString(entity);

            this.testStatus(s);

            int navi2 = s.indexOf("flashvars");
            int lineFront = s.indexOf('{', navi2);
            int lineEnd = s.indexOf(';', lineFront + 1);
            String s2 = s.substring(lineFront + 1, lineEnd + 1);
            s2 = "{" + s2;

            JSONTokener tk = new JSONTokener(s2);
            JSONObject character = new JSONObject(tk);
            User.setLevel(Integer.parseInt(character.getString("lvl")));
            User.setMaxLivePoints(Integer.parseInt(character.getString("max_lp")));
            User.setCurrentLivePoints(Integer.parseInt(character.getString("lp")));
            User.setGold(Integer.parseInt(character.getString("water")));

            // try to find race
            s2 = "<b>Bundesland:</b> </span><br/><span style=\"color:#000000; font-size:12px;\">";

            lineFront = s.indexOf(s2);
            lineEnd = s.indexOf('<', lineFront + 1 + s2.length());

            User.setRace(s.substring(lineFront + s2.length() + 1, lineEnd - 1));
            // Output.user(user);
        }
    } catch (JSONException e) {
        if (s.contains("form action=\"/signups/login\" method=\"post\"")) {
            throw new FatalError("Login fehlgeschlagen. " + "Vermutlich sind die Benutzerdaten nicht korrekt.");
        }
        String message = "Get an error at initiation\n";
        if (login) {
            message += "Reason 1: Login failed\n" + "Reason 2: Something on server side changed.\n";
        } else {
            message += "Possible reason: Something on server side changed.\n";
        }

        message += "\n" + "If you want to report a bug, please post this:\n\n" + e.getMessage() + "\n";

        StackTraceElement[] trace = e.getStackTrace();
        for (StackTraceElement elem : trace) {
            message += elem.toString() + "\n";
        }
        throw new FatalError(message + "\nResponse was:\n" + s);
    } catch (IOException e) {
        Output.error(e);
        throw new FatalError("Es gab einen Verbindungsfehler.");
    }

    return s;
}

From source file:voldemort.store.readonly.fetcher.HdfsFetcher.java

/**
 * Fetches a read-only store version from a Hadoop filesystem into a local
 * destination directory, optionally authenticating via Kerberos keytab.
 * A JMX mbean exposing copy statistics is registered for the duration of
 * the fetch, and the global throttle on concurrent push jobs is honored.
 *
 * @param sourceFileUrl    source URL (hdfs/webhdfs, or hftp which skips keytab login)
 * @param destinationFile  local directory to create; must not already exist
 * @param hadoopConfigPath directory containing core-site.xml / hdfs-site.xml,
 *                         or empty to use the default configuration
 * @return the destination File on success, or null when the copy reported failure
 * @throws IOException declared for callers; failures are rethrown as VoldemortException
 */
public File fetch(String sourceFileUrl, String destinationFile, String hadoopConfigPath) throws IOException {
    // Reject the job up front when the shared throttle is already saturated.
    if (this.globalThrottleLimit != null) {
        if (this.globalThrottleLimit.getSpeculativeRate() < this.minBytesPerSecond)
            throw new VoldemortException("Too many push jobs.");
        this.globalThrottleLimit.incrementNumJobs();
    }

    ObjectName jmxName = null;
    try {

        final Configuration config = new Configuration();
        FileSystem fs = null;
        config.setInt("io.socket.receive.buffer", bufferSize);
        config.set("hadoop.rpc.socket.factory.class.ClientProtocol", ConfigurableSocketFactory.class.getName());
        config.set("hadoop.security.group.mapping", "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");

        final Path path = new Path(sourceFileUrl);

        // hftp fetches bypass the Kerberos/keytab login path below.
        boolean isHftpBasedFetch = sourceFileUrl.length() > 4 && sourceFileUrl.substring(0, 4).equals("hftp");
        logger.info("URL : " + sourceFileUrl + " and hftp protocol enabled = " + isHftpBasedFetch);
        logger.info("Hadoop path = " + hadoopConfigPath + " , keytab path = " + HdfsFetcher.keytabPath
                + " , kerberos principal = " + HdfsFetcher.kerberosPrincipal);

        if (hadoopConfigPath.length() > 0 && !isHftpBasedFetch) {

            config.addResource(new Path(hadoopConfigPath + "/core-site.xml"));
            config.addResource(new Path(hadoopConfigPath + "/hdfs-site.xml"));

            String security = config.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION);

            // A supplied config directory must have Kerberos enabled.
            if (security == null || !security.equals("kerberos")) {
                logger.error("Security isn't turned on in the conf: "
                        + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION + " = "
                        + config.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION));
                logger.error("Please make sure that the Hadoop config directory path is valid.");
                throw new VoldemortException(
                        "Error in getting Hadoop filesystem. Invalid Hadoop config directory path.");
            } else {
                logger.info("Security is turned on in the conf. Trying to authenticate ...");

            }
        }

        if (HdfsFetcher.keytabPath.length() > 0 && !isHftpBasedFetch) {

            /*
             * We're seeing intermittent errors while trying to get the
             * Hadoop filesystem in a privileged doAs block. This happens
             * when we fetch the files over hdfs or webhdfs. This retry loop
             * is inserted here as a temporary measure.
             */
            for (int attempt = 0; attempt < maxAttempts; attempt++) {
                boolean isValidFilesystem = false;

                if (!new File(HdfsFetcher.keytabPath).exists()) {
                    logger.error("Invalid keytab file path. Please provide a valid keytab path");
                    throw new VoldemortException(
                            "Error in getting Hadoop filesystem. Invalid keytab file path.");
                }

                /*
                 * The Hadoop path for getting a Filesystem object in a
                 * privileged doAs block is not thread safe. This might be
                 * causing intermittent NPE exceptions. Adding a
                 * synchronized block.
                 */
                synchronized (this) {
                    /*
                     * First login using the specified principal and keytab
                     * file
                     */
                    UserGroupInformation.setConfiguration(config);
                    UserGroupInformation.loginUserFromKeytab(HdfsFetcher.kerberosPrincipal,
                            HdfsFetcher.keytabPath);

                    /*
                     * If login is successful, get the filesystem object.
                     * NOTE: Ideally we do not need a doAs block for this.
                     * Consider removing it in the future once the Hadoop
                     * jars have the corresponding patch (tracked in the
                     * Hadoop Apache project: HDFS-3367)
                     */
                    try {
                        logger.info("I've logged in and am now Doasing as "
                                + UserGroupInformation.getCurrentUser().getUserName());
                        fs = UserGroupInformation.getCurrentUser()
                                .doAs(new PrivilegedExceptionAction<FileSystem>() {

                                    @Override
                                    public FileSystem run() throws Exception {
                                        FileSystem fs = path.getFileSystem(config);
                                        return fs;
                                    }
                                });
                        isValidFilesystem = true;
                    } catch (InterruptedException e) {
                        logger.error(e.getMessage(), e);
                    } catch (Exception e) {
                        logger.error("Got an exception while getting the filesystem object: ");
                        logger.error("Exception class : " + e.getClass());
                        e.printStackTrace();
                        for (StackTraceElement et : e.getStackTrace()) {
                            logger.error(et.toString());
                        }
                    }
                }

                if (isValidFilesystem) {
                    break;
                } else if (attempt < maxAttempts - 1) {
                    logger.error(
                            "Attempt#" + attempt + " Could not get a valid Filesystem object. Trying again in "
                                    + retryDelayMs + " ms");
                    sleepForRetryDelayMs();
                }
            }
            // NOTE(review): if every attempt above failed, fs is still null here
            // and sizeOfPath(fs, path)/fs.close() below will NPE (surfacing only
            // via the generic Throwable catch) — confirm whether an explicit
            // VoldemortException would be preferable.
        } else {
            fs = path.getFileSystem(config);
        }

        CopyStats stats = new CopyStats(sourceFileUrl, sizeOfPath(fs, path));
        jmxName = JmxUtils.registerMbean("hdfs-copy-" + copyCount.getAndIncrement(), stats);
        File destination = new File(destinationFile);

        if (destination.exists()) {
            throw new VoldemortException(
                    "Version directory " + destination.getAbsolutePath() + " already exists");
        }

        logger.info("Starting fetch for : " + sourceFileUrl);
        boolean result = fetch(fs, path, destination, stats);
        logger.info("Completed fetch : " + sourceFileUrl);

        // Close the filesystem
        fs.close();

        if (result) {
            return destination;
        } else {
            return null;
        }
    } catch (Throwable te) {
        te.printStackTrace();
        logger.error("Error thrown while trying to get data from Hadoop filesystem", te);
        throw new VoldemortException("Error thrown while trying to get data from Hadoop filesystem : " + te);
    } finally {
        // Always release the throttle slot and the JMX registration.
        if (this.globalThrottleLimit != null) {
            this.globalThrottleLimit.decrementNumJobs();
        }
        if (jmxName != null)
            JmxUtils.unregisterMbean(jmxName);
    }
}

From source file:terse.vm.Terp.java

/**
 * Consumes one execution tick; when the tick budget is exhausted, logs the
 * current Java stack via say() and aborts the computation by throwing
 * TooManyTicks.
 *
 * @throws TooManyTicks when the tick budget has run out
 */
public void tick() {
    --tickCounter;
    if (tickCounter < 1) {
        try {
            // Throw and immediately catch purely to capture the current stack trace.
            throw new RuntimeException("Going To Throw TooManyTicks");
        } catch (RuntimeException ex) {
            ex.printStackTrace();

            // StringBuilder instead of StringBuffer: the buffer is method-local,
            // so synchronization is unnecessary.
            StringBuilder sb = new StringBuilder(ex.toString());
            for (StackTraceElement frame : ex.getStackTrace()) {
                sb.append("\n  * ").append(frame.toString());
            }

            say(sb.toString());
        }
        throw new TooManyTicks();
    }

}

From source file:ca.sqlpower.sqlobject.SQLDatabase.java

/**
 * Returns a JDBC connection to the backing database, if there is one.
 * The connection that you get will be yours and only yours until you call
 * close() on it. To maximize efficiency of the pool, try to call close()
 * as soon as you are done with the connection.
 *
 * @return an open connection if this database has a valid dataSource;
 *         null if this is a dummy database (such as the playpen instance)
 * @throws SQLObjectException if a connection cannot be obtained; the same
 *         exception is also recorded as the children-inaccessible reason
 */
@NonProperty
public Connection getConnection() throws SQLObjectException {
    // Dummy databases (no data source) simply have no connection to give out.
    if (dataSource == null) {
        return null;
    }
    try {
        // Track the historical high-water mark of simultaneously active connections.
        int newActiveCount = getConnectionPool().getNumActive() + 1;
        maxActiveConnections = Math.max(maxActiveConnections, newActiveCount);
        if (logger.isDebugEnabled()) {
            logger.debug("getConnection(): giving out active connection " + newActiveCount); //$NON-NLS-1$
            for (StackTraceElement frame : Thread.currentThread().getStackTrace()) {
                logger.debug(frame.toString());
            }
        }
        return (Connection) getConnectionPool().borrowObject();
    } catch (Exception e) {
        final SQLObjectException ex = new SQLObjectException(
                "Couldn't connect to database: " + e.getMessage(), e); //$NON-NLS-1$
        // Record the failure as the reason children are inaccessible, on the
        // foreground thread, before rethrowing to the caller.
        runInForeground(new Runnable() {
            public void run() {
                try {
                    setChildrenInaccessibleReason(ex, SQLObject.class, false);
                } catch (SQLObjectException e) {
                    throw new SQLObjectRuntimeException(e);
                }
            }
        });
        throw ex;
    }
}

From source file:terse.vm.Terp.java

/**
 * Dispatches an HTTP-style URL to a Terse rendering class and returns the
 * page as a Dict. The first dotted word of the path names the app class; an
 * optional second word is parsed as a numeric instance id. The query map is
 * converted to a Terse dict and delivered via the "handle:query:" message.
 * All failures are rendered as text pages rather than propagated.
 *
 * @param url   request path, must start with "/"; "/" maps to "/Top"
 * @param query query parameters; may be null (treated as empty)
 * @return the resulting page Dict; on error a text page with
 *         type/title/value entries describing the failure
 */
public Dict handleUrl(String url, HashMap<String, String> query) {
    say("runUrl: %s", url);
    query = (query == null) ? new HashMap<String, String>() : query;
    // Convert the query map into an array of (key, value) Vec pairs.
    Ur[] queryArr = new Ur[query.size()];
    int i = 0;
    for (String k : query.keySet()) {
        String v = query.get(k);
        if (k == null)
            k = "HOW_DID_WE_GET_A_NULL_KEY";
        if (v == null)
            v = "HOW_DID_WE_GET_A_NULL_VALUE";
        Ur queryKey = newStr(k);
        // Normalize CRLF line endings in query values.
        Ur queryValue = newStr(v.replaceAll("\r\n", "\n"));
        queryArr[i] = new Vec(this, urs(queryKey, queryValue));
        ++i;
    }
    Dict qDict = newDict(queryArr);
    assert url.startsWith("/");
    if (url.equals("/")) {
        url = "/Top";
    }

    // To get app name, skip the initial '/', and split on dots.
    String[] word = url.substring(1).split("[.]");
    assert word.length > 0;
    String appName = word[0];

    Dict result = null;
    try {
        Cls cls = getTerp().clss.get(appName.toLowerCase());
        if (cls == null) {
            toss("Rendering class does not exist: <%s>", appName);
        }

        String urlRepr = newStr(url).repr();
        String qDictRepr = qDict.repr(); // Inefficient. TODO.
        Ur result_ur = instNil;
        int id = 0;

        // Try to resolve an existing instance using word[1] as a numeric id.
        Obj inst = null;
        try {
            id = Integer.parseInt(word[1]);
            if (cls instanceof Usr.UsrCls) {
                inst = ((Usr.UsrCls) cls).cache.find(id);
            }
        } catch (Exception _) {
            // Ignored: a missing or non-numeric id simply means "no instance".
            // NOTE(review): '_' stopped being a legal identifier in Java 9 — rename if upgrading.
        }
        long before = tickCounter;
        long nanosBefore = System.nanoTime();
        if (inst != null) {
            // 1) An existing cached instance handles the request.
            result_ur = inst.eval(fmt("self handle: (%s) query: (%s)", urlRepr, qDictRepr));
        } else if (Send.understands(cls, "handle:query:")) {
            say("CLS <%s> understands handle:query: so sending to class.", cls);
            // 2) First try sending to the class.
            result_ur = cls.eval(fmt("self handle: (%s) query: (%s)", urlRepr, qDictRepr));
        } else {
            // 3) Otherwise create a fresh instance and send to it.
            Ur instance = cls.eval("self new");
            Usr usrInst = instance.asUsr();
            // TODO: LRU & mention() conflict with Cls.insts map.
            id = usrInst == null ? 0 : usrInst.omention(); // LRU Cache

            result_ur = instance.asObj()
                    .eval(fmt("self handle: (%s) query: (%s)", newStr(url).repr(), qDict.repr()));
        }
        result = result_ur.asDict();
        if (result == null) {
            toss("Sending <handle:query:> to instance of <%s> did not return a Dict: <%s>", appName, result_ur);
        }
        result.dict.put(newStr("id"), newStr(Integer.toString(id)));
        long after = tickCounter;
        long nanosAfter = System.nanoTime();
        // Attach execution-cost metadata (tick and wall-clock usage) to the page.
        result.dict.put(newStr("ticks"), newNum(before - after));
        result.dict.put(newStr("nanos"), newNum(nanosAfter - nanosBefore));
        say("<handle:query:> used %d ticks and %.3f secs.", before - after,
                (double) (nanosAfter - nanosBefore) / 1000000000.0);

    } catch (Exception ex) {
        // Render any exception as a text page whose body is the stack trace.
        ex.printStackTrace();
        StringBuffer sb = new StringBuffer(ex.toString());
        StackTraceElement[] elems = ex.getStackTrace();
        for (StackTraceElement e : elems) {
            sb.append("\n  * ");
            sb.append(e.toString());
        }
        Ur[] dict_arr = urs(new Vec(this, urs(newStr("type"), newStr("text"))),
                new Vec(this, urs(newStr("title"), newStr(ex.toString()))),
                new Vec(this, urs(newStr("value"), newStr(sb.toString()))));
        result = newDict(dict_arr);
    } catch (TooManyTicks err) {
        // NOTE(review): this clause is reachable after 'catch (Exception)', which
        // implies TooManyTicks is not an Exception subclass (presumably an Error) — confirm.
        err.printStackTrace();
        String s = fmt("TOO_MANY_TICKS_IN_handleUrl <%s> qdict <%s>", url, qDict);
        Ur[] dict_arr = urs(new Vec(this, urs(newStr("type"), newStr("text"))),
                new Vec(this, urs(newStr("title"), newStr(err.toString()))),
                new Vec(this, urs(newStr("value"), newStr(s))));
        result = newDict(dict_arr);
    } catch (Error err) {
        err.printStackTrace();
        Ur[] dict_arr = urs(new Vec(this, urs(newStr("type"), newStr("text"))),
                new Vec(this, urs(newStr("title"), newStr(err.toString()))),
                new Vec(this, urs(newStr("value"), newStr(err.toString()))));
        result = newDict(dict_arr);
    }
    return result;
}