Example usage for java.lang Thread getName

List of usage examples for java.lang Thread getName

Introduction

On this page you can find example usages of java.lang.Thread#getName.

Prototype

public final String getName() 

Source Link

Document

Returns this thread's name.

Usage

From source file:com.nuvolect.deepdive.probe.DecompileApk.java

/**
 * Unpacks the target APK on a dedicated background thread: extracts every
 * entry (binary XML decoded via apk-parser, including AndroidManifest.xml),
 * reports extracted DEX files and certificate/signature details to the
 * progress stream, and seeds helper files for the optimize-DEX task.
 * Returns immediately; the returned wrapper describes the launched thread.
 *
 * @return JSONObject with key "unpack_apk_thread" mapped to the thread status
 */
private JSONObject unpackApk() {

    final Thread.UncaughtExceptionHandler uncaughtExceptionHandler = new Thread.UncaughtExceptionHandler() {
        @Override
        public void uncaughtException(Thread t, Throwable e) {

            LogUtil.log(LogUtil.LogType.DECOMPILE, "Uncaught exception: " + e.toString());
            // Bug fix: label the thread name as such rather than presenting it
            // as the exception text in the progress stream.
            m_progressStream.putStream("Uncaught exception in thread: " + t.getName());
            m_progressStream.putStream("Uncaught exception: " + e.toString());
        }
    };

    m_unpack_apk_time = System.currentTimeMillis(); // Save start time for tracking

    m_unpackApkThread = new Thread(m_threadGroup, new Runnable() {
        @Override
        public void run() {
            boolean success = false;
            try {

                m_progressStream = new ProgressStream(
                        new OmniFile(m_volumeId, m_appFolderPath + "unpack_apk_log.txt"));
                m_progressStream.putStream("Unpack APK starting");
                if (m_apkFile.exists() && m_apkFile.isFile()) {

                    // Extract all files except for XML, to be extracted later
                    success = ApkZipUtil.unzipAllExceptXML(m_apkFile, m_appFolder, m_progressStream);

                    ApkParser apkParser = ApkParser.create(m_apkFile.getStdFile());
                    ArrayList<OmniFile> dexFiles = new ArrayList<>();

                    // Get a list of all files in the APK and iterate and extract by type
                    List<String> paths = OmniZip.getFilesList(m_apkFile);
                    for (String path : paths) {

                        OmniFile file = new OmniFile(m_volumeId, m_appFolderPath + path);
                        OmniUtil.forceMkdirParent(file);

                        String extension = FilenameUtils.getExtension(path);

                        if (extension.contentEquals("xml")) {

                            // Binary XML must be translated to plain text before writing
                            String xml = apkParser.transBinaryXml(path);
                            OmniUtil.writeFile(file, xml);
                            m_progressStream.putStream("Translated: " + path);
                        }
                        if (extension.contentEquals("dex")) {
                            dexFiles.add(file);
                        }
                    }
                    paths = null; // Release memory

                    // Write over manifest with unencoded version
                    String manifestXml = apkParser.getManifestXml();
                    OmniFile manifestFile = new OmniFile(m_volumeId, m_appFolderPath + "AndroidManifest.xml");
                    OmniUtil.writeFile(manifestFile, manifestXml);
                    m_progressStream.putStream("Translated and parsed: " + "AndroidManifest.xml");

                    for (OmniFile f : dexFiles) {

                        String formatted_count = String.format(Locale.US, "%,d", f.length()) + " bytes";
                        m_progressStream.putStream("DEX extracted: " + f.getName() + ": " + formatted_count);
                    }
                    dexFiles = new ArrayList<>();// Release memory

                    CertificateMeta cm = null;
                    try {
                        cm = apkParser.getCertificateMeta();
                        m_progressStream.putStream("Certficate base64 MD5: " + cm.certBase64Md5);
                        m_progressStream.putStream("Certficate MD5: " + cm.certMd5);
                        m_progressStream.putStream("Sign algorithm OID: " + cm.signAlgorithmOID);
                        m_progressStream.putStream("Sign algorithm: " + cm.signAlgorithm);

                    } catch (Exception e1) {
                        // Best effort: certificate details are informational only
                        e1.printStackTrace();
                    }

                    m_progressStream.putStream("ApkSignStatus: " + apkParser.verifyApk());

                    /*
                     * Create a file for the user to include classes to omit in the optimize DEX task.
                     */
                    OmniFile optimizedDexOF = new OmniFile(m_volumeId,
                            m_appFolderPath + OPTIMIZED_CLASSES_EXCLUSION_FILENAME);
                    if (!optimizedDexOF.exists()) {

                        String assetFilePath = CConst.ASSET_DATA_FOLDER + OPTIMIZED_CLASSES_EXCLUSION_FILENAME;
                        // Bug fix: reuse the existing handle instead of constructing
                        // an identical second OmniFile.
                        OmniUtil.copyAsset(m_ctx, assetFilePath, optimizedDexOF);

                        m_progressStream.putStream("File created: " + OPTIMIZED_CLASSES_EXCLUSION_FILENAME);
                    }
                    /*
                     * Create a README file for the user.
                     */
                    OmniFile README_file = new OmniFile(m_volumeId, m_appFolderPath + README_FILENAME);
                    if (!README_file.exists()) {

                        String assetFilePath = CConst.ASSET_DATA_FOLDER + README_FILENAME;
                        // Bug fix: reuse the existing handle instead of constructing
                        // an identical second OmniFile.
                        OmniUtil.copyAsset(m_ctx, assetFilePath, README_file);

                        m_progressStream.putStream("File created: " + README_FILENAME);
                    }
                } else {
                    m_progressStream.putStream("APK not found. Select Extract APK.");
                }

            } catch (Exception | StackOverflowError e) {
                // StackOverflowError is caught deliberately: apk-parser can
                // recurse deeply on hostile/obfuscated binary XML.
                m_progressStream.putStream(e.toString());
            }
            String time = TimeUtil.deltaTimeHrMinSec(m_unpack_apk_time);
            m_unpack_apk_time = 0;
            if (success) {
                m_progressStream.putStream("Unpack APK complete: " + time);
            } else {
                m_progressStream.putStream("Unpack APK failed: " + time);
            }
            m_progressStream.close();

        }
    }, UNZIP_APK_THREAD, STACK_SIZE);

    m_unpackApkThread.setPriority(Thread.MAX_PRIORITY);
    m_unpackApkThread.setUncaughtExceptionHandler(uncaughtExceptionHandler);
    m_unpackApkThread.start();

    final JSONObject wrapper = new JSONObject();
    try {
        wrapper.put("unpack_apk_thread", getThreadStatus(true, m_unpackApkThread));

    } catch (JSONException e) {
        LogUtil.logException(LogUtil.LogType.DECOMPILE, e);
    }

    return wrapper;
}

From source file:com.rapid.server.RapidServletContextListener.java

/**
 * Shuts the Rapid web application down: stops the page monitor, closes all
 * application versions, deregisters JDBC drivers (avoiding Tomcat memory-leak
 * warnings), force-stops the MySQL abandoned-connection cleanup thread, and
 * finally shuts down the logger.
 *
 * @param event supplies the ServletContext whose applications are closed
 */
@Override
public void contextDestroyed(ServletContextEvent event) {

    _logger.info("Shutting down...");

    // interrupt the page monitor if we have one
    if (_monitor != null)
        _monitor.interrupt();

    // get the servletContext
    ServletContext servletContext = event.getServletContext();

    // get all of the applications
    Applications applications = (Applications) servletContext.getAttribute("applications");
    // if we got some
    if (applications != null) {
        // loop the application ids
        for (String id : applications.getIds()) {
            // get the versions of this application
            Versions versions = applications.getVersions(id);
            // loop the versions of each app
            for (String version : versions.keySet()) {
                // get the application
                Application application = applications.get(id, version);
                // have it close any sensitive resources
                application.close(servletContext);
            }
        }
    }

    // sleep for 2 seconds to allow any database connection cleanup to complete
    try {
        Thread.sleep(2000);
    } catch (InterruptedException ex) {
        // Bug fix: restore the interrupt status instead of silently swallowing
        // it, so the container can still observe the interruption.
        Thread.currentThread().interrupt();
    }

    // This manually deregisters JDBC drivers, which prevents Tomcat from complaining about memory leaks from this class
    Enumeration<Driver> drivers = DriverManager.getDrivers();
    while (drivers.hasMoreElements()) {
        Driver driver = drivers.nextElement();
        try {
            DriverManager.deregisterDriver(driver);
            _logger.info(String.format("Deregistering jdbc driver: %s", driver));
        } catch (SQLException e) {
            _logger.error(String.format("Error deregistering driver %s", driver), e);
        }
    }

    // Thanks to http://stackoverflow.com/questions/11872316/tomcat-guice-jdbc-memory-leak
    Set<Thread> threadSet = Thread.getAllStackTraces().keySet();
    Thread[] threadArray = threadSet.toArray(new Thread[threadSet.size()]);
    for (Thread t : threadArray) {
        if (t.getName().contains("Abandoned connection cleanup thread")) {
            synchronized (t) {
                try {
                    _logger.info("Forcing stop of Abandoned connection cleanup thread");
                    // Deliberate use of deprecated Thread.stop(): this driver
                    // thread has no interruption point and would otherwise leak.
                    t.stop(); //don't complain, it works
                } catch (Exception ex) {
                    _logger.info("Error forcing stop of Abandoned connection cleanup thread", ex);
                }
            }
        }
    }

    // sleep for 1 second to allow any database connection cleanup to complete
    try {
        Thread.sleep(1000);
    } catch (InterruptedException ex) {
        // Bug fix: restore the interrupt status instead of swallowing it.
        Thread.currentThread().interrupt();
    }

    // last log
    _logger.info("Logger shutdown");
    // shutdown logger
    if (_logger != null)
        LogManager.shutdown();

}

From source file:org.sakaiproject.status.StatusServlet.java

/**
 * Writes one CSV-style line per live JVM thread to the response: group, id,
 * name, priority, state, liveness/daemon/interrupt flags, and the top two
 * stack frames (or "?,?,?,?" when the stack is unavailable).
 *
 * @param response target for the plain-text report
 * @throws Exception if the response writer cannot be obtained
 */
protected void reportThreadDetails(HttpServletResponse response) throws Exception {
    PrintWriter pw = response.getWriter();

    for (Thread thread : findAllThreads()) {
        if (thread != null) {
            String threadLocation = "";
            try {
                // Bug fix: snapshot the stack once - two getStackTrace() calls
                // are expensive and may return inconsistent snapshots.
                StackTraceElement[] stack = thread.getStackTrace();
                StackTraceElement ste = stack[0];
                StackTraceElement ste2 = stack[1];
                threadLocation = ste.getClassName() + "." + ste.getMethodName() + "()," + ste.getFileName()
                        + ":" + ste.getLineNumber() + "," + ste2.getClassName() + "." + ste2.getMethodName()
                        + "()," + ste2.getFileName() + ":" + ste2.getLineNumber();
            } catch (Exception e) {
                threadLocation = "?,?,?,?";
            }
            // Bug fix: getThreadGroup() returns null once a thread has
            // terminated; guard against the NPE.
            ThreadGroup group = thread.getThreadGroup();
            String groupName = (group == null) ? "" : group.getName();
            pw.print(groupName + "," + thread.getId() + "," + thread.getName() + ","
                    + thread.getPriority() + "," + thread.getState().name() + ","
                    + (thread.isAlive() ? "" : "notalive") + "," + (thread.isDaemon() ? "daemon" : "") + ","
                    + (thread.isInterrupted() ? "interrupted" : "") + "," + threadLocation + "\n");
        }
    }
}

From source file:com.microsoft.tfs.util.process.ProcessRunner.java

/**
 * Starts another process to run the commands this runner was constructed
 * with. Blocks until the process exits or {@link #interrupt()} is invoked.
 *
 * @see Runnable#run()
 */
@Override
public void run() {
    // Enforce single-use: a runner's state machine only moves forward from NEW.
    synchronized (this) {
        if (state != ProcessRunnerState.NEW) {
            throw new IllegalStateException("Can only run a ProcessRunner once"); //$NON-NLS-1$
        }
    }

    /*
     * If the commands were empty, we can skip the process creation which
     * may be heavy on some platforms and simply report a success.
     */
    if (commands.length == 0) {
        synchronized (this) {
            exitCode = 0;
            state = ProcessRunnerState.COMPLETED;
        }

        notifyTerminalState();
        return;
    }

    try {
        // Launch the child; RUNNING is only entered once exec() succeeds.
        synchronized (this) {
            process = Runtime.getRuntime().exec(commands, environment, workingDirectory);
            state = ProcessRunnerState.RUNNING;
        }
    } catch (final IOException e) {
        synchronized (this) {
            error = e;
            state = ProcessRunnerState.EXEC_FAILED;
        }

        notifyTerminalState();
        return;
    }

    /*
     * If we do not pump (read) the child process's streams (standard out
     * and standard error), Windows will cause the child to block if it
     * writes more than a small amount of output (512 bytes or chars [not
     * sure which] in our testing).
     *
     * If the user of this runner is interested in the child's output, we
     * have to service the streams in other threads in order to prevent
     * becoming blind to an interruption delivered to this thread.
     * Specifically, reading from these streams is a blocking task, and
     * there is no way for the user to interrupt us while we block. If we
     * launch other threads, we arrive at process.waitFor() quickly in this
     * thread and can accept the interruption, then interrupt the readers.
     */

    final Thread outputReaderThread = new Thread(
            new ProcessOutputReader(process.getInputStream(), capturedStandardOutput));

    // Give each reader a unique, identifiable name for diagnostics.
    String messageFormat = "Standard Output Reader {0}"; //$NON-NLS-1$
    String message = MessageFormat.format(messageFormat, Long.toString(getNewThreadID()));
    outputReaderThread.setName(message);
    outputReaderThread.start();

    messageFormat = "Started IO waiter thread '{0}'"; //$NON-NLS-1$
    message = MessageFormat.format(messageFormat, outputReaderThread.getName());
    log.debug(message);

    final Thread errorReaderThread = new Thread(
            new ProcessOutputReader(process.getErrorStream(), capturedStandardError));

    messageFormat = "Standard Error Reader {0}"; //$NON-NLS-1$
    message = MessageFormat.format(messageFormat, Long.toString(getNewThreadID()));
    errorReaderThread.setName(message);
    errorReaderThread.start();

    messageFormat = "Started IO waiter thread '{0}'"; //$NON-NLS-1$
    message = MessageFormat.format(messageFormat, errorReaderThread.getName());
    log.debug(message);

    int ret;
    try {
        /*
         * We must not hold the lock on this while we wait on the child, or
         * we could not be interrupted.
         */
        ret = process.waitFor();
    } catch (final InterruptedException e) {
        log.debug("Normal interruption, interrupting all IO readers"); //$NON-NLS-1$

        /*
         * We must join on all IO readers before entering a terminal state
         * to prevent the reader threads from later writing to their
         * streams. This method also performs the immediate interrupt.
         *
         * Ignore if there was an error joining because we just want to
         * terminate as INTERRUPTED anyway.
         */
        joinReaders(new Thread[] { outputReaderThread, errorReaderThread }, true);

        /*
         * This is the normal abort scenario. No exit code is available and
         * no error occurred.
         */
        synchronized (this) {
            state = ProcessRunnerState.INTERRUPTED;
        }

        notifyTerminalState();
        return;
    }

    /*
     * If we launched output reader threads, we have to wait for them to
     * complete here. This is usually a short wait because once we're this
     * far, process.waitFor() has finished so the readers will be reaching
     * the end of their input streams soon (and terminating).
     *
     * If we get an error back from the join, we want to consider this
     * entire runner INTERRUPTED because we can't trust the output streams
     * to have the entire contents of the process.
     */

    if (joinReaders(new Thread[] { outputReaderThread, errorReaderThread }, false) == false) {
        log.error("Error joining IO reader threads, setting INTERRUPTED"); //$NON-NLS-1$

        synchronized (this) {
            state = ProcessRunnerState.INTERRUPTED;
        }

        notifyTerminalState();
        return;
    }

    /*
     * Now that we have joined the IO reader threads, we can close the close
     * the streams in order to prevent Java from leaking the handles.
     */

    try {
        process.getOutputStream().close();
        process.getInputStream().close();
        process.getErrorStream().close();
    } catch (final IOException e) {
        /*
         * This exception is from Stream.close().
         *
         * A failure to configure the output streams is a critical error and
         * should be treated as a failure to launch the process. Setting
         * different state could cause the user to trust that his process
         * which returned a 0 exit code also printed no error text when it
         * actually did (and therefore failed).
         */

        log.error("Error closing child process's output streams after join, setting INTERRUPTED", e); //$NON-NLS-1$

        synchronized (this) {
            state = ProcessRunnerState.INTERRUPTED;
        }

        notifyTerminalState();
        return;
    }

    // Normal completion: publish the exit code and terminal state together.
    synchronized (this) {
        exitCode = ret;
        state = ProcessRunnerState.COMPLETED;
    }

    notifyTerminalState();
}

From source file:net.sf.ehcache.distribution.RMICacheReplicatorTest.java

/**
 * {@inheritDoc}//from w w w.j  ava2s.co m
 *
 * @throws Exception
 */
/**
 * Shuts down every cache manager created by the test, waits for replication
 * to wind down, then verifies that no replication threads survived shutdown.
 *
 * @throws Exception on shutdown or sleep failure
 */
protected void tearDown() throws Exception {

    if (JVMUtil.isSingleRMIRegistryPerVM()) {
        return;
    }

    // Shut down each manager that was actually created by the test.
    if (manager1 != null) {
        manager1.shutdown();
    }
    if (manager2 != null) {
        manager2.shutdown();
    }
    if (manager3 != null) {
        manager3.shutdown();
    }
    if (manager4 != null) {
        manager4.shutdown();
    }
    if (manager5 != null) {
        manager5.shutdown();
    }
    if (manager6 != null) {
        manager6.shutdown();
    }

    // Give replication machinery time to terminate before checking threads.
    Thread.sleep(5000);

    for (Object element : JVMUtil.enumerateThreads()) {
        Thread candidate = (Thread) element;
        if ("Replication Thread".equals(candidate.getName())) {
            fail("There should not be any replication threads running after shutdown");
        }
    }

}

From source file:uk.ac.gla.terrier.probos.controller.ControllerServer.java

/**
 * Interrupts the given thread, if running, and blocks until it terminates.
 *
 * @param t thread to stop; ignored when null or not alive
 */
private void closeThread(Thread t) {
    if (t != null && t.isAlive()) {
        LOG.info("Stopping " + t.getName());
        t.interrupt();
        try {
            t.join();
        } catch (InterruptedException ex) {
            // Bug fix: log through LOG instead of printStackTrace(), and
            // restore the interrupt status so callers can observe it.
            Thread.currentThread().interrupt();
            LOG.warn("Interrupted while joining " + t.getName(), ex);
        }
    }
}

From source file:com.android.exchange.SyncManager.java

/**
 * Reloads the EAS folder list for an account: resets its push/ping mailboxes
 * to push/hold, stops and abandons the running account-mailbox sync service,
 * and kicks the sync manager so it restarts naturally.
 *
 * @param context   caller's context, used for ContentResolver access
 * @param accountId the account whose folder list is reloaded
 * @param force     reload even when the account has no sync key yet
 */
static public void reloadFolderList(Context context, long accountId, boolean force) {
    SyncManager syncManager = INSTANCE;
    if (syncManager == null)
        return;
    Cursor c = context.getContentResolver().query(Mailbox.CONTENT_URI, Mailbox.CONTENT_PROJECTION,
            MailboxColumns.ACCOUNT_KEY + "=? AND " + MailboxColumns.TYPE + "=?",
            new String[] { Long.toString(accountId), Long.toString(Mailbox.TYPE_EAS_ACCOUNT_MAILBOX) }, null);
    // Bug fix: ContentResolver.query() may return null; guard before
    // dereferencing (c.moveToFirst()) or closing the cursor.
    if (c == null)
        return;
    try {
        if (c.moveToFirst()) {
            synchronized (sSyncLock) {
                Mailbox m = new Mailbox().restore(c);
                Account acct = Account.restoreAccountWithId(context, accountId);
                if (acct == null) {
                    reloadFolderListFailed(accountId);
                    return;
                }
                String syncKey = acct.mSyncKey;
                // No need to reload the list if we don't have one
                if (!force && (syncKey == null || syncKey.equals("0"))) {
                    reloadFolderListFailed(accountId);
                    return;
                }

                // Change all ping/push boxes to push/hold
                ContentValues cv = new ContentValues();
                cv.put(Mailbox.SYNC_INTERVAL, Mailbox.CHECK_INTERVAL_PUSH_HOLD);
                context.getContentResolver().update(Mailbox.CONTENT_URI, cv,
                        WHERE_PUSH_OR_PING_NOT_ACCOUNT_MAILBOX, new String[] { Long.toString(accountId) });
                log("Set push/ping boxes to push/hold");

                long id = m.mId;
                AbstractSyncService svc = syncManager.mServiceMap.get(id);
                // Tell the service we're done
                if (svc != null) {
                    synchronized (svc.getSynchronizer()) {
                        svc.stop();
                    }
                    // Interrupt the thread so that it can stop
                    Thread thread = svc.mThread;
                    thread.setName(thread.getName() + " (Stopped)");
                    thread.interrupt();
                    // Abandon the service
                    syncManager.releaseMailbox(id);
                    // And have it start naturally
                    kick("reload folder list");
                }
            }
        }
    } finally {
        c.close();
    }
}

From source file:com.cloudbees.hudson.plugins.folder.AbstractFolder.java

/**
 * {@inheritDoc}
 */
@Override
public void onLoad(ItemGroup<? extends Item> parent, String name) throws IOException {
    super.onLoad(parent, name);
    init();
    // Remember the loading thread's name; it is temporarily rewritten below
    // to surface per-job loading progress, and must be restored on exit.
    final Thread t = Thread.currentThread();
    String n = t.getName();
    try {
        if (items == null) {
            // When Jenkins is getting reloaded, we want children being loaded to be able to find existing items that they will be overriding.
            // This is necessary for them to correctly keep the running builds, for example.
            // ItemGroupMixIn.loadChildren handles the rest of this logic.
            Item current = parent.getItem(name);
            if (current != null && current.getClass() == getClass()) {
                this.items = ((AbstractFolder) current).items;
            }
        }

        final ChildNameGenerator<AbstractFolder<I>, I> childNameGenerator = childNameGenerator();
        items = loadChildren(this, getJobsDir(), new Function1<String, I>() {
            @Override
            public String call(I item) {
                String fullName = item.getFullName();
                // Expose progress in the thread name so thread dumps show
                // which job is currently being loaded.
                t.setName("Loading job " + fullName);
                float percentage = 100.0f * jobEncountered.incrementAndGet() / Math.max(1, jobTotal.get());
                long now = System.currentTimeMillis();
                // Throttle progress logging to at most one line per TICK_INTERVAL.
                if (loadingTick == 0) {
                    loadingTick = now;
                } else if (now - loadingTick > TICK_INTERVAL) {
                    LOGGER.log(Level.INFO, String.format("Loading job %s (%.1f%%)", fullName, percentage));
                    loadingTick = now;
                }
                // Fall back through the generator chain to derive the child's
                // on-disk name; item.getName() is the legacy default.
                if (childNameGenerator == null) {
                    return item.getName();
                } else {
                    String name = childNameGenerator.itemNameFromItem(AbstractFolder.this, item);
                    if (name == null) {
                        return childNameGenerator.itemNameFromLegacy(AbstractFolder.this, item.getName());
                    }
                    return name;
                }
            }
        });
    } finally {
        // Always restore the original thread name, even on load failure.
        t.setName(n);
    }
}

From source file:org.craftercms.studio.impl.v1.repository.alfresco.AlfrescoContentRepository.java

/**
 * When debug logging is enabled, logs the current thread name, the HTTP
 * request (or cron-job marker) driving this call, and up to 10 stack frames
 * of the caller (skipping the two frames belonging to this utility).
 */
private void addDebugStack() {
    if (logger.getLevel().equals(Logger.LEVEL_DEBUG)) {
        Thread thread = Thread.currentThread();
        String threadName = thread.getName();
        logger.debug("Thread: " + threadName);
        StackTraceElement[] stackTraceElements = thread.getStackTrace();
        StringBuilder sbStack = new StringBuilder();
        // Bug fix: the old computation used the full array length for short
        // traces, so the loop below indexed past the end of the array and
        // threw ArrayIndexOutOfBoundsException. Clamp to the frames that
        // actually exist beyond the two skipped here.
        int stackSize = Math.min(10, stackTraceElements.length - 2);
        for (int i = 2; i < stackSize + 2; i++) {
            sbStack.append("\n\t").append(stackTraceElements[i].toString());
        }
        RequestContext context = RequestContext.getCurrent();
        CronJobContext cronJobContext = CronJobContext.getCurrent();
        if (context != null) {
            HttpServletRequest request = context.getRequest();
            String url = request.getRequestURI() + "?" + request.getQueryString();
            logger.debug("Http request: " + url);
        } else if (cronJobContext != null) {
            logger.debug("Cron Job");

        }
        logger.debug("TRACE: Stack trace (depth 10): " + sbStack.toString());
    }
}

From source file:org.apache.hadoop.hdfs.server.namenode.FSImage.java

/**
 * Blocks until every thread in the list has terminated. An interrupt during
 * a join is logged and the join is retried, because the caller requires all
 * threads to finish; the interrupt status is restored once waiting is done.
 *
 * @param threads threads to join; each is waited on until no longer alive
 */
private void waitForThreads(List<Thread> threads) {
    boolean interrupted = false;
    for (Thread thread : threads) {
        while (thread.isAlive()) {
            try {
                thread.join();
            } catch (InterruptedException iex) {
                // Remember the interrupt but keep waiting: the image save is
                // not complete until every thread has finished.
                interrupted = true;
                LOG.error("Caught interrupted exception while waiting for thread " + thread.getName()
                        + " to finish. Retrying join");
            }
        }
    }
    if (interrupted) {
        // Bug fix: restore the interrupt status swallowed by the retry loop
        // so callers can still observe that an interrupt occurred.
        Thread.currentThread().interrupt();
    }
}