Example usage for java.lang Thread isAlive

List of usage examples for java.lang Thread isAlive

Introduction

On this page you can find example usage for java.lang Thread.isAlive().

Prototype

public final native boolean isAlive();

Document

Tests if this thread is alive.
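
As a quick, self-contained sketch (not taken from any of the source files below), the following shows the isAlive() life cycle: false before start(), true while the thread is running, and false again once the thread has terminated.

public class IsAliveDemo {
    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            try {
                Thread.sleep(100); // simulate a little work
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        System.out.println(worker.isAlive()); // false: not yet started
        worker.start();
        System.out.println(worker.isAlive()); // true: started and still running
        worker.join();                        // wait for the thread to die
        System.out.println(worker.isAlive()); // false: terminated
    }
}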

Usage

From source file:org.apache.hadoop.hbase.TestMetaTableLocator.java

/**
 * Test that a blocking wait on meta is interruptible.
 * @throws IOException
 * @throws ServiceException
 * @throws InterruptedException
 */
@Test
public void testInterruptWaitOnMeta() throws IOException, InterruptedException, ServiceException {
    final ClientProtos.ClientService.BlockingInterface client = Mockito
            .mock(ClientProtos.ClientService.BlockingInterface.class);

    Mockito.when(client.get((RpcController) Mockito.any(), (GetRequest) Mockito.any()))
            .thenReturn(GetResponse.newBuilder().build());

    final MetaTableLocator mtl = new MetaTableLocator();
    ServerName meta = new MetaTableLocator().getMetaRegionLocation(this.watcher);
    assertNull(meta);
    Thread t = new Thread() {
        @Override
        public void run() {
            try {
                mtl.waitMetaRegionLocation(watcher);
            } catch (InterruptedException e) {
                throw new RuntimeException("Interrupted", e);
            }
        }
    };
    t.start();
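    // Spin until the started thread is reported alive.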
    while (!t.isAlive())
        Threads.sleep(1);
    Threads.sleep(1);
    assertTrue(t.isAlive());
    mtl.stop();
    // Join the thread... should exit shortly.
    t.join();
}

From source file:com.strategicgains.docussandra.controller.perf.remote.parent.PerfTestParent.java

public void loadData() throws IOException, ParseException, InterruptedException {
    logger.info("------------Loading Data into: " + this.getDb().name() + " with Docussandra!------------");
    ArrayList<Thread> workers = new ArrayList<>(NUM_WORKERS + 1);
    int numDocs = getNumDocuments();
    int docsPerWorker = numDocs / NUM_WORKERS;
    try {
        List<Document> docs = getDocumentsFromFS();
        ArrayList<List<Document>> documentQueues = new ArrayList<>(NUM_WORKERS + 1);
        int numDocsAssigned = 0;
        while ((numDocsAssigned + 1) < numDocs) {
            int start = numDocsAssigned;
            int end = numDocsAssigned + docsPerWorker;
            if (end > numDocs) {
                end = numDocs - 1;
            }
            documentQueues.add(new ArrayList(docs.subList(start, end)));
            numDocsAssigned = end;
        }
        for (final List<Document> queue : documentQueues) {
            workers.add(new Thread() {
                @Override
                public void run() {
                    for (Document d : queue) {
                        //logger.debug("Processing document: " + d.toString());
                        postDocument(getDb(), getTb(), d);
                    }
                    logger.info("Thread " + Thread.currentThread().getName() + " is done.");
                }
            });
        }
    } catch (UnsupportedOperationException e) { // we can't read everything in at once
        //all we need to do in this block is find a way to set "workers"
        for (int i = 0; i < NUM_WORKERS; i++) {
            workers.add(new Thread() {
                private final int chunk = (int) (Math.random() * 100) + 150; // pick a random chunk size so the workers don't all go back to the FS at the same time and cause a bottleneck

                @Override
                public void run() {
                    ThreadLocal<Integer> counter = new ThreadLocal<>();
                    counter.set(0);
                    try {
                        List<Document> docs = getDocumentsFromFS(chunk); // grab a handful of documents
                        while (docs.size() > 0) {
                            for (Document d : docs) { // process the documents we grabbed
                                //logger.debug("Processing document: " + d.toString());
                                postDocument(getDb(), getTb(), d); // post them up
                                counter.set(counter.get() + 1);
                            }
                            docs = getDocumentsFromFS(chunk); // grab another handful of documents
                        }
                        logger.info("Thread " + Thread.currentThread().getName() + " is done. It processed "
                                + counter.get() + " documents.");
                    } catch (IOException | ParseException e) {
                        logger.error("Couldn't read from document", e);
                    }
                }
            });
        }
    }

    //long start = new Date().getTime();
    StopWatch sw = new StopWatch();
    sw.start();
    //start your threads!
    for (Thread t : workers) {
        t.start();
    }
    logger.info("All threads started, waiting for completion.");
    boolean allDone = false;
    boolean first = true;
    while (!allDone || first) {
        first = false;
        boolean done = true;
        for (Thread t : workers) {
            if (t.isAlive()) {
                done = false;
                logger.info("Thread " + t.getName() + " is still running.");
                break;
            }
        }
        if (done) {
            allDone = true;
            sw.stop();
        } else {
            logger.info("We still have workers running...");
            Thread.sleep(5000);
        }
    }

    long milliseconds = sw.getTime();
    double seconds = (double) milliseconds / 1000d;
    output.info("Doc: Done loading data using: " + NUM_WORKERS + " workers and URL: " + BASE_URI + ". Took: " + seconds
            + " seconds");
    double tpms = (double) numDocs / (double) milliseconds;
    double tps = tpms * 1000;
    double transactionTime = (double) milliseconds / (double) numDocs;
    output.info(this.getDb().name() + " Doc: Average Transactions Per Second: " + tps);
    output.info(this.getDb().name() + " Doc: Average Transaction Time (in milliseconds): " + transactionTime);
    Thread.sleep(100000); // sleep a bit to let the DB digest that before trying anything else
}

From source file:org.apache.hadoop.hbase.catalog.TestCatalogTracker.java

/**
 * Test that a blocking wait on meta is interruptible.
 * @throws IOException
 * @throws ServiceException
 * @throws InterruptedException
 */
@Test
public void testInterruptWaitOnMeta() throws IOException, InterruptedException, ServiceException {
    final ClientProtos.ClientService.BlockingInterface client = Mockito
            .mock(ClientProtos.ClientService.BlockingInterface.class);
    HConnection connection = mockConnection(null, client);

    Mockito.when(client.get((RpcController) Mockito.any(), (GetRequest) Mockito.any()))
            .thenReturn(GetResponse.newBuilder().build());
    final CatalogTracker ct = constructAndStartCatalogTracker(connection);
    ServerName meta = ct.getMetaLocation();
    Assert.assertNull(meta);
    Thread t = new Thread() {
        @Override
        public void run() {
            try {
                ct.waitForMeta();
            } catch (InterruptedException e) {
                throw new RuntimeException("Interrupted", e);
            }
        }
    };
    t.start();
    while (!t.isAlive())
        Threads.sleep(1);
    Threads.sleep(1);
    assertTrue(t.isAlive());
    ct.stop();
    // Join the thread... should exit shortly.
    t.join();
}

From source file:net.sf.taverna.t2.servicedescriptions.impl.ServiceDescriptionRegistryImpl.java

private void updateServiceDescriptions(boolean refreshAll, boolean waitFor) {
    List<Thread> threads = new ArrayList<>();
    for (ServiceDescriptionProvider provider : getServiceDescriptionProviders()) {
        synchronized (providerDescriptions) {
            if (providerDescriptions.containsKey(provider) && !refreshAll)
                // We'll use the cached values
                continue;
            Thread oldThread = serviceDescriptionThreads.get(provider);
            if (oldThread != null && oldThread.isAlive()) {
                if (refreshAll)
                    // New thread will override the old thread
                    oldThread.interrupt();
                else {
                    // observers.notify(new ProviderStatusNotification(provider, "Waiting for provider"));
                    continue;
                }
            }
            // Not run yet - we'll start a new thread
            Thread thread = new FindServiceDescriptionsThread(provider);
            threads.add(thread);
            serviceDescriptionThreads.put(provider, thread);
            thread.start();
        }
    }
    if (waitFor)
        joinThreads(threads, DESCRIPTION_THREAD_TIMEOUT_MS);
}

From source file:com.samknows.measurement.test.TestExecutor.java

public void executeTest(TestDescription td, TestResult result) {
    try {
        List<Param> params = tc.paramsManager.prepareParams(td.params);

        executingTest = TestFactory.create(td.type, params);
        if (executingTest != null) {
            getPartialResult();
            Logger.d(TestExecutor.class, "start to execute test: " + td.displayName);
            showNotification(tc.getString(R.string.ntf_running_test) + td.displayName);

            //execute the test in a new thread and kill it if it doesn't terminate after
            //Constants.WAIT_TEST_BEFORE_ABORT
            Thread t = new Thread(new Runnable() {
                @Override
                public void run() {
                    executingTest.execute();
                }
            });
            t.start();
            t.join(Constants.WAIT_TEST_BEFORE_ABORT);
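            // join() returned: either the thread finished or the timeout elapsed.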
            if (t.isAlive()) {
                Logger.e(this, "Test is still runnuing after " + Constants.WAIT_TEST_BEFORE_ABORT / 1000
                        + " seconds.");
                t.interrupt();
                t = null;
            } else {
                lastTestBytes = executingTest.getNetUsage();
                result.isSuccess = executingTest.isSuccessful();
                String out = executingTest.getOutputString();
                result.addTestString(out);
                rc.addTest(executingTest.getJSONResult());
                // HACK TO INCLUDE THE JUDPJITTER RESULTS
                if (td.type.equalsIgnoreCase("latency")) {
                    String[] judp = executingTest.getOutputFields();
                    DCSStringBuilder jjitter = new DCSStringBuilder();
                    String jitter = "" + (Integer.parseInt(judp[5]) - Integer.parseInt(judp[6]));
                    String sent = "" + (Integer.parseInt(judp[9]) + Integer.parseInt(judp[10]));
                    String received = "" + (Integer.parseInt(judp[9]) - Integer.parseInt(judp[10]));
                    jjitter.append("JUDPJITTER");
                    jjitter.append(judp[1]); // TIMESTAMP
                    jjitter.append(judp[2]); // STATUS
                    jjitter.append(judp[3]); // TARGET
                    jjitter.append(judp[4]); // TARGET IP ADDRESS
                    jjitter.append(128); // PACKETSIZE
                    jjitter.append(0); // BITRATE
                    jjitter.append(0); // DURATION
                    jjitter.append(sent); // PACKETS SENT UP
                    jjitter.append(sent); // PACKETS SENT DOWN
                    jjitter.append(received); // PACKETS RECEIVED UP
                    jjitter.append(received); // PACKETS RECEIVED DOWN
                    jjitter.append(jitter); // JITTER UP
                    jjitter.append(jitter); // JITTER DOWN
                    jjitter.append(judp[5]); // AVERAGE RTT
                    result.addTestString(jjitter.build());
                }

                if (result.isSuccess) {
                    tc.paramsManager.processOutParams(out, td.outParamsDescription);
                    if (executingTest.getHumanReadable() != null) {
                        HashMap<String, String> last_values = executingTest.getHumanReadable().getValues();
                        for (String key : last_values.keySet()) {
                            String value = last_values.get(key);
                            Logger.d(TestExecutor.class, "last_" + key + " " + value);
                            AppSettings.getInstance().saveString("last_" + key, value);
                        }
                    }
                }

                Logger.d(TAG, "finished execution test: " + td.type);
            }
        } else {
            Logger.e(TAG, "Can't find test for: " + td.type, new RuntimeException());
            result.isSuccess = false;
        }
    } catch (Throwable e) {
        Logger.e(this, "Error in executing the test. ", e);
        result.isSuccess = false;
    } finally {
        cancelNotification();
    }
}

From source file:net.sourceforge.vulcan.jabber.SmackKeepAliveThreadInterrupter.java

public void interrupt() {
    final ThreadGroup group = Thread.currentThread().getThreadGroup();

    final Thread[] threads = new Thread[group.activeCount()];

    group.enumerate(threads);

    for (Thread thread : threads) {
        if (!thread.getName().startsWith("Smack Keep Alive")) {
            continue;
        }

        if (!thread.getContextClassLoader().equals(getClass().getClassLoader())) {
            // only wake up threads from our own class loader
            LOG.info("Not waking up " + thread.getName() + " because it uses a different class loader.");
            continue;
        }

        LOG.info("Interrupting " + thread.getName());

        thread.interrupt();

        try {
            thread.join(1000);
        } catch (InterruptedException ignore) {
        }

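        // If the thread survived the interrupt and the one-second join, log an error.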
        if (thread.isAlive()) {
            LOG.error("Smack Keep Alive thread still alive after interruption.");
        }
    }
}

From source file:org.apache.hadoop.hdfs.TestBlockReaderFactory.java

/**
 * When an InterruptedException is sent to a thread calling
 * FileChannel#read, the FileChannel is immediately closed and the
 * thread gets an exception.  This effectively means that we might have
 * someone asynchronously calling close() on the file descriptors we use
 * in BlockReaderLocal.  So when unreferencing a ShortCircuitReplica in
 * ShortCircuitCache#unref, we should check if the FileChannel objects
 * are still open.  If not, we should purge the replica to avoid giving
 * it out to any future readers.
 *
 * This is a regression test for HDFS-6227: Short circuit read failed
 * due to ClosedChannelException.
 *
 * Note that you may still get ClosedChannelException errors if two threads
 * are reading from the same replica and an InterruptedException is delivered
 * to one of them.
 */
@Test(timeout = 120000)
public void testPurgingClosedReplicas() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicInteger replicasCreated = new AtomicInteger(0);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            replicasCreated.incrementAndGet();
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testPurgingClosedReplicas", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4095;
    final int SEED = 0xFADE0;
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), conf);
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    final Semaphore sem = new Semaphore(0);
    final List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer()
            .getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
    final LocatedBlock lblock = locatedBlocks.get(0); // first block
    final byte[] buf = new byte[TEST_FILE_LEN];
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                while (true) {
                    BlockReader blockReader = null;
                    try {
                        blockReader = BlockReaderTestUtil.getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
                        sem.release();
                        try {
                            blockReader.readAll(buf, 0, TEST_FILE_LEN);
                        } finally {
                            sem.acquireUninterruptibly();
                        }
                    } catch (ClosedByInterruptException e) {
                        LOG.info("got the expected ClosedByInterruptException", e);
                        sem.release();
                        break;
                    } finally {
                        if (blockReader != null)
                            blockReader.close();
                    }
                    LOG.info("read another " + TEST_FILE_LEN + " bytes.");
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
                sem.release();
            }
        }
    };
    Thread thread = new Thread(readerRunnable);
    thread.start();
    // While the thread is reading, send it interrupts.
    // These should trigger a ClosedChannelException.
    while (thread.isAlive()) {
        sem.acquireUninterruptibly();
        thread.interrupt();
        sem.release();
    }
    Assert.assertFalse(testFailed.get());
    // We should be able to read from the file without
    // getting a ClosedChannelException.
    BlockReader blockReader = null;
    try {
        blockReader = BlockReaderTestUtil.getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
        blockReader.readFully(buf, 0, TEST_FILE_LEN);
    } finally {
        if (blockReader != null)
            blockReader.close();
    }
    byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(buf, expected));
    // Another ShortCircuitReplica object should have been created.
    Assert.assertEquals(2, replicasCreated.get());
    dfs.close();
    cluster.shutdown();
    sockDir.close();
}

From source file:org.apache.hadoop.hdfs.client.impl.TestBlockReaderFactory.java

/**
 * When an InterruptedException is sent to a thread calling
 * FileChannel#read, the FileChannel is immediately closed and the
 * thread gets an exception.  This effectively means that we might have
 * someone asynchronously calling close() on the file descriptors we use
 * in BlockReaderLocal.  So when unreferencing a ShortCircuitReplica in
 * ShortCircuitCache#unref, we should check if the FileChannel objects
 * are still open.  If not, we should purge the replica to avoid giving
 * it out to any future readers.
 *
 * This is a regression test for HDFS-6227: Short circuit read failed
 * due to ClosedChannelException.
 *
 * Note that you may still get ClosedChannelException errors if two threads
 * are reading from the same replica and an InterruptedException is delivered
 * to one of them.
 */
@Test(timeout = 120000)
public void testPurgingClosedReplicas() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicInteger replicasCreated = new AtomicInteger(0);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            replicasCreated.incrementAndGet();
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testPurgingClosedReplicas", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4095;
    final int SEED = 0xFADE0;
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), conf);
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);

    final Semaphore sem = new Semaphore(0);
    final List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer()
            .getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
    final LocatedBlock lblock = locatedBlocks.get(0); // first block
    final byte[] buf = new byte[TEST_FILE_LEN];
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                while (true) {
                    BlockReader blockReader = null;
                    try {
                        blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0,
                                TEST_FILE_LEN);
                        sem.release();
                        try {
                            blockReader.readAll(buf, 0, TEST_FILE_LEN);
                        } finally {
                            sem.acquireUninterruptibly();
                        }
                    } catch (ClosedByInterruptException e) {
                        LOG.info("got the expected ClosedByInterruptException", e);
                        sem.release();
                        break;
                    } finally {
                        if (blockReader != null)
                            blockReader.close();
                    }
                    LOG.info("read another " + TEST_FILE_LEN + " bytes.");
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
                sem.release();
            }
        }
    };
    Thread thread = new Thread(readerRunnable);
    thread.start();

    // While the thread is reading, send it interrupts.
    // These should trigger a ClosedChannelException.
    while (thread.isAlive()) {
        sem.acquireUninterruptibly();
        thread.interrupt();
        sem.release();
    }
    Assert.assertFalse(testFailed.get());

    // We should be able to read from the file without
    // getting a ClosedChannelException.
    BlockReader blockReader = null;
    try {
        blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
        blockReader.readFully(buf, 0, TEST_FILE_LEN);
    } finally {
        if (blockReader != null)
            blockReader.close();
    }
    byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(buf, expected));

    // Another ShortCircuitReplica object should have been created.
    Assert.assertEquals(2, replicasCreated.get());

    dfs.close();
    cluster.shutdown();
    sockDir.close();
}

From source file:org.opennms.netmgt.provision.server.SimpleServer.java

/**
 * <p>stopServer</p>
 *
 * @throws java.io.IOException if any.
 */
public void stopServer() throws IOException {
    if (!m_stopped) {
        m_stopped = true;
        final Thread t = getServerThread();
        setServerThread(null);
        IOUtils.closeQuietly(getSocket());
        IOUtils.closeQuietly(getServerSocket());
        try {
            Thread.sleep(200);
        } catch (final InterruptedException e) {
        }

        if (t != null && t.isAlive()) {
            t.interrupt();
        }
        try {
            if (m_runnable != null)
                m_runnable.awaitShutdown();
        } catch (final InterruptedException e) {
            LOG.debug("Interrupted while shutting down.", e);
        }
    }
}

From source file:org.urbanstew.soundcloudapi.test.RequestTest.java

public final void testUploadCountedFile() throws Exception {
    File file = new File("empty.wav");
    assertTrue(file.exists());

    final List<NameValuePair> params = new java.util.ArrayList<NameValuePair>();
    params.add(new BasicNameValuePair("track[title]", "This is a test upload"));
    params.add(new BasicNameValuePair("track[sharing]", "private"));

    final ProgressFileBody fileBody = new ProgressFileBody(file);

    Thread progressThread = new Thread(new Runnable() {
        public void run() {
            try {
                HttpResponse response = mApi.upload(fileBody, params);
                assertEquals(201, response.getStatusLine().getStatusCode());
                sCreatedTrack3Id = getId(response);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    });

    progressThread.start();
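    // Poll isAlive() until the upload thread finishes.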
    while (progressThread.isAlive()) {
        Thread.sleep(100);
        //         System.out.println(fileBody.getBytesTransferred());
    }

    assertEquals(73772, fileBody.getBytesTransferred());
}