Example usage for java.lang Thread enumerate

List of usage examples for java.lang Thread enumerate

Introduction

On this page you can find example usages of java.lang.Thread.enumerate.

Prototype

public static int enumerate(Thread tarray[]) 

Source Link

Document

Copies into the specified array every active thread in the current thread's thread group and its subgroups.

Usage

From source file:eu.stratosphere.client.minicluster.NepheleMiniCluster.java

/**
 * Starts the mini cluster: loads the global configuration, waits for any
 * lingering IPC threads from a previous run to terminate, then boots a
 * local JobManager and blocks until it is ready.
 *
 * @throws Exception if configuration loading, thread joining, or JobManager
 *                   startup fails
 */
public void start() throws Exception {
    synchronized (startStopLock) {
        // set up the global configuration
        if (this.configDir != null) {
            GlobalConfiguration.loadConfiguration(configDir);
        } else {
            Configuration conf = getMiniclusterDefaultConfig(jobManagerRpcPort, taskManagerRpcPort,
                    taskManagerDataPort, memorySize, hdfsConfigFile, lazyMemoryAllocation,
                    defaultOverwriteFiles, defaultAlwaysCreateDirectory, numTaskManager);
            GlobalConfiguration.includeConfiguration(conf);
        }

        // force the input/output format classes to load the default values from the configuration.
        // we need to do this here, because the format classes may have been initialized before the mini cluster was started
        initializeIOFormatClasses();

        // before we start the JobManager, we need to make sure that there are no lingering IPC threads from before.
        // Thread.activeCount() is only an estimate, so pad the array: with an exact-size array,
        // threads started between the count and the enumeration would be silently dropped by
        // Thread.enumerate(), and a lingering IPC thread could be missed.
        Thread[] allThreads = new Thread[Thread.activeCount() + 10];
        int numThreads = Thread.enumerate(allThreads);

        for (int i = 0; i < numThreads; i++) {
            Thread t = allThreads[i];
            String name = t.getName();
            if (name.startsWith("IPC")) {
                // wait for the old IPC thread to die before binding new servers
                t.join();
            }
        }

        // start the job manager
        jobManager = new JobManager(ExecutionMode.LOCAL);

        waitForJobManagerToBecomeReady(numTaskManager);
    }
}

From source file:edu.illinois.enforcemop.examples.hadoop.TestRPC.java

/**
 * Verifies that a slow RPC does not block the server's other handler:
 * two fast pings must complete while the slow call is still in flight.
 * Afterwards the SlowRPC worker thread is interrupted so it does not
 * outlive the test.
 *
 * @throws Exception on RPC setup or communication failure
 */
@Test
//@Schedule(name = "slowDone", value = "slowrpcDone@SlowRPC->beforeStop@main") 
public void testSlowRpc() throws Exception {
    System.out.println("Testing Slow RPC");
    // create a server with two handlers
    Server server = RPC.getServer(TestProtocol.class, new TestImpl(), ADDRESS, 0, 2, false, conf, null);
    TestProtocol proxy = null;

    try {
        server.start();

        InetSocketAddress addr = NetUtils.getConnectAddress(server);

        // create a client
        proxy = (TestProtocol) RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);

        SlowRPC slowrpc = new SlowRPC(proxy);
        Thread thread = new Thread(slowrpc, "SlowRPC");
        thread.start(); // send a slow RPC, which won't return until two fast pings
        assertTrue("slowDone", !slowrpc.isDone());

        proxy.slowPing(false); // first fast ping

        // verify that the first RPC is still stuck
        assertTrue("slowDone", !slowrpc.isDone());

        proxy.slowPing(false); // second fast ping

        // Now the slow ping should be able to be executed

        //Original code :
        //OPWAIT while (!slowrpc.isDone()) {
        //OPWAIT  System.out.println("Waiting for slow RPC to get done.");
        //OPWAIT  try {
        //    Thread.sleep(1000);
        //OPWAIT  } catch (Exception e) {}
        //OPWAIT }

        try {
            Thread.sleep(1000);
        } catch (Exception e) {
        }

    } finally {
        server.stop();
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
        System.out.println("Down slow rpc testing");
    }
    // Interrupt the SlowRPC worker manually. Find it by name rather than
    // assuming it sits at index 1: Thread.enumerate() guarantees neither
    // order nor count, so the original t[1].interrupt() could hit the wrong
    // thread or throw a NullPointerException on an unfilled slot.
    Thread[] threads = new Thread[Thread.activeCount() + 10];
    int count = Thread.enumerate(threads);
    for (int i = 0; i < count; i++) {
        if ("SlowRPC".equals(threads[i].getName())) {
            threads[i].interrupt();
        }
    }
}

From source file:edu.illinois.imunit.examples.hadoop.TestRPC.java

/**
 * Verifies that a slow RPC does not block the server's other handler:
 * two fast pings must complete while the slow call is still in flight.
 * The IMUnit schedule serializes "slowrpcDone" before "beforeStop".
 * Afterwards the SlowRPC worker thread is interrupted so it does not
 * outlive the test.
 *
 * @throws Exception on RPC setup or communication failure
 */
@Test
@Schedule(name = "slowDone", value = "slowrpcDone@SlowRPC->beforeStop@main")
public void testSlowRpc() throws Exception {
    System.out.println("Testing Slow RPC");
    // create a server with two handlers
    Server server = RPC.getServer(TestProtocol.class, new TestImpl(), ADDRESS, 0, 2, false, conf, null);
    TestProtocol proxy = null;

    try {
        server.start();

        InetSocketAddress addr = NetUtils.getConnectAddress(server);

        // create a client
        proxy = (TestProtocol) RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);

        SlowRPC slowrpc = new SlowRPC(proxy);
        Thread thread = new Thread(slowrpc, "SlowRPC");
        thread.start(); // send a slow RPC, which won't return until two fast pings
        assertTrue("slowDone", !slowrpc.isDone());

        proxy.slowPing(false); // first fast ping

        // verify that the first RPC is still stuck
        assertTrue("slowDone", !slowrpc.isDone());

        proxy.slowPing(false); // second fast ping

        // Now the slow ping should be able to be executed

        //Original code :
        //OPWAIT while (!slowrpc.isDone()) {
        //OPWAIT  System.out.println("Waiting for slow RPC to get done.");
        //OPWAIT  try {
        //    Thread.sleep(1000);
        //OPWAIT  } catch (Exception e) {}
        //OPWAIT }

    } finally {
        fireEvent("beforeStop");
        server.stop();
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
        System.out.println("Down slow rpc testing");
    }
    // Interrupt the SlowRPC worker manually. Find it by name rather than
    // assuming it sits at index 1: Thread.enumerate() guarantees neither
    // order nor count, so the original t[1].interrupt() could hit the wrong
    // thread or throw a NullPointerException on an unfilled slot.
    Thread[] threads = new Thread[Thread.activeCount() + 10];
    int count = Thread.enumerate(threads);
    for (int i = 0; i < count; i++) {
        if ("SlowRPC".equals(threads[i].getName())) {
            threads[i].interrupt();
        }
    }
}

From source file:org.apache.flink.client.minicluster.NepheleMiniCluster.java

/**
 * Starts the mini cluster: derives unique ports when running under a
 * surefire/failsafe fork, loads the global configuration, waits for any
 * lingering IPC threads from a previous run to terminate, then boots a
 * local JobManager and blocks until it is ready.
 *
 * @throws Exception if configuration loading, thread joining, or JobManager
 *                   startup fails
 */
public void start() throws Exception {

    String forkNumberString = System.getProperty("forkNumber");
    int forkNumber = -1;
    try {
        forkNumber = Integer.parseInt(forkNumberString);
    } catch (NumberFormatException e) {
        // running inside an IDE, so the forkNumber property is not properly set
        // just ignore
    }
    if (forkNumber != -1) {
        // we are running inside a surefire/failsafe test, determine forkNumber and set
        // ports accordingly so that we can have multiple parallel instances
        jobManagerRpcPort = 1024 + forkNumber * 300;
        taskManagerRpcPort = 1024 + forkNumber * 300 + 100;
        taskManagerDataPort = 1024 + forkNumber * 300 + 200;
    }

    synchronized (startStopLock) {
        // set up the global configuration
        if (this.configDir != null) {
            GlobalConfiguration.loadConfiguration(configDir);
        } else {
            Configuration conf = getMiniclusterDefaultConfig(jobManagerRpcPort, taskManagerRpcPort,
                    taskManagerDataPort, memorySize, hdfsConfigFile, lazyMemoryAllocation,
                    defaultOverwriteFiles, defaultAlwaysCreateDirectory, taskManagerNumSlots, numTaskTracker);
            GlobalConfiguration.includeConfiguration(conf);
        }

        // force the input/output format classes to load the default values from the configuration.
        // we need to do this here, because the format classes may have been initialized before the mini cluster was started
        initializeIOFormatClasses();

        // before we start the JobManager, we need to make sure that there are no lingering IPC threads from before.
        // Thread.activeCount() is only an estimate, so pad the array: with an exact-size array,
        // threads started between the count and the enumeration would be silently dropped by
        // Thread.enumerate(), and a lingering IPC thread could be missed.
        Thread[] allThreads = new Thread[Thread.activeCount() + 10];
        int numThreads = Thread.enumerate(allThreads);

        for (int i = 0; i < numThreads; i++) {
            Thread t = allThreads[i];
            String name = t.getName();
            if (name.startsWith("IPC")) {
                // wait for the old IPC thread to die before binding new servers
                t.join();
            }
        }

        // start the job manager
        jobManager = new JobManager(ExecutionMode.LOCAL);

        waitForJobManagerToBecomeReady(numTaskTracker);
    }
}

From source file:com.sixt.service.framework.kafka.messaging.KafkaIntegrationTest.java

/**
 * Hard-kills the consumer thread with the given name to simulate an
 * unclean shutdown (test-only).
 *
 * @param victimName exact name of the thread to kill
 */
private void brutallyKillConsumer(String victimName) {
    // Pad the estimate from activeCount() and honor the count returned by
    // enumerate(): the original iterated the whole array, which throws a
    // NullPointerException on unfilled (null) trailing slots whenever fewer
    // threads are alive than activeCount() reported.
    Thread[] threads = new Thread[Thread.activeCount() + 10];
    int count = Thread.enumerate(threads);

    for (int i = 0; i < count; i++) {
        Thread t = threads[i];
        if (t.getName().equals(victimName)) {
            logger.error("BOOM: Killing consumer thread {}", victimName);
            t.stop(); // deliberate use despite deprecation: simulates a hard crash
        }
    }
}

From source file:com.gargoylesoftware.htmlunit.SimpleWebTestCase.java

/**
 * Gets the active JavaScript threads./*  w  w w. jav a 2s. c  om*/
 * @return the threads
 */
protected List<Thread> getJavaScriptThreads() {
    final Thread[] threads = new Thread[Thread.activeCount() + 10];
    Thread.enumerate(threads);
    final List<Thread> jsThreads = new ArrayList<Thread>();
    for (final Thread t : threads) {
        if (t != null && t.getName().startsWith("JS executor for")) {
            jsThreads.add(t);
        }
    }

    return jsThreads;
}

From source file:org.apache.hadoop.dfs.ClusterTestDFS.java

/**
 * Builds a human-readable dump of every active thread in the current
 * thread's thread group and its subgroups, one {@code Thread.toString()}
 * per line.
 *
 * @return the newline-separated thread summary
 */
public static String summarizeThreadGroup() {
    // Thread.enumerate() silently truncates when the target array is too
    // small, so keep growing the array (100, 1000, ...) until the returned
    // count is strictly below capacity, proving the snapshot is complete.
    int capacity = 100;
    Thread[] tarray;
    int count;
    do {
        tarray = new Thread[capacity];
        count = Thread.enumerate(tarray);
        capacity *= 10;
    } while (count == tarray.length);
    // StringBuilder instead of StringBuffer: the buffer is method-local,
    // so synchronization is pure overhead.
    StringBuilder sb = new StringBuilder(500);
    for (int i = 0; i < count; i++) {
        sb.append(tarray[i].toString());
        sb.append('\n');
    }
    return sb.toString();
}

From source file:com.gargoylesoftware.htmlunit.WebTestCase.java

/**
 * Gets the active JavaScript threads./*from  ww w. j a  v a  2 s  .co  m*/
 * @return the threads
 */
protected List<Thread> getJavaScriptThreads() {
    final Thread[] threads = new Thread[Thread.activeCount() + 10];
    Thread.enumerate(threads);
    final List<Thread> jsThreads = new ArrayList<>();
    for (final Thread t : threads) {
        if (t != null && t.getName().startsWith("JS executor for")) {
            jsThreads.add(t);
        }
    }

    return jsThreads;
}