Example usage for java.lang Thread interrupt

Introduction

This page collects example usages of java.lang.Thread#interrupt from open source projects.

Prototype

public void interrupt() 

Document

Interrupts this thread.
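
Before the project examples below, here is a minimal, self-contained sketch of the basic contract (not taken from any of the listed projects): interrupt() makes a thread that is blocked in sleep(), wait(), or join() throw InterruptedException, and merely sets the interrupt status of a thread that is running normally.

public class InterruptDemo {
    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            try {
                // Blocks until interrupted; interrupt() makes this
                // sleep() throw InterruptedException.
                Thread.sleep(60_000);
            } catch (InterruptedException e) {
                // The interrupt status is cleared when the exception is
                // thrown; restore it so callers can still observe it.
                Thread.currentThread().interrupt();
                System.out.println("worker interrupted");
            }
        });
        worker.start();
        Thread.sleep(100);  // give the worker time to reach sleep()
        worker.interrupt(); // wake it from sleep()
        worker.join();
    }
}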

Usage

From source file:org.cloudata.core.tabletserver.TabletServer.java

public void shutdown() {
    //LOG.fatal("Kill Tablet Server:" + hostName + ",freeMemory:" + SizeOf.freeMemory() + "/" + SizeOf.totalMemory());
    LOG.fatal("Kill Tablet Server:" + hostName + "," + testMode);
    if (!testMode) {
        //      try {
        //        System.out.println("Kill Tablet Server:" + hostName + ",freeMemory:" + SizeOf.freeMemory() + "/" + SizeOf.totalMemory());
        //      } catch (Exception e) {
        //        LOG.error(e);
        //      }
        System.exit(-1);
    } else {
        //if (!stopRequested) {
        stopRequested = true;
        tablets.clear();

        try {
            server.stop();
        } catch (Exception e) {
            LOG.error(e);
        }
        try {
            ((DataXceiveServer) this.dataXceiveServer.getRunnable()).kill();
        } catch (Exception e) {
            LOG.error(e);
        }

        try {
            if (minorCompactionThread != null) {
                minorCompactionThread.interrupt();
            }
        } catch (Exception e) {
            LOG.error(e);
        }
        try {
            if (majorCompactionThread != null) {
                majorCompactionThread.interrupt();
            }
        } catch (Exception e) {
            LOG.error(e);
        }

        try {
            //ThreadGroup threadGroup = Thread.currentThread().getThreadGroup();
            Thread[] threads = new Thread[threadGroup.activeCount()];
            threadGroup.enumerate(threads, true);

            for (Thread thread : threads) {
                try {
                    thread.interrupt();
                } catch (Exception e) {

                }
            }
        } catch (Exception e) {
            LOG.error(e);
        }

        try {
            loadThreadExecutor.shutdownNow();
            compactionExecutor.shutdownNow();
            splitExecutor.shutdownNow();
        } catch (Exception e) {
            LOG.error(e);
        }

        try {
            zk.close();
        } catch (Exception e) {
            LOG.error(e);
        }

        tabletServerMetrics.shutdown();
        LOG.debug("shutdown tabletserver:" + hostName);
    }
}

From source file:uk.co.gidley.jmxmonitor.services.ThreadManager.java

public void initialise() throws InitialisationException {
    // Start shutdown socket service
    ShutdownRunner shutdownRunner = null;
    try {
        shutdownRunner = new ShutdownRunner(mainConfiguration.getConfiguration());
    } catch (IOException e) {
        logger.error("{}", e);
        throw new InitialisationException(e);
    }
    Thread shutdownThread = new Thread(shutdownRunner, SHUTDOWN_MONITOR_THREAD);

    try {

        shutdownThread.start();

        // Configure Monitoring Group instances
        List<String> monitoringGroupNames = mainConfiguration.getConfiguration()
                .getList(PROPERTY_PREFIX + "groups");

        for (String groupName : monitoringGroupNames) {
            if (!StringUtils.isEmpty(groupName)) {
                logger.debug("Started initialising {}", groupName);
                initialiseMonitoringGroup(groupName, mainConfiguration.getConfiguration());
                logger.debug("Completed initialising {}", groupName);
            }
        }

        // Start threads to begin monitoring
        logger.info("Configuration complete starting all monitors");
        for (String groupName : monitoringGroups.keySet()) {
            Thread thread = monitoringGroups.get(groupName).getThread();
            thread.start();
        }

        // Continue to monitor for failures or a stop message. On failure, stop the group and restart it if possible.
        while (threadManagerRunning) {
            for (String groupName : monitoringGroups.keySet()) {
                MonitoringGroup monitoringGroup = monitoringGroups.get(groupName).getMonitoringGroup();
                if (!monitoringGroup.isAlive()) {
                    restartMonitoringGroup(groupName, mainConfiguration.getConfiguration());
                }
            }

            // Stop if the shutdown thread has triggered. 
            if (!shutdownThread.isAlive()) {
                threadManagerRunning = false;
                for (String groupName : monitoringGroups.keySet()) {
                    logger.debug("Stopping {}", groupName);
                    MonitoringGroup monitoringGroup = monitoringGroups.get(groupName).getMonitoringGroup();
                    monitoringGroup.stop();
                }
                for (String groupName : monitoringGroups.keySet()) {
                    Thread monitoringGroup = monitoringGroups.get(groupName).getThread();
                    monitoringGroup.join();
                    logger.debug("Stopped {}", groupName);
                }
            }

            Thread.sleep(5000);
        }

    } catch (NoSuchElementException e) {
        logger.error("{}", e);
        throw new InitialisationException(e);
    } catch (InterruptedException e) {
        logger.error("{}", e);
        throw new RuntimeException(e);
    } finally {
        // Must shut down the shutdown thread
        shutdownThread.interrupt();

    }

}

From source file:gov.noaa.pfel.erddap.util.EDStatic.java

/** This interrupts the thread and waits up to maxSeconds for it to finish.
 * If it still isn't finished, it is stopped.
 */
public static void stopThread(Thread thread, int maxSeconds) {
    try {
        if (thread == null)
            return;
        String name = thread.getName();
        if (verbose)
            String2.log("stopThread(" + name + ")...");
        if (!thread.isAlive()) {
            if (verbose)
                String2.log("thread=" + name + " was already not alive.");
            return;
        }
        thread.interrupt();
        int waitSeconds = 0;
        while (thread.isAlive() && waitSeconds < maxSeconds) {
            waitSeconds += 2;
            Math2.sleep(2000);
        }
        if (thread.isAlive()) {
            if (verbose)
                String2.log("!!!Stopping thread=" + name + " after " + waitSeconds + " s");
            thread.stop();
        } else {
            if (verbose)
                String2.log("thread=" + name + " noticed interrupt in " + waitSeconds + " s");
        }
    } catch (Throwable t) {
        String2.log(MustBe.throwableToString(t));
    }
}
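
The thread.stop() fallback above is a last resort: Thread.stop() has been deprecated since Java 1.2 because it unlocks all monitors the thread holds and can leave shared state corrupt. Where you control the worker's code, the usual alternative is cooperative cancellation driven by the interrupt status. A minimal sketch of that pattern, not part of the ERDDAP source:

public class CooperativeStopDemo {
    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            // Poll the interrupt status instead of relying on stop().
            while (!Thread.currentThread().isInterrupted()) {
                // ... one bounded unit of work ...
            }
            System.out.println("worker exiting cleanly");
        });
        worker.start();
        Thread.sleep(200);  // let it run briefly
        worker.interrupt(); // request cancellation
        worker.join(1000);  // wait briefly for a clean exit
    }
}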

From source file:edu.harvard.i2b2.crc.ejb.QueryManagerBeanUtil.java

public Map testSend(String domainId, String projectId, String ownerId, String generatedSql, String sessionId,
        String queryInstanceId, String patientSetId, String xmlRequest, long timeout) throws Exception {
    String status = null;
    int queryResultInstanceId = 0;

    log.debug("in testSend");
    QueryProcessorUtil qpUtil = QueryProcessorUtil.getInstance();
    ServiceLocator serviceLocator = ServiceLocator.getInstance();
    /*
    QueueConnection conn = serviceLocator.getQueueConnectionFactory(
    QUEUE_CONN_FACTORY_NAME).createQueueConnection();
    Queue sendQueue = serviceLocator.getQueue(SMALL_QUEUE_NAME);
    Queue responseQueue = serviceLocator.getQueue(RESPONSE_QUEUE_NAME);
    QueueSession session = conn.createQueueSession(false,
    javax.jms.Session.AUTO_ACKNOWLEDGE);
    String id = sessionId;
    String selector = "JMSCorrelationID='" + id + "'";
    QueueSender sender = session.createSender(sendQueue);
    MapMessage mapMsg = session.createMapMessage();
    mapMsg.setJMSCorrelationID(id);
    mapMsg.setJMSReplyTo(responseQueue);
            
    mapMsg.setString(XML_REQUEST_PARAM, xmlRequest);
    mapMsg.setString(QUERY_MASTER_GENERATED_SQL_PARAM, generatedSql);
    mapMsg.setString(QUERY_INSTANCE_ID_PARAM, queryInstanceId);
    mapMsg.setString(QUERY_PATIENT_SET_ID_PARAM, patientSetId);
    mapMsg.setString(DS_LOOKUP_DOMAIN_ID, domainId);
    mapMsg.setString(DS_LOOKUP_PROJECT_ID, projectId);
    mapMsg.setString(DS_LOOKUP_OWNER_ID, ownerId);
    sender.send(mapMsg);
            
    QueueConnection conn1 = serviceLocator.getQueueConnectionFactory(
    QUEUE_CONN_FACTORY_NAME).createQueueConnection();
    conn1.start();
            
    QueueSession recvSession = conn1.createQueueSession(false,
    javax.jms.Session.AUTO_ACKNOWLEDGE);
            
    QueueReceiver rcvr = recvSession
    .createReceiver(responseQueue, selector);
    MapMessage receivedMsg = (MapMessage) rcvr.receive(timeout);
            
            
    if (receivedMsg == null) {
       status = "RUNNING";
       log.info("STATUS IS RUNNING " + status);
    } else {
       String responseObj = (String) receivedMsg.getString("para1");
       status = (String) receivedMsg
       .getString(QueryManagerBeanUtil.QUERY_STATUS_PARAM);
       log.debug("Got back response from executor " + responseObj);
            
       if (status != null && status.indexOf("LOCKEDOUT") > -1) {
    ;
       } else {
    status = "DONE";
       }
       queryResultInstanceId = receivedMsg
       .getInt(QT_QUERY_RESULT_INSTANCE_ID_PARAM);
       log.info("RESULT INSTANCE ID " + queryResultInstanceId);
    }
     */
    //TODO mm bypass JMS and call directly

    long waitTime = getTimeout(xmlRequest);

    ExecRunnable exec = new ExecRunnable();

    exec.execute(generatedSql, queryInstanceId, patientSetId, xmlRequest, domainId, projectId, ownerId);

    Thread t = new Thread(exec);

    synchronized (t) {
        t.start();

        try {
            //if (waitTime > 0) {
            //   t.wait(waitTime);
            //} else {
            //   t.wait();
            //}

            long startTime = System.currentTimeMillis();
            long deltaTime = -1;
            while ((exec.isJobCompleteFlag() == false) && (deltaTime < waitTime)) {
                if (waitTime > 0) {
                    t.wait(waitTime - deltaTime);
                    deltaTime = System.currentTimeMillis() - startTime;
                } else {
                    t.wait();
                }
            }

            if (exec.isJobCompleteFlag() == false) {
                String timeOuterror = "Result waittime millisecond <result_waittime_ms> :" + waitTime
                        + " elapsed, setting to next queue";
                log.debug(timeOuterror);

                DAOFactoryHelper daoFactoryHelper = new DAOFactoryHelper(domainId, projectId, ownerId);

                IDAOFactory daoFactory = daoFactoryHelper.getDAOFactory();

                SetFinderDAOFactory sfDAOFactory = daoFactory.getSetFinderDAOFactory();
                //DataSourceLookup dsLookup = sfDAOFactory.getDataSourceLookup();

                // check if the status is cancelled
                IQueryInstanceDao queryInstanceDao = sfDAOFactory.getQueryInstanceDAO();
                QtQueryInstance queryInstance = queryInstanceDao.getQueryInstanceByInstanceId(queryInstanceId);

                queryInstance.setBatchMode(MEDIUM_QUEUE);
                //queryInstance.setEndDate(new Date(System
                //      .currentTimeMillis()));
                queryInstanceDao.update(queryInstance, false);

                log.debug("Set to MEDIUM Queue");
                Map returnMap = new HashMap();
                returnMap.put(QUERY_STATUS_PARAM, "RUNNING");
                int id = Integer.parseInt(queryInstanceId);
                returnMap.put(QT_QUERY_RESULT_INSTANCE_ID_PARAM, id);
                return returnMap;
                //throw new Exception("Timed Out, setting to MEDIUM Queue");
            }
        } catch (InterruptedException e) {
            log.error("Error in thread: " + e.getMessage());

            e.printStackTrace();
            throw new I2B2Exception("Thread error while running CRC job ", e);
        } finally {
            t.interrupt();
            //exec = null;
            t = null;
        }
    }

    //      closeAll(sender, null, conn, session);
    //      closeAll(null, rcvr, conn1, recvSession);
    // closeAllTopic(rcvr,conn1,recvSession);
    //MM 
    //      Map returnMap = new HashMap();
    //      returnMap.put(QUERY_STATUS_PARAM, status);
    //      returnMap.put(QT_QUERY_RESULT_INSTANCE_ID_PARAM, queryResultInstanceId);
    //      return returnMap;
    return exec.getResult();
}

From source file:org.apache.hadoop.mapreduce.v2.hs.TestUnnecessaryBlockingOnHistoryFileInfo.java

/**
 * This creates a test case in which two threads try to load two
 * different jobs of the same user under the intermediate directory.
 * One thread should not be blocked by the other thread that is loading
 * huge job files (simulated by hanging forever while parsing the job
 * files). The test fails by triggering the timeout if one thread is
 * blocked by the other while the other thread holds the lock on its
 * associated job files and hangs while parsing them.
 */
@Test(timeout = 600000)
public void testTwoThreadsQueryingDifferentJobOfSameUser() throws InterruptedException, IOException {
    final Configuration config = new Configuration();
    config.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR, INTERMEDIATE_DIR.getPath());
    config.setLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS, Long.MAX_VALUE);

    final JobId job1 = createJobId(0);
    final JobId job2 = createJobId(1);
    final HistoryFileManagerUnderContention historyFileManager = createHistoryFileManager(config, job1, job2);

    Thread webRequest1 = null;
    Thread webRequest2 = null;
    try {
        /**
         * create a dummy .jhist file for job1, and try to load/parse the job
         * files in one child thread.
         */
        createJhistFile(job1);
        webRequest1 = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    HistoryFileManager.HistoryFileInfo historyFileInfo = historyFileManager.getFileInfo(job1);
                    historyFileInfo.loadJob();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        });
        webRequest1.start();
        historyFileManager.waitUntilIntermediateDirIsScanned(job1);

        /**
         * At this point, thread webRequest1 has finished scanning the
         * intermediate directory and is hanging up parsing the job files while
         * it's holding the lock on the associated HistoryFileInfo object.
         */

        /**
         * create a dummy .jhist file for job2 and try to load/parse the job files
         * in the other child thread. Because job files are not moved from the
         * intermediate directory to the done directory, thread webRequest2
         * will also see the job history files for job1.
         */
        createJhistFile(job2);
        webRequest2 = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    HistoryFileManager.HistoryFileInfo historyFileInfo = historyFileManager.getFileInfo(job2);
                    historyFileInfo.loadJob();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        });
        webRequest2.start();
        historyFileManager.waitUntilIntermediateDirIsScanned(job2);

        /**
         * If execution reaches this point, thread webRequest2 did not try to
         * acquire the lock on the HistoryFileInfo object associated with job1
         * (which is permanently held by thread webRequest1 as it hangs while
         * parsing the job history files), and so was able to proceed with
         * parsing the job history files of job2.
         */
        Assert.assertTrue(
                "Thread 2 is blocked while it is trying to " + "load job2 by Thread 1 which is loading job1.",
                webRequest2.getState() != Thread.State.BLOCKED);
    } finally {
        if (webRequest1 != null) {
            webRequest1.interrupt();
        }
        if (webRequest2 != null) {
            webRequest2.interrupt();
        }
    }
}

From source file:com.streamsets.pipeline.stage.origin.hdfs.cluster.ClusterHDFSSourceIT.java

@Test(timeout = 30000)
public void testProduceDelimitedWithHeader() throws Exception {
    ClusterHdfsConfigBean conf = new ClusterHdfsConfigBean();
    conf.hdfsUri = miniDFS.getURI().toString();
    conf.hdfsDirLocations = Arrays.asList(dir.toUri().getPath());
    conf.hdfsConfigs = new HashMap<>();
    conf.hdfsKerberos = false;
    conf.hdfsConfDir = hadoopConfDir;
    conf.recursive = false;
    conf.produceSingleRecordPerMessage = false;
    conf.dataFormat = DataFormat.DELIMITED;
    conf.dataFormatConfig.csvFileFormat = CsvMode.CSV;
    conf.dataFormatConfig.csvHeader = CsvHeader.WITH_HEADER;
    conf.dataFormatConfig.csvMaxObjectLen = 4096;
    conf.dataFormatConfig.csvRecordType = CsvRecordType.LIST;
    conf.dataFormatConfig.csvSkipStartLines = 0;

    SourceRunner sourceRunner = new SourceRunner.Builder(ClusterHdfsDSource.class, createSource(conf))
            .addOutputLane("lane").setExecutionMode(ExecutionMode.CLUSTER_BATCH).setResourcesDir(resourcesDir)
            .build();

    sourceRunner.runInit();

    List<Map.Entry> list = new ArrayList<>();
    list.add(new Pair("HEADER_COL_1,HEADER_COL_2", null));
    list.add(new Pair("path::" + "1", new String("a,b\nC,D\nc,d")));

    Thread th = createThreadForAddingBatch(sourceRunner, list);
    try {
        StageRunner.Output output = sourceRunner.runProduce(null, 5);

        String newOffset = output.getNewOffset();
        Assert.assertEquals("path::" + "1", newOffset);
        List<Record> records = output.getRecords().get("lane");
        Assert.assertEquals(3, records.size());
        Record record = records.get(0);
        Assert.assertEquals("a",
                record.get().getValueAsList().get(0).getValueAsMap().get("value").getValueAsString());
        Assert.assertEquals("HEADER_COL_1",
                record.get().getValueAsList().get(0).getValueAsMap().get("header").getValueAsString());
        Assert.assertEquals("b",
                record.get().getValueAsList().get(1).getValueAsMap().get("value").getValueAsString());
        Assert.assertEquals("HEADER_COL_2",
                record.get().getValueAsList().get(1).getValueAsMap().get("header").getValueAsString());
        record = records.get(1);
        Assert.assertEquals("C",
                record.get().getValueAsList().get(0).getValueAsMap().get("value").getValueAsString());
        Assert.assertEquals("HEADER_COL_1",
                record.get().getValueAsList().get(0).getValueAsMap().get("header").getValueAsString());
        Assert.assertEquals("D",
                record.get().getValueAsList().get(1).getValueAsMap().get("value").getValueAsString());
        Assert.assertEquals("HEADER_COL_2",
                record.get().getValueAsList().get(1).getValueAsMap().get("header").getValueAsString());
        record = records.get(2);
        Assert.assertEquals("c",
                record.get().getValueAsList().get(0).getValueAsMap().get("value").getValueAsString());
        Assert.assertEquals("HEADER_COL_1",
                record.get().getValueAsList().get(0).getValueAsMap().get("header").getValueAsString());
        Assert.assertEquals("d",
                record.get().getValueAsList().get(1).getValueAsMap().get("value").getValueAsString());
        Assert.assertEquals("HEADER_COL_2",
                record.get().getValueAsList().get(1).getValueAsMap().get("header").getValueAsString());
        if (sourceRunner != null) {
            sourceRunner.runDestroy();
        }
    } finally {
        th.interrupt();
    }
}

From source file:net.sf.jasperreports.customvisualization.export.CVElementPhantomJSImageDataProvider.java

/**
 * Executes a command within the given timeout.
 *
 * @param args
 * @param currentDirectory
 * @param timeout
 */
private static void runCommand(String[] args, File currentDirectory, final int timeout) {
    Thread loggingThread = null;
    Thread interruptingThread = null;

    try {
        String cmd = "";
        for (String arg : args) {
            cmd += " " + arg;
        }

        if (log.isDebugEnabled()) {
            log.debug("Executing external command: " + cmd);
        }
        //System.out.println(cmd);

        ProcessBuilder pb = new ProcessBuilder(Arrays.asList(args));
        pb.directory(currentDirectory);

        final Process externalProcess = pb.start();
        final StringBuilder processOutput = new StringBuilder();

        final boolean[] success = new boolean[1];
        success[0] = false;

        loggingThread = new Thread(new Runnable() {
            @Override
            public void run() {
                BufferedReader br = null;
                try {
                    br = new BufferedReader(new InputStreamReader(externalProcess.getInputStream()));
                    String line;
                    while ((line = br.readLine()) != null) {
                        processOutput.append(line).append("\n");

                        if (line.indexOf("SCRIPT_SUCCESS") >= 0) {
                            success[0] = true;
                            killProcess(externalProcess, 100);
                        } else if (line.indexOf("SCRIPT_ERROR") >= 0) {
                            success[0] = false;
                            killProcess(externalProcess, 100);
                        }
                    }

                    if (log.isDebugEnabled()) {
                        log.debug("External process output:\n" + processOutput.toString());
                    }
                } catch (IOException e) {
                    if (log.isDebugEnabled()) {
                        log.debug(e.getMessage());
                    }
                } finally {
                    if (br != null) {
                        try {
                            br.close();
                        } catch (IOException e) {
                            if (log.isWarnEnabled()) {
                                log.warn("Failed to close phantomjs process' inputstream", e);
                            }
                        }
                    }
                }
            }
        });

        interruptingThread = new Thread(new Runnable() {
            @Override
            public void run() {
                if (killProcess(externalProcess, timeout)) {
                    success[0] = false;
                }
            }

        });
        loggingThread.start();
        interruptingThread.start();
        externalProcess.waitFor();

        // We do not care whether the phantomjs process ends on time as long as it succeeds in producing the desired output.
        if (externalProcess.exitValue() != 0 && !success[0]) {
            // FIXME we should do loggingThread.join(millis) because the
            // process might end before its output is fully processed

            throw new JRRuntimeException("External process did not end properly; exit value: "
                    + externalProcess.exitValue()
                    + (processOutput.length() > 0 ? "; process output:\n" + processOutput + "\n" : "."));
        }

    } catch (IOException e) {
        throw new JRRuntimeException(e);
    } catch (InterruptedException e) {
        throw new JRRuntimeException(e);
    } finally {

        if (interruptingThread != null && interruptingThread.isAlive()) {
            try {
                interruptingThread.interrupt();
            } catch (Exception ex) {
            }
        }
        if (loggingThread != null && loggingThread.isAlive()) {
            try {
                loggingThread.interrupt();
            } catch (Exception ex) {
            }
        }
    }
}

From source file:org.apache.cassandra.concurrent.ContinuationsExecutor.java

/**
 * Checks if a new worker can be added with respect to current pool state
 * and the given bound (either core or maximum). If so, the worker count is
 * adjusted accordingly, and, if possible, a new worker is created and
 * started running firstTask as its first task. This method returns false if
 * the pool is stopped or eligible to shut down. It also returns false if
 * the thread factory fails to create a thread when asked, which requires a
 * backout of workerCount, and a recheck for termination, in case the
 * existence of this worker was holding up termination.
 *
 * @param firstTask
 *            the task the new thread should run first (or null if none).
 *            Workers are created with an initial first task (in method
 *            execute()) to bypass queuing when there are fewer than
 *            corePoolSize threads (in which case we always start one), or
 *            when the queue is full (in which case we must bypass queue).
 *            Initially idle threads are usually created via
 *            prestartCoreThread or to replace other dying workers.
 * 
 * @param core
 *            if true use corePoolSize as bound, else maximumPoolSize. (A
 *            boolean indicator is used here rather than a value to ensure
 *            reads of fresh values after checking other pool state).
 * @return true if successful
 */
private boolean addWorker(Runnable firstTask, boolean core) {
    retry: for (;;) {
        int c = ctl.get();
        int rs = runStateOf(c);

        // Check if queue empty only if necessary.
        if (rs >= SHUTDOWN && !(rs == SHUTDOWN && firstTask == null && !workQueue.isEmpty()))
            return false;

        for (;;) {
            int wc = workerCountOf(c);
            if (wc >= CAPACITY || wc >= (core ? corePoolSize : maximumPoolSize))
                return false;
            if (compareAndIncrementWorkerCount(c))
                break retry;
            c = ctl.get(); // Re-read ctl
            if (runStateOf(c) != rs)
                continue retry;
            // else CAS failed due to workerCount change; retry inner loop
        }
    }

    Worker w = new Worker(firstTask);
    Thread t = w.thread;

    final ReentrantLock mainLock = this.mainLock;
    mainLock.lock();
    try {
        // Recheck while holding lock.
        // Back out on ThreadFactory failure or if
        // shut down before lock acquired.
        int c = ctl.get();
        int rs = runStateOf(c);

        if (t == null || (rs >= SHUTDOWN && !(rs == SHUTDOWN && firstTask == null))) {
            decrementWorkerCount();
            tryTerminate();
            return false;
        }

        workers.add(w);

        int s = workers.size();
        if (s > largestPoolSize)
            largestPoolSize = s;
    } finally {
        mainLock.unlock();
    }

    t.start();
    // It is possible (but unlikely) for a thread to have been
    // added to workers, but not yet started, during transition to
    // STOP, which could result in a rare missed interrupt,
    // because Thread.interrupt is not guaranteed to have any effect
    // on a non-yet-started Thread (see Thread#interrupt).
    if (runStateOf(ctl.get()) == STOP && !t.isInterrupted())
        t.interrupt();

    return true;
}
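
The closing comment above is easy to demonstrate: Thread#interrupt is documented to have no guaranteed effect on a thread that is not alive, so an interrupt delivered before start() can simply be lost. A standalone sketch, not part of the Cassandra code; on HotSpot it prints "interrupted? false":

public class InterruptBeforeStartDemo {
    public static void main(String[] args) throws InterruptedException {
        Thread t = new Thread(() -> System.out
                .println("interrupted? " + Thread.currentThread().isInterrupted()));
        t.interrupt(); // no effect: the thread has not been started yet
        t.start();
        t.join();
    }
}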

From source file:org.apache.hadoop.hbase.regionserver.wal.TestHLogSplit.java

/**
 * Simulates splitting a WAL out from under a regionserver that is still trying to write it.  Ensures we do not
 * lose edits.
 * @throws IOException
 * @throws InterruptedException
 */
@Test(timeout = 300000)
public void testLogCannotBeWrittenOnceParsed() throws IOException, InterruptedException {
    final AtomicLong counter = new AtomicLong(0);
    AtomicBoolean stop = new AtomicBoolean(false);
    // Region we'll write edits to and then later examine to make sure they all made it in.
    final String region = REGIONS.get(0);
    Thread zombie = new ZombieLastLogWriterRegionServer(this.conf, counter, stop, region);
    try {
        long startCount = counter.get();
        zombie.start();
        // Wait till writer starts going.
        while (startCount == counter.get())
            Threads.sleep(1);
        // Give it a second to write a few appends.
        Threads.sleep(1000);
        final Configuration conf2 = HBaseConfiguration.create(this.conf);
        final User robber = User.createUserForTesting(conf2, ROBBER, GROUP);
        int count = robber.runAs(new PrivilegedExceptionAction<Integer>() {
            @Override
            public Integer run() throws Exception {
                FileSystem fs = FileSystem.get(conf2);
                int expectedFiles = fs.listStatus(HLOGDIR).length;
                HLogSplitter.split(HBASEDIR, HLOGDIR, OLDLOGDIR, fs, conf2);
                Path[] logfiles = getLogForRegion(HBASEDIR, TABLE_NAME, region);
                assertEquals(expectedFiles, logfiles.length);
                int count = 0;
                for (Path logfile : logfiles) {
                    count += countHLog(logfile, fs, conf2);
                }
                return count;
            }
        });
        LOG.info("zombie=" + counter.get() + ", robber=" + count);
        assertTrue(
                "The log file could have at most 1 extra log entry, but can't have less. Zombie could write "
                        + counter.get() + " and logfile had only " + count,
                counter.get() == count || counter.get() + 1 == count);
    } finally {
        stop.set(true);
        zombie.interrupt();
        Threads.threadDumpingIsAlive(zombie);
    }
}

From source file:com.gft.unity.android.AndroidIO.java

@Override
public String InvokeServiceForBinary(IORequest request, IOService service, String storePath) {

    if (service != null) {
        IOServiceEndpoint endpoint = service.getEndpoint();

        // JUST FOR LOCAL TESTING, DO NOT UNCOMMENT FOR PLATFORM RELEASE
        // LOG.LogDebug(Module.PLATFORM, "Request content (for binary): " + request.getContent());

        if (endpoint == null) {
            LOG.LogDebug(Module.PLATFORM, "No endpoint configured for this service name: " + service.getName());
            return null;
        }

        if (!ServiceType.OCTET_BINARY.equals(service.getType())) {
            LOG.LogDebug(Module.PLATFORM, "This method only admits OCTET_BINARY service types");
            return null;
        }

        String requestMethod = service.getRequestMethod().toString();
        if (request.getMethod() != null && request.getMethod().length() > 0)
            requestMethod = request.getMethod().toUpperCase();

        String requestUriString = formatRequestUriString(request, endpoint, requestMethod);
        Thread timeoutThread = null;

        try {

            // Security - VALIDATIONS
            if (!this.applySecurityValidations(requestUriString)) {
                return null;
            }

            // Adding HTTP Client Parameters
            this.addingHttpClientParms(request, endpoint);

            // Building Web Request to send
            HttpEntityEnclosingRequestBase httpRequest = this.buildWebRequest(request, service,
                    requestUriString, requestMethod);

            LOG.LogDebug(Module.PLATFORM, "Downloading service content");

            // Throw a new Thread to check absolute timeout
            timeoutThread = new Thread(new CheckTimeoutThread(httpRequest));
            timeoutThread.start();

            long start = System.currentTimeMillis();
            HttpResponse httpResponse = httpClient.execute(httpRequest);
            LOG.LogDebug(Module.PLATFORM,
                    "Content downloaded in " + (System.currentTimeMillis() - start) + "ms");

            // Read response and store to local filestystem
            return this.readWebResponseAndStore(httpResponse, service, storePath);

        } catch (Exception ex) {
            LOG.Log(Module.PLATFORM, "Unhandled Exception requesting service.", ex);
        } finally {
            // abort any previous timeout checking thread
            if (timeoutThread != null && timeoutThread.isAlive()) {
                timeoutThread.interrupt();
            }
        }
    }

    LOG.LogDebug(Module.PLATFORM, "invoke service (for binary) finished");
    return null;
}
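
The CheckTimeoutThread class used above is not included in this excerpt. A hypothetical reconstruction of the watchdog idea it appears to implement, assuming the Apache HttpClient 4.x request types the method uses: sleep for the timeout, abort the in-flight request if the deadline passes, and treat the interrupt() issued from the finally block as the normal "request finished in time" path.

// Hypothetical sketch; the real CheckTimeoutThread is not shown in this excerpt.
class TimeoutWatchdog implements Runnable {
    private final org.apache.http.client.methods.HttpRequestBase request;
    private final long timeoutMillis;

    TimeoutWatchdog(org.apache.http.client.methods.HttpRequestBase request, long timeoutMillis) {
        this.request = request;
        this.timeoutMillis = timeoutMillis;
    }

    @Override
    public void run() {
        try {
            Thread.sleep(timeoutMillis);
            request.abort(); // deadline reached: abort the in-flight request
        } catch (InterruptedException e) {
            // Normal path: the request completed in time and the caller
            // interrupted this watchdog from its finally block.
        }
    }
}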