Example usage for java.lang Thread interrupt

List of usage examples for java.lang Thread interrupt

Introduction

On this page you can find example usage for java.lang Thread interrupt.

Prototype

public void interrupt() 

Source Link

Document

Interrupts this thread.
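
Before looking at the full examples below, here is a minimal, self-contained sketch of the basic contract: interrupting a thread that is blocked in a call such as Thread.sleep() makes that call throw InterruptedException, while interrupt() on a running thread merely sets its interrupt status flag. The class name and timings are illustrative only.

public class InterruptDemo {
    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            try {
                // A blocked call: interrupt() makes this throw InterruptedException
                Thread.sleep(60_000);
            } catch (InterruptedException e) {
                System.out.println("worker was interrupted while sleeping");
            }
        });
        worker.start();
        Thread.sleep(100);   // give the worker time to reach sleep()
        worker.interrupt();  // request interruption
        worker.join();       // the worker exits promptly after handling the interrupt
    }
}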

Usage

From source file:org.apache.hadoop.hdfs.TestBlockReaderFactory.java

/**
 * When an InterruptedException is sent to a thread calling
 * FileChannel#read, the FileChannel is immediately closed and the
 * thread gets an exception.  This effectively means that we might have
 * someone asynchronously calling close() on the file descriptors we use
 * in BlockReaderLocal.  So when unreferencing a ShortCircuitReplica in
 * ShortCircuitCache#unref, we should check if the FileChannel objects
 * are still open.  If not, we should purge the replica to avoid giving
 * it out to any future readers.
 *
 * This is a regression test for HDFS-6227: Short circuit read failed
 * due to ClosedChannelException.
 *
 * Note that you may still get ClosedChannelException errors if two threads
 * are reading from the same replica and an InterruptedException is delivered
 * to one of them.
 */
@Test(timeout = 120000)
public void testPurgingClosedReplicas() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicInteger replicasCreated = new AtomicInteger(0);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            replicasCreated.incrementAndGet();
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testPurgingClosedReplicas", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4095;
    final int SEED = 0xFADE0;
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), conf);
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    final Semaphore sem = new Semaphore(0);
    final List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer()
            .getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
    final LocatedBlock lblock = locatedBlocks.get(0); // first block
    final byte[] buf = new byte[TEST_FILE_LEN];
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                while (true) {
                    BlockReader blockReader = null;
                    try {
                        blockReader = BlockReaderTestUtil.getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
                        sem.release();
                        try {
                            blockReader.readAll(buf, 0, TEST_FILE_LEN);
                        } finally {
                            sem.acquireUninterruptibly();
                        }
                    } catch (ClosedByInterruptException e) {
                        LOG.info("got the expected ClosedByInterruptException", e);
                        sem.release();
                        break;
                    } finally {
                        if (blockReader != null)
                            blockReader.close();
                    }
                    LOG.info("read another " + TEST_FILE_LEN + " bytes.");
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
                sem.release();
            }
        }
    };
    Thread thread = new Thread(readerRunnable);
    thread.start();
    // While the thread is reading, send it interrupts.
    // These should trigger a ClosedChannelException.
    while (thread.isAlive()) {
        sem.acquireUninterruptibly();
        thread.interrupt();
        sem.release();
    }
    Assert.assertFalse(testFailed.get());
    // We should be able to read from the file without
    // getting a ClosedChannelException.
    BlockReader blockReader = null;
    try {
        blockReader = BlockReaderTestUtil.getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
        blockReader.readFully(buf, 0, TEST_FILE_LEN);
    } finally {
        if (blockReader != null)
            blockReader.close();
    }
    byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(buf, expected));
    // Another ShortCircuitReplica object should have been created.
    Assert.assertEquals(2, replicasCreated.get());
    dfs.close();
    cluster.shutdown();
    sockDir.close();
}
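
The behaviour this test depends on can be reproduced in isolation: when a thread is interrupted while performing (or about to perform) a FileChannel read, the channel is closed and the thread receives a ClosedByInterruptException. A minimal sketch, using an arbitrary temporary file rather than anything from HDFS:

import java.io.File;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.FileChannel;

public class ChannelInterruptDemo {
    public static void main(String[] args) throws Exception {
        File tmp = File.createTempFile("channel-interrupt", ".bin");
        tmp.deleteOnExit();
        Thread reader = new Thread(() -> {
            try (FileChannel channel = new RandomAccessFile(tmp, "r").getChannel()) {
                ByteBuffer buf = ByteBuffer.allocate(4096);
                while (true) {
                    buf.clear();
                    // An interruptible channel operation: once the thread is
                    // interrupted, the channel is closed and this call throws.
                    channel.read(buf);
                }
            } catch (ClosedByInterruptException e) {
                System.out.println("got the expected ClosedByInterruptException");
            } catch (Exception e) {
                e.printStackTrace();
            }
        });
        reader.start();
        Thread.sleep(200);
        reader.interrupt(); // closes the FileChannel under the reader
        reader.join();
    }
}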

From source file:org.openhab.binding.modbus.internal.SimultaneousReadWriteTestCase.java

@Test
public void testPoolBlocks() throws Exception {
    final KeyedObjectPool<ModbusSlaveEndpoint, ModbusSlaveConnection> pool = ModbusBinding
            .getReconstructedConnectionPoolForTesting();

    final ModbusTCPSlaveEndpoint endpoint = new ModbusTCPSlaveEndpoint(localAddress().getHostAddress(),
            this.tcpModbusPort);

    ModbusSlaveConnection borrowObject = pool.borrowObject(endpoint);
    Thread thread = new Thread() {
        @Override
        public void run() {
            try {
                ModbusSlaveConnection borrowObject2 = pool.borrowObject(endpoint);
                pool.returnObject(endpoint, borrowObject2);
            } catch (Exception e) {
                e.printStackTrace(System.err);
            }
        }
    };
    thread.start();
    thread.join(500);
    if (!thread.isAlive()) {
        throw new AssertionError("Thread should still be alive -- blocking since no objects");
    } else {
        thread.interrupt();
    }

    pool.returnObject(endpoint, borrowObject);
    // Now that object has been returned, borrowing should work again
    ModbusSlaveConnection borrowObject2 = pool.borrowObject(endpoint);
    pool.returnObject(endpoint, borrowObject2);

}

From source file:de.uni_potsdam.hpi.asg.common.io.Invoker.java

private ProcessReturn invoke(String[] cmd, List<String> params, File folder, int timeout) {
    List<String> command = new ArrayList<String>();
    command.addAll(Arrays.asList(cmd));
    command.addAll(params);
    ProcessReturn retVal = new ProcessReturn(Arrays.asList(cmd), params);
    Process process = null;
    try {
        logger.debug("Exec command: " + command.toString());
        //System.out.println(timeout + ": " + command.toString());
        ProcessBuilder builder = new ProcessBuilder(command);
        builder.directory(folder);
        builder.environment(); // workaround: touching the environment map in test mode fixes execution (exact reason unclear)
        process = builder.start();

        Thread timeoutThread = null;
        if (timeout > 0) {
            timeoutThread = new Thread(new Timeout(Thread.currentThread(), timeout));
            timeoutThread.setName("Timout for " + command.toString());
            timeoutThread.start();
        }
        IOStreamReader ioreader = new IOStreamReader(process);
        Thread streamThread = new Thread(ioreader);
        streamThread.setName("StreamReader for " + command.toString());
        streamThread.start();
        process.waitFor();
        streamThread.join();
        if (timeoutThread != null) {
            timeoutThread.interrupt();
        }
        String out = ioreader.getResult();
        //System.out.println(out);
        if (out == null) {
            //System.out.println("out = null");
            retVal.setStatus(Status.noio);
        }
        retVal.setCode(process.exitValue());
        retVal.setStream(out);
        retVal.setStatus(Status.ok);
    } catch (InterruptedException e) {
        process.destroy();
        retVal.setTimeout(timeout);
        retVal.setStatus(Status.timeout);
    } catch (IOException e) {
        logger.error(e.getLocalizedMessage());
        retVal.setStatus(Status.ioexception);
    }
    return retVal;
}
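
The Timeout runnable used above is not shown in this snippet. A plausible sketch of such a watchdog, one that sleeps for the timeout, interrupts the thread blocked in process.waitFor(), and is itself cancelled by timeoutThread.interrupt() once the process exits in time, might look like the following. This is an assumption about its shape, not the actual implementation from the source project.

// Hypothetical sketch of the Timeout runnable referenced above (not the project's actual class).
class Timeout implements Runnable {
    private final Thread threadToInterrupt;
    private final int timeoutMillis;

    Timeout(Thread threadToInterrupt, int timeoutMillis) {
        this.threadToInterrupt = threadToInterrupt;
        this.timeoutMillis = timeoutMillis;
    }

    @Override
    public void run() {
        try {
            Thread.sleep(timeoutMillis);
            // Timeout elapsed: wake the thread blocked in process.waitFor()
            threadToInterrupt.interrupt();
        } catch (InterruptedException e) {
            // The process finished in time and this watchdog was interrupted: nothing to do
        }
    }
}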

From source file:org.apache.hadoop.hdfs.client.impl.TestBlockReaderFactory.java

/**
 * When an InterruptedException is sent to a thread calling
 * FileChannel#read, the FileChannel is immediately closed and the
 * thread gets an exception.  This effectively means that we might have
 * someone asynchronously calling close() on the file descriptors we use
 * in BlockReaderLocal.  So when unreferencing a ShortCircuitReplica in
 * ShortCircuitCache#unref, we should check if the FileChannel objects
 * are still open.  If not, we should purge the replica to avoid giving
 * it out to any future readers.
 *
 * This is a regression test for HDFS-6227: Short circuit read failed
 * due to ClosedChannelException.
 *
 * Note that you may still get ClosedChannelException errors if two threads
 * are reading from the same replica and an InterruptedException is delivered
 * to one of them.
 */
@Test(timeout = 120000)
public void testPurgingClosedReplicas() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicInteger replicasCreated = new AtomicInteger(0);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            replicasCreated.incrementAndGet();
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testPurgingClosedReplicas", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4095;
    final int SEED = 0xFADE0;
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), conf);
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);

    final Semaphore sem = new Semaphore(0);
    final List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer()
            .getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
    final LocatedBlock lblock = locatedBlocks.get(0); // first block
    final byte[] buf = new byte[TEST_FILE_LEN];
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                while (true) {
                    BlockReader blockReader = null;
                    try {
                        blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0,
                                TEST_FILE_LEN);
                        sem.release();
                        try {
                            blockReader.readAll(buf, 0, TEST_FILE_LEN);
                        } finally {
                            sem.acquireUninterruptibly();
                        }
                    } catch (ClosedByInterruptException e) {
                        LOG.info("got the expected ClosedByInterruptException", e);
                        sem.release();
                        break;
                    } finally {
                        if (blockReader != null)
                            blockReader.close();
                    }
                    LOG.info("read another " + TEST_FILE_LEN + " bytes.");
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
                sem.release();
            }
        }
    };
    Thread thread = new Thread(readerRunnable);
    thread.start();

    // While the thread is reading, send it interrupts.
    // These should trigger a ClosedChannelException.
    while (thread.isAlive()) {
        sem.acquireUninterruptibly();
        thread.interrupt();
        sem.release();
    }
    Assert.assertFalse(testFailed.get());

    // We should be able to read from the file without
    // getting a ClosedChannelException.
    BlockReader blockReader = null;
    try {
        blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
        blockReader.readFully(buf, 0, TEST_FILE_LEN);
    } finally {
        if (blockReader != null)
            blockReader.close();
    }
    byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(buf, expected));

    // Another ShortCircuitReplica object should have been created.
    Assert.assertEquals(2, replicasCreated.get());

    dfs.close();
    cluster.shutdown();
    sockDir.close();
}

From source file:org.waarp.openr66.context.task.ExecOutputTask.java

private void finalizeFromError(PipedOutputStream outputStream, PumpStreamHandler pumpStreamHandler,
        PipedInputStream inputStream, AllLineReader allLineReader, Thread thread, int status,
        CommandLine commandLine) {
    try {
        Thread.sleep(Configuration.RETRYINMS);
    } catch (InterruptedException e) {
    }
    try {
        outputStream.flush();
    } catch (IOException e2) {
    }
    try {
        Thread.sleep(Configuration.RETRYINMS);
    } catch (InterruptedException e) {
    }
    try {
        outputStream.close();
    } catch (IOException e1) {
    }
    thread.interrupt();
    try {
        inputStream.close();
    } catch (IOException e1) {
    }
    try {
        Thread.sleep(Configuration.RETRYINMS);
    } catch (InterruptedException e) {
    }
    try {
        pumpStreamHandler.stop();
    } catch (IOException e2) {
    }
    try {
        Thread.sleep(Configuration.RETRYINMS);
    } catch (InterruptedException e) {
    }
    String result = allLineReader.getLastLine().toString();
    logger.error("Status: " + status + " Exec in error with " + commandLine + " returns " + result);
    OpenR66RunnerErrorException exc = new OpenR66RunnerErrorException(
            "<STATUS>" + status + "</STATUS><ERROR>" + result + "</ERROR>");
    futureCompletion.setFailure(exc);
}

From source file:com.baidu.jprotobuf.mojo.PreCompileMojo.java

private void terminateThreads(ThreadGroup threadGroup) {
    long startTime = System.currentTimeMillis();
    Set<Thread> uncooperativeThreads = new HashSet<Thread>(); // these were not responsive to interruption
    for (Collection<Thread> threads = getActiveThreads(threadGroup); !threads
            .isEmpty(); threads = getActiveThreads(threadGroup), threads.removeAll(uncooperativeThreads)) {
        // Interrupt all threads we know about as of this instant (harmless if spuriously went dead (! isAlive())
        // or if something else interrupted it ( isInterrupted() ).
        for (Thread thread : threads) {
            getLog().debug("interrupting thread " + thread);
            thread.interrupt();
        }
        // Now join with a timeout and call stop() (assuming flags are set right)
        for (Thread thread : threads) {
            if (!thread.isAlive()) {
                continue; // and, presumably it won't show up in getActiveThreads() next iteration
            }
            if (daemonThreadJoinTimeout <= 0) {
                joinThread(thread, 0); // waits until not alive; no timeout
                continue;
            }
            long timeout = daemonThreadJoinTimeout - (System.currentTimeMillis() - startTime);
            if (timeout > 0) {
                joinThread(thread, timeout);
            }
            if (!thread.isAlive()) {
                continue;
            }
            uncooperativeThreads.add(thread); // ensure we don't process again
            if (stopUnresponsiveDaemonThreads) {
                getLog().warn("thread " + thread + " will be Thread.stop()'ed");
                thread.stop();
            } else {
                getLog().warn("thread " + thread + " will linger despite being asked to die via interruption");
            }
        }
    }
    if (!uncooperativeThreads.isEmpty()) {
        getLog().warn("NOTE: " + uncooperativeThreads.size()
                + " thread(s) did not finish despite being asked to "
                + " via interruption. This is not a problem with exec:java, it is a problem with the running code."
                + " Although not serious, it should be remedied.");
    } else {
        int activeCount = threadGroup.activeCount();
        if (activeCount != 0) {
            // TODO this may be nothing; continue on anyway; perhaps don't even log in future
            Thread[] threadsArray = new Thread[1];
            threadGroup.enumerate(threadsArray);
            getLog().debug("strange; " + activeCount + " thread(s) still active in the group " + threadGroup
                    + " such as " + threadsArray[0]);
        }
    }
}
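
interrupt() is only a request, which is why the method above tracks uncooperativeThreads and can fall back to the deprecated Thread.stop(). A thread terminates cleanly under this scheme only if it checks its interrupt status (or lets InterruptedException propagate). A minimal, illustrative cooperative worker:

public class CooperativeWorkerDemo {
    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    Thread.sleep(50); // stands in for one unit of work
                } catch (InterruptedException e) {
                    // Restore the interrupt flag so the loop condition sees it
                    Thread.currentThread().interrupt();
                }
            }
            System.out.println("worker observed the interrupt and exited");
        });
        worker.start();
        Thread.sleep(200);
        worker.interrupt(); // the join below then returns almost immediately
        worker.join();
    }
}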

From source file:org.springframework.integration.channel.QueueChannelTests.java

@Test
public void testReactorPersistentQueue() throws InterruptedException, IOException {
    final AtomicBoolean messageReceived = new AtomicBoolean(false);
    final CountDownLatch latch = new CountDownLatch(1);
    PersistentQueue<Message<?>> queue = new PersistentQueueSpec<Message<?>>()
            .codec(new JavaSerializationCodec<Message<?>>())
            .basePath(this.tempFolder.getRoot().getAbsolutePath()).get();

    final QueueChannel channel = new QueueChannel(queue);
    new Thread(new Runnable() {
        @Override
        public void run() {
            Message<?> message = channel.receive();
            if (message != null) {
                messageReceived.set(true);
                latch.countDown();
            }
        }
    }).start();
    assertFalse(messageReceived.get());
    channel.send(new GenericMessage<String>("testing"));
    latch.await(1000, TimeUnit.MILLISECONDS);
    assertTrue(messageReceived.get());

    final CountDownLatch latch1 = new CountDownLatch(2);

    Thread thread = new Thread(new Runnable() {
        @Override
        public void run() {
            while (true) {
                Message<?> message = channel.receive(100);
                if (message != null) {
                    latch1.countDown();
                    if (latch1.getCount() == 0) {
                        break;
                    }
                }
            }
        }
    });
    thread.start();

    Thread.sleep(200);
    channel.send(new GenericMessage<String>("testing"));
    channel.send(new GenericMessage<String>("testing"));
    assertTrue(latch1.await(1000, TimeUnit.MILLISECONDS));

    final AtomicBoolean receiveInterrupted = new AtomicBoolean(false);
    final CountDownLatch latch2 = new CountDownLatch(1);
    Thread t = new Thread(new Runnable() {
        @Override
        public void run() {
            Message<?> message = channel.receive(10000);
            receiveInterrupted.set(true);
            assertTrue(message == null);
            latch2.countDown();
        }
    });
    t.start();
    assertFalse(receiveInterrupted.get());
    t.interrupt();
    latch2.await();
    assertTrue(receiveInterrupted.get());

    receiveInterrupted.set(false);
    final CountDownLatch latch3 = new CountDownLatch(1);
    t = new Thread(new Runnable() {
        @Override
        public void run() {
            Message<?> message = channel.receive();
            receiveInterrupted.set(true);
            assertTrue(message == null);
            latch3.countDown();
        }
    });
    t.start();
    assertFalse(receiveInterrupted.get());
    t.interrupt();
    latch3.await();
    assertTrue(receiveInterrupted.get());

    GenericMessage<String> message1 = new GenericMessage<String>("test1");
    GenericMessage<String> message2 = new GenericMessage<String>("test2");
    assertTrue(channel.send(message1));
    assertTrue(channel.send(message2));
    List<Message<?>> clearedMessages = channel.clear();
    assertNotNull(clearedMessages);
    assertEquals(2, clearedMessages.size());

    clearedMessages = channel.clear();
    assertNotNull(clearedMessages);
    assertEquals(0, clearedMessages.size());

    // Test on artificial infinite wait
    // channel.receive();

    // Distributed scenario
    final CountDownLatch latch4 = new CountDownLatch(1);
    new Thread(new Runnable() {
        @Override
        public void run() {
            Message<?> message = channel.receive();
            if (message != null) {
                latch4.countDown();
            }
        }
    }).start();
    queue.add(new GenericMessage<String>("foo"));
    assertTrue(latch4.await(1000, TimeUnit.MILLISECONDS));
}
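
The interruption behaviour exercised above (a blocked receive() returning null once the thread is interrupted) mirrors the standard java.util.concurrent pattern: a thread blocked in BlockingQueue.take() or poll(timeout, unit) throws InterruptedException when interrupted. A minimal stand-alone sketch of that pattern, independent of Spring Integration:

import java.util.concurrent.LinkedBlockingQueue;

public class BlockingQueueInterruptDemo {
    public static void main(String[] args) throws InterruptedException {
        LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>();
        Thread receiver = new Thread(() -> {
            try {
                queue.take(); // blocks because the queue is empty
            } catch (InterruptedException e) {
                System.out.println("receive was interrupted before any message arrived");
            }
        });
        receiver.start();
        Thread.sleep(100);    // let the receiver block in take()
        receiver.interrupt(); // wakes it with an InterruptedException
        receiver.join();
    }
}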

From source file:org.opencms.workplace.commons.CmsProgressWidget.java

/**
 * Starts a thread for the progress on the given list.<p>
 *
 * @param list the list to use for the progress bar
 * @param abortExisting if true then an already existing thread will be killed
 */
public void startProgress(A_CmsListDialog list, boolean abortExisting) {

    // check the list
    if (list == null) {
        throw new CmsIllegalArgumentException(
                Messages.get().container(Messages.ERR_PROGRESS_START_INVALID_LIST_0));
    }

    // check if created key already exists
    if (m_threads.get(getKey()) != null) {
        if (abortExisting) {
            if (LOG.isDebugEnabled()) {
                LOG.debug(Messages.get().getBundle().key(Messages.LOG_PROGRESS_INTERRUPT_THREAD_1, getKey()));
            }
            Thread thread = (Thread) m_threads.get(getKey());
            thread.interrupt();
        } else {
            throw new CmsIllegalStateException(
                    Messages.get().container(Messages.ERR_PROGRESS_START_THREAD_EXISTS_0));
        }
    }

    // create the thread
    CmsProgressThread thread = new CmsProgressThread(list, getKey(), list.getLocale());

    Map threadsAbandoned = new HashMap();
    Map threadsAlive = new HashMap();
    synchronized (m_threads) {

        // clean up abandoned threads
        for (Iterator iter = m_threads.entrySet().iterator(); iter.hasNext();) {
            Map.Entry entry = (Map.Entry) iter.next();
            CmsProgressThread value = (CmsProgressThread) entry.getValue();

            if ((!value.isAlive()) && (System.currentTimeMillis() - value.getFinishTime() > CLEANUP_PERIOD)) {
                threadsAbandoned.put(entry.getKey(), value);
            } else {
                threadsAlive.put(entry.getKey(), value);
            }
        }

        // add and start new thread
        threadsAlive.put(thread.getKey(), thread);
        thread.start();

        m_threads = threadsAlive;
    }

    if (LOG.isDebugEnabled()) {
        for (Iterator iter = threadsAbandoned.keySet().iterator(); iter.hasNext();) {
            String key = (String) iter.next();
            LOG.debug(Messages.get().getBundle().key(Messages.LOG_PROGRESS_CLEAN_UP_THREAD_1, key));
        }
    }
}

From source file:ca.spencerelliott.mercury.Changesets.java

private synchronized void stopThread() {
    //Make sure the load thread exists before attempting to stop it
    if (load_thread != null) {
        //Save the old thread
        Thread old_thread = load_thread;

        //Get rid of the old thread
        load_thread = null;

        //Interrupt the old thread
        old_thread.interrupt();
    }
}

From source file:org.kurento.test.client.KurentoTestClient.java

@SuppressWarnings("deprecation")
public void initWebRtc(final WebRtcEndpoint webRtcEndpoint, final WebRtcChannel channel, final WebRtcMode mode)
        throws InterruptedException {

    webRtcEndpoint.addOnIceCandidateListener(new EventListener<OnIceCandidateEvent>() {
        @Override
        public void onEvent(OnIceCandidateEvent event) {
            browserClient
                    .executeScript("addIceCandidate('" + JsonUtils.toJsonObject(event.getCandidate()) + "');");
        }
    });

    final CountDownLatch latch = new CountDownLatch(1);
    Thread t = new Thread() {
        public void run() {
            initWebRtcSdpProcessor(new SdpOfferProcessor() {
                @Override
                public String processSdpOffer(String sdpOffer) {
                    return webRtcEndpoint.processOffer(sdpOffer);
                }
            }, channel, mode);
            latch.countDown();
        }
    };
    t.start();
    if (!latch.await(browserClient.getTimeout(), TimeUnit.SECONDS)) {
        t.interrupt();
        t.stop();
    }
    webRtcEndpoint.gatherCandidates();
}