List of usage examples for java.util.concurrent.BlockingQueue.poll(long timeout, TimeUnit unit)
E poll(long timeout, TimeUnit unit) throws InterruptedException;
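This overload retrieves and removes the head of the queue, waiting up to the specified wait time for an element to become available; it returns null if the timeout elapses first. Before the real-world usages below, a minimal self-contained sketch (class and variable names are illustrative) showing both outcomes:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class PollTimeoutDemo {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> queue = new LinkedBlockingQueue<String>();

        // Producer thread hands an element over after a short delay.
        new Thread(() -> {
            try {
                Thread.sleep(500);
                queue.put("hello");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }).start();

        // Waits up to 2 seconds; returns the element as soon as it arrives.
        String received = queue.poll(2, TimeUnit.SECONDS);
        System.out.println("received = " + received); // "hello"

        // No producer this time: after 100 ms the timeout expires and poll returns null.
        String missing = queue.poll(100, TimeUnit.MILLISECONDS);
        System.out.println("missing = " + missing); // null
    }
}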
From source file:com.netflix.curator.framework.imps.TestFramework.java
@Test
public void testConnectionState() throws Exception {
    Timing timing = new Timing();
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
            timing.connection(), new RetryOneTime(1));
    try {
        final BlockingQueue<ConnectionState> queue = new LinkedBlockingQueue<ConnectionState>();
        ConnectionStateListener listener = new ConnectionStateListener() {
            @Override
            public void stateChanged(CuratorFramework client, ConnectionState newState) {
                queue.add(newState);
            }
        };
        client.getConnectionStateListenable().addListener(listener);
        client.start();
        Assert.assertEquals(queue.poll(timing.multiple(4).seconds(), TimeUnit.SECONDS), ConnectionState.CONNECTED);
        server.stop();
        Assert.assertEquals(queue.poll(timing.multiple(4).seconds(), TimeUnit.SECONDS), ConnectionState.SUSPENDED);
        Assert.assertEquals(queue.poll(timing.multiple(4).seconds(), TimeUnit.SECONDS), ConnectionState.LOST);
    } finally {
        IOUtils.closeQuietly(client);
    }
}
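Note the pattern shared by this and several of the tests below: the listener callback runs on the client's event thread and hands each event to the test thread through the BlockingQueue, and the timed poll keeps the assertion from hanging forever if an expected event never arrives. The timing.multiple(4) factor presumably pads the wait to tolerate slow machines.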
From source file:com.netflix.curator.framework.recipes.cache.TestPathChildrenCache.java
@Test
public void testAsyncInitialPopulation() throws Exception {
    PathChildrenCache cache = null;
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    try {
        client.start();
        client.create().forPath("/test");
        client.create().forPath("/test/one", "hey there".getBytes());

        final BlockingQueue<PathChildrenCacheEvent> events = new LinkedBlockingQueue<PathChildrenCacheEvent>();
        cache = new PathChildrenCache(client, "/test", true);
        cache.getListenable().addListener(new PathChildrenCacheListener() {
            @Override
            public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
                events.offer(event);
            }
        });
        cache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);

        PathChildrenCacheEvent event = events.poll(10, TimeUnit.SECONDS);
        Assert.assertEquals(event.getType(), PathChildrenCacheEvent.Type.CHILD_ADDED);
        event = events.poll(10, TimeUnit.SECONDS);
        Assert.assertEquals(event.getType(), PathChildrenCacheEvent.Type.INITIALIZED);
        Assert.assertEquals(event.getInitialData().size(), 1);
    } finally {
        IOUtils.closeQuietly(cache);
        IOUtils.closeQuietly(client);
    }
}
From source file:com.kurento.kmf.media.HttpGetEndpointTest.java
/**
 * Test for {@link MediaSessionStartedEvent}
 *
 * @throws InterruptedException
 */
@Test
public void testEventMediaSessionStarted() throws InterruptedException {
    final PlayerEndpoint player = pipeline.newPlayerEndpoint(URL_SMALL).build();
    HttpGetEndpoint httpEP = pipeline.newHttpGetEndpoint().build();
    player.connect(httpEP);

    final BlockingQueue<EndOfStreamEvent> eosEvents = new ArrayBlockingQueue<EndOfStreamEvent>(1);
    player.addEndOfStreamListener(new MediaEventListener<EndOfStreamEvent>() {
        @Override
        public void onEvent(EndOfStreamEvent event) {
            eosEvents.add(event);
        }
    });
    httpEP.addMediaSessionStartedListener(new MediaEventListener<MediaSessionStartedEvent>() {
        @Override
        public void onEvent(MediaSessionStartedEvent event) {
            player.play();
        }
    });

    DefaultHttpClient httpclient = new DefaultHttpClient();
    try {
        // This should trigger MediaSessionStartedEvent
        httpclient.execute(new HttpGet(httpEP.getUrl()));
    } catch (ClientProtocolException e) {
        throw new KurentoMediaFrameworkException();
    } catch (IOException e) {
        throw new KurentoMediaFrameworkException();
    }

    Assert.assertNotNull(eosEvents.poll(7, SECONDS));

    httpEP.release();
    player.release();
}
From source file:com.kurento.kmf.media.HttpPostEndpointTest.java
/**
 * Test for {@link MediaSessionStartedEvent}
 *
 * @throws InterruptedException
 */
@Test
public void testEventMediaSessionStarted() throws InterruptedException {
    final PlayerEndpoint player = pipeline.newPlayerEndpoint(URL_SMALL).build();
    HttpPostEndpoint httpEP = pipeline.newHttpPostEndpoint().build();
    player.connect(httpEP);

    final BlockingQueue<EndOfStreamEvent> eosEvents = new ArrayBlockingQueue<EndOfStreamEvent>(1);
    player.addEndOfStreamListener(new MediaEventListener<EndOfStreamEvent>() {
        @Override
        public void onEvent(EndOfStreamEvent event) {
            eosEvents.add(event);
        }
    });
    httpEP.addMediaSessionStartedListener(new MediaEventListener<MediaSessionStartedEvent>() {
        @Override
        public void onEvent(MediaSessionStartedEvent event) {
            player.play();
        }
    });

    DefaultHttpClient httpclient = new DefaultHttpClient();
    try {
        // This should trigger MediaSessionStartedEvent
        httpclient.execute(new HttpGet(httpEP.getUrl()));
    } catch (ClientProtocolException e) {
        throw new KurentoMediaFrameworkException();
    } catch (IOException e) {
        throw new KurentoMediaFrameworkException();
    }

    Assert.assertNotNull(eosEvents.poll(7, SECONDS));

    httpEP.release();
    player.release();
}
From source file:com.netflix.curator.framework.imps.TestFramework.java
@Test
public void testNamespaceInBackground() throws Exception {
    CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder();
    CuratorFramework client = builder.connectString(server.getConnectString()).namespace("aisa")
            .retryPolicy(new RetryOneTime(1)).build();
    client.start();
    try {
        final BlockingQueue<String> queue = new LinkedBlockingQueue<String>();
        CuratorListener listener = new CuratorListener() {
            @Override
            public void eventReceived(CuratorFramework client, CuratorEvent event) throws Exception {
                if (event.getType() == CuratorEventType.EXISTS) {
                    queue.put(event.getPath());
                }
            }
        };
        client.getCuratorListenable().addListener(listener);

        client.create().forPath("/base");
        client.checkExists().inBackground().forPath("/base");

        String path = queue.poll(10, TimeUnit.SECONDS);
        Assert.assertEquals(path, "/base");

        client.getCuratorListenable().removeListener(listener);

        BackgroundCallback callback = new BackgroundCallback() {
            @Override
            public void processResult(CuratorFramework client, CuratorEvent event) throws Exception {
                queue.put(event.getPath());
            }
        };
        client.getChildren().inBackground(callback).forPath("/base");

        path = queue.poll(10, TimeUnit.SECONDS);
        Assert.assertEquals(path, "/base");
    } finally {
        client.close();
    }
}
From source file:com.netflix.curator.framework.recipes.cache.TestPathChildrenCache.java
@Test
public void testBasics() throws Exception {
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    client.start();
    try {
        client.create().forPath("/test");

        final BlockingQueue<PathChildrenCacheEvent.Type> events = new LinkedBlockingQueue<PathChildrenCacheEvent.Type>();
        PathChildrenCache cache = new PathChildrenCache(client, "/test", true);
        cache.getListenable().addListener(new PathChildrenCacheListener() {
            @Override
            public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
                if (event.getData().getPath().equals("/test/one")) {
                    events.offer(event.getType());
                }
            }
        });
        cache.start();

        client.create().forPath("/test/one", "hey there".getBytes());
        Assert.assertEquals(events.poll(10, TimeUnit.SECONDS), PathChildrenCacheEvent.Type.CHILD_ADDED);

        client.setData().forPath("/test/one", "sup!".getBytes());
        Assert.assertEquals(events.poll(10, TimeUnit.SECONDS), PathChildrenCacheEvent.Type.CHILD_UPDATED);
        Assert.assertEquals(new String(cache.getCurrentData("/test/one").getData()), "sup!");

        client.delete().forPath("/test/one");
        Assert.assertEquals(events.poll(10, TimeUnit.SECONDS), PathChildrenCacheEvent.Type.CHILD_REMOVED);

        cache.close();
    } finally {
        client.close();
    }
}
From source file:com.splout.db.qnode.QNodeHandlerContext.java
/**
 * Get the Thrift client for this DNode.
 * <p/>
 * Can throw a TTransportException in the rare case when a new pool is initialized here; in that case, you
 * shouldn't call {@link #returnDNodeClientToPool(String, com.splout.db.thrift.DNodeService.Client, boolean)}
 * to return the connection.
 * <p/>
 * This method never returns null.
 *
 * @throws java.lang.InterruptedException if somebody interrupts the thread while the method is waiting on the pool
 * @throws com.splout.db.qnode.PoolCreationException if there is a failure when a new pool is created
 * @throws com.splout.db.qnode.DNodePoolFullException if the pool for the given dnode is empty and the timeout
 *         for waiting for a connection is reached
 */
public DNodeService.Client getDNodeClientFromPool(String dnode)
        throws InterruptedException, PoolCreationException, DNodePoolFullException {
    BlockingQueue<DNodeService.Client> dnodeQueue = thriftClientCache.get(dnode);
    if (dnodeQueue == null) {
        // This shouldn't happen in real life because the QNode initializes the cache, but it is useful for
        // unit testing. Under some rare race conditions the pool may be required before the QNode creates it,
        // but this method assures that the queue will only be created once and, if it can't be created, an
        // exception is thrown and nothing bad happens.
        try {
            initializeThriftClientCacheFor(dnode);
            dnodeQueue = thriftClientCache.get(dnode);
        } catch (TTransportException e) {
            throw new PoolCreationException(e);
        }
    }
    DNodeService.Client client = dnodeQueue.poll(dnodePoolTimeoutMillis, TimeUnit.MILLISECONDS);
    // Timeout waiting for poll
    if (client == null) {
        throw new DNodePoolFullException("Pool for dnode[" + dnode + "] is full and timeout of ["
                + dnodePoolTimeoutMillis + "] reached when waiting for a connection.");
    }
    return client;
}
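The javadoc above refers to returnDNodeClientToPool(...), whose body is not shown on this page. As a rough, hypothetical sketch of what the return side of a BlockingQueue-backed connection pool like this one usually looks like (the meaning of the boolean flag and the close logic are assumptions, not the real Splout implementation):

public void returnDNodeClientToPool(String dnode, DNodeService.Client client, boolean discard) {
    // Hypothetical sketch; the actual com.splout.db implementation may differ.
    BlockingQueue<DNodeService.Client> dnodeQueue = thriftClientCache.get(dnode);
    // Returning a client must never block, so use offer(); if the caller asked to discard
    // the connection, or the pool is missing or already full, close the Thrift transport
    // instead of leaking the socket.
    if (discard || dnodeQueue == null || !dnodeQueue.offer(client)) {
        client.getInputProtocol().getTransport().close();
    }
}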
From source file:org.apache.hive.ptest.execution.HostExecutor.java
/**
 * Executes parallel tests until the parallel work queue is empty, then executes the isolated tests on the
 * host. During each phase, if an AbortDroneException is thrown the drone is removed, possibly leaving this
 * host with zero functioning drones. If all drones are removed, the host will be replaced before the next run.
 */
private void executeTests(final BlockingQueue<TestBatch> parallelWorkQueue,
        final BlockingQueue<TestBatch> isolatedWorkQueue, final Set<TestBatch> failedTestResults)
        throws Exception {
    if (mShutdown) {
        mLogger.warn("Shutting down host " + mHost.getName());
        return;
    }
    mLogger.info("Starting parallel execution on " + mHost.getName());
    List<ListenableFuture<Void>> droneResults = Lists.newArrayList();
    for (final Drone drone : ImmutableList.copyOf(mDrones)) {
        droneResults.add(mExecutor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                TestBatch batch = null;
                Stopwatch sw = Stopwatch.createUnstarted();
                try {
                    do {
                        batch = parallelWorkQueue.poll(mNumPollSeconds, TimeUnit.SECONDS);
                        if (mShutdown) {
                            mLogger.warn("Shutting down host " + mHost.getName());
                            return null;
                        }
                        if (batch != null) {
                            numParallelBatchesProcessed++;
                            sw.reset().start();
                            try {
                                if (!executeTestBatch(drone, batch, failedTestResults)) {
                                    failedTestResults.add(batch);
                                }
                            } finally {
                                sw.stop();
                                mLogger.info("Finished processing parallel batch [{}] on host {}. ElapsedTime(ms)={}",
                                        new Object[] { batch.getName(), getHost().toShortString(),
                                                sw.elapsed(TimeUnit.MILLISECONDS) });
                            }
                        }
                    } while (!mShutdown && !parallelWorkQueue.isEmpty());
                } catch (AbortDroneException ex) {
                    mDrones.remove(drone); // return value not checked due to concurrent access
                    mLogger.error("Aborting drone during parallel execution", ex);
                    if (batch != null) {
                        Preconditions.checkState(parallelWorkQueue.add(batch),
                                "Could not add batch to parallel queue " + batch);
                    }
                }
                return null;
            }
        }));
    }
    if (mShutdown) {
        mLogger.warn("Shutting down host " + mHost.getName());
        return;
    }
    Futures.allAsList(droneResults).get();
    mLogger.info("Starting isolated execution on " + mHost.getName());
    for (Drone drone : ImmutableList.copyOf(mDrones)) {
        TestBatch batch = null;
        Stopwatch sw = Stopwatch.createUnstarted();
        try {
            do {
                batch = isolatedWorkQueue.poll(mNumPollSeconds, TimeUnit.SECONDS);
                if (batch != null) {
                    numIsolatedBatchesProcessed++;
                    sw.reset().start();
                    try {
                        if (!executeTestBatch(drone, batch, failedTestResults)) {
                            failedTestResults.add(batch);
                        }
                    } finally {
                        sw.stop();
                        mLogger.info("Finished processing isolated batch [{}] on host {}. ElapsedTime(ms)={}",
                                new Object[] { batch.getName(), getHost().toShortString(),
                                        sw.elapsed(TimeUnit.MILLISECONDS) });
                    }
                }
            } while (!mShutdown && !isolatedWorkQueue.isEmpty());
        } catch (AbortDroneException ex) {
            mDrones.remove(drone); // return value not checked due to concurrent access
            mLogger.error("Aborting drone during isolated execution", ex);
            if (batch != null) {
                Preconditions.checkState(isolatedWorkQueue.add(batch),
                        "Could not add batch to isolated queue " + batch);
            }
        }
    }
}
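A design point worth noting in the loops above: using a timed poll rather than take() lets each worker re-check mShutdown and the queue's emptiness every mNumPollSeconds, so a shutdown request is honored promptly instead of the thread blocking indefinitely on an empty queue.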
From source file:org.springframework.integration.jdbc.lock.JdbcLockRegistryDifferentClientTests.java
@Test
public void testExclusiveAccess() throws Exception {
    DefaultLockRepository client1 = new DefaultLockRepository(dataSource);
    client1.afterPropertiesSet();
    final DefaultLockRepository client2 = new DefaultLockRepository(dataSource);
    client2.afterPropertiesSet();
    Lock lock1 = new JdbcLockRegistry(client1).obtain("foo");
    final BlockingQueue<Integer> data = new LinkedBlockingQueue<Integer>();
    final CountDownLatch latch1 = new CountDownLatch(1);
    lock1.lockInterruptibly();
    Executors.newSingleThreadExecutor().execute(() -> {
        Lock lock2 = new JdbcLockRegistry(client2).obtain("foo");
        try {
            latch1.countDown();
            StopWatch stopWatch = new StopWatch();
            stopWatch.start();
            lock2.lockInterruptibly();
            stopWatch.stop();
            data.add(4);
            Thread.sleep(10);
            data.add(5);
            Thread.sleep(10);
            data.add(6);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        } finally {
            lock2.unlock();
        }
    });
    assertTrue(latch1.await(10, TimeUnit.SECONDS));
    data.add(1);
    Thread.sleep(1000);
    data.add(2);
    Thread.sleep(1000);
    data.add(3);
    lock1.unlock();
    for (int i = 0; i < 6; i++) {
        Integer integer = data.poll(10, TimeUnit.SECONDS);
        assertNotNull(integer);
        assertEquals(i + 1, integer.intValue());
    }
}
From source file:com.turn.griffin.data.GriffinUploadTask.java
private BitSet getAvailableBitmap(FileInfo fileInfo) {
    String filename = fileInfo.getFilename();
    long fileVersion = fileInfo.getVersion();
    long blockCount = fileInfo.getBlockCount();

    Optional<GriffinConsumer> consumer = Optional.absent();
    BitSet availableBlockBitmap = new BitSet((int) blockCount);
    try {
        BlockingQueue<byte[]> dataQueue = new ArrayBlockingQueue<>(
                GriffinDownloadTask.DOWNLOAD_CONSUMER_QUEUE_SIZE);
        Properties properties = new Properties();
        properties.put("auto.offset.reset", "smallest");

        /* The groupId should be unique to avoid conflict with other consumers running on this machine */
        String consumerGroupId = GriffinKafkaTopicNameUtil.getDataTopicConsumerGroupId(filename, fileVersion,
                new String[] { dataManager.getMyServerId(), this.getClass().getSimpleName(),
                        UUID.randomUUID().toString() });
        String dataTopicNameForConsumer = GriffinKafkaTopicNameUtil.getDataTopicNameForConsumer(filename,
                fileVersion);

        consumer = Optional.fromNullable(new GriffinConsumer(GriffinModule.ZOOKEEPER, consumerGroupId,
                dataTopicNameForConsumer, GriffinDownloadTask.DOWNLOAD_THREAD_COUNT, properties, dataQueue));

        /* TODO: Change this to a better bitmap (Check out RoaringBitmap) */
        while (availableBlockBitmap.nextClearBit(0) != blockCount) {
            Optional<byte[]> message = Optional.fromNullable(dataQueue
                    .poll(GriffinLeaderSelectionTask.LEADER_SELECTION_PERIOD_MS, TimeUnit.MILLISECONDS));
            if (!message.isPresent()) {
                /* We know how much of the file is available in Kafka */
                break;
            }
            DataMessage dataMessage = DataMessage.parseFrom(message.get());
            availableBlockBitmap.set((int) dataMessage.getBlockSeqNo());
        }
    } catch (Exception e) {
        logger.warn(String.format("Unable to download file %s to get available bitmap ", filename), e);
        /* Work with whatever information we have gathered till now */
    } finally {
        if (consumer.isPresent()) {
            consumer.get().shutdown(true);
        }
    }
    return availableBlockBitmap;
}
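The while loop above uses the timed poll as an end-of-data probe: once no message arrives within the timeout, the consumer assumes it has seen everything currently in Kafka. That "consume what's there, then stop" idiom generalizes; a minimal generic sketch (method and variable names are illustrative, not part of the Griffin codebase):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

/** Drains a queue until no new element arrives for quietMillis, then returns everything seen. */
static <T> List<T> drainUntilQuiet(BlockingQueue<T> queue, long quietMillis) throws InterruptedException {
    List<T> seen = new ArrayList<>();
    while (true) {
        T item = queue.poll(quietMillis, TimeUnit.MILLISECONDS);
        if (item == null) {
            // Timeout expired with nothing new: treat the stream as exhausted for now.
            return seen;
        }
        seen.add(item);
    }
}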