List of usage examples for java.util.concurrent.atomic.AtomicBoolean.get()
public final boolean get()
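get() returns the current boolean value. The read has volatile semantics, so a value written by another thread via set() or compareAndSet() is guaranteed to be visible to a subsequent get(). Before the real-world examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects) of the most common pattern: one thread polls the flag with get() until another thread publishes a new value with set().

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanGetDemo {

    public static void main(String[] args) throws InterruptedException {
        AtomicBoolean done = new AtomicBoolean(false);

        Thread worker = new Thread(() -> {
            // get() is a volatile read, so the set(true) performed by the
            // main thread below is guaranteed to become visible here.
            while (!done.get()) {
                Thread.onSpinWait(); // Java 9+; busy-wait politely
            }
            System.out.println("worker observed done == true, exiting");
        });

        worker.start();
        Thread.sleep(100); // give the worker time to start polling
        done.set(true);    // volatile write: publish the new value
        worker.join();
    }
}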
From source file: org.apache.bookkeeper.mledger.impl.ManagedLedgerTest.java

/**
 * It verifies that if the bk-client doesn't complete the add-entry within the
 * given timeout, the broker is resilient enough to create a new ledger and add
 * the entry successfully.
 *
 * @throws Exception
 */
@Test(timeOut = 20000)
public void testManagedLedgerWithAddEntryTimeOut() throws Exception {
    ManagedLedgerConfig config = new ManagedLedgerConfig().setAddEntryTimeoutSeconds(1);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("timeout_ledger_test", config);

    BookKeeper bk = mock(BookKeeper.class);
    doNothing().when(bk).asyncCreateLedger(anyInt(), anyInt(), anyInt(), any(), any(), any(), any(), any());
    PulsarMockBookKeeper bkClient = mock(PulsarMockBookKeeper.class);
    ClientConfiguration conf = new ClientConfiguration();
    doReturn(conf).when(bkClient).getConf();

    class MockLedgerHandle extends PulsarMockLedgerHandle {

        public MockLedgerHandle(PulsarMockBookKeeper bk, long id, DigestType digest, byte[] passwd)
                throws GeneralSecurityException {
            super(bk, id, digest, passwd);
        }

        @Override
        public void asyncAddEntry(final byte[] data, final AddCallback cb, final Object ctx) {
            // do nothing
        }

        @Override
        public void asyncClose(org.apache.bookkeeper.client.AsyncCallback.CloseCallback cb, Object ctx) {
            cb.closeComplete(BKException.Code.OK, this, ctx);
        }
    }

    MockLedgerHandle ledgerHandle = mock(MockLedgerHandle.class);
    final String data = "data";
    doNothing().when(ledgerHandle).asyncAddEntry(data.getBytes(), null, null);
    AtomicBoolean addSuccess = new AtomicBoolean();

    setFieldValue(ManagedLedgerImpl.class, ledger, "currentLedger", ledgerHandle);

    final int totalAddEntries = 1;
    CountDownLatch latch = new CountDownLatch(totalAddEntries);
    ledger.asyncAddEntry(data.getBytes(), new AddEntryCallback() {

        @Override
        public void addComplete(Position position, Object ctx) {
            addSuccess.set(true);
            latch.countDown();
        }

        @Override
        public void addFailed(ManagedLedgerException exception, Object ctx) {
            latch.countDown();
        }
    }, null);
    latch.await();

    assertTrue(addSuccess.get());

    setFieldValue(ManagedLedgerImpl.class, ledger, "currentLedger", null);
}
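The test above pairs the AtomicBoolean with a CountDownLatch: the callback records the outcome with set(true) before counting down, and the test thread reads it with get() only after await() returns, so no extra synchronization is needed. A stripped-down sketch of that callback-plus-latch pattern follows; the doWork method is a hypothetical stand-in for the asynchronous operation, not part of the managed-ledger API.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public class CallbackFlagDemo {

    public static void main(String[] args) throws InterruptedException {
        AtomicBoolean addSuccess = new AtomicBoolean(); // initially false
        CountDownLatch latch = new CountDownLatch(1);

        // Stand-in for an asynchronous API such as ledger.asyncAddEntry(...):
        // the callback records the outcome, then releases the waiting thread.
        new Thread(() -> {
            boolean succeeded = doWork();
            if (succeeded) {
                addSuccess.set(true); // record the outcome first...
            }
            latch.countDown();        // ...then wake the asserting thread
        }).start();

        latch.await(); // wait for the callback to fire
        System.out.println("success = " + addSuccess.get());
    }

    private static boolean doWork() {
        return true; // placeholder for the real asynchronous operation
    }
}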
From source file: com.spectralogic.ds3client.integration.GetJobManagement_Test.java

@Test
public void testReadRetrybugWhenChannelThrowsAccessException()
        throws IOException, URISyntaxException, NoSuchMethodException,
        IllegalAccessException, InvocationTargetException {
    final String tempPathPrefix = null;
    final Path tempDirectoryPath = Files.createTempDirectory(Paths.get("."), tempPathPrefix);
    final AtomicBoolean caughtException = new AtomicBoolean(false);

    try {
        final String DIR_NAME = "largeFiles/";
        final String FILE_NAME = "lesmis-copies.txt";

        final Path objPath = ResourceUtils.loadFileResource(DIR_NAME + FILE_NAME);
        final long bookSize = Files.size(objPath);
        final Ds3Object obj = new Ds3Object(FILE_NAME, bookSize);

        final Ds3ClientShim ds3ClientShim = new Ds3ClientShim((Ds3ClientImpl) client);

        final int maxNumBlockAllocationRetries = 1;
        final int maxNumObjectTransferAttempts = 3;
        final Ds3ClientHelpers ds3ClientHelpers = Ds3ClientHelpers.wrap(ds3ClientShim,
                maxNumBlockAllocationRetries, maxNumObjectTransferAttempts);

        final Ds3ClientHelpers.Job readJob = ds3ClientHelpers.startReadJob(BUCKET_NAME, Arrays.asList(obj));

        final GetJobSpectraS3Response jobSpectraS3Response = ds3ClientShim
                .getJobSpectraS3(new GetJobSpectraS3Request(readJob.getJobId()));

        assertThat(jobSpectraS3Response.getMasterObjectListResult(), is(notNullValue()));

        readJob.transfer(new Ds3ClientHelpers.ObjectChannelBuilder() {
            @Override
            public SeekableByteChannel buildChannel(final String key) throws IOException {
                throw new AccessControlException(key);
            }
        });
    } catch (final IOException e) {
        caughtException.set(true);
        assertTrue(e.getCause() instanceof AccessControlException);
    } finally {
        FileUtils.deleteDirectory(tempDirectoryPath.toFile());
    }

    assertTrue(caughtException.get());
}
From source file: de.hybris.platform.test.TransactionTest.java

@Test
public void testItemAccessDuringCommit_PLA10839() throws Exception {
    final Title title1 = UserManager.getInstance().createTitle("t1");
    final Title title2 = UserManager.getInstance().createTitle("t2");
    final Title title3 = UserManager.getInstance().createTitle("t3");
    final Title title4 = UserManager.getInstance().createTitle("t4");

    final AtomicBoolean listenerHasBeenCalled = new AtomicBoolean(false);
    final InvalidationListener listener = new InvalidationListener() {

        @Override
        public void keyInvalidated(final Object[] key, final int invalidationType,
                final InvalidationTarget target, final RemoteInvalidationSource remoteSrc) {
            listenerHasBeenCalled.set(true);
            // access t1 here
            if (StringUtils.isEmpty(title1.getName())) {
                System.err.println("title1 name is empty");
            }
        }
    };

    final InvalidationTopic topic = InvalidationManager.getInstance()
            .getInvalidationTopic(new Object[] { Cache.CACHEKEY_HJMP, Cache.CACHEKEY_ENTITY });
    try {
        topic.addInvalidationListener(listener);

        final Transaction tx = Transaction.current();
        tx.execute(new TransactionBody() {

            @Override
            public Object execute() throws Exception {
                title2.setName("foo");
                title3.setName("foo");
                title4.setName("foo");
                return null;
            }
        });

        assertEquals("foo", title2.getName());
        assertEquals("foo", title3.getName());
        assertEquals("foo", title4.getName());
        assertTrue(listenerHasBeenCalled.get());
    } finally {
        topic.removeInvalidationListener(listener);
    }
}
From source file: eu.esdihumboldt.hale.ui.service.instance.internal.orient.OrientInstanceService.java

/**
 * Perform the transformation
 *
 * @return if the transformation was successful
 */
protected boolean performTransformation() {
    final TransformationService ts = getTransformationService();
    if (ts == null) {
        log.userError("No transformation service available");
        return false;
    }

    final AtomicBoolean transformationFinished = new AtomicBoolean(false);
    final AtomicBoolean transformationCanceled = new AtomicBoolean(false);
    IRunnableWithProgress op = new IRunnableWithProgress() {

        @Override
        public void run(IProgressMonitor monitor)
                throws InvocationTargetException, InterruptedException {
            try {
                Alignment alignment = getAlignmentService().getAlignment();
                if (alignment.getActiveTypeCells().isEmpty()) {
                    // early exit if there are no type relations
                    return;
                }

                // determine if there are any active type cells w/o source
                boolean transformEmpty = false;
                for (Cell cell : alignment.getActiveTypeCells()) {
                    if (cell.getSource() == null || cell.getSource().isEmpty()) {
                        transformEmpty = true;
                        break;
                    }
                }

                InstanceCollection sources = getInstances(DataSet.SOURCE);
                if (!transformEmpty && sources.isEmpty()) {
                    return;
                }

                HaleOrientInstanceSink sink = new HaleOrientInstanceSink(transformed, true);
                TransformationReport report;
                ATransaction trans = log.begin("Instance transformation");
                try {
                    report = ts.transform(alignment, sources, sink, HaleUI.getServiceProvider(),
                            new ProgressMonitorIndicator(monitor));

                    // publish report
                    ReportService rs = PlatformUI.getWorkbench().getService(ReportService.class);
                    rs.addReport(report);
                } finally {
                    try {
                        sink.close();
                    } catch (IOException e) {
                        // ignore
                    }
                    trans.end();
                }
            } finally {
                // remember if canceled
                if (monitor.isCanceled()) {
                    transformationCanceled.set(true);
                }
                // transformation finished
                transformationFinished.set(true);
            }
        }
    };

    try {
        ThreadProgressMonitor.runWithProgressDialog(op, ts.isCancelable());
    } catch (Throwable e) {
        log.error("Error starting transformation process", e);
    }

    // wait for transformation to complete
    HaleUI.waitFor(transformationFinished);

    return !transformationCanceled.get();
}
From source file: com.facebook.AccessTokenManager.java

private void refreshCurrentAccessTokenImpl() {
    final AccessToken accessToken = currentAccessToken;
    if (accessToken == null) {
        return;
    }
    if (!tokenRefreshInProgress.compareAndSet(false, true)) {
        return;
    }

    Validate.runningOnUiThread();

    lastAttemptedTokenExtendDate = new Date();

    final Set<String> permissions = new HashSet<>();
    final Set<String> declinedPermissions = new HashSet<>();
    final AtomicBoolean permissionsCallSucceeded = new AtomicBoolean(false);
    final RefreshResult refreshResult = new RefreshResult();

    GraphRequestBatch batch = new GraphRequestBatch(
            createGrantedPermissionsRequest(accessToken, new GraphRequest.Callback() {
                @Override
                public void onCompleted(GraphResponse response) {
                    JSONObject result = response.getJSONObject();
                    if (result == null) {
                        return;
                    }
                    JSONArray permissionsArray = result.optJSONArray("data");
                    if (permissionsArray == null) {
                        return;
                    }
                    permissionsCallSucceeded.set(true);
                    for (int i = 0; i < permissionsArray.length(); i++) {
                        JSONObject permissionEntry = permissionsArray.optJSONObject(i);
                        if (permissionEntry == null) {
                            continue;
                        }
                        String permission = permissionEntry.optString("permission");
                        String status = permissionEntry.optString("status");
                        if (!Utility.isNullOrEmpty(permission) && !Utility.isNullOrEmpty(status)) {
                            status = status.toLowerCase(Locale.US);
                            if (status.equals("granted")) {
                                permissions.add(permission);
                            } else if (status.equals("declined")) {
                                declinedPermissions.add(permission);
                            } else {
                                Log.w(TAG, "Unexpected status: " + status);
                            }
                        }
                    }
                }
            }),
            createExtendAccessTokenRequest(accessToken, new GraphRequest.Callback() {
                @Override
                public void onCompleted(GraphResponse response) {
                    JSONObject data = response.getJSONObject();
                    if (data == null) {
                        return;
                    }
                    refreshResult.accessToken = data.optString("access_token");
                    refreshResult.expiresAt = data.optInt("expires_at");
                }
            }));

    batch.addCallback(new GraphRequestBatch.Callback() {
        @Override
        public void onBatchCompleted(GraphRequestBatch batch) {
            if (getInstance().getCurrentAccessToken() == null
                    || getInstance().getCurrentAccessToken().getUserId() != accessToken.getUserId()) {
                return;
            }
            try {
                if (!permissionsCallSucceeded.get() && refreshResult.accessToken == null
                        && refreshResult.expiresAt == 0) {
                    return;
                }
                AccessToken newAccessToken = new AccessToken(
                        refreshResult.accessToken != null ? refreshResult.accessToken
                                : accessToken.getToken(),
                        accessToken.getApplicationId(), accessToken.getUserId(),
                        permissionsCallSucceeded.get() ? permissions : accessToken.getPermissions(),
                        permissionsCallSucceeded.get() ? declinedPermissions
                                : accessToken.getDeclinedPermissions(),
                        accessToken.getSource(),
                        refreshResult.expiresAt != 0 ? new Date(refreshResult.expiresAt * 1000L)
                                : accessToken.getExpires(),
                        new Date());
                getInstance().setCurrentAccessToken(newAccessToken);
            } finally {
                tokenRefreshInProgress.set(false);
            }
        }
    });
    batch.executeAsync();
}
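Here get() appears alongside compareAndSet(false, true), which makes tokenRefreshInProgress act as a single-flight guard: only the caller that flips the flag proceeds, and the finally block in the batch callback releases it when the work completes. A minimal sketch of that guard pattern follows; the class and method names are illustrative, not part of the Facebook SDK.

import java.util.concurrent.atomic.AtomicBoolean;

public class SingleFlightGuard {

    private final AtomicBoolean refreshInProgress = new AtomicBoolean(false);

    public void refresh() {
        // Atomically claim the guard: only one thread can flip false -> true.
        if (!refreshInProgress.compareAndSet(false, true)) {
            System.out.println(Thread.currentThread().getName() + ": refresh already in progress, skipping");
            return;
        }
        try {
            System.out.println(Thread.currentThread().getName() + ": refreshing...");
            Thread.sleep(200); // stand-in for the actual refresh work
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        } finally {
            refreshInProgress.set(false); // always release the guard
        }
    }

    public boolean isRefreshing() {
        // get() lets other threads observe the guard without claiming it.
        return refreshInProgress.get();
    }

    public static void main(String[] args) throws InterruptedException {
        SingleFlightGuard guard = new SingleFlightGuard();
        Thread t1 = new Thread(guard::refresh, "t1");
        Thread t2 = new Thread(guard::refresh, "t2");
        t1.start();
        t2.start();
        t1.join();
        t2.join();
    }
}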
From source file: org.apache.tinkerpop.gremlin.structure.IoTest.java

@Test
@FeatureRequirement(featureClass = Graph.Features.EdgeFeatures.class, feature = Graph.Features.EdgeFeatures.FEATURE_ADD_EDGES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_ADD_VERTICES)
@FeatureRequirement(featureClass = VertexPropertyFeatures.class, feature = FEATURE_STRING_VALUES)
@FeatureRequirement(featureClass = EdgePropertyFeatures.class, feature = EdgePropertyFeatures.FEATURE_FLOAT_VALUES)
@FeatureRequirement(featureClass = EdgePropertyFeatures.class, feature = EdgePropertyFeatures.FEATURE_DOUBLE_VALUES)
public void shouldReadWriteDetachedVertexAsReferenceNoEdgesToGraphSON() throws Exception {
    final Vertex v1 = g.addVertex("name", "marko", "acl", "rw");
    final Vertex v2 = g.addVertex();
    v1.addEdge("friends", v2, "weight", 0.5f);

    try (final ByteArrayOutputStream os = new ByteArrayOutputStream()) {
        final GraphSONWriter writer = g.io().graphSONWriter().create();
        final DetachedVertex dv = DetachedFactory.detach(v1, false);
        writer.writeVertex(os, dv);

        final AtomicBoolean called = new AtomicBoolean(false);
        final GraphSONReader reader = g.io().graphSONReader().create();
        try (final ByteArrayInputStream bais = new ByteArrayInputStream(os.toByteArray())) {
            reader.readVertex(bais, detachedVertex -> {
                assertEquals(v1.id().toString(), detachedVertex.id().toString()); // lossy
                assertEquals(v1.label(), detachedVertex.label());
                assertEquals(0, StreamFactory.stream(detachedVertex.iterators().propertyIterator()).count());
                called.set(true);
                return detachedVertex;
            });
        }
        assertTrue(called.get());
    }
}
From source file: org.apache.tinkerpop.gremlin.structure.IoTest.java

@Test
@FeatureRequirement(featureClass = Graph.Features.EdgeFeatures.class, feature = Graph.Features.EdgeFeatures.FEATURE_ADD_EDGES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_ADD_VERTICES)
@FeatureRequirement(featureClass = VertexPropertyFeatures.class, feature = FEATURE_STRING_VALUES)
@FeatureRequirement(featureClass = EdgePropertyFeatures.class, feature = EdgePropertyFeatures.FEATURE_DOUBLE_VALUES)
public void shouldReadWriteDetachedVertexAsReferenceNoEdgesToGryo() throws Exception {
    final Vertex v1 = g.addVertex("name", "marko", "acl", "rw");
    final Vertex v2 = g.addVertex();
    v1.addEdge("friends", v2, "weight", 0.5d);

    try (final ByteArrayOutputStream os = new ByteArrayOutputStream()) {
        final GryoWriter writer = g.io().gryoWriter().create();
        final DetachedVertex dv = DetachedFactory.detach(v1, false);
        writer.writeVertex(os, dv);

        final AtomicBoolean called = new AtomicBoolean(false);
        final GryoReader reader = g.io().gryoReader().workingDirectory(File.separator + "tmp").create();
        try (final ByteArrayInputStream bais = new ByteArrayInputStream(os.toByteArray())) {
            reader.readVertex(bais, detachedVertex -> {
                assertEquals(v1.id(), detachedVertex.id());
                assertEquals(v1.label(), detachedVertex.label());
                assertEquals(0, StreamFactory.stream(detachedVertex.iterators().propertyIterator()).count());
                called.set(true);
                return mock(Vertex.class);
            });
        }
        assertTrue(called.get());
    }
}
From source file: org.apache.hadoop.hdfs.server.namenode.bookkeeper.TestBookKeeperEditLogInputStream.java

/**
 * Test "tailing" an in-progress ledger that is later finalized, i.e., a
 * typical primary/standby high-availability scenario.
 * Spawns two threads: a producer (which writes transactions with
 * monotonically increasing transaction ids to the log) and a consumer
 * (which keeps reading from the log, refreshing when encountering the end
 * of the log, until the producer is shut down). Verifies that transactions
 * have been written in the correct order.
 */
@Test
public void testTailing() throws Exception {
    // Unlike the other unit test, numEdits here is constant as this is
    // a longer running test
    final int numEdits = 10000;
    final AtomicBoolean finishedProducing = new AtomicBoolean(false);
    final LedgerHandle ledgerOut = createLedger();
    final long ledgerId = ledgerOut.getId();

    Callable<Void> producerThread = new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            try {
                BookKeeperEditLogOutputStream out = new BookKeeperEditLogOutputStream(ledgerOut);
                out.create(); // Writes version to the ledger output stream
                for (int i = 1; i <= numEdits; i++) {
                    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
                    // Set an increasing transaction id to verify correctness
                    op.setTransactionId(i);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Writing " + op);
                    }
                    FSEditLogTestUtil.writeToStreams(op, out);
                    if (i % 1000 == 0) {
                        Thread.sleep(500);
                        FSEditLogTestUtil.flushStreams(out);
                    }
                }
                FSEditLogTestUtil.flushStreams(out);
                FSEditLogTestUtil.closeStreams(out);
            } finally {
                // Let the consumer know that we've reached the end.
                finishedProducing.set(true);
            }
            return null;
        }
    };

    Callable<Void> consumerThread = new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            BookKeeperEditLogInputStream in = new BookKeeperEditLogInputStream(ledgerProvider,
                    ledgerId, 0, 1, -1, true);
            long numOps = 0;
            long maxTxId = -1;
            FSEditLogOp op;
            long lastPos = in.getPosition();
            do {
                op = in.readOp();
                if (op == null) { // If we've reached the end prematurely...
                    Thread.sleep(1000);
                    LOG.info("Refreshing to " + lastPos);
                    in.refresh(lastPos, -1); // Then refresh to last known good position
                } else {
                    long txId = op.getTransactionId();
                    if (txId > maxTxId) {
                        // Standby ingest contains similar logic: transactions
                        // with ids lower than what is already read are ignored.
                        numOps++;
                        maxTxId = txId;
                    }
                    // Remember the last known safe position that we can refresh to
                    lastPos = in.getPosition();
                }
            } while (op != null || !finishedProducing.get());
            Thread.sleep(1000);

            // Once producer is shutdown, scan again from last known good position
            // until the end of the ledger. This mirrors the Ingest logic (last
            // read when being quiesced).
            in.refresh(lastPos, -1);
            do {
                op = in.readOp();
                if (op != null) {
                    long txId = op.getTransactionId();
                    if (txId > maxTxId) {
                        numOps++;
                        maxTxId = txId;
                    }
                }
            } while (op != null);

            assertEquals("Must have read " + numEdits + " edits", numEdits, numOps);
            assertEquals("Must end at txid = " + numEdits, maxTxId, numEdits);
            return null;
        }
    };

    // Allow producer and consumer to run concurrently
    ExecutorService executor = Executors.newFixedThreadPool(2);
    Future<Void> producerFuture = executor.submit(producerThread);
    Future<Void> consumerFuture = executor.submit(consumerThread);

    // Calling a .get() on the future will rethrow any exceptions thrown in
    // the future.
    producerFuture.get();
    consumerFuture.get();
}
From source file: io.pravega.segmentstore.server.writer.SegmentAggregatorTests.java

/**
 * Tests the flush() method with Append and StreamSegmentSealOperations when there are Storage errors.
 */
@Test
public void testSealWithStorageErrors() throws Exception {
    // Add some appends and seal, and then flush together. Verify that everything got flushed in one go.
    final int appendCount = 1000;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();

    @Cleanup
    TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT, executorService()).join();

    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();

    // Part 1: flush triggered by accumulated size.
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length (not bothering with flushing here; testFlushSeal() covers that).
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
    }

    // Generate and add a Seal Operation.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);

    // Have the writes fail every few attempts with a well known exception.
    AtomicBoolean generateSyncException = new AtomicBoolean(true);
    AtomicBoolean generateAsyncException = new AtomicBoolean(true);
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setSealSyncErrorInjector(
            new ErrorInjector<>(count -> generateSyncException.getAndSet(false), exceptionSupplier));
    context.storage.setSealAsyncErrorInjector(
            new ErrorInjector<>(count -> generateAsyncException.getAndSet(false), exceptionSupplier));

    // Call flush and verify that the entire Aggregator got flushed and the Seal got persisted to Storage.
    int attemptCount = 4;
    for (int i = 0; i < attemptCount; i++) {
        // Repeat a number of times, at least once should work.
        setException.set(null);
        try {
            FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(),
                        ExceptionHelpers.getRealException(ex));
            } else {
                // Not expecting any exception this time.
                throw ex;
            }
        }

        if (!generateAsyncException.get() && !generateSyncException.get() && setException.get() == null) {
            // We are done. We got at least one through.
            break;
        }
    }

    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    SegmentProperties storageInfo = context.storage
            .getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length,
            storageInfo.getLength());
    Assert.assertTrue("Segment is not sealed in storage post flush.", storageInfo.isSealed());
    Assert.assertTrue("Segment is not marked in metadata as sealed in storage post flush.",
            context.segmentAggregator.getMetadata().isSealedInStorage());
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0,
            actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
From source file: io.nats.client.ITClusterTest.java

@Test
public void testHotSpotReconnect() throws InterruptedException {
    int numClients = 100;
    ExecutorService executor = Executors.newFixedThreadPool(numClients,
            new NatsThreadFactory("testhotspotreconnect"));

    final BlockingQueue<String> rch = new LinkedBlockingQueue<String>();
    final BlockingQueue<Integer> dch = new LinkedBlockingQueue<Integer>();
    final AtomicBoolean shutdown = new AtomicBoolean(false);

    try (NatsServer s1 = runServerOnPort(1222)) {
        try (NatsServer s2 = runServerOnPort(1224)) {
            try (NatsServer s3 = runServerOnPort(1226)) {

                final class NATSClient implements Runnable {
                    Connection nc = null;
                    final AtomicInteger numReconnects = new AtomicInteger(0);
                    final AtomicInteger numDisconnects = new AtomicInteger(0);
                    String currentUrl = null;
                    final AtomicInteger instance = new AtomicInteger(-1);
                    final Options opts;

                    NATSClient(int inst) {
                        this.instance.set(inst);
                        opts = defaultOptions();
                        opts.servers = Nats.processUrlArray(testServers);
                        opts.disconnectedCb = new DisconnectedCallback() {
                            public void onDisconnect(ConnectionEvent event) {
                                numDisconnects.incrementAndGet();
                                try {
                                    dch.put(instance.get());
                                } catch (InterruptedException e) {
                                    e.printStackTrace();
                                }
                                nc.setDisconnectedCallback(null);
                            }
                        };
                        opts.reconnectedCb = new ReconnectedCallback() {
                            public void onReconnect(ConnectionEvent event) {
                                numReconnects.incrementAndGet();
                                currentUrl = nc.getConnectedUrl();
                                try {
                                    rch.put(currentUrl);
                                } catch (InterruptedException e) {
                                    e.printStackTrace();
                                }
                            }
                        };
                    }

                    @Override
                    public void run() {
                        try {
                            nc = opts.connect();
                            assertTrue(!nc.isClosed());
                            assertNotNull(nc.getConnectedUrl());
                            currentUrl = nc.getConnectedUrl();
                            // System.err.println("Instance " + instance + " connected to " + currentUrl);
                            while (!shutdown.get()) {
                                sleep(10);
                            }
                            nc.close();
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                    }

                    public synchronized boolean isConnected() {
                        return (nc != null && !nc.isClosed());
                    }

                    public void shutdown() {
                        shutdown.set(true);
                    }
                }

                List<NATSClient> tasks = new ArrayList<NATSClient>(numClients);
                for (int i = 0; i < numClients; i++) {
                    NATSClient task = new NATSClient(i);
                    tasks.add(task);
                    executor.submit(task);
                }

                Map<String, Integer> cs = new HashMap<String, Integer>();

                int numReady = 0;
                while (numReady < numClients) {
                    numReady = 0;
                    for (NATSClient cli : tasks) {
                        if (cli.isConnected()) {
                            numReady++;
                        }
                    }
                    sleep(100);
                }

                s1.shutdown();
                sleep(1000);

                int disconnected = 0;
                // wait for disconnects
                while (dch.size() > 0 && disconnected < numClients) {
                    Integer instance = -1;
                    instance = dch.poll(5, TimeUnit.SECONDS);
                    assertNotNull("timed out waiting for disconnect signal", instance);
                    disconnected++;
                }
                assertTrue(disconnected > 0);

                int reconnected = 0;
                // wait for reconnects
                for (int i = 0; i < disconnected; i++) {
                    String url = null;
                    while (rch.size() == 0) {
                        sleep(50);
                    }
                    url = rch.poll(5, TimeUnit.SECONDS);
                    assertNotNull("timed out waiting for reconnect signal", url);
                    reconnected++;
                    Integer count = cs.get(url);
                    if (count != null) {
                        cs.put(url, ++count);
                    } else {
                        cs.put(url, 1);
                    }
                }

                for (NATSClient client : tasks) {
                    client.shutdown();
                }
                executor.shutdownNow();
                assertTrue(executor.awaitTermination(2, TimeUnit.SECONDS));

                assertEquals(disconnected, reconnected);

                int numServers = 2;
                assertEquals(numServers, cs.size());
                int expected = numClients / numServers;
                // We expect a 40 percent variance
                int var = (int) ((float) expected * 0.40);
                int delta = Math.abs(cs.get(testServers[2]) - cs.get(testServers[4]));
                // System.err.printf("var = %d, delta = %d\n", var, delta);
                if (delta > var) {
                    String str = String.format("Connected clients to servers out of range: %d/%d",
                            delta, var);
                    fail(str);
                }
            }
        }
    }
}