List of usage examples for java.util.concurrent.atomic.AtomicBoolean
public AtomicBoolean()
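Before the project examples below, here is a minimal standalone sketch (not taken from any of the listed projects) illustrating the no-argument constructor: the flag starts out false, and compareAndSet is the usual way for exactly one thread to claim it.

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanExample {
    public static void main(String[] args) throws InterruptedException {
        // The no-argument constructor initializes the value to false.
        final AtomicBoolean initialized = new AtomicBoolean();
        System.out.println(initialized.get()); // prints "false"

        Runnable initOnce = () -> {
            // compareAndSet(false, true) succeeds for exactly one caller,
            // so the guarded block runs at most once across all threads.
            if (initialized.compareAndSet(false, true)) {
                System.out.println("initialized by " + Thread.currentThread().getName());
            }
        };

        Thread t1 = new Thread(initOnce);
        Thread t2 = new Thread(initOnce);
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        System.out.println(initialized.get()); // prints "true"
    }
}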
From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.WebServiceLayerLocalWorkspaces.java
/**
 * If the workspace name and owner provided correspond to a local workspace
 * on this computer, that local workspace will be reconciled.
 */
protected Workspace reconcileIfLocal(final String workspaceName, final String ownerName,
        final boolean unscannedReconcile, final boolean reconcileMissingLocalItems,
        final boolean skipIfAccessDenied, final AtomicBoolean reconciled) {
    if (reconciled != null) {
        reconciled.set(false);
    }

    if (workspaceName == null || workspaceName.length() == 0 || ownerName == null || ownerName.length() == 0) {
        return null;
    }

    Workspace localWorkspace = null;

    if ((localWorkspace = getLocalWorkspace(workspaceName, ownerName)) != null) {
        final AtomicReference<Failure[]> failures = new AtomicReference<Failure[]>();
        final AtomicBoolean pendingChangesUpdatedByServer = new AtomicBoolean();

        try {
            final boolean wasReconciled = LocalDataAccessLayer.reconcileLocalWorkspace(localWorkspace, this,
                    unscannedReconcile, reconcileMissingLocalItems, failures, pendingChangesUpdatedByServer);

            if (wasReconciled) {
                localWorkspace.invalidateMappings();
                localWorkspace.refreshMappingsIfNeeded();
            }

            if (reconciled != null) {
                reconciled.set(wasReconciled);
            }
        } catch (final ResourceAccessException e) {
            if (!skipIfAccessDenied) {
                throw e;
            }
            return null;
        }

        getVersionControlClient().reportFailures(localWorkspace, failures.get());
    }

    return localWorkspace;
}
From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.localworkspace.BaselineFolderCollection.java
/**
 * Given a baseline file GUID and a target location on disk, copies the
 * baseline from the baseline store to the target location. (The target
 * location always receives a decompressed copy of the baseline, even if it
 * is stored compressed in the baseline folder.)
 *
 * @param workspace
 * @param baselineFolders
 * @param baselineFileGuid
 *        Baseline file GUID to copy
 * @param targetLocalItem
 *        Target location for the baseline file
 * @param baselineFileLength
 *        (optional) If provided, the uncompressed baseline length will be
 *        compared against this value and checked after decompression. If
 *        the values do not match, an exception will be thrown.
 * @param baselineHashValue
 *        (optional) If provided, the uncompressed baseline will be hashed
 *        and its hash compared to this value after decompression. If the
 *        values do not match, an exception will be thrown.
 */
public static void copyBaselineToTarget(final Workspace workspace, final List<BaselineFolder> baselineFolders,
        final byte[] baselineFileGuid, final String targetLocalItem, final long baselineFileLength,
        final byte[] baselineHashValue, final boolean symlink) {
    Check.notNullOrEmpty(targetLocalItem, "targetLocalItem"); //$NON-NLS-1$
    BaselineFolder.checkForValidBaselineFileGUID(baselineFileGuid);

    // Clear the target location.
    final File file = new File(targetLocalItem);
    file.delete();

    final AtomicBoolean outIsBaselineCompressed = new AtomicBoolean();
    final String baselineLocation = getBaselineLocation(workspace, baselineFolders, baselineFileGuid,
            outIsBaselineCompressed);

    if (null == baselineLocation) {
        // The baseline could not be located on disk.
        throw new MissingBaselineException(targetLocalItem);
    }

    String decompressedBaselineLocation = baselineLocation;

    try {
        byte[] decompressedHashValue = null;
        final boolean haveBaselineHashValue = null != baselineHashValue && 16 == baselineHashValue.length;
        MessageDigest md5Digest = null;

        if (haveBaselineHashValue) {
            md5Digest = MessageDigest.getInstance("MD5"); //$NON-NLS-1$
        }

        if (outIsBaselineCompressed.get()) {
            // The temporary file is created in the folder where the
            // compressed baseline currently exists. We use the temporary
            // file extension so that we can clean up the file later if we
            // happen to lose it.
            decompressedBaselineLocation = LocalPath.combine(LocalPath.getParent(baselineLocation),
                    GUID.newGUIDString()) + TMP_EXTENSION;

            // Decompress the baseline to a temporary file. Then move the
            // temporary file to the target location.
            final byte[] buffer = new byte[DECOMPRESSION_BUFFER_SIZE];

            InputStream inputStream = null;
            OutputStream outputStream = null;
            try {
                inputStream = new GZIPInputStream(new FileInputStream(baselineLocation));
                if (!symlink) {
                    outputStream = new FileOutputStream(decompressedBaselineLocation);
                }

                int bytesRead;
                while (true) {
                    bytesRead = inputStream.read(buffer, 0, buffer.length);

                    if (bytesRead < 0) {
                        break;
                    } else if (bytesRead == 0) {
                        continue;
                    }

                    if (null != md5Digest) {
                        md5Digest.update(buffer, 0, bytesRead);
                    }

                    if (symlink) {
                        final String targetLink = new String(buffer, 0, bytesRead);
                        FileSystemUtils.getInstance().createSymbolicLink(targetLink, targetLocalItem);
                    } else {
                        outputStream.write(buffer, 0, bytesRead);
                    }
                }

                if (null != md5Digest) {
                    decompressedHashValue = md5Digest.digest();
                }
            } finally {
                if (inputStream != null) {
                    IOUtils.closeSafely(inputStream);
                }
                if (outputStream != null) {
                    IOUtils.closeSafely(outputStream);
                }
            }
        }

        // First, check to see if the length of the file matches.
        if (-1 != baselineFileLength && baselineFileLength != new File(decompressedBaselineLocation).length()) {
            throw new CorruptBaselineException(targetLocalItem,
                    Messages.getString("BaselineFolderCollection.BaselineLengthDoesNotMatch")); //$NON-NLS-1$
        }

        if (null != md5Digest && null == decompressedHashValue && !symlink) {
            // Calculate the decompressed hash value for a raw file (.rw
            // extension) as we will not have gone through the streaming
            // logic above
            decompressedHashValue = HashUtils.hashFile(new File(decompressedBaselineLocation),
                    HashUtils.ALGORITHM_MD5);
        }

        if (haveBaselineHashValue && null != decompressedHashValue && 16 == decompressedHashValue.length) {
            if (!Arrays.equals(baselineHashValue, decompressedHashValue)) {
                throw new CorruptBaselineException(targetLocalItem,
                        Messages.getString("BaselineFolderCollection.BaselineHashValueDoesNotMatch")); //$NON-NLS-1$
            }
        }

        // Put the decompressed baseline at the target location. We've
        // verified its contents are correct.
        if (!symlink) {
            if (outIsBaselineCompressed.get()) {
                FileHelpers.rename(decompressedBaselineLocation, targetLocalItem);
            } else {
                FileCopyHelper.copy(decompressedBaselineLocation, targetLocalItem);
            }
        }
    } catch (final Exception ex) {
        // If the baseline is corrupt, delete it so we'll throw a missing
        // baseline exception next time. (This is not strictly necessary.)
        if (ex instanceof CorruptBaselineException && null != baselineLocation) {
            FileHelpers.deleteFileWithoutException(baselineLocation);
        }

        // Try not to leak a temp file on the way out if we're throwing.
        final File tempFile = new File(decompressedBaselineLocation);
        if (outIsBaselineCompressed.get() && null != decompressedBaselineLocation && tempFile.exists()) {
            FileHelpers.deleteFileWithoutException(decompressedBaselineLocation);
        }

        throw new VersionControlException(ex);
    }
}
From source file:io.vertx.config.git.GitConfigStoreTest.java
@Test
public void testConfigurationUpdate() throws IOException, GitAPIException {
    add(git, root, new File("src/test/resources/files/a.json"), "dir");
    push(git);

    retriever = ConfigRetriever.create(vertx,
            new ConfigRetrieverOptions().setScanPeriod(1000).addStore(new ConfigStoreOptions().setType("git")
                    .setConfig(new JsonObject().put("url", bareRoot.getAbsolutePath())
                            .put("path", "target/junk/work").put("filesets",
                                    new JsonArray().add(new JsonObject().put("pattern", "dir/*.json"))))));

    AtomicBoolean done = new AtomicBoolean();

    retriever.getConfig(ar -> {
        assertThat(ar.succeeded()).isTrue();
        assertThat(ar.result().getString("a.name")).isEqualTo("A");
        done.set(true);
    });
    await().untilAtomic(done, is(true));

    updateA();

    await().until(() -> "A2".equals(retriever.getCachedConfig().getString("a.name"))
            && "B".equalsIgnoreCase(retriever.getCachedConfig().getString("b.name")));
}
From source file:org.zodiark.subscriber.SubscriberTest.java
@Test(enabled = false)
public void sucessfulRequestForAction() throws IOException, InterruptedException {
    final CountDownLatch completed = new CountDownLatch(1);
    final ZodiarkClient wowzaClient = new ZodiarkClient.Builder().path("http://127.0.0.1:" + port).build();
    final CountDownLatch connected = new CountDownLatch(1);
    final AtomicReference<String> uuid = new AtomicReference<>();
    final AtomicReference<String> paths = new AtomicReference<>();

    // =============== Wowza
    paths.set("");
    wowzaClient.handler(new OnEnvelopHandler() {
        @Override
        public boolean onEnvelop(Envelope e) throws IOException {
            Message m = e.getMessage();
            switch (m.getPath()) {
            case Paths.WOWZA_CONNECT:
                // Connected. Listen
                uuid.set(e.getUuid());
                break;
            case Paths.SERVER_VALIDATE_OK:
                Envelope publisherOk = Envelope.newClientToServerRequest(e.getUuid(),
                        new Message(new Path(paths.get()), e.getMessage().getData()));
                wowzaClient.send(publisherOk);
                break;
            case Paths.WOWZA_OBFUSCATE:
                WowzaMessage wm = mapper.readValue(m.getData(), WowzaMessage.class);
                Envelope ok = Envelope.newClientToServerRequest(e.getUuid(),
                        new Message(new Path(Paths.WOWZA_OBFUSCATE_OK), e.getMessage().getData()));
                System.out.println("Obfuscating Subscribers");
                wowzaClient.send(ok);
            case Paths.WOWZA_DEOBFUSCATE:
                wm = mapper.readValue(m.getData(), WowzaMessage.class);
                System.out.println("De-obfuscating Subscribers");
                ok = Envelope.newClientToServerRequest(e.getUuid(),
                        new Message(new Path(Paths.WOWZA_DEOBFUSCATE_OK), e.getMessage().getData()));
                wowzaClient.send(ok);
            default:
                // ERROR
            }
            connected.countDown();
            return false;
        }
    }).open();

    Envelope wowzaConnect = Envelope.newClientToServerRequest(new Message(new Path(Paths.WOWZA_CONNECT),
            mapper.writeValueAsString(new UserPassword("wowza", "bar"))));
    wowzaClient.send(wowzaConnect);
    connected.await();

    // ================ Publisher
    final AtomicReference<PublisherResults> answer = new AtomicReference<>();
    final ZodiarkClient publisherClient = new ZodiarkClient.Builder().path("http://127.0.0.1:" + port).build();
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicReference<String> publisherUUID = new AtomicReference<>();

    publisherClient.handler(new OnEnvelopHandler() {
        @Override
        public boolean onEnvelop(Envelope e) throws IOException {
            answer.set(mapper.readValue(e.getMessage().getData(), PublisherResults.class));
            publisherUUID.set(e.getUuid());
            latch.countDown();
            return true;
        }
    }).open();

    // ================ Publisher create the session
    Envelope createSessionMessage = Envelope.newClientToServerRequest(
            new Message(new Path(""), mapper.writeValueAsString(new UserPassword("publisherex", "bar"))));
    createSessionMessage.setFrom(new From(ActorValue.PUBLISHER));
    publisherClient.send(createSessionMessage);
    latch.await();
    assertEquals("OK", answer.get().getResults());
    answer.set(null);

    final CountDownLatch tlatch = new CountDownLatch(1);
    final AtomicReference<String> finalMessage = new AtomicReference<>();
    publisherClient.handler(new OnEnvelopHandler() {
        @Override
        public boolean onEnvelop(Envelope e) throws IOException {
            switch (e.getMessage().getPath()) {
            case Paths.BEGIN_STREAMING_SESSION:
                answer.set(mapper.readValue(e.getMessage().getData(), PublisherResults.class));
                tlatch.countDown();
                break;
            case Paths.PUBLISHER_ACTION_ACCEPT:
                Action a = mapper.readValue(e.getMessage().getData(), Action.class);
                Envelope publisherOk = Envelope.newClientToServerRequest(e.getUuid(),
                        new Message(new Path(Paths.ZODIARK_ACTION_ACCEPTED), e.getMessage().getData()));
                publisherClient.send(publisherOk);
                break;
            case Paths.ACTION_START:
                // Start action
                PublisherResults results = mapper.readValue(e.getMessage().getData(), PublisherResults.class);
                System.out.println("==> Start Action " + results.getResults());
                publisherOk = Envelope.newClientToServerRequest(e.getUuid(),
                        new Message(new Path(Paths.ACTION_START_OK), e.getMessage().getData()));
                publisherClient.send(publisherOk);
                break;
            case Paths.ACTION_TIMER:
                Time t = mapper.readValue(e.getMessage().getData(), Time.class);
                System.out.println("Publisher ===>" + t);
                break;
            case Paths.ACTION_COMPLETED:
                results = mapper.readValue(e.getMessage().getData(), PublisherResults.class);
                System.out.println("Publisher Action completed");
                completed.countDown();
                break;
            case Paths.PUBLISHER_ABOUT_READY:
                results = mapper.readValue(e.getMessage().getData(), PublisherResults.class);
                finalMessage.set(results.getResults());
                break;
            }
            return false;
        }
    });

    // ================ Prepare for streaming, handshake with Wowza
    Envelope startStreamingSession = Envelope
            .newClientToServerRequest(new Message(new Path(Paths.VALIDATE_PUBLISHER_STREAMING_SESSION),
                    mapper.writeValueAsString(new WowzaUUID(uuid.get()))));
    createSessionMessage.setFrom(new From(ActorValue.PUBLISHER));
    publisherClient.send(startStreamingSession);
    tlatch.await();
    assertEquals("OK", answer.get().getResults());

    // ================ Subscriber
    paths.set(Paths.JOIN_SUBSCRIBER_STREAMING_SESSION);
    final AtomicReference<SubscriberResults> sanswer = new AtomicReference<>();
    final ZodiarkClient subscriberClient = new ZodiarkClient.Builder().path("http://127.0.0.1:" + port).build();
    final CountDownLatch platch = new CountDownLatch(1);
    final AtomicReference<String> subscriberUUID = new AtomicReference<>();

    subscriberClient.handler(new OnEnvelopHandler() {
        @Override
        public boolean onEnvelop(Envelope e) throws IOException {
            sanswer.set(mapper.readValue(e.getMessage().getData(), SubscriberResults.class));
            subscriberUUID.set(e.getUuid());
            platch.countDown();
            return true;
        }
    }).open();

    // ================ Subscriber create the session
    createSessionMessage = Envelope.newClientToServerRequest(subscriberUUID.get(),
            new Message(new Path(Paths.DB_POST_SUBSCRIBER_SESSION_CREATE),
                    mapper.writeValueAsString(new UserPassword("123456", "bar"))));
    createSessionMessage.setFrom(new From(ActorValue.SUBSCRIBER));
    subscriberClient.send(createSessionMessage);
    platch.await();
    assertEquals("OK", sanswer.get().getResults());
    sanswer.set(null);

    final CountDownLatch elatch = new CountDownLatch(1);
    subscriberClient.handler(new OnEnvelopHandler() {
        @Override
        public boolean onEnvelop(Envelope e) throws IOException {
            sanswer.set(mapper.readValue(e.getMessage().getData(), SubscriberResults.class));
            elatch.countDown();
            return true;
        }
    });

    // ================ Join the Publisher Session
    StreamingRequest request = new StreamingRequestImpl(publisherUUID.get(), uuid.get());
    startStreamingSession = Envelope.newClientToServerRequest(subscriberUUID.get(), new Message(
            new Path(Paths.VALIDATE_SUBSCRIBER_STREAMING_SESSION), mapper.writeValueAsString(request)));
    startStreamingSession.setFrom(new From(ActorValue.SUBSCRIBER));
    subscriberClient.send(startStreamingSession);
    elatch.await();
    assertEquals("OK", sanswer.get().getResults());

    // ================ Ask for an Action the Publisher Session
    Action action = new Action();
    action.setPath("/action/doSomething");
    action.setData("{ \"foo\":\"bar\"");
    Envelope e = Envelope.newClientToServerRequest(subscriberUUID.get(),
            new Message(new Path(Paths.SUBSCRIBER_ACTION), mapper.writeValueAsString(action)));
    e.setFrom(new From(ActorValue.SUBSCRIBER));

    final CountDownLatch actionLatch = new CountDownLatch(1);
    final AtomicReference<Envelope> response = new AtomicReference<>();
    final AtomicBoolean timerCalled = new AtomicBoolean();

    subscriberClient.handler(new OnEnvelopHandler() {
        @Override
        public boolean onEnvelop(Envelope e) throws IOException {
            switch (e.getMessage().getPath()) {
            case Paths.MESSAGE_ACTION_VALIDATE:
                response.set(e);
                actionLatch.countDown();
                break;
            case Paths.ACTION_TIMER:
                Time t = mapper.readValue(e.getMessage().getData(), Time.class);
                System.out.println("Subscriber ===>" + t);
                timerCalled.set(true);
                break;
            case Paths.ACTION_COMPLETED:
                SubscriberResults results = mapper.readValue(e.getMessage().getData(), SubscriberResults.class);
                System.out.println("Action completed");
                break;
            }
            return false;
        }
    });
    subscriberClient.send(e);
    actionLatch.await();

    assertEquals(Paths.MESSAGE_ACTION_VALIDATE, response.get().getMessage().getPath());
    assertEquals("{\"results\":\"OK\",\"uuid\":null}", response.get().getMessage().getData());

    completed.await();
    assertTrue(timerCalled.get());
    assertEquals("READY", finalMessage.get());
}
From source file:com.tinspx.util.concurrent.DelayedSemaphoreTest.java
@SuppressWarnings("UnnecessaryUnboxing") static void runTest(Executor executor, DelayedSemaphore ds, Ticker ticker, int threadCount, int acquisitions, Acquire acquire, Permits permits, Range<Integer> acquireRange, Release release, DelayConstraint constraint) throws InterruptedException { checkArgument(threadCount > 0);/*from w w w . j a va 2 s. c o m*/ DelayTest.DelayTestBuilder builder = DelayTest.builder(); builder.stop(new AtomicBoolean()); builder.start(new CountDownLatch(threadCount)); builder.lock(new ReentrantLock()); builder.releaseTimes(new long[ds.permits()]); builder.acquisitions(acquisitions); builder.ticker(ticker).ds(ds); builder.acquire(acquire).permits(permits).permits(permits).acquireRange(acquireRange); builder.release(release); builder.delayConstraint(constraint); builder.tests(new MutableInt()); builder.totalThreads(threadCount); DelayTest[] testers = new DelayTest[threadCount]; for (int i = 0; i < threadCount; i++) { testers[i] = builder.thread(i).build(); executor.execute(testers[i]); } for (int i = 0; i < threadCount; i++) { testers[i].complete.await(); } String errorMsg = null; for (int i = 0; i < threadCount; i++) { if (testers[i].fail != null) { errorMsg = testers[i].fail; System.out.println(errorMsg); System.out.println(); } } if (errorMsg != null) { fail(errorMsg); } assertEquals(threadCount * acquisitions, builder.tests.getValue().intValue()); if (++testCount % 10 == 0) { System.out.printf("%d, Tests: %s\n", testCount, builder.tests); } }
From source file:io.pravega.segmentstore.server.containers.StreamSegmentMapperTests.java
/**
 * Tests the ability of getOrAssignStreamSegmentId to handle the TooManyActiveSegmentsException.
 */
@Test
public void testGetOrAssignStreamSegmentIdWithMetadataLimit() throws Exception {
    final String segmentName = "Segment";
    final String transactionName = StreamSegmentNameUtils.getTransactionNameFromId(segmentName, UUID.randomUUID());

    HashSet<String> storageSegments = new HashSet<>();
    storageSegments.add(segmentName);
    storageSegments.add(transactionName);

    @Cleanup
    TestContext context = new TestContext();
    setupStorageGetHandler(context, storageSegments,
            name -> new StreamSegmentInformation(name, 0, false, false, new ImmutableDate()));

    // 1. Verify the behavior when even after the retry we still cannot map.
    AtomicInteger exceptionCounter = new AtomicInteger();
    AtomicBoolean cleanupInvoked = new AtomicBoolean();

    // We use 'containerId' as a proxy for the exception id (to make sure we collect the right one).
    context.operationLog.addHandler = op -> FutureHelpers
            .failedFuture(new TooManyActiveSegmentsException(exceptionCounter.incrementAndGet(), 0));
    Supplier<CompletableFuture<Void>> noOpCleanup = () -> {
        if (!cleanupInvoked.compareAndSet(false, true)) {
            return FutureHelpers.failedFuture(new AssertionError("Cleanup invoked multiple times/"));
        }
        return CompletableFuture.completedFuture(null);
    };
    val mapper1 = new StreamSegmentMapper(context.metadata, context.operationLog, context.stateStore,
            noOpCleanup, context.storage, executorService());
    AssertExtensions.assertThrows(
            "Unexpected outcome when trying to map a segment name to a full metadata that cannot be cleaned.",
            () -> mapper1.getOrAssignStreamSegmentId(segmentName, TIMEOUT),
            ex -> ex instanceof TooManyActiveSegmentsException
                    && ((TooManyActiveSegmentsException) ex).getContainerId() == exceptionCounter.get());
    Assert.assertEquals("Unexpected number of attempts to map.", 2, exceptionCounter.get());
    Assert.assertTrue("Cleanup was not invoked.", cleanupInvoked.get());

    // Now with a transaction.
    exceptionCounter.set(0);
    cleanupInvoked.set(false);
    AssertExtensions.assertThrows(
            "Unexpected outcome when trying to map a segment name to a full metadata that cannot be cleaned.",
            () -> mapper1.getOrAssignStreamSegmentId(transactionName, TIMEOUT),
            ex -> ex instanceof TooManyActiveSegmentsException
                    && ((TooManyActiveSegmentsException) ex).getContainerId() == exceptionCounter.get());
    Assert.assertEquals("Unexpected number of attempts to map.", 2, exceptionCounter.get());
    Assert.assertTrue("Cleanup was not invoked.", cleanupInvoked.get());

    // 2. Verify the behavior when the first call fails, but the second one succeeds.
    exceptionCounter.set(0);
    cleanupInvoked.set(false);
    Supplier<CompletableFuture<Void>> workingCleanup = () -> {
        if (!cleanupInvoked.compareAndSet(false, true)) {
            return FutureHelpers.failedFuture(new AssertionError("Cleanup invoked multiple times."));
        }
        setupOperationLog(context); // Setup the OperationLog to function correctly.
        return CompletableFuture.completedFuture(null);
    };
    val mapper2 = new StreamSegmentMapper(context.metadata, context.operationLog, context.stateStore,
            workingCleanup, context.storage, executorService());
    long id = mapper2.getOrAssignStreamSegmentId(segmentName, TIMEOUT).join();
    Assert.assertEquals("Unexpected number of attempts to map.", 1, exceptionCounter.get());
    Assert.assertTrue("Cleanup was not invoked.", cleanupInvoked.get());
    Assert.assertNotEquals("No valid SegmentId assigned.", ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
}
From source file:com.google.gdt.eclipse.designer.util.Utils.java
/**
 * @return <code>true</code> if given {@link IPackageFragment} is "source" package of some GWT
 *         module.
 */
public static boolean isModuleSourcePackage(IPackageFragment packageFragment) throws Exception {
    final String packageName = packageFragment.getElementName();
    // check enclosing module
    ModuleDescription module = getSingleModule(packageFragment);
    if (module != null) {
        final AtomicBoolean result = new AtomicBoolean();
        ModuleVisitor.accept(module, new ModuleVisitor() {
            @Override
            public boolean visitModule(ModuleElement moduleElement) {
                String modulePackage = CodeUtils.getPackage(moduleElement.getId()) + ".";
                if (packageName.startsWith(modulePackage)) {
                    String folderInModule = packageName.substring(modulePackage.length()).replace('.', '/');
                    if (moduleElement.isInSourceFolder(folderInModule)) {
                        result.set(true);
                        return false;
                    }
                }
                return true;
            }
        });
        return result.get();
    }
    // no enclosing module
    return false;
}
From source file:io.vertx.config.git.GitConfigStoreTest.java
@Test
public void testConfigurationUpdateWithMergeIssue_Commit(TestContext tc) throws IOException, GitAPIException {
    add(git, root, new File("src/test/resources/files/a.json"), "dir");
    push(git);

    retriever = ConfigRetriever.create(vertx,
            new ConfigRetrieverOptions().setScanPeriod(1000).addStore(new ConfigStoreOptions().setType("git")
                    .setConfig(new JsonObject().put("url", bareRoot.getAbsolutePath())
                            .put("path", "target/junk/work").put("filesets",
                                    new JsonArray().add(new JsonObject().put("pattern", "dir/*.json"))))));

    AtomicBoolean done = new AtomicBoolean();

    retriever.getConfig(ar -> {
        assertThat(ar.succeeded()).isTrue();
        assertThat(ar.result().getString("a.name")).isEqualTo("A");
        done.set(true);
    });
    await().untilAtomic(done, is(true));

    // Edit the file in the work dir
    File a = new File("target/junk/work/dir/a.json");
    assertThat(a).isFile();
    FileUtils.write(a,
            new JsonObject().put("a.name", "A").put("conflict", "A").put("added", "added").encodePrettily(),
            StandardCharsets.UTF_8);
    git.add().addFilepattern("dir/a.json").call();
    git.commit().setMessage("update A").setAuthor("clement", "clement@apache.org")
            .setCommitter("clement", "clement@apache.org").call();

    done.set(false);
    retriever.getConfig(ar -> {
        assertThat(ar.succeeded()).isTrue();
        assertThat(ar.result().getString("a.name")).isEqualTo("A");
        assertThat(ar.result().getString("added")).isEqualTo("added");
        done.set(true);
    });
    await().untilAtomic(done, is(true));

    updateA();

    Async async = tc.async();
    retriever.getConfig(ar -> {
        assertThat(ar.succeeded()).isFalse();
        assertThat(ar.cause().getMessage()).containsIgnoringCase("conflict");
        async.complete();
    });
}
From source file:com.datatorrent.stram.StramRecoveryTest.java
@Test
public void testRpcFailover() throws Exception {
    String appPath = testMeta.getPath();
    Configuration conf = new Configuration(false);
    final AtomicBoolean timedout = new AtomicBoolean();

    StreamingContainerUmbilicalProtocol impl = MockitoUtil
            .mockProtocol(StreamingContainerUmbilicalProtocol.class);
    Mockito.doAnswer(new org.mockito.stubbing.Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) {
            LOG.debug("got call: " + invocation.getMethod());
            if (!timedout.get()) {
                try {
                    timedout.set(true);
                    Thread.sleep(1000);
                } catch (Exception e) {
                }
                //throw new RuntimeException("fail");
            }
            return null;
        }
    }).when(impl).log("containerId", "timeout");

    Server server = new RPC.Builder(conf).setProtocol(StreamingContainerUmbilicalProtocol.class)
            .setInstance(impl).setBindAddress("0.0.0.0").setPort(0).setNumHandlers(1).setVerbose(false).build();
    server.start();
    InetSocketAddress address = NetUtils.getConnectAddress(server);
    LOG.info("Mock server listening at " + address);

    int rpcTimeoutMillis = 500;
    int retryDelayMillis = 100;
    int retryTimeoutMillis = 500;

    FSRecoveryHandler recoveryHandler = new FSRecoveryHandler(appPath, conf);
    URI uri = RecoverableRpcProxy.toConnectURI(address, rpcTimeoutMillis, retryDelayMillis, retryTimeoutMillis);
    recoveryHandler.writeConnectUri(uri.toString());

    RecoverableRpcProxy rp = new RecoverableRpcProxy(appPath, conf);
    StreamingContainerUmbilicalProtocol protocolProxy = rp.getProxy();
    protocolProxy.log("containerId", "msg");

    // simulate socket read timeout
    try {
        protocolProxy.log("containerId", "timeout");
        Assert.fail("expected socket timeout");
    } catch (java.net.SocketTimeoutException e) {
        // expected
    }
    Assert.assertTrue("timedout", timedout.get());
    rp.close();

    // test success on retry
    timedout.set(false);
    retryTimeoutMillis = 1500;
    uri = RecoverableRpcProxy.toConnectURI(address, rpcTimeoutMillis, retryDelayMillis, retryTimeoutMillis);
    recoveryHandler.writeConnectUri(uri.toString());

    protocolProxy.log("containerId", "timeout");
    Assert.assertTrue("timedout", timedout.get());
    rp.close();

    String rpcTimeout = System.getProperty(RecoverableRpcProxy.RPC_TIMEOUT);
    String rpcRetryDelay = System.getProperty(RecoverableRpcProxy.RETRY_DELAY);
    String rpcRetryTimeout = System.getProperty(RecoverableRpcProxy.RETRY_TIMEOUT);
    System.setProperty(RecoverableRpcProxy.RPC_TIMEOUT, Integer.toString(500));
    System.setProperty(RecoverableRpcProxy.RETRY_DELAY, Long.toString(100));
    System.setProperty(RecoverableRpcProxy.RETRY_TIMEOUT, Long.toString(500));

    timedout.set(false);
    uri = RecoverableRpcProxy.toConnectURI(address);
    recoveryHandler.writeConnectUri(uri.toString());

    rp = new RecoverableRpcProxy(appPath, conf);
    protocolProxy = rp.getProxy();
    protocolProxy.log("containerId", "msg");
    try {
        protocolProxy.log("containerId", "timeout");
        Assert.fail("expected socket timeout");
    } catch (java.net.SocketTimeoutException e) {
        // expected
    }
    Assert.assertTrue("timedout", timedout.get());
    rp.close();

    timedout.set(false);
    System.setProperty(RecoverableRpcProxy.RETRY_TIMEOUT, Long.toString(1500));
    uri = RecoverableRpcProxy.toConnectURI(address);
    recoveryHandler.writeConnectUri(uri.toString());
    protocolProxy.log("containerId", "timeout");
    Assert.assertTrue("timedout", timedout.get());

    restoreSystemProperty(RecoverableRpcProxy.RPC_TIMEOUT, rpcTimeout);
    restoreSystemProperty(RecoverableRpcProxy.RETRY_DELAY, rpcRetryDelay);
    restoreSystemProperty(RecoverableRpcProxy.RETRY_TIMEOUT, rpcRetryTimeout);

    server.stop();
}
From source file:com.tinspx.util.concurrent.TimedSemaphoreTest.java
@SuppressWarnings("UnnecessaryUnboxing") static void runTest(Executor executor, TimedSemaphore ts, Ticker ticker, int threadCount, int acquisitions, Acquire acquire, Permits permits) throws InterruptedException { checkArgument(threadCount > 0);//from w w w . j ava2 s .c o m DelayTest.DelayTestBuilder builder = DelayTest.builder(); builder.stop(new AtomicBoolean()); builder.start(new CountDownLatch(threadCount)); builder.lock(new ReentrantLock()); builder.acquisitions(acquisitions); builder.ticker(ticker).ts(ts); builder.acquire(acquire).permits(permits); builder.tests(new MutableInt()); builder.totalThreads(threadCount); builder.history(new History(ts)); DelayTest[] testers = new DelayTest[threadCount]; for (int i = 0; i < threadCount; i++) { testers[i] = builder.thread(i).build(); executor.execute(testers[i]); } for (int i = 0; i < threadCount; i++) { testers[i].complete.await(); } String errorMsg = null; for (int i = 0; i < threadCount; i++) { if (testers[i].fail != null) { errorMsg = testers[i].fail; System.out.println(errorMsg); System.out.println(); } } if (errorMsg != null) { fail(errorMsg); } assertEquals(threadCount * acquisitions, builder.tests.getValue().intValue()); if (++testCount % 10 == 0) { System.out.printf("%d, Tests: %s\n", testCount, builder.tests); } }