List of usage examples for java.util.concurrent.atomic.AtomicBoolean.set
public final void set(boolean newValue)
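AtomicBoolean.set performs an unconditional write with volatile memory semantics: the new value is immediately visible to every thread that subsequently reads the flag. Before the real-world examples below, here is a minimal sketch of the most common use, a shutdown flag polled by a worker thread; the class and method are the standard JDK API, while the surrounding worker structure is illustrative only:

import java.util.concurrent.atomic.AtomicBoolean;

public class SetExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean stop = new AtomicBoolean(false);
        Thread worker = new Thread(() -> {
            while (!stop.get()) {
                // do work until another thread sets the flag
            }
        });
        worker.start();
        Thread.sleep(100);
        stop.set(true); // unconditional volatile write; immediately visible to the worker
        worker.join();
    }
}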
From source file:gobblin.couchbase.writer.CouchbaseWriter.java
@Override
public Future<WriteResponse> write(final D record, final WriteCallback callback) {
    assertRecordWritable(record);
    if (record instanceof TupleDocument) {
        ((TupleDocument) record).content().value1().retain();
    }
    Observable<D> observable = _bucket.async().upsert(record);
    if (callback == null) {
        return new WriteResponseFuture<>(
                observable.timeout(_operationTimeout, _operationTimeunit).toBlocking().toFuture(),
                _defaultWriteResponseMapper);
    } else {
        final AtomicBoolean callbackFired = new AtomicBoolean(false);
        final BlockingQueue<Pair<WriteResponse, Throwable>> writeResponseQueue = new ArrayBlockingQueue<>(1);

        final Future<WriteResponse> writeResponseFuture = new Future<WriteResponse>() {
            @Override
            public boolean cancel(boolean mayInterruptIfRunning) {
                return false;
            }

            @Override
            public boolean isCancelled() {
                return false;
            }

            @Override
            public boolean isDone() {
                return callbackFired.get();
            }

            @Override
            public WriteResponse get() throws InterruptedException, ExecutionException {
                Pair<WriteResponse, Throwable> writeResponseThrowablePair = writeResponseQueue.take();
                return getWriteResponseorThrow(writeResponseThrowablePair);
            }

            @Override
            public WriteResponse get(long timeout, TimeUnit unit)
                    throws InterruptedException, ExecutionException, TimeoutException {
                Pair<WriteResponse, Throwable> writeResponseThrowablePair = writeResponseQueue.poll(timeout, unit);
                if (writeResponseThrowablePair == null) {
                    throw new TimeoutException("Timeout exceeded while waiting for future to be done");
                } else {
                    return getWriteResponseorThrow(writeResponseThrowablePair);
                }
            }
        };

        observable.timeout(_operationTimeout, _operationTimeunit).subscribe(new Subscriber<D>() {
            @Override
            public void onCompleted() {
            }

            @Override
            public void onError(Throwable e) {
                callbackFired.set(true);
                writeResponseQueue.add(new Pair<WriteResponse, Throwable>(null, e));
                callback.onFailure(e);
            }

            @Override
            public void onNext(D doc) {
                try {
                    callbackFired.set(true);
                    WriteResponse writeResponse = new GenericWriteResponse<D>(doc);
                    writeResponseQueue.add(new Pair<WriteResponse, Throwable>(writeResponse, null));
                    callback.onSuccess(writeResponse);
                } finally {
                    if (doc instanceof TupleDocument) {
                        ((TupleDocument) doc).content().value1().release();
                    }
                }
            }
        });
        return writeResponseFuture;
    }
}
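In the example above, the AtomicBoolean backs isDone() on a hand-rolled Future: the subscriber sets the flag just before enqueuing the result, so isDone() flips to true exactly when get() can complete. A minimal, self-contained sketch of that pattern follows; the names and the String payload are illustrative, not part of the Couchbase writer:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;

public class DoneFlagFuture {
    public static void main(String[] args) throws Exception {
        final AtomicBoolean done = new AtomicBoolean(false);
        final BlockingQueue<String> results = new ArrayBlockingQueue<>(1);

        Future<String> future = new Future<String>() {
            @Override public boolean cancel(boolean mayInterruptIfRunning) { return false; }
            @Override public boolean isCancelled() { return false; }
            @Override public boolean isDone() { return done.get(); } // the flag drives isDone()
            @Override public String get() throws InterruptedException { return results.take(); }
            @Override public String get(long timeout, TimeUnit unit)
                    throws InterruptedException, TimeoutException {
                String r = results.poll(timeout, unit);
                if (r == null) throw new TimeoutException();
                return r;
            }
        };

        // Simulated async completion callback: mark done first, then publish the result.
        new Thread(() -> { done.set(true); results.add("ok"); }).start();

        System.out.println(future.get() + ", isDone=" + future.isDone());
    }
}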
From source file:io.pravega.segmentstore.server.containers.StreamSegmentMapperTests.java
/**
 * Tests the ability of getOrAssignStreamSegmentId to handle the TooManyActiveSegmentsException.
 */
@Test
public void testGetOrAssignStreamSegmentIdWithMetadataLimit() throws Exception {
    final String segmentName = "Segment";
    final String transactionName = StreamSegmentNameUtils.getTransactionNameFromId(segmentName, UUID.randomUUID());

    HashSet<String> storageSegments = new HashSet<>();
    storageSegments.add(segmentName);
    storageSegments.add(transactionName);

    @Cleanup
    TestContext context = new TestContext();
    setupStorageGetHandler(context, storageSegments,
            name -> new StreamSegmentInformation(name, 0, false, false, new ImmutableDate()));

    // 1. Verify the behavior when even after the retry we still cannot map.
    AtomicInteger exceptionCounter = new AtomicInteger();
    AtomicBoolean cleanupInvoked = new AtomicBoolean();

    // We use 'containerId' as a proxy for the exception id (to make sure we collect the right one).
    context.operationLog.addHandler = op -> FutureHelpers
            .failedFuture(new TooManyActiveSegmentsException(exceptionCounter.incrementAndGet(), 0));
    Supplier<CompletableFuture<Void>> noOpCleanup = () -> {
        if (!cleanupInvoked.compareAndSet(false, true)) {
            return FutureHelpers.failedFuture(new AssertionError("Cleanup invoked multiple times."));
        }
        return CompletableFuture.completedFuture(null);
    };
    val mapper1 = new StreamSegmentMapper(context.metadata, context.operationLog, context.stateStore,
            noOpCleanup, context.storage, executorService());
    AssertExtensions.assertThrows(
            "Unexpected outcome when trying to map a segment name to a full metadata that cannot be cleaned.",
            () -> mapper1.getOrAssignStreamSegmentId(segmentName, TIMEOUT),
            ex -> ex instanceof TooManyActiveSegmentsException
                    && ((TooManyActiveSegmentsException) ex).getContainerId() == exceptionCounter.get());
    Assert.assertEquals("Unexpected number of attempts to map.", 2, exceptionCounter.get());
    Assert.assertTrue("Cleanup was not invoked.", cleanupInvoked.get());

    // Now with a transaction.
    exceptionCounter.set(0);
    cleanupInvoked.set(false);
    AssertExtensions.assertThrows(
            "Unexpected outcome when trying to map a segment name to a full metadata that cannot be cleaned.",
            () -> mapper1.getOrAssignStreamSegmentId(transactionName, TIMEOUT),
            ex -> ex instanceof TooManyActiveSegmentsException
                    && ((TooManyActiveSegmentsException) ex).getContainerId() == exceptionCounter.get());
    Assert.assertEquals("Unexpected number of attempts to map.", 2, exceptionCounter.get());
    Assert.assertTrue("Cleanup was not invoked.", cleanupInvoked.get());

    // 2. Verify the behavior when the first call fails, but the second one succeeds.
    exceptionCounter.set(0);
    cleanupInvoked.set(false);
    Supplier<CompletableFuture<Void>> workingCleanup = () -> {
        if (!cleanupInvoked.compareAndSet(false, true)) {
            return FutureHelpers.failedFuture(new AssertionError("Cleanup invoked multiple times."));
        }
        setupOperationLog(context); // Setup the OperationLog to function correctly.
        return CompletableFuture.completedFuture(null);
    };
    val mapper2 = new StreamSegmentMapper(context.metadata, context.operationLog, context.stateStore,
            workingCleanup, context.storage, executorService());
    long id = mapper2.getOrAssignStreamSegmentId(segmentName, TIMEOUT).join();
    Assert.assertEquals("Unexpected number of attempts to map.", 1, exceptionCounter.get());
    Assert.assertTrue("Cleanup was not invoked.", cleanupInvoked.get());
    Assert.assertNotEquals("No valid SegmentId assigned.", ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
}
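The cleanup supplier above combines compareAndSet(false, true) as a run-at-most-once guard with set(false) to re-arm the guard between test phases. A small sketch of that guard in isolation (names illustrative):

import java.util.concurrent.atomic.AtomicBoolean;

public class OnceGuardExample {
    public static void main(String[] args) {
        final AtomicBoolean invoked = new AtomicBoolean(false);

        Runnable cleanup = () -> {
            // compareAndSet succeeds for exactly one caller per "arming".
            if (!invoked.compareAndSet(false, true)) {
                throw new AssertionError("Cleanup invoked multiple times.");
            }
            System.out.println("cleanup ran");
        };

        cleanup.run();      // first call: runs
        invoked.set(false); // re-arm for the next test phase
        cleanup.run();      // runs again after re-arming
    }
}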
From source file:org.dataconservancy.packaging.tool.integration.PackageGenerationTest.java
@Test
public void verifyRemediationTest() throws Exception {
    PackageState state = initializer.initialize(DCS_PROFILE);

    Set<URI> originalFileLocations = new HashSet<>();
    ipm2rdf.transformToNode(state.getPackageTree())
            .walk(node -> originalFileLocations.add(node.getFileInfo().getLocation()));

    // The package should contain two files:
    // - READMX
    // - READM
    //
    // The file with the acute E will be remediated to a resource named 'READMX', which will collide with
    // an existing resource of the same name.

    // assert that our sample problem files are in the content to be packaged
    assertTrue(originalFileLocations.stream().anyMatch(uri -> uri.getPath().endsWith("READMX")));
    // 0x0301 is the UTF-16 encoding of the 'COMBINING ACUTE ACCENT' combining diacritic
    // 0x00c9 is the UTF-16 encoding of 'LATIN CAPITAL LETTER E WITH ACUTE'
    assertTrue(originalFileLocations.stream().anyMatch(uri -> (uri.getPath().endsWith("README" + '\u0301'))
            || (uri.getPath().endsWith("READM" + '\u00c9'))));

    OpenedPackage opened = packager.createPackage(state, folder.getRoot());

    AtomicBoolean foundIllegal = new AtomicBoolean(Boolean.FALSE);
    AtomicBoolean foundRemediated = new AtomicBoolean(Boolean.FALSE);
    AtomicReference<String> remediatedFilename = new AtomicReference<>();
    AtomicBoolean foundCollision = new AtomicBoolean(Boolean.FALSE);
    AtomicReference<String> collidingFilename = new AtomicReference<>();

    // Walk the generated package, and make sure that
    // 1. That a resource with illegal characters does not exist
    // 2. That a resource named 'READMX' does exist
    // 3. That a resource named after the SHA-1 hex of its identifier exists
    // 4. That those two resources originate from two different files in the original package content
    opened.getPackageTree().walk(node -> {
        if (node.getFileInfo() == null || !node.getFileInfo().isFile()) {
            return;
        }
        System.err.println(node.getFileInfo().getName());
        System.err.println(" " + node.getFileInfo().getLocation().toString());

        // this should not happen, because a file name with invalid characters should have
        // been remediated prior to being inserted into the package
        if (node.getFileInfo().getLocation().getPath().endsWith("README" + '\u0301')
                || node.getFileInfo().getLocation().getPath().endsWith("READM" + '\u00c9')) {
            foundIllegal.set(Boolean.TRUE);
        }

        if (node.getFileInfo().getLocation().getPath().endsWith(shaHex(node.getIdentifier().toString()))) {
            foundRemediated.set(Boolean.TRUE);
            remediatedFilename.set(node.getFileInfo().getName());
            // short circuit
            return;
        }

        if (node.getFileInfo().getName().equals("READMX") || node.getFileInfo().getName().equals("READM")) {
            foundCollision.set(Boolean.TRUE);
            collidingFilename.set(node.getFileInfo().getName());
        }
    });

    assertFalse(foundIllegal.get());
    assertTrue(foundCollision.get());
    assertTrue(foundRemediated.get());
    assertNotNull(remediatedFilename.get());
    assertNotNull(collidingFilename.get());
    assertNotEquals(remediatedFilename.get(), collidingFilename.get());
}
From source file:fr.gouv.culture.vitam.utils.Executor.java
/**
 * Execute an external command
 *
 * @param cmd
 * @param tempDelay
 * @param correctValues
 * @param showOutput
 * @param realCommand
 * @return correctValues if ok, < 0 if an execution error occurs, or other error values
 */
public static int exec(List<String> cmd, long tempDelay, int[] correctValues, boolean showOutput,
        String realCommand) {
    // Create command with parameters
    CommandLine commandLine = new CommandLine(cmd.get(0));
    for (int i = 1; i < cmd.size(); i++) {
        commandLine.addArgument(cmd.get(i));
    }
    DefaultExecutor defaultExecutor = new DefaultExecutor();
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    PumpStreamHandler pumpStreamHandler = new PumpStreamHandler(outputStream);
    defaultExecutor.setStreamHandler(pumpStreamHandler);
    defaultExecutor.setExitValues(correctValues);
    AtomicBoolean isFinished = new AtomicBoolean(false);
    ExecuteWatchdog watchdog = null;
    Timer timer = null;
    if (tempDelay > 0) {
        // If delay (max time), then setup Watchdog
        timer = new Timer(true);
        watchdog = new ExecuteWatchdog(ExecuteWatchdog.INFINITE_TIMEOUT);
        defaultExecutor.setWatchdog(watchdog);
        CheckEndOfExecute endOfExecute = new CheckEndOfExecute(isFinished, watchdog, realCommand);
        timer.schedule(endOfExecute, tempDelay);
    }
    int status = -1;
    try {
        // Execute the command
        status = defaultExecutor.execute(commandLine);
    } catch (ExecuteException e) {
        if (e.getExitValue() == -559038737) { // 0xDEADBEEF as a signed int
            // Cannot run immediately so retry once
            try {
                Thread.sleep(100);
            } catch (InterruptedException e1) {
            }
            try {
                status = defaultExecutor.execute(commandLine);
            } catch (ExecuteException e1) {
                pumpStreamHandler.stop();
                System.err.println(StaticValues.LBL.error_error.get() + "Exception: " + e.getMessage()
                        + " Exec in error with " + commandLine.toString() + "\n\t" + outputStream.toString());
                status = -2;
                try {
                    outputStream.close();
                } catch (IOException e2) {
                }
                return status;
            } catch (IOException e1) {
                pumpStreamHandler.stop();
                System.err.println(StaticValues.LBL.error_error.get() + "Exception: " + e.getMessage()
                        + " Exec in error with " + commandLine.toString() + "\n\t" + outputStream.toString());
                status = -2;
                try {
                    outputStream.close();
                } catch (IOException e2) {
                }
                return status;
            }
        } else {
            pumpStreamHandler.stop();
            System.err.println(StaticValues.LBL.error_error.get() + "Exception: " + e.getMessage()
                    + " Exec in error with " + commandLine.toString() + "\n\t" + outputStream.toString());
            status = -2;
            try {
                outputStream.close();
            } catch (IOException e2) {
            }
            return status;
        }
    } catch (IOException e) {
        pumpStreamHandler.stop();
        System.err.println(StaticValues.LBL.error_error.get() + "Exception: " + e.getMessage()
                + " Exec in error with " + commandLine.toString() + "\n\t" + outputStream.toString());
        status = -2;
        try {
            outputStream.close();
        } catch (IOException e2) {
        }
        return status;
    } finally {
        isFinished.set(true);
        if (timer != null) {
            timer.cancel();
        }
        try {
            Thread.sleep(200);
        } catch (InterruptedException e1) {
        }
    }
    pumpStreamHandler.stop();
    if (defaultExecutor.isFailure(status) && watchdog != null) {
        if (watchdog.killedProcess()) {
            // killed by the watchdog (timeout)
            if (showOutput) {
                System.err.println(StaticValues.LBL.error_error.get() + "Exec is in Time Out");
            }
        }
        status = -3;
        try {
            outputStream.close();
        } catch (IOException e2) {
        }
    } else {
        if (showOutput) {
            System.out.println("Exec: " + outputStream.toString());
        }
        try {
            outputStream.close();
        } catch (IOException e2) {
        }
    }
    return status;
}
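CheckEndOfExecute is project-specific and not shown here, but the call sites suggest its shape: a timer task that fires after tempDelay and kills the process only if the finally block has not already set isFinished. The following is a guess at that shape under those assumptions, using a plain Process instead of commons-exec to stay self-contained; the 'sleep' command assumes a Unix-like system:

import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.atomic.AtomicBoolean;

public class WatchdogFlagExample {
    public static void main(String[] args) throws Exception {
        final AtomicBoolean isFinished = new AtomicBoolean(false);
        final Process process = new ProcessBuilder("sleep", "10").start();

        Timer timer = new Timer(true);
        timer.schedule(new TimerTask() {
            @Override
            public void run() {
                // Only kill the process if execution has not finished yet.
                if (!isFinished.get()) {
                    process.destroy();
                }
            }
        }, 2000);

        try {
            int status = process.waitFor();
            System.out.println("exit status: " + status);
        } finally {
            isFinished.set(true); // tell the watchdog the command is done
            timer.cancel();
        }
    }
}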
From source file:org.apache.hadoop.hbase.wal.TestWALSplit.java
@Test(timeout = 300000)
public void testIOEOnOutputThread() throws Exception {
    conf.setBoolean(HBASE_SKIP_ERRORS, false);
    generateWALs(-1);
    useDifferentDFSClient();

    FileStatus[] logfiles = fs.listStatus(WALDIR);
    assertTrue("There should be some log file", logfiles != null && logfiles.length > 0);
    // wals with no entries (like the one we don't use in the factory)
    // won't cause a failure since nothing will ever be written.
    // pick the largest one since it's most likely to have entries.
    int largestLogFile = 0;
    long largestSize = 0;
    for (int i = 0; i < logfiles.length; i++) {
        if (logfiles[i].getLen() > largestSize) {
            largestLogFile = i;
            largestSize = logfiles[i].getLen();
        }
    }
    assertTrue("There should be some log greater than size 0.", 0 < largestSize);

    // Set up a splitter that will throw an IOE on the output side
    WALSplitter logSplitter = new WALSplitter(wals, conf, HBASEDIR, fs, null, null, this.mode) {
        @Override
        protected Writer createWriter(Path logfile) throws IOException {
            Writer mockWriter = Mockito.mock(Writer.class);
            Mockito.doThrow(new IOException("Injected")).when(mockWriter).append(Mockito.<Entry>any());
            return mockWriter;
        }
    };

    // Set up a background thread dumper. Needs a thread to depend on and then we need to run
    // the thread dumping in a background thread so it does not hold up the test.
    final AtomicBoolean stop = new AtomicBoolean(false);
    final Thread someOldThread = new Thread("Some-old-thread") {
        @Override
        public void run() {
            while (!stop.get())
                Threads.sleep(10);
        }
    };
    someOldThread.setDaemon(true);
    someOldThread.start();
    final Thread t = new Thread("Background-thread-dumper") {
        public void run() {
            try {
                Threads.threadDumpingIsAlive(someOldThread);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    };
    t.setDaemon(true);
    t.start();

    try {
        logSplitter.splitLogFile(logfiles[largestLogFile], null);
        fail("Didn't throw!");
    } catch (IOException ioe) {
        assertTrue(ioe.toString().contains("Injected"));
    } finally {
        // Setting this to true will turn off the background thread dumper.
        stop.set(true);
    }
}
From source file:org.apache.hadoop.hdfs.client.impl.TestBlockReaderFactory.java
/**
 * Test the case where we have multiple threads waiting on the
 * ShortCircuitCache delivering a certain ShortCircuitReplica.
 *
 * In this case, there should only be one call to
 * createShortCircuitReplicaInfo. This one replica should be shared
 * by all threads.
 */
@Test(timeout = 60000)
public void testMultipleWaitersOnShortCircuitCache() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicBoolean creationIsBlocked = new AtomicBoolean(true);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            Uninterruptibles.awaitUninterruptibly(latch);
            if (!creationIsBlocked.compareAndSet(true, false)) {
                Assert.fail("there were multiple calls to "
                        + "createShortCircuitReplicaInfo. Only one was expected.");
            }
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testMultipleWaitersOnShortCircuitCache", sockDir);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADED;
    final int NUM_THREADS = 10;
    DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                byte contents[] = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
                Assert.assertFalse(creationIsBlocked.get());
                byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
                Assert.assertTrue(Arrays.equals(contents, expected));
            } catch (Throwable e) {
                LOG.error("readerRunnable error", e);
                testFailed.set(true);
            }
        }
    };
    Thread threads[] = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i] = new Thread(readerRunnable);
        threads[i].start();
    }
    Thread.sleep(500);
    latch.countDown();
    for (int i = 0; i < NUM_THREADS; i++) {
        Uninterruptibles.joinUninterruptibly(threads[i]);
    }
    cluster.shutdown();
    sockDir.close();
    Assert.assertFalse(testFailed.get());
}
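One detail worth noting: a JUnit runner only observes failures thrown on the test's own thread, so the reader threads above record failures with testFailed.set(true) and the main thread asserts on the flag after joining. A minimal sketch of that idiom (names illustrative, not part of the HDFS test):

import java.util.concurrent.atomic.AtomicBoolean;

public class FailureFlagExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean testFailed = new AtomicBoolean(false);

        Thread worker = new Thread(() -> {
            try {
                // ... exercise the code under test ...
                if (1 + 1 != 2) {
                    throw new AssertionError("impossible");
                }
            } catch (Throwable t) {
                // An exception here would otherwise die with the thread, unnoticed.
                testFailed.set(true);
            }
        });
        worker.start();
        worker.join();

        // The main thread (which the test runner watches) checks the flag.
        if (testFailed.get()) {
            throw new AssertionError("worker thread reported a failure");
        }
    }
}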
From source file:org.apache.hadoop.hbase.regionserver.wal.AbstractTestFSWAL.java
/**
 * Test flush for sure has a sequence id that is beyond the last edit appended. We do this by
 * slowing appends in the background ring buffer thread while in foreground we call flush. The
 * addition of the sync over HRegion in flush should fix an issue where flush was returning before
 * all of its appends had made it out to the WAL (HBASE-11109).
 * @throws IOException
 * @see <a href="https://issues.apache.org/jira/browse/HBASE-11109">HBASE-11109</a>
 */
@Test
public void testFlushSequenceIdIsGreaterThanAllEditsInHFile() throws IOException {
    String testName = currentTest.getMethodName();
    final TableName tableName = TableName.valueOf(testName);
    final HRegionInfo hri = new HRegionInfo(tableName);
    final byte[] rowName = tableName.getName();
    final HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("f"));
    HRegion r = HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDefaultRootDirPath(),
            TEST_UTIL.getConfiguration(), htd);
    HBaseTestingUtility.closeRegionAndWAL(r);
    final int countPerFamily = 10;
    final AtomicBoolean goslow = new AtomicBoolean(false);
    NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes.put(fam, 0);
    }
    // subclass and doctor a method.
    AbstractFSWAL<?> wal = newSlowWAL(FS, FSUtils.getRootDir(CONF), DIR.toString(), testName, CONF, null,
            true, null, null, new Runnable() {
                @Override
                public void run() {
                    if (goslow.get()) {
                        Threads.sleep(100);
                        LOG.debug("Sleeping before appending 100ms");
                    }
                }
            });
    HRegion region = HRegion.openHRegion(TEST_UTIL.getConfiguration(), TEST_UTIL.getTestFileSystem(),
            TEST_UTIL.getDefaultRootDirPath(), hri, htd, wal);
    EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
    try {
        List<Put> puts = null;
        for (HColumnDescriptor hcd : htd.getFamilies()) {
            puts = TestWALReplay.addRegionEdits(rowName, hcd.getName(), countPerFamily, ee, region, "x");
        }
        // Now assert edits made it in.
        final Get g = new Get(rowName);
        Result result = region.get(g);
        assertEquals(countPerFamily * htd.getFamilies().size(), result.size());
        // Construct a WALEdit and add it a few times to the WAL.
        WALEdit edits = new WALEdit();
        for (Put p : puts) {
            CellScanner cs = p.cellScanner();
            while (cs.advance()) {
                edits.add(cs.current());
            }
        }
        // Add any old cluster id.
        List<UUID> clusterIds = new ArrayList<UUID>();
        clusterIds.add(UUID.randomUUID());
        // Now make appends run slow.
        goslow.set(true);
        for (int i = 0; i < countPerFamily; i++) {
            final HRegionInfo info = region.getRegionInfo();
            final WALKey logkey = new WALKey(info.getEncodedNameAsBytes(), tableName,
                    System.currentTimeMillis(), clusterIds, -1, -1, region.getMVCC(), scopes);
            wal.append(info, logkey, edits, true);
        }
        region.flush(true);
        // FlushResult.flushSequenceId is not visible here so go get the current sequence id.
        long currentSequenceId = region.getReadPoint(null);
        // Now release the appends
        goslow.set(false);
        assertTrue(currentSequenceId >= region.getReadPoint(null));
    } finally {
        region.close(true);
        wal.close();
    }
}
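The goslow flag above is the inverse of a stop flag: the background Runnable polls it on every append and throttles itself while it is true, and the test toggles it around the critical section. The same shape in isolation (illustrative names):

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

public class ThrottleFlagExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean goslow = new AtomicBoolean(false);
        final AtomicBoolean stop = new AtomicBoolean(false);
        final AtomicInteger iterations = new AtomicInteger();

        Thread background = new Thread(() -> {
            while (!stop.get()) {
                if (goslow.get()) {
                    try { Thread.sleep(100); } catch (InterruptedException ignored) { }
                }
                iterations.incrementAndGet();
            }
        });
        background.start();

        goslow.set(true);  // throttle the background thread ...
        Thread.sleep(300); // ... while the foreground does its critical work
        goslow.set(false); // release it again

        stop.set(true);
        background.join();
        System.out.println("iterations: " + iterations.get());
    }
}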
From source file:com.atlassian.jira.bc.group.TestDefaultGroupService.java
@Test
public void testDeleteHappyPath() {
    final Mock mockProjectRoleService = new Mock(ProjectRoleService.class);
    mockProjectRoleService.setStrict(true);
    mockProjectRoleService.expectVoid("removeAllRoleActorsByNameAndType", P.ANY_ARGS);

    final Mock mockPermissionManager = new Mock(PermissionManager.class);
    mockPermissionManager.setStrict(true);
    mockPermissionManager.expectAndReturn("hasPermission", P.args(P.eq(Permissions.ADMINISTER), P.IS_ANYTHING),
            Boolean.TRUE);
    mockPermissionManager.expectVoid("removeGroupPermissions", P.args(P.eq("TestGroup")));

    final Mock mockNotificationManager = new Mock(NotificationSchemeManager.class);
    mockNotificationManager.setStrict(true);
    mockNotificationManager.expectAndReturn("removeEntities",
            P.args(P.eq(GroupDropdown.DESC), P.eq("TestGroup")), Boolean.TRUE);

    final Mock mockSubscriptionManager = new Mock(SubscriptionManager.class);
    mockSubscriptionManager.setStrict(true);
    mockSubscriptionManager.expectVoid("deleteSubscriptionsForGroup", P.ANY_ARGS);

    final AtomicBoolean calledSharePermissionDeleteUtils = new AtomicBoolean(false);
    final SharePermissionDeleteUtils deleteUtils = new SharePermissionDeleteUtils(null) {
        @Override
        public void deleteGroupPermissions(final String groupName) {
            calledSharePermissionDeleteUtils.set(true);
        }
    };

    final AtomicBoolean updateCommentAndGroupsCalled = new AtomicBoolean(false);
    final AtomicBoolean removeGroupCalled = new AtomicBoolean(false);
    final AtomicBoolean clearCalled = new AtomicBoolean(false);

    final DefaultGroupService defaultGroupService = new DefaultGroupService(null, null, null, null,
            (NotificationSchemeManager) mockNotificationManager.proxy(),
            (PermissionManager) mockPermissionManager.proxy(),
            (ProjectRoleService) mockProjectRoleService.proxy(), null, null, deleteUtils,
            (SubscriptionManager) mockSubscriptionManager.proxy(), null, null, null) {
        @Override
        void updateCommentsAndWorklogs(final User user, final String groupName, final String swapGroup) {
            updateCommentAndGroupsCalled.set(true);
        }

        @Override
        Group getGroup(final String groupName) {
            return null;
        }

        @Override
        void removeGroup(final Group group) throws PermissionException {
            removeGroupCalled.set(true);
        }

        @Override
        void clearIssueSecurityLevelCache() {
            clearCalled.set(true);
        }
    };

    final ErrorCollection errorCollection = new SimpleErrorCollection();
    final JiraServiceContext jiraServiceContext = getContext(errorCollection);

    assertTrue(defaultGroupService.delete(jiraServiceContext, "TestGroup", "SwapGroup"));
    assertTrue(updateCommentAndGroupsCalled.get());
    assertTrue(clearCalled.get());
    assertTrue(removeGroupCalled.get());
    assertTrue(calledSharePermissionDeleteUtils.get());

    mockPermissionManager.verify();
    mockProjectRoleService.verify();
    mockNotificationManager.verify();
    mockSubscriptionManager.verify();
}
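Here AtomicBoolean serves as a test spy rather than a concurrency tool: a local variable must be (effectively) final to be captured by an anonymous subclass, and AtomicBoolean provides a final reference with mutable contents. A minimal sketch of the capture pattern (names illustrative):

import java.util.concurrent.atomic.AtomicBoolean;

public class SpyFlagExample {
    static class Service {
        void doWork() { /* real behavior */ }
    }

    public static void main(String[] args) {
        // A local must be (effectively) final to be captured by an anonymous
        // subclass; AtomicBoolean is a final reference with mutable contents.
        final AtomicBoolean doWorkCalled = new AtomicBoolean(false);

        Service spy = new Service() {
            @Override
            void doWork() {
                doWorkCalled.set(true); // record the call instead of doing real work
            }
        };

        spy.doWork();
        if (!doWorkCalled.get()) {
            throw new AssertionError("doWork was not invoked");
        }
    }
}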
From source file:com.splout.db.integration.TestMultiThreadedFailover.java
@Test
public void test() throws Throwable {
    FileUtils.deleteDirectory(new File(TMP_FOLDER));
    new File(TMP_FOLDER).mkdirs();

    createSploutEnsemble(N_QNODES, N_DNODES);
    String[] qNodeAddresses = new String[N_QNODES];
    for (int i = 0; i < N_QNODES; i++) {
        qNodeAddresses[i] = getqNodes().get(i).getAddress();
    }

    final SploutClient client = new SploutClient(qNodeAddresses);
    final Tablespace testTablespace = createTestTablespace(N_DNODES);
    final Random random = new Random(SEED);
    final AtomicBoolean failed = new AtomicBoolean(false);

    deployIteration(0, random, client, testTablespace);

    for (QNode qnode : getqNodes()) {
        // Make sure all QNodes are aware of the first deploy.
        // There might be some delay as they have to receive notifications via
        // Hazelcast etc.
        long waitedSoFar = 0;
        QueryStatus status = null;
        SploutClient perQNodeClient = new SploutClient(qnode.getAddress());
        do {
            status = perQNodeClient.query(TABLESPACE, "0", "SELECT * FROM " + TABLE + ";", null);
            Thread.sleep(100);
            waitedSoFar += 100;
            if (waitedSoFar > 5000) {
                throw new AssertionError("Waiting too much on a test condition");
            }
        } while (status == null || status.getError() != null);
        log.info("QNode [" + qnode.getAddress() + "] is ready to serve deploy 0.");
    }

    try {
        // Business logic here
        ExecutorService service = Executors.newFixedThreadPool(N_THREADS);

        // This is the chaos thread: it will bring DNodes down on purpose
        // and then bring them up again.
        service.submit(new Runnable() {
            @Override
            public void run() {
                while (true) {
                    try {
                        Thread.sleep(1000);
                        log.info("Time to kill some DNode...");
                        int whichOne = (int) (Math.random() * getdNodes().size());
                        getdNodes().get(whichOne).testCommand(TestCommands.SHUTDOWN.toString());
                        Thread.sleep(1000);
                        log.info("Time to bring the DNode back to life...");
                        getdNodes().get(whichOne).testCommand(TestCommands.RESTART.toString());
                    } catch (InterruptedException e) {
                        log.info("MFT - Bye bye!");
                    } catch (DNodeException e) {
                        failed.set(true);
                        e.printStackTrace();
                        throw new RuntimeException(e);
                    } catch (TException e) {
                        failed.set(true);
                        e.printStackTrace();
                        throw new RuntimeException(e);
                    }
                }
            }
        });

        // These threads will continuously perform queries and check that the
        // results are consistent.
        for (int i = 0; i < N_THREADS; i++) {
            service.submit(new Runnable() {
                @SuppressWarnings("unchecked")
                @Override
                public void run() {
                    try {
                        while (true) {
                            int randomDNode = Math.abs(random.nextInt()) % N_DNODES;
                            QueryStatus status = client.query(TABLESPACE, ((randomDNode * 10) - 1) + "",
                                    "SELECT * FROM " + TABLE + ";", null);
                            log.info("Query status -> " + status);
                            assertEquals(1, status.getResult().size());
                            Map<String, Object> jsonResult = (Map<String, Object>) status.getResult().get(0);
                            assertEquals(randomDNode, jsonResult.get("dnode"));
                            Thread.sleep(100);
                        }
                    } catch (InterruptedException ie) {
                        // Bye bye
                        log.info("Bye bye!");
                    } catch (Throwable e) {
                        e.printStackTrace();
                        failed.set(true);
                    }
                }
            });
        }

        Thread.sleep(15000);
        assertEquals(false, failed.get());
    } finally {
        closeSploutEnsemble();
        FileUtils.deleteDirectory(new File(TMP_FOLDER));
    }
}
From source file:com.microsoft.tfs.core.ws.runtime.client.SOAPService.java
/**
 * Execute a SOAP request that was built via
 * {@link #createSOAPRequest(String, SOAPMethodRequestWriter)}
 *
 * @param request
 *        the request to execute (not null).
 * @param responseName
 *        the name of the SOAP response message for this request (not null)
 * @param responseReader
 *        the response reader that will do the work of reading the response
 *        (except the SOAP envelope). If null, no response stream reader is
 *        invoked (no response data is read except for the SOAP envelope and
 *        body elements).
 * @throws SOAPFault
 *         if a SOAP fault was returned by the server.
 * @throws UnauthorizedException
 *         if the client could not contact the server because of an
 *         authorization error (HTTP 401).
 * @throws ProxyUnauthorizedException
 *         if the client could not authenticate to the HTTP proxy
 * @throws FederatedAuthException
 *         if the client could not contact the server because it lacks the
 *         proper federated authentication (ACS) cookies and the federated
 *         authentication handler (set by
 *         {@link #setTransportAuthHandler(TransportAuthHandler)}) did not
 *         handle the exception. The caller is expected to obtain the
 *         cookies and resubmit.
 * @throws InvalidServerResponseException
 *         if the server returned data that could not be parsed as XML or
 *         SOAP.
 * @throws EndpointNotFoundException
 *         if the server returned HTTP 404 when the request was executed.
 * @throws TransportException
 *         if some other IO error occurred.
 * @throws TransportRequestHandlerCanceledException
 *         if the user cancelled the prompt for credentials
 */
protected void executeSOAPRequest(final SOAPRequest request, final String responseName,
        final SOAPMethodResponseReader responseReader) throws SOAPFault, UnauthorizedException,
        ProxyUnauthorizedException, FederatedAuthException, InvalidServerResponseException,
        EndpointNotFoundException, TransportException, TransportRequestHandlerCanceledException {
    /*
     * Duplicate the transport request handler map so we needn't keep a lock
     * and so that we have a consistent set throughout execution.
     */
    final List<TransportRequestHandler> requestHandlers = new ArrayList<TransportRequestHandler>();
    synchronized (transportRequestHandlers) {
        requestHandlers.addAll(transportRequestHandlers);
    }

    /*
     * Allow the transport authentication handler to process initial
     * credentials. This can happen if we're lazily authenticating and we do
     * not yet have a full set of credentials.
     */
    final AtomicBoolean cancel = new AtomicBoolean(false);
    for (final TransportRequestHandler requestHandler : requestHandlers) {
        // cancel doesn't stop us from invoking handlers
        if (requestHandler.prepareRequest(this, request, cancel) == Status.COMPLETE) {
            break;
        }
    }

    if (cancel.get()) {
        throw new TransportRequestHandlerCanceledException();
    }

    /*
     * Execute this method in a retry loop. On exceptions, we can delegate
     * to a user-configured exception handler, which may modify the method
     * and allow us to resubmit.
     *
     * The typical use case for this is ACS authentication - it can expire
     * in the middle of a call and we want to prompt the user to
     * reauthenticate.
     */
    RuntimeException failure = null;
    do {
        try {
            executeSOAPRequestInternal(request, responseName, responseReader);
            break;
        } catch (final RuntimeException e) {
            // Give the handlers a chance to handle/correct/cancel this
            // exception
            boolean exceptionHandled = false;
            cancel.set(false);
            for (final TransportRequestHandler requestHandler : requestHandlers) {
                // cancel doesn't stop us from invoking handlers
                if (requestHandler.handleException(this, request, e, cancel) == Status.COMPLETE) {
                    /*
                     * This handler handled the exception - defer all others
                     * from attempting to handle it and reset the auth
                     * state.
                     */
                    request.getPostMethod().getHostAuthState().invalidate();
                    failure = null;
                    exceptionHandled = true;
                    break;
                }

                // Status was CONTINUE, continue with next handler
            }

            // Wasn't handled, prepare to throw it
            if (!exceptionHandled) {
                // The user wants to cancel, convert to a cancel
                if (cancel.get()) {
                    failure = new TransportRequestHandlerCanceledException();
                } else {
                    failure = e;
                }
                break;
            }

            // Exception handled, loop to retry
        }
    } while (true);

    if (failure != null) {
        throw failure;
    }

    for (final TransportRequestHandler requestHandler : requestHandlers) {
        requestHandler.handleSuccess(this, request);
    }
}
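The cancel flag in this method acts as an out-parameter: each handler may set it, the caller reads it after the call returns, and set(false) re-arms it before the next retry. A reduced sketch of that shape with hypothetical handler logic (not the TFS SDK API):

import java.util.concurrent.atomic.AtomicBoolean;

public class OutParamExample {
    // Hypothetical handler: may request cancellation by setting the flag.
    static boolean handle(int attempt, AtomicBoolean cancel) {
        if (attempt > 2) {
            cancel.set(true); // signal the caller to stop retrying
            return false;
        }
        return attempt == 2;  // "handled" on the second attempt
    }

    public static void main(String[] args) {
        AtomicBoolean cancel = new AtomicBoolean(false);
        for (int attempt = 1; attempt <= 5; attempt++) {
            cancel.set(false); // re-arm the flag before each attempt
            boolean handled = handle(attempt, cancel);
            if (cancel.get()) {
                System.out.println("canceled at attempt " + attempt);
                break;
            }
            if (handled) {
                System.out.println("handled at attempt " + attempt);
                break;
            }
        }
    }
}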