List of usage examples for java.util.concurrent.CyclicBarrier.await()
public int await() throws InterruptedException, BrokenBarrierException
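The examples below come from real projects and carry project-specific setup. As a baseline, here is a minimal, self-contained sketch of the call itself (class and thread names are illustrative, not taken from any of the sources below): each party calls await(), all parties unblock together once the last one arrives, and the optional barrier action runs exactly once per trip.

import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;

public class AwaitDemo {
    public static void main(String[] args) {
        final int parties = 3;
        // The optional Runnable runs once per generation, after the last
        // party arrives and before any waiter is released.
        final CyclicBarrier barrier = new CyclicBarrier(parties,
                () -> System.out.println("--- all " + parties + " parties arrived ---"));

        for (int i = 0; i < parties; i++) {
            final int id = i;
            new Thread(() -> {
                try {
                    System.out.println("thread " + id + " waiting");
                    int arrivalIndex = barrier.await(); // blocks until all parties arrive
                    System.out.println("thread " + id + " released, arrival index " + arrivalIndex);
                } catch (InterruptedException | BrokenBarrierException e) {
                    // An interrupt or a broken barrier releases every waiter.
                    Thread.currentThread().interrupt();
                }
            }).start();
        }
    }
}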
From source file:rpc.TestRPC.java
@Test(timeout = 30000)
public void testRPCInterrupted() throws IOException, InterruptedException {
    final Configuration conf = new Configuration();
    Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl())
            .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).setSecretManager(null)
            .build();
    server.start();

    int numConcurrentRPC = 200;
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    final CyclicBarrier barrier = new CyclicBarrier(numConcurrentRPC);
    final CountDownLatch latch = new CountDownLatch(numConcurrentRPC);
    final AtomicBoolean leaderRunning = new AtomicBoolean(true);
    final AtomicReference<Throwable> error = new AtomicReference<Throwable>();
    Thread leaderThread = null;

    for (int i = 0; i < numConcurrentRPC; i++) {
        final int num = i;
        final TestProtocol proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);
        Thread rpcThread = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    barrier.await();
                    while (num == 0 || leaderRunning.get()) {
                        proxy.slowPing(false);
                    }
                    proxy.slowPing(false);
                } catch (Exception e) {
                    if (num == 0) {
                        leaderRunning.set(false);
                    } else {
                        error.set(e);
                    }
                    LOG.error(e);
                } finally {
                    latch.countDown();
                }
            }
        });
        rpcThread.start();
        if (leaderThread == null) {
            leaderThread = rpcThread;
        }
    }

    // let threads get past the barrier
    Thread.sleep(1000);
    // stop a single thread
    while (leaderRunning.get()) {
        leaderThread.interrupt();
    }

    latch.await();

    // should not cause any other thread to get an error
    assertTrue("rpc got exception " + error.get(), error.get() == null);
    server.stop();
}
From source file:org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.java
@Test(timeout = 20000)
public void testKilledDuringCommit() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    CyclicBarrier syncBarrier = new CyclicBarrier(2);
    OutputCommitter committer = new WaitingOutputCommitter(syncBarrier, true);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();

    JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
    completeJobTasks(job);
    assertJobState(job, JobStateInternal.COMMITTING);

    syncBarrier.await();
    job.handle(new JobEvent(job.getID(), JobEventType.JOB_KILL));
    assertJobState(job, JobStateInternal.KILLED);
    dispatcher.stop();
    commitHandler.stop();
}
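Several of the TestJobImpl examples in this list use the same idiom: a CyclicBarrier(2) acts as a rendezvous between the test thread and the committer thread, so the test decides exactly when the commit is allowed to finish (or fail). A stripped-down sketch of that handshake, with hypothetical names:

import java.util.concurrent.CyclicBarrier;

public class RendezvousDemo {
    public static void main(String[] args) throws Exception {
        final CyclicBarrier syncBarrier = new CyclicBarrier(2);

        Thread committer = new Thread(() -> {
            try {
                // ... commit work would happen here, then park until the test says "go" ...
                syncBarrier.await(); // party 1 of 2
                System.out.println("committer: released, finishing commit");
            } catch (Exception e) {
                Thread.currentThread().interrupt();
            }
        });
        committer.start();

        // Test thread: the commit cannot complete until we also arrive,
        // so intermediate state (e.g. COMMITTING) can be asserted here.
        System.out.println("test: job is still committing at this point");
        syncBarrier.await(); // party 2 of 2, releases both threads
        committer.join();
        System.out.println("test: commit allowed to finish");
    }
}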
From source file:org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.java
@Test(timeout = 20000)
public void testCommitJobFailsJob() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    CyclicBarrier syncBarrier = new CyclicBarrier(2);
    OutputCommitter committer = new TestingOutputCommitter(syncBarrier, false);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();

    JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
    completeJobTasks(job);
    assertJobState(job, JobStateInternal.COMMITTING);

    // let the committer fail and verify the job fails
    syncBarrier.await();
    assertJobState(job, JobStateInternal.FAILED);
    dispatcher.stop();
    commitHandler.stop();
}
From source file:org.acmsl.queryj.api.handlers.AbstractTemplateWritingHandler.java
/**
 * Writes the templates.
 * @param templates the templates.
 * @param engineName the engine name.
 * @param parameters the parameters.
 * @param charset the file encoding.
 * @param templateGenerator the template generator.
 * @param threadCount the number of threads to use.
 * @param rootDir the root dir.
 * @return the futures for the concurrent threads.
 * @throws QueryJBuildException if the templates cannot be written.
 */
@NotNull
@SuppressWarnings("unused")
protected List<Future<?>> writeTemplatesMultithread2ndVersion(@Nullable final List<T> templates,
        @NotNull final String engineName, @NotNull final QueryJCommand parameters,
        @NotNull final Charset charset, @NotNull final TG templateGenerator, final int threadCount,
        @NotNull final File rootDir) throws QueryJBuildException {
    @NotNull final List<Future<?>> result;

    if (templates != null) {
        result = new ArrayList<>(templates.size());

        @NotNull final ExecutorService threadPool = Executors.newFixedThreadPool(threadCount);
        @NotNull final CyclicBarrier round = new CyclicBarrier(threadCount);
        @NotNull AtomicInteger index = new AtomicInteger(0);
        int intIndex;

        @Nullable final Log t_Log = UniqueLogFactory.getLog(AbstractTemplateWritingHandler.class);

        for (@Nullable final T t_Template : templates) {
            if (t_Template != null) {
                intIndex = index.incrementAndGet();

                if (intIndex <= threadCount) {
                    if (t_Log != null) {
                        t_Log.info("Starting a new thread " + intIndex + "/" + threadCount);
                    }

                    result.add(threadPool.submit((Runnable) buildGeneratorThread(t_Template, templateGenerator,
                            retrieveOutputDir(t_Template.getTemplateContext(), rootDir, parameters), rootDir,
                            charset, intIndex, round, parameters)));
                } else {
                    if (t_Log != null) {
                        t_Log.info("No threads available " + intIndex + "/" + threadCount);
                    }

                    index = new AtomicInteger(0);

                    try {
                        round.await();
                    } catch (@NotNull final InterruptedException interrupted) {
                        if (t_Log != null) {
                            t_Log.info("Thread pool interrupted while waiting", interrupted);
                        }
                    } catch (@NotNull final BrokenBarrierException brokenBarrier) {
                        if (t_Log != null) {
                            t_Log.info(BROKEN_BARRIER_LITERAL, brokenBarrier);
                        }
                    }

                    if (t_Log != null) {
                        t_Log.info("Resetting thread pool (shutdown? " + threadPool.isShutdown() + ")");
                    }

                    round.reset();
                }
            }
        }
    } else {
        result = new ArrayList<>(0);
    }

    return result;
}
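The QueryJ handler above reuses one barrier across successive "rounds" of generator threads. A barrier starts a fresh generation automatically after a successful trip, which is what makes round-based batching work; the explicit reset() in the example is a defensive extra. A minimal sketch of the rounds idiom, with hypothetical names:

import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class RoundsDemo {
    public static void main(String[] args) throws Exception {
        final int parties = 4; // 3 workers plus 1 coordinator per round
        final CyclicBarrier round = new CyclicBarrier(parties);
        final ExecutorService pool = Executors.newFixedThreadPool(parties - 1);

        for (int r = 0; r < 2; r++) {
            for (int w = 0; w < parties - 1; w++) {
                final int worker = w;
                pool.submit(() -> {
                    try {
                        System.out.println("round worker " + worker + " done");
                        round.await(); // wait for the rest of the round
                    } catch (Exception e) {
                        Thread.currentThread().interrupt();
                    }
                });
            }
            round.await(); // coordinator joins; the trip ends the round
            System.out.println("--- round " + r + " complete ---");
            // the barrier is automatically ready for the next round: the "cyclic" part
        }
        pool.shutdown();
    }
}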
From source file:org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.java
@Test(timeout = 20000)
public void testRebootedDuringCommit() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    conf.setInt(MRJobConfig.MR_AM_MAX_ATTEMPTS, 2);
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    CyclicBarrier syncBarrier = new CyclicBarrier(2);
    OutputCommitter committer = new WaitingOutputCommitter(syncBarrier, true);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();

    AppContext mockContext = mock(AppContext.class);
    when(mockContext.isLastAMRetry()).thenReturn(true);
    when(mockContext.hasSuccessfullyUnregistered()).thenReturn(false);
    JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, mockContext);
    completeJobTasks(job);
    assertJobState(job, JobStateInternal.COMMITTING);

    syncBarrier.await();
    job.handle(new JobEvent(job.getID(), JobEventType.JOB_AM_REBOOT));
    assertJobState(job, JobStateInternal.REBOOT);
    // return the external state as ERROR since this is last retry.
    Assert.assertEquals(JobState.RUNNING, job.getState());
    when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
    Assert.assertEquals(JobState.ERROR, job.getState());

    dispatcher.stop();
    commitHandler.stop();
}
From source file:com.unboundid.scim.tools.SCIMQueryRate.java
/**
 * Performs the actual processing for this tool.  In this case, it gets a
 * connection to the directory server and uses it to perform the requested
 * searches.
 *
 * @return  The result code for the processing that was performed.
 */
@Override()
public ResultCode doToolProcessing() {
    // Initialize the Debugger.
    Debug.setEnabled(true);
    Debug.getLogger().addHandler(new ConsoleHandler());
    Debug.getLogger().setUseParentHandlers(false);

    // Determine the random seed to use.
    final Long seed;
    if (randomSeed.isPresent()) {
        seed = Long.valueOf(randomSeed.getValue());
    } else {
        seed = null;
    }

    // Create a value pattern for the filter.
    final ValuePattern filterPattern;
    boolean isQuery = true;
    if (filter.isPresent()) {
        try {
            filterPattern = new ValuePattern(filter.getValue(), seed);
        } catch (ParseException pe) {
            Debug.debugException(pe);
            err(ERR_QUERY_TOOL_BAD_FILTER_PATTERN.get(pe.getMessage()));
            return ResultCode.PARAM_ERROR;
        }
    } else if (resourceId.isPresent()) {
        isQuery = false;
        try {
            filterPattern = new ValuePattern(resourceId.getValue());
        } catch (ParseException pe) {
            Debug.debugException(pe);
            err(ERR_QUERY_TOOL_BAD_RESOURCE_ID_PATTERN.get(pe.getMessage()));
            return ResultCode.PARAM_ERROR;
        }
    } else {
        filterPattern = null;
    }

    // Get the attributes to return.
    final String[] attrs;
    if (attributes.isPresent()) {
        final List<String> attrList = attributes.getValues();
        attrs = new String[attrList.size()];
        attrList.toArray(attrs);
    } else {
        attrs = NO_STRINGS;
    }

    // If the --ratePerSecond option was specified, then limit the rate accordingly.
    FixedRateBarrier fixedRateBarrier = null;
    if (ratePerSecond.isPresent()) {
        final int intervalSeconds = collectionInterval.getValue();
        final int ratePerInterval = ratePerSecond.getValue() * intervalSeconds;
        fixedRateBarrier = new FixedRateBarrier(1000L * intervalSeconds, ratePerInterval);
    }

    // Determine whether to include timestamps in the output and if so what
    // format should be used for them.
    final boolean includeTimestamp;
    final String timeFormat;
    if (timestampFormat.getValue().equalsIgnoreCase("with-date")) {
        includeTimestamp = true;
        timeFormat = "dd/MM/yyyy HH:mm:ss";
    } else if (timestampFormat.getValue().equalsIgnoreCase("without-date")) {
        includeTimestamp = true;
        timeFormat = "HH:mm:ss";
    } else {
        includeTimestamp = false;
        timeFormat = null;
    }

    // Determine whether any warm-up intervals should be run.
    final long totalIntervals;
    final boolean warmUp;
    int remainingWarmUpIntervals = warmUpIntervals.getValue();
    if (remainingWarmUpIntervals > 0) {
        warmUp = true;
        totalIntervals = 0L + numIntervals.getValue() + remainingWarmUpIntervals;
    } else {
        warmUp = false;
        totalIntervals = 0L + numIntervals.getValue();
    }

    // Create the table that will be used to format the output.
    final OutputFormat outputFormat;
    if (csvFormat.isPresent()) {
        outputFormat = OutputFormat.CSV;
    } else {
        outputFormat = OutputFormat.COLUMNS;
    }

    final ColumnFormatter formatter = new ColumnFormatter(includeTimestamp, timeFormat, outputFormat, " ",
            new FormattableColumn(15, HorizontalAlignment.RIGHT, "Recent", "Queries/Sec"),
            new FormattableColumn(15, HorizontalAlignment.RIGHT, "Recent", "Avg Dur ms"),
            new FormattableColumn(15, HorizontalAlignment.RIGHT, "Recent", "Resources/Query"),
            new FormattableColumn(15, HorizontalAlignment.RIGHT, "Recent", "Errors/Sec"),
            new FormattableColumn(15, HorizontalAlignment.RIGHT, "Overall", "Queries/Sec"),
            new FormattableColumn(15, HorizontalAlignment.RIGHT, "Overall", "Avg Dur ms"));

    // Create values to use for statistics collection.
    final AtomicLong queryCounter = new AtomicLong(0L);
    final AtomicLong resourceCounter = new AtomicLong(0L);
    final AtomicLong errorCounter = new AtomicLong(0L);
    final AtomicLong queryDurations = new AtomicLong(0L);

    // Determine the length of each interval in milliseconds.
    final long intervalMillis = 1000L * collectionInterval.getValue();

    // We will use Apache's HttpClient library for this tool.
    SSLUtil sslUtil;
    try {
        sslUtil = createSSLUtil();
    } catch (LDAPException e) {
        debugException(e);
        err(e.getMessage());
        return e.getResultCode();
    }

    RegistryBuilder<ConnectionSocketFactory> registryBuilder = RegistryBuilder.create();
    final String schemeName;
    if (sslUtil != null) {
        try {
            SSLConnectionSocketFactory sslConnectionSocketFactory = new SSLConnectionSocketFactory(
                    sslUtil.createSSLContext("TLS"), new NoopHostnameVerifier());
            schemeName = "https";
            registryBuilder.register(schemeName, sslConnectionSocketFactory);
        } catch (GeneralSecurityException e) {
            debugException(e);
            err(ERR_SCIM_TOOL_CANNOT_CREATE_SSL_CONTEXT.get(getExceptionMessage(e)));
            return ResultCode.LOCAL_ERROR;
        }
    } else {
        schemeName = "http";
        registryBuilder.register(schemeName, new PlainConnectionSocketFactory());
    }
    final Registry<ConnectionSocketFactory> socketFactoryRegistry = registryBuilder.build();

    RequestConfig requestConfig = RequestConfig.custom().setConnectionRequestTimeout(30000)
            .setExpectContinueEnabled(true).build();
    SocketConfig socketConfig = SocketConfig.custom().setSoTimeout(30000).setSoReuseAddress(true).build();

    final PoolingHttpClientConnectionManager mgr = new PoolingHttpClientConnectionManager(
            socketFactoryRegistry);
    mgr.setMaxTotal(numThreads.getValue());
    mgr.setDefaultMaxPerRoute(numThreads.getValue());
    mgr.setDefaultSocketConfig(socketConfig);
    mgr.setValidateAfterInactivity(-1);

    ClientConfig jerseyConfig = new ClientConfig();
    jerseyConfig.property(ApacheClientProperties.CONNECTION_MANAGER, mgr);
    jerseyConfig.property(ApacheClientProperties.REQUEST_CONFIG, requestConfig);
    ApacheConnectorProvider connectorProvider = new ApacheConnectorProvider();
    jerseyConfig.connectorProvider(connectorProvider);

    if (authID.isPresent()) {
        try {
            final String password;
            if (authPassword.isPresent()) {
                password = authPassword.getValue();
            } else if (authPasswordFile.isPresent()) {
                password = authPasswordFile.getNonBlankFileLines().get(0);
            } else {
                password = null;
            }

            BasicCredentialsProvider provider = new BasicCredentialsProvider();
            provider.setCredentials(new AuthScope(host.getValue(), port.getValue()),
                    new UsernamePasswordCredentials(authID.getValue(), password));
            jerseyConfig.property(ApacheClientProperties.CREDENTIALS_PROVIDER, provider);
            jerseyConfig.property(ApacheClientProperties.PREEMPTIVE_BASIC_AUTHENTICATION, true);
        } catch (IOException e) {
            Debug.debugException(e);
            err(ERR_QUERY_TOOL_SET_BASIC_AUTH.get(e.getMessage()));
            return ResultCode.LOCAL_ERROR;
        }
    } else if (bearerToken.isPresent()) {
        jerseyConfig.register(new ClientRequestFilter() {
            public void filter(final ClientRequestContext clientRequestContext) throws IOException {
                try {
                    clientRequestContext.getHeaders().add("Authorization", "Bearer " + bearerToken.getValue());
                } catch (Exception ex) {
                    throw new RuntimeException("Unable to add authorization handler", ex);
                }
            }
        });
    }

    // Create the SCIM client to use for the queries.
    final URI uri;
    try {
        final String path;
        if (contextPath.getValue().startsWith("/")) {
            path = contextPath.getValue();
        } else {
            path = "/" + contextPath.getValue();
        }
        uri = new URI(schemeName, null, host.getValue(), port.getValue(), path, null, null);
    } catch (URISyntaxException e) {
        Debug.debugException(e);
        err(ERR_QUERY_TOOL_CANNOT_CREATE_URL.get(e.getMessage()));
        return ResultCode.OTHER;
    }
    final SCIMService service = new SCIMService(uri, jerseyConfig);

    if (xmlFormat.isPresent()) {
        service.setContentType(MediaType.APPLICATION_XML_TYPE);
        service.setAcceptType(MediaType.APPLICATION_XML_TYPE);
    }

    // Retrieve the resource schema.
    final ResourceDescriptor resourceDescriptor;
    try {
        resourceDescriptor = service.getResourceDescriptor(resourceName.getValue(), null);
        if (resourceDescriptor == null) {
            throw new ResourceNotFoundException(
                    "Resource " + resourceName.getValue() + " is not defined by the service provider");
        }
    } catch (SCIMException e) {
        Debug.debugException(e);
        err(ERR_QUERY_TOOL_RETRIEVE_RESOURCE_SCHEMA.get(e.getMessage()));
        return ResultCode.OTHER;
    }

    final SCIMEndpoint<? extends BaseResource> endpoint = service.getEndpoint(resourceDescriptor,
            BaseResource.BASE_RESOURCE_FACTORY);

    // Create the threads to use for the searches.
    final CyclicBarrier barrier = new CyclicBarrier(numThreads.getValue() + 1);
    final QueryRateThread[] threads = new QueryRateThread[numThreads.getValue()];
    for (int i = 0; i < threads.length; i++) {
        threads[i] = new QueryRateThread(i, isQuery, endpoint, filterPattern, attrs, barrier, queryCounter,
                resourceCounter, queryDurations, errorCounter, fixedRateBarrier);
        threads[i].start();
    }

    // Display the table header.
    for (final String headerLine : formatter.getHeaderLines(true)) {
        out(headerLine);
    }

    // Indicate that the threads can start running.
    try {
        barrier.await();
    } catch (Exception e) {
        Debug.debugException(e);
    }
    long overallStartTime = System.nanoTime();
    long nextIntervalStartTime = System.currentTimeMillis() + intervalMillis;

    boolean setOverallStartTime = false;
    long lastDuration = 0L;
    long lastNumEntries = 0L;
    long lastNumErrors = 0L;
    long lastNumSearches = 0L;
    long lastEndTime = System.nanoTime();
    for (long i = 0; i < totalIntervals; i++) {
        final long startTimeMillis = System.currentTimeMillis();
        final long sleepTimeMillis = nextIntervalStartTime - startTimeMillis;
        nextIntervalStartTime += intervalMillis;
        try {
            if (sleepTimeMillis > 0) {
                Thread.sleep(sleepTimeMillis);
            }
        } catch (Exception e) {
            Debug.debugException(e);
        }

        final long endTime = System.nanoTime();
        final long intervalDuration = endTime - lastEndTime;

        final long numSearches;
        final long numEntries;
        final long numErrors;
        final long totalDuration;
        if (warmUp && (remainingWarmUpIntervals > 0)) {
            numSearches = queryCounter.getAndSet(0L);
            numEntries = resourceCounter.getAndSet(0L);
            numErrors = errorCounter.getAndSet(0L);
            totalDuration = queryDurations.getAndSet(0L);
        } else {
            numSearches = queryCounter.get();
            numEntries = resourceCounter.get();
            numErrors = errorCounter.get();
            totalDuration = queryDurations.get();
        }

        final long recentNumSearches = numSearches - lastNumSearches;
        final long recentNumEntries = numEntries - lastNumEntries;
        final long recentNumErrors = numErrors - lastNumErrors;
        final long recentDuration = totalDuration - lastDuration;

        final double numSeconds = intervalDuration / 1000000000.0d;
        final double recentSearchRate = recentNumSearches / numSeconds;
        final double recentErrorRate = recentNumErrors / numSeconds;

        final double recentAvgDuration;
        final double recentEntriesPerSearch;
        if (recentNumSearches > 0L) {
            recentEntriesPerSearch = 1.0d * recentNumEntries / recentNumSearches;
            recentAvgDuration = 1.0d * recentDuration / recentNumSearches / 1000000;
        } else {
            recentEntriesPerSearch = 0.0d;
            recentAvgDuration = 0.0d;
        }

        if (warmUp && (remainingWarmUpIntervals > 0)) {
            out(formatter.formatRow(recentSearchRate, recentAvgDuration, recentEntriesPerSearch,
                    recentErrorRate, "warming up", "warming up"));

            remainingWarmUpIntervals--;
            if (remainingWarmUpIntervals == 0) {
                out(INFO_QUERY_TOOL_WARM_UP_COMPLETED.get());
                setOverallStartTime = true;
            }
        } else {
            if (setOverallStartTime) {
                overallStartTime = lastEndTime;
                setOverallStartTime = false;
            }

            final double numOverallSeconds = (endTime - overallStartTime) / 1000000000.0d;
            final double overallSearchRate = numSearches / numOverallSeconds;

            final double overallAvgDuration;
            if (numSearches > 0L) {
                overallAvgDuration = 1.0d * totalDuration / numSearches / 1000000;
            } else {
                overallAvgDuration = 0.0d;
            }

            out(formatter.formatRow(recentSearchRate, recentAvgDuration, recentEntriesPerSearch,
                    recentErrorRate, overallSearchRate, overallAvgDuration));

            lastNumSearches = numSearches;
            lastNumEntries = numEntries;
            lastNumErrors = numErrors;
            lastDuration = totalDuration;
        }

        lastEndTime = endTime;
    }

    // Stop all of the threads.
    ResultCode resultCode = ResultCode.SUCCESS;
    for (final QueryRateThread t : threads) {
        t.signalShutdown();
    }

    // Interrupt any blocked threads after a grace period.
    final WakeableSleeper sleeper = new WakeableSleeper();
    sleeper.sleep(1000);
    mgr.shutdown();

    for (final QueryRateThread t : threads) {
        final ResultCode r = t.waitForShutdown();
        if (resultCode == ResultCode.SUCCESS) {
            resultCode = r;
        }
    }

    return resultCode;
}
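SCIMQueryRate sizes its barrier at numThreads.getValue() + 1: the extra party is the coordinating thread itself, so all query threads stay parked until the tool has printed its headers and joins the barrier. The start-gate idiom in isolation, with hypothetical names:

import java.util.concurrent.CyclicBarrier;

public class StartGateDemo {
    public static void main(String[] args) throws Exception {
        final int workers = 4;
        // workers + 1: the coordinator is the extra party.
        final CyclicBarrier startGate = new CyclicBarrier(workers + 1);

        for (int i = 0; i < workers; i++) {
            final int id = i;
            new Thread(() -> {
                try {
                    startGate.await(); // park until the coordinator is ready
                    System.out.println("worker " + id + " measuring...");
                } catch (Exception e) {
                    Thread.currentThread().interrupt();
                }
            }).start();
        }

        System.out.println("coordinator: print headers, start timers...");
        startGate.await(); // final arrival: all workers start together
    }
}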
From source file:org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.java
@Test(timeout = 20000)
public void testCheckJobCompleteSuccess() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    CyclicBarrier syncBarrier = new CyclicBarrier(2);
    OutputCommitter committer = new TestingOutputCommitter(syncBarrier, true);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();

    JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
    completeJobTasks(job);
    assertJobState(job, JobStateInternal.COMMITTING);

    job.handle(new JobEvent(job.getID(), JobEventType.JOB_TASK_ATTEMPT_COMPLETED));
    assertJobState(job, JobStateInternal.COMMITTING);

    job.handle(new JobEvent(job.getID(), JobEventType.JOB_MAP_TASK_RESCHEDULED));
    assertJobState(job, JobStateInternal.COMMITTING);

    // let the committer complete and verify the job succeeds
    syncBarrier.await();
    assertJobState(job, JobStateInternal.SUCCEEDED);

    job.handle(new JobEvent(job.getID(), JobEventType.JOB_TASK_ATTEMPT_COMPLETED));
    assertJobState(job, JobStateInternal.SUCCEEDED);

    job.handle(new JobEvent(job.getID(), JobEventType.JOB_MAP_TASK_RESCHEDULED));
    assertJobState(job, JobStateInternal.SUCCEEDED);

    dispatcher.stop();
    commitHandler.stop();
}
From source file:de.codesourcery.eve.skills.ui.utils.ParallelUITasksRunner.java
public static boolean submitParallelTasks(final ApplicationThreadManager threadManager, final UITask parent,
        UITask... children) {
    final AtomicInteger childSuccesses = new AtomicInteger(0);
    final AtomicInteger childFailures = new AtomicInteger(0);

    if (parent == null) {
        throw new IllegalArgumentException("parent task cannot be NULL");
    }

    if (ArrayUtils.isEmpty(children)) {
        throw new IllegalArgumentException("Need to provide at least one child task");
    }

    final CyclicBarrier startBarrier = new CyclicBarrier(children.length + 1) {
        @Override
        public void reset() {
            System.out.println("========== resetting start barrier =========== ");
            super.reset();
        }
    };

    final CountDownLatch childrenTerminated = new CountDownLatch(children.length);

    int submittedChildren = 0;
    for (final UITask child : children) {
        final UITask wrapped = new UITask() {
            @Override
            public void successHook() throws Exception {
                boolean success = false;
                try {
                    child.successHook();
                    success = true;
                } finally {
                    if (success) {
                        childSuccesses.incrementAndGet();
                    } else {
                        childFailures.incrementAndGet();
                    }
                    childrenTerminated.countDown();
                }
            }

            @Override
            public void beforeExecution() {
                child.beforeExecution();
                // note: when this method throws an exception, #failure() is invoked
            }

            @Override
            public void setEnabledHook(boolean yesNo) {
                child.setEnabledHook(yesNo);
            }

            @Override
            public void failureHook(Throwable t) throws Exception {
                try {
                    child.failureHook(t);
                } finally {
                    childFailures.incrementAndGet();
                    childrenTerminated.countDown();
                }
            }

            @Override
            public void cancellationHook() {
                try {
                    child.cancellationHook();
                } finally {
                    childFailures.incrementAndGet();
                    childrenTerminated.countDown();
                }
            }

            @Override
            public String getId() {
                return child.getId();
            }

            @Override
            public void run() throws Exception {
                try {
                    if (log.isTraceEnabled()) {
                        log.trace("run(): Child task " + getId() + " is now waiting.");
                    }
                    // throws a BrokenBarrierException if any of the child tasks could not be started
                    startBarrier.await();
                } catch (InterruptedException e) {
                    log.error("run(): Child task " + getId() + " was interrupted");
                    Thread.currentThread().interrupt();
                } catch (BrokenBarrierException e) {
                    log.error("run(): Child task " + getId() + " aborted, barrier broken.");
                    throw new RuntimeException(
                            "Child task not started because another child task failed submitTask()");
                }

                if (log.isTraceEnabled()) {
                    log.trace("run(): Child task " + getId() + " is now running.");
                }
                child.run();
            }
        };

        if (null == threadManager.submitTask(wrapped, false)) {
            log.error("submitParallelTasks(): Failed to submit child " + child);

            // note: wait for (submittedChildren - childFailures) because some
            // child tasks may have already failed before reaching their run() method
            while (startBarrier.getNumberWaiting() != (submittedChildren - childFailures.get())) {
                log.info("submitParallelTasks(): Waiting for all child tasks to reach barrier ( "
                        + startBarrier.getNumberWaiting() + " waiting)");
                try {
                    java.lang.Thread.sleep(500);
                } catch (Exception e) {
                    // ignored
                }
            }
            startBarrier.reset(); // will cause all child threads waiting on this barrier to terminate
            return false;
        }
        submittedChildren++;
    }

    /*
     * All children are submitted and waiting at the barrier.
     */
    final boolean parentSubmitted = null != threadManager.submitTask("Control thread of " + parent.getId(),
            new Runnable() {
                @Override
                public void run() {
                    try {
                        while (true) {
                            try {
                                log.debug("run(): Parent task " + parent.getId()
                                        + " is waiting for its children to start...");
                                startBarrier.await(5, TimeUnit.SECONDS);
                                break;
                            } catch (TimeoutException e) {
                                if (childFailures.get() != 0) {
                                    runFailureHookOnEDT(parent, childFailures.get()
                                            + " child tasks of parent task " + parent.getId()
                                            + " failed to start.");
                                    return;
                                }
                            }
                        }
                    } catch (InterruptedException e) {
                        runFailureHookOnEDT(parent, "Parent task " + parent.getId()
                                + " was interrupted while waiting for its children to start.");
                        startBarrier.reset(); // let children fail.
                        Thread.currentThread().interrupt();
                        return;
                    } catch (BrokenBarrierException e) {
                        runFailureHookOnEDT(parent,
                                "Parent task " + parent.getId() + " failed to wait for its children");
                        throw new RuntimeException("Internal error - task " + parent.getId()
                                + " failed to wait for its children?");
                    }

                    log.debug("run(): Task " + parent.getId() + " is waiting for its children to finish");
                    try {
                        childrenTerminated.await();
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        runFailureHookOnEDT(parent, "Parent task " + parent.getId()
                                + " was interrupted while waiting for its children");
                        return;
                    }

                    log.info("run(): All children of parent task " + parent.getId()
                            + " have finished ( success: " + childSuccesses.get() + " / failure: "
                            + childFailures.get() + ")");

                    if (childFailures.get() > 0) {
                        runFailureHookOnEDT(parent, childFailures.get() + " child tasks of parent "
                                + parent.getId() + " have FAILED.");
                        return;
                    }

                    if (null == threadManager.submitTask(parent, false)) {
                        runFailureHookOnEDT(parent, "Failed to submit parent task " + parent.getId());
                        return;
                    }
                }
            }, false);

    if (!parentSubmitted) {
        log.debug("submitParallelTasks(): Failed to submit parent task " + parent.getId()
                + ", terminating child tasks.");
        startBarrier.reset(); // aborts all child tasks with a BrokenBarrierException
    }

    return parentSubmitted;
}
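ParallelUITasksRunner leans on two less common behaviors of await(): the timed form, await(5, TimeUnit.SECONDS), whose TimeoutException breaks the barrier for everyone, and reset(), which makes every blocked waiter throw BrokenBarrierException, used here as a group-abort signal. A compact sketch of the abort path, with hypothetical names:

import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;

public class AbortDemo {
    public static void main(String[] args) throws Exception {
        // More parties than will ever arrive, so the waiter stays blocked.
        final CyclicBarrier gate = new CyclicBarrier(3);

        Thread waiter = new Thread(() -> {
            try {
                gate.await();
                System.out.println("started (never reached in this demo)");
            } catch (BrokenBarrierException e) {
                System.out.println("aborted: barrier was reset while waiting");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        waiter.start();

        Thread.sleep(200); // give the waiter time to block on await()
        gate.reset();      // breaks the barrier: the waiter gets BrokenBarrierException
        waiter.join();
    }
}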
From source file:org.apache.hadoop.yarn.server.resourcemanager.security.TestDelegationTokenRenewer.java
@Test(timeout = 20000)
public void testConcurrentAddApplication() throws IOException, InterruptedException, BrokenBarrierException {
    final CyclicBarrier startBarrier = new CyclicBarrier(2);
    final CyclicBarrier endBarrier = new CyclicBarrier(2);

    // this token uses barriers to block during renew
    final Credentials creds1 = new Credentials();
    final Token<DelegationTokenIdentifier> token1 = mock(Token.class);
    when(token1.getKind()).thenReturn(KIND);
    DelegationTokenIdentifier dtId1 = new DelegationTokenIdentifier(new Text("user1"), new Text("renewer"),
            new Text("user1"));
    when(token1.decodeIdentifier()).thenReturn(dtId1);
    creds1.addToken(new Text("token"), token1);
    doReturn(true).when(token1).isManaged();
    doAnswer(new Answer<Long>() {
        public Long answer(InvocationOnMock invocation) throws InterruptedException, BrokenBarrierException {
            startBarrier.await();
            endBarrier.await();
            return Long.MAX_VALUE;
        }
    }).when(token1).renew(any(Configuration.class));

    // this dummy token fakes renewing
    final Credentials creds2 = new Credentials();
    final Token<DelegationTokenIdentifier> token2 = mock(Token.class);
    when(token2.getKind()).thenReturn(KIND);
    when(token2.decodeIdentifier()).thenReturn(dtId1);
    creds2.addToken(new Text("token"), token2);
    doReturn(true).when(token2).isManaged();
    doReturn(Long.MAX_VALUE).when(token2).renew(any(Configuration.class));

    // fire up the renewer
    final DelegationTokenRenewer dtr = createNewDelegationTokenRenewer(conf, counter);
    RMContext mockContext = mock(RMContext.class);
    when(mockContext.getSystemCredentialsForApps())
            .thenReturn(new ConcurrentHashMap<ApplicationId, ByteBuffer>());
    ClientRMService mockClientRMService = mock(ClientRMService.class);
    when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
    InetSocketAddress sockAddr = InetSocketAddress.createUnresolved("localhost", 1234);
    when(mockClientRMService.getBindAddress()).thenReturn(sockAddr);
    dtr.setRMContext(mockContext);
    when(mockContext.getDelegationTokenRenewer()).thenReturn(dtr);
    dtr.init(conf);
    dtr.start();

    // submit a job that blocks during renewal
    Thread submitThread = new Thread() {
        @Override
        public void run() {
            dtr.addApplicationAsync(mock(ApplicationId.class), creds1, false, "user");
        }
    };
    submitThread.start();

    // wait till 1st submit blocks, then submit another
    startBarrier.await();
    dtr.addApplicationAsync(mock(ApplicationId.class), creds2, false, "user");

    // signal 1st to complete
    endBarrier.await();
    submitThread.join();
}
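The renewer test uses a pair of two-party barriers to freeze a mocked renew() call at a known point: startBarrier.await() proves the renewal is in flight, and endBarrier.await() lets it finish. The same stepping idiom in isolation, with hypothetical names:

import java.util.concurrent.CyclicBarrier;

public class StepDemo {
    public static void main(String[] args) throws Exception {
        final CyclicBarrier startBarrier = new CyclicBarrier(2);
        final CyclicBarrier endBarrier = new CyclicBarrier(2);

        Thread operation = new Thread(() -> {
            try {
                startBarrier.await(); // 1: announce "I am inside the operation"
                endBarrier.await();   // 2: stay blocked until the test says "finish"
                System.out.println("operation: completed");
            } catch (Exception e) {
                Thread.currentThread().interrupt();
            }
        });
        operation.start();

        startBarrier.await(); // the test now knows the operation is blocked mid-flight
        System.out.println("test: operation is in flight, assert intermediate state here");
        endBarrier.await();   // release it
        operation.join();
    }
}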
From source file:org.zenoss.zep.dao.impl.EventSummaryDaoImplIT.java
@Test
public void testSummaryMultiThreadDedup() throws ZepException, InterruptedException, ExecutionException {
    // Attempts to create the same event from multiple threads - verifies we get the appropriate
    // de-duping behavior for the count and that we are holding the lock on the database appropriately.
    int poolSize = 10;
    final CyclicBarrier barrier = new CyclicBarrier(poolSize);
    ExecutorService executorService = Executors.newFixedThreadPool(poolSize);
    ExecutorCompletionService<String> ecs = new ExecutorCompletionService<String>(executorService);

    final Event event = EventTestUtils.createSampleEvent();
    final EventPreCreateContext context = new EventPreCreateContextImpl();

    for (int i = 0; i < poolSize; i++) {
        ecs.submit(new Callable<String>() {
            @Override
            public String call() throws Exception {
                barrier.await();
                return eventSummaryDao.create(event, context);
            }
        });
    }

    String uuid = null;
    for (int i = 0; i < poolSize; i++) {
        String thisUuid = ecs.take().get();
        if (uuid == null) {
            assertNotNull(thisUuid);
            uuid = thisUuid;
        } else {
            assertEquals(uuid, thisUuid);
        }
    }

    // Now look up the event and make sure the count is equal to the number of submitted workers
    assertEquals(poolSize, this.eventSummaryDao.findByUuid(uuid).getCount());
}
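The dedup test uses await() purely to maximize contention: all pool threads are released in the same instant, so the concurrent create() calls race on the same row. The same trick applied to a toy thread-safety check (names hypothetical; the shared state here is just an atomic counter):

import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;

public class ContentionDemo {
    public static void main(String[] args) throws Exception {
        final int poolSize = 10;
        final CyclicBarrier barrier = new CyclicBarrier(poolSize);
        final AtomicInteger counter = new AtomicInteger();
        final ExecutorService pool = Executors.newFixedThreadPool(poolSize);

        for (int i = 0; i < poolSize; i++) {
            pool.submit(() -> {
                barrier.await();                  // line everyone up...
                return counter.incrementAndGet(); // ...then hammer the shared state at once
            });
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
        System.out.println("count = " + counter.get() + " (expected " + poolSize + ")");
    }
}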