List of usage examples for java.util.concurrent.ExecutorService.awaitTermination
boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;
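awaitTermination blocks until all tasks have completed execution after a shutdown request, until the timeout elapses, or until the current thread is interrupted, whichever happens first. It returns true if the executor terminated and false if the timeout elapsed; it never initiates a shutdown itself, so without a prior shutdown() or shutdownNow() it can only time out. A minimal self-contained example of the standard pattern:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        for (int i = 0; i < 4; i++) {
            final int id = i;
            pool.execute(() -> System.out.println("task " + id));
        }
        pool.shutdown(); // required: awaitTermination never returns true otherwise
        if (!pool.awaitTermination(5, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // timed out: interrupt any stragglers
        }
    }
}

The examples below show how real projects apply (and sometimes misapply) this pattern.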
From source file:net.fenyo.mail4hotspot.dns.DnsListener.java
public void run() {
    final ExecutorService pool = Executors.newCachedThreadPool();
    try {
        socket = new DatagramSocket(DNSPORT);
    } catch (final SocketException ex) {
        ex.printStackTrace();
        log.error("can not start DNS service");
        return;
    }
    do {
        final DatagramPacket query = new DatagramPacket(new byte[DATAGRAMMAXSIZE], DATAGRAMMAXSIZE);
        try {
            socket.receive(query);
            pool.execute(new Handler(query));
        } catch (IOException ex) {
            log.error(ex);
        }
    } while (!thread.isInterrupted());
    try {
        log.info("waiting for executor tasks to terminate");
        pool.awaitTermination(120, TimeUnit.SECONDS);
    } catch (InterruptedException ex) {
        log.error(ex);
    }
}
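One thing to note: this run() never calls pool.shutdown(), so awaitTermination cannot return true and will always block for the full 120 seconds before the method returns. A minimal sketch of the wind-down step with the shutdown added (names kept from the snippet; the shutdownNow escalation is an assumption about intent):

    log.info("waiting for executor tasks to terminate");
    pool.shutdown(); // stop accepting new handlers; queued ones still run
    try {
        if (!pool.awaitTermination(120, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // assumption: abandon handlers still running after 2 minutes
        }
    } catch (InterruptedException ex) {
        pool.shutdownNow();
        Thread.currentThread().interrupt(); // restore the interrupt status
    }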
From source file:com.liveramp.hank.partition_server.UpdateManager.java
@Override
public void update() throws IOException {
    HankTimer timer = new HankTimer();
    try {
        // Delete unknown files
        deleteUnknownFiles();
        // Perform update
        Semaphore concurrentUpdatesSemaphore = new Semaphore(configurator.getNumConcurrentUpdates());
        List<Throwable> encounteredThrowables = new ArrayList<Throwable>();
        PartitionUpdateTaskStatisticsAggregator partitionUpdateTaskStatisticsAggregator =
                new PartitionUpdateTaskStatisticsAggregator();
        Map<String, Queue<PartitionUpdateTask>> dataDirectoryToUpdateTasks =
                new HashMap<String, Queue<PartitionUpdateTask>>();
        List<PartitionUpdateTask> allUpdateTasks =
                buildPartitionUpdateTasks(partitionUpdateTaskStatisticsAggregator, encounteredThrowables);
        // Build and organize update tasks per data directory
        for (PartitionUpdateTask updateTask : allUpdateTasks) {
            String dataDirectory = updateTask.getDataDirectory();
            Queue<PartitionUpdateTask> updateTasks = dataDirectoryToUpdateTasks.get(dataDirectory);
            if (updateTasks == null) {
                updateTasks = new LinkedList<PartitionUpdateTask>();
                dataDirectoryToUpdateTasks.put(dataDirectory, updateTasks);
            }
            updateTasks.add(updateTask);
        }
        // Logging
        LOG.info("Number of update tasks: " + allUpdateTasks.size());
        for (Map.Entry<String, Queue<PartitionUpdateTask>> entry : dataDirectoryToUpdateTasks.entrySet()) {
            LOG.info("Number of update tasks scheduled in " + entry.getKey() + ": " + entry.getValue().size());
        }
        // Build executor services
        Map<String, ExecutorService> dataDirectoryToExecutorService = new HashMap<String, ExecutorService>();
        for (String dataDirectory : dataDirectoryToUpdateTasks.keySet()) {
            dataDirectoryToExecutorService.put(dataDirectory,
                    new UpdateThreadPoolExecutor(configurator.getMaxConcurrentUpdatesPerDataDirectory(),
                            new UpdaterThreadFactory(dataDirectory), concurrentUpdatesSemaphore));
        }
        LOG.info("Submitting update tasks for " + dataDirectoryToUpdateTasks.size() + " directories.");
        // Execute tasks. We execute one task for each data directory and loop around so that the tasks
        // attempt to acquire the semaphore in a reasonable order.
        boolean remaining = true;
        while (remaining) {
            remaining = false;
            for (Map.Entry<String, Queue<PartitionUpdateTask>> entry : dataDirectoryToUpdateTasks.entrySet()) {
                // Pop next task
                Queue<PartitionUpdateTask> partitionUpdateTasks = entry.getValue();
                if (!partitionUpdateTasks.isEmpty()) {
                    PartitionUpdateTask partitionUpdateTask = partitionUpdateTasks.remove();
                    // Execute task
                    dataDirectoryToExecutorService.get(entry.getKey()).execute(partitionUpdateTask);
                }
                if (!partitionUpdateTasks.isEmpty()) {
                    remaining = true;
                }
            }
        }
        LOG.info("All update tasks submitted, shutting down executor services");
        // Shutdown executors
        for (ExecutorService executorService : dataDirectoryToExecutorService.values()) {
            executorService.shutdown();
        }
        LOG.info("Waiting for executors to finish.");
        // Wait for executors to finish
        for (Map.Entry<String, ExecutorService> entry : dataDirectoryToExecutorService.entrySet()) {
            String directory = entry.getKey();
            ExecutorService executorService = entry.getValue();
            boolean keepWaiting = true;
            while (keepWaiting) {
                try {
                    LOG.info("Waiting for updates to complete on data directory: " + directory);
                    boolean terminated = executorService.awaitTermination(
                            UPDATE_EXECUTOR_TERMINATION_CHECK_TIMEOUT_VALUE,
                            UPDATE_EXECUTOR_TERMINATION_CHECK_TIMEOUT_UNIT);
                    if (terminated) {
                        // We finished executing all tasks
                        LOG.info("Finished updates for directory: " + directory);
                        keepWaiting = false;
                    }
                    // Otherwise, timeout elapsed and current thread was not interrupted. Keep waiting.
                    // Record update ETA
                    Hosts.setUpdateETA(host, partitionUpdateTaskStatisticsAggregator.computeETA());
                } catch (InterruptedException e) {
                    // Received interruption (stop request).
                    // Swallow the interrupted state and ask the executor to shutdown immediately. Also, keep waiting.
                    LOG.info("The update manager was interrupted. Stopping the update process (stop executing new partition update tasks"
                            + " and wait for those that were running to finish).");
                    // Shutdown all executors
                    for (ExecutorService otherExecutorService : dataDirectoryToExecutorService.values()) {
                        otherExecutorService.shutdownNow();
                    }
                    // Record failed update exception (we need to keep waiting)
                    encounteredThrowables.add(
                            new IOException("Failed to complete update: update interruption was requested."));
                }
            }
        }
        LOG.info("All executors have finished updates");
        // Shutdown all executors
        for (ExecutorService executorService : dataDirectoryToExecutorService.values()) {
            executorService.shutdownNow();
        }
        LOG.info("Finished with " + encounteredThrowables.size() + " errors.");
        // Detect failures
        if (!encounteredThrowables.isEmpty()) {
            LOG.error(String.format("%d exceptions encountered while running partition update tasks:",
                    encounteredThrowables.size()));
            int i = 0;
            for (Throwable t : encounteredThrowables) {
                LOG.error(String.format("Exception %d/%d:", ++i, encounteredThrowables.size()), t);
            }
            throw new IOException(String.format(
                    "Failed to complete update: %d exceptions encountered while running partition update tasks.",
                    encounteredThrowables.size()));
        }
        // Garbage collect useless host domains
        garbageCollectHostDomains(host);
        // Log statistics
        partitionUpdateTaskStatisticsAggregator.logStats();
    } catch (IOException e) {
        LOG.info("Update failed and took " + FormatUtils.formatSecondsDuration(timer.getDurationMs() / 1000));
        throw e;
    }
    LOG.info("Update succeeded and took " + FormatUtils.formatSecondsDuration(timer.getDurationMs() / 1000));
}
From source file:org.cloudifysource.dsl.download.ResourceDownloaderTest.java
@Test
public void testConcurrentDownload() throws Exception {
    for (int i = 0; i < 10; ++i) {
        cleanDownloadFolder();
        ExecutorService pool = Executors.newFixedThreadPool(3);
        try {
            DownloadTask task1 = new DownloadTask();
            DownloadTask task2 = new DownloadTask();
            DownloadTask task3 = new DownloadTask();
            Future<Exception> future1 = pool.submit(task1);
            Future<Exception> future2 = pool.submit(task2);
            Future<Exception> future3 = pool.submit(task3);
            Exception e1 = future1.get();
            Exception e2 = future2.get();
            Exception e3 = future3.get();
            Assert.assertNull("concurrent download failed: " + e1, e1);
            Assert.assertNull("concurrent download failed: " + e2, e2);
            Assert.assertNull("concurrent download failed: " + e3, e3);
            final File destinationFolder = new File(DESTINATION_FOLDER);
            File[] files = destinationFolder.listFiles();
            Assert.assertEquals("Expecting only one file", 1, files.length);
        } finally {
            pool.shutdown();
            pool.awaitTermination(10, TimeUnit.SECONDS);
            cleanDownloadFolder();
        }
    }
}
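Putting shutdown() and awaitTermination() in the finally block is good test hygiene, but the boolean result is discarded here, so a wedged download thread would go unnoticed. A hedged teardown helper that surfaces the timeout instead (not from the source; the TestDrainableQueue example further down asserts the result the same way):

    // Assumption-labeled helper: fail the test loudly if the pool does not
    // terminate in time, rather than silently carrying on.
    private static void shutdownAndVerify(ExecutorService pool) throws InterruptedException {
        pool.shutdown();
        Assert.assertTrue("pool did not terminate within 10s",
                pool.awaitTermination(10, TimeUnit.SECONDS));
    }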
From source file:org.apache.hadoop.yarn.util.TestFSDownload.java
@Test(timeout = 10000)
public void testDownloadBadPublic() throws IOException, URISyntaxException, InterruptedException {
    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
    FileContext files = FileContext.getLocalFSFileContext(conf);
    final Path basedir = files.makeQualified(new Path("target", TestFSDownload.class.getSimpleName()));
    files.mkdir(basedir, null, true);
    conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
    Map<LocalResource, LocalResourceVisibility> rsrcVis =
            new HashMap<LocalResource, LocalResourceVisibility>();
    Random rand = new Random();
    long sharedSeed = rand.nextLong();
    rand.setSeed(sharedSeed);
    System.out.println("SEED: " + sharedSeed);
    Map<LocalResource, Future<Path>> pending = new HashMap<LocalResource, Future<Path>>();
    ExecutorService exec = Executors.newSingleThreadExecutor();
    LocalDirAllocator dirs = new LocalDirAllocator(TestFSDownload.class.getName());
    int size = 512;
    LocalResourceVisibility vis = LocalResourceVisibility.PUBLIC;
    Path path = new Path(basedir, "test-file");
    LocalResource rsrc = createFile(files, path, size, rand, vis);
    rsrcVis.put(rsrc, vis);
    Path destPath = dirs.getLocalPathForWrite(basedir.toString(), size, conf);
    destPath = new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet()));
    FSDownload fsd = new FSDownload(files, UserGroupInformation.getCurrentUser(), conf, destPath, rsrc);
    pending.put(rsrc, exec.submit(fsd));
    exec.shutdown();
    while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS)) {
        // spin until the single download task has finished
    }
    Assert.assertTrue(pending.get(rsrc).isDone());
    try {
        for (Map.Entry<LocalResource, Future<Path>> p : pending.entrySet()) {
            p.getValue().get();
            Assert.fail("We localized a file that is not public.");
        }
    } catch (ExecutionException e) {
        Assert.assertTrue(e.getCause() instanceof IOException);
    }
}
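The while (!exec.awaitTermination(...)) loop turns the bounded wait into an unbounded one in one-second slices; here it is the enclosing @Test(timeout = 10000) that actually bounds it. A minimal sketch of the idiom on its own (the progress message is illustrative; the ExportAdmins example below uses the same loop to log progress):

    // Block until the executor terminates, waking each second so the caller
    // can log or check invariants. Relies on an external timeout to bound it.
    static void awaitQuietly(ExecutorService exec) throws InterruptedException {
        exec.shutdown();
        while (!exec.awaitTermination(1, TimeUnit.SECONDS)) {
            System.out.println("still waiting for tasks to finish...");
        }
    }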
From source file:org.mitre.mpf.mst.TestSystemNightly.java
@Test(timeout = 20 * MINUTES)
public void testPriorities() throws Exception {
    // an assumption failure causes the test to be ignored;
    // only run this test on a machine where /mpfdata/datasets is mapped
    Assume.assumeTrue("Skipping test. It should only run on Jenkins.", jenkins);
    log.info("Beginning testPriorities()");
    int TIMEOUT_MILLIS = 15 * MINUTES;
    ExecutorService executor = Executors.newFixedThreadPool(4);
    PriorityRunner busyWorkRunner = new PriorityRunner(9);
    PriorityRunner lowRunner = new PriorityRunner(1);
    PriorityRunner highRunner = new PriorityRunner(9);
    // wait until busy work is in progress; fill service message queue(s)
    executor.submit(busyWorkRunner);
    Assert.assertTrue("The busy work job is not in progress. Job may have failed to start.",
            busyWorkRunner.waitForSomeProgress());
    executor.submit(lowRunner);
    executor.submit(highRunner);
    List<PriorityRunner> priorityRunners = new LinkedList<PriorityRunner>();
    priorityRunners.add(busyWorkRunner);
    priorityRunners.add(lowRunner);
    priorityRunners.add(highRunner);
    PriorityMonitor priorityMonitor = new PriorityMonitor(priorityRunners);
    executor.submit(priorityMonitor);
    executor.shutdown();
    executor.awaitTermination(TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    Assert.assertTrue("The busy work job did not complete.", busyWorkRunner.completed);
    Assert.assertTrue("The LOW priority job did not complete.", lowRunner.completed);
    Assert.assertTrue("The HIGH priority job did not complete.", highRunner.completed);
    priorityMonitor.terminate(); // just in case
    Assert.assertFalse("The busy work job failed.", busyWorkRunner.hadError);
    Assert.assertFalse("The LOW priority job failed.", lowRunner.hadError);
    Assert.assertFalse("The HIGH priority job failed.", highRunner.hadError);
    Assert.assertTrue(String.format(
            "The LOW priority job was expected to take longer than the HIGH priority job. (LOW = %d ms, HIGH = %d ms)",
            lowRunner.elapsed, highRunner.elapsed), lowRunner.elapsed > highRunner.elapsed);
    log.info("Finished test #{}. LOW = {} ms, HIGH = {} ms.", testCtr, lowRunner.elapsed, highRunner.elapsed);
}
From source file:org.apache.hadoop.hbase.util.TestDrainableQueue.java
@Test(timeout = 30 * 1000)
public void testDrainableQueue() throws Exception {
    for (int attempt = 0; attempt < NUM_ATTEMPTS; ++attempt) {
        final int totalEvents = NUM_PRODUCERS * NUM_EVENTS_PER_BATCH;
        final int drainAfterNEvents = totalEvents / 2;
        shouldDrain = new CountDownLatch(drainAfterNEvents);
        numEnqueued.set(0);
        q = new DrainableQueue<Integer>("queue");
        ExecutorService exec = Executors.newFixedThreadPool(NUM_PRODUCERS);
        CompletionService<Void> cs = new ExecutorCompletionService<Void>(exec);
        List<Future<Void>> futures = new ArrayList<Future<Void>>();
        for (int producer = 0; producer < NUM_PRODUCERS; ++producer) {
            futures.add(cs.submit(new Producer(producer)));
        }
        shouldDrain.await();
        eventsProcessed = 0;
        LOG.info("Starting draining the queue");
        q.drain(this);
        LOG.info("Finished draining the queue");
        assertEquals(numEnqueued.get(), eventsProcessed);
        LOG.info("Events processed: " + eventsProcessed + ", drainAfterNEvents: " + drainAfterNEvents);
        assertTrue(eventsProcessed >= drainAfterNEvents);
        for (Future<Void> f : futures) {
            try {
                f.get();
            } catch (ExecutionException ex) {
                LOG.error("Exception from producer thread", ex);
                if (ex.getCause() instanceof AssertionError) {
                    throw (AssertionError) ex.getCause();
                }
                throw ex;
            }
        }
        exec.shutdown();
        assertTrue(exec.awaitTermination(5, TimeUnit.SECONDS));
    }
}
From source file:ca.zadrox.dota2esportticker.service.UpdateMatchService.java
private void updateMatches(boolean doResults) {
    if (!checkForConnectivity()) {
        LocalBroadcastManager.getInstance(this).sendBroadcast(new Intent(UPDATE_NO_CONNECTIVITY));
        return;
    }
    LocalBroadcastManager.getInstance(this).sendBroadcast(new Intent(UPDATE_STARTED));
    final String BASE_URL = "http://www.gosugamers.net/dota2/gosubet";
    final String MATCH_LINK_URL_BASE = "http://www.gosugamers.net";
    try {
        String rawHtml = new OkHttpClient().newCall(new Request.Builder().url(BASE_URL).build())
                .execute().body().string();
        rawHtml = rawHtml.substring(rawHtml.indexOf("<div id=\"col1\" class=\"rows\">"),
                rawHtml.indexOf("<div id=\"col2\" class=\"rows\">"));
        Document doc = Jsoup.parse(rawHtml);
        Elements tables = doc.getElementsByClass("matches");
        ArrayList<ArrayList<String>> matchLinks = new ArrayList<ArrayList<String>>(tables.size());
        int numSeries = 0;
        for (Element table : tables) {
            Elements links = table.getElementsByClass("match");
            if (links.size() != 0) {
                ArrayList<String> innerMatchLink = new ArrayList<String>(links.size());
                for (Element link : links) {
                    String linkHref = link.attr("href");
                    innerMatchLink.add(MATCH_LINK_URL_BASE + linkHref);
                    numSeries++;
                }
                matchLinks.add(innerMatchLink);
            }
        }
        // needed if there are massive reschedules to update content properly.
        Uri resultsUri = MatchContract.SeriesEntry.buildSeriesUriWithAfterTime(TimeUtils.getUTCTime());
        Cursor c = getContentResolver().query(resultsUri,
                new String[] { MatchContract.SeriesEntry.COLUMN_GG_MATCH_PAGE }, null, null, null);
        while (c.moveToNext()) {
            if (!matchLinks.get(0).contains(c.getString(0))) {
                matchLinks.get(0).add(c.getString(0));
            }
        }
        Iterator<ArrayList<String>> iterator = matchLinks.iterator();
        int numResults = 0;
        ExecutorService executorService = Executors.newFixedThreadPool(10);
        ArrayList<Future<BundledMatchItem>> seriesItemFutures =
                new ArrayList<Future<BundledMatchItem>>(numSeries);
        LogUtils.LOGD(TAG, "Starting Retrieval, num elements gathered: " + numSeries);
        int i = 0;
        while (iterator.hasNext()) {
            ArrayList<String> matchList = iterator.next();
            for (String matchUrl : matchList) {
                boolean hasResult = !iterator.hasNext();
                if (!doResults && hasResult) {
                    continue;
                } else if (hasResult) {
                    numResults++;
                }
                seriesItemFutures.add(executorService.submit(new MatchGetter(matchUrl, hasResult)));
                i++;
            }
        }
        executorService.shutdown();
        executorService.awaitTermination(20L, TimeUnit.SECONDS);
        LogUtils.LOGD(TAG, "Stopping Retrieval, elements submitted for fetching: " + i);
        ContentValues[] seriesEntries = new ContentValues[i];
        ContentValues[] resultEntries = new ContentValues[numResults];
        int seriesEntryWriteIndex = 0;
        int resultEntryWriteIndex = 0;
        for (Future<BundledMatchItem> seriesItemFuture : seriesItemFutures) {
            try {
                BundledMatchItem seriesItem = seriesItemFuture.get();
                if (seriesItem != null) {
                    seriesEntries[seriesEntryWriteIndex] = seriesItem.mMatch;
                    seriesEntryWriteIndex++;
                    if (seriesItem.hasResult) {
                        resultEntries[resultEntryWriteIndex] = seriesItem.mResult;
                        resultEntryWriteIndex++;
                    }
                }
            } catch (ExecutionException e) {
                Log.e(TAG, "Should never get here");
            }
        }
        this.getContentResolver().bulkInsert(MatchContract.SeriesEntry.CONTENT_URI, seriesEntries);
        if (doResults)
            this.getContentResolver().bulkInsert(MatchContract.ResultEntry.CONTENT_URI, resultEntries);
        PrefUtils.setLastUpdateTime(this, TimeUtils.getUTCTime());
    } catch (IOException e) {
        Log.e(TAG, e.getMessage(), e);
        e.printStackTrace();
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
    LocalBroadcastManager.getInstance(this).sendBroadcast(new Intent(UPDATE_COMPLETE));
    PrefUtils.setLastResultsUpdateTime(this, TimeUtils.getUTCTime());
}
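Here awaitTermination(20L, TimeUnit.SECONDS) acts as a soft barrier before the results are collected, but its return value is ignored: if the timeout elapses with downloads still running, the subsequent Future.get() calls can block indefinitely. An illustrative variant of the collection loop that bounds each fetch instead (the 5-second per-item timeout is an assumption, not from the source):

    for (Future<BundledMatchItem> f : seriesItemFutures) {
        try {
            BundledMatchItem item = f.get(5, TimeUnit.SECONDS); // hypothetical per-item timeout
            // ... copy item.mMatch / item.mResult into the ContentValues arrays ...
        } catch (TimeoutException e) {
            f.cancel(true); // give up on this match page
        } catch (ExecutionException e) {
            Log.e(TAG, "match fetch failed", e);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // stop collecting on interrupt
            break;
        }
    }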
From source file:org.apache.usergrid.tools.ExportAdmins.java
/**
 * Shouldn't have to do this but getOrganizationsForAdminUser() is not 100% reliable in some Usergrid installations.
 */
private void buildOrgMap() throws Exception {
    logger.info("Building org map");
    ExecutorService execService = Executors.newFixedThreadPool(readThreadCount);
    EntityManager em = emf.getEntityManager(CpNamingUtils.MANAGEMENT_APPLICATION_ID);
    String queryString = "select *";
    Query query = Query.fromQL(queryString);
    query.withLimit(1000);
    Results organizations = null;
    int count = 0;
    do {
        organizations = em.searchCollection(em.getApplicationRef(), "groups", query);
        for (Entity organization : organizations.getEntities()) {
            execService.submit(new OrgMapWorker(organization));
            count++;
        }
        if (count % 1000 == 0) {
            logger.info("Queued {} org map workers", count);
        }
        query.setCursor(organizations.getCursor());
    } while (organizations != null && organizations.hasCursor());
    execService.shutdown();
    while (!execService.awaitTermination(10, TimeUnit.SECONDS)) {
        logger.info("Processed {} orgs for map", userToOrgsMap.size());
    }
    logger.info("Org map complete, counted {} organizations", count);
}
From source file:org.apache.hadoop.fs.swift.TestSwiftFileSystemConcurrency.java
/**
 * test on concurrent file system changes
 */
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRaceConditionOnDirDeleteTest() throws Exception {
    SwiftTestUtils.skip("Skipping unreliable test");
    final String message = "message";
    final Path fileToRead = new Path(TEST_RACE_CONDITION_ON_DELETE_DIR + "/files/many-files/file");
    final ExecutorService executorService = Executors.newFixedThreadPool(2);
    fs.create(new Path(TEST_RACE_CONDITION_ON_DELETE_DIR + "/file/test/file1"));
    fs.create(new Path(TEST_RACE_CONDITION_ON_DELETE_DIR + "/documents/doc1"));
    fs.create(new Path(TEST_RACE_CONDITION_ON_DELETE_DIR + "/pictures/picture"));
    executorService.execute(new Runnable() {
        @Override
        public void run() {
            try {
                assertDeleted(new Path(TEST_RACE_CONDITION_ON_DELETE_DIR), true);
            } catch (IOException e) {
                LOG.warn("deletion thread:" + e, e);
                thread1Ex = e;
                throw new RuntimeException(e);
            }
        }
    });
    executorService.execute(new Runnable() {
        @Override
        public void run() {
            try {
                final FSDataOutputStream outputStream = fs.create(fileToRead);
                outputStream.write(message.getBytes());
                outputStream.close();
            } catch (IOException e) {
                LOG.warn("writer thread:" + e, e);
                thread2Ex = e;
                throw new RuntimeException(e);
            }
        }
    });
    executorService.awaitTermination(1, TimeUnit.MINUTES);
    if (thread1Ex != null) {
        throw thread1Ex;
    }
    if (thread2Ex != null) {
        throw thread2Ex;
    }
    try {
        fs.open(fileToRead);
        LOG.info("concurrency test failed to trigger a failure");
    } catch (FileNotFoundException expected) {
    }
}
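Note that executorService.shutdown() is never invoked in this test, so awaitTermination(1, TimeUnit.MINUTES) always sleeps the full minute: a pool that has not been asked to shut down can never reach the terminated state. A minimal sketch of the likely intent, keeping the snippet's names (the shutdownNow fallback is an assumption):

    executorService.shutdown(); // the two racing runnables keep executing
    if (!executorService.awaitTermination(1, TimeUnit.MINUTES)) {
        executorService.shutdownNow(); // assumption: abandon the race after a minute
    }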
From source file:com.trellmor.berrymotes.sync.EmoteDownloader.java
public void start(SyncResult syncResult) {
    Log.info("EmoteDownload started");
    this.updateNetworkInfo();
    mSyncResult = syncResult;
    if (!mIsConnected) {
        Log.error("Network not available");
        syncResult.stats.numIoExceptions++;
        return;
    }
    // Registers BroadcastReceiver to track network connection changes.
    IntentFilter filter = new IntentFilter(ConnectivityManager.CONNECTIVITY_ACTION);
    NetworkReceiver receiver = new NetworkReceiver();
    mContext.registerReceiver(receiver, filter);
    ExecutorService executor = Executors.newFixedThreadPool(THREAD_COUNT);
    mHttpClient = AndroidHttpClient.newInstance(USER_AGENT);
    try {
        String[] subreddits = getSubreddits();
        for (String subreddit : subreddits) {
            if (mSubreddits.isChecked(subreddit)) {
                Runnable subredditEmoteDownloader = new SubredditEmoteDownloader(mContext, this, subreddit);
                executor.execute(subredditEmoteDownloader);
            } else {
                // Delete this subreddit
                deleteSubreddit(subreddit, mContentResolver);
                // Reset last download date
                SharedPreferences.Editor settings = PreferenceManager.getDefaultSharedPreferences(mContext)
                        .edit();
                settings.remove(SettingsActivity.KEY_SYNC_LAST_MODIFIED + subreddit);
                settings.commit();
            }
        }
        executor.shutdown();
        executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
    } catch (URISyntaxException e) {
        Log.error("Emotes URL is malformed", e);
        synchronized (mSyncResult) {
            mSyncResult.stats.numParseExceptions++;
            if (mSyncResult.delayUntil < 60 * 60)
                mSyncResult.delayUntil = 60 * 60;
        }
        return;
    } catch (IOException e) {
        Log.error("Error reading from network: " + e.getMessage(), e);
        synchronized (mSyncResult) {
            mSyncResult.stats.numIoExceptions++;
            if (mSyncResult.delayUntil < 30 * 60)
                mSyncResult.delayUntil = 30 * 60;
        }
        return;
    } catch (InterruptedException e) {
        synchronized (mSyncResult) {
            syncResult.moreRecordsToGet = true;
        }
        Log.info("Sync interrupted");
        executor.shutdownNow();
        try {
            executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
        } catch (InterruptedException e2) {
        }
        Thread.currentThread().interrupt();
    } finally {
        Log.info("Deleted emotes: " + Long.toString(mSyncResult.stats.numDeletes));
        Log.info("Added emotes: " + Long.toString(mSyncResult.stats.numInserts));
        // Unregisters BroadcastReceiver at the end
        mContext.unregisterReceiver(receiver);
        mHttpClient.close();
    }
    Log.info("EmoteDownload finished");
}
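This last example comes closest to the two-phase shutdown idiom documented on ExecutorService itself: shutdown() followed by awaitTermination (here with an effectively unbounded timeout), then shutdownNow() plus a second await on interruption, and finally restoring the thread's interrupt status. For reference, a compact rendition of that canonical idiom (the 60-second timeouts are the Javadoc's choice, not this project's):

    // Two-phase shutdown, adapted from the java.util.concurrent.ExecutorService Javadoc.
    static void shutdownAndAwaitTermination(ExecutorService pool) {
        pool.shutdown(); // disable new tasks from being submitted
        try {
            // wait a while for existing tasks to terminate
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                pool.shutdownNow(); // cancel currently executing tasks
                // wait a while for tasks to respond to being cancelled
                if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                    System.err.println("Pool did not terminate");
                }
            }
        } catch (InterruptedException ie) {
            pool.shutdownNow(); // (re-)cancel if the current thread is interrupted
            Thread.currentThread().interrupt(); // preserve interrupt status
        }
    }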