List of usage examples for java.util.concurrent ExecutorService shutdown
void shutdown();
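shutdown() initiates an orderly shutdown in which previously submitted tasks are executed, but no new tasks are accepted. It returns immediately and does not wait for tasks to finish; awaitTermination does that. Before the examples, a minimal sketch of the idiom recommended by the ExecutorService Javadoc — the pool size and timeout below are arbitrary choices for illustration:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownIdiom {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(4); // pool size chosen arbitrarily
        pool.submit(() -> System.out.println("working"));

        pool.shutdown(); // reject new tasks; already-submitted tasks keep running
        try {
            // bounded grace period for in-flight tasks (timeout chosen arbitrarily)
            if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
                pool.shutdownNow(); // cancel queued tasks and interrupt running ones
            }
        } catch (InterruptedException e) {
            pool.shutdownNow();
            Thread.currentThread().interrupt(); // preserve the caller's interrupt status
        }
    }
}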
From source file:com.microsoft.azure.servicebus.samples.partitionedqueues.PartitionedQueues.java
public void run(String connectionString) throws Exception {
    QueueClient sendClient;
    QueueClient receiveClient;

    // Create a QueueClient instance using the connection string builder.
    // We set the receive mode to "PeekLock", meaning the message is delivered
    // under a lock and must be acknowledged ("completed") to be removed from the queue.
    receiveClient = new QueueClient(new ConnectionStringBuilder(connectionString, "PartitionedQueue"),
            ReceiveMode.PEEKLOCK);

    // We use a single-threaded executor because we only process one message at a time.
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    this.registerMessageHandler(receiveClient, executorService);

    sendClient = new QueueClient(new ConnectionStringBuilder(connectionString, "PartitionedQueue"),
            ReceiveMode.PEEKLOCK);
    this.sendMessagesAsync(sendClient).thenRunAsync(() -> sendClient.closeAsync());

    // wait for ENTER or for 10 seconds to elapse
    waitForEnter(10);

    receiveClient.close();
    executorService.shutdown();
}
From source file:edu.stanford.nlp.parser.ensemble.Ensemble.java
public void run() throws IOException {
    List<Runnable> jobs = createJobs();

    boolean multiThreaded = false;
    if ((run.equalsIgnoreCase(Const.RUN_TRAIN) && multiThreadTrain)
            || (run.equalsIgnoreCase(Const.RUN_TEST) && multiThreadEval)) {
        multiThreaded = true;
    }

    // select the corpus and phase name for this run mode
    String file_name;
    String phase_name;
    if (run.equals(Const.RUN_TRAIN)) {
        file_name = trainCorpus;
        phase_name = "training";
    } else if (run.equals(Const.RUN_TEST)) {
        file_name = testCorpus;
        phase_name = "testing";
    } else {
        throw new RuntimeException("Unknown run mode: " + run);
    }

    // reverse the corpus for the right-to-left models
    if (rightToLeft) {
        File f = new File(file_name);
        File f1 = new File(workingDirectory + File.separator + f.getName());
        f1.deleteOnExit();
        FileUtils.copyFile(f, f1);
        if (rtl_pseudo_projective && run.equals(Const.RUN_TRAIN)) {
            String ppReversedFileName = workingDirectory + File.separator + f.getName() + ".pp";
            try {
                SystemLogger.logger().debug(
                        "Projectivise reversing " + phase_name + " corpus to " + ppReversedFileName + "\n");
                String input = f.getName();
                f = new File(ppReversedFileName);
                String output = f.getName();
                ProjectivizeCorpus.Projectivize(workingDirectory, input, output, "pp-reverse");
                f.deleteOnExit();
                f = new File("pp-reverse.mco");
                f.deleteOnExit();
                f = new File(ppReversedFileName);
            } catch (Exception e) {
                e.printStackTrace();
                throw new RuntimeException("Error: cannot projectivize corpus");
            }
        }
        String reversedFileName = workingDirectory + File.separator + f.getName() + ".reversed";
        SystemLogger.logger().debug("Reversing " + phase_name + " corpus to " + reversedFileName + "\n");
        ReverseCorpus.reverseCorpus(f.getAbsolutePath(), reversedFileName);
        f = new File(reversedFileName);
        f.deleteOnExit();
    }

    if (ltr_pseudo_projective && run.equals(Const.RUN_TRAIN)) {
        File f = new File(file_name);
        File f1 = new File(workingDirectory + File.separator + f.getName());
        f1.deleteOnExit();
        FileUtils.copyFile(f, f1);
        String ppFileName = workingDirectory + File.separator + f.getName() + ".pp";
        try {
            SystemLogger.logger().debug("Projectivise " + phase_name + " corpus to " + ppFileName + "\n");
            String input = f.getName();
            f = new File(ppFileName);
            String output = f.getName();
            ProjectivizeCorpus.Projectivize(workingDirectory, input, output, "pp");
            f.deleteOnExit();
            f = new File("pp.mco");
            f.deleteOnExit();
        } catch (Exception e) {
            e.printStackTrace();
            throw new RuntimeException("Error: cannot projectivize corpus");
        }
    }

    if (multiThreaded) {
        ExecutorService threadPool = Executors.newFixedThreadPool(threadCount);
        for (Runnable job : jobs) {
            threadPool.execute(job);
        }
        threadPool.shutdown();
        this.waitForThreads(jobs.size());
    } else {
        for (Runnable job : jobs) {
            job.run();
        }
    }

    // run the actual ensemble model
    if (run.equalsIgnoreCase(Const.RUN_TEST)) {
        String outFile = workingDirectory + File.separator + outputPrefix + "." + modelName + "-ensemble";
        List<String> sysFiles = new ArrayList<String>();
        for (String baseModel : baseModels) {
            sysFiles.add(workingDirectory + File.separator + outputPrefix + "." + modelName + "-" + baseModel);
        }
        // generate the ensemble
        Eisner.ensemble(testCorpus, sysFiles, outFile, reparseAlgorithm);
        // score the ensemble
        Score s = Scorer.evaluate(testCorpus, outFile);
        if (s != null) {
            SystemLogger.logger().info(String.format("ensemble LAS: %.2f %d/%d\n", s.las, s.lcorrect, s.total));
            SystemLogger.logger().info(String.format("ensemble UAS: %.2f %d/%d\n", s.uas, s.ucorrect, s.total));
        }
        SystemLogger.logger().info("Ensemble output saved as: " + outFile + "\n");
    }

    SystemLogger.logger().info("DONE.\n");
}
From source file:com.linkedin.pinot.integration.tests.MetadataAndDictionaryAggregationPlanClusterIntegrationTest.java
private void loadDataIntoH2(List<File> avroFiles) throws Exception {
    ExecutorService executor = Executors.newCachedThreadPool();
    setUpH2Connection(avroFiles, executor);
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);
}
From source file:org.jolokia.client.request.J4pConnectionPoolingIntegrationTest.java
private void searchParallel(J4pClient j4pClient) throws Exception {
    stubFor(get(urlPathMatching("/test/([a-z]*)"))
            .willReturn(aResponse().withFixedDelay(1000).withBody(getJsonResponse("test"))));

    final ExecutorService executorService = Executors.newFixedThreadPool(20);
    final J4pSearchRequest j4pSearchRequest = new J4pSearchRequest("java.lang:type=*");
    final List<Future<Void>> requestsList = new ArrayList<Future<Void>>();

    for (int i = 0; i < 20; i++) {
        requestsList.add(executorService.submit(new AsyncRequest(j4pClient, j4pSearchRequest)));
    }
    for (Future<Void> request : requestsList) {
        request.get();
    }
    executorService.shutdown();
}
From source file:voldemort.store.readonly.swapper.StoreSwapperTest.java
@Test
public void testAdminStoreSwapper() throws Exception {
    ExecutorService executor = Executors.newCachedThreadPool();
    try {
        // Use the admin store swapper
        StoreSwapper swapper = new AdminStoreSwapper(cluster, executor, adminClient, 1000000, true, true);
        testFetchSwap(swapper);
    } finally {
        executor.shutdown();
    }
}
From source file:com.sastix.cms.common.services.htmltopdf.PdfTest.java
@Test
public void performanceTest() throws InterruptedException {
    // Fewer threads increase the completion time; at least 15 threads
    // gave good performance on my laptop.
    int NTHREDS = 30;
    ExecutorService executor = Executors.newFixedThreadPool(NTHREDS);
    long start = DateTime.now().getMillis();
    for (int i = 0; i < numberOfTasks; i++) {
        Runnable worker = new PdfRunnable(i,
                "<html><head><meta charset=\"utf-8\"></head><h1>Müller</h1></html>");
        executor.execute(worker);
    }
    try {
        latch.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt flag instead of swallowing it
    }
    executor.shutdown();
    executor.awaitTermination(5, TimeUnit.SECONDS);
    assertEquals(cmap.size(), numberOfTasks);
    long passed = DateTime.now().getMillis() - start;
    LOG.info("Millis passed: " + passed);
    LOG.info("Seconds passed: " + (double) passed / 1000);
}
From source file:io.ecarf.core.cloud.task.processor.files.ProcessFilesTask.java
@Override
public void run() throws IOException {
    log.info("START: processing files");
    Stopwatch stopwatch = Stopwatch.createStarted();

    Set<String> filesSet = ObjectUtils.csvToSet(files);
    log.info("Processing files: " + filesSet);

    List<Callable<T>> tasks = getSubTasks(filesSet);
    int processors = Runtime.getRuntime().availableProcessors();

    try {
        if (tasks.size() == 1) {
            // we only have one file to process
            this.processSingleOutput(tasks.get(0).call());
        } else if (processors == 1) {
            // only one processor, so process synchronously
            List<T> output = new ArrayList<>();
            for (Callable<T> task : tasks) {
                output.add(task.call());
            }
            this.processMultiOutput(output);
        } else {
            // multiple cores
            ExecutorService executor = Utils.createFixedThreadPool(processors);
            try {
                List<Future<T>> results = executor.invokeAll(tasks);
                List<T> output = new ArrayList<>();
                for (Future<T> result : results) {
                    output.add(result.get());
                }
                this.processMultiOutput(output);
            } finally {
                executor.shutdown();
            }
        }
    } catch (Exception e) {
        log.error("Failed to process multiple files", e);
        throw new IOException(e);
    }

    log.info("TIMER# All files are processed successfully, elapsed time: " + stopwatch);
}
From source file:com.linkedin.pinot.integration.tests.StarTreeClusterIntegrationTest.java
/**
 * Generate the reference and star tree indexes and upload to corresponding tables.
 *
 * @param avroFiles
 * @param tableName
 * @param starTree
 * @throws IOException
 * @throws ArchiveException
 * @throws InterruptedException
 */
private void generateAndUploadSegments(List<File> avroFiles, String tableName, boolean starTree)
        throws IOException, ArchiveException, InterruptedException {
    BaseClusterIntegrationTest.ensureDirectoryExistsAndIsEmpty(_segmentsDir);
    BaseClusterIntegrationTest.ensureDirectoryExistsAndIsEmpty(_tarredSegmentsDir);

    ExecutorService executor = Executors.newCachedThreadPool();
    BaseClusterIntegrationTest.buildSegmentsFromAvro(avroFiles, executor, 0, _segmentsDir, _tarredSegmentsDir,
            tableName, starTree, getSingleValueColumnsSchema());
    executor.shutdown();
    executor.awaitTermination(TIMEOUT_IN_SECONDS, TimeUnit.SECONDS);

    for (String segmentName : _tarredSegmentsDir.list()) {
        LOGGER.info("Uploading segment {}", segmentName);
        File file = new File(_tarredSegmentsDir, segmentName);
        FileUploadUtils.sendSegmentFile(ControllerTestUtils.DEFAULT_CONTROLLER_HOST,
                ControllerTestUtils.DEFAULT_CONTROLLER_API_PORT, segmentName, new FileInputStream(file),
                file.length());
    }
}
From source file:Main.java
public void processUsers(int numOfWorkerThreads) {
    ExecutorService threadPool = Executors.newFixedThreadPool(numOfWorkerThreads);
    int chunk = itemsToBeProcessed.length / numOfWorkerThreads;
    int start = 0;
    List<Future> tasks = new ArrayList<Future>();
    for (int i = 0; i < numOfWorkerThreads; i++) {
        // the last worker also takes the remainder left over by the integer division
        int end = (i == numOfWorkerThreads - 1) ? itemsToBeProcessed.length : start + chunk;
        tasks.add(threadPool.submit(new WorkerThread(start, end)));
        start = start + chunk;
    }
    // join all worker threads to the main thread
    for (Future f : tasks) {
        try {
            f.get();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    threadPool.shutdown();
    // NOTE: polling isTerminated() in a tight loop wastes CPU;
    // a blocking alternative follows this example
    while (!threadPool.isTerminated()) {
    }
}
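The closing busy-wait above spins on isTerminated() and burns a CPU core until the pool drains. A minimal blocking alternative, assuming the same threadPool variable, an import of java.util.concurrent.TimeUnit, and an arbitrary one-hour cap:

threadPool.shutdown();
try {
    // block until the pool drains instead of polling isTerminated()
    if (!threadPool.awaitTermination(1, TimeUnit.HOURS)) {
        threadPool.shutdownNow(); // give up and interrupt any stragglers
    }
} catch (InterruptedException e) {
    threadPool.shutdownNow();
    Thread.currentThread().interrupt();
}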