Example usage for java.util.concurrent ExecutorService awaitTermination

Introduction

This page collects example usages of the java.util.concurrent ExecutorService.awaitTermination method, drawn from open-source projects.

Prototype

boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;

Document

Blocks until all tasks have completed execution after a shutdown request, or the timeout occurs, or the current thread is interrupted, whichever happens first.
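
The method returns true if the executor terminated and false if the timeout elapsed first, so callers normally invoke shutdown() (or shutdownNow()) beforehand and check the result. Below is a minimal sketch of the two-phase shutdown pattern recommended by the ExecutorService Javadoc; the pool parameter and the 60-second timeout are placeholders.

void shutdownAndAwaitTermination(ExecutorService pool) {
    pool.shutdown(); // stop accepting new tasks
    try {
        // give running tasks a chance to finish
        if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // cancel lingering tasks
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                System.err.println("Pool did not terminate");
            }
        }
    } catch (InterruptedException ie) {
        pool.shutdownNow();
        Thread.currentThread().interrupt(); // preserve the interrupt status
    }
}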

Usage

From source file:org.openiot.ide.core.MenuFactory.java

/**
 * Validates the configured URLs, one validation task per property entry.
 */
private void validateUrls() {

    if (propertyMap.isEmpty())
        return;

    ExecutorService es = Executors.newFixedThreadPool(propertyMap.size());

    for (Map.Entry<String, HashMap<String, String>> entry : propertyMap.entrySet()) {
        es.execute(new ValidatorRunnable(entry));
    }

    es.shutdown();
    try {
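        // Long.MAX_VALUE nanoseconds is on the order of 292 years, i.e. wait indefinitely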
        es.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

}

From source file:org.pentaho.reporting.platform.plugin.staging.AsyncJobFileStagingHandlerTest.java

@Test
public void testStagingDirNotGetDeletedBetweenExecutions() throws Exception {
    CountDownLatch startSignal = new CountDownLatch(1); // count of 1 so the workers actually wait for the start signal

    int count = 30;
    ExecutorService service = Executors.newFixedThreadPool(count);

    List<AsyncJobFileStagingHandler> handlers = new ArrayList<>();

    for (int i = 0; i < count; i++) {
        AsyncJobFileStagingHandler handler = new AsyncJobFileStagingHandler(session);
        handlers.add(handler);
        service.submit(new AsyncStagingReadWrite(startSignal, handler));
    }

    startSignal.countDown();
    service.shutdown();
    service.awaitTermination(5, TimeUnit.SECONDS);

    Path stagingDir = AsyncJobFileStagingHandler.getStagingDirPath();
    File[] fileList = stagingDir.toFile().listFiles();

    File sessionFolder = fileList[0];
    assertTrue(sessionFolder.isDirectory());

    assertEquals("Folder is named by session id", session.getId(), sessionFolder.getName());

    assertEquals("Folder is NOT empty after a BACKLOG-7598 fix", 30, sessionFolder.list().length);

    for (AsyncJobFileStagingHandler handler : handlers) {
        assertTrue(handler.getStagingContent().cleanContent());
    }

    assertEquals("Staging folder empty now", 0, sessionFolder.list().length);
}
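
Note that the boolean result of awaitTermination is discarded above; if the 5-second window elapsed with workers still running, the assertions that follow it would race against them. A stricter variant (a suggested change, not part of the original test) asserts the result:

    assertTrue("workers did not finish in time", service.awaitTermination(5, TimeUnit.SECONDS));

Several of the examples below discard the result in the same way.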

From source file:org.apache.carbondata.sdk.file.ConcurrentSdkWriterTest.java

@Test
public void testWriteFiles() throws IOException {
    String path = "./testWriteFiles";
    FileUtils.deleteDirectory(new File(path));

    Field[] fields = new Field[2];
    fields[0] = new Field("name", DataTypes.STRING);
    fields[1] = new Field("age", DataTypes.INT);

    ExecutorService executorService = Executors.newFixedThreadPool(numOfThreads);
    try {
        CarbonWriterBuilder builder = CarbonWriter.builder().outputPath(path).withThreadSafe(numOfThreads);
        CarbonWriter writer = builder.withCsvInput(new Schema(fields)).writtenBy("ConcurrentSdkWriterTest")
                .build();
        // write in multi-thread
        for (int i = 0; i < numOfThreads; i++) {
            executorService.submit(new WriteLogic(writer));
        }
        executorService.shutdown();
        executorService.awaitTermination(2, TimeUnit.HOURS);
        writer.close();
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    }

    // read the files and verify the count
    CarbonReader reader;
    try {
        reader = CarbonReader.builder(path, "_temp1121").projection(new String[] { "name", "age" }).build();
        int i = 0;
        while (reader.hasNext()) {
            Object[] row = (Object[]) reader.readNextRow();
            i++;
        }
        Assert.assertEquals(i, numOfThreads * recordsPerItr);
        reader.close();
    } catch (InterruptedException e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    }

    FileUtils.deleteDirectory(new File(path));
}

From source file:com.espertech.esper.multithread.TestMTContextListenerDispatch.java

private void tryPerformanceDispatch(int numThreads, int numRepeats) throws Exception {
    MyListener listener = new MyListener();
    engine.getEPAdministrator().getStatement("select").addListener(listener);

    List<Object>[] events = new ArrayList[numThreads];
    int eventId = 0;
    for (int threadNum = 0; threadNum < numThreads; threadNum++) {
        events[threadNum] = new ArrayList<Object>();
        for (int eventNum = 0; eventNum < numRepeats; eventNum++) {
            // partition in range 0 to 49
            int partition = (int) (Math.random() * 50);
            eventId++;
            events[threadNum].add(new SupportBean(Integer.toString(partition), eventId));
        }
    }

    ExecutorService threadPool = Executors.newFixedThreadPool(numThreads);
    Future futures[] = new Future[numThreads];
    long startTime = System.currentTimeMillis();

    for (int i = 0; i < numThreads; i++) {
        Callable callable = new SendEventCallable(i, engine, events[i].iterator());
        futures[i] = threadPool.submit(callable);
    }
    for (Future future : futures) {
        assertEquals(true, future.get());
    }
    long delta = System.currentTimeMillis() - startTime;

    threadPool.shutdown();
    threadPool.awaitTermination(10, TimeUnit.SECONDS);

    // print those events not received
    for (List<Object> eventList : events) {
        for (Object event : eventList) {
            if (!listener.getBeans().contains(event)) {
                log.info("Expected event was not received, event " + event);
            }
        }
    }

    assertEquals(numRepeats * numThreads, listener.getBeans().size());
    assertTrue("delta=" + delta, delta < 500);
}

From source file:com.linkedin.pinot.integration.tests.DefaultColumnsClusterIntegrationTest.java

protected void setUp(boolean sendSchema) throws Exception {
    // Set up directories.
    FileUtils.deleteQuietly(TMP_DIR);
    Assert.assertTrue(TMP_DIR.mkdirs());
    Assert.assertTrue(SEGMENT_DIR.mkdir());
    Assert.assertTrue(TAR_DIR.mkdir());

    // Start the cluster.
    startZk();
    startController();
    startBroker();
    startServer();

    // Create the table.
    addOfflineTable("mytable", "DaysSinceEpoch", "daysSinceEpoch", -1, "", null, null);

    // Add the schema.
    if (sendSchema) {
        sendSchema();
    }

    // Unpack the Avro files.
    List<File> avroFiles = unpackAvroData(TMP_DIR, SEGMENT_COUNT);

    // Load data into H2.
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);

    // Create segments from Avro data.
    buildSegmentsFromAvro(avroFiles, executor, 0, SEGMENT_DIR, TAR_DIR, "mytable", false, null);

    // Initialize query generator.
    setupQueryGenerator(avroFiles, executor);

    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Set up a Helix spectator to count the uploaded segments and unlock the latch once SEGMENT_COUNT segments
    // are online.
    CountDownLatch latch = setupSegmentCountCountDownLatch("mytable", SEGMENT_COUNT);

    // Upload the segments.
    for (String segmentName : TAR_DIR.list()) {
        File file = new File(TAR_DIR, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, new FileInputStream(file),
                file.length());
    }

    // Wait for all segments to be ONLINE.
    latch.await();
    waitForSegmentsOnline();
}

From source file:com.espertech.esper.multithread.dispatchmodel.TestMTDispatch.java

private void trySend(int numThreads, int numCount, int ratioDoubleAdd,
        UpdateDispatchViewModel updateDispatchView, DispatchService dispatchService) throws Exception {
    // execute the dispatch callables on a fixed-size pool
    ExecutorService threadPool = Executors.newFixedThreadPool(numThreads);
    Future future[] = new Future[numThreads];
    DispatchCallable callables[] = new DispatchCallable[numThreads];
    DispatchProducer producer = new DispatchProducer(updateDispatchView);
    for (int i = 0; i < numThreads; i++) {
        callables[i] = new DispatchCallable(producer, i, numCount, ratioDoubleAdd, updateDispatchView,
                dispatchService);
        future[i] = threadPool.submit(callables[i]);
    }

    threadPool.shutdown();
    threadPool.awaitTermination(10, TimeUnit.SECONDS);

    for (int i = 0; i < numThreads; i++) {
        assertTrue((Boolean) future[i].get());
    }
}
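
In this test awaitTermination runs before the Future.get() calls, so a task that outlives the 10-second window can still block an unbounded get() indefinitely. A bounded variant (a suggestion, not part of the original) fails fast instead:

        assertTrue((Boolean) future[i].get(10, TimeUnit.SECONDS));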

From source file:com.ebay.jetstream.event.processor.esper.ESPTest.java

public void testProcessor() {
    EsperProcessor processor = getProcessor("ESPTestProcessor");
    ESPTestSink sink = new ESPTestSink();
    List<EventSink> sinks = new ArrayList<EventSink>();
    sinks.add(sink);
    processor.setEventSinks(sinks);
    // TODO: start not exposed - processor.start(); // it was stopped while running previous test

    ExecutorService threadPool = Executors.newCachedThreadPool(new ESPTestThreadFactory());
    Runnable runnables[] = new ESPTestRunnable[THREADS_NUM];
    try {
        for (int i = 0; i < THREADS_NUM; i++) {
            runnables[i] = new ESPTestRunnable(processor, i);
            threadPool.submit(runnables[i]);
        }
        threadPool.shutdown();
        threadPool.awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        fail("InterruptedException: " + e.getMessage());
    }
    assertTrue("ExecutorService failed to shut down properly", threadPool.isShutdown());

    // processor.stop();
    try {
        Thread.sleep(3000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
    assertEquals(THREADS_NUM, sink.getCount());
    testLogger.info("sink first, last = [" + sink.getIds().first() + "," + sink.getIds().last() + "]");
}
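
Note that isShutdown() returns true as soon as shutdown() has been called, regardless of whether tasks completed, so the assertion above only verifies that shutdown was requested. If the intent is to check that all tasks finished within the timeout (an assumption about the test's intent), isTerminated() is the stronger form:

    assertTrue("tasks still running after the timeout", threadPool.isTerminated());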

From source file:org.apache.carbondata.sdk.file.ConcurrentAvroSdkWriterTest.java

@Test
public void testWriteFiles() throws IOException {
    String path = "./testWriteFiles";
    FileUtils.deleteDirectory(new File(path));

    String mySchema = "{" + "  \"name\": \"address\", " + "   \"type\": \"record\", " + "    \"fields\": [  "
            + "  { \"name\": \"name\", \"type\": \"string\"}, " + "  { \"name\": \"age\", \"type\": \"int\"}, "
            + "  { " + "    \"name\": \"address\", " + "      \"type\": { " + "    \"type\" : \"record\", "
            + "        \"name\" : \"my_address\", " + "        \"fields\" : [ "
            + "    {\"name\": \"street\", \"type\": \"string\"}, "
            + "    {\"name\": \"city\", \"type\": \"string\"} " + "  ]} " + "  } " + "] " + "}";

    String json = "{\"name\":\"bob\", \"age\":10, \"address\" : {\"street\":\"abc\", \"city\":\"bang\"}}";

    // conversion to GenericData.Record
    org.apache.avro.Schema avroSchema = new org.apache.avro.Schema.Parser().parse(mySchema);
    GenericData.Record record = TestUtil.jsonToAvro(json, mySchema);

    ExecutorService executorService = Executors.newFixedThreadPool(numOfThreads);
    try {
        CarbonWriterBuilder builder = CarbonWriter.builder().outputPath(path).withThreadSafe(numOfThreads);
        CarbonWriter writer = builder.withAvroInput(avroSchema).writtenBy("ConcurrentAvroSdkWriterTest")
                .build();
        // write in multi-thread
        for (int i = 0; i < numOfThreads; i++) {
            executorService.submit(new WriteLogic(writer, record));
        }
        executorService.shutdown();
        executorService.awaitTermination(2, TimeUnit.HOURS);
        writer.close();
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    }

    // read the files and verify the count
    CarbonReader reader;
    try {
        reader = CarbonReader.builder(path, "_temp2122").projection(new String[] { "name", "age" }).build();
        int i = 0;
        while (reader.hasNext()) {
            Object[] row = (Object[]) reader.readNextRow();
            i++;
        }
        Assert.assertEquals(i, numOfThreads * recordsPerItr);
        reader.close();
    } catch (InterruptedException e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    }

    FileUtils.deleteDirectory(new File(path));
}

From source file:edu.cmu.cs.lti.ark.fn.Semafor.java

/**
 * Reads conll sentences, parses them, and writes the json-serialized results.
 *
 * @param inputSupplier where to read conll sentences from
 * @param outputSupplier where to write the results to
 * @param numThreads the number of threads to use
 * @throws IOException
 * @throws InterruptedException
 */
public void runParser(final InputSupplier<? extends Readable> inputSupplier,
        final OutputSupplier<? extends Writer> outputSupplier, final int numThreads)
        throws IOException, InterruptedException {
    // use the producer-worker-consumer pattern to parse all sentences in multiple threads, while keeping
    // output in order.
    final BlockingQueue<Future<Optional<SemaforParseResult>>> results = Queues
            .newLinkedBlockingDeque(5 * numThreads);
    final ExecutorService workerThreadPool = newFixedThreadPool(numThreads);
    // try to shutdown gracefully. don't worry too much if it doesn't work
    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                workerThreadPool.shutdown();
                workerThreadPool.awaitTermination(5, TimeUnit.SECONDS);
            } catch (InterruptedException ignored) {
            }
        }
    }));

    final PrintWriter output = new PrintWriter(outputSupplier.getOutput());
    try {
        // Start thread to fetch computed results and write to file
        final Thread consumer = new Thread(new Runnable() {
            @Override
            public void run() {
                while (!Thread.currentThread().isInterrupted()) {
                    try {
                        final Optional<SemaforParseResult> oResult = results.take().get();
                        if (!oResult.isPresent())
                            break; // got poison pill. we're done
                        output.println(oResult.get().toJson());
                        output.flush();
                    } catch (Exception e) {
                        e.printStackTrace();
                        throw new RuntimeException(e);
                    }
                }
            }
        });
        consumer.start();
        // in main thread, put placeholders on results queue (so results stay in order), then
        // tell a worker thread to fill up the placeholder
        final SentenceCodec.SentenceIterator sentences = ConllCodec.readInput(inputSupplier.getInput());
        try {
            int i = 0;
            while (sentences.hasNext()) {
                final Sentence sentence = sentences.next();
                final int sentenceId = i;
                results.put(workerThreadPool.submit(new Callable<Optional<SemaforParseResult>>() {
                    @Override
                    public Optional<SemaforParseResult> call() throws Exception {
                        final long start = System.currentTimeMillis();
                        try {
                            final SemaforParseResult result = parseSentence(sentence);
                            final long end = System.currentTimeMillis();
                            System.err.printf("parsed sentence %d in %d millis.%n", sentenceId, end - start);
                            return Optional.of(result);
                        } catch (Exception e) {
                            e.printStackTrace();
                            throw e;
                        }
                    }
                }));
                i++;
            }
            // put a poison pill on the queue to signal that we're done
            results.put(workerThreadPool.submit(new Callable<Optional<SemaforParseResult>>() {
                @Override
                public Optional<SemaforParseResult> call() throws Exception {
                    return Optional.absent();
                }
            }));
            workerThreadPool.shutdown();
        } finally {
            closeQuietly(sentences);
        }
        // wait for consumer to finish
        consumer.join();
    } finally {
        closeQuietly(output);
    }
    System.err.println("Done.");
}
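
Note that the main path of runParser calls shutdown() but never waits on the pool directly: orderly completion is guaranteed by consumer.join(), because the consumer drains every queued future, including the poison pill, before the join returns. The awaitTermination call appears only in the JVM shutdown hook, as a best-effort cleanup when the process is killed mid-run.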

From source file:com.hygenics.parser.Upload.java

private void doScp() {
    ArrayList<String> files = new ArrayList<String>();
    File fp = new File(localPath);

    if (fp.isDirectory()) {
        for (String f : fp.list()) {
            files.add(localPath + f);
        }
    } else {
        files.add(localPath);
    }

    int p = 0;
    int partsize = files.size() / MAXFILESPERTHREAD;
    int offset = 0;

    if (partsize < 100) {
        partsize = files.size();
    }

    ExecutorService exec = Executors.newFixedThreadPool(this.numthreads);

    do {
        List<String> subset = files.subList(offset, Math.min(offset + partsize, files.size()));
        exec.execute(new SCP(subset, this.remotePath));

        p++;
        if (p == numthreads) {
            try {
                exec.awaitTermination(timeout, TimeUnit.MILLISECONDS);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            p = 0;
        }
        offset += partsize;
    } while (offset < files.size());

    if (p > 0) {
        try {
            exec.shutdown(); // shut down first; otherwise the executor can never reach the terminated state
            exec.awaitTermination(timeout, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }

}
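
A caveat with this throttling scheme: awaitTermination can only return true once the executor has terminated, and termination never begins until shutdown() is called, so each in-loop call simply blocks for the full timeout rather than detecting that a batch has finished. A simpler approach lets the fixed-size pool bound concurrency and blocks on invokeAll, which returns only once every task has completed. A minimal sketch reusing this method's names (InterruptedException handling elided):

ExecutorService exec = Executors.newFixedThreadPool(this.numthreads);
List<Callable<Object>> tasks = new ArrayList<>();
for (int offset = 0; offset < files.size(); offset += partsize) {
    List<String> subset = files.subList(offset, Math.min(offset + partsize, files.size()));
    tasks.add(Executors.callable(new SCP(subset, this.remotePath)));
}
exec.invokeAll(tasks); // blocks until every SCP task has completed
exec.shutdown();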