Example usage for java.util.concurrent ExecutorService awaitTermination

Introduction

This page collects example usages of java.util.concurrent ExecutorService awaitTermination from open-source projects.

Prototype

boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;

Documentation

Blocks until all tasks have completed execution after a shutdown request, or the timeout occurs, or the current thread is interrupted, whichever happens first.
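
For reference, here is a minimal, self-contained sketch of the common shutdown-and-await pattern. The pool size, task count, and 30-second timeout are illustrative assumptions, not values taken from the examples below:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationSketch {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(4); // illustrative pool size
        for (int i = 0; i < 8; i++) {
            final int taskId = i;
            pool.execute(() -> System.out.println("task " + taskId));
        }
        pool.shutdown(); // stop accepting new tasks; already-submitted tasks still run
        try {
            // awaitTermination returns false if the timeout elapsed before all tasks completed.
            if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
                pool.shutdownNow(); // timed out: cancel queued tasks, interrupt running ones
            }
        } catch (InterruptedException e) {
            pool.shutdownNow();
            Thread.currentThread().interrupt(); // preserve the interrupt status
        }
    }
}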

Usage

From source file: com.fluidops.iwb.provider.CkanProvider.java

@Override
public void gather(List<Statement> res) throws Exception {
    // Read CKAN location and establish connection
    URL registryUrl = new URL(config.location);
    HttpURLConnection registryConnection = (HttpURLConnection) registryUrl.openConnection();
    registryConnection.setRequestMethod("GET");

    // Check if connection to CKAN could be established
    if (registryConnection.getResponseCode() != HttpURLConnection.HTTP_OK) {
        String msg = String.format("Connection to the CKAN registry could not be established. (%s, %s)",
                registryConnection.getResponseCode(), registryConnection.getResponseMessage());
        logger.info(msg);
        throw new IllegalStateException(msg);
    }
    logger.trace("Connection to CKAN established successfully.");

    String siteContent = GenUtil.readUrl(registryConnection.getInputStream());

    JSONObject groupAsJson = null;
    JSONArray packageListJsonArray = null;
    try {
        groupAsJson = new JSONObject(new JSONTokener(siteContent));
        packageListJsonArray = groupAsJson.getJSONArray("packages");
    } catch (JSONException e) {
        String msg = String.format("Returned content %s is not valid JSON. Check if the registry URL is valid.",
                siteContent);
        logger.debug(msg);
        throw new IllegalStateException(msg);
    }
    logger.trace("Extracted JSON from CKAN successfully");

    // Create metadata about LOD catalog
    res.add(ProviderUtils.createStatement(CKAN.CKAN_CATALOG, RDF.TYPE, Vocabulary.DCAT.CATALOG));
    res.add(ProviderUtils.createStatement(CKAN.CKAN_CATALOG, RDFS.LABEL, CKAN.CKAN_CATALOG_LABEL));

    // Extract metadata for individual data sets listed in CKAN
    MultiThreadedHttpConnectionManager connectionManager = null;
    ExecutorService pool = null;
    try {
        pool = Executors.newFixedThreadPool(10);
        connectionManager = new MultiThreadedHttpConnectionManager();
        HttpClient client = new HttpClient(connectionManager);

        List<Statement> synchedList = Collections.synchronizedList(res);
        for (int i = 0; i < packageListJsonArray.length(); i++) {
            String host = "http://www.ckan.net/package/" + packageListJsonArray.get(i).toString();
            String baseUri = findBaseUri(
                    "http://www.ckan.net/api/rest/package/" + packageListJsonArray.get(i).toString());
            baseUri = (baseUri == null) ? host : baseUri;
            pool.execute(new MetadataReader(client, host, baseUri, CKAN.CKAN_CATALOG, synchedList));
        }
    } finally {
        if (pool != null) {
            pool.shutdown();
            pool.awaitTermination(4, TimeUnit.HOURS);
        }
        if (connectionManager != null)
            connectionManager.shutdown();
    }
}
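
Note that this example discards the boolean returned by awaitTermination, so an expired four-hour timeout passes silently. A variant of the finally block that surfaces the timeout could look like the following sketch; the warning message is an illustrative assumption, not part of the original source:

    } finally {
        if (pool != null) {
            pool.shutdown();
            // awaitTermination returns false if the timeout elapsed before termination
            if (!pool.awaitTermination(4, TimeUnit.HOURS)) {
                logger.warn("Timed out waiting for CKAN metadata readers to finish"); // illustrative message
            }
        }
        if (connectionManager != null)
            connectionManager.shutdown();
    }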

From source file: info.magnolia.imaging.caching.CachingImageStreamerRepositoryTest.java

/**
 * This test is not executed by default - too long!
 * Used to reproduce the "session already closed issue", see MGNLIMG-59.
 * Set the "expiration" property of the jobs map in CachingImageStreamer to a longer value
 * to have more chances of reproducing the problem.
 */
@Ignore
@Test
public void testConcurrencyAndJCRSessions() throws Exception {
    final HierarchyManager srcHM = MgnlContext.getHierarchyManager("website");
    final String srcPath = "/foo/bar";
    ContentUtil.createPath(srcHM, srcPath);

    // ParameterProvider for tests - return a new instance of the same node every time;
    // if we returned the same src instance every time, this test would be pointless
    final ParameterProviderFactory<Object, Content> ppf = new TestParameterProviderFactory(srcHM, srcPath);

    final OutputFormat png = new OutputFormat();
    png.setFormatName("png");

    final ImageOperationChain<ParameterProvider<Content>> generator = new ImageOperationChain<ParameterProvider<Content>>();
    final URLImageLoader<ParameterProvider<Content>> load = new URLImageLoader<ParameterProvider<Content>>();
    load.setUrl(getClass().getResource("/funnel.gif").toExternalForm());
    generator.addOperation(load);
    generator.setOutputFormat(png);
    generator.setName("foo blob bar");
    generator.setParameterProviderFactory(ppf);

    // yeah, we're using a "wrong" workspace for the image cache, to avoid having to set up a custom one in this test
    final HierarchyManager hm = MgnlContext.getHierarchyManager("config");

    final ImageStreamer streamer = new CachingImageStreamer(hm, ppf.getCachingStrategy(),
            new DefaultImageStreamer());

    // thread pool of 10, launching 8 requests, can we hit some concurrency please?
    final ExecutorService executor = Executors.newFixedThreadPool(10);
    final ByteArrayOutputStream[] outs = new ByteArrayOutputStream[8];
    final Future[] futures = new Future[8];
    for (int i = 0; i < outs.length; i++) {
        final int ii = i;
        outs[i] = new ByteArrayOutputStream();
        futures[i] = executor.submit(new Runnable() {
            @Override
            public void run() {
                final ParameterProvider p = generator.getParameterProviderFactory()
                        .newParameterProviderFor(null);
                try {
                    streamer.serveImage(generator, p, outs[ii]);
                } catch (Exception e) {
                    throw new RuntimeException(e); // TODO
                }
            }
        });
    }
    executor.shutdown();
    executor.awaitTermination(30, TimeUnit.SECONDS);

    for (Future<?> future : futures) {
        assertTrue(future.isDone());
        assertFalse(future.isCancelled());
        // ignore the results of TestJob - but if there was an exception thrown by TestJob.call(),
        // it is only thrown back at us when we call get() below. (so the test will fail badly if the job threw an exception)
        Object ignored = future.get();
    }

    shutdownRepository(true);

    // sleep for a while so that the jobs map's expiration thread can kick in!
    Thread.sleep(10000);
}

From source file: org.apache.carbondata.sdk.file.ConcurrentSdkReaderTest.java

@Test
public void testReadParallely() throws IOException, InterruptedException {
    int numFiles = 10;
    int numRowsPerFile = 10;
    short numThreads = 4;
    writeDataMultipleFiles(numFiles, numRowsPerFile);
    long count;

    // Sequential Reading
    CarbonReader reader = CarbonReader.builder(dataDir).build();
    try {
        count = 0;
        long start = System.currentTimeMillis();
        while (reader.hasNext()) {
            reader.readNextRow();
            count += 1;
        }
        long end = System.currentTimeMillis();
        System.out.println("[Sequential read] Time: " + (end - start) + " ms");
        Assert.assertEquals(numFiles * numRowsPerFile, count);
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    } finally {
        reader.close();
    }

    // Concurrent Reading
    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    try {
        CarbonReader reader2 = CarbonReader.builder(dataDir).build();
        List<CarbonReader> multipleReaders = reader2.split(numThreads);
        try {
            List<ReadLogic> tasks = new ArrayList<>();
            List<Future<Long>> results;
            count = 0;

            for (CarbonReader reader_i : multipleReaders) {
                tasks.add(new ReadLogic(reader_i));
            }
            long start = System.currentTimeMillis();
            results = executorService.invokeAll(tasks);
            for (Future result_i : results) {
                count += (long) result_i.get();
            }
            long end = System.currentTimeMillis();
            System.out.println("[Parallel read] Time: " + (end - start) + " ms");
            Assert.assertEquals(numFiles * numRowsPerFile, count);
        } catch (Exception e) {
            e.printStackTrace();
            Assert.fail(e.getMessage());
        }
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    } finally {
        executorService.shutdown();
        executorService.awaitTermination(10, TimeUnit.MINUTES);
    }
}

From source file: es.us.lsi.restest.engine.UnirestTest.java

private void makeParallelRequests() throws InterruptedException {
    ExecutorService newFixedThreadPool = Executors.newFixedThreadPool(10);
    final AtomicInteger counter = new AtomicInteger(0);
    for (int i = 0; i < 200; i++) {
        newFixedThreadPool.execute(new Runnable() {
            public void run() {
                try {
                    Unirest.get("http://httpbin.org/get").queryString("index", counter.incrementAndGet())
                            .asJson();
                } catch (UnirestException e) {
                    throw new RuntimeException(e);
                }
            }
        });
    }

    newFixedThreadPool.shutdown();
    newFixedThreadPool.awaitTermination(10, TimeUnit.MINUTES);
}

From source file: org.copperengine.core.persistent.cassandra.CassandraStorage.java

@Override
public void initialize(final HybridDBStorageAccessor internalStorageAccessor, int numberOfThreads)
        throws Exception {
    createSchema(session, cluster);

    prepareStatements();

    // TODO instead of blocking the startup until all active workflow instances are read and resumed, it is
    // sufficient to read just their existing IDs in COP_WFI_ID and resume them in the background while already
    // starting the engine and accepting new instances.

    if (numberOfThreads <= 0)
        numberOfThreads = 1;
    logger.info("Starting to initialize with {} threads ...", numberOfThreads);
    final ExecutorService execService = Executors.newFixedThreadPool(numberOfThreads);
    final long startTS = System.currentTimeMillis();
    final ResultSet rs = session.execute(preparedStatements.get(CQL_SEL_WFI_ID_ALL).bind().setFetchSize(500)
            .setConsistencyLevel(ConsistencyLevel.ONE));
    int counter = 0;
    Row row;
    while ((row = rs.one()) != null) {
        counter++;
        final String wfId = row.getString("ID");
        execService.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    resume(wfId, internalStorageAccessor);
                } catch (Exception e) {
                    logger.error("resume failed", e);
                }
            }
        });
    }
    logger.info("Read {} IDs in {} msec", counter, System.currentTimeMillis() - startTS);
    execService.shutdown();
    final boolean timeoutHappened = !execService.awaitTermination(initializationTimeoutSeconds,
            TimeUnit.SECONDS);
    if (timeoutHappened) {
        throw new CopperRuntimeException("initialize timed out!");
    }
    logger.info("Finished initialization - read {} rows in {} msec", counter,
            System.currentTimeMillis() - startTS);
    runtimeStatisticsCollector.submit("storage.init", counter, System.currentTimeMillis() - startTS,
            TimeUnit.MILLISECONDS);
}

From source file: org.trnltk.apps.morphology.contextless.parser.CachingMorphologicParserApp.java

@App("Parse sample TBMM Journal with bulk parse")
public void parseTbmmJournal_b0241h_withBulkParse() throws Exception {
    final File tokenizedFile = new File("core/src/test/resources/tokenizer/tbmm_b0241h_tokenized.txt");
    final List<String> lines = Files.readLines(tokenizedFile, Charsets.UTF_8);
    final LinkedList<String> words = new LinkedList<String>();
    final HashSet<String> uniqueWords = new HashSet<String>();
    for (String line : lines) {
        final ArrayList<String> strings = Lists
                .newArrayList(Splitter.on(" ").trimResults().omitEmptyStrings().split(line));
        words.addAll(strings);
        uniqueWords.addAll(strings);
    }

    final int initialL1CacheSize = uniqueWords.size();
    final int maxL1CacheSize = initialL1CacheSize;

    final MorphologicParserCache l1Cache = new LRUMorphologicParserCache(NUMBER_OF_THREADS, initialL1CacheSize,
            maxL1CacheSize);

    final ExecutorService pool = Executors.newFixedThreadPool(NUMBER_OF_THREADS);

    final MorphologicParser[] parsers = new MorphologicParser[NUMBER_OF_THREADS];
    for (int i = 0; i < parsers.length; i++) {
        parsers[i] = new CachingMorphologicParser(new TwoLevelMorphologicParserCache(BULK_SIZE, l1Cache),
                contextlessMorphologicParser, true);
    }

    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    for (int i = 0; i < words.size(); i = i + BULK_SIZE) {
        final MorphologicParser parser = parsers[(i / BULK_SIZE) % NUMBER_OF_THREADS];
        int start = i;
        int end = i + BULK_SIZE < words.size() ? i + BULK_SIZE : words.size();
        final int wordIndex = i;

        final List<String> subWordList = words.subList(start, end);
        pool.execute(new BulkParseCommand(parser, subWordList, wordIndex, false));
    }

    pool.shutdown();
    while (!pool.isTerminated()) {
        System.out.println("Waiting pool to be terminated!");
        pool.awaitTermination(500, TimeUnit.MILLISECONDS);
    }

    stopWatch.stop();

    System.out.println("Total time :" + stopWatch.toString());
    System.out.println("Nr of tokens : " + words.size());
    System.out.println("Avg time : " + (stopWatch.getTime() * 1.0d) / (words.size() * 1.0d) + " ms");
}

From source file: org.sead.nds.repository.BagGenerator.java

private void checkFiles(HashMap<String, String> shaMap, ZipFile zf) {
    ExecutorService executor = Executors.newFixedThreadPool(Repository.getNumThreads());
    ValidationJob.setZipFile(zf);
    ValidationJob.setBagGenerator(this);
    log.info("Validating hashes for zipped data files");
    int i = 0;
    for (Entry<String, String> entry : shaMap.entrySet()) {

        ValidationJob vj = new ValidationJob(entry.getValue(), entry.getKey());
        executor.execute(vj);
        i++;
        if (i % 1000 == 0) {
            log.info("Queuing Hash Validations: " + i);
        }
    }
    log.info("All Hash Validations Queued: " + i);

    executor.shutdown();
    try {
        while (!executor.awaitTermination(10, TimeUnit.MINUTES)) {
            log.debug("Awaiting completion of hash calculations.");
        }
    } catch (InterruptedException e) {
        log.error("Hash Calculations interrupted", e);
    }
    log.info("Hash Validations Completed");
}

From source file: org.apache.falcon.entity.store.ConfigurationStore.java

private void loadEntity(final EntityType type) throws FalconException {
    try {
        final ConcurrentHashMap<String, Entity> entityMap = dictionary.get(type);
        FileStatus[] files = fs.globStatus(new Path(storePath, type.name() + Path.SEPARATOR + "*"));
        if (files != null) {
            final ExecutorService service = Executors.newFixedThreadPool(100);
            for (final FileStatus file : files) {
                service.execute(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            String fileName = file.getPath().getName();
                            String encodedEntityName = fileName.substring(0, fileName.length() - 4); // drop ".xml"
                            String entityName = URLDecoder.decode(encodedEntityName, UTF_8);
                            Entity entity = restore(type, entityName);
                            entityMap.put(entityName, entity);
                        } catch (IOException | FalconException e) {
                            LOG.error("Unable to restore entity of", file);
                        }
                    }
                });
            }
            service.shutdown();
            if (service.awaitTermination(10, TimeUnit.MINUTES)) {
                LOG.info("Restored Configurations for entity type: {} ", type.name());
            } else {
                LOG.warn("Time out happened while waiting for all threads to finish while restoring entities "
                        + "for type: {}", type.name());
            }
            // Checking if all entities were loaded
            if (entityMap.size() != files.length) {
                throw new FalconException("Unable to restore configurations for entity type " + type.name());
            }
            for (Entity entity : entityMap.values()) {
                onReload(entity);
            }
        }
    } catch (IOException e) {
        throw new FalconException("Unable to restore configurations", e);
    } catch (InterruptedException e) {
        throw new FalconException(
                "Failed to restore configurations in 10 minutes for entity type " + type.name());
    }
}

From source file: org.pentaho.reporting.engine.classic.core.testsupport.gold.GoldTestBase.java

protected void runAllGoldReportsInParallel(int threads) throws Exception {
    initializeTestEnvironment();

    final List<Throwable> errors = Collections.synchronizedList(new ArrayList<Throwable>());

    final ExecutorService threadPool = new ThreadPoolExecutor(threads, threads, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(), new TestThreadFactory(), new ThreadPoolExecutor.AbortPolicy());

    List<ExecuteReportRunner> reports = new ArrayList<ExecuteReportRunner>();
    reports.addAll(collectReports("reports", ReportProcessingMode.legacy, errors));
    reports.addAll(collectReports("reports", ReportProcessingMode.migration, errors));
    reports.addAll(collectReports("reports", ReportProcessingMode.current, errors));
    reports.addAll(collectReports("reports-4.0", ReportProcessingMode.migration, errors));
    reports.addAll(collectReports("reports-4.0", ReportProcessingMode.current, errors));

    for (ExecuteReportRunner report : reports) {
        threadPool.submit(report);
    }

    threadPool.shutdown();
    while (threadPool.isTerminated() == false) {
        threadPool.awaitTermination(5, TimeUnit.MINUTES);
    }
    if (errors.isEmpty() == false) {
        Log log = LogFactory.getLog(GoldTestBase.class);
        for (Throwable throwable : errors) {
            log.error("Failed", throwable);
        }
        Assert.fail();
    }
}

From source file: com.opengamma.engine.cache.BerkeleyDBValueIdentifierMapTest.java

@Test(timeOut = 30000)
public void interruptThread() throws Throwable {
    final ExecutorService threads = Executors.newSingleThreadExecutor();
    try {
        final Thread main = Thread.currentThread();
        final Runnable interrupter = new Runnable() {
            @Override
            public void run() {
                try {
                    Thread.sleep(1000);
                    main.interrupt();
                } catch (InterruptedException e) {
                    throw new OpenGammaRuntimeException("Interrupted", e);
                }
            }
        };
        threads.submit(interrupter);
        int count = 0;
        do {
            try {
                getPerformanceTest();
            } catch (OpenGammaRuntimeException e) {
                assertEquals("Interrupted", e.getMessage());
                count++;
                if (count <= 5) {
                    threads.submit(interrupter);
                } else {
                    break;
                }
            }
        } while (true);
    } finally {
        threads.shutdown();
        Thread.interrupted();
        threads.awaitTermination(5, TimeUnit.SECONDS);
    }
}