Example usage for java.util.concurrent ExecutorService awaitTermination

Introduction

This page lists example usages of java.util.concurrent ExecutorService awaitTermination, drawn from open-source projects.

Prototype

boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;

Document

Blocks until all tasks have completed execution after a shutdown request, or the timeout occurs, or the current thread is interrupted, whichever happens first.
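
Before the project examples below, here is a minimal, self-contained sketch of the two-phase shutdown idiom that the ExecutorService Javadoc recommends around awaitTermination. The class name, pool size, task bodies, and timeouts are illustrative placeholders:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationSketch {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 8; i++) {
            final int id = i;
            pool.execute(() -> System.out.println("task " + id + " ran on " + Thread.currentThread().getName()));
        }
        shutdownAndAwaitTermination(pool);
    }

    // Phase 1: stop accepting new tasks and wait for queued ones to finish.
    // Phase 2: if the timeout elapses first, interrupt whatever is still running.
    static void shutdownAndAwaitTermination(ExecutorService pool) {
        pool.shutdown(); // no new tasks; previously submitted tasks still run
        try {
            // awaitTermination returns false if the timeout elapsed before termination
            if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
                pool.shutdownNow(); // cancel currently executing tasks
                if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
                    System.err.println("Pool did not terminate");
                }
            }
        } catch (InterruptedException ie) {
            pool.shutdownNow();
            Thread.currentThread().interrupt(); // preserve the interrupt status
        }
    }
}

Note that awaitTermination does not itself initiate a shutdown: shutdown() or shutdownNow() must be called first, otherwise the call simply waits out the full timeout and returns false. Several of the examples below ignore the boolean result; checking it, as above, is the only way to tell a timeout apart from normal completion.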

Usage

From source file:org.latticesoft.app.ThreadCommand.java

public Object execute(Object o) throws CommandException {
    Map map = null;
    if (o != null && o instanceof Map) {
        map = (Map) o;
    } else {
        map = new HashMap();
    }
    try {
        ExecutorService es = Executors.newFixedThreadPool(threadCount);
        if (log.isInfoEnabled()) {
            log.info("JobSize: " + this.jobs.size());
        }
        for (int i = 0; i < jobs.size(); i++) {
            Runnable r = (Runnable) jobs.get(i);
            if (r instanceof BeanCommand) {
                BeanCommand cmd = (BeanCommand) r;
                cmd.setExecuteParam(map);
            }
            es.execute(r);
        }
        if (this.waitFor && this.timeout > 0 && this.timeoutUnit != null) {
            es.shutdown();
            long l = System.currentTimeMillis();
            if (log.isInfoEnabled()) {
                log.info("Begin of waiting...");
            }
            es.awaitTermination(this.timeout, this.timeoutUnit);
            long l2 = System.currentTimeMillis();
            if (log.isInfoEnabled()) {
                log.info("End of waiting..." + (l2 - l));
            }
        }
        //es.shutdown();
    } catch (Exception e) {
        if (log.isErrorEnabled()) {
            log.error(e);
        }
    }
    if (log.isDebugEnabled()) {
        log.debug("end!");
    }
    return map;
}
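
The snippet above ignores the boolean returned by awaitTermination, so after the "End of waiting" log line there is no way to tell whether the jobs actually finished or the timeout simply elapsed. A hedged variation of the waiting block, reusing the snippet's own fields (es, timeout, timeoutUnit, log), might look like:

es.shutdown();
// awaitTermination returns false when the timeout elapses before all jobs complete
if (!es.awaitTermination(this.timeout, this.timeoutUnit)) {
    if (log.isWarnEnabled()) {
        log.warn("Timed out waiting for jobs; forcing shutdown");
    }
    es.shutdownNow(); // interrupt any jobs still running
}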

From source file:info.magnolia.imaging.caching.CachingImageStreamerRepositoryTest.java

@Test
public void testRequestForSimilarUncachedImageOnlyGeneratesItOnce() throws Exception {
    final HierarchyManager srcHM = MgnlContext.getHierarchyManager("website");
    final String srcPath = "/foo/bar";
    ContentUtil.createPath(srcHM, srcPath);

    // ParameterProvider for tests - return a new instance of the same node every time
    // if we returned the same src instance every time, the purpose of this test would be moot
    final ParameterProviderFactory<Object, Content> ppf = new TestParameterProviderFactory(srcHM, srcPath);

    final OutputFormat png = new OutputFormat();
    png.setFormatName("png");

    final BufferedImage dummyImg = ImageIO.read(getClass().getResourceAsStream("/funnel.gif"));
    assertNotNull("Couldn't load dummy test image", dummyImg);

    final ImageGenerator<ParameterProvider<Content>> generator = mock(ImageGenerator.class);
    when(generator.getParameterProviderFactory()).thenReturn(ppf);
    when(generator.getName()).thenReturn("test");
    when(generator.getOutputFormat(isA(ParameterProvider.class))).thenReturn(png);

    // aaaaand finally, here's the real reason for this test !
    when(generator.generate(isA(ParameterProvider.class))).thenReturn(dummyImg);

    // yeah, we're using a "wrong" workspace for the image cache, to avoid having to set up a custom one in this test
    final HierarchyManager hm = MgnlContext.getHierarchyManager("config");
    final ImageStreamer streamer = new CachingImageStreamer(hm, ppf.getCachingStrategy(),
            new DefaultImageStreamer());

    // Generator instances will always be the same (including paramProvFac)
    // since they are instantiated with the module config and c2b.
    // ParamProv is a new instance every time.
    // streamer can (must) be the same - one single HM, one cache.

    // thread pool of 10, launching 8 requests, can we hit some concurrency please ?
    final ExecutorService executor = Executors.newFixedThreadPool(10);
    final ByteArrayOutputStream[] outs = new ByteArrayOutputStream[8];
    final Future[] futures = new Future[8];
    for (int i = 0; i < outs.length; i++) {
        outs[i] = new ByteArrayOutputStream();
        futures[i] = executor.submit(new TestJob(generator, streamer, outs[i]));
    }
    executor.shutdown();
    executor.awaitTermination(30, TimeUnit.SECONDS);

    for (Future<?> future : futures) {
        assertTrue(future.isDone());
        assertFalse(future.isCancelled());
        // ignore the results of TestJob - all we care about is if an exception was thrown
        // and if there was any, it is kept in Future until we call Future.get()
        future.get();
    }

    final NodeData cachedNodeData = hm.getNodeData("/test/website/foo/bar/generated-image");
    // update node meta data
    Content cachedNode = hm.getContent("/test/website/foo/bar");
    cachedNode.getMetaData().setModificationDate();
    cachedNode.save();
    final InputStream res = cachedNodeData.getStream();
    final ByteArrayOutputStream cachedOut = new ByteArrayOutputStream();
    IOUtils.copy(res, cachedOut);

    // assert all outs are the same
    for (int i = 1; i < outs.length; i++) {
        // TODO assert they're all equals byte to byte to the source? or in size? can't as-is since we convert...
        final byte[] a = outs[i - 1].toByteArray();
        final byte[] b = outs[i].toByteArray();
        assertTrue(a.length > 0);
        assertEquals("Different sizes (" + Math.abs(a.length - b.length) + " bytes diff.) with i=" + i,
                a.length, b.length);
        assertTrue("not equals for outs/" + i, Arrays.equals(a, b));
        outs[i - 1] = null; // cleanup all those byte[], or we'll soon run out of memory
    }
    assertTrue("failed comparing last thread's result with what we got from hierarchyManager",
            Arrays.equals(outs[outs.length - 1].toByteArray(), cachedOut.toByteArray()));
    outs[outs.length - 1] = null;

    // now start again another bunch of requests... they should ALL get their results from the cache
    final ExecutorService executor2 = Executors.newFixedThreadPool(10);
    final ByteArrayOutputStream[] outs2 = new ByteArrayOutputStream[8];
    final Future[] futures2 = new Future[8];
    for (int i = 0; i < outs2.length; i++) {
        outs2[i] = new ByteArrayOutputStream();
        futures2[i] = executor2.submit(new TestJob(generator, streamer, outs2[i]));
    }
    executor2.shutdown();
    executor2.awaitTermination(30, TimeUnit.SECONDS);

    for (Future<?> future : futures2) {
        assertTrue(future.isDone());
        assertFalse(future.isCancelled());
        // ignore the results of TestJob - all we care about is if an exception was thrown
        // and if there was any, it is kept in Future until we call Future.get()
        future.get();
    }

    final NodeData cachedNodeData2 = hm.getNodeData("/test/website/foo/bar/generated-image");
    final InputStream res2 = cachedNodeData2.getStream();
    final ByteArrayOutputStream cachedOut2 = new ByteArrayOutputStream();
    IOUtils.copy(res2, cachedOut2);

    // assert all outs are the same
    for (int i = 1; i < outs2.length; i++) {
        // TODO assert they're all equals byte to byte to the source? or in size? can't as-is since we re-save..
        final byte[] a = outs2[i - 1].toByteArray();
        final byte[] b = outs2[i].toByteArray();
        assertTrue(a.length > 0);
        assertEquals("Different sizes (" + Math.abs(a.length - b.length) + " bytes diff.) with i=" + i,
                a.length, b.length);
        assertTrue("not equals for outs2/" + i, Arrays.equals(a, b));
        outs2[i - 1] = null;
    }
    assertTrue("failed comparing last thread's result with what we got from hierarchyManager",
            Arrays.equals(outs2[outs2.length - 1].toByteArray(), cachedOut2.toByteArray()));

    outs2[outs2.length - 1] = null;
}

From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessSemaphore.java

@Test
public void testThreads() throws Exception {
    final int THREAD_QTY = 10;

    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    client.start();
    try {
        final InterProcessSemaphoreV2 semaphore = new InterProcessSemaphoreV2(client, "/test", 1);
        ExecutorService service = Executors.newFixedThreadPool(THREAD_QTY);
        for (int i = 0; i < THREAD_QTY; ++i) {
            service.submit(new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    Lease lease = semaphore.acquire();
                    try {
                        Thread.sleep(1);
                    } finally {
                        lease.close();
                    }
                    return null;
                }
            });
        }
        service.shutdown();
        Assert.assertTrue(service.awaitTermination(10, TimeUnit.SECONDS));
    } finally {
        client.close();
    }
}

From source file:org.apache.hive.hcatalog.templeton.tool.LaunchMapper.java

@Override
public void run(Context context) throws IOException, InterruptedException {

    Configuration conf = context.getConfiguration();
    LauncherDelegator.JobType jobType = LauncherDelegator.JobType.valueOf(conf.get(JOB_TYPE));
    String statusdir = conf.get(STATUSDIR_NAME);
    if (statusdir != null) {
        try {
            statusdir = TempletonUtils.addUserHomeDirectoryIfApplicable(statusdir, conf.get("user.name"));
        } catch (URISyntaxException e) {
            String msg = "Invalid status dir URI";
            LOG.error(msg, e);
            throw new IOException(msg, e);
        }
    }

    // Try to reconnect to a child job if one is found
    if (tryReconnectToRunningJob(conf, context, jobType, statusdir)) {
        return;
    }

    // Kill previously launched child MR jobs started by this launcher to prevent having
    // same jobs running side-by-side
    killLauncherChildJobs(conf, context.getJobID().toString());

    // Start the job
    Process proc = startJob(conf, context.getJobID().toString(), conf.get("user.name"),
            conf.get(OVERRIDE_CLASSPATH));

    ExecutorService pool = Executors.newCachedThreadPool();
    executeWatcher(pool, conf, context.getJobID(), proc.getInputStream(), statusdir, STDOUT_FNAME);
    executeWatcher(pool, conf, context.getJobID(), proc.getErrorStream(), statusdir, STDERR_FNAME);
    KeepAlive keepAlive = startCounterKeepAlive(pool, context);

    proc.waitFor();
    keepAlive.sendReport = false;
    pool.shutdown();
    if (!pool.awaitTermination(WATCHER_TIMEOUT_SECS, TimeUnit.SECONDS)) {
        pool.shutdownNow();
    }

    updateJobStateToDoneAndWriteExitValue(conf, statusdir, context.getJobID().toString(), proc.exitValue());

    Boolean enablelog = Boolean.parseBoolean(conf.get(ENABLE_LOG));
    if (enablelog && TempletonUtils.isset(statusdir)) {
        LOG.info("templeton: collecting logs for " + context.getJobID().toString() + " to " + statusdir
                + "/logs");
        LogRetriever logRetriever = new LogRetriever(statusdir, jobType, conf);
        logRetriever.run();
    }
}

From source file:org.apache.bookkeeper.tools.perf.dlog.PerfSegmentReader.java

@Override
protected void execute(Namespace namespace) throws Exception {
    List<DistributedLogManager> managers = new ArrayList<>(flags.numLogs);
    for (int i = 0; i < flags.numLogs; i++) {
        String logName = String.format(flags.logName, i);
        managers.add(namespace.openLog(logName));
    }
    log.info("Successfully open {} logs", managers.size());

    // Get all the log segments
    final List<Pair<DistributedLogManager, LogSegmentMetadata>> segments = managers.stream()
            .flatMap(manager -> {
                try {
                    return manager.getLogSegments().stream().map(segment -> Pair.of(manager, segment));
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            }).collect(Collectors.toList());

    final List<Split> splits = segments.stream()
            .flatMap(entry -> getNumSplits(entry.getLeft(), entry.getRight()).stream())
            .collect(Collectors.toList());

    // register shutdown hook to aggregate stats
    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
        isDone.set(true);
        printAggregatedStats(cumulativeRecorder);
    }));

    ExecutorService executor = Executors.newFixedThreadPool(flags.numThreads);
    try {
        for (int i = 0; i < flags.numThreads; i++) {
            final int idx = i;
            final List<Split> splitsThisThread = splits.stream()
                    .filter(split -> splits.indexOf(split) % flags.numThreads == idx)
                    .collect(Collectors.toList());
            executor.submit(() -> {
                try {
                    read(splitsThisThread);
                } catch (Exception e) {
                    log.error("Encountered error at writing records", e);
                }
            });
        }
        log.info("Started {} write threads", flags.numThreads);
        reportStats();
    } finally {
        executor.shutdown();
        if (!executor.awaitTermination(5, TimeUnit.SECONDS)) {
            executor.shutdownNow();
        }
        managers.forEach(manager -> manager.asyncClose());
    }
}

From source file:com.comcast.cdn.traffic_control.traffic_router.core.dns.ZoneManager.java

protected static void initZoneCache(final TrafficRouter tr) {
    synchronized (ZoneManager.class) {
        final CacheRegister cacheRegister = tr.getCacheRegister();
        final JSONObject config = cacheRegister.getConfig();

        int poolSize = 1;
        final double scale = config.optDouble("zonemanager.threadpool.scale", 0.75);
        final int cores = Runtime.getRuntime().availableProcessors();

        if (cores > 2) {
            final Double s = Math.floor((double) cores * scale);

            if (s.intValue() > 1) {
                poolSize = s.intValue();
            }
        }

        final ExecutorService initExecutor = Executors.newFixedThreadPool(poolSize);

        final ExecutorService ze = Executors.newFixedThreadPool(poolSize);
        final ScheduledExecutorService me = Executors.newScheduledThreadPool(2); // 2 threads: one to refresh static zones, one for dynamic zones
        final int maintenanceInterval = config.optInt("zonemanager.cache.maintenance.interval", 300); // default 5 minutes
        final String dspec = "expireAfterAccess="
                + config.optString("zonemanager.dynamic.response.expiration", "300s"); // default to 5 minutes

        final LoadingCache<ZoneKey, Zone> dzc = createZoneCache(ZoneCacheType.DYNAMIC,
                CacheBuilderSpec.parse(dspec));
        final LoadingCache<ZoneKey, Zone> zc = createZoneCache(ZoneCacheType.STATIC);

        initZoneDirectory();

        try {
            LOGGER.info("Generating zone data");
            generateZones(tr, zc, dzc, initExecutor);
            initExecutor.shutdown();
            initExecutor.awaitTermination(5, TimeUnit.MINUTES);
            LOGGER.info("Zone generation complete");
        } catch (final InterruptedException ex) {
            LOGGER.warn("Initialization of zone data exceeded time limit of 5 minutes; continuing", ex);
        } catch (IOException ex) {
            LOGGER.fatal("Caught fatal exception while generating zone data!", ex);
        }

        me.scheduleWithFixedDelay(getMaintenanceRunnable(dzc, ZoneCacheType.DYNAMIC, maintenanceInterval), 0,
                maintenanceInterval, TimeUnit.SECONDS);
        me.scheduleWithFixedDelay(getMaintenanceRunnable(zc, ZoneCacheType.STATIC, maintenanceInterval), 0,
                maintenanceInterval, TimeUnit.SECONDS);

        final ExecutorService tze = ZoneManager.zoneExecutor;
        final ScheduledExecutorService tme = ZoneManager.zoneMaintenanceExecutor;
        final LoadingCache<ZoneKey, Zone> tzc = ZoneManager.zoneCache;
        final LoadingCache<ZoneKey, Zone> tdzc = ZoneManager.dynamicZoneCache;

        ZoneManager.zoneExecutor = ze;
        ZoneManager.zoneMaintenanceExecutor = me;
        ZoneManager.dynamicZoneCache = dzc;
        ZoneManager.zoneCache = zc;

        if (tze != null) {
            tze.shutdownNow();
        }

        if (tme != null) {
            tme.shutdownNow();
        }

        if (tzc != null) {
            tzc.invalidateAll();
        }

        if (tdzc != null) {
            tdzc.invalidateAll();
        }
    }
}

From source file:org.orekit.bodies.CelestialBodyFactoryTest.java

private void checkMultiThread(final int threads, final int runs) throws OrekitException {

    final AtomicReference<OrekitException> caught = new AtomicReference<OrekitException>();
    ExecutorService executorService = Executors.newFixedThreadPool(threads);

    List<Future<?>> results = new ArrayList<Future<?>>();
    for (int i = 0; i < threads; i++) {
        Future<?> result = executorService.submit(new Runnable() {
            public void run() {
                try {
                    for (int run = 0; run < runs; run++) {
                        CelestialBody mars = CelestialBodyFactory.getBody(CelestialBodyFactory.MARS);
                        Assert.assertNotNull(mars);
                        CelestialBodyFactory.clearCelestialBodyLoaders();
                    }
                } catch (OrekitException oe) {
                    caught.set(oe);
                }
            }
        });
        results.add(result);
    }

    try {
        executorService.shutdown();
        executorService.awaitTermination(5, TimeUnit.SECONDS);
    } catch (InterruptedException ie) {
        Assert.fail(ie.getLocalizedMessage());
    }

    for (Future<?> result : results) {
        Assert.assertTrue("Not all threads finished -> possible deadlock", result.isDone());
    }

    if (caught.get() != null) {
        throw caught.get();
    }
}

From source file:org.keycloak.testsuite.admin.ComponentsTest.java

private void testConcurrency(BiConsumer<ExecutorService, Integer> taskCreator) throws InterruptedException {
    ExecutorService s = Executors.newFixedThreadPool(NUMBER_OF_THREADS, new BasicThreadFactory.Builder()
            .daemon(true).uncaughtExceptionHandler((t, e) -> log.error(e.getMessage(), e)).build());
    this.remainingDeleteSubmissions = new CountDownLatch(NUMBER_OF_TASKS);

    for (int i = 0; i < NUMBER_OF_TASKS; i++) {
        taskCreator.accept(s, i);
    }

    try {
        assertTrue("Did not create all components in time",
                this.remainingDeleteSubmissions.await(30, TimeUnit.SECONDS));
        s.shutdown();
        assertTrue("Did not finish before timeout", s.awaitTermination(30, TimeUnit.SECONDS));
    } finally {
        s.shutdownNow();
    }
}

From source file:org.apache.hadoop.hbase.util.RegionMover.java

/**
 * Loads the specified {@link #hostname} with regions listed in the {@link #filename}. The RegionMover
 * object has to be created using {@link #RegionMover(RegionMoverBuilder)}.
 * @return true if loading succeeded, false otherwise
 * @throws ExecutionException if the load task failed
 * @throws InterruptedException if the loader thread was interrupted
 * @throws TimeoutException if fetching the load task's result timed out
 */
public boolean load() throws ExecutionException, InterruptedException, TimeoutException {
    setConf();
    ExecutorService loadPool = Executors.newFixedThreadPool(1);
    Future<Boolean> loadTask = loadPool.submit(new Load(this));
    loadPool.shutdown();
    try {
        if (!loadPool.awaitTermination((long) this.timeout, TimeUnit.SECONDS)) {
            LOG.warn("Timed out before finishing the loading operation. Timeout:" + this.timeout + "sec");
            loadPool.shutdownNow();
        }
    } catch (InterruptedException e) {
        loadPool.shutdownNow();
        Thread.currentThread().interrupt();
    }
    try {
        return loadTask.get(5, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        LOG.warn("Interrupted while loading Regions on " + this.hostname, e);
        throw e;
    } catch (ExecutionException e) {
        LOG.error("Error while loading regions on RegionServer " + this.hostname, e);
        throw e;
    }
}

From source file:org.geowebcache.sqlite.SqliteConnectionManagerTest.java

private void genericMultiThreadsTest(int threadsNumber, int workersNumber, long poolSize, File... files)
        throws Exception {
    SqliteConnectionManager connectionManager = new SqliteConnectionManager(poolSize, 10);
    connectionManagersToClean.add(connectionManager);
    ExecutorService executor = Executors.newFixedThreadPool(threadsNumber);
    Random random = new Random();
    List<Future<Tuple<File, String>>> results = new ArrayList<>();
    for (int i = 0; i < workersNumber; i++) {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(String.format("Submitted worker '%d' of '%d'.", i, workersNumber));
        }
        // keep the Future so the verification loop below can inspect each worker's result
        results.add(executor.submit(() -> {
            File file = files[random.nextInt(files.length)];
            String key = UUID.randomUUID().toString();
            return connectionManager.doWork(file, false, connection -> {
                insertInTestTable(connection, key, "value-" + key);
                closeConnectionQuietly(connection);
                return tuple(file, key);
            });
        }));
    }
    executor.shutdown();
    executor.awaitTermination(60, TimeUnit.SECONDS);
    connectionManager.reapAllConnections();
    assertThat(connectionManager.getPool().size(), is(0));
    for (Future<Tuple<File, String>> result : results) {
        File file = result.get().first;
        String key = result.get().second;
        connectionManager.doWork(file, true, connection -> {
            String value = getFromTestTable(connection, key);
            assertThat(value, notNullValue());
            assertThat(value, is("value-" + key));
            closeConnectionQuietly(connection);
        });
    }
}