Example usage for java.util.concurrent ThreadPoolExecutor getTaskCount

Introduction

On this page you can find example usages of java.util.concurrent.ThreadPoolExecutor.getTaskCount().

Prototype

public long getTaskCount() 

Document

Returns the approximate total number of tasks that have ever been scheduled for execution.
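
Because the states of tasks and threads may change dynamically during computation, the returned value is only an approximation; it covers tasks that are queued, running, or already completed. The following minimal, self-contained sketch (not taken from the usage examples below) contrasts getTaskCount() with getCompletedTaskCount():

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class TaskCountDemo {
    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 2, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>());
        for (int i = 0; i < 5; i++) {
            pool.submit(() -> {
                try {
                    Thread.sleep(100);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
        }
        // Five tasks have been scheduled, but only two can run at once, so
        // getTaskCount() already reports 5 while getCompletedTaskCount() lags.
        System.out.println("scheduled: " + pool.getTaskCount());
        System.out.println("completed: " + pool.getCompletedTaskCount());

        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
        // After termination the two counters converge on the same value.
        System.out.println("scheduled: " + pool.getTaskCount());
        System.out.println("completed: " + pool.getCompletedTaskCount());
    }
}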

Usage

From source file:eu.cassandra.sim.utilities.Utils.java

public static void printExecutorSummary(ThreadPoolExecutor executor) {
    System.out.println(String.format(
            "[monitor] [%d/%d] Active: %d, Completed: %d, Task: %d, isShutdown: %s, isTerminated: %s",
            executor.getPoolSize(), executor.getCorePoolSize(), executor.getActiveCount(),
            executor.getCompletedTaskCount(), executor.getTaskCount(), executor.isShutdown(),
            executor.isTerminated()));
}
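Note that each getter above takes its own snapshot, so on a busy pool the printed values may not be mutually consistent; they are suitable for monitoring, not for exact accounting.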

From source file:org.jmangos.commons.threadpool.CommonThreadPoolManager.java

/**
 * @see org.jmangos.commons.threadpool.ThreadPoolManager#fillPoolStats(org.jmangos.commons.threadpool.model.ThreadPoolType)
 */
@Override
public PoolStats fillPoolStats(final ThreadPoolType poolType) {

    ThreadPoolExecutor executor = null;
    switch (poolType) {
    case INSTANT:
        executor = this.instantPool;
        break;
    case SCHEDULED:
    default:
        executor = this.scheduledPool;
        break;
    }
    final PoolStats stats = new PoolStats(poolType);
    stats.setActiveCount(executor.getActiveCount());
    stats.setCompletedTaskCount(executor.getCompletedTaskCount());
    stats.setCorePoolSize(executor.getCorePoolSize());
    stats.setLargestPoolSize(executor.getLargestPoolSize());
    stats.setMaximumPoolSize(executor.getMaximumPoolSize());
    stats.setPoolSize(executor.getPoolSize());
    stats.setQueueSize(executor.getQueue().size());
    stats.setTaskCount(executor.getTaskCount());
    return stats;
}

From source file:org.esigate.test.cases.PerformanceTestCase.java

/**
 * Executes the task with multiple threads.
 *
 * @param request the request to execute repeatedly
 * @param numberOfRequests total number of requests to submit
 * @param threads number of concurrent threads and connections
 * @return the execution time in milliseconds
 * @throws Exception
 */
private long execute(HttpGetRequestRunnable request, int numberOfRequests, int threads) throws Exception {
    connectionManager = new PoolingHttpClientConnectionManager();
    httpClient = HttpClientBuilder.create().setConnectionManager(connectionManager).setMaxConnTotal(threads)
            .setMaxConnPerRoute(threads).setDefaultRequestConfig(
                    RequestConfig.custom().setConnectTimeout(10000).setSocketTimeout(10000).build())
            .build();
    // Warm up
    request.run();

    BlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>();
    ThreadPoolExecutor threadPool = new ThreadPoolExecutor(threads, threads, 5, TimeUnit.SECONDS, queue);

    long start = System.currentTimeMillis();
    threadPool.prestartAllCoreThreads();
    for (int i = 0; i < numberOfRequests; i++) {
        threadPool.submit(request);
    }
    threadPool.shutdown();

    // wait a maximum of 200 s
    threadPool.awaitTermination(200, TimeUnit.SECONDS);
    connectionManager.shutdown();

    if (request.exception != null) {
        throw new AssertionFailedError(
                "Exception for request " + request.url + " after " + request.count + " requests",
                request.exception);
    }
    if (threadPool.getCompletedTaskCount() < threadPool.getTaskCount()) {
        // Not all tasks were executed
        String msg = request.url + " : Only " + threadPool.getCompletedTaskCount() + "/"
                + threadPool.getTaskCount() + " have been rendered => maybe a performance issue";
        threadPool.shutdownNow();
        fail(msg);
    }

    long end = System.currentTimeMillis();
    long execTime = end - start;
    LOG.debug("Executed request " + request.url + " " + numberOfRequests + " times with " + threads
            + " threads in " + execTime + "ms");
    return execTime;

}
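Comparing getCompletedTaskCount() against getTaskCount() after shutdown, as above, is a simple way to detect tasks that did not finish within the awaitTermination timeout.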

From source file:org.apache.bookkeeper.common.util.OrderedExecutor.java

/**
 * Constructs a safe executor.
 *
 * @param numThreads
 *            - number of threads
 * @param baseName
 *            - base name of executor threads
 * @param threadFactory
 *            - for constructing threads
 * @param statsLogger
 *            - for reporting executor stats
 * @param traceTaskExecution
 *            - should we stat task execution
 * @param preserveMdcForTaskExecution
 *            - should we preserve MDC for task execution
 * @param warnTimeMicroSec
 *            - log long task exec warning after this interval
 * @param maxTasksInQueue
 *            - maximum items allowed in a thread queue. -1 for no limit
 * @param enableBusyWait
 *            - whether executor threads should try to acquire a dedicated CPU core
 */
protected OrderedExecutor(String baseName, int numThreads, ThreadFactory threadFactory, StatsLogger statsLogger,
        boolean traceTaskExecution, boolean preserveMdcForTaskExecution, long warnTimeMicroSec,
        int maxTasksInQueue, boolean enableBusyWait) {
    checkArgument(numThreads > 0);
    checkArgument(!StringUtils.isBlank(baseName));

    this.maxTasksInQueue = maxTasksInQueue;
    this.warnTimeMicroSec = warnTimeMicroSec;
    this.enableBusyWait = enableBusyWait;
    name = baseName;
    threads = new ExecutorService[numThreads];
    threadIds = new long[numThreads];
    for (int i = 0; i < numThreads; i++) {
        ThreadPoolExecutor thread = createSingleThreadExecutor(new ThreadFactoryBuilder()
                .setNameFormat(name + "-" + getClass().getSimpleName() + "-" + i + "-%d")
                .setThreadFactory(threadFactory).build());

        threads[i] = addExecutorDecorators(getBoundedExecutor(thread));

        final int idx = i;
        try {
            threads[idx].submit(() -> {
                threadIds[idx] = Thread.currentThread().getId();

                if (enableBusyWait) {
                    // Try to acquire 1 CPU core to the executor thread. If it fails we
                    // are just logging the error and continuing, falling back to
                    // non-isolated CPUs.
                    try {
                        CpuAffinity.acquireCore();
                    } catch (Throwable t) {
                        log.warn("Failed to acquire CPU core for thread {}", Thread.currentThread().getName(),
                                t.getMessage(), t);
                    }
                }
            }).get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException("Couldn't start thread " + i, e);
        } catch (ExecutionException e) {
            throw new RuntimeException("Couldn't start thread " + i, e);
        }

        // Register gauges
        statsLogger.registerGauge(String.format("%s-queue-%d", name, idx), new Gauge<Number>() {
            @Override
            public Number getDefaultValue() {
                return 0;
            }

            @Override
            public Number getSample() {
                return thread.getQueue().size();
            }
        });
        statsLogger.registerGauge(String.format("%s-completed-tasks-%d", name, idx), new Gauge<Number>() {
            @Override
            public Number getDefaultValue() {
                return 0;
            }

            @Override
            public Number getSample() {
                return thread.getCompletedTaskCount();
            }
        });
        statsLogger.registerGauge(String.format("%s-total-tasks-%d", name, idx), new Gauge<Number>() {
            @Override
            public Number getDefaultValue() {
                return 0;
            }

            @Override
            public Number getSample() {
                return thread.getTaskCount();
            }
        });
    }

    // Stats
    this.taskExecutionStats = statsLogger.scope(name).getOpStatsLogger("task_execution");
    this.taskPendingStats = statsLogger.scope(name).getOpStatsLogger("task_queued");
    this.traceTaskExecution = traceTaskExecution;
    this.preserveMdcForTaskExecution = preserveMdcForTaskExecution;
}

From source file:org.apache.hadoop.hbase.util.FSUtils.java

/**
 * This function is to scan the root path of the file system to get either the
 * mapping between the region name and its best locality region server or the
 * degree of locality of each region on each of the servers having at least
 * one block of that region. The output map parameters are both optional.
 *
 * @param conf
 *          the configuration to use
 * @param desiredTable
 *          the table you wish to scan locality for
 * @param threadPoolSize
 *          the thread pool size to use
 * @param regionToBestLocalityRSMapping
 *          the map into which to put the best locality mapping or null
 * @param regionDegreeLocalityMapping
 *          the map into which to put the locality degree mapping or null,
 *          must be a thread-safe implementation
 * @throws IOException
 *           in case of file system errors or interrupts
 */
private static void getRegionLocalityMappingFromFS(final Configuration conf, final String desiredTable,
        int threadPoolSize, Map<String, String> regionToBestLocalityRSMapping,
        Map<String, Map<String, Float>> regionDegreeLocalityMapping) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path rootPath = FSUtils.getRootDir(conf);
    long startTime = EnvironmentEdgeManager.currentTimeMillis();
    Path queryPath;
    // The table files are in ${hbase.rootdir}/data/<namespace>/<table>/*
    if (null == desiredTable) {
        queryPath = new Path(new Path(rootPath, HConstants.BASE_NAMESPACE_DIR).toString() + "/*/*/*/");
    } else {
        queryPath = new Path(FSUtils.getTableDir(rootPath, TableName.valueOf(desiredTable)).toString() + "/*/");
    }

    // reject all paths that are not appropriate
    PathFilter pathFilter = new PathFilter() {
        @Override
        public boolean accept(Path path) {
            // this is the region name; it may get some noise data
            if (null == path) {
                return false;
            }

            // no parent?
            Path parent = path.getParent();
            if (null == parent) {
                return false;
            }

            String regionName = path.getName();
            if (null == regionName) {
                return false;
            }

            if (!regionName.toLowerCase().matches("[0-9a-f]+")) {
                return false;
            }
            return true;
        }
    };

    FileStatus[] statusList = fs.globStatus(queryPath, pathFilter);

    if (null == statusList) {
        return;
    } else {
        LOG.debug("Query Path: " + queryPath + " ; # list of files: " + statusList.length);
    }

    // lower the number of threads in case we have very few expected regions
    threadPoolSize = Math.min(threadPoolSize, statusList.length);

    // run in multiple threads
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(threadPoolSize, threadPoolSize, 60, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(statusList.length));
    try {
        // ignore all file status items that are not of interest
        for (FileStatus regionStatus : statusList) {
            if (null == regionStatus) {
                continue;
            }

            if (!regionStatus.isDirectory()) {
                continue;
            }

            Path regionPath = regionStatus.getPath();
            if (null == regionPath) {
                continue;
            }

            tpe.execute(new FSRegionScanner(fs, regionPath, regionToBestLocalityRSMapping,
                    regionDegreeLocalityMapping));
        }
    } finally {
        tpe.shutdown();
        int threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, 60 * 1000);
        try {
            // here we wait until the TPE terminates, either naturally or through
            // exceptions raised in the executing threads
            while (!tpe.awaitTermination(threadWakeFrequency, TimeUnit.MILLISECONDS)) {
                // printing out rough estimate, so as to not introduce
                // AtomicInteger
                LOG.info("Locality checking is underway: { Scanned Regions : " + tpe.getCompletedTaskCount()
                        + "/" + tpe.getTaskCount() + " }");
            }
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
    }

    long overhead = EnvironmentEdgeManager.currentTimeMillis() - startTime;
    String overheadMsg = "Scanning DFS for locality info took " + overhead + " ms";

    LOG.info(overheadMsg);
}

From source file:org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService.java

synchronized long countPendingDeletions() {
    long count = 0;
    for (ThreadPoolExecutor exec : executors.values()) {
        count += exec.getTaskCount() - exec.getCompletedTaskCount();
    }
    return count;
}
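Subtracting getCompletedTaskCount() from getTaskCount(), as above, approximates the number of tasks still queued or running; since both counters are themselves approximations, the result is an estimate rather than an exact pending count.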

From source file:org.apache.jackrabbit.oak.plugins.segment.SegmentDataStoreBlobGCIT.java

@Test
public void consistencyCheckInit() throws Exception {
    DataStoreState state = setUp();
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    MarkSweepGarbageCollector gcObj = init(86400, executor);
    long candidates = gcObj.checkConsistency();
    assertEquals(1, executor.getTaskCount());
    assertEquals(0, candidates);
}
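In this and the following tests, getTaskCount() doubles as an assertion that the consistency check submitted exactly one task to the executor.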

From source file:org.apache.jackrabbit.oak.plugins.segment.SegmentDataStoreBlobGCIT.java

@Test
public void consistencyCheckWithGc() throws Exception {
    DataStoreState state = setUp();
    Set<String> existingAfterGC = gcInternal(0);
    assertTrue(Sets.symmetricDifference(state.blobsPresent, existingAfterGC).isEmpty());

    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    MarkSweepGarbageCollector gcObj = init(86400, executor);
    long candidates = gcObj.checkConsistency();
    assertEquals(1, executor.getTaskCount());
    assertEquals(0, candidates);
}

From source file:org.apache.jackrabbit.oak.plugins.segment.SegmentDataStoreBlobGCIT.java

@Test
public void consistencyCheckWithRenegadeDelete() throws Exception {
    DataStoreState state = setUp();

    // Simulate faulty state by deleting some blobs directly
    Random rand = new Random(87);
    List<String> existing = Lists.newArrayList(state.blobsPresent);

    long count = blobStore.countDeleteChunks(ImmutableList.of(existing.get(rand.nextInt(existing.size()))), 0);

    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    MarkSweepGarbageCollector gcObj = init(86400, executor);
    long candidates = gcObj.checkConsistency();
    assertEquals(1, executor.getTaskCount());
    assertEquals(count, candidates);
}

From source file:org.apache.jackrabbit.oak.plugins.segment.SegmentDataStoreBlobGCIT.java

@Test
public void consistencyCheckInlined() throws Exception {
    blobStore = new DataStoreBlobStore(DataStoreUtils.createFDS(new File(getWorkDir(), "datastore"), 16516));
    DataStoreState state = setUp();
    addInlined();
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    MarkSweepGarbageCollector gcObj = init(86400, executor);
    long candidates = gcObj.checkConsistency();
    assertEquals(1, executor.getTaskCount());
    assertEquals(0, candidates);
}