Example usage for java.util.concurrent Executors newCachedThreadPool

Introduction

On this page you can find usage examples for java.util.concurrent.Executors.newCachedThreadPool(), collected from open-source projects.

Prototype

public static ExecutorService newCachedThreadPool() 

Document

Creates a thread pool that creates new threads as needed, but will reuse previously constructed threads when they are available.
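
Before the project examples below, here is a minimal, self-contained sketch of the usual pattern: submit work, shut the pool down, then wait for completion. The task bodies and the 30-second timeout are illustrative, not taken from any of the examples.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class CachedPoolExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newCachedThreadPool();
        for (int i = 0; i < 5; i++) {
            final int taskId = i;
            // Idle threads are reused; a new thread is created only when none are free.
            pool.execute(() -> System.out.println("task " + taskId + " on " + Thread.currentThread().getName()));
        }
        pool.shutdown();                             // stop accepting new tasks
        pool.awaitTermination(30, TimeUnit.SECONDS); // wait for submitted tasks to finish
    }
}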

Usage

From source file:com.shmsoft.dmass.ec2.EC2Agent.java

private void setInitializedState(Cluster cluster) {
    ExecutorService es = Executors.newCachedThreadPool();
    for (Server server : cluster) {
        LoginChecker checker = new LoginChecker();
        checker.setServer(server);
        server.setCheckerThread(checker);
        es.execute(checker);

    }
    es.shutdown();
    boolean finished = false;
    try {
        finished = es.awaitTermination(1, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
        e.printStackTrace(System.out);
    }
    // TODO what to do if 'finished' is false
}
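
The TODO above is a common loose end: awaitTermination returns false when the timeout elapses before all submitted tasks have finished. One way to close that gap, sketched here as a hypothetical helper that is not part of the EC2Agent source, follows the shutdown idiom from the ExecutorService javadoc:

static void awaitOrForceShutdown(ExecutorService es) {
    es.shutdown();
    try {
        if (!es.awaitTermination(1, TimeUnit.MINUTES)) {
            // Timed out: cancel queued tasks and interrupt running ones.
            es.shutdownNow();
        }
    } catch (InterruptedException e) {
        es.shutdownNow();
        Thread.currentThread().interrupt(); // preserve the interrupt status
    }
}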

From source file:com.linkedin.pinot.integration.tests.MetadataAndDictionaryAggregationPlanClusterIntegrationTest.java

private void loadDataIntoH2(List<File> avroFiles) throws Exception {
    ExecutorService executor = Executors.newCachedThreadPool();
    setUpH2Connection(avroFiles, executor);
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);
}

From source file:com.safi.asterisk.handler.connection.AbstractConnectionManager.java

public AbstractConnectionManager() {

    loopbackCallLock = new Hashtable<String, Object>();
    channelKeepaliveMap = new Hashtable<String, KeepAlive>();
    listener = new SafiManagerEventListener();
    keepAliveExecutor = Executors.newCachedThreadPool();
    numAgiRequests = numCustomInitiations = 0;
}

From source file:com.ifesdjeen.cascading.cassandra.hadoop.ColumnFamilyInputFormat.java

public List<InputSplit> getSplits(JobContext context) throws IOException {
    Configuration conf = context.getConfiguration();

    validateConfiguration(conf);

    // canonical ranges and nodes holding replicas
    List<TokenRange> masterRangeNodes = getRangeMap(conf);

    keyspace = ConfigHelper.getInputKeyspace(context.getConfiguration());
    cfName = ConfigHelper.getInputColumnFamily(context.getConfiguration());
    partitioner = ConfigHelper.getInputPartitioner(context.getConfiguration());
    logger.debug("partitioner is " + partitioner);

    // canonical ranges, split into pieces, fetching the splits in parallel
    ExecutorService executor = Executors.newCachedThreadPool();
    List<InputSplit> splits = new ArrayList<InputSplit>();

    try {
        List<Future<List<InputSplit>>> splitfutures = new ArrayList<Future<List<InputSplit>>>();
        KeyRange jobKeyRange = ConfigHelper.getInputKeyRange(conf);
        Range<Token> jobRange = null;
        if (jobKeyRange != null && jobKeyRange.start_token != null) {
            assert partitioner
                    .preservesOrder() : "ConfigHelper.setInputKeyRange(..) can only be used with an order-preserving partitioner";
            assert jobKeyRange.start_key == null : "only start_token supported";
            assert jobKeyRange.end_key == null : "only end_token supported";
            jobRange = new Range<Token>(partitioner.getTokenFactory().fromString(jobKeyRange.start_token),
                    partitioner.getTokenFactory().fromString(jobKeyRange.end_token), partitioner);
        }

        for (TokenRange range : masterRangeNodes) {
            if (jobRange == null) {
                // for each range, pick a live owner and ask it to compute bite-sized splits
                splitfutures.add(executor.submit(new SplitCallable(range, conf)));
            } else {
                Range<Token> dhtRange = new Range<Token>(
                        partitioner.getTokenFactory().fromString(range.start_token),
                        partitioner.getTokenFactory().fromString(range.end_token), partitioner);

                if (dhtRange.intersects(jobRange)) {
                    for (Range<Token> intersection : dhtRange.intersectionWith(jobRange)) {
                        range.start_token = partitioner.getTokenFactory().toString(intersection.left);
                        range.end_token = partitioner.getTokenFactory().toString(intersection.right);
                        // for each range, pick a live owner and ask it to compute bite-sized splits
                        splitfutures.add(executor.submit(new SplitCallable(range, conf)));
                    }
                }
            }
        }

        // wait until we have all the results back
        for (Future<List<InputSplit>> futureInputSplits : splitfutures) {
            try {
                splits.addAll(futureInputSplits.get());
            } catch (Exception e) {
                throw new IOException("Could not get input splits", e);
            }
        }
    } finally {
        executor.shutdownNow();
    }

    assert splits.size() > 0;
    Collections.shuffle(splits, new Random(System.nanoTime()));
    return splits;
}

From source file:edu.berkeley.sparrow.examples.BackendBenchmarkProfiler.java

/**
 * Run an experiment which launches tasks at {@code arrivalRate} for {@code durationMs}
 * milliseconds and waits for all tasks to finish. Return a {@link DescriptiveStatistics}
 * object which contains stats about the distribution of task finish times. Tasks
 * are executed in a thread pool which contains at least {@code corePoolSize} threads
 * and grows up to {@code maxPoolSize} threads (growing whenever a new task arrives
 * and all existing threads are used).
 * 
 * Setting {@code maxPoolSize} to a very large number enacts time sharing, while
 * setting it equal to {@code corePoolSize} creates a fixed size task pool.
 * 
 * The rate of task finishes is tracked by bucketing tasks at the granularity
 * {@code bucketSize}. If it is detected that task finish times are increasing in an
 * unbounded fashion (i.e. infinite queuing is occurring), a {@link RuntimeException}
 * is thrown.
 */
public static void runExperiment(double arrivalRate, int corePoolSize, int maxPoolSize, long bucketSize,
        long durationMs, DescriptiveStatistics runTimes, DescriptiveStatistics waitTimes) {
    long startTime = System.currentTimeMillis();
    long keepAliveTime = 10;
    Random r = new Random();
    BlockingQueue<Runnable> runQueue = new LinkedBlockingQueue<Runnable>();
    ExecutorService threadPool = new ThreadPoolExecutor(corePoolSize, maxPoolSize, keepAliveTime,
            TimeUnit.MILLISECONDS, runQueue);
    if (maxPoolSize == Integer.MAX_VALUE) {
        threadPool = Executors.newCachedThreadPool();
    }

    // run times indexed by bucketing interval
    HashMap<Long, List<Long>> bucketedRunTimes = new HashMap<Long, List<Long>>();
    // wait times indexed by bucketing interval
    HashMap<Long, List<Long>> bucketedWaitTimes = new HashMap<Long, List<Long>>();

    /*
     * This is a little tricky. 
     * 
     * We want to generate inter-arrival delays according to the arrival rate specified.
     * The simplest option would be to generate an arrival delay and then sleep() for it
     * before launching each task. This has an issue, however: sleep() might wait
     * several ms longer than we ask it to. When task arrival rates get really fast, 
     * i.e. one task every 10 ms, sleeping an additional few ms will mean we launch 
     * tasks at a much lower rate than requested.
     * 
     * Instead, we keep track of task launches in a way that does not depend on how long
     * sleep() actually takes. We still might have tasks launch slightly after their
     * scheduled launch time, but we will not systematically "fall behind" due to
     * compounding time lost during successive sleep() calls.
     */
    long currTime = startTime;
    while (true) {
        long delay = (long) (generateInterarrivalDelay(r, arrivalRate) * 1000);

        // Compute when the next task should launch, based on when the last task was
        // scheduled to launch.
        long nextTime = currTime + delay;

        // Diff gives how long we should wait for the next scheduled task. The difference 
        // may be negative if our last sleep() lasted too long relative to the inter-arrival
        // delay based on the last scheduled launch, so we round up to 0 in that case. 
        long diff = Math.max(0, nextTime - System.currentTimeMillis());
        currTime = nextTime;
        if (diff > 0) {
            try {
                Thread.sleep(diff);
            } catch (InterruptedException e) {
                System.err.println("Unexpected interruption!");
                System.exit(1);
            }
        }
        threadPool.submit((new BenchmarkRunnable(bucketedRunTimes, bucketedWaitTimes, bucketSize)));
        if (System.currentTimeMillis() > startTime + durationMs) {
            break;
        }
    }
    threadPool.shutdown();
    try {
        threadPool.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e1) {
        System.err.println("Unexpected interruption!");
        System.exit(1);
    }
    List<Long> times = new ArrayList<Long>(bucketedRunTimes.keySet());
    Collections.sort(times);
    HashMap<Long, DescriptiveStatistics> bucketStats = new HashMap<Long, DescriptiveStatistics>();

    // Remove the first and last buckets, since they will not be completely full
    // due to discretization.
    times.remove(0);
    times.remove(times.size() - 1);

    for (Long time : times) {
        DescriptiveStatistics stats = new DescriptiveStatistics();
        List<Long> list = bucketedRunTimes.get(time);
        for (Long l : list) {
            stats.addValue(l);
            runTimes.addValue(l);
        }
        bucketStats.put(time, stats);

        List<Long> waitList = bucketedWaitTimes.get(time);
        for (Long l : waitList) {
            waitTimes.addValue(l);
        }
    }
    int size = bucketStats.size();
    if (size >= 2) {
        DescriptiveStatistics first = bucketStats.get(times.get(0));
        DescriptiveStatistics last = bucketStats.get(times.get(times.size() - 1));
        double increase = last.getPercentile(50) / first.getPercentile(50);
        // A simple heuristic: if the median runtime went up by a factor of five from the
        // first to the last complete bucket, we assume we are seeing unbounded growth.
        if (increase > 5.0) {
            throw new RuntimeException(
                    "Queue not in steady state: " + last.getMean() + " vs " + first.getMean());
        }
    }
}

From source file:byps.http.HWireClient.java

/**
 * Initializes a new client-side HTTP communication.
 *
 * @param url
 *          URL to server.
 * @param flags
 *          A combination of BWire.FLAG_* values.
 * @param timeoutSeconds
 *          Read timeout in seconds. A timeout of zero is interpreted as an
 *          infinite timeout.
 * @param threadPool
 *          Optional: A thread pool. If null, a thread pool is internally
 *          created.
 * @see BWire#FLAG_GZIP
 */
public HWireClient(String url, int flags, int timeoutSeconds, Executor threadPool) {
    super(flags);

    if (log.isDebugEnabled())
        log.debug("HWireClient(" + url + ", flags=" + flags + ", timeoutSeconds=" + timeoutSeconds
                + ", threadPool=" + threadPool);

    if (url == null || url.length() == 0)
        throw new IllegalStateException("Missing URL");

    this.surl = url;
    this.timeoutSecondsClient = timeoutSeconds;

    this.isMyThreadPool = threadPool == null;
    if (threadPool == null) {
        threadPool = Executors.newCachedThreadPool();
    }
    this.threadPool = threadPool;

    this.testAdapter = new HTestAdapter(this);

    // Create the HTTP client object via an HHttpClientFactory. ----------------

    // The class name of the HHttpClientFactory is taken from the
    // System.properties.
    String factName = System.getProperty(SYSTEM_PROPERTY_HTTP_CLIENT_FACTORY);
    if (log.isDebugEnabled())
        log.debug("factory from system.properties[" + SYSTEM_PROPERTY_HTTP_CLIENT_FACTORY + "]=" + factName);
    if (factName == null || factName.length() == 0)
        factName = JcnnClientFactory.class.getName();

    // Create a client factory object
    HHttpClientFactory fact = null;
    try {
        if (log.isDebugEnabled())
            log.debug("instantiate factName=" + factName);
        Class<?> clazz = Class.forName(factName);
        fact = (HHttpClientFactory) clazz.newInstance();
        if (log.isDebugEnabled())
            log.debug("fact=" + fact);
    } catch (Exception e) {
        if (log.isDebugEnabled())
            log.debug("failed to create factory, factName=" + factName, e);
        throw new IllegalStateException(e);
    }

    if (log.isDebugEnabled())
        log.debug("createHttpClient...");
    this.httpClient = fact.createHttpClient(url);
    if (log.isDebugEnabled())
        log.debug("createHttpClient OK, " + this.httpClient);

    if (log.isDebugEnabled())
        log.debug(")HWireClient");
}
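
The constructor above accepts an optional external thread pool and records in isMyThreadPool whether it created the pool itself, so that only an internally created pool is shut down later. A condensed sketch of that ownership pattern follows; the class and method names are hypothetical, not taken from HWireClient, and ExecutorService is used instead of Executor so the pool can be shut down:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class PooledClient {
    private final ExecutorService threadPool;
    private final boolean ownsPool;

    public PooledClient(ExecutorService threadPool) {
        // Remember whether we created the pool ourselves.
        this.ownsPool = (threadPool == null);
        this.threadPool = ownsPool ? Executors.newCachedThreadPool() : threadPool;
    }

    public void close() {
        // Only shut down a pool we created; a caller-supplied pool belongs to the caller.
        if (ownsPool) {
            threadPool.shutdown();
        }
    }
}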

From source file:cc.osint.graphd.graph.Graph.java

public Graph(String graphName) throws Exception {
    this.graphName = graphName;
    gr = new ListenableDirectedWeightedGraph<JSONVertex, JSONEdge>(JSONEdge.class);
    connectivityInspector = new ConnectivityInspector<JSONVertex, JSONEdge>(gr);
    vertices = new ConcurrentHashMap<String, JSONVertex>();

    // event handlers
    gr.addVertexSetListener(this);
    gr.addGraphListener(this);
    gr.addVertexSetListener(connectivityInspector);
    gr.addGraphListener(connectivityInspector);

    // simulation components
    executorService = Executors.newCachedThreadPool();
    fiberFactory = new PoolFiberFactory(executorService);
    vertexProcesses = new ProcessGroup<JSONVertex, JSONObject>(this, "vertex_processors", executorService,
            fiberFactory);
    edgeProcesses = new ProcessGroup<JSONEdge, JSONObject>(this, "edge_processors", executorService,
            fiberFactory);
    graphProcesses = new ProcessGroup<EventObject, JSONObject>(this, "graph_processors", executorService,
            fiberFactory);
    endpointChannelProcesses = new ProcessGroup<String, JSONObject>(this, "endpoint_channel_processors",
            executorService, fiberFactory);

    // graph index
    luceneDirectory = new RAMDirectory();
    indexWriter = new IndexWriter(luceneDirectory, analyzer, IndexWriter.MaxFieldLength.LIMITED);
    indexReader = indexWriter.getReader();
    searcher = new IndexSearcher(indexReader);

    // process registry
    simLuceneDirectory = new RAMDirectory();
    simIndexWriter = new IndexWriter(simLuceneDirectory, analyzer, IndexWriter.MaxFieldLength.LIMITED);
    simIndexReader = simIndexWriter.getReader();
    simSearcher = new IndexSearcher(simIndexReader);

}

From source file:de.rwth.dbis.acis.activitytracker.service.ActivityTrackerService.java

@GET
@Path("/")
@Produces(MediaType.APPLICATION_JSON)
@ApiOperation(value = "This method returns a list of activities", notes = "By default the latest ten activities will be returned")
@ApiResponses(value = {
        @ApiResponse(code = HttpURLConnection.HTTP_OK, message = "Returns a list of activities"),
        @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, message = "Not found"),
        @ApiResponse(code = HttpURLConnection.HTTP_INTERNAL_ERROR, message = "Internal server problems") })
//TODO add filter
public HttpResponse getActivities(
        @ApiParam(value = "Before cursor pagination", required = false) @DefaultValue("-1") @QueryParam("before") int before,
        @ApiParam(value = "After cursor pagination", required = false) @DefaultValue("-1") @QueryParam("after") int after,
        @ApiParam(value = "Limit of elements of components", required = false) @DefaultValue("10") @QueryParam("limit") int limit,
        @ApiParam(value = "User authorization token", required = false) @DefaultValue("") @HeaderParam("authorization") String authorizationToken) {

    DALFacade dalFacade = null;
    try {
        if (before != -1 && after != -1) {
            ExceptionHandler.getInstance().throwException(ExceptionLocation.ACTIVITIESERVICE,
                    ErrorCode.WRONG_PARAMETER, "the 'before' and 'after' parameters cannot be used together");
        }
        int cursor = before != -1 ? before : after;
        Pageable.SortDirection sortDirection = after != -1 ? Pageable.SortDirection.ASC
                : Pageable.SortDirection.DESC;

        PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager();
        cm.setMaxTotal(20);
        CloseableHttpClient httpclient = HttpClients.custom().setConnectionManager(cm).build();

        dalFacade = getDBConnection();
        Gson gson = new Gson();
        ExecutorService executor = Executors.newCachedThreadPool();

        int getObjectCount = 0;
        PaginationResult<Activity> activities;
        List<ActivityEx> activitiesEx = new ArrayList<>();
        Pageable pageInfo = new PageInfo(cursor, limit, "", sortDirection);
        while (activitiesEx.size() < limit && getObjectCount < 5) {
            pageInfo = new PageInfo(cursor, limit, "", sortDirection);
            activities = dalFacade.findActivities(pageInfo);
            getObjectCount++;
            cursor = sortDirection == Pageable.SortDirection.ASC ? cursor + limit : cursor - limit;
            if (cursor < 0) {
                cursor = 0;
            }
            activitiesEx.addAll(
                    getObjectBodies(httpclient, executor, authorizationToken, activities.getElements()));
        }

        executor.shutdown();
        if (activitiesEx.size() > limit) {
            activitiesEx = activitiesEx.subList(0, limit);
        }
        PaginationResult<ActivityEx> activitiesExResult = new PaginationResult<>(pageInfo, activitiesEx);

        HttpResponse response = new HttpResponse(gson.toJson(activitiesExResult.getElements()),
                HttpURLConnection.HTTP_OK);
        Map<String, String> parameter = new HashMap<>();
        parameter.put("limit", String.valueOf(limit));
        response = this.addPaginationToHtppResponse(activitiesExResult, "", parameter, response);

        return response;

    } catch (ActivityTrackerException atException) {
        return new HttpResponse(ExceptionHandler.getInstance().toJSON(atException),
                HttpURLConnection.HTTP_INTERNAL_ERROR);
    } catch (Exception ex) {
        ActivityTrackerException atException = ExceptionHandler.getInstance().convert(ex,
                ExceptionLocation.ACTIVITIESERVICE, ErrorCode.UNKNOWN, ex.getMessage());
        return new HttpResponse(ExceptionHandler.getInstance().toJSON(atException),
                HttpURLConnection.HTTP_INTERNAL_ERROR);
    } finally {
        closeDBConnection(dalFacade);
    }
}

From source file:com.changhong.bigdata.flume.source.dirregex.DirRegexSource.java

/**
 * @Title: configure
 * @Description: reads the DirRegexSource parameters from the Flume context
 * @author YuYue
 * @param context the Flume context carrying the source configuration
 * @throws IllegalArgumentException if a required parameter is missing or invalid
 */
public void configure(Context context) {
    logger.info("----------------------DirRegexSource configure...");
    try {
        // monitorDir and monitorFileRegex
        String strMonitorDir = context.getString("monitorDir");
        Preconditions.checkArgument(StringUtils.isNotBlank(strMonitorDir), "Missing Param:'monitorDir'");
        String monitorFileRegex = context.getString("monitorFileRegex", DEFAULT_MONITORFILEREGEX);
        Preconditions.checkArgument(StringUtils.isNotBlank(monitorFileRegex),
                "Missing Param:'monitorFileRegex'");
        monitorFilePattern = Pattern.compile(monitorFileRegex);
        // checkFile
        String strCheckFile = context.getString("checkFile");
        Preconditions.checkArgument(StringUtils.isNotBlank(strCheckFile), "Missing Param:'checkFile'");

        // contentRegex
        String contentRegex = context.getString("contentRegex");
        Preconditions.checkArgument(StringUtils.isNotBlank(contentRegex), "Missing Param:'contentRegex'");
        contentPattern = Pattern.compile(contentRegex);
        // ip
        ipstr = context.getString("ip");
        Preconditions.checkArgument(StringUtils.isNotBlank(ipstr), "Missing Param:'ip'");

        // delayTime, charsetName and batchSize
        delayTime = context.getLong("delayTime", DEFAULT_DELAYTIME);
        Preconditions.checkArgument(delayTime > 0, "'delayTime' must be greater than 0");
        charsetName = context.getString("charsetName", DEFAULT_CHARSETNAME);
        Preconditions.checkArgument(StringUtils.isNotBlank(charsetName), "Missing Param:'charsetName'");
        batchSize = context.getInteger("batchSize", DEFAULT_BATCHSIZE);
        Preconditions.checkArgument(batchSize > 0, "'batchSize' must be greater than 0");

        monitorDir = new File(strMonitorDir);
        checkFile = new File(strCheckFile);

        properties = new Properties();
        if (!checkFile.exists()) {
            checkFile.createNewFile();
        } else {
            FileInputStream checkfile001 = new FileInputStream(checkFile);
            properties.load(checkfile001);
            checkfile001.close();
        }

        executorService = Executors.newCachedThreadPool();
        scheduledExecutorService = Executors.newSingleThreadScheduledExecutor();
        sourceCounter = new SourceCounter("DirRegexSource");
    } catch (Exception e) {
        // TODO Auto-generated catch block
        throw new IllegalArgumentException(e);
    }
    logger.info("----------------------DirRegexSource configured!");
}

From source file:com.amazonaws.services.kinesis.multilang.MultiLangDaemonConfig.java

private static ExecutorService buildExecutorService(Properties properties) {
    int maxActiveThreads = getMaxActiveThreads(properties);
    LOG.debug(String.format("Value for %s property is %d", PROP_MAX_ACTIVE_THREADS, maxActiveThreads));
    if (maxActiveThreads <= 0) {
        LOG.info("Using a cached thread pool.");
        return Executors.newCachedThreadPool();
    } else {
        LOG.info(String.format("Using a fixed thread pool with %d max active threads.", maxActiveThreads));
        return Executors.newFixedThreadPool(maxActiveThreads);
    }
}
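
The last example captures the usual decision point: an unbounded cached pool for bursty, short-lived work versus a fixed pool when the number of concurrent threads must be capped. For reference, the Executors javadoc describes newCachedThreadPool() as roughly equivalent to the ThreadPoolExecutor configuration below, sketched as a hypothetical helper (requires the java.util.concurrent imports):

static ExecutorService cachedPoolEquivalent() {
    return new ThreadPoolExecutor(
            0, Integer.MAX_VALUE,              // no core threads, no upper bound on pool size
            60L, TimeUnit.SECONDS,             // idle threads are reclaimed after 60 seconds
            new SynchronousQueue<Runnable>()); // tasks are handed directly to a waiting thread
}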