Example usage for java.util.concurrent ExecutorService shutdown

Introduction

This page collects real-world usage examples for java.util.concurrent ExecutorService shutdown.

Prototype

void shutdown();

Document

Initiates an orderly shutdown in which previously submitted tasks are executed, but no new tasks will be accepted.
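For reference, the javadoc pairs shutdown() with awaitTermination() and shutdownNow() to bound how long a caller waits for in-flight tasks. A minimal sketch of that conventional sequence (the pool size, timeout, and doWork() below are illustrative, not taken from any of the projects that follow):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

ExecutorService pool = Executors.newFixedThreadPool(4);
try {
    pool.execute(() -> doWork()); // submit work as usual
} finally {
    pool.shutdown(); // stop accepting new tasks; already-submitted tasks keep running
    try {
        // give running tasks a bounded grace period to finish
        if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // interrupt whatever is still running
        }
    } catch (InterruptedException e) {
        pool.shutdownNow();
        Thread.currentThread().interrupt(); // restore the interrupt flag
    }
}

The examples below show how real projects apply variations of this sequence.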

Usage

From source file:com.linkedin.pinot.integration.tests.HybridClusterScanComparisonIntegrationTest.java

@Override
@BeforeClass
public void setUp() throws Exception {
    //Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_offlineSegmentDir);
    ensureDirectoryExistsAndIsEmpty(_realtimeSegmentDir);
    ensureDirectoryExistsAndIsEmpty(_offlineTarDir);
    ensureDirectoryExistsAndIsEmpty(_realtimeTarDir);
    ensureDirectoryExistsAndIsEmpty(_unpackedSegments);

    // Start Zk, Kafka and Pinot
    startHybridCluster();

    extractAvroIfNeeded();

    int avroFileCount = getAvroFileCount();
    Preconditions.checkArgument(3 <= avroFileCount, "Need at least three Avro files for this test");

    setSegmentCount(avroFileCount);
    setOfflineSegmentCount(2);
    setRealtimeSegmentCount(avroFileCount - 1);

    final List<File> avroFiles = getAllAvroFiles();

    _schemaFile = getSchemaFile();
    _schema = Schema.fromFile(_schemaFile);

    // Create Pinot table
    setUpTable("mytable", getTimeColumnName(), getTimeColumnType(), KafkaStarterUtils.DEFAULT_ZK_STR,
            KAFKA_TOPIC, _schemaFile, avroFiles.get(0), getSortedColumn(), invertedIndexColumns);

    final List<File> offlineAvroFiles = getOfflineAvroFiles(avroFiles);
    final List<File> realtimeAvroFiles = getRealtimeAvroFiles(avroFiles);

    // Create segments from Avro data
    ExecutorService executor;
    if (_createSegmentsInParallel) {
        executor = Executors.newCachedThreadPool();
    } else {
        executor = Executors.newSingleThreadExecutor();
    }
    Future<Map<File, File>> offlineAvroToSegmentMapFuture = buildSegmentsFromAvro(offlineAvroFiles, executor, 0,
            _offlineSegmentDir, _offlineTarDir, "mytable", false, _schema);
    Future<Map<File, File>> realtimeAvroToSegmentMapFuture = buildSegmentsFromAvro(realtimeAvroFiles, executor,
            0, _realtimeSegmentDir, _realtimeTarDir, "mytable", false, _schema);

    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);

    // Redeem futures
    _offlineAvroToSegmentMap = offlineAvroToSegmentMapFuture.get();
    _realtimeAvroToSegmentMap = realtimeAvroToSegmentMapFuture.get();

    LOGGER.info("Offline avro to segment map: {}", _offlineAvroToSegmentMap);
    LOGGER.info("Realtime avro to segment map: {}", _realtimeAvroToSegmentMap);

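    // Stop accepting new tasks and wait (up to 10 minutes) for remaining work, such as the query generator setup, to drain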
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Set up a Helix spectator to count uploaded segments and unlock the latch once all offline segments are online
    final CountDownLatch latch = setupSegmentCountCountDownLatch("mytable", getOfflineSegmentCount());

    // Upload the offline segments
    int i = 0;
    for (String segmentName : _offlineTarDir.list()) {
        i++;
        LOGGER.info("Uploading segment {} : {}", i, segmentName);
        File file = new File(_offlineTarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, new FileInputStream(file),
                file.length());
    }

    // Wait for all offline segments to be online
    latch.await();

    _compareStatusFileWriter = getLogWriter();
    _scanRspFileWriter = getScanRspRecordFileWriter();
    _compareStatusFileWriter.write("Start time:" + System.currentTimeMillis() + "\n");
    _compareStatusFileWriter.flush();
    startTimeMs = System.currentTimeMillis();
    LOGGER.info("Setup completed");
}

From source file:com.wavemaker.tools.apidocs.tools.spring.SpringSwaggerParserTest.java

@Test
public void testMultiThread() throws InterruptedException {
    ExecutorService service = Executors.newFixedThreadPool(4);
    List<Class<?>> controllerClasses = new ArrayList<>();
    controllerClasses.add(VacationController.class);
    controllerClasses.add(UserController.class);
    controllerClasses.add(DepartmentController.class);

    for (final Class<?> controllerClass : controllerClasses) {
        service.execute(new Runnable() {
            public void run() {
                Swagger swagger;
                try {
                    swagger = runForSingleClass(controllerClass);
                } catch (SwaggerParserException e) {
                    throw new RuntimeException("Exception while parsing class:" + controllerClass.getName(), e);
                }
                Assert.assertNotNull(swagger);
                assertEquals(1, swagger.getTags().size());
                assertEquals(controllerClass.getName(), swagger.getTags().get(0).getFullyQualifiedName());
                try {
                    writeToFile(swagger, "class_" + controllerClass.getSimpleName() + ".json");
                } catch (IOException e) {
                    throw new RuntimeException("Error while writing to file", e);
                }
            }
        });
    }

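    // Reject further submissions and give the queued parsing tasks up to 10 seconds to finish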
    service.shutdown();
    service.awaitTermination(10, TimeUnit.SECONDS);
}

From source file:com.google.api.ads.adwords.jaxws.extensions.processors.onfile.ReportProcessorOnFile.java

private <R extends Report> void processFiles(String userId, String mccAccountId, Class<R> reportBeanClass,
        Collection<File> localFiles, ReportDefinitionDateRangeType dateRangeType, String dateStart,
        String dateEnd) {//from   w  w w  .  ja v a 2  s . c o m

    final CountDownLatch latch = new CountDownLatch(localFiles.size());
    ExecutorService executorService = Executors.newFixedThreadPool(numberOfReportProcessors);

    // Processing Report Local Files
    LOGGER.info("Processing reports...");

    Stopwatch stopwatch = Stopwatch.createStarted();

    for (File file : localFiles) {
        LOGGER.trace(".");
        try {

            ModifiedCsvToBean<R> csvToBean = new ModifiedCsvToBean<R>();
            MappingStrategy<R> mappingStrategy = new AnnotationBasedMappingStrategy<R>(reportBeanClass);

            LOGGER.debug("Parsing file: " + file.getAbsolutePath());
            RunnableProcessorOnFile<R> runnableProcessor = new RunnableProcessorOnFile<R>(file, csvToBean,
                    mappingStrategy, dateRangeType, dateStart, dateEnd, mccAccountId, persister,
                    reportRowsSetSize);
            runnableProcessor.setLatch(latch);
            executorService.execute(runnableProcessor);

        } catch (Exception e) {
            LOGGER.error("Ignoring file (Error when processing): " + file.getAbsolutePath());
            e.printStackTrace();
        }
    }

    try {
        latch.await();
    } catch (InterruptedException e) {
        LOGGER.error(e.getMessage());
        e.printStackTrace();
    }
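    // Every worker has counted down the latch by this point, so shutdown() simply releases the idle pool threads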
    executorService.shutdown();
    stopwatch.stop();
    LOGGER.info("*** Finished processing all reports in " + (stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000)
            + " seconds ***\n");
}

From source file:cp.server.app.ClientMultiThreadedExecution.java

public static void fetch() throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(10);

    try {
        // create an array of URIs to perform GETs on
        String[] urisToGet = { "http://sports.sina.com.cn", "http://news.sina.com.cn", "http://ent.sina.com.cn",
                "http://tech.sina.com.cn", "http://sports.sina.com.cn/o/2013-10-27/04016852444.shtml",
                "http://finance.sina.com.cn/china/20131027/043917125695.shtml",
                "http://sports.sina.com.cn/j/2013-10-27/06336852561.shtml",
                "http://sports.sina.com.cn/j/2013-10-26/21006851844.shtml" };

        for (int i = 0; i < 10000; i++) {
            for (int j = 0; j < urisToGet.length; j++) {
                PAGESTACK.push(urisToGet[j]);
            }
        }

        CountDownLatch cdl = new CountDownLatch(4); // one count per GetThread started below

        // create a thread for each URI
        GetThread[] threads = new GetThread[urisToGet.length];

        for (int i = 0; i < 4; i++) {
            // HttpGet httpget = new HttpGet(urisToGet[i]);
            threads[i] = new GetThread(urisToGet[i], i + 1, cdl);
        }

        // start the threads
        for (int j = 0; j < 4; j++) {
            pool.execute(threads[j]);
            // threads[j].start();
        }

        cdl.await();

    } finally {
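        // Stop accepting new tasks; any GetThreads still running are left to finish on their own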
        pool.shutdown();
    }
}

From source file:io.fabric8.kubernetes.pipeline.BuildImageStepExecution.java

@Override
protected ImageInspect run() throws Exception {
    return workspace.getChannel().call(new MasterToSlaveCallable<ImageInspect, Exception>() {
        @Override
        public ImageInspect call() throws Exception {
            ExecutorService executorService = Executors.newFixedThreadPool(2);
            try {
                Future<Boolean> createTarFuture;
                Future<ImageInspect> buildImageFuture;
                try (PipedInputStream pin = new PipedInputStream();
                        PipedOutputStream pout = new PipedOutputStream(pin)) {

                    createTarFuture = executorService.submit(new CreateTarTask(pout));
                    buildImageFuture = executorService.submit(new BuildImageTask(pin));
                }

                //Wait for the two tasks to complete.
                if (!createTarFuture.get(step.getTimeout(), TimeUnit.MILLISECONDS)) {
                    listener.getLogger().println("Failed to create docker image tarball.");
                }

                ImageInspect imageInspect = buildImageFuture.get(step.getTimeout(), TimeUnit.MILLISECONDS);
                if (imageInspect == null) {
                    throw new RuntimeException("Failed to build docker image.");
                } else {
                    return imageInspect;
                }
            } finally {
                executorService.shutdown();
                // escalate to shutdownNow() only if the pool fails to terminate within the grace period
                if (!executorService.awaitTermination(30, TimeUnit.SECONDS)) {
                    executorService.shutdownNow();
                }
            }
        }
    });
}

From source file:com.serphacker.serposcope.task.proxy.ProxyChecker.java

@Override
public void run() {

    LOG.info("starting proxy checking task, threads = {}, timeout in MS = {}", nThread, timeoutMS);

    long start = System.currentTimeMillis();

    List<Proxy> proxies = db.proxy.list();
    if (proxies == null || proxies.isEmpty()) {
        LOG.debug("no proxy to check");
        return;
    }

    totalProxies = proxies.size();

    ExecutorService executor = Executors.newFixedThreadPool(nThread);
    db.proxy.updateStatus(Proxy.Status.UNCHECKED,
            proxies.stream().map((t) -> t.getId()).collect(Collectors.toList()));

    for (Proxy proxy : proxies) {
        executor.submit(new Runnable() {
            @Override
            public void run() {
                ScrapClient cli = new ScrapClient();

                cli.setTimeout(timeoutMS);
                ScrapProxy scrapProxy = proxy.toScrapProxy();
                cli.setProxy(scrapProxy);

                LOG.info("checking {}", scrapProxy);

                Proxy.Status proxyStatus = Proxy.Status.ERROR;

                int httpStatus = cli.get(judgeUrl);
                if (httpStatus == 200 && cli.getContentAsString() != null) {
                    Matcher matcher = PATTERN_IP.matcher(cli.getContentAsString());
                    if (matcher.find()) {
                        proxy.setRemoteip(matcher.group(1));
                        proxyStatus = Proxy.Status.OK;
                    }
                }

                proxy.setStatus(proxyStatus);
                proxy.setLastCheck(LocalDateTime.now());
                db.proxy.update(proxy);

                checked.incrementAndGet();
            }
        });
    }

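    // Reject new submissions; in-flight checks get up to an hour to finish, and an interrupt escalates to shutdownNow()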
    executor.shutdown();
    try {
        executor.awaitTermination(1, TimeUnit.HOURS);
    } catch (InterruptedException ex) {
        executor.shutdownNow();
    }
    LOG.info("proxy checking finished in {}",
            DurationFormatUtils.formatDurationHMS(System.currentTimeMillis() - start));
}

From source file:org.openrdf.http.server.ProtocolTest.java

/**
 * Test for SES-1861
 * 
 * @throws Exception
 */
@Test
public void testConcurrentNamespaceUpdates() throws Exception {
    int limitCount = 1000;
    int limitPrefix = 50;

    Random prng = new Random();

    String repositoryLocation = TestServer.REPOSITORY_URL;

    ExecutorService threadPool = Executors.newFixedThreadPool(20);

    for (int count = 0; count < limitCount; count++) {
        final int number = count;
        final int i = prng.nextInt(limitPrefix);
        final String prefix = "prefix" + i;
        final String ns = "http://example.org/namespace" + i;

        final String location = Protocol.getNamespacePrefixLocation(repositoryLocation, prefix);

        Runnable runner = new Runnable() {

            public void run() {
                try {
                    if (number % 2 == 0) {
                        putNamespace(location, ns);
                    } else {
                        deleteNamespace(location);
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                    fail("Failed in test: " + number);
                }
            }
        };
        threadPool.execute(runner);
    }
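    // Give the queued namespace updates up to 30 seconds to drain, then cancel anything still outstanding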
    threadPool.shutdown();
    threadPool.awaitTermination(30000, TimeUnit.MILLISECONDS);
    threadPool.shutdownNow();
}

From source file:com.aerospike.benchmarks.Main.java

private void doRWTest(AerospikeClient client) throws Exception {
    ExecutorService es = Executors.newFixedThreadPool(this.nThreads);
    RWTask[] tasks = new RWTask[this.nThreads];

    for (int i = 0; i < this.nThreads; i++) {
        RWTask rt;
        if (args.validate) {
            int tstart = this.startKey + ((int) (this.nKeys * (((float) i) / this.nThreads)));
            int tkeys = (int) (this.nKeys * (((float) (i + 1)) / this.nThreads))
                    - (int) (this.nKeys * (((float) i) / this.nThreads));
            rt = new RWTaskSync(client, args, counters, tstart, tkeys);
        } else {
            rt = new RWTaskSync(client, args, counters, this.startKey, this.nKeys);
        }
        tasks[i] = rt;
        es.execute(rt);
    }
    collectRWStats(tasks, null);
    es.shutdown();
}

From source file:io.hops.security.TestUsersGroups.java

public void testConcurrentSetSameOwner(int cacheTime, int cacheSize) throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set(CommonConfigurationKeys.HOPS_UG_CACHE_SECS, Integer.toString(cacheTime));
    conf.set(CommonConfigurationKeys.HOPS_UG_CACHE_SIZE, Integer.toString(cacheSize));
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();

    DistributedFileSystem dfs = cluster.getFileSystem();
    Path base = new Path("/base");
    dfs.mkdirs(base);

    final String userName = "user";
    final String groupName = "group";
    final int CONCURRENT_USERS = 100;
    ExecutorService executorService = Executors.newFixedThreadPool(CONCURRENT_USERS);
    List<Callable<Boolean>> callables = new ArrayList<>();

    for (int i = 0; i < CONCURRENT_USERS; i++) {
        Path file = new Path(base, "file" + i);
        dfs.create(file).close();
        callables.add(new SetOwner(dfs, file, userName, groupName));
    }

    List<Future<Boolean>> futures = executorService.invokeAll(callables);
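    // invokeAll() blocks until every callable completes, so shutdown() and awaitTermination() here return almost immediately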
    executorService.shutdown();
    executorService.awaitTermination(1, TimeUnit.SECONDS);

    for (Future<Boolean> f : futures) {
        assertTrue(f.get());
    }
    cluster.shutdown();
}

From source file:com.aerospike.benchmarks.Main.java

private void doAsyncRWTest(AsyncClient client) throws Exception {
    ExecutorService es = Executors.newFixedThreadPool(this.nThreads);
    RWTask[] tasks = new RWTask[this.nThreads];

    for (int i = 0; i < this.nThreads; i++) {
        RWTask rt;
        if (args.validate) {
            int tstart = this.startKey + ((int) (this.nKeys * (((float) i) / this.nThreads)));
            int tkeys = (int) (this.nKeys * (((float) (i + 1)) / this.nThreads))
                    - (int) (this.nKeys * (((float) i) / this.nThreads));
            rt = new RWTaskAsync(client, args, counters, tstart, tkeys);
        } else {
            rt = new RWTaskAsync(client, args, counters, this.startKey, this.nKeys);
        }
        tasks[i] = rt;
        es.execute(rt);
    }
    collectRWStats(tasks, client);
    es.shutdown();
}