Example usage for java.util.concurrent ExecutorService shutdown

List of usage examples for java.util.concurrent ExecutorService shutdown

Introduction

On this page you can find example usages of java.util.concurrent ExecutorService shutdown.

Prototype

void shutdown();

Document

Initiates an orderly shutdown in which previously submitted tasks are executed, but no new tasks will be accepted.
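
Below is a minimal, self-contained sketch of the typical shutdown pattern. The class name, pool size, and timeout are illustrative and are not taken from the examples that follow.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(2);

        // Tasks submitted before shutdown() still run to completion.
        executor.submit(() -> System.out.println("task 1"));
        executor.submit(() -> System.out.println("task 2"));

        executor.shutdown();                 // stop accepting new tasks
        if (!executor.awaitTermination(30, TimeUnit.SECONDS)) {
            executor.shutdownNow();          // cancel anything still running
        }
    }
}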

Usage

From source file:br.prof.salesfilho.oci.view.console.Main.java

public void extractFeatures() {

    if (this.propertySource.containsProperty("inputDir") && this.propertySource.containsProperty("outputDir")) {

        //Create a thread pool with two worker threads, one per feature extractor
        ExecutorService executor = Executors.newFixedThreadPool(2);

        BodyWomanFeatureExtractorExecutor e1 = new BodyWomanFeatureExtractorExecutor(true);
        e1.setInputDir(this.propertySource.getProperty("inputDir").toString());
        e1.setOutputDir(this.propertySource.getProperty("outputDir").toString());
        e1.setKernelSize(Double.valueOf(this.propertySource.getProperty("kernelsize").toString()));
        e1.setDatabaseName(this.propertySource.getProperty("databaseName").toString());

        executor.execute(e1);

        BodyWomanFeatureExtractorExecutor e2 = new BodyWomanFeatureExtractorExecutor(false);

        e2.setInputDir(this.propertySource.getProperty("inputDir").toString());
        e2.setOutputDir(this.propertySource.getProperty("outputDir").toString());
        e2.setKernelSize(Double.valueOf(this.propertySource.getProperty("kernelsize").toString()));
        e2.setDatabaseName(this.propertySource.getProperty("databaseName").toString());

        executor.execute(e2);

        //Wait for both extractors to finish (busy-waits until the pool terminates)
        executor.shutdown();
        while (!executor.isTerminated()) {
        }
        File databaseFile = new File(e1.getDatabaseName());
        bodyWomanDescriptorService.openDatabase(databaseFile);
        bodyWomanDescriptorService.add(e1.getBodyWomanDescriptor());
        bodyWomanDescriptorService.add(e2.getBodyWomanDescriptor());
        bodyWomanDescriptorService.save(databaseFile);

    } else {
        usage();
    }
}
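
Note on the wait above: spinning on isTerminated() works but keeps a CPU core busy. A minimal alternative sketch using a blocking wait (the one-hour timeout is an assumption, not part of the original source):

executor.shutdown();
try {
    // Block until both extractors finish, or give up after an hour.
    if (!executor.awaitTermination(1, TimeUnit.HOURS)) {
        executor.shutdownNow();
    }
} catch (InterruptedException e) {
    executor.shutdownNow();
    Thread.currentThread().interrupt(); // preserve the interrupt status
}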

From source file:com.netflix.genie.client.JobClientIntegrationTests.java

/**
 * Method to test submitting/killing a job.
 *
 * @throws Exception If there is any problem.
 */
@Test
public void submitAndKillJob() throws Exception {

    createClusterAndCommandForTest();

    final String jobId = UUID.randomUUID().toString();

    final List<ClusterCriteria> clusterCriteriaList = Lists
            .newArrayList(new ClusterCriteria(Sets.newHashSet("laptop")));

    final Set<String> commandCriteria = Sets.newHashSet("bash");

    final String depFile1 = this.resourceLoader.getResource("/dep1").getFile().getAbsolutePath();
    final Set<String> dependencies = Sets.newHashSet(depFile1);

    final String setUpFile = this.resourceLoader.getResource("/setupfile").getFile().getAbsolutePath();

    final JobRequest jobRequest = new JobRequest.Builder(JOB_NAME, JOB_USER, JOB_VERSION,
            "-c 'echo HELLO WORLD!!!'", clusterCriteriaList, commandCriteria).withId(jobId)
                    .withDisableLogArchival(true).withSetupFile(setUpFile).withDependencies(dependencies)
                    .withDescription(JOB_DESCRIPTION).build();

    final ExecutorService executors = Executors.newFixedThreadPool(2);
    final Future<String> jobFuture;
    try {
        jobFuture = executors.submit(() -> jobClient.submitJob(jobRequest));
        executors.submit(() -> {
            boolean result = true;
            while (result) {
                try {
                    jobClient.getJob(jobId);
                    jobClient.killJob(jobId);
                    Thread.sleep(1000);
                    result = false;
                } catch (Exception ignored) {
                    result = true;
                }
            }
        });
    } finally {
        executors.shutdown();
        executors.awaitTermination(Integer.MAX_VALUE, TimeUnit.HOURS);
    }
    final Job job = jobClient.getJob(jobId);
    Assert.assertEquals(jobId, jobFuture.get());
    Assert.assertEquals(JobStatus.KILLED, job.getStatus());
}

From source file:org.biopax.validator.impl.ValidatorImpl.java

public void validate(final Validation validation) {
    assert (validation != null);

    if (validation == null || validation.getModel() == null) {
        throw new ValidatorException("Failed: no BioPAX model to validate "
                + "(have you successfully imported or created one already?)");
    }

    // register the validation (if not done already)
    if (!getResults().contains(validation)) {
        getResults().add(validation);
    }

    // stop if the max. errors limit was already exceeded (reported by AOP interceptors while parsing a file, or in previous runs)
    if (validation.isMaxErrorsSet() && validation.getNotFixedErrors() > validation.getMaxErrors()) {
        log.info("Errors limit (" + validation.getMaxErrors() + ") is exceeded; exiting...");
        return;
    }

    Model model = (Model) validation.getModel();

    log.debug("validating model: " + model + " that has " + model.getObjects().size() + " objects");

    if (model.getLevel() != BioPAXLevel.L3) {
        model = (new LevelUpgrader()).filter(model);
        validation.setModel(model);
        log.info("Upgraded to BioPAX Level3 model: " + validation.getDescription());
    }

    assert (model != null && model.getLevel() == BioPAXLevel.L3);

    // Check/fix Rule<? extends BioPAXElement> rules concurrently (low risk of a ConcurrentModificationException),
    // because they normally make only minor changes and cannot add/remove
    // elements in the Model (though they can alter a property of any BioPAX object)
    ExecutorService exec = Executors.newFixedThreadPool(100);

    // First, check/fix individual objects
    // (no need to copy; these rules cannot add/remove objects in model)
    for (BioPAXElement el : model.getObjects()) {
        // rules can check/fix specific elements
        //         for (Rule rule : rules) {            
        //            Behavior behavior = utils.getRuleBehavior(rule.getClass().getName(), validation.getProfile());       
        //              if (behavior == Behavior.IGNORE) 
        //                 continue; // skip               
        //            execute(exec, rule, validation, (Object) el);
        //         }
        //sequentially apply all (capable,enabled) rules to the object in a separate thread
        execute(exec, rules, validation, (Object) el);
    }
    exec.shutdown(); //end accepting new jobs
    try {
        exec.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        throw new ValidatorException("Interrupted unexpectedly!");
    }

    //Second, apply all Rule<Model> rules -
    //run Rule<Model> rules concurrently
    exec = Executors.newFixedThreadPool(50);
    for (Rule rule : rules) {
        Behavior behavior = utils.getRuleBehavior(rule.getClass().getName(), validation.getProfile());
        if (behavior == Behavior.IGNORE)
            continue; // skip disabled rule           
        execute(exec, rule, validation, model);
    }
    exec.shutdown(); //end accepting jobs
    try {
        exec.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        throw new ValidatorException("Interrupted unexpectedly!");
    }

    log.debug("All rules checked!");

    if (validation.isFix()) {
        // discover, explicitly add child elements to the model
        model.repair();
        // remove all dangling utility class objects
        ModelUtils.removeObjectsIfDangling(model, UtilityClass.class);
    }

    // add comments and some statistics
    validation.addComment("number of interactions : " + model.getObjects(Interaction.class).size());
    validation.addComment("number of physical entities : " + model.getObjects(PhysicalEntity.class).size());
    validation.addComment("number of genes : " + model.getObjects(Gene.class).size());
    validation.addComment("number of pathways : " + model.getObjects(Pathway.class).size());

    //update all error counts (total, fixed, notfixed)
    for (ErrorType errorType : validation.getError()) {
        errorType.setTotalCases(errorType.countErrors(null, null, false));
        errorType.setNotFixedCases(errorType.countErrors(null, null, true));
    }
    validation.setNotFixedProblems(validation.countErrors(null, null, null, null, false, true));
    validation.setNotFixedErrors(validation.countErrors(null, null, null, null, true, true));
    validation.setTotalProblemsFound(validation.countErrors(null, null, null, null, false, false));
    validation.setSummary("different types of problem: " + validation.getError().size());
}

From source file:hivemall.mix.server.MixServerTest.java

@Test
public void testSSL() throws InterruptedException {
    int port = NetUtils.getAvailablePort();
    CommandLine cl = CommandLineUtils.parseOptions(
            new String[] { "-port", Integer.toString(port), "-sync_threshold", "3", "-ssl" },
            MixServer.getOptions());
    MixServer server = new MixServer(cl);
    ExecutorService serverExec = Executors.newSingleThreadExecutor();
    serverExec.submit(server);

    waitForState(server, ServerState.RUNNING);

    PredictionModel model = new DenseModel(16777216, false);
    model.configureClock();
    MixClient client = null;
    try {
        client = new MixClient(MixEventName.average, "testSSL", "localhost:" + port, true, 2, model);
        model.configureMix(client, false);

        final Random rand = new Random(43);
        for (int i = 0; i < 100000; i++) {
            Integer feature = Integer.valueOf(rand.nextInt(100));
            float weight = (float) rand.nextGaussian();
            model.set(feature, new WeightValue(weight));
        }

        Thread.sleep(5000L);

        long numMixed = model.getNumMixed();
        Assert.assertEquals("number of mix events: " + numMixed, numMixed, 0L);

        serverExec.shutdown();
    } finally {
        IOUtils.closeQuietly(client);
    }
}

From source file:com.alibaba.cobar.client.CobarSqlMapClientTemplate.java

public void destroy() throws Exception {
    if (CollectionUtils.isNotEmpty(internalExecutorServiceRegistry)) {
        logger.info("shutdown executors of CobarSqlMapClientTemplate...");
        for (ExecutorService executor : internalExecutorServiceRegistry) {
            if (executor != null) {
                try {
                    executor.shutdown();
                    executor.awaitTermination(5, TimeUnit.MINUTES);
                    executor = null;
                } catch (InterruptedException e) {
                    logger.warn("interrupted when shuting down the query executor:\n{}", e);
                }
            }
        }
        getDataSourceSpecificExecutors().clear();
        logger.info("all of the executor services in CobarSqlMapClientTemplate are disposed.");
    }
}
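
A refinement worth noting for the catch block above: when awaitTermination is interrupted, it is common practice to restore the thread's interrupt status so that callers can observe it. A minimal sketch of that variant (the logger call mirrors the original, but the snippet is illustrative and not part of the source class):

try {
    executor.shutdown();
    executor.awaitTermination(5, TimeUnit.MINUTES);
} catch (InterruptedException e) {
    logger.warn("interrupted while shutting down the query executor", e);
    Thread.currentThread().interrupt(); // restore the interrupt flag for callers
}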

From source file:info.pancancer.arch3.containerProvisioner.ContainerProvisionerThreads.java

public void startThreads() throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(DEFAULT_THREADS);
    ProcessVMOrders processVMOrders = new ProcessVMOrders(this.configFile, this.options.has(this.endlessSpec));
    ProvisionVMs provisionVMs = new ProvisionVMs(this.configFile, this.options.has(this.endlessSpec),
            this.options.has(testSpec));
    CleanupVMs cleanupVMs = new CleanupVMs(this.configFile, this.options.has(this.endlessSpec));
    List<Future<?>> futures = new ArrayList<>();
    futures.add(pool.submit(processVMOrders));
    futures.add(pool.submit(provisionVMs));
    futures.add(pool.submit(cleanupVMs));
    try {
        for (Future<?> future : futures) {
            future.get();
        }
    } catch (InterruptedException | ExecutionException ex) {
        log.error(ex.toString());
        throw new RuntimeException(ex);
    } finally {
        pool.shutdown();
    }
}

From source file:com.sm.connector.client.PartitionClient.java

public T execute(List<RemoteClientImpl> list, int batchSize) {
    logger.info("execute " + filename + " threads " + list.size() + " invoker " + invoker.toString());
    int noOfThread = list.size();
    //List<RemoteClientImpl> list = createClients( urls);
    //CountDownLatch countDownLatch = new CountDownLatch(noOfThread);
    ExecutorService executor = Executors.newFixedThreadPool(noOfThread, new ThreadPoolFactory("Partition"));
    Aggregate<T> aggregate = null;
    try {
        aggregate = (Aggregate<T>) QueryUtils.createInstance(tClass);
    } catch (Exception ex) {
        throw new RuntimeException(ex.getMessage(), ex);
    }
    List<Future<Aggregate<T>>> results = new ArrayList<Future<Aggregate<T>>>(noOfThread);
    for (int i = 0; i < noOfThread; i++) {
        try {
            Aggregate<T> ft = (Aggregate<T>) QueryUtils.createInstance(tClass);
            RunThread runThread = new RunThread(i, list.get(i), batchSize, noOfThread, ft);
            Future<Aggregate<T>> t = executor.submit(runThread);
            results.add(t);
        } catch (Exception ex) {
            logger.error(ex.getMessage(), ex);
        }
    }
    for (Future<Aggregate<T>> each : results) {
        try {
            aggregate.aggregate(each.get().get());
        } catch (Exception ex) {
            logger.error(ex.getMessage(), ex);
        }
    }
    executor.shutdown();
    return aggregate.get();
}

From source file:com.brienwheeler.lib.concurrent.ExecutorsTest.java

@Test
public void testNewSingleThreadExecutorInvokeAll() throws InterruptedException, ExecutionException {
    NamedThreadFactory threadFactory = new NamedThreadFactory(THREAD_FACTORY_NAME);
    ExecutorService executor = Executors.newSingleThreadExecutor(threadFactory);

    IntCallable one = new IntCallable(1);
    IntCallable two = new IntCallable(2);
    ArrayList<Callable<Integer>> tasks = new ArrayList<Callable<Integer>>();
    tasks.add(one);
    tasks.add(two);

    List<Future<Integer>> results = executor.invokeAll(tasks);
    Assert.assertEquals(2, results.size());
    Iterator<Future<Integer>> it = results.iterator();
    Future<Integer> oneResult = it.next();
    Future<Integer> twoResult = it.next();
    Assert.assertTrue(oneResult.isDone());
    Assert.assertEquals(1, oneResult.get().intValue());
    Assert.assertEquals(2, twoResult.get().intValue());

    results = executor.invokeAll(tasks, 10, TimeUnit.MILLISECONDS);
    Assert.assertEquals(2, results.size());
    it = results.iterator();
    oneResult = it.next();
    twoResult = it.next();
    Assert.assertTrue(oneResult.isDone());
    Assert.assertEquals(1, oneResult.get().intValue());
    Assert.assertEquals(2, twoResult.get().intValue());

    executor.shutdown();
}

From source file:com.iyonger.apm.web.model.AgentManager.java

public void running(final GrinderProperties grinderProperties, SingleConsole singleConsole,
        Set<AgentIdentity> availables) {
    ExecutorService execService = null;
    try {
        // Make the agents connect to console.
        grinderProperties.setInt(GrinderProperties.CONSOLE_PORT, singleConsole.getConsolePort());
        execService = ExecutorFactory.createThreadPool("agentStarter", NUMBER_OF_THREAD);

        for (final AgentIdentity eachAgentIdentity : availables) {
            execService.submit(new Runnable() {
                @Override
                public void run() {
                    agentControllerServerDaemon.startAgent(grinderProperties, eachAgentIdentity);
                }
            });
        }

    } finally {
        if (execService != null) {
            execService.shutdown();
        }
    }
}

From source file:com.palantir.atlasdb.schema.stream.StreamTest.java

private void runConflictingTasksConcurrently(long streamId, TwoConflictingTasks twoConflictingTasks)
        throws InterruptedException {
    final CountDownLatch firstLatch = new CountDownLatch(1);
    final CountDownLatch secondLatch = new CountDownLatch(1);

    ExecutorService exec = PTExecutors.newFixedThreadPool(2);

    Future<?> firstFuture = exec.submit(() -> {
        try {
            txManager.runTaskThrowOnConflict(t -> {
                twoConflictingTasks.startFirstAndFail(t, streamId);
                letOtherTaskFinish(firstLatch, secondLatch);
                return null;
            });
            fail("Because we concurrently wrote, we should have failed with TransactionConflictException.");
        } catch (TransactionConflictException e) {
            // expected
        }
    });

    firstLatch.await();

    Future<?> secondFuture = exec.submit(
            (Runnable) () -> txManager.runTaskThrowOnConflict((TransactionTask<Void, RuntimeException>) t -> {
                twoConflictingTasks.startSecondAndFinish(t, streamId);
                return null;
            }));

    exec.shutdown();
    Futures.getUnchecked(secondFuture);

    secondLatch.countDown();
    Futures.getUnchecked(firstFuture);
}
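
Across all of the examples above, the recurring idiom is shutdown() followed by some form of wait. A minimal reusable helper, loosely modeled on the two-phase shutdown pattern described in the ExecutorService documentation (the method name and timeouts are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

final class PoolUtils {
    static void shutdownAndAwait(ExecutorService pool) {
        pool.shutdown(); // stop accepting new tasks; already-submitted tasks keep running
        try {
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                pool.shutdownNow(); // cancel tasks that are still running
                if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                    System.err.println("Pool did not terminate");
                }
            }
        } catch (InterruptedException ie) {
            pool.shutdownNow();
            Thread.currentThread().interrupt(); // preserve the interrupt status
        }
    }
}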