Example usage for java.util.concurrent ExecutorService shutdownNow

Introduction

This page collects example usages of java.util.concurrent ExecutorService.shutdownNow(), drawn from open source projects.

Prototype

List<Runnable> shutdownNow();

Document

Attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution.
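
shutdownNow() does not guarantee that anything actually stops: it typically cancels tasks via Thread.interrupt(), so a task that never checks its interrupt status may keep running. For that reason the ExecutorService javadoc recommends a two-phase shutdown: call shutdown() first, wait, and fall back to shutdownNow() only if tasks do not finish in time. A minimal sketch of that idiom follows (the helper name and 60-second timeouts are illustrative; the usual java.util and java.util.concurrent imports are assumed):

void shutdownAndAwaitTermination(ExecutorService pool) {
    pool.shutdown(); // stop accepting new tasks
    try {
        // give running tasks a chance to complete
        if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
            // interrupt running tasks; the return value lists queued tasks that never started
            List<Runnable> neverStarted = pool.shutdownNow();
            System.err.println(neverStarted.size() + " queued tasks never started");
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                System.err.println("Pool did not terminate");
            }
        }
    } catch (InterruptedException ie) {
        pool.shutdownNow(); // (re-)cancel if the current thread was interrupted
        Thread.currentThread().interrupt(); // preserve the interrupt status
    }
}

Many of the examples below use a simpler variant, calling shutdownNow() in a finally block to interrupt whatever is still running once the results have been collected.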

Usage

From source file:org.cloudifysource.esc.driver.provisioning.privateEc2.PrivateEC2CloudifyDriver.java

private MachineDetails[] doStartManagementMachines(final int numberOfManagementMachines, final long endTime,
        final TimeUnit unit) throws TimeoutException, CloudProvisioningException, PrivateEc2ParserException {
    final ExecutorService executors = Executors.newFixedThreadPool(numberOfManagementMachines);

    @SuppressWarnings("unchecked")
    final Future<MachineDetails>[] futures = (Future<MachineDetails>[]) new Future<?>[numberOfManagementMachines];

    try {
        final PrivateEc2Template template = this.privateEc2Template;
        final String managementGroup = this.cloud.getProvider().getManagementGroup();
        final ProvisioningContextImpl ctx = (ProvisioningContextImpl) new ProvisioningContextAccess()
                .getManagementProvisioiningContext();

        logger.info("ctx_threadlocal=" + ctx);

        // Call startMachine asynchronously once for each management machine
        for (int i = 0; i < numberOfManagementMachines; i++) {
            final int index = i + 1;
            futures[i] = executors.submit(new Callable<MachineDetails>() {
                @Override
                public MachineDetails call() throws Exception {
                    return createServer(template, managementGroup + index, ctx, true, endTime, unit);
                }
            });

        }

        // Wait for each of the async calls to terminate.
        int numberOfErrors = 0;
        Exception firstCreationException = null;
        final MachineDetails[] createdManagementMachines = new MachineDetails[numberOfManagementMachines];
        for (int i = 0; i < createdManagementMachines.length; i++) {
            try {
                createdManagementMachines[i] = futures[i].get(endTime - System.currentTimeMillis(),
                        TimeUnit.MILLISECONDS);
            } catch (final InterruptedException e) {
                ++numberOfErrors;
                logger.log(Level.SEVERE, "Failed to start a management machine", e);
                if (firstCreationException == null) {
                    firstCreationException = e;
                }

            } catch (final ExecutionException e) {
                ++numberOfErrors;
                logger.log(Level.SEVERE, "Failed to start a management machine", e);
                if (firstCreationException == null) {
                    firstCreationException = e;
                }
            }
        }

        // In case of a partial error, shutdown all servers that did start up
        if (numberOfErrors > 0) {
            this.handleProvisioningFailure(numberOfManagementMachines, numberOfErrors, firstCreationException,
                    createdManagementMachines);
        }

        return createdManagementMachines;
    } finally {
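        // Interrupt any createServer calls still in flight (e.g., after a timeout) before returning.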
        if (executors != null) {
            executors.shutdownNow();
        }
    }
}

From source file:org.apache.hadoop.hive.ql.metadata.Hive.java

/**
 * Trashes or deletes all files under a directory. Leaves the directory as is.
 * @param fs FileSystem to use
 * @param statuses fileStatuses of files to be deleted
 * @param conf hive configuration
 * @param purge whether to skip the trash and delete the files permanently
 * @return true if deletion successful
 * @throws IOException
 */
public static boolean trashFiles(final FileSystem fs, final FileStatus[] statuses, final Configuration conf,
        final boolean purge) throws IOException {
    boolean result = true;

    if (statuses == null || statuses.length == 0) {
        return false;
    }
    final List<Future<Boolean>> futures = new LinkedList<>();
    final ExecutorService pool = conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25) > 0
            ? Executors.newFixedThreadPool(conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25),
                    new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Delete-Thread-%d").build())
            : null;
    final SessionState parentSession = SessionState.get();
    for (final FileStatus status : statuses) {
        if (null == pool) {
            result &= FileUtils.moveToTrash(fs, status.getPath(), conf, purge);
        } else {
            futures.add(pool.submit(new Callable<Boolean>() {
                @Override
                public Boolean call() throws Exception {
                    SessionState.setCurrentSessionState(parentSession);
                    return FileUtils.moveToTrash(fs, status.getPath(), conf, purge);
                }
            }));
        }
    }
    if (null != pool) {
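        // Orderly shutdown first; the shutdownNow() below is the failure path that interrupts pending deletes.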
        pool.shutdown();
        for (Future<Boolean> future : futures) {
            try {
                result &= future.get();
            } catch (InterruptedException | ExecutionException e) {
                LOG.error("Failed to delete: ", e);
                pool.shutdownNow();
                throw new IOException(e);
            }
        }
    }
    return result;
}

From source file:info.pancancer.arch3.worker.WorkerRunnable.java

/**
 * This function will execute a workflow, based on the content of the Job object that is passed in.
 *
 * @param message
 *            - The message that will be published on the queue when the worker starts running the job.
 * @param job
 *            - The job contains information about what workflow to execute, and how.
 * @return The complete stdout and stderr from the workflow execution will be returned.
 */
private WorkflowResult launchJob(String message, Job job, String seqwareEngine, String seqwareSettingsFile,
        String dockerImage) {
    WorkflowResult workflowResult = null;
    ExecutorService exService = Executors.newFixedThreadPool(2);
    WorkflowRunner workflowRunner = new WorkflowRunner();
    try {

        Path pathToINI = writeINIFile(job);
        resultsChannel.basicPublish(this.resultsQueueName, this.resultsQueueName,
                MessageProperties.PERSISTENT_TEXT_PLAIN, message.getBytes(StandardCharsets.UTF_8));
        resultsChannel.waitForConfirms();

        //TODO: Parameterize dockerImage
        if (dockerImage == null || dockerImage.trim().isEmpty()) {
            dockerImage = "pancancer/seqware_whitestar_pancancer:latest";
        }
        CommandLine cli = new CommandLine("docker");
        cli.addArgument("run");
        List<String> args = new ArrayList<>(
                Arrays.asList("--rm", "-h", "master", "-t", "-v", "/var/run/docker.sock:/var/run/docker.sock",
                        "-v", job.getWorkflowPath() + ":/workflow", "-v", pathToINI + ":/ini", "-v",
                        "/datastore:/datastore", "-v", "/home/" + this.userName + "/.gnos:/home/ubuntu/.gnos"));
        if (seqwareSettingsFile != null) {
            args.addAll(Arrays.asList("-v", seqwareSettingsFile + ":/home/seqware/.seqware/settings"));
        }
        args.addAll(Arrays.asList(dockerImage, "seqware", "bundle", "launch", "--dir", "/workflow", "--ini",
                "/ini", "--no-metadata", "--engine", seqwareEngine));

        String[] argsArray = new String[args.size()];
        cli.addArguments(args.toArray(argsArray));

        WorkerHeartbeat heartbeat = new WorkerHeartbeat();
        heartbeat.setQueueName(this.resultsQueueName);
        // channels should not be shared between threads https://www.rabbitmq.com/api-guide.html#channel-threads
        // heartbeat.setReportingChannel(resultsChannel);
        heartbeat.setSettings(settings);
        heartbeat.setSecondsDelay(
                settings.getDouble(Constants.WORKER_HEARTBEAT_RATE, WorkerHeartbeat.DEFAULT_DELAY));
        heartbeat.setJobUuid(job.getUuid());
        heartbeat.setVmUuid(this.vmUuid);
        heartbeat.setNetworkID(this.networkAddress);
        heartbeat.setStatusSource(workflowRunner);

        long presleep = settings.getLong(Constants.WORKER_PREWORKER_SLEEP, WorkerRunnable.DEFAULT_PRESLEEP);
        long postsleep = settings.getLong(Constants.WORKER_POSTWORKER_SLEEP, WorkerRunnable.DEFAULT_POSTSLEEP);
        long presleepMillis = Base.ONE_SECOND_IN_MILLISECONDS * presleep;
        long postsleepMillis = Base.ONE_SECOND_IN_MILLISECONDS * postsleep;

        workflowRunner.setCli(cli);
        workflowRunner.setPreworkDelay(presleepMillis);
        workflowRunner.setPostworkDelay(postsleepMillis);
        // Submit both
        @SuppressWarnings("unused")
        // We will never actually do submit.get(), because the heartbeat should keep running until it is terminated by
        // exService.shutdownNow().
        Future<?> submit = exService.submit(heartbeat);
        Future<WorkflowResult> workflowResultFuture = exService.submit(workflowRunner);
        // make sure both are complete
        workflowResult = workflowResultFuture.get();
        // don't get the heartbeat if the workflow is complete already

        log.info("Docker execution result: " + workflowResult.getWorkflowStdout());
    } catch (SocketException e) {
        // This comes from trying to get the IP address.
        log.error(e.getMessage(), e);
    } catch (IOException e) {
        // This could be caused by a problem writing the file, or publishing a message to the queue.
        log.error(e.getMessage(), e);
    } catch (ExecutionException e) {
        log.error("Error executing workflow: " + e.getMessage(), e);
    } catch (InterruptedException e) {
        log.error("Workflow may have been interrupted: " + e.getMessage(), e);
    } finally {
        exService.shutdownNow();
    }

    return workflowResult;
}

From source file:eu.eexcess.europeana.recommender.PartnerConnector.java

@Override
public Document queryPartner(PartnerConfiguration partnerConfiguration, SecureUserProfile userProfile,
        PartnerdataLogger logger) throws IOException {

    // Configure
    ExecutorService threadPool = Executors.newFixedThreadPool(10);

    //        ClientConfig config = new DefaultClientConfig();
    //        config.getClasses().add(JacksonJsonProvider.class);
    //        
    final Client client = new Client(PartnerConfigurationEnum.CONFIG.getClientJacksonJson());
    queryGenerator = PartnerConfigurationEnum.CONFIG.getQueryGenerator();
    String query = getQueryGenerator().toQuery(userProfile);
    long start = System.currentTimeMillis();

    Map<String, String> valuesMap = new HashMap<String, String>();
    valuesMap.put("query", URLParamEncoder.encode(query));
    valuesMap.put("apiKey", partnerConfiguration.apiKey); // add API key
    // searchEndpoint: "http://www.europeana.eu/api/v2/search.json?wskey=${apiKey}&query=${query}"
    int numResultsRequest = 10;
    if (userProfile.numResults != null && userProfile.numResults != 0)
        numResultsRequest = userProfile.numResults;
    valuesMap.put("numResults", numResultsRequest + "");
    String searchRequest = StrSubstitutor.replace(partnerConfiguration.searchEndpoint, valuesMap);

    WebResource service = client.resource(searchRequest);
    ObjectMapper mapper = new ObjectMapper();
    Builder builder = service.accept(MediaType.APPLICATION_JSON);
    EuropeanaResponse response = builder.get(EuropeanaResponse.class);
    if (response.items.size() > numResultsRequest)
        response.items = response.items.subList(0, numResultsRequest);
    PartnerdataTracer.dumpFile(this.getClass(), partnerConfiguration, response.toString(), "service-response",
            PartnerdataTracer.FILETYPE.JSON, logger);
    client.destroy();
    if (makeDetailRequests) {
        HashMap<EuropeanaDoc, Future<Void>> futures = new HashMap<EuropeanaDoc, Future<Void>>();
        final HashMap<EuropeanaDoc, EuropeanaDocDetail> docDetails = new HashMap<EuropeanaDoc, EuropeanaDocDetail>();
        final PartnerConfiguration partnerConfigLocal = partnerConfiguration;
        final String eexcessRequestId = logger.getActLogEntry().getRequestId();
        for (int i = 0; i < response.items.size(); i++) {
            final EuropeanaDoc item = response.items.get(i);

            Future<Void> future = threadPool.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    EuropeanaDocDetail details = null;
                    try {
                        details = fetchDocumentDetails(item.id, partnerConfigLocal, eexcessRequestId);
                    } catch (EEXCESSDataTransformationException e) {
                        log.log(Level.INFO, "Error getting item with id " + item.id, e);
                        return null;
                    }
                    docDetails.put(item, details);
                    return null;
                }
            });
            futures.put(item, future);
        }

        for (EuropeanaDoc doc : futures.keySet()) {
            try {
                futures.get(doc).get(start + 15 * 500 - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
            } catch (InterruptedException | ExecutionException | TimeoutException e) {
                log.log(Level.WARNING, "Detail thread for " + doc.id + " did not respond in time", e);
            }

            //item.edmConcept.addAll(details.concepts);
            //         item.edmConcept = details.concepts; TODO: copy into doc
            //         item.edmCountry = details.edmCountry;
            //         item.edmPlace = details.places;
        }
    }

    long end = System.currentTimeMillis();

    long startXML = System.currentTimeMillis();

    Document newResponse = null;
    try {
        newResponse = this.transformJSON2XML(mapper.writeValueAsString(response));
    } catch (EEXCESSDataTransformationException e) {
        // TODO logger

        log.log(Level.INFO, "Error Transforming Json to xml", e);

    }
    long endXML = System.currentTimeMillis();
    System.out.println("millis " + (endXML - startXML) + "   " + (end - start));

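    // Abandon any detail fetches still outstanding before returning the response.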
    threadPool.shutdownNow();

    return newResponse;

}

From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessSemaphoreCluster.java

@Test
public void testKilledServerWithEnsembleProvider() throws Exception {
    final int CLIENT_QTY = 10;
    final Timing timing = new Timing();
    final String PATH = "/foo/bar/lock";

    ExecutorService executorService = Executors.newFixedThreadPool(CLIENT_QTY);
    ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(executorService);
    TestingCluster cluster = new TestingCluster(3);
    try {
        cluster.start();

        final AtomicReference<String> connectionString = new AtomicReference<String>(
                cluster.getConnectString());
        final EnsembleProvider provider = new EnsembleProvider() {
            @Override
            public void start() throws Exception {
            }

            @Override
            public String getConnectionString() {
                return connectionString.get();
            }

            @Override
            public void close() throws IOException {
            }
        };

        final Semaphore acquiredSemaphore = new Semaphore(0);
        final AtomicInteger acquireCount = new AtomicInteger(0);
        final CountDownLatch suspendedLatch = new CountDownLatch(CLIENT_QTY);
        for (int i = 0; i < CLIENT_QTY; ++i) {
            completionService.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    CuratorFramework client = CuratorFrameworkFactory.builder().ensembleProvider(provider)
                            .sessionTimeoutMs(timing.session()).connectionTimeoutMs(timing.connection())
                            .retryPolicy(new ExponentialBackoffRetry(100, 3)).build();
                    try {
                        final Semaphore suspendedSemaphore = new Semaphore(0);
                        client.getConnectionStateListenable().addListener(new ConnectionStateListener() {
                            @Override
                            public void stateChanged(CuratorFramework client, ConnectionState newState) {
                                if ((newState == ConnectionState.SUSPENDED)
                                        || (newState == ConnectionState.LOST)) {
                                    suspendedLatch.countDown();
                                    suspendedSemaphore.release();
                                }
                            }
                        });

                        client.start();

                        InterProcessSemaphoreV2 semaphore = new InterProcessSemaphoreV2(client, PATH, 1);

                        while (!Thread.currentThread().isInterrupted()) {
                            Lease lease = null;
                            try {
                                lease = semaphore.acquire();
                                acquiredSemaphore.release();
                                acquireCount.incrementAndGet();
                                suspendedSemaphore.acquire();
                            } catch (Exception e) {
                                // just retry
                            } finally {
                                if (lease != null) {
                                    acquireCount.decrementAndGet();
                                    IOUtils.closeQuietly(lease);
                                }
                            }
                        }
                    } finally {
                        IOUtils.closeQuietly(client);
                    }
                    return null;
                }
            });
        }

        Assert.assertTrue(timing.acquireSemaphore(acquiredSemaphore));
        Assert.assertEquals(1, acquireCount.get());

        cluster.close();
        timing.awaitLatch(suspendedLatch);
        timing.forWaiting().sleepABit();
        Assert.assertEquals(0, acquireCount.get());

        cluster = new TestingCluster(3);
        cluster.start();

        connectionString.set(cluster.getConnectString());
        timing.forWaiting().sleepABit();

        Assert.assertTrue(timing.acquireSemaphore(acquiredSemaphore));
        timing.forWaiting().sleepABit();
        Assert.assertEquals(1, acquireCount.get());
    } finally {
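        // shutdown() alone cannot stop the client loops; after a grace period, shutdownNow() interrupts them.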
        executorService.shutdown();
        executorService.awaitTermination(10, TimeUnit.SECONDS);
        executorService.shutdownNow();
        IOUtils.closeQuietly(cluster);
    }
}

From source file:com.dtolabs.rundeck.core.execution.impl.jsch.JschNodeExecutor.java

public NodeExecutorResult executeCommand(final ExecutionContext context, final String[] command,
        final INodeEntry node) {
    if (null == node.getHostname() || null == node.extractHostname()) {
        return NodeExecutorResultImpl.createFailure(StepFailureReason.ConfigurationFailure,
                "Hostname must be set to connect to remote node '" + node.getNodename() + "'", node);
    }

    final ExecutionListener listener = context.getExecutionListener();
    final Project project = new Project();
    AntSupport.addAntBuildListener(listener, project);

    boolean success = false;
    final ExtSSHExec sshexec;
    //perform jsch ssh command
    final NodeSSHConnectionInfo nodeAuthentication = new NodeSSHConnectionInfo(node, framework, context);
    final int timeout = nodeAuthentication.getSSHTimeout();
    try {

        sshexec = SSHTaskBuilder.build(node, command, project, context.getDataContext(), nodeAuthentication,
                context.getLoglevel(), listener);
    } catch (SSHTaskBuilder.BuilderException e) {
        return NodeExecutorResultImpl.createFailure(StepFailureReason.ConfigurationFailure, e.getMessage(),
                node);
    }

    //Sudo support

    final ExecutorService executor = Executors.newSingleThreadExecutor(new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(null, r,
                    "SudoResponder " + node.getNodename() + ": " + System.currentTimeMillis());
        }
    });

    final Future<ResponderTask.ResponderResult> responderFuture;
    final SudoResponder sudoResponder = SudoResponder.create(node, framework, context);
    Runnable responderCleanup = null;
    if (sudoResponder.isSudoEnabled() && sudoResponder.matchesCommandPattern(command[0])) {
        final DisconnectResultHandler resultHandler = new DisconnectResultHandler();

        //configure two piped i/o stream pairs, to connect to the input/output of the SSH connection
        final PipedInputStream responderInput = new PipedInputStream();
        final PipedOutputStream responderOutput = new PipedOutputStream();
        final PipedInputStream jschInput = new PipedInputStream();
        //lead pipe allows connected inputstream to close and not hang the writer to this stream
        final PipedOutputStream jschOutput = new LeadPipeOutputStream();
        try {
            responderInput.connect(jschOutput);
            jschInput.connect(responderOutput);
        } catch (IOException e) {
            return NodeExecutorResultImpl.createFailure(StepFailureReason.IOFailure, e.getMessage(), node);
        }

        //first sudo prompt responder
        ResponderTask responder = new ResponderTask(sudoResponder, responderInput, responderOutput,
                resultHandler);

        /**
         * Callable will be executed by the ExecutorService
         */
        final Callable<ResponderTask.ResponderResult> responderResultCallable;

        //if 2nd responder
        final SudoResponder sudoResponder2 = SudoResponder.create(node, framework, context, SUDO2_OPT_PREFIX,
                DEFAULT_SUDO2_PASSWORD_OPTION, DEFAULT_SUDO2_COMMAND_PATTERN);
        if (sudoResponder2.isSudoEnabled()
                && sudoResponder2.matchesCommandPattern(CLIUtils.generateArgline(null, command, false))) {
            logger.debug("Enable second sudo responder");

            sudoResponder2.setDescription("Second " + SudoResponder.DEFAULT_DESCRIPTION);
            sudoResponder.setDescription("First " + SudoResponder.DEFAULT_DESCRIPTION);

            //sequence of the first then the second sudo responder
            responderResultCallable = responder.createSequence(sudoResponder2);
        } else {
            responderResultCallable = responder;
        }

        //set up SSH execution
        sshexec.setAllocatePty(true);
        sshexec.setInputStream(jschInput);
        sshexec.setSecondaryStream(jschOutput);
        sshexec.setDisconnectHolder(resultHandler);

        responderFuture = executor.submit(responderResultCallable);
        //close streams after responder is finished
        responderCleanup = new Runnable() {
            public void run() {
                logger.debug("SudoResponder shutting down...");
                try {
                    responderInput.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
                try {
                    responderOutput.flush();
                    responderOutput.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
                //executor pool shutdown
                executor.shutdownNow();
            }
        };
        executor.submit(responderCleanup);
    } else {
        responderFuture = null;
    }
    if (null != context.getExecutionListener()) {
        context.getExecutionListener().log(3, "Starting SSH Connection: " + nodeAuthentication.getUsername()
                + "@" + node.getHostname() + " (" + node.getNodename() + ")");
    }
    String errormsg = null;
    FailureReason failureReason = null;
    try {
        sshexec.execute();
        success = true;
    } catch (BuildException e) {
        final ExtractFailure extractJschFailure = extractFailure(e, node, timeout, framework);
        errormsg = extractJschFailure.getErrormsg();
        failureReason = extractJschFailure.getReason();
        context.getExecutionListener().log(0, errormsg);
    }
    if (null != responderCleanup) {
        responderCleanup.run();
    }
    shutdownAndAwaitTermination(executor);
    if (null != responderFuture) {
        try {
            logger.debug("Waiting 5 seconds for responder future result");
            final ResponderTask.ResponderResult result = responderFuture.get(5, TimeUnit.SECONDS);
            logger.debug("Responder result: " + result);
            if (!result.isSuccess() && !result.isInterrupted()) {
                context.getExecutionListener().log(0,
                        result.getResponder().toString() + " failed: " + result.getFailureReason());
            }
        } catch (InterruptedException e) {
            //ignore
        } catch (java.util.concurrent.ExecutionException e) {
            e.printStackTrace();
        } catch (TimeoutException e) {
            //ignore
        }
    }
    final int resultCode = sshexec.getExitStatus();

    if (success) {
        return NodeExecutorResultImpl.createSuccess(node);
    } else {
        return NodeExecutorResultImpl.createFailure(failureReason, errormsg, node, resultCode);
    }
}

From source file:com.linkedin.pinot.tools.perf.QueryRunner.java

/**
 * Use multiple threads to run query at an increasing target QPS.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue at the target QPS, and start <code>numThreads</code> worker threads to fetch queries from the queue and send
 * them.
 * <p>We start with the start QPS, and keep adding delta QPS to the start QPS during the test.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>The query runner stops once all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS.
 * @param deltaQPS delta QPS.
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *                                               them, 0 means never.
 * @param numIntervalsToIncreaseQPS number of intervals to increase QPS.
 * @throws Exception
 */

public static void increasingQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, double startQPS, double deltaQPS, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics, int numIntervalsToIncreaseQPS) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
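    // Workers loop until interrupted; shutdown() here only prevents new task submissions.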
    executorService.shutdown();

    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    double currentQPS = startQPS;
    int queryIntervalMs = (int) (MILLIS_PER_SECOND / currentQPS);
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All worker threads hit exceptions and have terminated.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);
            Thread.sleep(queryIntervalMs);

            long currentTime = System.currentTimeMillis();
            if (currentTime - reportStartTime >= reportIntervalMs) {
                long timePassed = currentTime - startTime;
                reportStartTime = currentTime;
                numReportIntervals++;

                if (numReportIntervals == numIntervalsToIncreaseQPS) {
                    // Try to find the next interval.
                    double newQPS = currentQPS + deltaQPS;
                    int newQueryIntervalMs;
                    // Skip the target QPS with the same interval as the previous one.
                    while ((newQueryIntervalMs = (int) (MILLIS_PER_SECOND / newQPS)) == queryIntervalMs) {
                        newQPS += deltaQPS;
                    }
                    if (newQueryIntervalMs == 0) {
                        LOGGER.warn("Due to millisecond sleep granularity, cannot increase QPS further.");
                    } else {
                        // Find the next interval.
                        LOGGER.info(
                                "--------------------------------------------------------------------------------");
                        LOGGER.info("REPORT FOR TARGET QPS: {}", currentQPS);
                        int numQueriesExecutedInt = numQueriesExecuted.get();
                        LOGGER.info(
                                "Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                        + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                                currentQPS, timePassed, numQueriesExecutedInt,
                                numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                                totalBrokerTime.get() / (double) numQueriesExecutedInt,
                                totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
                        numReportIntervals = 0;
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);

                        currentQPS = newQPS;
                        queryIntervalMs = newQueryIntervalMs;
                        LOGGER.info(
                                "Increase target QPS to: {}, the following statistics are for the new target QPS.",
                                currentQPS);
                    }
                } else {
                    int numQueriesExecutedInt = numQueriesExecuted.get();
                    LOGGER.info(
                            "Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                    + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                            currentQPS, timePassed, numQueriesExecutedInt,
                            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                            totalBrokerTime.get() / (double) numQueriesExecutedInt,
                            totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());

                    if ((numIntervalsToReportAndClearStatistics != 0)
                            && (numReportIntervals % numIntervalsToReportAndClearStatistics == 0)) {
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);
                    }
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries getting executed.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info(
            "Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                    + "Average Broker Time: {}ms, Average Client Time: {}ms.",
            currentQPS, timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}

From source file:org.yccheok.jstock.gui.MainFrame.java

private void formWindowClosed(java.awt.event.WindowEvent evt) {//GEN-FIRST:event_formWindowClosed
    isFormWindowClosedCalled = true;

    try {
        ExecutorService _stockInfoDatabaseMetaPool = this.stockInfoDatabaseMetaPool;
        this.stockInfoDatabaseMetaPool = null;

        _stockInfoDatabaseMetaPool.shutdownNow();

        // Always be the first statement: no matter what happens, we must
        // save all the configuration files.
        this.save();

        if (this.needToSaveUserDefinedDatabase) {
            // We are having updated user database in memory.
            // Save it to disk.
            this.saveUserDefinedDatabaseAsCSV(jStockOptions.getCountry(), stockInfoDatabase);
        }

        // Hide the icon immediately.
        TrayIcon _trayIcon = trayIcon;
        if (_trayIcon != null) {
            SystemTray.getSystemTray().remove(_trayIcon);
            trayIcon = null;
        }

        dettachAllAndStopAutoCompleteJComboBox();
        this.indicatorPanel.dettachAllAndStopAutoCompleteJComboBox();

        log.info("latestNewsTask stop...");

        if (this.latestNewsTask != null) {
            this.latestNewsTask.cancel(true);
        }

        _stockInfoDatabaseMetaPool.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);

        // We are supposed to call shutdownAll to clean up all network resources.
        // However, that will cause Exception in other threads if they are still using httpclient.
        // Exception in thread "Thread-4" java.lang.IllegalStateException: Connection factory has been shutdown.
        //
        // MultiThreadedHttpConnectionManager.shutdownAll();

        log.info("Widnow is closed.");
    } catch (Exception exp) {
        log.error("Unexpected error while trying to quit application", exp);
    }

    // All the above operations are done within try block, to ensure
    // System.exit(0) will always be called.
    //
    // Final clean up.
    System.exit(0);
}

From source file:org.apache.flink.runtime.blob.BlobCachePutTest.java

/**
 * [FLINK-6020]
 * Tests that concurrent put operations will only upload the file once to the {@link BlobStore}
 * and that the files are not corrupt at any time.
 *
 * @param jobId
 *       job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType
 *       whether the BLOB should become permanent or transient
 */
private void testConcurrentPutOperations(@Nullable final JobID jobId, final BlobKey.BlobType blobType)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    final BlobStore blobStoreServer = mock(BlobStore.class);
    final BlobStore blobStoreCache = mock(BlobStore.class);

    int concurrentPutOperations = 2;
    int dataSize = 1024;

    final CountDownLatch countDownLatch = new CountDownLatch(concurrentPutOperations);
    final byte[] data = new byte[dataSize];

    final List<Path> jars;
    if (blobType == PERMANENT_BLOB) {
        // implement via JAR file upload instead:
        File tmpFile = temporaryFolder.newFile();
        FileUtils.writeByteArrayToFile(tmpFile, data);
        jars = Collections.singletonList(new Path(tmpFile.getAbsolutePath()));
    } else {
        jars = null;
    }

    Collection<CompletableFuture<BlobKey>> allFutures = new ArrayList<>(concurrentPutOperations);

    ExecutorService executor = Executors.newFixedThreadPool(concurrentPutOperations);

    try (final BlobServer server = new BlobServer(config, blobStoreServer);
            final BlobCacheService cache = new BlobCacheService(config, blobStoreCache,
                    new InetSocketAddress("localhost", server.getPort()))) {

        server.start();

        // for highAvailability
        final InetSocketAddress serverAddress = new InetSocketAddress("localhost", server.getPort());
        // uploading HA BLOBs works on BlobServer only (and, for now, via the BlobClient)

        for (int i = 0; i < concurrentPutOperations; i++) {
            final Supplier<BlobKey> callable;
            if (blobType == PERMANENT_BLOB) {
                // cannot use a blocking stream here (upload only possible via files)
                callable = () -> {
                    try {
                        List<PermanentBlobKey> keys = BlobClient.uploadFiles(serverAddress, config, jobId,
                                jars);
                        assertEquals(1, keys.size());
                        BlobKey uploadedKey = keys.get(0);
                        // check the uploaded file's contents (concurrently)
                        verifyContents(server, jobId, uploadedKey, data);
                        return uploadedKey;
                    } catch (IOException e) {
                        throw new CompletionException(new FlinkException("Could not upload blob.", e));
                    }
                };

            } else {
                callable = () -> {
                    try {
                        BlockingInputStream inputStream = new BlockingInputStream(countDownLatch, data);
                        BlobKey uploadedKey = put(cache, jobId, inputStream, blobType);
                        // check the uploaded file's contents (concurrently)
                        verifyContents(server, jobId, uploadedKey, data);
                        return uploadedKey;
                    } catch (IOException e) {
                        throw new CompletionException(new FlinkException("Could not upload blob.", e));
                    }
                };
            }
            CompletableFuture<BlobKey> putFuture = CompletableFuture.supplyAsync(callable, executor);

            allFutures.add(putFuture);
        }

        FutureUtils.ConjunctFuture<Collection<BlobKey>> conjunctFuture = FutureUtils.combineAll(allFutures);

        // wait until all operations have completed and check that no exception was thrown
        Collection<BlobKey> blobKeys = conjunctFuture.get();

        Iterator<BlobKey> blobKeyIterator = blobKeys.iterator();

        assertTrue(blobKeyIterator.hasNext());

        BlobKey blobKey = blobKeyIterator.next();

        // make sure that all blob keys are the same
        while (blobKeyIterator.hasNext()) {
            // check for unique BlobKey, but should have same hash
            verifyKeyDifferentHashEquals(blobKey, blobKeyIterator.next());
        }

        // check the uploaded file's contents
        verifyContents(server, jobId, blobKey, data);

        // check that we only uploaded the file once to the blob store
        if (blobType == PERMANENT_BLOB) {
            verify(blobStoreServer, times(1)).put(any(File.class), eq(jobId), eq(blobKey));
        } else {
            // can't really verify much in the other cases other than that the put operations should
            // work and not corrupt files
            verify(blobStoreServer, times(0)).put(any(File.class), eq(jobId), eq(blobKey));
        }
        // caches must not access the blob store (they are not allowed to write there)
        verify(blobStoreCache, times(0)).put(any(File.class), eq(jobId), eq(blobKey));
    } finally {
        executor.shutdownNow();
    }
}

From source file:org.b3log.symphony.util.Markdowns.java

/**
 * Converts the specified markdown text to HTML.
 *
 * @param markdownText the specified markdown text
 * @return converted HTML, returns an empty string "" if the specified markdown text is "" or {@code null}, returns
 * 'markdownErrorLabel' if exception
 */
public static String toHTML(final String markdownText) {
    if (Strings.isEmptyOrNull(markdownText)) {
        return "";
    }

    final String cachedHTML = getHTML(markdownText);
    if (null != cachedHTML) {
        return cachedHTML;
    }

    final ExecutorService pool = Executors.newSingleThreadExecutor();
    final long[] threadId = new long[1];

    final Callable<String> call = () -> {
        threadId[0] = Thread.currentThread().getId();

        String html = LANG_PROPS_SERVICE.get("contentRenderFailedLabel");

        if (MARKED_AVAILABLE) {
            html = toHtmlByMarked(markdownText);
            if (!StringUtils.startsWith(html, "<p>")) {
                html = "<p>" + html + "</p>";
            }
        } else {
            com.vladsch.flexmark.ast.Node document = PARSER.parse(markdownText);
            html = RENDERER.render(document);
            if (!StringUtils.startsWith(html, "<p>")) {
                html = "<p>" + html + "</p>";
            }
        }

        final Document doc = Jsoup.parse(html);
        final List<org.jsoup.nodes.Node> toRemove = new ArrayList<>();
        doc.traverse(new NodeVisitor() {
            @Override
            public void head(final org.jsoup.nodes.Node node, int depth) {
                if (node instanceof org.jsoup.nodes.TextNode) {
                    final org.jsoup.nodes.TextNode textNode = (org.jsoup.nodes.TextNode) node;
                    final org.jsoup.nodes.Node parent = textNode.parent();

                    if (parent instanceof Element) {
                        final Element parentElem = (Element) parent;

                        if (!parentElem.tagName().equals("code")) {
                            String text = textNode.getWholeText();
                            boolean nextIsBr = false;
                            final org.jsoup.nodes.Node nextSibling = textNode.nextSibling();
                            if (nextSibling instanceof Element) {
                                nextIsBr = "br".equalsIgnoreCase(((Element) nextSibling).tagName());
                            }

                            if (null != userQueryService) {
                                try {
                                    final Set<String> userNames = userQueryService.getUserNames(text);
                                    for (final String userName : userNames) {
                                        text = text.replace('@' + userName + (nextIsBr ? "" : " "),
                                                "@<a href='" + Latkes.getServePath() + "/member/" + userName
                                                        + "'>" + userName + "</a> ");
                                    }
                                    text = text.replace("@participants ",
                                            "@<a href='https://hacpai.com/article/1458053458339' class='ft-red'>participants</a> ");
                                } finally {
                                    JdbcRepository.dispose();
                                }
                            }

                            if (text.contains("@<a href=")) {
                                final List<org.jsoup.nodes.Node> nodes = Parser.parseFragment(text, parentElem,
                                        "");
                                final int index = textNode.siblingIndex();

                                parentElem.insertChildren(index, nodes);
                                toRemove.add(node);
                            } else {
                                textNode.text(Pangu.spacingText(text));
                            }
                        }
                    }
                }
            }

            @Override
            public void tail(org.jsoup.nodes.Node node, int depth) {
            }
        });

        toRemove.forEach(node -> node.remove());

        doc.select("pre>code").addClass("hljs");
        doc.select("a").forEach(a -> {
            String src = a.attr("href");
            if (!StringUtils.startsWithIgnoreCase(src, Latkes.getServePath())) {
                try {
                    src = URLEncoder.encode(src, "UTF-8");
                } catch (final Exception e) {
                }
                a.attr("href", Latkes.getServePath() + "/forward?goto=" + src);
                a.attr("target", "_blank");
            }
        });
        doc.outputSettings().prettyPrint(false);

        String ret = doc.select("body").html();
        ret = StringUtils.trim(ret);

        // cache it
        putHTML(markdownText, ret);

        return ret;
    };

    Stopwatchs.start("Md to HTML");
    try {
        final Future<String> future = pool.submit(call);

        return future.get(MD_TIMEOUT, TimeUnit.MILLISECONDS);
    } catch (final TimeoutException e) {
        LOGGER.log(Level.ERROR, "Markdown timeout [md=" + markdownText + "]");
        Callstacks.printCallstack(Level.ERROR, new String[] { "org.b3log" }, null);

        final Set<Thread> threads = Thread.getAllStackTraces().keySet();
        for (final Thread thread : threads) {
            if (thread.getId() == threadId[0]) {
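                // Thread.stop() is deprecated and unsafe in general; used here as a last resort to kill a runaway render thread that ignores interrupts.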
                thread.stop();

                break;
            }
        }
    } catch (final Exception e) {
        LOGGER.log(Level.ERROR, "Markdown failed [md=" + markdownText + "]", e);
    } finally {
        pool.shutdownNow();

        Stopwatchs.end();
    }

    return LANG_PROPS_SERVICE.get("contentRenderFailedLabel");
}