Example usage for java.util.concurrent ExecutorService awaitTermination

List of usage examples for java.util.concurrent ExecutorService awaitTermination

Introduction

On this page you can find example usages of java.util.concurrent ExecutorService awaitTermination, drawn from open-source projects.

Prototype

boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;

Document

Blocks until all tasks have completed execution after a shutdown request, or the timeout occurs, or the current thread is interrupted, whichever happens first.
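
Before the project examples below, here is a minimal self-contained sketch of the usual pattern: call shutdown() so the executor stops accepting new tasks, then call awaitTermination to block until the queued tasks drain or the timeout elapses. The pool size, task count, and 30-second timeout are arbitrary values chosen for illustration.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            pool.submit(() -> System.out.println("task " + taskId + " done"));
        }
        pool.shutdown(); // stop accepting new tasks; already-queued tasks still run
        // awaitTermination returns true once all tasks finish, or false if the timeout elapses first
        if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // interrupt tasks that are still running
        }
    }
}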

Usage

From source file:com.cloud.storage.resource.VmwareStorageProcessor.java

private void addRemoveInternetScsiTargetsToAllHosts(final boolean add,
        final List<HostInternetScsiHbaStaticTarget> targets, List<HostMO> hosts) throws Exception {
    ExecutorService executorService = Executors.newFixedThreadPool(hosts.size());

    final List<Exception> exceptions = new ArrayList<Exception>();

    for (HostMO host : hosts) {
        HostStorageSystemMO hostStorageSystem = host.getHostStorageSystemMO();

        boolean iScsiHbaConfigured = false;

        for (HostHostBusAdapter hba : hostStorageSystem.getStorageDeviceInfo().getHostBusAdapter()) {
            if (hba instanceof HostInternetScsiHba) {
                // just finding an instance of HostInternetScsiHba means that we have found at least one configured iSCSI HBA
                // at least one iSCSI HBA must be configured before a CloudStack user can use this host for iSCSI storage
                iScsiHbaConfigured = true;

                final String iScsiHbaDevice = hba.getDevice();

                final HostStorageSystemMO hss = hostStorageSystem;

                executorService.submit(new Thread() {
            @Override
                    public void run() {
                        try {
                            if (add) {
                                hss.addInternetScsiStaticTargets(iScsiHbaDevice, targets);
                            } else {
                                hss.removeInternetScsiStaticTargets(iScsiHbaDevice, targets);
                            }

                            hss.rescanHba(iScsiHbaDevice);
                            hss.rescanVmfs();
                        } catch (Exception ex) {
                            synchronized (exceptions) {
                                exceptions.add(ex);
                            }
                        }
                    }
                });
            }
        }

        if (!iScsiHbaConfigured) {
            throw new Exception("An iSCSI HBA must be configured before a host can use iSCSI storage.");
        }
    }

    executorService.shutdown();
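    // After shutdown() no new tasks are accepted; Long.MAX_VALUE minutes below is effectively an unbounded wait.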

    if (!executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.MINUTES)) {
        throw new Exception("The system timed out before completing the task 'rescanAllHosts'.");
    }

    if (exceptions.size() > 0) {
        throw new Exception(exceptions.get(0).getMessage());
    }
}

From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessSemaphoreCluster.java

@Test
public void testKilledServerWithEnsembleProvider() throws Exception {
    final int CLIENT_QTY = 10;
    final Timing timing = new Timing();
    final String PATH = "/foo/bar/lock";

    ExecutorService executorService = Executors.newFixedThreadPool(CLIENT_QTY);
    ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(executorService);
    TestingCluster cluster = new TestingCluster(3);
    try {
        cluster.start();

        final AtomicReference<String> connectionString = new AtomicReference<String>(
                cluster.getConnectString());
        final EnsembleProvider provider = new EnsembleProvider() {
            @Override
            public void start() throws Exception {
            }

            @Override
            public String getConnectionString() {
                return connectionString.get();
            }

            @Override
            public void close() throws IOException {
            }
        };

        final Semaphore acquiredSemaphore = new Semaphore(0);
        final AtomicInteger acquireCount = new AtomicInteger(0);
        final CountDownLatch suspendedLatch = new CountDownLatch(CLIENT_QTY);
        for (int i = 0; i < CLIENT_QTY; ++i) {
            completionService.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    CuratorFramework client = CuratorFrameworkFactory.builder().ensembleProvider(provider)
                            .sessionTimeoutMs(timing.session()).connectionTimeoutMs(timing.connection())
                            .retryPolicy(new ExponentialBackoffRetry(100, 3)).build();
                    try {
                        final Semaphore suspendedSemaphore = new Semaphore(0);
                        client.getConnectionStateListenable().addListener(new ConnectionStateListener() {
                            @Override
                            public void stateChanged(CuratorFramework client, ConnectionState newState) {
                                if ((newState == ConnectionState.SUSPENDED)
                                        || (newState == ConnectionState.LOST)) {
                                    suspendedLatch.countDown();
                                    suspendedSemaphore.release();
                                }
                            }
                        });

                        client.start();

                        InterProcessSemaphoreV2 semaphore = new InterProcessSemaphoreV2(client, PATH, 1);

                        while (!Thread.currentThread().isInterrupted()) {
                            Lease lease = null;
                            try {
                                lease = semaphore.acquire();
                                acquiredSemaphore.release();
                                acquireCount.incrementAndGet();
                                suspendedSemaphore.acquire();
                            } catch (Exception e) {
                                // just retry
                            } finally {
                                if (lease != null) {
                                    acquireCount.decrementAndGet();
                                    IOUtils.closeQuietly(lease);
                                }
                            }
                        }
                    } finally {
                        IOUtils.closeQuietly(client);
                    }
                    return null;
                }
            });
        }

        Assert.assertTrue(timing.acquireSemaphore(acquiredSemaphore));
        Assert.assertEquals(1, acquireCount.get());

        cluster.close();
        timing.awaitLatch(suspendedLatch);
        timing.forWaiting().sleepABit();
        Assert.assertEquals(0, acquireCount.get());

        cluster = new TestingCluster(3);
        cluster.start();

        connectionString.set(cluster.getConnectString());
        timing.forWaiting().sleepABit();

        Assert.assertTrue(timing.acquireSemaphore(acquiredSemaphore));
        timing.forWaiting().sleepABit();
        Assert.assertEquals(1, acquireCount.get());
    } finally {
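        // Graceful teardown: stop new submissions, give running tasks 10 seconds, then force-cancel any stragglers.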
        executorService.shutdown();
        executorService.awaitTermination(10, TimeUnit.SECONDS);
        executorService.shutdownNow();
        IOUtils.closeQuietly(cluster);
    }
}

From source file:edu.cmu.tetrad.search.TestIndTestConditionalCorrelation.java

public void test8() {
    int NTHREADS = 100;
    long start = System.currentTimeMillis();

    ExecutorService executor = Executors.newFixedThreadPool(NTHREADS);
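    // MyRunnable (not shown here) is assumed to be a Runnable defined elsewhere in the original source file.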
    for (int i = 0; i < 5000; i++) {
        Runnable worker = new MyRunnable(10000000L + i);
        executor.execute(worker);
    }
    // This will make the executor accept no new tasks
    // and let all queued tasks finish
    executor.shutdown();
    try {
        // Wait until all tasks are finished
        executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        System.out.println("Finished all threads");
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    long stop = System.currentTimeMillis();

    System.out.println((stop - start) + " ms");
}

From source file:edu.cmu.lti.oaqa.bioasq.concept.rerank.scorers.GoPubMedConceptRetrievalScorer.java

@Override
public void prepare(JCas jcas) throws AnalysisEngineProcessException {
    List<String> tokens = TypeUtil.getOrderedTokens(jcas).stream().map(Token::getCoveredText)
            .map(name -> name.replaceAll("[^A-Za-z0-9_\\-]+", " ").trim())
            .filter(name -> !name.isEmpty() && !stoplist.contains(name.toLowerCase())).collect(toList());
    List<String> wIdConceptNames = TypeUtil
            .getConcepts(jcas).stream().filter(concept -> !TypeUtil.getConceptIds(concept).isEmpty())
            .map(TypeUtil::getConceptNames).map(names -> names.stream()
                    .map(GoPubMedConceptRetrievalScorer::normalizeQuoteName).collect(joining(" ")))
            .collect(toList());
    List<String> woIdConceptNames = TypeUtil
            .getConcepts(jcas).stream().filter(concept -> TypeUtil.getConceptIds(concept).isEmpty())
            .map(TypeUtil::getConceptNames).map(names -> names.stream()
                    .map(GoPubMedConceptRetrievalScorer::normalizeQuoteName).collect(joining(" ")))
            .collect(toList());
    List<String> cmentionNames = TypeUtil.getConceptMentions(jcas).stream().map(ConceptMention::getMatchedName)
            .map(GoPubMedConceptRetrievalScorer::normalizeQuoteName).collect(toList());
    ExecutorService es = Executors.newCachedThreadPool();
    // execute against all tokens
    String concatenatedTokens = String.join(" ", tokens);
    LOG.debug("Query string: {}", concatenatedTokens);
    for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
        es.execute(() -> {
            try {
                List<ConceptSearchResult> results = BioASQUtil.searchOntology(service, jcas, concatenatedTokens,
                        pages, hits, ontology);
                String conf = "tokens_concatenated@" + ontology.name();
                updateFeatureTable(results, conf);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    }
    // execute against concatenated concept names
    String concatenatedConceptNames = String.join(" ", Iterables.concat(wIdConceptNames, woIdConceptNames));
    LOG.debug("Query string: {}", concatenatedConceptNames);
    for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
        es.execute(() -> {
            try {
                List<ConceptSearchResult> results = BioASQUtil.searchOntology(service, jcas,
                        concatenatedConceptNames, pages, hits, ontology);
                String conf = "concept_names_concatenated@" + ontology.name();
                updateFeatureTable(results, conf);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    }
    // execute against concatenated concept mentions
    String concatenatedCmentions = String.join(" ", cmentionNames);
    LOG.debug("Query string: {}", concatenatedCmentions);
    for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
        es.execute(() -> {
            try {
                List<ConceptSearchResult> results = BioASQUtil.searchOntology(service, jcas,
                        concatenatedCmentions, pages, hits, ontology);
                String conf = "cmention_names_concatenated@" + ontology.name();
                updateFeatureTable(results, conf);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    }
    // execute against each concept name that has an ID
    for (String conceptName : wIdConceptNames) {
        LOG.debug("Query string: {}", conceptName);
        for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
            es.execute(() -> {
                try {
                    List<ConceptSearchResult> results = BioASQUtil.searchOntology(service, jcas, conceptName,
                            pages, hits, ontology);
                    String conf = "w_id_concept_names_individual@" + ontology.name();
                    updateFeatureTable(results, conf);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            });
        }
    }
    // execute against each concept name that has no ID
    for (String conceptName : woIdConceptNames) {
        LOG.debug("Query string: {}", conceptName);
        for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
            es.execute(() -> {
                try {
                    List<ConceptSearchResult> results = BioASQUtil.searchOntology(service, jcas, conceptName,
                            pages, hits, ontology);
                    String conf = "wo_id_concept_names_individual@" + ontology.name();
                    updateFeatureTable(results, conf);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            });
        }
    }
    // execute against each concept mention
    for (String cmentionName : cmentionNames) {
        LOG.debug("Query string: {}", cmentionName);
        for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
            es.execute(() -> {
                try {
                    List<ConceptSearchResult> results = BioASQUtil.searchOntology(service, jcas, cmentionName,
                            pages, hits, ontology);
                    String conf = "cmention_names_individual@" + ontology.name();
                    updateFeatureTable(results, conf);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            });
        }
    }
    es.shutdown();
    try {
        if (!es.awaitTermination(timeout, TimeUnit.MINUTES)) {
            LOG.warn("Timeout occurs for one or some concept retrieval services.");
        }
    } catch (InterruptedException e) {
        throw new AnalysisEngineProcessException(e);
    }
    confs = uri2conf2score.columnKeySet();
}

From source file:org.apache.hadoop.hbase.regionserver.HLog.java

private static List<Path> splitLog(final Path rootDir, final FileStatus[] logfiles, final FileSystem fs,
        final HBaseConfiguration conf) throws IOException {
    final Map<byte[], WriterAndPath> logWriters = Collections
            .synchronizedMap(new TreeMap<byte[], WriterAndPath>(Bytes.BYTES_COMPARATOR));
    List<Path> splits = null;

    // Number of threads to use when log splitting to rewrite the logs.
    // More means faster but bigger mem consumption.
    int logWriterThreads = conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3);

    // Number of logs to read concurrently when log splitting.
    // More means faster but bigger mem consumption
    int concurrentLogReads = conf.getInt("hbase.regionserver.hlog.splitlog.reader.threads", 3);
    // Is append supported?
    boolean append = isAppend(conf);
    try {
        int maxSteps = Double.valueOf(Math.ceil((logfiles.length * 1.0) / concurrentLogReads)).intValue();
        for (int step = 0; step < maxSteps; step++) {
            final Map<byte[], LinkedList<HLogEntry>> logEntries = new TreeMap<byte[], LinkedList<HLogEntry>>(
                    Bytes.BYTES_COMPARATOR);
            // Stop at logfiles.length when it's the last step
            int endIndex = step == maxSteps - 1 ? logfiles.length
                    : step * concurrentLogReads + concurrentLogReads;
            for (int i = (step * concurrentLogReads); i < endIndex; i++) {
                // Check for possibly empty file. With appends, currently Hadoop 
                // reports a zero length even if the file has been sync'd. Revisit if
                // HADOOP-4751 is committed.
                long length = logfiles[i].getLen();
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Splitting hlog " + (i + 1) + " of " + logfiles.length + ": "
                            + logfiles[i].getPath() + ", length=" + logfiles[i].getLen());
                }
                recoverLog(fs, logfiles[i].getPath(), append);
                SequenceFile.Reader in = null;
                int count = 0;
                try {
                    in = new SequenceFile.Reader(fs, logfiles[i].getPath(), conf);
                    try {
                        HLogKey key = newKey(conf);
                        KeyValue val = new KeyValue();
                        while (in.next(key, val)) {
                            byte[] regionName = key.getRegionName();
                            LinkedList<HLogEntry> queue = logEntries.get(regionName);
                            if (queue == null) {
                                queue = new LinkedList<HLogEntry>();
                                LOG.debug("Adding queue for " + Bytes.toStringBinary(regionName));
                                logEntries.put(regionName, queue);
                            }
                            HLogEntry hle = new HLogEntry(val, key);
                            queue.push(hle);
                            count++;
                            // Make the key and value new each time; otherwise same instance
                            // is used over and over.
                            key = newKey(conf);
                            val = new KeyValue();
                        }
                        LOG.debug("Pushed=" + count + " entries from " + logfiles[i].getPath());
                    } catch (IOException e) {
                        LOG.debug("IOE Pushed=" + count + " entries from " + logfiles[i].getPath());
                        e = RemoteExceptionHandler.checkIOException(e);
                        if (!(e instanceof EOFException)) {
                            LOG.warn("Exception processing " + logfiles[i].getPath()
                                    + " -- continuing. Possible DATA LOSS!", e);
                        }
                    }
                } catch (IOException e) {
                    if (length <= 0) {
                        LOG.warn("Empty hlog, continuing: " + logfiles[i] + " count=" + count, e);
                        continue;
                    }
                    throw e;
                } finally {
                    try {
                        if (in != null) {
                            in.close();
                        }
                    } catch (IOException e) {
                        LOG.warn("Close in finally threw exception -- continuing", e);
                    }
                    // Delete the input file now so we do not replay edits. We could
                    // have gotten here because of an exception. If so, probably
                    // nothing we can do about it. Replaying it, it could work but we
                    // could be stuck replaying for ever. Just continue though we
                    // could have lost some edits.
                    fs.delete(logfiles[i].getPath(), true);
                }
            }
            ExecutorService threadPool = Executors.newFixedThreadPool(logWriterThreads);
            for (final byte[] key : logEntries.keySet()) {
                Thread thread = new Thread(Bytes.toStringBinary(key)) {
                    @Override
                    public void run() {
                        LinkedList<HLogEntry> entries = logEntries.get(key);
                        LOG.debug("Thread got " + entries.size() + " to process");
                        long threadTime = System.currentTimeMillis();
                        try {
                            int count = 0;
                            // Items were added to the linkedlist oldest first. Pull them
                            // out in that order.
                            for (ListIterator<HLogEntry> i = entries.listIterator(entries.size()); i
                                    .hasPrevious();) {
                                HLogEntry logEntry = i.previous();
                                WriterAndPath wap = logWriters.get(key);
                                if (wap == null) {
                                    Path logfile = new Path(
                                            HRegion.getRegionDir(
                                                    HTableDescriptor.getTableDir(rootDir,
                                                            logEntry.getKey().getTablename()),
                                                    HRegionInfo.encodeRegionName(key)),
                                            HREGION_OLDLOGFILE_NAME);
                                    Path oldlogfile = null;
                                    SequenceFile.Reader old = null;
                                    if (fs.exists(logfile)) {
                                        FileStatus stat = fs.getFileStatus(logfile);
                                        if (stat.getLen() <= 0) {
                                            LOG.warn("Old hlog file " + logfile + " is zero "
                                                    + "length. Deleting existing file");
                                            fs.delete(logfile, false);
                                        } else {
                                            LOG.warn("Old hlog file " + logfile + " already "
                                                    + "exists. Copying existing file to new file");
                                            oldlogfile = new Path(logfile.toString() + ".old");
                                            fs.rename(logfile, oldlogfile);
                                            old = new SequenceFile.Reader(fs, oldlogfile, conf);
                                        }
                                    }
                                    SequenceFile.Writer w = SequenceFile.createWriter(fs, conf, logfile,
                                            getKeyClass(conf), KeyValue.class, getCompressionType(conf));
                                    wap = new WriterAndPath(logfile, w);
                                    logWriters.put(key, wap);
                                    if (LOG.isDebugEnabled()) {
                                        LOG.debug("Creating new hlog file writer for path " + logfile
                                                + " and region " + Bytes.toStringBinary(key));
                                    }

                                    if (old != null) {
                                        // Copy from existing log file
                                        HLogKey oldkey = newKey(conf);
                                        KeyValue oldval = new KeyValue();
                                        for (; old.next(oldkey, oldval); count++) {
                                            if (LOG.isDebugEnabled() && count > 0 && count % 10000 == 0) {
                                                LOG.debug("Copied " + count + " edits");
                                            }
                                            w.append(oldkey, oldval);
                                        }
                                        old.close();
                                        fs.delete(oldlogfile, true);
                                    }
                                }
                                wap.w.append(logEntry.getKey(), logEntry.getEdit());
                                count++;
                            }
                            if (LOG.isDebugEnabled()) {
                                LOG.debug("Applied " + count + " total edits to " + Bytes.toStringBinary(key)
                                        + " in " + (System.currentTimeMillis() - threadTime) + "ms");
                            }
                        } catch (IOException e) {
                            e = RemoteExceptionHandler.checkIOException(e);
                            LOG.warn("Got while writing region " + Bytes.toStringBinary(key) + " log " + e);
                            e.printStackTrace();
                        }
                    }
                };
                threadPool.execute(thread);
            }
            threadPool.shutdown();
            // Wait for all threads to terminate
            try {
                for (int i = 0; !threadPool.awaitTermination(5, TimeUnit.SECONDS); i++) {
                    LOG.debug("Waiting for hlog writers to terminate, iteration #" + i);
                }
            } catch (InterruptedException ex) {
                LOG.warn("Hlog writers were interrupted, possible data loss!");
            }
        }
    } finally {
        splits = new ArrayList<Path>(logWriters.size());
        for (WriterAndPath wap : logWriters.values()) {
            wap.w.close();
            LOG.debug("Closed " + wap.p);
            splits.add(wap.p);
        }
    }
    return splits;
}

From source file:org.apache.reef.io.network.NetworkServiceTest.java

/**
 * NetworkService messaging rate benchmark.
 */
@Test
public void testMessagingNetworkServiceRateDisjoint() throws Exception {

    Assume.assumeFalse("Use log level INFO to run benchmarking", LOG.isLoggable(Level.FINEST));

    LOG.log(Level.FINEST, name.getMethodName());

    final IdentifierFactory factory = new StringIdentifierFactory();

    final Injector injector = Tang.Factory.getTang().newInjector();
    injector.bindVolatileParameter(NameServerParameters.NameServerIdentifierFactory.class, factory);
    injector.bindVolatileInstance(LocalAddressProvider.class, this.localAddressProvider);

    try (final NameServer server = injector.getInstance(NameServer.class)) {
        final int nameServerPort = server.getPort();

        final BlockingQueue<Object> barrier = new LinkedBlockingQueue<>();

        final int numThreads = 4;
        final int size = 2000;
        final int numMessages = 300000 / (Math.max(1, size / 512));
        final int totalNumMessages = numMessages * numThreads;

        final ExecutorService e = Executors.newCachedThreadPool();
        for (int t = 0; t < numThreads; t++) {
            final int tt = t;

            e.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        final Monitor monitor = new Monitor();

                        // network service
                        final String name2 = "task2-" + tt;
                        final String name1 = "task1-" + tt;
                        final Configuration nameResolverConf = Tang.Factory.getTang()
                                .newConfigurationBuilder(NameResolverConfiguration.CONF
                                        .set(NameResolverConfiguration.NAME_SERVER_HOSTNAME, localAddress)
                                        .set(NameResolverConfiguration.NAME_SERVICE_PORT, nameServerPort)
                                        .build())
                                .build();

                        final Injector injector = Tang.Factory.getTang().newInjector(nameResolverConf);

                        LOG.log(Level.FINEST, "=== Test network service receiver start");
                        LOG.log(Level.FINEST, "=== Test network service sender start");
                        try (final NameResolver nameResolver = injector.getInstance(NameResolver.class)) {
                            injector.bindVolatileParameter(
                                    NetworkServiceParameters.NetworkServiceIdentifierFactory.class, factory);
                            injector.bindVolatileInstance(NameResolver.class, nameResolver);
                            injector.bindVolatileParameter(NetworkServiceParameters.NetworkServiceCodec.class,
                                    new StringCodec());
                            injector.bindVolatileParameter(
                                    NetworkServiceParameters.NetworkServiceTransportFactory.class,
                                    injector.getInstance(MessagingTransportFactory.class));
                            injector.bindVolatileParameter(
                                    NetworkServiceParameters.NetworkServiceExceptionHandler.class,
                                    new ExceptionHandler());

                            final Injector injectorNs2 = injector.forkInjector();
                            injectorNs2.bindVolatileParameter(
                                    NetworkServiceParameters.NetworkServiceHandler.class,
                                    new MessageHandler<String>(name2, monitor, numMessages));
                            final NetworkService<String> ns2 = injectorNs2.getInstance(NetworkService.class);

                            final Injector injectorNs1 = injector.forkInjector();
                            injectorNs1.bindVolatileParameter(
                                    NetworkServiceParameters.NetworkServiceHandler.class,
                                    new MessageHandler<String>(name1, null, 0));
                            final NetworkService<String> ns1 = injectorNs1.getInstance(NetworkService.class);

                            ns2.registerId(factory.getNewInstance(name2));
                            final int port2 = ns2.getTransport().getListeningPort();
                            server.register(factory.getNewInstance(name2),
                                    new InetSocketAddress(localAddress, port2));

                            ns1.registerId(factory.getNewInstance(name1));
                            final int port1 = ns1.getTransport().getListeningPort();
                            server.register(factory.getNewInstance(name1),
                                    new InetSocketAddress(localAddress, port1));

                            final Identifier destId = factory.getNewInstance(name2);
                            final String message = StringUtils.repeat('1', size);

                            try (Connection<String> conn = ns1.newConnection(destId)) {
                                conn.open();
                                for (int i = 0; i < numMessages; i++) {
                                    conn.write(message);
                                }
                                monitor.mwait();
                            } catch (final NetworkException e) {
                                e.printStackTrace();
                                throw new RuntimeException(e);
                            }
                        }
                    } catch (final Exception e) {
                        e.printStackTrace();
                        throw new RuntimeException(e);
                    }
                }
            });
        }

        // start and time
        final long start = System.currentTimeMillis();
        final Object ignore = new Object();
        for (int i = 0; i < numThreads; i++) {
            barrier.add(ignore);
        }
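        // Stop accepting new tasks and give the sender threads up to 100 seconds to finish.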
        e.shutdown();
        e.awaitTermination(100, TimeUnit.SECONDS);
        final long end = System.currentTimeMillis();

        final double runtime = ((double) end - start) / 1000;
        LOG.log(Level.FINEST, "size: " + size + "; messages/s: " + totalNumMessages / runtime
                + " bandwidth(bytes/s): " + ((double) totalNumMessages * 2 * size) / runtime); // x2 for unicode chars
    }
}

From source file:org.yccheok.jstock.gui.MainFrame.java

private void formWindowClosed(java.awt.event.WindowEvent evt) {//GEN-FIRST:event_formWindowClosed
    isFormWindowClosedCalled = true;

    try {
        ExecutorService _stockInfoDatabaseMetaPool = this.stockInfoDatabaseMetaPool;
        this.stockInfoDatabaseMetaPool = null;

        _stockInfoDatabaseMetaPool.shutdownNow();

        // This should always be among the first statements. No matter what happens, we must
        // save all the configuration files.
        this.save();

        if (this.needToSaveUserDefinedDatabase) {
            // We are having updated user database in memory.
            // Save it to disk.
            this.saveUserDefinedDatabaseAsCSV(jStockOptions.getCountry(), stockInfoDatabase);
        }

        // Hide the icon immediately.
        TrayIcon _trayIcon = trayIcon;
        if (_trayIcon != null) {
            SystemTray.getSystemTray().remove(_trayIcon);
            trayIcon = null;
        }

        dettachAllAndStopAutoCompleteJComboBox();
        this.indicatorPanel.dettachAllAndStopAutoCompleteJComboBox();

        log.info("latestNewsTask stop...");

        if (this.latestNewsTask != null) {
            this.latestNewsTask.cancel(true);
        }

        _stockInfoDatabaseMetaPool.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);

        // We are supposed to call shutdownAll to clean up all network resources.
        // However, that will cause Exception in other threads if they are still using httpclient.
        // Exception in thread "Thread-4" java.lang.IllegalStateException: Connection factory has been shutdown.
        //
        // MultiThreadedHttpConnectionManager.shutdownAll();

        log.info("Widnow is closed.");
    } catch (Exception exp) {
        log.error("Unexpected error while trying to quit application", exp);
    }

    // All the above operations are done within the try block, to ensure
    // System.exit(0) will always be called.
    //
    // Final clean up.
    System.exit(0);
}

From source file:org.apache.geode.internal.cache.GemFireCacheImpl.java

public void shutDownAll() {
    if (LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER) {
        try {
            CacheObserverHolder.getInstance().beforeShutdownAll();
        } finally {
            LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
        }
    }
    if (!this.isShutDownAll.compareAndSet(false, true)) {
        // shutdown is already being performed by another thread
        try {
            this.shutDownAllFinished.await();
        } catch (InterruptedException e) {
            logger.debug("Shutdown all interrupted while waiting for another thread to do the shutDownAll");
            Thread.currentThread().interrupt();
        }
        return;
    }
    synchronized (GemFireCacheImpl.class) {
        try {
            boolean testIGE = Boolean.getBoolean("TestInternalGemFireError");

            if (testIGE) {
                InternalGemFireError assErr = new InternalGemFireError(
                        LocalizedStrings.GemFireCache_UNEXPECTED_EXCEPTION.toLocalizedString());
                throw assErr;
            }

            // Bug 44031 requires that multithreaded shutdownAll be grouped
            // by root region. However, the shutDownAllDuringRecovery.conf test revealed that
            // we have to close colocated child regions first.
            // Now check all the PRs: if any has a colocate-with attribute, sort all the
            // PRs by colocation relationship and close them sequentially; otherwise still
            // group them by root region.
            TreeMap<String, Map<String, PartitionedRegion>> prTrees = getPRTrees();
            if (prTrees.size() > 1 && shutdownAllPoolSize != 1) {
                ExecutorService es = getShutdownAllExecutorService(prTrees.size());
                for (final Map<String, PartitionedRegion> prSubMap : prTrees.values()) {
                    es.execute(new Runnable() {
                        public void run() {
                            ConnectionTable.threadWantsSharedResources();
                            shutdownSubTreeGracefully(prSubMap);
                        }
                    });
                } // for each root
                es.shutdown();
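                // Integer.MAX_VALUE seconds is effectively an unbounded wait for the shutdown tasks.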
                try {
                    es.awaitTermination(Integer.MAX_VALUE, TimeUnit.SECONDS);
                } catch (InterruptedException e) {
                    logger.debug("Shutdown all interrupted while waiting for PRs to be shutdown gracefully.");
                }

            } else {
                for (final Map<String, PartitionedRegion> prSubMap : prTrees.values()) {
                    shutdownSubTreeGracefully(prSubMap);
                }
            }

            close("Shut down all members", null, false, true);
        } finally {
            this.shutDownAllFinished.countDown();
        }
    }
}

From source file:org.yccheok.jstock.gui.JStock.java

private void formWindowClosed(java.awt.event.WindowEvent evt) {//GEN-FIRST:event_formWindowClosed
    isFormWindowClosedCalled = true;

    try {
        ExecutorService _stockInfoDatabaseMetaPool = this.stockInfoDatabaseMetaPool;
        this.stockInfoDatabaseMetaPool = null;

        _stockInfoDatabaseMetaPool.shutdownNow();

        // This should always be among the first statements. No matter what happens, we must
        // save all the configuration files.
        this.save();

        if (this.needToSaveUserDefinedDatabase) {
            // We are having updated user database in memory.
            // Save it to disk.
            this.saveUserDefinedDatabaseAsCSV(jStockOptions.getCountry(), stockInfoDatabase);
        }

        // Hide the icon immediately.
        TrayIcon _trayIcon = trayIcon;
        if (_trayIcon != null) {
            SystemTray.getSystemTray().remove(_trayIcon);
            trayIcon = null;
        }

        dettachAllAndStopAutoCompleteJComboBox();
        this.indicatorPanel.dettachAllAndStopAutoCompleteJComboBox();

        log.info("latestNewsTask stop...");

        if (this.latestNewsTask != null) {
            this.latestNewsTask.cancel(true);
        }

        _stockInfoDatabaseMetaPool.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);

        // We are supposed to call shutdownAll to clean up all network resources.
        // However, that will cause Exception in other threads if they are still using httpclient.
        // Exception in thread "Thread-4" java.lang.IllegalStateException: Connection factory has been shutdown.
        //
        // MultiThreadedHttpConnectionManager.shutdownAll();

        log.info("Widnow is closed.");
    } catch (Exception exp) {
        log.error("Unexpected error while trying to quit application", exp);
    }

    Platform.exit();

    // All the above operations are done within the try block, to ensure
    // System.exit(0) will always be called.
    //
    // Final clean up.
    System.exit(0);
}

From source file:MSUmpire.DIA.DIAPack.java

public void TargetedExtractionQuant(boolean export, FragmentLibManager libManager, float ReSearchProb,
        float RTWindow) throws IOException, SQLException, XmlPullParserException {
    if (IDsummary.GetMappedPepIonList().isEmpty()) {
        Logger.getRootLogger().error("There is no peptide ion for targeted re-extraction.");
        return;
    }
    parameter.RT_window_Targeted = RTWindow;
    GenerateClusterScanNomapping();
    ExecutorService executorPool = null;

    //Targeted re-extraction scoring
    TScoring = new TargetMatchScoring(Filename, libManager.LibID);

    if (parameter.UseOldVersion) {
        TScoring.SetUseOldVersion();
    }
    Logger.getRootLogger().info("No. of identified peptide ions: " + IDsummary.GetPepIonList().size());
    Logger.getRootLogger().info("No. of mapped peptide ions: " + IDsummary.GetMappedPepIonList().size());
    ArrayList<PepIonID> SearchList = new ArrayList<>();
    // For each peptide ion in targeted re-extraction, determine whether to re-search it given a re-search probability threshold
    for (PepIonID pepIonID : IDsummary.GetMappedPepIonList().values()) {
        if (libManager.PeptideFragmentLib.containsKey(pepIonID.GetKey())
                && libManager.GetFragmentLib(pepIonID.GetKey()).FragmentGroups.size() >= 3
                && pepIonID.TargetedProbability() < ReSearchProb) {
            pepIonID.CreateQuantInstance(parameter.MaxNoPeakCluster);
            pepIonID.MS1PeakClusters = new ArrayList<>();
            pepIonID.MS2UnfragPeakClusters = new ArrayList<>();
            pepIonID.UScoreProbability_MS1 = 0f;
            pepIonID.MS1AlignmentProbability = 0f;
            pepIonID.UScoreProbability_MS2 = 0f;
            pepIonID.MS2AlignmentProbability = 0f;
            pepIonID.TPPModSeq = "Ext";
            SearchList.add(pepIonID);
        }
    }
    Logger.getRootLogger().info("No. of searchable peptide ions: " + SearchList.size());

    for (LCMSPeakDIAMS2 DIAWindow : DIAWindows) {
        Logger.getRootLogger().info("Assigning clusters for peak groups in MS2 isolation window:"
                + FilenameUtils.getBaseName(DIAWindow.ScanCollectionName));

        if (!DIAWindow.ReadPeakCluster() || !DIAWindow.ReadPrecursorFragmentClu2Cur()) {
            Logger.getRootLogger().warn("Reading results for " + DIAWindow.ScanCollectionName + " failed");
            continue;
        }

        executorPool = Executors.newFixedThreadPool(NoCPUs);
        // For each target peptide ion
        for (PepIonID pepIonID : SearchList) {
            if (DIAWindow.DIA_MZ_Range.getX() <= pepIonID.NeutralPrecursorMz()
                    && DIAWindow.DIA_MZ_Range.getY() >= pepIonID.NeutralPrecursorMz()) {
                // If the spectrum of the peptide ion in the spectral library has at least three fragment peaks
                if (libManager.GetFragmentLib(pepIonID.GetKey()).FragmentGroups.size() >= 3) {
                    //U-score spectral matching
                    UmpireSpecLibMatch matchunit = new UmpireSpecLibMatch(MS1FeatureMap, DIAWindow, pepIonID,
                            libManager.GetFragmentLib(pepIonID.GetKey()),
                            libManager.GetDecoyFragmentLib(pepIonID.GetKey()), parameter);
                    executorPool.execute(matchunit);
                    TScoring.libTargetMatches.add(matchunit);
                } else {
                    Logger.getRootLogger()
                            .warn("skipping " + pepIonID.GetKey() + ", it has only "
                                    + libManager.GetFragmentLib(pepIonID.GetKey()).FragmentGroups.size()
                                    + " matched fragments");
                }
            }
        }

        // For each identified peptide ion, calculate its U-score for LDA training
        for (PepIonID pepIonID : IDsummary.GetPepIonList().values()) {
            if (libManager.PeptideFragmentLib.containsKey(pepIonID.GetKey())
                    && DIAWindow.DIA_MZ_Range.getX() <= pepIonID.NeutralPrecursorMz()
                    && DIAWindow.DIA_MZ_Range.getY() >= pepIonID.NeutralPrecursorMz()) {
                // If the spectrum of the peptide ion in the spectral library has at least three fragment peaks
                if (libManager.GetFragmentLib(pepIonID.GetKey()).FragmentGroups.size() >= 3) {
                    //U-score spectral matching
                    UmpireSpecLibMatch matchunit = new UmpireSpecLibMatch(MS1FeatureMap, DIAWindow, pepIonID,
                            libManager.GetFragmentLib(pepIonID.GetKey()),
                            libManager.GetDecoyFragmentLib(pepIonID.GetKey()), parameter);
                    matchunit.IdentifiedPeptideIon = true;
                    executorPool.execute(matchunit);
                    TScoring.libIDMatches.add(matchunit);
                } else {
                    Logger.getRootLogger()
                            .warn("skipping " + pepIonID.GetKey() + ", it has only "
                                    + libManager.GetFragmentLib(pepIonID.GetKey()).FragmentGroups.size()
                                    + " matched fragments");
                }
            }
        }
        executorPool.shutdown();
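        // The pool stops accepting work; Long.MAX_VALUE nanoseconds below is effectively an unbounded wait.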

        try {
            executorPool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            Logger.getRootLogger().info("interrupted..");
        }
        DIAWindow.ClearAllPeaks();
    }

    Logger.getRootLogger().info("Removing entries with no precursor signal hits: total target entries: "
            + TScoring.libTargetMatches.size());
    ArrayList<UmpireSpecLibMatch> newlist = new ArrayList<>();
    for (UmpireSpecLibMatch match : TScoring.libTargetMatches) {
        if (!match.DecoyHits.isEmpty() || !match.TargetHits.isEmpty()) {
            newlist.add(match);
        }
    }
    TScoring.libTargetMatches = newlist;
    Logger.getRootLogger().info("Remaining entries: " + TScoring.libTargetMatches.size());

    // U-score and probability calculation
    TScoring.Process();
    TargetHitPepXMLWriter pepxml = new TargetHitPepXMLWriter(GetiProphExtPepxml(libManager.LibID),
            IDsummary.FastaPath, IDsummary.DecoyTag, TScoring);
    TScoring = null;
    executorPool = Executors.newFixedThreadPool(NoCPUs);

    //Assign precursor peak cluster, extract fragments and do quantification
    for (PepIonID pepIonID : IDsummary.GetMappedPepIonList().values()) {
        DIAAssignQuantUnit quantunit = new DIAAssignQuantUnit(pepIonID, MS1FeatureMap, parameter);
        executorPool.execute(quantunit);
    }
    for (PepIonID pepIonID : IDsummary.GetPepIonList().values()) {
        DIAAssignQuantUnit quantunit = new DIAAssignQuantUnit(pepIonID, MS1FeatureMap, parameter);
        executorPool.execute(quantunit);
    }
    executorPool.shutdown();

    try {
        executorPool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
        Logger.getRootLogger().info("interrupted..");
    }

    if (export) {
        ExportID();
    }
}