Example usage for java.util.concurrent ExecutorService shutdownNow

Introduction

This page collects example usages of java.util.concurrent ExecutorService.shutdownNow from open-source projects.

Prototype

List<Runnable> shutdownNow();

Document

Attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution.
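
Before the real-world examples below, here is a minimal sketch of the two-phase shutdown pattern recommended by the ExecutorService Javadoc: first shutdown() to stop accepting new tasks, then shutdownNow() only if the pool does not drain in time. The class name and timeout values are illustrative.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownNowExample {
    // Two-phase shutdown: disable new tasks, wait, then cancel what remains.
    static void shutdownAndAwaitTermination(ExecutorService pool) {
        pool.shutdown(); // no new tasks accepted; queued tasks still run
        try {
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                // Interrupt running tasks; the returned list holds the
                // tasks that were still queued and never started.
                List<Runnable> neverRan = pool.shutdownNow();
                System.err.println(neverRan.size() + " queued task(s) were dropped");
                if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                    System.err.println("Pool did not terminate");
                }
            }
        } catch (InterruptedException ie) {
            // (Re-)cancel if the current thread was interrupted while waiting
            pool.shutdownNow();
            Thread.currentThread().interrupt(); // preserve interrupt status
        }
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        pool.submit(() -> System.out.println("task ran"));
        shutdownAndAwaitTermination(pool);
    }
}

Note that shutdownNow only makes a best effort: it typically interrupts worker threads, so tasks that never check their interrupt status may keep running.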

Usage

From source file:org.yccheok.jstock.gui.JStock.java

private void formWindowClosed(java.awt.event.WindowEvent evt) {//GEN-FIRST:event_formWindowClosed
    isFormWindowClosedCalled = true;

    try {
        ExecutorService _stockInfoDatabaseMetaPool = this.stockInfoDatabaseMetaPool;
        this.stockInfoDatabaseMetaPool = null;

        _stockInfoDatabaseMetaPool.shutdownNow();

        // Always be the first statement. No matter what happens, we must
        // save all the configuration files.
        this.save();

        if (this.needToSaveUserDefinedDatabase) {
            // We are having updated user database in memory.
            // Save it to disk.
            this.saveUserDefinedDatabaseAsCSV(jStockOptions.getCountry(), stockInfoDatabase);
        }

        // Hide the icon immediately.
        TrayIcon _trayIcon = trayIcon;
        if (_trayIcon != null) {
            SystemTray.getSystemTray().remove(_trayIcon);
            trayIcon = null;
        }

        dettachAllAndStopAutoCompleteJComboBox();
        this.indicatorPanel.dettachAllAndStopAutoCompleteJComboBox();

        log.info("latestNewsTask stop...");

        if (this.latestNewsTask != null) {
            this.latestNewsTask.cancel(true);
        }

        _stockInfoDatabaseMetaPool.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);

        // We are supposed to call shutdownAll to clean up all network resources.
        // However, that would cause an Exception in other threads if they are still using httpclient.
        // Exception in thread "Thread-4" java.lang.IllegalStateException: Connection factory has been shutdown.
        //
        // MultiThreadedHttpConnectionManager.shutdownAll();

        log.info("Window is closed.");
    } catch (Exception exp) {
        log.error("Unexpected error while trying to quit application", exp);
    }

    Platform.exit();

    // All the above operations are done within the try block, to ensure
    // System.exit(0) will always be called.
    //
    // Final clean up.
    System.exit(0);
}

From source file:gdsc.smlm.ij.plugins.CreateData.java

private void showSummary(List<? extends FluorophoreSequenceModel> fluorophores,
        List<LocalisationModel> localisations) {
    IJ.showStatus("Calculating statistics ...");

    createSummaryTable();

    Statistics[] stats = new Statistics[NAMES.length];
    for (int i = 0; i < stats.length; i++) {
        stats[i] = (settings.showHistograms || alwaysRemoveOutliers[i]) ? new StoredDataStatistics()
                : new Statistics();
    }

    // Use the localisations that were drawn to create the sampled on/off times
    rebuildNeighbours(localisations);

    // Assume that there is at least one localisation
    LocalisationModel first = localisations.get(0);
    int currentId = first.getId(); // The current localisation
    int lastT = first.getTime(); // The last time this localisation was on
    int blinks = 0; // Number of blinks
    int currentT = 0; // On-time of current pulse
    double signal = 0;
    final double centreOffset = settings.size * 0.5;
    // Used to convert the sampled times in frames into seconds
    final double framesPerSecond = 1000.0 / settings.exposureTime;
    for (LocalisationModel l : localisations) {
        if (l.getData() == null)
            System.out.println("oops");
        final double noise = (l.getData() != null) ? l.getData()[1] : 1;
        final double intensity = (l.getData() != null) ? l.getData()[4] : l.getIntensity();
        final double intensityInPhotons = intensity / settings.getTotalGain();
        final double snr = intensity / noise;
        stats[SIGNAL].add(intensityInPhotons);
        stats[NOISE].add(noise / settings.getTotalGain());
        stats[SNR].add(snr);
        // Average intensity only from continuous spots.
        // The continuous flag is for spots that have all the simulation steps continuously on.
        // Try using the neighbour pointers instead to get the 'sampled' continuous spots.
        //if (l.isContinuous())
        if (l.getNext() != null && l.getPrevious() != null) {
            stats[SIGNAL_CONTINUOUS].add(intensityInPhotons);
            stats[SNR_CONTINUOUS].add(snr);
        }

        int id = l.getId();
        // Check if this is a new fluorophore
        if (currentId != id) {
            // Add previous fluorophore
            stats[SAMPLED_BLINKS].add(blinks);
            stats[SAMPLED_T_ON].add(currentT / framesPerSecond);
            stats[TOTAL_SIGNAL].add(signal);

            // Reset
            blinks = 0;
            currentT = 1;
            currentId = id;
            signal = intensityInPhotons;
        } else {
            signal += intensityInPhotons;
            // Check if the current fluorophore pulse is broken (i.e. a blink)
            if (l.getTime() - 1 > lastT) {
                blinks++;
                stats[SAMPLED_T_ON].add(currentT / framesPerSecond);
                currentT = 1;
                stats[SAMPLED_T_OFF].add(((l.getTime() - 1) - lastT) / framesPerSecond);
            } else {
                // Continuous on-time
                currentT++;
            }
        }

        lastT = l.getTime();

        stats[X].add((l.getX() - centreOffset) * settings.pixelPitch);
        stats[Y].add((l.getY() - centreOffset) * settings.pixelPitch);
        stats[Z].add(l.getZ() * settings.pixelPitch);
    }
    // Final fluorophore
    stats[SAMPLED_BLINKS].add(blinks);
    stats[SAMPLED_T_ON].add(currentT / framesPerSecond);
    stats[TOTAL_SIGNAL].add(signal);

    if (fluorophores != null) {
        for (FluorophoreSequenceModel f : fluorophores) {
            stats[BLINKS].add(f.getNumberOfBlinks());
            // On-time
            for (double t : f.getOnTimes())
                stats[T_ON].add(t);
            // Off-time
            for (double t : f.getOffTimes())
                stats[T_OFF].add(t);
        }
    } else {
        // show no blinks
        stats[BLINKS].add(0);
        stats[T_ON].add(1);
        //stats[T_OFF].add(0);
    }

    if (results != null) {
        final double gain = settings.getTotalGain();
        final boolean emCCD = (settings.getEmGain() > 1);
        for (PeakResult r : results.getResults()) {
            stats[PRECISION].add(r.getPrecision(settings.pixelPitch, gain, emCCD));
            stats[WIDTH].add(r.getSD());
        }
        // Compute density per frame. Multithread for speed
        if (settings.densityRadius > 0) {
            IJ.showStatus("Calculating density ...");
            ExecutorService threadPool = Executors.newFixedThreadPool(Prefs.getThreads());
            List<Future<?>> futures = new LinkedList<Future<?>>();
            final ArrayList<float[]> coords = new ArrayList<float[]>();
            int t = results.getResults().get(0).peak;
            final Statistics densityStats = stats[DENSITY];
            final float radius = (float) (settings.densityRadius * getHWHM());
            final Rectangle bounds = results.getBounds();
            currentIndex = 0;
            finalIndex = results.getResults().get(results.getResults().size() - 1).peak;
            // Store the density for each result.
            int[] allDensity = new int[results.size()];
            int allIndex = 0;
            for (PeakResult r : results.getResults()) {
                if (t != r.peak) {
                    allIndex += runDensityCalculation(threadPool, futures, coords, densityStats, radius, bounds,
                            allDensity, allIndex);
                }
                coords.add(new float[] { r.getXPosition(), r.getYPosition() });
                t = r.peak;
            }
            runDensityCalculation(threadPool, futures, coords, densityStats, radius, bounds, allDensity,
                    allIndex);
            Utils.waitForCompletion(futures);
            threadPool.shutdownNow();
            threadPool = null;
            IJ.showProgress(1);

            // Split results into singles (density = 0) and clustered (density > 0)
            MemoryPeakResults singles = copyMemoryPeakResults("Singles");
            MemoryPeakResults clustered = copyMemoryPeakResults("Clustered");

            int i = 0;
            for (PeakResult r : results.getResults()) {
                // Store density in the original value field
                r.origValue = allDensity[i];
                if (allDensity[i++] == 0)
                    singles.add(r);
                else
                    clustered.add(r);
            }
        }
    }

    StringBuilder sb = new StringBuilder();
    sb.append(datasetNumber).append("\t");
    sb.append((fluorophores == null) ? localisations.size() : fluorophores.size()).append("\t");
    sb.append(stats[SAMPLED_BLINKS].getN() + (int) stats[SAMPLED_BLINKS].getSum()).append("\t");
    sb.append(localisations.size()).append("\t");
    sb.append(Utils.rounded(getHWHM(), 4)).append("\t");
    double s = getPsfSD();
    sb.append(Utils.rounded(s, 4)).append("\t");
    s *= settings.pixelPitch;
    final double sa = PSFCalculator.squarePixelAdjustment(s, settings.pixelPitch) / settings.pixelPitch;
    sb.append(Utils.rounded(sa, 4)).append("\t");
    int nStats = (imagePSF) ? stats.length - 2 : stats.length;
    for (int i = 0; i < nStats; i++) {
        double centre = (alwaysRemoveOutliers[i])
                ? ((StoredDataStatistics) stats[i]).getStatistics().getPercentile(50)
                : stats[i].getMean();
        sb.append(Utils.rounded(centre, 4)).append("\t");
    }
    if (java.awt.GraphicsEnvironment.isHeadless()) {
        IJ.log(sb.toString());
        return;
    } else {
        summaryTable.append(sb.toString());
    }

    // Show histograms
    if (settings.showHistograms) {
        IJ.showStatus("Calculating histograms ...");
        boolean[] chosenHistograms = getChoosenHistograms();

        int[] idList = new int[NAMES.length];
        int count = 0;

        boolean requireRetile = false;
        for (int i = 0; i < NAMES.length; i++) {
            if (chosenHistograms[i]) {
                idList[count++] = Utils.showHistogram(TITLE, (StoredDataStatistics) stats[i], NAMES[i],
                        (integerDisplay[i]) ? 1 : 0,
                        (settings.removeOutliers || alwaysRemoveOutliers[i]) ? 2 : 0,
                        settings.histogramBins * ((integerDisplay[i]) ? 100 : 1));
                requireRetile = requireRetile || Utils.isNewWindow();
            }
        }

        if (count > 0 && requireRetile) {
            idList = Arrays.copyOf(idList, count);
            new WindowOrganiser().tileWindows(idList);
        }
    }
    IJ.showStatus("");
}

From source file:eu.eexcess.ddb.recommender.PartnerConnector.java

@Override
public Document queryPartner(PartnerConfiguration partnerConfiguration, SecureUserProfile userProfile,
        PartnerdataLogger logger) throws IOException {

    //          final String url = "https://api.deutsche-digitale-bibliothek.de/items/OAXO2AGT7YH35YYHN3YKBXJMEI77W3FF/view";
    final String key = PartnerConfigurationEnum.CONFIG.getPartnerConfiguration().apiKey;

    // get XML data via HTTP request header authentication
    /*
    // get JSON data via HTTP request header authentication
    String httpJsonResult = httpGet(url, new HashMap<String, String>() {
       {
           put("Authorization", "OAuth oauth_consumer_key=\"" + key + "\"");
           put("Accept", "application/json");
       }
    });
    logger.info(httpJsonResult); // print results
            
    // get JSON data via query parameter authentication
    // remember: use URL encoded Strings online -> URLEncoder.encode(s, enc)
    String queryJsonURL = url + "?oauth_consumer_key=" + URLEncoder.encode(key, "UTF-8");
    String queryJsonResult = httpGet(queryJsonURL, null);
    logger.info(queryJsonResult); // print results
    */

    //
    //
    // EUROPEANA Impl
    //
    //

    // Configure
    ExecutorService threadPool = Executors.newFixedThreadPool(10);

    //        ClientConfig config = new DefaultClientConfig();
    //        config.getClasses().add(JacksonJsonProvider.class);
    //        
    //final Client client = new Client(PartnerConfigurationEnum.CONFIG.getClientJacksonJson());
    try {
        queryGenerator = (QueryGeneratorApi) Class.forName(partnerConfiguration.queryGeneratorClass)
                .newInstance();
    } catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
        // TODO add logger!
        log.log(Level.INFO, "Error getting Query Generator", e);

    }

    String query = getQueryGenerator().toQuery(userProfile);
    long start = System.currentTimeMillis();

    Map<String, String> valuesMap = new HashMap<String, String>();
    valuesMap.put("query", URLParamEncoder.encode(query));
    int numResultsRequest = 10;
    if (userProfile.numResults != null && userProfile.numResults != 0)
        numResultsRequest = userProfile.numResults;
    valuesMap.put("numResults", numResultsRequest + "");
    String searchRequest = StrSubstitutor.replace(partnerConfiguration.searchEndpoint, valuesMap);
    String httpJSONResult = httpGet(searchRequest, new HashMap<String, String>() {
        /**
        * 
        */
        private static final long serialVersionUID = -5911519512191023737L;

        {
            put("Authorization", "OAuth oauth_consumer_key=\"" + key + "\"");
            //                put("Accept", "application/xml");
            put("Accept", "application/json");

        }
    });
    log.info(httpJSONResult); // print results
    //ObjectMapper mapper = new ObjectMapper();
    //DDBResponse ddbResponse = mapper.readValue(httpJSONResult, DDBResponse.class);

    /*
            JAXBContext jaxbContext = JAXBContext.newInstance(DDBDocument.class);
            Unmarshaller jaxbUnmarshaller = jaxbContext.createUnmarshaller();
            ZBWDocument zbwResponse = (DDBDocument) jaxbUnmarshaller.unmarshal(respStringReader);
            for (ZBWDocumentHit hit : zbwResponse.hits.hit) {
               try{
    */
    /*
    WebResource service = client.resource(searchRequest);
    ObjectMapper mapper = new ObjectMapper();
    Builder builder = service.accept(MediaType.APPLICATION_JSON);
    EuropeanaResponse response= builder.get(EuropeanaResponse.class);
    if (response.items.size() > numResultsRequest)
       response.items = response.items.subList(0, numResultsRequest);
    PartnerdataTracer.dumpFile(this.getClass(), partnerConfiguration, response.toString(), "service-response", PartnerdataTracer.FILETYPE.JSON);
    client.destroy();     
    if (makeDetailRequests) 
    {
       HashMap<EuropeanaDoc, Future<Void>> futures= new HashMap<EuropeanaDoc, Future<Void>>();
       final HashMap<EuropeanaDoc, EuropeanaDocDetail> docDetails= new HashMap<EuropeanaDoc,EuropeanaDocDetail>();
       final PartnerConfiguration partnerConfigLocal = partnerConfiguration;
       for (int i = 0;i<response.items.size()  ;i++) {
      final EuropeanaDoc item = response.items.get(i);
              
      Future<Void> future = threadPool.submit(new Callable<Void>() {
       @Override
       public Void call() throws Exception {
          EuropeanaDocDetail details = null;
          try {
             details = fetchDocumentDetails( item.id, partnerConfigLocal);
          } catch (EEXCESSDataTransformationException e) {
             logger.log(Level.INFO,"Error getting item with id"+item.id,e);
             return null;
          }      
          docDetails.put(item,details);
          return null;
       }
    });
    futures.put(item, future);
     }
            
     for (EuropeanaDoc doc : futures.keySet()) {
    try {
       futures.get(doc).get(start + 15 * 500 - System.currentTimeMillis(),
             TimeUnit.MILLISECONDS);
    } catch (InterruptedException | ExecutionException
          | TimeoutException e) {
       logger.log(Level.WARNING,"Detail thread for "+doc.id+" did not responses in time",e);
    }
            
            
    //item.edmConcept.addAll(details.concepts);
    //         item.edmConcept = details.concepts; TODO: copy into doc
    //         item.edmCountry = details.edmCountry;
    //         item.edmPlace = details.places;
     }
    }
    */
    long end = System.currentTimeMillis();

    long startXML = System.currentTimeMillis();

    Document newResponse = null;
    try {
        newResponse = this.transformJSON2XML(httpJSONResult);
    } catch (EEXCESSDataTransformationException e) {
        // TODO logger

        log.log(Level.INFO, "Error Transforming Json to xml", e);

    }
    long endXML = System.currentTimeMillis();
    System.out.println("millis " + (endXML - startXML) + "   " + (end - start));

    threadPool.shutdownNow();

    return newResponse;

}

From source file:io.nats.client.ITClusterTest.java

@Test
public void testHotSpotReconnect() throws InterruptedException {
    int numClients = 100;
    ExecutorService executor = Executors.newFixedThreadPool(numClients,
            new NatsThreadFactory("testhotspotreconnect"));

    final BlockingQueue<String> rch = new LinkedBlockingQueue<String>();
    final BlockingQueue<Integer> dch = new LinkedBlockingQueue<Integer>();
    final AtomicBoolean shutdown = new AtomicBoolean(false);
    try (NatsServer s1 = runServerOnPort(1222)) {
        try (NatsServer s2 = runServerOnPort(1224)) {
            try (NatsServer s3 = runServerOnPort(1226)) {

                final class NATSClient implements Runnable {
                    Connection nc = null;
                    final AtomicInteger numReconnects = new AtomicInteger(0);
                    final AtomicInteger numDisconnects = new AtomicInteger(0);
                    String currentUrl = null;
                    final AtomicInteger instance = new AtomicInteger(-1);

                    final Options opts;

                    NATSClient(int inst) {
                        this.instance.set(inst);
                        opts = defaultOptions();
                        opts.servers = Nats.processUrlArray(testServers);

                        opts.disconnectedCb = new DisconnectedCallback() {
                            public void onDisconnect(ConnectionEvent event) {
                                numDisconnects.incrementAndGet();
                                try {
                                    dch.put(instance.get());
                                } catch (InterruptedException e) {
                                    e.printStackTrace();
                                }
                                nc.setDisconnectedCallback(null);
                            }
                        };
                        opts.reconnectedCb = new ReconnectedCallback() {
                            public void onReconnect(ConnectionEvent event) {
                                numReconnects.incrementAndGet();
                                currentUrl = nc.getConnectedUrl();
                                try {
                                    rch.put(currentUrl);
                                } catch (InterruptedException e) {
                                    e.printStackTrace();
                                }
                            }
                        };
                    }

                    @Override
                    public void run() {
                        try {
                            nc = opts.connect();
                            assertTrue(!nc.isClosed());
                            assertNotNull(nc.getConnectedUrl());
                            currentUrl = nc.getConnectedUrl();
                            // System.err.println("Instance " + instance + " connected to " +
                            // currentUrl);
                            while (!shutdown.get()) {
                                sleep(10);
                            }
                            nc.close();
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                    }

                    public synchronized boolean isConnected() {
                        return (nc != null && !nc.isClosed());
                    }

                    public void shutdown() {
                        shutdown.set(true);
                    }
                }

                List<NATSClient> tasks = new ArrayList<NATSClient>(numClients);
                for (int i = 0; i < numClients; i++) {
                    NATSClient task = new NATSClient(i);
                    tasks.add(task);
                    executor.submit(task);
                }

                Map<String, Integer> cs = new HashMap<String, Integer>();

                int numReady = 0;
                while (numReady < numClients) {
                    numReady = 0;
                    for (NATSClient cli : tasks) {
                        if (cli.isConnected()) {
                            numReady++;
                        }
                    }
                    sleep(100);
                }

                s1.shutdown();
                sleep(1000);

                int disconnected = 0;
                // wait for disconnects
                while (dch.size() > 0 && disconnected < numClients) {
                    Integer instance = -1;
                    instance = dch.poll(5, TimeUnit.SECONDS);
                    assertNotNull("timed out waiting for disconnect signal", instance);
                    disconnected++;
                }
                assertTrue(disconnected > 0);

                int reconnected = 0;
                // wait for reconnects
                for (int i = 0; i < disconnected; i++) {
                    String url = null;
                    while (rch.size() == 0) {
                        sleep(50);
                    }
                    url = rch.poll(5, TimeUnit.SECONDS);
                    assertNotNull("timed out waiting for reconnect signal", url);
                    reconnected++;
                    Integer count = cs.get(url);
                    if (count != null) {
                        cs.put(url, ++count);
                    } else {
                        cs.put(url, 1);
                    }
                }

                for (NATSClient client : tasks) {
                    client.shutdown();
                }
                executor.shutdownNow();
                assertTrue(executor.awaitTermination(2, TimeUnit.SECONDS));

                assertEquals(disconnected, reconnected);

                int numServers = 2;

                assertEquals(numServers, cs.size());

                int expected = numClients / numServers;
                // We expect a 40 percent variance
                int var = (int) ((float) expected * 0.40);

                int delta = Math.abs(cs.get(testServers[2]) - cs.get(testServers[4]));
                // System.err.printf("var = %d, delta = %d\n", var, delta);
                if (delta > var) {
                    String str = String.format("Connected clients to servers out of range: %d/%d", delta, var);
                    fail(str);
                }
            }
        }
    }
}

From source file:org.opencb.opencga.storage.hadoop.variant.HadoopVariantStorageEngine.java

@Override
public List<StoragePipelineResult> index(List<URI> inputFiles, URI outdirUri, boolean doExtract,
        boolean doTransform, boolean doLoad) throws StorageEngineException {

    if (inputFiles.size() == 1 || !doLoad) {
        return super.index(inputFiles, outdirUri, doExtract, doTransform, doLoad);
    }

    final boolean doArchive;
    final boolean doMerge;

    if (!getOptions().containsKey(HADOOP_LOAD_ARCHIVE) && !getOptions().containsKey(HADOOP_LOAD_VARIANT)) {
        doArchive = true;
        doMerge = true;
    } else {
        doArchive = getOptions().getBoolean(HADOOP_LOAD_ARCHIVE, false);
        doMerge = getOptions().getBoolean(HADOOP_LOAD_VARIANT, false);
    }

    if (!doArchive && !doMerge) {
        return Collections.emptyList();
    }

    final int nThreadArchive = getOptions().getInt(HADOOP_LOAD_ARCHIVE_BATCH_SIZE, 2);
    ObjectMap extraOptions = new ObjectMap().append(HADOOP_LOAD_ARCHIVE, true).append(HADOOP_LOAD_VARIANT,
            false);

    final List<StoragePipelineResult> concurrResult = new CopyOnWriteArrayList<>();
    List<VariantStoragePipeline> etlList = new ArrayList<>();
    ExecutorService executorService = Executors.newFixedThreadPool(nThreadArchive, r -> {
        Thread t = new Thread(r);
        t.setDaemon(true);
        return t;
    }); // Set Daemon for quick shutdown !!!
    LinkedList<Future<StoragePipelineResult>> futures = new LinkedList<>();
    List<Integer> indexedFiles = new CopyOnWriteArrayList<>();
    for (URI inputFile : inputFiles) {
        //Provide a connected storageETL if load is required.

        VariantStoragePipeline storageETL = newStorageETL(doLoad, new ObjectMap(extraOptions));
        futures.add(executorService.submit(() -> {
            try {
                Thread.currentThread().setName(Paths.get(inputFile).getFileName().toString());
                StoragePipelineResult storagePipelineResult = new StoragePipelineResult(inputFile);
                URI nextUri = inputFile;
                boolean error = false;
                if (doTransform) {
                    try {
                        nextUri = transformFile(storageETL, storagePipelineResult, concurrResult, nextUri,
                                outdirUri);

                    } catch (StoragePipelineException ignore) {
                        //Ignore here. Errors are stored in the ETLResult
                        error = true;
                    }
                }

                if (doLoad && doArchive && !error) {
                    try {
                        loadFile(storageETL, storagePipelineResult, concurrResult, nextUri, outdirUri);
                    } catch (StoragePipelineException ignore) {
                        //Ignore here. Errors are stored in the ETLResult
                        error = true;
                    }
                }
                if (doLoad && !error) {
                    // Read the VariantSource to get the original fileName (it may be different from the
                    // nextUri.getFileName if this is the transformed file)
                    String fileName = storageETL.readVariantSource(nextUri, null).getFileName();
                    // Get latest study configuration from DB, might have been changed since
                    StudyConfiguration studyConfiguration = storageETL.getStudyConfiguration();
                    // Get file ID for the provided file name
                    Integer fileId = studyConfiguration.getFileIds().get(fileName);
                    indexedFiles.add(fileId);
                }
                return storagePipelineResult;
            } finally {
                try {
                    storageETL.close();
                } catch (StorageEngineException e) {
                    logger.error("Issue closing DB connection ", e);
                }
            }
        }));
    }

    executorService.shutdown();

    int errors = 0;
    try {
        while (!futures.isEmpty()) {
            executorService.awaitTermination(1, TimeUnit.MINUTES);
            // Check values
            if (futures.peek().isDone() || futures.peek().isCancelled()) {
                Future<StoragePipelineResult> first = futures.pop();
                StoragePipelineResult result = first.get(1, TimeUnit.MINUTES);
                if (result.getTransformError() != null) {
                    //TODO: Handle errors. Retry?
                    errors++;
                    result.getTransformError().printStackTrace();
                } else if (result.getLoadError() != null) {
                    //TODO: Handle errors. Retry?
                    errors++;
                    result.getLoadError().printStackTrace();
                }
                concurrResult.add(result);
            }
        }
        if (errors > 0) {
            throw new StoragePipelineException("Errors found", concurrResult);
        }

        if (doLoad && doMerge) {
            int batchMergeSize = getOptions().getInt(HADOOP_LOAD_VARIANT_BATCH_SIZE, 10);
            // Overwrite default ID list with user provided IDs
            List<Integer> pendingFiles = indexedFiles;
            if (getOptions().containsKey(HADOOP_LOAD_VARIANT_PENDING_FILES)) {
                List<Integer> idList = getOptions().getAsIntegerList(HADOOP_LOAD_VARIANT_PENDING_FILES);
                if (!idList.isEmpty()) {
                    // only if the list is not empty
                    pendingFiles = idList;
                }
            }

            List<Integer> filesToMerge = new ArrayList<>(batchMergeSize);
            int i = 0;
            for (Iterator<Integer> iterator = pendingFiles.iterator(); iterator.hasNext(); i++) {
                Integer indexedFile = iterator.next();
                filesToMerge.add(indexedFile);
                if (filesToMerge.size() == batchMergeSize || !iterator.hasNext()) {
                    extraOptions = new ObjectMap().append(HADOOP_LOAD_ARCHIVE, false)
                            .append(HADOOP_LOAD_VARIANT, true)
                            .append(HADOOP_LOAD_VARIANT_PENDING_FILES, filesToMerge);

                    AbstractHadoopVariantStoragePipeline localEtl = newStorageETL(doLoad, extraOptions);

                    int studyId = getOptions().getInt(Options.STUDY_ID.key());
                    localEtl.preLoad(inputFiles.get(i), outdirUri);
                    localEtl.merge(studyId, filesToMerge);
                    localEtl.postLoad(inputFiles.get(i), outdirUri);
                    filesToMerge.clear();
                }
            }

            annotateLoadedFiles(outdirUri, inputFiles, concurrResult, getOptions());
            calculateStatsForLoadedFiles(outdirUri, inputFiles, concurrResult, getOptions());

        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // preserve the interrupt status
        throw new StoragePipelineException("Interrupted!", e, concurrResult);
    } catch (ExecutionException e) {
        throw new StoragePipelineException("Execution exception!", e, concurrResult);
    } catch (TimeoutException e) {
        throw new StoragePipelineException("Timeout Exception", e, concurrResult);
    } finally {
        if (!executorService.isShutdown()) {
            try {
                executorService.shutdownNow();
            } catch (Exception e) {
                logger.error("Problems shutting executor service down", e);
            }
        }
    }
    return concurrResult;
}

From source file:org.ugent.caagt.genestacker.search.bb.BranchAndBound.java

@Override
public ParetoFrontier runSearch(long runtimeLimit, int numThreads) throws GenestackerException {

    // create list to store previously generated schemes
    previousSchemes = new ArrayList<>();
    // create set to store previously generated scheme alternatives
    previousSchemeAlternatives = new HashSet<>();
    // create queue for schemes to be considered
    schemeQueue = new LinkedList<>();

    // reset ids
    SeedLotNode.resetIDs();
    PlantNode.resetIDs();
    CrossingNode.resetIDs();
    CrossingSchemeAlternatives.resetIDs();

    // create thread pool and completion service for scheme extension

    // inform user about number of cross workers used (verbose)
    logger.info(VERBOSE, "Number of threads used for extending partial schemes: {}", numThreads);
    ExecutorService extPool = Executors.newFixedThreadPool(numThreads);
    CompletionService<List<CrossingSchemeAlternatives>> extCompletionService = new ExecutorCompletionService<>(
            extPool);

    // initialize solution manager
    BranchAndBoundSolutionManager solutionManager = new BranchAndBoundSolutionManager(dominatesRelation,
            ideotype, popSizeTools, maxNumSeedsPerCrossing, constraints, heuristics, seedLotFilters,
            homozygousIdeotypeParents);
    // set initial Pareto frontier, if any
    if (initialFrontier != null) {
        solutionManager.setFrontier(initialFrontier);
    }

    // apply initial plant filter, if any
    if (initialPlantFilter != null) {

        // verbose
        logger.info(VERBOSE, "Filtering initial plants ...");

        initialPlants = initialPlantFilter.filter(initialPlants);

        //verbose
        logger.info(VERBOSE, "Retained {} initial plants (see below)", initialPlants.size());
        for (Plant p : initialPlants) {
            logger.info(VERBOSE, "\n{}", p);
        }

    }

    // create initial partial schemes from initial plants
    List<CrossingSchemeAlternatives> initialParentSchemes = new ArrayList<>();
    for (Plant p : initialPlants) {
        // create uniform seed lot
        SeedLot sl = new SeedLot(p.getGenotype());
        // create seedlot node
        SeedLotNode sln = new SeedLotNode(sl, 0);
        // create and attach plant node
        PlantNode pn = new PlantNode(p, 0, sln);
        // create partial crossing scheme
        CrossingScheme s = new CrossingScheme(popSizeTools, pn);
        initialParentSchemes.add(new CrossingSchemeAlternatives(s));
    }
    registerNewSchemes(initialParentSchemes, solutionManager);

    // now iteratively cross schemes with previous schemes to create larger schemes,
    // until all solutions have been inspected or pruned
    while (!runtimeLimitExceeded() && !schemeQueue.isEmpty()) {

        // get next scheme from queue
        CrossingSchemeAlternatives cur = schemeQueue.poll();

        // fire progression message (verbose)
        logger.info(VERBOSE, "num solutions: {} ### prog: {} ({}) ### cur scheme: {} - T = {}",
                solutionManager.getFrontier().getNumSchemes(), previousSchemes.size(), schemeQueue.size(), cur,
                TimeFormatting.formatTime(System.currentTimeMillis() - getStart()));
        // debug: create diagram of current scheme (all alternatives)
        if (logger.isDebugEnabled()) {
            for (int i = 0; i < cur.nrOfAlternatives(); i++) {
                logger.debug("Cur scheme (alternative {}): {}", i + 1,
                        writeDiagram(cur.getAlternatives().get(i)));
            }
            // wait for enter
            DebugUtils.waitForEnter();
        }

        // delete possible pruned alternatives
        Iterator<CrossingScheme> it = cur.iterator();
        int numForCrossing = 0;
        int numForSelfing = 0;
        while (it.hasNext()) {
            CrossingScheme alt = it.next();
            // check if alternative should be removed
            if (previousSchemeAlternatives.contains(alt)) {
                // equivalent scheme alternative generated before, delete current alternative
                it.remove();
            } else if (solutionManager.pruneDequeueScheme(alt)) {
                // prune dequeued scheme (e.g. by the optimal subscheme heuristic)
                it.remove();
            } else {
                // check pruning for crossing/selfing
                boolean pruneCross = solutionManager.pruneCrossCurrentScheme(alt);
                boolean pruneSelf = solutionManager.pruneSelfCurrentScheme(alt);
                if (pruneCross && pruneSelf) {
                    // alternative not useful anymore
                    it.remove();
                } else {
                    // count nr of alternatives useful for crossing or selfing
                    if (!pruneCross) {
                        numForCrossing++;
                    }
                    if (!pruneSelf) {
                        numForSelfing++;
                    }
                }
            }
        }

        if (cur.nrOfAlternatives() > 0) {

            // if useful, self current scheme
            if (numForSelfing > 0) {
                registerNewSchemes(selfScheme(cur, map, solutionManager), solutionManager);
            }

            // if useful, cross with previous schemes
            if (numForCrossing > 0) {
                // launch workers to combine with previous schemes
                Iterator<CrossingSchemeAlternatives> previousSchemesIterator = previousSchemes.iterator();
                for (int w = 0; w < numThreads; w++) {
                    // submit worker
                    extCompletionService
                            .submit(new CrossWorker(previousSchemesIterator, cur, solutionManager, map));
                    // very verbose
                    logger.info(VERY_VERBOSE, "Launched cross worker {} of {}", w + 1, numThreads);
                }
                // handle results of completed workers in the order in which they complete
                for (int w = 0; w < numThreads; w++) {
                    try {
                        // wait for next worker to complete and register its solutions
                        registerNewSchemes(extCompletionService.take().get(), solutionManager);
                        // very verbose
                        logger.info(VERY_VERBOSE, "{}/{} cross workers finished", w + 1, numThreads);
                    } catch (InterruptedException | ExecutionException ex) {
                        // something went wrong with the cross workers
                        throw new SearchException("An error occurred while extending the current scheme.", ex);
                    }
                }
            }

            // put the scheme in the sorted set with previously considered schemes (only done if useful for later crossings)
            previousSchemes.add(cur);
            // register scheme alternatives
            previousSchemeAlternatives.addAll(cur.getAlternatives());
        }
    }

    if (runtimeLimitExceeded()) {
        // info
        logger.info("Runtime limit exceeded");
    }

    // shutdown thread pool
    extPool.shutdownNow();

    return solutionManager.getFrontier();
}

From source file:org.apache.hadoop.hbase.client.TestFromClientSide.java

@Ignore("Flakey: HBASE-8989")
@Test
public void testClientPoolThreadLocal() throws IOException {
    final byte[] tableName = Bytes.toBytes("testClientPoolThreadLocal");

    int poolSize = Integer.MAX_VALUE;
    int numVersions = 3;
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE, "thread-local");
    conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, poolSize);

    final HTable table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, conf, 3);

    final long ts = EnvironmentEdgeManager.currentTimeMillis();
    final Get get = new Get(ROW);
    get.addColumn(FAMILY, QUALIFIER);
    get.setMaxVersions();

    for (int versions = 1; versions <= numVersions; versions++) {
        Put put = new Put(ROW);
        put.add(FAMILY, QUALIFIER, ts + versions, VALUE);
        table.put(put);

        Result result = table.get(get);
        NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY).get(QUALIFIER);

        assertEquals("The number of versions of '" + FAMILY + ":" + QUALIFIER + " did not match " + versions
                + "; " + put.toString() + ", " + get.toString(), versions, navigableMap.size());
        for (Map.Entry<Long, byte[]> entry : navigableMap.entrySet()) {
            assertTrue("The value at time " + entry.getKey() + " did not match what was put",
                    Bytes.equals(VALUE, entry.getValue()));
        }
    }

    final Object waitLock = new Object();
    ExecutorService executorService = Executors.newFixedThreadPool(numVersions);
    final AtomicReference<AssertionError> error = new AtomicReference<AssertionError>(null);
    for (int versions = numVersions; versions < numVersions * 2; versions++) {
        final int versionsCopy = versions;
        executorService.submit(new Callable<Void>() {
            @Override
            public Void call() {
                try {
                    Put put = new Put(ROW);
                    put.add(FAMILY, QUALIFIER, ts + versionsCopy, VALUE);
                    table.put(put);

                    Result result = table.get(get);
                    NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY).get(QUALIFIER);

                    assertEquals(
                            "The number of versions of '" + Bytes.toString(FAMILY) + ":"
                                    + Bytes.toString(QUALIFIER) + " did not match " + versionsCopy,
                            versionsCopy, navigableMap.size());
                    for (Map.Entry<Long, byte[]> entry : navigableMap.entrySet()) {
                        assertTrue("The value at time " + entry.getKey() + " did not match what was put",
                                Bytes.equals(VALUE, entry.getValue()));
                    }
                    synchronized (waitLock) {
                        waitLock.wait();
                    }
                } catch (Exception e) {
                } catch (AssertionError e) {
                    // The error happens in a worker thread, so it won't fail the test;
                    // it needs to be passed to the caller for proper handling.
                    error.set(e);
                    LOG.error(e);
                }

                return null;
            }
        });
    }
    synchronized (waitLock) {
        waitLock.notifyAll();
    }
    executorService.shutdownNow();
    assertNull(error.get());
}

From source file:org.apache.flink.mesos.runtime.clusterframework.MesosApplicationMasterRunner.java

/**
 * The main work method, must run as a privileged action.
 *
 * @return The return code for the Java process.
 */
protected int runPrivileged(Configuration config, Configuration dynamicProperties) {

    ActorSystem actorSystem = null;
    WebMonitor webMonitor = null;
    MesosArtifactServer artifactServer = null;
    ScheduledExecutorService futureExecutor = null;
    ExecutorService ioExecutor = null;
    MesosServices mesosServices = null;

    try {
        // ------- (1) load and parse / validate all configurations -------

        // Note that we use the "appMasterHostname" given by the system, to make sure
        // we use the hostnames consistently throughout akka.
        // for akka "localhost" and "localhost.localdomain" are different actors.
        final String appMasterHostname = InetAddress.getLocalHost().getHostName();

        // Mesos configuration
        final MesosConfiguration mesosConfig = createMesosConfig(config, appMasterHostname);

        // JM configuration
        int numberProcessors = Hardware.getNumberCPUCores();

        futureExecutor = Executors.newScheduledThreadPool(numberProcessors,
                new ExecutorThreadFactory("mesos-jobmanager-future"));

        ioExecutor = Executors.newFixedThreadPool(numberProcessors,
                new ExecutorThreadFactory("mesos-jobmanager-io"));

        mesosServices = MesosServicesUtils.createMesosServices(config);

        // TM configuration
        final MesosTaskManagerParameters taskManagerParameters = MesosTaskManagerParameters.create(config);

        LOG.info("TaskManagers will be created with {} task slots",
                taskManagerParameters.containeredParameters().numSlots());
        LOG.info(
                "TaskManagers will be started with container size {} MB, JVM heap size {} MB, "
                        + "JVM direct memory limit {} MB, {} cpus",
                taskManagerParameters.containeredParameters().taskManagerTotalMemoryMB(),
                taskManagerParameters.containeredParameters().taskManagerHeapSizeMB(),
                taskManagerParameters.containeredParameters().taskManagerDirectMemoryLimitMB(),
                taskManagerParameters.cpus());

        // JM endpoint, which should be explicitly configured based on acquired net resources
        final int listeningPort = config.getInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY,
                ConfigConstants.DEFAULT_JOB_MANAGER_IPC_PORT);
        checkState(listeningPort >= 0 && listeningPort <= 65536, "Config parameter \""
                + ConfigConstants.JOB_MANAGER_IPC_PORT_KEY + "\" is invalid, it must be between 0 and 65536");

        // ----------------- (2) start the actor system -------------------

        // try to start the actor system, JobManager and JobManager actor system
        // using the configured address and ports
        actorSystem = BootstrapTools.startActorSystem(config, appMasterHostname, listeningPort, LOG);

        Address address = AkkaUtils.getAddress(actorSystem);
        final String akkaHostname = address.host().get();
        final int akkaPort = (Integer) address.port().get();

        LOG.info("Actor system bound to hostname {}.", akkaHostname);

        // try to start the artifact server
        LOG.debug("Starting Artifact Server");
        final int artifactServerPort = config.getInteger(ConfigConstants.MESOS_ARTIFACT_SERVER_PORT_KEY,
                ConfigConstants.DEFAULT_MESOS_ARTIFACT_SERVER_PORT);
        final String artifactServerPrefix = UUID.randomUUID().toString();
        artifactServer = new MesosArtifactServer(artifactServerPrefix, akkaHostname, artifactServerPort,
                config);

        // ----------------- (3) Generate the configuration for the TaskManagers -------------------

        // generate a container spec which conveys the artifacts/vars needed to launch a TM
        ContainerSpecification taskManagerContainerSpec = new ContainerSpecification();

        // propagate the AM dynamic configuration to the TM
        taskManagerContainerSpec.getDynamicConfiguration().addAll(dynamicProperties);

        // propagate newly-generated configuration elements
        final Configuration taskManagerConfig = BootstrapTools.generateTaskManagerConfiguration(
                new Configuration(), akkaHostname, akkaPort,
                taskManagerParameters.containeredParameters().numSlots(), TASKMANAGER_REGISTRATION_TIMEOUT);
        taskManagerContainerSpec.getDynamicConfiguration().addAll(taskManagerConfig);

        // apply the overlays
        applyOverlays(config, taskManagerContainerSpec);

        // configure the artifact server to serve the specified artifacts
        configureArtifactServer(artifactServer, taskManagerContainerSpec);

        // ----------------- (4) start the actors -------------------

        // 1) JobManager & Archive (in non-HA case, the leader service takes this)
        // 2) Web Monitor (we need its port to register)
        // 3) Resource Master for Mesos
        // 4) Process reapers for the JobManager and Resource Master

        // 1: the JobManager
        LOG.debug("Starting JobManager actor");

        // we start the JobManager with its standard name
        ActorRef jobManager = JobManager.startJobManagerActors(config, actorSystem, futureExecutor, ioExecutor,
                new scala.Some<>(JobManager.JOB_MANAGER_NAME()), scala.Option.<String>empty(),
                getJobManagerClass(), getArchivistClass())._1();

        // 2: the web monitor
        LOG.debug("Starting Web Frontend");

        webMonitor = BootstrapTools.startWebMonitorIfConfigured(config, actorSystem, jobManager, LOG);
        if (webMonitor != null) {
            final URL webMonitorURL = new URL("http", appMasterHostname, webMonitor.getServerPort(), "/");
            mesosConfig.frameworkInfo().setWebuiUrl(webMonitorURL.toExternalForm());
        }

        // 3: Flink's Mesos ResourceManager
        LOG.debug("Starting Mesos Flink Resource Manager");

        // create the worker store to persist task information across restarts
        MesosWorkerStore workerStore = mesosServices.createMesosWorkerStore(config, ioExecutor);

        // we need the leader retrieval service here to be informed of new
        // leader session IDs, even though there can be only one leader ever
        LeaderRetrievalService leaderRetriever = LeaderRetrievalUtils.createLeaderRetrievalService(config,
                jobManager);

        Props resourceMasterProps = MesosFlinkResourceManager.createActorProps(getResourceManagerClass(),
                config, mesosConfig, workerStore, leaderRetriever, taskManagerParameters,
                taskManagerContainerSpec, artifactServer, LOG);

        ActorRef resourceMaster = actorSystem.actorOf(resourceMasterProps, "Mesos_Resource_Master");

        // 4: Process reapers
        // The process reapers ensure that upon unexpected actor death, the process exits
        // and does not stay lingering around unresponsive

        LOG.debug("Starting process reapers for JobManager");

        actorSystem.actorOf(Props.create(ProcessReaper.class, resourceMaster, LOG, ACTOR_DIED_EXIT_CODE),
                "Mesos_Resource_Master_Process_Reaper");

        actorSystem.actorOf(Props.create(ProcessReaper.class, jobManager, LOG, ACTOR_DIED_EXIT_CODE),
                "JobManager_Process_Reaper");
    } catch (Throwable t) {
        // make sure that everything ends up in the log
        LOG.error("Mesos JobManager initialization failed", t);

        if (webMonitor != null) {
            try {
                webMonitor.stop();
            } catch (Throwable ignored) {
                LOG.warn("Failed to stop the web frontend", ignored);
            }
        }

        if (artifactServer != null) {
            try {
                artifactServer.stop();
            } catch (Throwable ignored) {
                LOG.error("Failed to stop the artifact server", ignored);
            }
        }

        if (actorSystem != null) {
            try {
                actorSystem.shutdown();
            } catch (Throwable tt) {
                LOG.error("Error shutting down actor system", tt);
            }
        }

        if (futureExecutor != null) {
            try {
                futureExecutor.shutdownNow();
            } catch (Throwable tt) {
                LOG.error("Error shutting down future executor", tt);
            }
        }

        if (ioExecutor != null) {
            try {
                ioExecutor.shutdownNow();
            } catch (Throwable tt) {
                LOG.error("Error shutting down io executor", tt);
            }
        }

        if (mesosServices != null) {
            try {
                mesosServices.close(false);
            } catch (Throwable tt) {
                LOG.error("Error closing the mesos services.", tt);
            }
        }

        return INIT_ERROR_EXIT_CODE;
    }

    // everything started, we can wait until all is done or the process is killed
    LOG.info("Mesos JobManager started");

    // wait until everything is done
    actorSystem.awaitTermination();

    // if we get here, everything worked out all right, and we even exited smoothly
    if (webMonitor != null) {
        try {
            webMonitor.stop();
        } catch (Throwable t) {
            LOG.error("Failed to stop the web frontend", t);
        }
    }

    try {
        artifactServer.stop();
    } catch (Throwable t) {
        LOG.error("Failed to stop the artifact server", t);
    }

    org.apache.flink.runtime.concurrent.Executors.gracefulShutdown(AkkaUtils.getTimeout(config).toMillis(),
            TimeUnit.MILLISECONDS, futureExecutor, ioExecutor);

    try {
        mesosServices.close(true);
    } catch (Throwable t) {
        LOG.error("Failed to clean up and close MesosServices.", t);
    }

    return 0;
}

From source file:com.web.server.WebServer.java

/**
 * This is the start of the all the services in web server
 * @param args
 * @throws IOException 
 * @throws SAXException 
 */
public static void main(String[] args) throws IOException, SAXException {

    Hashtable urlClassLoaderMap = new Hashtable();
    Hashtable executorServicesMap = new Hashtable();
    Hashtable ataMap = new Hashtable<String, ATAConfig>();
    Hashtable messagingClassMap = new Hashtable();
    ConcurrentHashMap servletMapping = new ConcurrentHashMap();
    DigesterLoader serverdigesterLoader = DigesterLoader.newLoader(new FromXmlRulesModule() {

        protected void loadRules() {
            // TODO Auto-generated method stub
            try {
                loadXMLRules(new InputSource(new FileInputStream("./config/serverconfig-rules.xml")));
            } catch (FileNotFoundException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }

        }
    });
    Digester serverdigester = serverdigesterLoader.newDigester();
    final ServerConfig serverconfig = (ServerConfig) serverdigester
            .parse(new InputSource(new FileInputStream("./config/serverconfig.xml")));
    DigesterLoader messagingdigesterLoader = DigesterLoader.newLoader(new FromXmlRulesModule() {

        protected void loadRules() {
            // TODO Auto-generated method stub
            try {
                loadXMLRules(new InputSource(new FileInputStream("./config/messagingconfig-rules.xml")));
            } catch (FileNotFoundException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }

        }
    });
    Digester messagingdigester = messagingdigesterLoader.newDigester();
    MessagingElem messagingconfig = (MessagingElem) messagingdigester
            .parse(new InputSource(new FileInputStream("./config/messaging.xml")));
    //System.out.println(messagingconfig);
    ////System.out.println(serverconfig.getDeploydirectory());
    PropertyConfigurator.configure("log4j.properties");
    /*MemcachedClient cache=new MemcachedClient(
        new InetSocketAddress("localhost", 1000));*/

    // Store a value (async) for one hour
    //c.set("someKey", 36, new String("arun"));
    // Retrieve a value.        
    System.setProperty(Context.INITIAL_CONTEXT_FACTORY, "org.apache.naming.java.javaURLContextFactory");
    System.setProperty(Context.URL_PKG_PREFIXES, "org.apache.naming");
    ExecutorService executor = java.util.concurrent.Executors.newCachedThreadPool();

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName name = null;
    try {
        name = new ObjectName("com.web.server:type=WarDeployer");
    } catch (MalformedObjectNameException e1) {
        // TODO Auto-generated catch block
        e1.printStackTrace();
    }

    WarDeployer warDeployer = new WarDeployer(serverconfig.getDeploydirectory(), serverconfig.getFarmWarDir(),
            serverconfig.getClustergroup(), urlClassLoaderMap, executorServicesMap, messagingClassMap,
            servletMapping, messagingconfig, sessionObjects);
    warDeployer.setPriority(MIN_PRIORITY);
    try {
        mbs.registerMBean(warDeployer, name);
    } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e1) {
        // TODO Auto-generated catch block
        e1.printStackTrace();
    }

    //warDeployer.start();
    executor.execute(warDeployer);

    ServerSocketChannel serverSocketChannel = ServerSocketChannel.open();

    serverSocketChannel.bind(new InetSocketAddress("0.0.0.0", Integer.parseInt(serverconfig.getPort())));

    serverSocketChannel.configureBlocking(false);

    final byte[] shutdownBt = new byte[50];
    WebServerRequestProcessor webserverRequestProcessor = new WebServer().new WebServerRequestProcessor(
            servletMapping, urlClassLoaderMap, serverSocketChannel, serverconfig.getDeploydirectory(),
            Integer.parseInt(serverconfig.getShutdownport()), 1);
    webserverRequestProcessor.setPriority(MIN_PRIORITY);
    try {
        name = new ObjectName("com.web.server:type=WebServerRequestProcessor");
    } catch (MalformedObjectNameException e1) {
        // TODO Auto-generated catch block
        e1.printStackTrace();
    }

    try {
        mbs.registerMBean(webserverRequestProcessor, name);
    } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e1) {
        // TODO Auto-generated catch block
        e1.printStackTrace();
    }

    //webserverRequestProcessor.start();
    executor.execute(webserverRequestProcessor);

    for (int i = 0; i < 10; i++) {
        WebServerRequestProcessor webserverRequestProcessor1 = new WebServer().new WebServerRequestProcessor(
                servletMapping, urlClassLoaderMap, serverSocketChannel, serverconfig.getDeploydirectory(),
                Integer.parseInt(serverconfig.getShutdownport()), 2);
        webserverRequestProcessor1.setPriority(MIN_PRIORITY);
        try {
            name = new ObjectName("com.web.server:type=WebServerRequestProcessor" + (i + 1));
        } catch (MalformedObjectNameException e1) {
            // The name literal is well-formed, so this should not happen; log it just in case.
            e1.printStackTrace();
        }

        try {
            mbs.registerMBean(webserverRequestProcessor1, name);
        } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e1) {
            // JMX registration is best-effort; log the failure and continue startup.
            e1.printStackTrace();
        }

        executor.execute(webserverRequestProcessor1);
    }

    ServerSocketChannel serverSocketChannelServices = ServerSocketChannel.open();

    serverSocketChannelServices
            .bind(new InetSocketAddress("0.0.0.0", Integer.parseInt(serverconfig.getServicesport())));

    serverSocketChannelServices.configureBlocking(false);

    ExecutorServiceThread executorService = new ExecutorServiceThread(serverSocketChannelServices,
            executorServicesMap, Integer.parseInt(serverconfig.getShutdownport()), ataMap, urlClassLoaderMap,
            serverconfig.getDeploydirectory(), serverconfig.getServicesdirectory(),
            serverconfig.getEarservicesdirectory(), serverconfig.getNodesport());

    try {
        name = new ObjectName("com.web.services:type=ExecutorServiceThread");
    } catch (MalformedObjectNameException e1) {
        // The name literal is well-formed, so this should not happen; log it just in case.
        e1.printStackTrace();
    }

    try {
        mbs.registerMBean(executorService, name);
    } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e1) {
        // JMX registration is best-effort; log the failure and continue startup.
        e1.printStackTrace();
    }

    //executorService.start();
    executor.execute(executorService);

    for (int i = 0; i < 10; i++) {
        ExecutorServiceThread executorService1 = new ExecutorServiceThread(serverSocketChannelServices,
                executorServicesMap, Integer.parseInt(serverconfig.getShutdownport()), ataMap,
                urlClassLoaderMap, serverconfig.getDeploydirectory(), serverconfig.getServicesdirectory(),
                serverconfig.getEarservicesdirectory(), serverconfig.getNodesport());

        try {
            name = new ObjectName("com.web.services:type=ExecutorServiceThread" + (i + 1));
        } catch (MalformedObjectNameException e1) {
            // The name literal is well-formed, so this should not happen; log it just in case.
            e1.printStackTrace();
        }

        try {
            mbs.registerMBean(executorService1, name);
        } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e1) {
            // JMX registration is best-effort; log the failure and continue startup.
            e1.printStackTrace();
        }

        executor.execute(executorService1);
    }

    WebServerHttpsRequestProcessor webserverHttpsRequestProcessor = new WebServer().new WebServerHttpsRequestProcessor(
            servletMapping, urlClassLoaderMap, Integer.parseInt(serverconfig.getHttpsport()),
            serverconfig.getDeploydirectory(), Integer.parseInt(serverconfig.getShutdownport()),
            serverconfig.getHttpscertificatepath(), serverconfig.getHttpscertificatepasscode(), 1);
    try {
        name = new ObjectName("com.web.server:type=WebServerHttpsRequestProcessor");
    } catch (MalformedObjectNameException e1) {
        // The name literal is well-formed, so this should not happen; log it just in case.
        e1.printStackTrace();
    }

    try {
        mbs.registerMBean(webserverHttpsRequestProcessor, name);
    } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e1) {
        // JMX registration is best-effort; log the failure and continue startup.
        e1.printStackTrace();
    }
    webserverHttpsRequestProcessor.setPriority(MAX_PRIORITY);
    //webserverHttpsRequestProcessor.start();
    executor.execute(webserverHttpsRequestProcessor);

    /*for (int i = 0; i < 2; i++) {
        webserverHttpsRequestProcessor = new WebServer().new WebServerHttpsRequestProcessor(urlClassLoaderMap,
                Integer.parseInt(serverconfig.getHttpsport()) + (i + 1), serverconfig.getDeploydirectory(),
                Integer.parseInt(serverconfig.getShutdownport()), serverconfig.getHttpscertificatepath(),
                serverconfig.getHttpscertificatepasscode(), 1);

        try {
            name = new ObjectName("com.web.server:type=WebServerHttpsRequestProcessor" + (i + 1));
        } catch (MalformedObjectNameException e1) {
            e1.printStackTrace();
        }

        try {
            mbs.registerMBean(webserverHttpsRequestProcessor, name);
        } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e1) {
            e1.printStackTrace();
        }

        executor.execute(webserverHttpsRequestProcessor);
    }*/

    /*ATAServer ataServer = new ATAServer(serverconfig.getAtaaddress(), serverconfig.getAtaport(), ataMap);

    try {
        name = new ObjectName("com.web.services:type=ATAServer");
    } catch (MalformedObjectNameException e1) {
        e1.printStackTrace();
    }

    try {
        mbs.registerMBean(ataServer, name);
    } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e1) {
        e1.printStackTrace();
    }

    ataServer.start();*/

    /*ATAConfigClient ataClient = new ATAConfigClient(serverconfig.getAtaaddress(), serverconfig.getAtaport(),
            serverconfig.getServicesport(), executorServicesMap);

    try {
        name = new ObjectName("com.web.services:type=ATAConfigClient");
    } catch (MalformedObjectNameException e1) {
        e1.printStackTrace();
    }

    try {
        mbs.registerMBean(ataClient, name);
    } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e1) {
        e1.printStackTrace();
    }

    ataClient.start();*/

    MessagingServer messageServer = new MessagingServer(serverconfig.getMessageport(), messagingClassMap);

    try {
        name = new ObjectName("com.web.messaging:type=MessagingServer");
    } catch (MalformedObjectNameException e1) {
        // The name literal is well-formed, so this should not happen; log it just in case.
        e1.printStackTrace();
    }

    try {
        mbs.registerMBean(messageServer, name);
    } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e1) {
        // JMX registration is best-effort; log the failure and continue startup.
        e1.printStackTrace();
    }
    //messageServer.start();
    executor.execute(messageServer);

    RandomQueueMessagePicker randomqueuemessagepicker = new RandomQueueMessagePicker(messagingClassMap);

    try {
        name = new ObjectName("com.web.messaging:type=RandomQueueMessagePicker");
    } catch (MalformedObjectNameException e1) {
        // The name literal is well-formed, so this should not happen; log it just in case.
        e1.printStackTrace();
    }

    try {
        mbs.registerMBean(randomqueuemessagepicker, name);
    } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e1) {
        // JMX registration is best-effort; log the failure and continue startup.
        e1.printStackTrace();
    }

    //randomqueuemessagepicker.start();
    executor.execute(randomqueuemessagepicker);

    RoundRobinQueueMessagePicker roundrobinqueuemessagepicker = new RoundRobinQueueMessagePicker(
            messagingClassMap);

    try {
        name = new ObjectName("com.web.messaging:type=RoundRobinQueueMessagePicker");
    } catch (MalformedObjectNameException e1) {
        // The name literal is well-formed, so this should not happen; log it just in case.
        e1.printStackTrace();
    }

    try {
        mbs.registerMBean(roundrobinqueuemessagepicker, name);
    } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e1) {
        // JMX registration is best-effort; log the failure and continue startup.
        e1.printStackTrace();
    }

    //roundrobinqueuemessagepicker.start();
    executor.execute(roundrobinqueuemessagepicker);

    TopicMessagePicker topicpicker = new TopicMessagePicker(messagingClassMap);

    try {
        name = new ObjectName("com.web.messaging:type=TopicMessagePicker");
    } catch (MalformedObjectNameException e1) {
        // The name literal is well-formed, so this should not happen; log it just in case.
        e1.printStackTrace();
    }

    try {
        mbs.registerMBean(topicpicker, name);
    } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e1) {
        // JMX registration is best-effort; log the failure and continue startup.
        e1.printStackTrace();
    }
    //topicpicker.start();
    executor.execute(topicpicker);

    try {
        name = new ObjectName("com.web.server:type=SARDeployer");
    } catch (MalformedObjectNameException e1) {
        // The name literal is well-formed, so this should not happen; log it just in case.
        e1.printStackTrace();
    }
    SARDeployer sarDeployer = SARDeployer.newInstance(serverconfig.getDeploydirectory());
    try {
        mbs.registerMBean(sarDeployer, name);
    } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e1) {
        // JMX registration is best-effort; log the failure and continue startup.
        e1.printStackTrace();
    }

    executor.execute(sarDeployer);
    /*try {
        mbs.invoke(name, "startDeployer", null, null);
    } catch (InstanceNotFoundException | ReflectionException | MBeanException e1) {
        e1.printStackTrace();
    }*/
    System.setProperty(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.rmi.registry.RegistryContextFactory");
    System.setProperty(Context.PROVIDER_URL, "rmi://localhost:" + serverconfig.getServicesregistryport());
    Registry registry = LocateRegistry.createRegistry(Integer.parseInt(serverconfig.getServicesregistryport()));
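    // (Presumably) the deployers below bind their remote service stubs into this
    // in-process RMI registry on the configured services registry port.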

    /*JarDeployer jarDeployer = new JarDeployer(registry, serverconfig.getServicesdirectory(),
            serverconfig.getServiceslibdirectory(), serverconfig.getCachedir(), executorServicesMap,
            urlClassLoaderMap);
    try {
        name = new ObjectName("com.web.server:type=JarDeployer");
    } catch (MalformedObjectNameException e1) {
        e1.printStackTrace();
    }

    try {
        mbs.registerMBean(jarDeployer, name);
    } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e1) {
        e1.printStackTrace();
    }

    //jarDeployer.start();
    executor.execute(jarDeployer);*/

    EARDeployer earDeployer = new EARDeployer(registry, serverconfig.getEarservicesdirectory(),
            serverconfig.getDeploydirectory(), executorServicesMap, urlClassLoaderMap,
            serverconfig.getCachedir(), warDeployer);
    try {
        name = new ObjectName("com.web.server:type=EARDeployer");
    } catch (MalformedObjectNameException e1) {
        // TODO Auto-generated catch block
        e1.printStackTrace();
    }

    try {
        mbs.registerMBean(earDeployer, name);
    } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e1) {
        // JMX registration is best-effort; log the failure and continue startup.
        e1.printStackTrace();
    }

    //earDeployer.start();
    executor.execute(earDeployer);

    JVMConsole jvmConsole = new JVMConsole(Integer.parseInt(serverconfig.getJvmConsolePort()));

    try {
        name = new ObjectName("com.web.server:type=JVMConsole");
    } catch (MalformedObjectNameException e1) {
        // The name literal is well-formed, so this should not happen; log it just in case.
        e1.printStackTrace();
    }

    try {
        mbs.registerMBean(jvmConsole, name);
    } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e1) {
        // JMX registration is best-effort; log the failure and continue startup.
        e1.printStackTrace();
    }
    executor.execute(jvmConsole);

    ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();
    XMLDeploymentScanner xmlDeploymentScanner = new XMLDeploymentScanner(serverconfig.getDeploydirectory(),
            serverconfig.getServiceslibdirectory());
    exec.scheduleAtFixedRate(xmlDeploymentScanner, 0, 1000, TimeUnit.MILLISECONDS);
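    // A single-threaded scheduler re-runs the scanner every second (the period
    // is in milliseconds), presumably to pick up changed XML deployment
    // descriptors.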

    EmbeddedJMS embeddedJMS = null;
    try {
        embeddedJMS = new EmbeddedJMS();
        embeddedJMS.start();
    } catch (Exception ex) {
        // The embedded JMS broker failed to start; log and continue without it.
        ex.printStackTrace();
    }

    EJBDeployer ejbDeployer = new EJBDeployer(serverconfig.getServicesdirectory(), registry,
            Integer.parseInt(serverconfig.getServicesregistryport()), embeddedJMS);
    try {
        name = new ObjectName("com.web.server:type=EJBDeployer");
    } catch (MalformedObjectNameException e1) {
        // The name literal is well-formed, so this should not happen; log it just in case.
        e1.printStackTrace();
    }

    try {
        mbs.registerMBean(ejbDeployer, name);
    } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e1) {
        // JMX registration is best-effort; log the failure and continue startup.
        e1.printStackTrace();
    }

    //ejbDeployer.start();
    executor.execute(ejbDeployer);
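
    // The thread below implements a minimal remote-shutdown protocol: any client
    // that connects on the shutdown port and sends a request starting with
    // "shutdown WebServer" notifies shutDownObject, releasing the main thread
    // waiting further down. The JVM shutdown hook registered after it takes the
    // same path on Ctrl-C or a normal kill.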

    new Thread() {
        public void run() {
            try {
                ServerSocket serverSocket = new ServerSocket(Integer.parseInt(serverconfig.getShutdownport()));
                while (true) {
                    Socket sock = serverSocket.accept();
                    InputStream istream = sock.getInputStream();
                    // Use the actual number of bytes read so stale contents of the
                    // reused buffer are not parsed as part of a later request.
                    int bytesRead = istream.read(shutdownBt);
                    String shutdownStr = new String(shutdownBt, 0, Math.max(bytesRead, 0));
                    String[] shutdownToken = shutdownStr.split("\r\n\r\n");
                    //System.out.println(shutdownStr);
                    if (shutdownToken[0].startsWith("shutdown WebServer")) {
                        synchronized (shutDownObject) {
                            shutDownObject.notifyAll();
                        }
                    }
                }
            } catch (IOException e) {
                // The shutdown listener failed; remote shutdown requests will no longer be handled.
                e.printStackTrace();
            }
        }
    }.start();

    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        public void run() {
            System.out.println("IN shutdown Hook");
            synchronized (shutDownObject) {
                shutDownObject.notifyAll();
            }
        }
    }));
    try {
        synchronized (shutDownObject) {
            shutDownObject.wait();
        }
        executor.shutdownNow();
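        // shutdownNow() interrupts the pool's worker threads and returns any
        // tasks that never began executing; since every component above was
        // submitted to this executor, this one call asks them all to stop. A
        // more patient variant (a sketch, not in the original) would follow up
        // with executor.awaitTermination(timeout, unit) before closing the
        // channels below.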
        serverSocketChannel.close();
        serverSocketChannelServices.close();
        // Guard against the broker having failed to start earlier.
        if (embeddedJMS != null) {
            embeddedJMS.stop();
        }

    } catch (Exception e1) {
        // Best-effort cleanup; log anything that fails while stopping.
        e1.printStackTrace();
    }

    System.out.println("IN shutdown Hook1");
    /*try {
        Thread.sleep(10000);
    } catch (Exception ex) {
    }*/

    //webserverRequestProcessor.stop();
    //webserverRequestProcessor1.stop();

    /*warDeployer.stop();
    executorService.stop();
    //ataServer.stop();
    //ataClient.stop();
    messageServer.stop();
    randomqueuemessagepicker.stop();
    roundrobinqueuemessagepicker.stop();
    topicpicker.stop();*/
    /*try {
        mbs.invoke(new ObjectName("com.web.server:type=SARDeployer"), "destroyDeployer", null, null);
    } catch (InstanceNotFoundException | MalformedObjectNameException | ReflectionException | MBeanException e) {
        e.printStackTrace();
    }*/
    //earDeployer.stop();
    System.exit(0);
}