Example usage for com.google.common.util.concurrent MoreExecutors listeningDecorator

Introduction

On this page you can find example usage of com.google.common.util.concurrent.MoreExecutors.listeningDecorator.

Prototype

@GwtIncompatible("TODO")
public static ListeningExecutorService listeningDecorator(ExecutorService delegate)

@GwtIncompatible("TODO")
public static ListeningScheduledExecutorService listeningDecorator(ScheduledExecutorService delegate)

Document

Creates an ExecutorService (or, for the second overload, a ScheduledExecutorService) whose submit and invokeAll methods submit ListenableFutureTask instances to the given delegate executor.
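
Before the examples, here is a minimal, self-contained sketch of the more common ExecutorService overload. It assumes a recent Guava version in which Futures.addCallback takes an explicit executor argument; the class name and the computed value are illustrative only.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class ListeningDecoratorExample {
    public static void main(String[] args) throws Exception {
        // Decorate a plain ExecutorService so that submit() returns ListenableFuture.
        ExecutorService delegate = Executors.newFixedThreadPool(2);
        ListeningExecutorService service = MoreExecutors.listeningDecorator(delegate);

        ListenableFuture<Integer> future = service.submit(() -> 6 * 7);

        // Attach a completion callback; it is executed on the supplied executor.
        Futures.addCallback(future, new FutureCallback<Integer>() {
            @Override
            public void onSuccess(Integer result) {
                System.out.println("Computed: " + result);
            }

            @Override
            public void onFailure(Throwable t) {
                t.printStackTrace();
            }
        }, service);

        future.get(); // wait for the result before shutting down
        service.shutdown();
    }
}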

Usage

From source file:org.apache.hadoop.hive.llap.tezplugins.LlapTaskSchedulerService.java

@VisibleForTesting
public LlapTaskSchedulerService(TaskSchedulerContext taskSchedulerContext, Clock clock, boolean initMetrics) {
    super(taskSchedulerContext);
    this.clock = clock;
    this.delayedTaskSchedulerCallable = createDelayedTaskSchedulerCallable();
    try {
        this.conf = TezUtils.createConfFromUserPayload(taskSchedulerContext.getInitialUserPayload());
    } catch (IOException e) {
        throw new TezUncheckedException(
                "Failed to parse user payload for " + LlapTaskSchedulerService.class.getSimpleName(), e);
    }
    this.containerFactory = new ContainerFactory(taskSchedulerContext.getApplicationAttemptId(),
            taskSchedulerContext.getCustomClusterIdentifier());
    // TODO HIVE-13483 Get all of these properties from the registry. This will need to take care of different instances
    // publishing potentially different values when we support changing configurations dynamically.
    // For now, this can simply be fetched from a single registry instance.
    this.nodeBlacklistConf = new NodeBlacklistConf(
            HiveConf.getTimeVar(conf, ConfVars.LLAP_TASK_SCHEDULER_NODE_REENABLE_MIN_TIMEOUT_MS,
                    TimeUnit.MILLISECONDS),
            HiveConf.getTimeVar(conf, ConfVars.LLAP_TASK_SCHEDULER_NODE_REENABLE_MAX_TIMEOUT_MS,
                    TimeUnit.MILLISECONDS),
            HiveConf.getFloatVar(conf, ConfVars.LLAP_TASK_SCHEDULER_NODE_DISABLE_BACK_OFF_FACTOR));

    this.numSchedulableTasksPerNode = HiveConf.getIntVar(conf,
            ConfVars.LLAP_TASK_SCHEDULER_NUM_SCHEDULABLE_TASKS_PER_NODE);

    long localityDelayMs = HiveConf.getTimeVar(conf, ConfVars.LLAP_TASK_SCHEDULER_LOCALITY_DELAY,
            TimeUnit.MILLISECONDS);

    this.localityDelayConf = new LocalityDelayConf(localityDelayMs);

    this.timeoutMonitor = new SchedulerTimeoutMonitor();
    this.timeout = HiveConf.getTimeVar(conf, ConfVars.LLAP_DAEMON_TASK_SCHEDULER_TIMEOUT_SECONDS,
            TimeUnit.MILLISECONDS);
    this.timeoutExecutor = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("LlapTaskSchedulerTimeoutMonitor").build());
    this.timeoutFuture = null;

    this.scheduledLoggingExecutor = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder()
            .setDaemon(true).setNameFormat("LlapTaskSchedulerTimedLogThread").build());

    String instanceId = HiveConf.getTrimmedVar(conf, ConfVars.LLAP_DAEMON_SERVICE_HOSTS);

    Preconditions.checkNotNull(instanceId, ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname + " must be defined");

    ExecutorService executorServiceRaw = Executors.newSingleThreadExecutor(
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("LlapSchedulerNodeEnabler").build());
    nodeEnabledExecutor = MoreExecutors.listeningDecorator(executorServiceRaw);

    ExecutorService delayedTaskSchedulerExecutorRaw = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder()
            .setDaemon(true).setNameFormat("LlapSchedulerDelayedTaskHandler").build());
    delayedTaskSchedulerExecutor = MoreExecutors.listeningDecorator(delayedTaskSchedulerExecutorRaw);

    ExecutorService schedulerExecutorServiceRaw = Executors.newSingleThreadExecutor(
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("LlapScheduler").build());
    schedulerExecutor = MoreExecutors.listeningDecorator(schedulerExecutorServiceRaw);

    if (initMetrics && !conf.getBoolean(ConfVars.HIVE_IN_TEST.varname, false)) {
        // Initialize the metrics system
        LlapMetricsSystem.initialize("LlapTaskScheduler");
        this.pauseMonitor = new JvmPauseMonitor(conf);
        pauseMonitor.start();
        String displayName = "LlapTaskSchedulerMetrics-" + MetricsUtils.getHostName();
        String sessionId = conf.get("llap.daemon.metrics.sessionid");
        // TODO: Not sure about the use of this. Should we instead use workerIdentity as sessionId?
        this.metrics = LlapTaskSchedulerMetrics.create(displayName, sessionId);
    } else {
        this.metrics = null;
        this.pauseMonitor = null;
    }

    String hostsString = HiveConf.getVar(conf, ConfVars.LLAP_DAEMON_SERVICE_HOSTS);
    LOG.info(
            "Running with configuration: hosts={}, numSchedulableTasksPerNode={}, nodeBlacklistConf={}, localityConf={}",
            hostsString, numSchedulableTasksPerNode, nodeBlacklistConf, localityDelayConf);

}

From source file:com.google.cloud.dataflow.sdk.util.GcsUtil.java

private static void executeBatches(List<BatchRequest> batches) throws IOException {
    ListeningExecutorService executor = MoreExecutors.listeningDecorator(
            MoreExecutors.getExitingExecutorService(new ThreadPoolExecutor(MAX_CONCURRENT_BATCHES,
                    MAX_CONCURRENT_BATCHES, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>())));

    List<ListenableFuture<Void>> futures = new LinkedList<>();
    for (final BatchRequest batch : batches) {
        futures.add(executor.submit(new Callable<Void>() {
            public Void call() throws IOException {
                batch.execute();
                return null;
            }
        }));
    }

    try {
        Futures.allAsList(futures).get();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException("Interrupted while executing batch GCS request", e);
    } catch (ExecutionException e) {
        throw new IOException("Error executing batch GCS request", e);
    } finally {
        executor.shutdown();
    }
}

From source file:org.nypl.simplified.app.Simplified.java

/**
 * @param account Library Account
 * @param context Context
 * @param in_adobe_drm Adobe Adept Executor
 * @return Books
 */
public static BooksType getBooks(final Account account, final Context context,
        final OptionType<AdobeAdeptExecutorType> in_adobe_drm) {

    final ListeningExecutorService exec_books = MoreExecutors
            .listeningDecorator(Simplified.namedThreadPool(1, "books", 19));
    final HTTPType http = HTTP.newHTTP();

    File base_accounts_dir = context.getFilesDir();
    if (account.getId() > 0) {
        base_accounts_dir = new File(context.getFilesDir(), account.getPathComponent());
    }

    final File accounts_dir = new File(base_accounts_dir, "accounts");

    final File base_dir = Simplified.getDiskDataDir(context);
    File base_library_dir = base_dir;
    if (account.getId() > 0) {
        base_library_dir = new File(base_dir, account.getPathComponent());
    }

    final File downloads_dir = new File(base_library_dir, "downloads");
    final File books_dir = new File(base_library_dir, "books");
    final File books_database_directory = new File(books_dir, "data");

    final DownloaderType downloader = DownloaderHTTP.newDownloader(exec_books, downloads_dir, http);

    final ListeningExecutorService exec_catalog_feeds = MoreExecutors
            .listeningDecorator(Simplified.namedThreadPool(1, "catalog-feed", 19));

    final OPDSJSONSerializerType in_json_serializer = OPDSJSONSerializer.newSerializer();
    final OPDSJSONParserType in_json_parser = OPDSJSONParser.newParser();

    final BookDatabaseType books_database = BookDatabase.Companion.newDatabase(context, in_json_serializer,
            in_json_parser, books_database_directory);

    final OPDSAcquisitionFeedEntryParserType in_entry_parser = OPDSAcquisitionFeedEntryParser
            .newParser(BookFormats.Companion.supportedBookMimeTypes());

    final OPDSFeedParserType p = OPDSFeedParser.newParser(in_entry_parser);
    final OPDSSearchParserType s = OPDSSearchParser.newParser();

    final FeedLoaderType feed_loader = Simplified.makeFeedLoader(exec_catalog_feeds, books_database, http, s,
            p);

    final AccountsDatabaseType accounts_database = AccountsDatabase.openDatabase(accounts_dir);

    String catalog = account.getCatalogUrl();
    String adobe = account.getCatalogUrl();

    if (!account.needsAuth() && account.getId() == 2 && Simplified.getSharedPrefs().contains("age13")) {
        if (Simplified.getSharedPrefs().getBoolean("age13")) {
            catalog = account.getCatalogUrl13AndOver();
            adobe = account.getCatalogUrl13AndOver();
        } else {
            catalog = account.getCatalogUrlUnder13();
            adobe = account.getCatalogUrlUnder13();
        }
    }

    final BooksControllerConfiguration books_config = new BooksControllerConfiguration(URI.create(catalog),
            URI.create(adobe));

    final URI loans_url_component = books_config.getCurrentRootFeedURI()
            .resolve(context.getResources().getString(R.string.feature_catalog_loans_uri_component));

    return BooksController.newBooks(context, exec_books, feed_loader, http, downloader, in_json_serializer,
            in_json_parser, in_adobe_drm, Simplified.getDocumentStore(account, context.getResources()),
            books_database, accounts_database, books_config, loans_url_component);

}

From source file:org.opennms.newts.gsod.ImportRunner.java

private Observable<Boolean> parMap(Observable<List<Sample>> samples, ExecutorService executorSvc,
        final MetricRegistry metrics, final Func1<List<Sample>, Boolean> insert) {
    final ListeningExecutorService executor = MoreExecutors.listeningDecorator(executorSvc);

    Observable<Boolean> o = samples.lift(new Operator<ListenableFuture<Boolean>, List<Sample>>() {

        @Override
        public Subscriber<? super List<Sample>> call(final Subscriber<? super ListenableFuture<Boolean>> s) {
            return new Subscriber<List<Sample>>() {

                @Override
                public void onCompleted() {
                    if (!s.isUnsubscribed()) {
                        s.onCompleted();
                    }
                    executor.shutdown();
                }

                @Override
                public void onError(Throwable e) {
                    if (!s.isUnsubscribed()) {
                        s.onError(e);
                    }
                }

                @Override
                public void onNext(final List<Sample> t) {
                    if (!s.isUnsubscribed()) {
                        try {
                            ListenableFuture<Boolean> f = executor.submit(new Callable<Boolean>() {

                                @Override
                                public Boolean call() throws Exception {
                                    return insert.call(t);
                                }

                            });
                            s.onNext(f);
                        } catch (Throwable ex) {
                            onError(ex);
                        }

                    }
                }
            };
        }

    }).observeOn(Schedulers.io()).map(new Func1<ListenableFuture<Boolean>, Boolean>() {

        @Override
        public Boolean call(ListenableFuture<Boolean> f) {
            try {
                return f.get();
            } catch (Throwable e) {
                throw Exceptions.propagate(e);
            }
        }

    });

    return o;
}

From source file:org.voltdb.RealVoltDB.java

/**
 * Initialize all the global components, then initialize all the m_sites.
 */
@Override
public void initialize(VoltDB.Configuration config) {
    synchronized (m_startAndStopLock) {
        // check that this is a 64 bit VM
        if (System.getProperty("java.vm.name").contains("64") == false) {
            hostLog.fatal("You are running on an unsupported (probably 32 bit) JVM. Exiting.");
            System.exit(-1);
        }
        consoleLog.l7dlog(Level.INFO, LogKeys.host_VoltDB_StartupString.name(), null);
        if (config.m_enableIV2) {
            consoleLog.warn("ENABLE IV2: " + config.m_enableIV2 + ". NOT SUPPORTED IN PRODUCTION.");
        }

        // If there's no deployment provide a default and put it under voltdbroot.
        if (config.m_pathToDeployment == null) {
            try {
                config.m_pathToDeployment = setupDefaultDeployment();
            } catch (IOException e) {
                VoltDB.crashLocalVoltDB("Failed to write default deployment.", false, null);
            }
        }

        // set the mode first thing
        m_mode = OperationMode.INITIALIZING;
        m_config = config;
        m_startMode = null;

        // set a bunch of things to null/empty/new for tests
        // which reuse the process
        m_clientInterfaces.clear();
        m_dtxns.clear();
        m_adminListener = null;
        m_commandLog = new DummyCommandLog();
        m_deployment = null;
        m_messenger = null;
        m_startMode = null;
        m_statsAgent = new StatsAgent();
        m_asyncCompilerAgent = new AsyncCompilerAgent();
        m_faultManager = null;
        m_validateConfiguredNumberOfPartitionsOnMailboxUpdate = false;
        m_snapshotCompletionMonitor = null;
        m_catalogContext = null;
        m_partitionCountStats = null;
        m_ioStats = null;
        m_memoryStats = null;
        m_statsManager = null;
        m_restoreAgent = null;
        m_siteTracker = null;
        m_mailboxTracker = null;
        m_recoveryStartTime = System.currentTimeMillis();
        m_hostIdWithStartupCatalog = 0;
        m_pathToStartupCatalog = m_config.m_pathToCatalog;
        m_replicationActive = false;

        // set up site structure
        m_localSites = new COWMap<Long, ExecutionSite>();
        m_siteThreads = new HashMap<Long, Thread>();
        m_runners = new ArrayList<ExecutionSiteRunner>();

        m_computationService = MoreExecutors.listeningDecorator(Executors
                .newFixedThreadPool(Math.max(2, CoreUtils.availableProcessors() / 4), new ThreadFactory() {
                    private int threadIndex = 0;

                    @Override
                    public synchronized Thread newThread(Runnable r) {
                        Thread t = new Thread(null, r, "Computation service thread - " + threadIndex++, 131072);
                        t.setDaemon(true);
                        return t;
                    }

                }));

        // determine if this is a rejoining node
        // (used for license check and later the actual rejoin)
        boolean isRejoin = false;
        if (config.m_startAction == START_ACTION.REJOIN || config.m_startAction == START_ACTION.LIVE_REJOIN) {
            isRejoin = true;
        }
        m_rejoining = isRejoin;

        // Set std-out/err to use the UTF-8 encoding and fail if UTF-8 isn't supported
        try {
            System.setOut(new PrintStream(System.out, true, "UTF-8"));
            System.setErr(new PrintStream(System.err, true, "UTF-8"));
        } catch (UnsupportedEncodingException e) {
            hostLog.fatal(
                    "Support for the UTF-8 encoding is required for VoltDB. This means you are likely running an unsupported JVM. Exiting.");
            System.exit(-1);
        }

        m_snapshotCompletionMonitor = new SnapshotCompletionMonitor();

        readBuildInfo(config.m_isEnterprise ? "Enterprise Edition" : "Community Edition");

        // start up the response sampler if asked to by setting the env var
        // VOLTDB_RESPONSE_SAMPLE_PATH to a valid path
        ResponseSampler.initializeIfEnabled();

        buildClusterMesh(isRejoin);

        //Start validating the build string in the background
        final Future<?> buildStringValidation = validateBuildString(getBuildString(), m_messenger.getZK());

        m_mailboxPublisher = new MailboxPublisher(VoltZK.mailboxes + "/" + m_messenger.getHostId());
        final int numberOfNodes = readDeploymentAndCreateStarterCatalogContext();
        if (!isRejoin) {
            m_messenger.waitForGroupJoin(numberOfNodes);
        }

        m_faultManager = new FaultDistributor(this);
        m_faultManager.registerFaultHandler(SiteFailureFault.SITE_FAILURE_CATALOG, m_faultHandler,
                FaultType.SITE_FAILURE);
        if (!m_faultManager.testPartitionDetectionDirectory(
                m_catalogContext.cluster.getFaultsnapshots().get("CLUSTER_PARTITION"))) {
            VoltDB.crashLocalVoltDB(
                    "Unable to create partition detection snapshot directory at"
                            + m_catalogContext.cluster.getFaultsnapshots().get("CLUSTER_PARTITION"),
                    false, null);
        }

        // Create the thread pool here. It's needed by buildClusterMesh()
        m_periodicWorkThread = CoreUtils.getScheduledThreadPoolExecutor("Periodic Work", 1, 1024 * 128);

        m_licenseApi = MiscUtils.licenseApiFactory(m_config.m_pathToLicense);
        if (m_licenseApi == null) {
            VoltDB.crashLocalVoltDB(
                    "Failed to initialize license verifier. " + "See previous log message for details.", false,
                    null);
        }

        // Create the GlobalServiceElector.  Do this here so we can register the MPI with it
        // when we construct it below
        m_globalServiceElector = new GlobalServiceElector(m_messenger.getZK(), m_messenger.getHostId());

        /*
         * Construct all the mailboxes for things that need to be globally addressable so they can be published
         * in one atomic shot.
         *
         * The starting state for partition assignments is statically derived from the host id generated
         * by host messenger and the k-factor/host count/sites per host. This starting state
         * is published to ZK as the topology metadata node.
         *
         * On rejoin the rejoining node has to inspect the topology meta node to find out what is missing
         * and then update the topology listing itself as a replacement for one of the missing host ids.
         * Then it does a compare and set of the topology.
         */
        ArrayDeque<Mailbox> siteMailboxes = null;
        ClusterConfig clusterConfig = null;
        DtxnInitiatorMailbox initiatorMailbox = null;
        long initiatorHSId = 0;
        JSONObject topo = getTopology(isRejoin);
        MpInitiator mpi = null;
        try {
            // IV2 mailbox stuff
            if (isIV2Enabled()) {
                ClusterConfig iv2config = new ClusterConfig(topo);
                m_cartographer = new Cartographer(m_messenger.getZK(), iv2config.getPartitionCount());
                if (isRejoin) {
                    List<Integer> partitionsToReplace = m_cartographer.getIv2PartitionsToReplace(topo);
                    m_iv2Initiators = createIv2Initiators(partitionsToReplace);
                } else {
                    List<Integer> partitions = ClusterConfig.partitionsForHost(topo, m_messenger.getHostId());
                    m_iv2Initiators = createIv2Initiators(partitions);
                }
                // each node has an MPInitiator (and exactly 1 node has the master MPI).
                long mpiBuddyHSId = m_iv2Initiators.get(0).getInitiatorHSId();
                mpi = new MpInitiator(m_messenger, mpiBuddyHSId);
                m_iv2Initiators.add(mpi);
            }

            /*
             * Start mailbox tracker early here because it is required
             * on rejoin to find the hosts that are missing from the cluster
             */
            m_mailboxTracker = new MailboxTracker(m_messenger.getZK(), this);
            m_mailboxTracker.start();
            /*
             * Will count this down at the right point on regular startup as well as rejoin
             */
            CountDownLatch rejoinCompleteLatch = new CountDownLatch(1);
            Pair<ArrayDeque<Mailbox>, ClusterConfig> p;
            if (isRejoin) {
                /*
                 * Need to lock the topology metadata
                 * so that it can be changed atomically with publishing the mailbox node
                 * for this process on a rejoin.
                 */
                createRejoinBarrierAndWatchdog(rejoinCompleteLatch);

                p = createMailboxesForSitesRejoin(topo);
            } else {
                p = createMailboxesForSitesStartup(topo);
            }

            siteMailboxes = p.getFirst();
            clusterConfig = p.getSecond();
            // This will set up site tracker
            initiatorHSId = registerInitiatorMailbox();
            final long statsHSId = m_messenger.getHSIdForLocalSite(HostMessenger.STATS_SITE_ID);
            m_messenger.generateMailboxId(statsHSId);
            hostLog.info("Registering stats mailbox id " + CoreUtils.hsIdToString(statsHSId));
            m_mailboxPublisher.registerMailbox(MailboxType.StatsAgent, new MailboxNodeContent(statsHSId, null));

            if (isRejoin && isIV2Enabled()) {
                // Make a list of HSIds to rejoin
                List<Long> hsidsToRejoin = new ArrayList<Long>();
                for (Initiator init : m_iv2Initiators) {
                    if (init.isRejoinable()) {
                        hsidsToRejoin.add(init.getInitiatorHSId());
                    }
                }
                SnapshotSaveAPI.recoveringSiteCount.set(hsidsToRejoin.size());
                hostLog.info("Set recovering site count to " + hsidsToRejoin.size());

                m_rejoinCoordinator = new SequentialRejoinCoordinator(m_messenger, hsidsToRejoin,
                        m_catalogContext.cluster.getVoltroot());
                m_messenger.registerMailbox(m_rejoinCoordinator);
                hostLog.info("Using iv2 community rejoin");
            } else if (isRejoin && m_config.m_startAction == START_ACTION.LIVE_REJOIN) {
                SnapshotSaveAPI.recoveringSiteCount.set(siteMailboxes.size());
                hostLog.info("Set recovering site count to " + siteMailboxes.size());
                // Construct and publish rejoin coordinator mailbox
                ArrayList<Long> sites = new ArrayList<Long>();
                for (Mailbox siteMailbox : siteMailboxes) {
                    sites.add(siteMailbox.getHSId());
                }

                m_rejoinCoordinator = new SequentialRejoinCoordinator(m_messenger, sites,
                        m_catalogContext.cluster.getVoltroot());
                m_messenger.registerMailbox(m_rejoinCoordinator);
                m_mailboxPublisher.registerMailbox(MailboxType.OTHER,
                        new MailboxNodeContent(m_rejoinCoordinator.getHSId(), null));
            } else if (isRejoin) {
                SnapshotSaveAPI.recoveringSiteCount.set(siteMailboxes.size());
            }

            // All mailboxes should be set up, publish it
            m_mailboxPublisher.publish(m_messenger.getZK());

            /*
             * Now that we have published our changes to the topology it is safe for
             * another node to come in and manipulate the topology metadata
             */
            rejoinCompleteLatch.countDown();
            if (isRejoin) {
                m_messenger.getZK().delete(VoltZK.rejoinLock, -1, new ZKUtil.VoidCallback(), null);
            }
        } catch (Exception e) {
            VoltDB.crashLocalVoltDB(e.getMessage(), true, e);
        }

        /*
         * Before this barrier pretty much every remotely visible mailbox id has to have been
         * registered with host messenger and published with mailbox publisher
         */
        boolean siteTrackerInit = false;
        for (int ii = 0; ii < 4000; ii++) {
            boolean predicate = true;
            if (isRejoin) {
                predicate = !m_siteTracker.getAllHosts().contains(m_messenger.getHostId());
            } else {
                predicate = m_siteTracker.getAllHosts().size() < m_deployment.getCluster().getHostcount();
            }
            if (predicate) {
                try {
                    Thread.sleep(5);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            } else {
                siteTrackerInit = true;
                break;
            }
        }
        if (!siteTrackerInit) {
            VoltDB.crashLocalVoltDB("Failed to initialize site tracker with all hosts before timeout", true,
                    null);
        }

        initiatorMailbox = createInitiatorMailbox(initiatorHSId);

        // do the many init tasks in the Inits class
        Inits inits = new Inits(this, 1);
        inits.doInitializationWork();

        if (config.m_backend.isIPC) {
            int eeCount = m_siteTracker.getLocalSites().length;
            if (config.m_ipcPorts.size() != eeCount) {
                hostLog.fatal("Specified an IPC backend but only supplied " + config.m_ipcPorts.size()
                        + " backend ports when " + eeCount + " are required");
                System.exit(-1);
            }
        }

        collectLocalNetworkMetadata();

        /*
         * Construct an adhoc planner for the initial catalog
         */
        final CatalogSpecificPlanner csp = new CatalogSpecificPlanner(m_asyncCompilerAgent, m_catalogContext);

        /*
         * Configure and start all the IV2 sites
         */
        if (isIV2Enabled()) {
            try {
                m_leaderAppointer = new LeaderAppointer(m_messenger.getZK(), clusterConfig.getPartitionCount(),
                        m_deployment.getCluster().getKfactor(), topo, mpi);
                m_globalServiceElector.registerService(m_leaderAppointer);

                for (Initiator iv2init : m_iv2Initiators) {
                    iv2init.configure(getBackendTargetType(), m_serializedCatalog, m_catalogContext,
                            m_deployment.getCluster().getKfactor(), csp, clusterConfig.getPartitionCount(),
                            m_rejoining);
                }
            } catch (Exception e) {
                Throwable toLog = e;
                if (e instanceof ExecutionException) {
                    toLog = ((ExecutionException) e).getCause();
                }
                VoltDB.crashLocalVoltDB("Error configuring IV2 initiator.", true, toLog);
            }
        } else {
            /*
             * Create execution sites runners (and threads) for all exec
             * sites except the first one.  This allows the sites to be set
             * up in the thread that will end up running them.  Cache the
             * first Site from the catalog and only do the setup once the
             * other threads have been started.
             */
            Mailbox localThreadMailbox = siteMailboxes.poll();
            ((org.voltcore.messaging.SiteMailbox) localThreadMailbox).setCommandLog(m_commandLog);
            m_currentThreadSite = null;
            for (Mailbox mailbox : siteMailboxes) {
                long site = mailbox.getHSId();
                int sitesHostId = SiteTracker.getHostForSite(site);

                // start a local site
                if (sitesHostId == m_myHostId) {
                    ((org.voltcore.messaging.SiteMailbox) mailbox).setCommandLog(m_commandLog);
                    ExecutionSiteRunner runner = new ExecutionSiteRunner(mailbox, m_catalogContext,
                            m_serializedCatalog, m_rejoining, m_replicationActive, hostLog,
                            m_configuredNumberOfPartitions, csp);
                    m_runners.add(runner);
                    Thread runnerThread = new Thread(runner,
                            "Site " + org.voltcore.utils.CoreUtils.hsIdToString(site));
                    runnerThread.start();
                    log.l7dlog(Level.TRACE, LogKeys.org_voltdb_VoltDB_CreatingThreadForSite.name(),
                            new Object[] { site }, null);
                    m_siteThreads.put(site, runnerThread);
                }
            }

            /*
             * Now that the runners have been started and are doing setup of the other sites in parallel
             * this thread can set up its own execution site.
             */
            try {
                ExecutionSite siteObj = new ExecutionSite(VoltDB.instance(), localThreadMailbox,
                        m_serializedCatalog, null, m_rejoining, m_replicationActive,
                        m_catalogContext.m_transactionId, m_configuredNumberOfPartitions, csp);
                m_localSites.put(localThreadMailbox.getHSId(), siteObj);
                m_currentThreadSite = siteObj;
            } catch (Exception e) {
                VoltDB.crashLocalVoltDB(e.getMessage(), true, e);
            }
            /*
             * Stop and wait for the runners to finish setting up and then put
             * the constructed ExecutionSites in the local site map.
             */
            for (ExecutionSiteRunner runner : m_runners) {
                try {
                    runner.m_siteIsLoaded.await();
                } catch (InterruptedException e) {
                    VoltDB.crashLocalVoltDB("Unable to wait on starting execution site.", true, e);
                }
                assert (runner.m_siteObj != null);
                m_localSites.put(runner.m_siteId, runner.m_siteObj);
            }
        }

        // Start the GlobalServiceElector.  Not sure where this will actually belong.
        try {
            m_globalServiceElector.start();
        } catch (Exception e) {
            VoltDB.crashLocalVoltDB("Unable to start GlobalServiceElector", true, e);
        }

        /*
         * At this point all of the execution sites have been published to m_localSites
         * It is possible that while they were being created the mailbox tracker found additional
         * sites, but was unable to deliver the notification to some or all of the execution sites.
         * Since notifying them of new sites is idempotent (version number check), let's do that here so there
         * are no lost updates for additional sites. But... it must be done from the
         * mailbox tracker thread or there is a race with failure detection and handling.
         * Generally speaking it seems like retrieving a reference to a site tracker not via a message
         * from the mailbox tracker thread that builds the site tracker is a bug. If it isn't delivered to you by
         * a site tracker then you lose sequential consistency.
         */
        try {
            m_mailboxTracker.executeTask(new Runnable() {
                @Override
                public void run() {
                    for (ExecutionSite es : m_localSites.values()) {
                        es.notifySitesAdded(m_siteTracker);
                    }
                }
            }).get();
        } catch (InterruptedException e) {
            VoltDB.crashLocalVoltDB(e.getMessage(), true, e);
        } catch (ExecutionException e) {
            VoltDB.crashLocalVoltDB(e.getMessage(), true, e);
        }

        // Create the client interface
        int portOffset = 0;
        for (int i = 0; i < 1; i++) {
            // create DTXN and CI for each local non-EE site
            SimpleDtxnInitiator initiator = new SimpleDtxnInitiator(initiatorMailbox, m_catalogContext,
                    m_messenger, m_myHostId, m_myHostId, // fake initiator ID
                    m_config.m_timestampTestingSalt);

            try {
                ClientInterface ci = ClientInterface.create(m_messenger, m_catalogContext,
                        m_config.m_replicationRole, initiator, m_cartographer,
                        clusterConfig.getPartitionCount(), config.m_port + portOffset,
                        config.m_adminPort + portOffset, m_config.m_timestampTestingSalt);
                portOffset += 2;
                m_clientInterfaces.add(ci);
            } catch (Exception e) {
                VoltDB.crashLocalVoltDB(e.getMessage(), true, e);
            }
            portOffset += 2;
            m_dtxns.add(initiator);
        }

        m_partitionCountStats = new PartitionCountStats(clusterConfig.getPartitionCount());
        m_statsAgent.registerStatsSource(SysProcSelector.PARTITIONCOUNT, 0, m_partitionCountStats);
        m_ioStats = new IOStats();
        m_statsAgent.registerStatsSource(SysProcSelector.IOSTATS, 0, m_ioStats);
        m_memoryStats = new MemoryStats();
        m_statsAgent.registerStatsSource(SysProcSelector.MEMORY, 0, m_memoryStats);
        if (isIV2Enabled()) {
            m_statsAgent.registerStatsSource(SysProcSelector.TOPO, 0, m_cartographer);
        }
        // Create the statistics manager and register it to JMX registry
        m_statsManager = null;
        try {
            final Class<?> statsManagerClass = Class.forName("org.voltdb.management.JMXStatsManager");
            m_statsManager = (StatsManager) statsManagerClass.newInstance();
            m_statsManager.initialize(new ArrayList<Long>(m_localSites.keySet()));
        } catch (Exception e) {
        }

        try {
            m_snapshotCompletionMonitor.init(m_messenger.getZK());
        } catch (Exception e) {
            hostLog.fatal("Error initializing snapshot completion monitor", e);
            VoltDB.crashLocalVoltDB("Error initializing snapshot completion monitor", true, e);
        }

        if (m_commandLog != null && isRejoin) {
            m_commandLog.initForRejoin(m_catalogContext, Long.MIN_VALUE, true);
        }

        /*
         * Make sure the build string successfully validated
         * before continuing to do operations
         * that might return wrongs answers or lose data.
         */
        try {
            buildStringValidation.get();
        } catch (Exception e) {
            VoltDB.crashLocalVoltDB("Failed to validate cluster build string", false, e);
        }

        if (!isRejoin) {
            try {
                m_messenger.waitForAllHostsToBeReady(m_deployment.getCluster().getHostcount());
            } catch (Exception e) {
                hostLog.fatal("Failed to announce ready state.");
                VoltDB.crashLocalVoltDB("Failed to announce ready state.", false, null);
            }
        }
        m_validateConfiguredNumberOfPartitionsOnMailboxUpdate = true;
        if (m_siteTracker.m_numberOfPartitions != m_configuredNumberOfPartitions) {
            for (Map.Entry<Integer, ImmutableList<Long>> entry : m_siteTracker.m_partitionsToSitesImmutable
                    .entrySet()) {
                hostLog.info(entry.getKey() + " -- " + CoreUtils.hsIdCollectionToString(entry.getValue()));
            }
            VoltDB.crashGlobalVoltDB(
                    "Mismatch between configured number of partitions (" + m_configuredNumberOfPartitions
                            + ") and actual (" + m_siteTracker.m_numberOfPartitions + ")",
                    true, null);
        }

        heartbeatThread = new HeartbeatThread(m_clientInterfaces);
        heartbeatThread.start();
        schedulePeriodicWorks();

        // print out a bunch of useful system info
        logDebuggingInfo(m_config.m_adminPort, m_config.m_httpPort, m_httpPortExtraLogMessage, m_jsonEnabled);

        if (clusterConfig.getReplicationFactor() == 0) {
            hostLog.warn("Running without redundancy (k=0) is not recommended for production use.");
        }

        assert (m_clientInterfaces.size() > 0);
        ClientInterface ci = m_clientInterfaces.get(0);
        ci.initializeSnapshotDaemon(m_messenger.getZK());

        // set additional restore agent stuff
        TransactionInitiator initiator = m_dtxns.get(0);
        if (m_restoreAgent != null) {
            m_restoreAgent.setCatalogContext(m_catalogContext);
            m_restoreAgent.setSiteTracker(m_siteTracker);
            m_restoreAgent.setInitiator(initiator);
        }
    }
}

From source file:io.mycat.MycatServer.java

public void startup() throws IOException {

    SystemConfig system = config.getSystem();
    int processorCount = system.getProcessors();

    //init RouteStrategyFactory first
    RouteStrategyFactory.init();

    // server startup
    LOGGER.info(NAME + " is ready to startup ...");
    String inf = "Startup processors ...,total processors:" + system.getProcessors() + ",aio thread pool size:"
            + system.getProcessorExecutor() + "    \r\n each process allocated socket buffer pool "
            + " bytes ,a page size:" + system.getBufferPoolPageSize()
            + "  a page's chunk number(PageSize/ChunkSize) is:"
            + (system.getBufferPoolPageSize() / system.getBufferPoolChunkSize()) + "  buffer page's number is:"
            + system.getBufferPoolPageNumber();
    LOGGER.info(inf);
    LOGGER.info("sysconfig params:" + system.toString());

    // startup manager
    ManagerConnectionFactory mf = new ManagerConnectionFactory();
    ServerConnectionFactory sf = new ServerConnectionFactory();
    SocketAcceptor manager = null;
    SocketAcceptor server = null;
    aio = (system.getUsingAIO() == 1);

    // startup processors
    int threadPoolSize = system.getProcessorExecutor();
    processors = new NIOProcessor[processorCount];
    // a page size
    int bufferPoolPageSize = system.getBufferPoolPageSize();
    // total page number 
    short bufferPoolPageNumber = system.getBufferPoolPageNumber();
    //minimum allocation unit
    short bufferPoolChunkSize = system.getBufferPoolChunkSize();

    int socketBufferLocalPercent = system.getProcessorBufferLocalPercent();
    int bufferPoolType = system.getProcessorBufferPoolType();

    switch (bufferPoolType) {
    case 0:
        bufferPool = new DirectByteBufferPool(bufferPoolPageSize, bufferPoolChunkSize, bufferPoolPageNumber,
                system.getFrontSocketSoRcvbuf());

        totalNetWorkBufferSize = bufferPoolPageSize * bufferPoolPageNumber;
        break;
    case 1:
        /**
         * TODO
         *
         * A ByteBufferArena is made up of 6 ByteBufferLists; each list is a
         * chain of ByteBufferChunks, and each chunk is divided into pages.
         * bufferPoolChunkSize is the page size within a chunk, and
         * bufferPoolPageNumber is the number of ByteBufferChunks per
         * ByteBufferList.
         */

        totalNetWorkBufferSize = 6 * bufferPoolPageSize * bufferPoolPageNumber;
        break;
    case 2:
        bufferPool = new NettyBufferPool(bufferPoolChunkSize);
        LOGGER.info("Use Netty Buffer Pool");

        break;
    default:
        bufferPool = new DirectByteBufferPool(bufferPoolPageSize, bufferPoolChunkSize, bufferPoolPageNumber,
                system.getFrontSocketSoRcvbuf());
        totalNetWorkBufferSize = bufferPoolPageSize * bufferPoolPageNumber;
    }

    /**
     * Off-heap memory for Merge/Order/Group/Limit processing
     */
    if (system.getUseOffHeapForMerge() == 1) {
        try {
            myCatMemory = new MyCatMemory(system, totalNetWorkBufferSize);
        } catch (NoSuchFieldException e) {
            LOGGER.error("NoSuchFieldException", e);
        } catch (IllegalAccessException e) {
            LOGGER.error("Error", e);
        }
    }
    businessExecutor = ExecutorUtil.create("BusinessExecutor", threadPoolSize);
    sequenceExecutor = ExecutorUtil.create("SequenceExecutor", threadPoolSize);
    timerExecutor = ExecutorUtil.create("Timer", system.getTimerExecutor());
    listeningExecutorService = MoreExecutors.listeningDecorator(businessExecutor);

    for (int i = 0; i < processors.length; i++) {
        processors[i] = new NIOProcessor("Processor" + i, bufferPool, businessExecutor);
    }

    if (aio) {
        LOGGER.info("using aio network handler ");
        asyncChannelGroups = new AsynchronousChannelGroup[processorCount];
        // startup connector
        connector = new AIOConnector();
        for (int i = 0; i < processors.length; i++) {
            asyncChannelGroups[i] = AsynchronousChannelGroup.withFixedThreadPool(processorCount,
                    new ThreadFactory() {
                        private int inx = 1;

                        @Override
                        public Thread newThread(Runnable r) {
                            Thread th = new Thread(r);
                            //TODO
                            th.setName(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + "AIO" + (inx++));
                            LOGGER.info("created new AIO thread " + th.getName());
                            return th;
                        }
                    });
        }
        manager = new AIOAcceptor(NAME + "Manager", system.getBindIp(), system.getManagerPort(), mf,
                this.asyncChannelGroups[0]);

        // startup server

        server = new AIOAcceptor(NAME + "Server", system.getBindIp(), system.getServerPort(), sf,
                this.asyncChannelGroups[0]);

    } else {
        LOGGER.info("using nio network handler ");

        NIOReactorPool reactorPool = new NIOReactorPool(
                DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + "NIOREACTOR", processors.length);
        connector = new NIOConnector(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + "NIOConnector", reactorPool);
        ((NIOConnector) connector).start();

        manager = new NIOAcceptor(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + NAME + "Manager",
                system.getBindIp(), system.getManagerPort(), mf, reactorPool);

        server = new NIOAcceptor(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + NAME + "Server",
                system.getBindIp(), system.getServerPort(), sf, reactorPool);
    }
    // manager start
    manager.start();
    LOGGER.info(manager.getName() + " is started and listening on " + manager.getPort());
    server.start();

    // server started
    LOGGER.info(server.getName() + " is started and listening on " + server.getPort());

    LOGGER.info("===============================================");

    // init datahost
    Map<String, PhysicalDBPool> dataHosts = config.getDataHosts();
    LOGGER.info("Initialize dataHost ...");
    for (PhysicalDBPool node : dataHosts.values()) {
        String index = dnIndexProperties.getProperty(node.getHostName(), "0");
        if (!"0".equals(index)) {
            LOGGER.info("init datahost: " + node.getHostName() + "  to use datasource index:" + index);
        }
        node.init(Integer.parseInt(index));
        node.startHeartbeat();
    }

    long dataNodeIdleCheckPeriod = system.getDataNodeIdleCheckPeriod();

    heartbeatScheduler.scheduleAtFixedRate(updateTime(), 0L, TIME_UPDATE_PERIOD, TimeUnit.MILLISECONDS);
    heartbeatScheduler.scheduleAtFixedRate(processorCheck(), 0L, system.getProcessorCheckPeriod(),
            TimeUnit.MILLISECONDS);
    heartbeatScheduler.scheduleAtFixedRate(dataNodeConHeartBeatCheck(dataNodeIdleCheckPeriod), 0L,
            dataNodeIdleCheckPeriod, TimeUnit.MILLISECONDS);
    heartbeatScheduler.scheduleAtFixedRate(dataNodeHeartbeat(), 0L, system.getDataNodeHeartbeatPeriod(),
            TimeUnit.MILLISECONDS);
    heartbeatScheduler.scheduleAtFixedRate(dataSourceOldConsClear(), 0L, DEFAULT_OLD_CONNECTION_CLEAR_PERIOD,
            TimeUnit.MILLISECONDS);
    scheduler.schedule(catletClassClear(), 30000, TimeUnit.MILLISECONDS);

    if (system.getCheckTableConsistency() == 1) {
        scheduler.scheduleAtFixedRate(tableStructureCheck(), 0L, system.getCheckTableConsistencyPeriod(),
                TimeUnit.MILLISECONDS);
    }

    if (system.getUseSqlStat() == 1) {
        scheduler.scheduleAtFixedRate(recycleSqlStat(), 0L, DEFAULT_SQL_STAT_RECYCLE_PERIOD,
                TimeUnit.MILLISECONDS);
    }

    if (system.getUseGlobleTableCheck() == 1) { // global table consistency check
        scheduler.scheduleAtFixedRate(glableTableConsistencyCheck(), 0L, system.getGlableTableCheckPeriod(),
                TimeUnit.MILLISECONDS);
    }

    // periodically clear the big SQL result set map
    scheduler.scheduleAtFixedRate(resultSetMapClear(), 0L, system.getClearBigSqLResultSetMapMs(),
            TimeUnit.MILLISECONDS);

    //        new Thread(tableStructureCheck()).start();

    //XA Init recovery Log
    LOGGER.info("===============================================");
    LOGGER.info("Perform XA recovery log ...");
    performXARecoveryLog();

    if (isUseZkSwitch()) {
        // initialize the dnindex node in ZooKeeper
        initZkDnindex();
    }
    initRuleData();

    startup.set(true);
}

From source file:com.netflix.metacat.main.services.search.ElasticSearchRefresh.java

@SuppressWarnings("checkstyle:methodname")
private void _process(final List<QualifiedName> qNames, final Supplier<ListenableFuture<Void>> supplier,
        final String requestName, final boolean delete, final int queueSize) {
    if (isElasticSearchMetacatRefreshAlreadyRunning.compareAndSet(false, true)) {
        final long start = registry.clock().wallTime();
        try {
            log.info("Start: Full refresh of metacat index in elastic search. Processing {} ...", qNames);
            final MetacatRequestContext context = MetacatRequestContext.builder().userName("admin")
                    .clientAppName("elasticSearchRefresher").apiUri("esRefresh").scheme("internal").build();
            MetacatContextManager.setContext(context);
            refreshMarker = Instant.now();
            refreshMarkerText = refreshMarker.toString();
            service = MoreExecutors
                    .listeningDecorator(newFixedThreadPool(10, "elasticsearch-refresher-%d", queueSize));
            esService = MoreExecutors
                    .listeningDecorator(newFixedThreadPool(5, "elasticsearch-refresher-es-%d", queueSize));
            supplier.get().get(24, TimeUnit.HOURS);
            log.info("End: Full refresh of metacat index in elastic search");
            if (delete) {
                deleteUnmarkedEntities(qNames, config.getElasticSearchRefreshExcludeQualifiedNames());
            }
        } catch (Exception e) {
            log.error("Full refresh of metacat index failed", e);
            registry.counter(registry.createId(Metrics.CounterElasticSearchRefresh.getMetricName())
                    .withTags(Metrics.tagStatusFailureMap)).increment();
        } finally {
            try {
                shutdown(service);
                shutdown(esService);
            } finally {
                isElasticSearchMetacatRefreshAlreadyRunning.set(false);
                final long duration = registry.clock().wallTime() - start;
                this.registry.timer(Metrics.TimerElasticSearchRefresh.getMetricName() + "." + requestName)
                        .record(duration, TimeUnit.MILLISECONDS);
                log.info("### Time taken to complete {} is {} ms", requestName, duration);
            }
        }

    } else {
        log.info("Full refresh of metacat index is already running.");
        registry.counter(registry.createId(Metrics.CounterElasticSearchRefreshAlreadyRunning.getMetricName()))
                .increment();
    }
}

From source file:com.sri.ai.util.concurrent.BranchAndMerge.java

private static ListeningExecutorService newExecutorService() {
    ListeningExecutorService result = null;
    int nThreads = 0; // i.e. not allowed by default

    // Running concurrently is optional.
    if (AICUtilConfiguration.isBranchAndMergeThreadingEnabled()) {
        if (AICUtilConfiguration.isBranchAndMergeUseNumberProcessorsForThreadPoolSize()) {
            nThreads = Runtime.getRuntime().availableProcessors();
            int delta = AICUtilConfiguration.getBranchAndMergeDeltaNumberProcessorsForThreadPoolSize();
            nThreads += delta;
            if (nThreads < 1) {
                nThreads = 1;
            }
        } else {
            nThreads = AICUtilConfiguration.getBranchAndMergeFixedThreadPoolSize();
        }
    }

    if (nThreads > 0) {
        ThreadFactory threadFactory = new ThreadFactoryBuilder()
                // Do this so it's simple to identify worker threads
                .setNameFormat(_threadIdentifierPrefix + "%s")
                // This is a service so want the worker threads
                // to be Daemons so that the JVM can exit normally
                // without needing any special calls to this service.
                .setDaemon(true).build();

        // We'll use a fixed pool size for the worker threads as this
        // makes it easier to detect degradation.
        ExecutorService executorService = Executors.newFixedThreadPool(nThreads, threadFactory);
        result = MoreExecutors.listeningDecorator(executorService);

        _sharedExecutorNumberWorkerThreads = nThreads;
        _sharedExecutorActiveWorkerThreads = new AtomicInteger();
    }

    return result;
}

From source file:org.apache.brooklyn.location.jclouds.JcloudsUtil.java

public static String getFirstReachableAddress(NodeMetadata node, Duration timeout) {
    final int port = node.getLoginPort();
    List<HostAndPort> sockets = FluentIterable
            .from(Iterables.concat(node.getPublicAddresses(), node.getPrivateAddresses()))
            .transform(new Function<String, HostAndPort>() {
                @Override
                public HostAndPort apply(String input) {
                    return HostAndPort.fromParts(input, port);
                }
            }).toList();

    ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
    try {
        ReachableSocketFinder finder = new ReachableSocketFinder(executor);
        HostAndPort result = finder.findOpenSocketOnNode(sockets, timeout);
        return result.getHostText();
    } catch (Exception e) {
        Exceptions.propagateIfFatal(e);
        throw new IllegalStateException("Unable to connect SshClient to " + node
                + "; check that the node is accessible and that the SSH key exists and is correctly configured, including any passphrase defined",
                e);
    } finally {
        executor.shutdownNow();
    }
}

From source file:org.apache.qpid.server.store.berkeleydb.replication.ReplicatedEnvironmentFacade.java

public ReplicatedEnvironmentFacade(ReplicatedEnvironmentConfiguration configuration) {
    _environmentDirectory = new File(configuration.getStorePath());
    if (!_environmentDirectory.exists()) {
        if (!_environmentDirectory.mkdirs()) {
            throw new IllegalArgumentException(
                    "Environment path " + _environmentDirectory + " could not be read or created. "
                            + "Ensure the path is correct and that the permissions are correct.");
        }
    } else if (_environmentDirectory.isFile()) {
        throw new IllegalArgumentException("Environment path " + _environmentDirectory
                + " exists as a file - not a directory. " + "Ensure the path is correct.");

    } else {
        LOGGER.debug("Environment at path " + _environmentDirectory + " already exists.");
    }

    _configuration = configuration;

    _masterTransferTimeout = configuration.getFacadeParameter(MASTER_TRANSFER_TIMEOUT_PROPERTY_NAME,
            DEFAULT_MASTER_TRANSFER_TIMEOUT);
    _dbPingSocketTimeout = configuration.getFacadeParameter(DB_PING_SOCKET_TIMEOUT_PROPERTY_NAME,
            DEFAULT_DB_PING_SOCKET_TIMEOUT);
    _remoteNodeMonitorInterval = configuration.getFacadeParameter(REMOTE_NODE_MONITOR_INTERVAL_PROPERTY_NAME,
            DEFAULT_REMOTE_NODE_MONITOR_INTERVAL);
    _remoteNodeMonitorTimeout = configuration.getFacadeParameter(REMOTE_NODE_MONITOR_TIMEOUT_PROPERTY_NAME,
            DEFAULT_REMOTE_NODE_MONITOR_TIMEOUT);
    _environmentRestartRetryLimit = configuration.getFacadeParameter(
            ENVIRONMENT_RESTART_RETRY_LIMIT_PROPERTY_NAME, DEFAULT_ENVIRONMENT_RESTART_RETRY_LIMIT);
    _executorShutdownTimeout = configuration.getFacadeParameter(EXECUTOR_SHUTDOWN_TIMEOUT_PROPERTY_NAME,
            DEFAULT_EXECUTOR_SHUTDOWN_TIMEOUT);
    _logHandlerCleanerProtectedFilesLimit = _configuration.getFacadeParameter(
            LOG_HANDLER_CLEANER_PROTECTED_FILES_LIMIT_PROPERTY_NAME,
            DEFAULT_LOG_HANDLER_CLEANER_PROTECTED_FILES_LIMIT);

    _defaultDurability = new Durability(LOCAL_TRANSACTION_SYNCHRONIZATION_POLICY,
            REMOTE_TRANSACTION_SYNCHRONIZATION_POLICY, REPLICA_REPLICA_ACKNOWLEDGMENT_POLICY);
    _prettyGroupNodeName = _configuration.getGroupName() + ":" + _configuration.getName();

    // we rely on this executor being single-threaded as we need to restart and mutate the environment from one thread only
    _environmentJobExecutor = Executors
            .newSingleThreadExecutor(new DaemonThreadFactory("Environment-" + _prettyGroupNodeName));
    _stateChangeExecutor = MoreExecutors.listeningDecorator(
            Executors.newSingleThreadExecutor(new DaemonThreadFactory("StateChange-" + _prettyGroupNodeName)));
    _groupChangeExecutor = new ScheduledThreadPoolExecutor(2,
            new DaemonThreadFactory("Group-Change-Learner:" + _prettyGroupNodeName));

    // create environment in a separate thread to avoid renaming of the current thread by JE
    EnvHomeRegistry.getInstance().registerHome(_environmentDirectory);
    boolean success = false;
    try {
        createEnvironment(true, new Runnable() {
            @Override
            public void run() {
                populateExistingRemoteReplicationNodes();
                int numberOfRemoteNodes = _remoteReplicationNodes.size();
                if (numberOfRemoteNodes > 0) {
                    int newPoolSize = numberOfRemoteNodes + 1 /* for this node */
                            + 1 /* for coordination */;
                    _groupChangeExecutor.setCorePoolSize(newPoolSize);
                    LOGGER.debug("Setting group change executor core pool size to {}", newPoolSize);
                }
                _groupChangeExecutor.submit(new RemoteNodeStateLearner());
            }
        });
        success = true;
    } finally {
        if (!success) {
            EnvHomeRegistry.getInstance().deregisterHome(_environmentDirectory);
        }
    }
}