Example usage for org.apache.cassandra.db SystemKeyspace finishStartup

Introduction

On this page you can find example usages of org.apache.cassandra.db.SystemKeyspace.finishStartup().

Prototype

public static void finishStartup() 
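
As a minimal sketch of where the call fits, here is the ordering shared by both usages below, reduced to its essentials. This assumes a 3.x-era classpath (where Schema lives in org.apache.cassandra.config); the class name MinimalStartupSketch is hypothetical, and real daemons do considerably more between these steps.

import java.io.IOException;

import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.SystemKeyspace;
import org.apache.cassandra.db.commitlog.CommitLog;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.service.StorageService;

public class MinimalStartupSketch {
    public void startup() {
        // load schema from disk
        Schema.instance.loadFromDisk();

        // replay the commit log if necessary
        try {
            CommitLog.instance.recover();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }

        // finish system keyspace startup work once replay is complete
        SystemKeyspace.finishStartup();

        // only then start server internals
        try {
            StorageService.instance.initServer();
        } catch (ConfigurationException e) {
            throw new RuntimeException("Fatal configuration error", e);
        }
    }
}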

Usage

From source file: info.archinnov.achilles.embedded.AchillesCassandraDaemon.java

License: Apache License

/**
 * Override the default setup process to speed up bootstrap
 *
 * - disable JMX
 * - disable legacy schema migration
 * - no pre-3.0 hints migration
 * - no pre-3.0 batch entries migration
 * - disable auto compaction on all keyspaces (your test data should fit in memory!!!)
 * - disable metrics
 * - disable GCInspector
 * - disable mlock
 * - disable Thrift server
 * - disable startup checks (Jemalloc, validLaunchDate, JMXPorts, JvmOptions, JnaInitialization, initSigarLibrary, dataDirs, SSTablesFormat, SystemKeyspaceState, Datacenter, Rack)
 * - disable materialized view rebuild (you should clean data folder between each test anyway)
 * - disable the SizeEstimatesRecorder (it only estimates SSTable sizes, which is irrelevant for unit testing)
 */
@Override
protected void setup() {
    // Delete any failed snapshot deletions on Windows - see CASSANDRA-9658
    if (FBUtilities.isWindows())
        WindowsFailedSnapshotTracker.deleteOldSnapshots();

    ThreadAwareSecurityManager.install();

    Thread.setDefaultUncaughtExceptionHandler((t, e) -> {
        StorageMetrics.exceptions.inc();
        logger.error("Exception in thread {}", t, e);
        Tracing.trace("Exception in thread {}", t, e);
        for (Throwable e2 = e; e2 != null; e2 = e2.getCause()) {
            JVMStabilityInspector.inspectThrowable(e2);

            if (e2 instanceof FSError) {
                if (e2 != e) // make sure FSError gets logged exactly once.
                    logger.error("Exception in thread {}", t, e2);
                FileUtils.handleFSError((FSError) e2);
            }

            if (e2 instanceof CorruptSSTableException) {
                if (e2 != e)
                    logger.error("Exception in thread " + t, e2);
                FileUtils.handleCorruptSSTable((CorruptSSTableException) e2);
            }
        }
    });

    // Populate token metadata before flushing, for token-aware sstable partitioning (#6696)
    StorageService.instance.populateTokenMetadata();

    // load schema from disk
    Schema.instance.loadFromDisk();

    try {
        // clean up debris in the rest of the keyspaces
        for (String keyspaceName : Schema.instance.getKeyspaces()) {
            // Skip system as we've already cleaned it
            if (keyspaceName.equals(SystemKeyspace.NAME))
                continue;

            for (CFMetaData cfm : Schema.instance.getTablesAndViews(keyspaceName))
                ColumnFamilyStore.scrubDataDirectories(cfm);
        }
    } catch (StartupException startupEx) {
        logger.error("***** Startup exception : " + startupEx.getLocalizedMessage());
        throw new RuntimeException(startupEx);
    }

    Keyspace.setInitialized();

    // initialize keyspaces
    for (String keyspaceName : Schema.instance.getKeyspaces()) {
        if (logger.isDebugEnabled())
            logger.debug("opening keyspace {}", keyspaceName);
        // disable auto compaction until commit log replay ends
        for (ColumnFamilyStore cfs : Keyspace.open(keyspaceName).getColumnFamilyStores()) {
            for (ColumnFamilyStore store : cfs.concatWithIndexes()) {
                store.disableAutoCompaction();
            }
        }
    }

    try {
        loadRowAndKeyCacheAsync().get();
    } catch (Throwable t) {
        JVMStabilityInspector.inspectThrowable(t);
        logger.warn("Error loading key or row cache", t);
    }

    // replay the log if necessary
    try {
        CommitLog.instance.recover();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    // Re-populate token metadata after commit log recover (new peers might be loaded onto system keyspace #10293)
    StorageService.instance.populateTokenMetadata();

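    // complete system keyspace startup work (the call documented on this page)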
    SystemKeyspace.finishStartup();

    // start server internals
    StorageService.instance.registerDaemon(this);
    try {
        StorageService.instance.initServer();
    } catch (ConfigurationException e) {
        System.err.println(e.getMessage()
                + "\nFatal configuration error; unable to start server.  See log for stacktrace.");
        exitOrFail(1, "Fatal configuration error", e);
    }

    // Native transport
    nativeTransportService = new NativeTransportService();

    completeSetup();
}
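
Note the ordering in this example: finishStartup() runs only after the commit log has been replayed (CommitLog.instance.recover()) and token metadata has been repopulated, and immediately before the daemon registers itself and StorageService.instance.initServer() brings up server internals.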

From source file: org.wildfly.extension.cassandra.WildflyCassandraDaemon.java

License: Apache License

/**
 * This is a hook for concrete daemons to initialize themselves suitably.
 *
 * Subclasses should override this to finish the job (listening on ports, etc.)
 *
 * @throws IOException
 */
protected void setup() {
    // log warnings for different kinds of sub-optimal JVMs.  tldr use 64-bit Oracle >= 1.6u32
    if (!DatabaseDescriptor.hasLargeAddressSpace())
        CassandraLogger.LOGGER.infof(
                "32bit JVM detected.  It is recommended to run Cassandra on a 64bit JVM for better performance.");
    String javaVersion = System.getProperty("java.version");
    String javaVmName = System.getProperty("java.vm.name");
    CassandraLogger.LOGGER.infof("JVM vendor/version: %s/%s", javaVmName, javaVersion);
    if (javaVmName.contains("OpenJDK")) {
        // There is essentially no QA done on OpenJDK builds, and
        // clusters running OpenJDK have seen many heap and load issues.
        CassandraLogger.LOGGER
                .warn("OpenJDK is not recommended. Please upgrade to the newest Oracle Java release");
    } else if (!javaVmName.contains("HotSpot")) {
        CassandraLogger.LOGGER.warn(
                "Non-Oracle JVM detected.  Some features, such as immediate unmap of compacted SSTables, may not work as intended");
    }
    /*   else
       {
    String[] java_version = javaVersion.split("_");
    String java_major = java_version[0];
    int java_minor;
    try
    {
        java_minor = (java_version.length > 1) ? Integer.parseInt(java_version[1]) : 0;
    }
    catch (NumberFormatException e)
    {
        // have only seen this with java7 so far but no doubt there are other ways to break this
        CassandraLogger.LOGGER.infof("Unable to parse java version {}", Arrays.toString(java_version));
        java_minor = 32;
    }
       }
    */
    CassandraLogger.LOGGER.infof("Heap size: %s/%s", Runtime.getRuntime().totalMemory(),
            Runtime.getRuntime().maxMemory());
    for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans())
        CassandraLogger.LOGGER.infof("%s %s: %s", pool.getName(), pool.getType(), pool.getPeakUsage());
    CassandraLogger.LOGGER.infof("Classpath: %s", System.getProperty("java.class.path"));
    CLibrary.tryMlockall();

    Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        public void uncaughtException(Thread t, Throwable e) {
            StorageMetrics.exceptions.inc();
            CassandraLogger.LOGGER.error("Exception in thread " + t, e);
            Tracing.trace("Exception in thread " + t, e);
            for (Throwable e2 = e; e2 != null; e2 = e2.getCause()) {
                // some code, like FileChannel.map, will wrap an OutOfMemoryError in another exception
                if (e2 instanceof OutOfMemoryError)
                    exitThread.start();

                if (e2 instanceof FSError) {
                    if (e2 != e) // make sure FSError gets logged exactly once.
                        CassandraLogger.LOGGER.error("Exception in thread " + t, e2);
                    FileUtils.handleFSError((FSError) e2);
                }
            }
        }
    });

    // check all directories(data, commitlog, saved cache) for existence and permission
    Iterable<String> dirs = Iterables.concat(Arrays.asList(DatabaseDescriptor.getAllDataFileLocations()), Arrays
            .asList(DatabaseDescriptor.getCommitLogLocation(), DatabaseDescriptor.getSavedCachesLocation()));
    for (String dataDir : dirs) {
        CassandraLogger.LOGGER.debugf("Checking directory %s", dataDir);
        File dir = new File(dataDir);
        if (dir.exists())
            assert dir.isDirectory() && dir.canRead() && dir.canWrite() && dir.canExecute() : String
                    .format("Directory %s is not accessible.", dataDir);
    }

    if (CacheService.instance == null) // should never happen
        throw new RuntimeException("Failed to initialize Cache Service.");

    // check the system keyspace to keep user from shooting self in foot by changing partitioner, cluster name, etc.
    // we do a one-off scrub of the system keyspace first; we can't load the list of the rest of the keyspaces,
    // until system keyspace is opened.
    for (CFMetaData cfm : Schema.instance.getKeyspaceMetaData(Keyspace.SYSTEM_KS).values())
        ColumnFamilyStore.scrubDataDirectories(Keyspace.SYSTEM_KS, cfm.cfName);
    try {
        SystemKeyspace.checkHealth();
    } catch (ConfigurationException e) {
        throw new RuntimeException("Fatal exception during initialization", e);
    }

    // load keyspace descriptions.
    DatabaseDescriptor.loadSchemas();

    try {
        LeveledManifest.maybeMigrateManifests();
    } catch (IOException e) {
        throw new RuntimeException(
                "Could not migrate old leveled manifest. Move away the .json file in the data directory", e);
    }

    // clean up compaction leftovers
    Map<Pair<String, String>, Map<Integer, UUID>> unfinishedCompactions = SystemKeyspace
            .getUnfinishedCompactions();
    for (Pair<String, String> kscf : unfinishedCompactions.keySet())
        ColumnFamilyStore.removeUnfinishedCompactionLeftovers(kscf.left, kscf.right,
                unfinishedCompactions.get(kscf));
    SystemKeyspace.discardCompactionsInProgress();

    // clean up debris in the rest of the keyspaces
    for (String keyspaceName : Schema.instance.getKeyspaces()) {
        // Skip system as we've already cleaned it
        if (keyspaceName.equals(Keyspace.SYSTEM_KS))
            continue;

        for (CFMetaData cfm : Schema.instance.getKeyspaceMetaData(keyspaceName).values())
            ColumnFamilyStore.scrubDataDirectories(keyspaceName, cfm.cfName);
    }

    // initialize keyspaces
    for (String keyspaceName : Schema.instance.getKeyspaces()) {
        if (CassandraLogger.LOGGER.isDebugEnabled())
            CassandraLogger.LOGGER.debug("opening keyspace " + keyspaceName);
        // disable auto compaction until commit log replay ends
        for (ColumnFamilyStore cfs : Keyspace.open(keyspaceName).getColumnFamilyStores()) {
            for (ColumnFamilyStore store : cfs.concatWithIndexes()) {
                store.disableAutoCompaction();
            }
        }
    }

    if (CacheService.instance.keyCache.size() > 0)
        CassandraLogger.LOGGER.infof("completed pre-loading (%d keys) key cache.",
                CacheService.instance.keyCache.size());

    if (CacheService.instance.rowCache.size() > 0)
        CassandraLogger.LOGGER.infof("completed pre-loading (%d keys) row cache.",
                CacheService.instance.rowCache.size());

    try {
        GCInspector.instance.start();
    } catch (Throwable t) {
        CassandraLogger.LOGGER.warn("Unable to start GCInspector (currently only supported on the Sun JVM)");
    }

    // MeteredFlusher can block if flush queue fills up, so don't put on scheduledTasks
    // Start it before commit log, so memtables can flush during commit log replay
    StorageService.optionalTasks.scheduleWithFixedDelay(new MeteredFlusher(), 1000, 1000,
            TimeUnit.MILLISECONDS);

    // replay the log if necessary
    try {
        CommitLog.instance.recover();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    // enable auto compaction
    for (Keyspace keyspace : Keyspace.all()) {
        for (ColumnFamilyStore cfs : keyspace.getColumnFamilyStores()) {
            for (final ColumnFamilyStore store : cfs.concatWithIndexes()) {
                store.enableAutoCompaction();
            }
        }
    }
    // start compactions in five minutes (if no flushes have occurred by then to do so)
    Runnable runnable = new Runnable() {
        public void run() {
            for (Keyspace keyspaceName : Keyspace.all()) {
                for (ColumnFamilyStore cf : keyspaceName.getColumnFamilyStores()) {
                    for (ColumnFamilyStore store : cf.concatWithIndexes())
                        CompactionManager.instance.submitBackground(store);
                }
            }
        }
    };
    StorageService.optionalTasks.schedule(runnable, 5 * 60, TimeUnit.SECONDS);

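    // finish system keyspace startup tasks now that replay is done and compactions are scheduled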
    SystemKeyspace.finishStartup();

    // start server internals
    StorageService.instance.registerDaemon(this);
    try {
        StorageService.instance.initServer();
    } catch (ConfigurationException e) {
        throw new RuntimeException("Fatal configuration error", e);
    }

    Mx4jTool.maybeLoad();

    // Metrics
    String metricsReporterConfigFile = System.getProperty("cassandra.metricsReporterConfigFile");
    if (metricsReporterConfigFile != null) {
        CassandraLogger.LOGGER.infof("Trying to load metrics-reporter-config from file: %s",
                metricsReporterConfigFile);
        try {
            String reportFileLocation = WildflyCassandraDaemon.class.getClassLoader()
                    .getResource(metricsReporterConfigFile).getFile();
            ReporterConfig.loadFromFile(reportFileLocation).enableAll();
        } catch (Exception e) {
            CassandraLogger.LOGGER
                    .warn("Failed to load metrics-reporter-config, metric sinks will not be activated", e);
        }
    }

    if (!FBUtilities.getBroadcastAddress().equals(InetAddress.getLoopbackAddress()))
        waitForGossipToSettle();

    // Thrift
    InetAddress rpcAddr = DatabaseDescriptor.getRpcAddress();
    int rpcPort = DatabaseDescriptor.getRpcPort();
    thriftServer = new ThriftServer(rpcAddr, rpcPort);
    StorageService.instance.registerThriftServer(thriftServer);

    // Native transport
    InetAddress nativeAddr = DatabaseDescriptor.getNativeTransportAddress();
    int nativePort = DatabaseDescriptor.getNativeTransportPort();
    nativeServer = new org.apache.cassandra.transport.Server(nativeAddr, nativePort);
    StorageService.instance.registerNativeServer(nativeServer);
}
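
Both examples follow the same pattern: SystemKeyspace.finishStartup() is called after CommitLog.instance.recover() has replayed the log and before StorageService.instance.initServer() starts the server internals, with the transport services (native protocol, plus Thrift in the second example) set up last.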