Example usage for org.apache.cassandra.db.Keyspace.all()

Introduction

This page collects example usages of org.apache.cassandra.db.Keyspace.all().

Prototype

public static Iterable<Keyspace> all() 
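
As a quick orientation before the full examples, here is a minimal sketch of the usual iteration pattern. It assumes it runs inside a Cassandra server process with the schema already loaded (Keyspace.all() is not usable from a remote client); the helper method name and the printing are illustrative only:

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Keyspace;

// Hypothetical helper: visit every open keyspace and each of its
// column family stores, printing "keyspace.table" for each one.
public static void logAllStores() {
    for (Keyspace keyspace : Keyspace.all()) {
        for (ColumnFamilyStore cfs : keyspace.getColumnFamilyStores()) {
            System.out.println(keyspace.getName() + "." + cfs.name);
        }
    }
}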

Usage

From source file: org.elassandra.index.ElasticsearchIndexMetrics.java

License: Apache License

/**
 * Creates metrics for given {@link ColumnFamilyStore}.
 *
 * @param cfs ColumnFamilyStore to measure metrics
 */
public ElasticsearchIndexMetrics(final ColumnFamilyStore cfs) {
    factory = new ColumnFamilyMetricNameFactory(cfs);

    samplers = Maps.newHashMap();
    for (Sampler sampler : Sampler.values()) {
        samplers.put(sampler, new TopKSampler<ByteBuffer>());
    }

    memtableColumnsCount = createColumnFamilyGauge("MemtableColumnsCount", new Gauge<Long>() {
        public Long getValue() {
            return cfs.getTracker().getView().getCurrentMemtable().getOperations();
        }
    });
    memtableOnHeapSize = createColumnFamilyGauge("MemtableOnHeapSize", new Gauge<Long>() {
        public Long getValue() {
            return cfs.getTracker().getView().getCurrentMemtable().getAllocator().onHeap().owns();
        }
    });
    memtableOffHeapSize = createColumnFamilyGauge("MemtableOffHeapSize", new Gauge<Long>() {
        public Long getValue() {
            return cfs.getTracker().getView().getCurrentMemtable().getAllocator().offHeap().owns();
        }
    });
    memtableLiveDataSize = createColumnFamilyGauge("MemtableLiveDataSize", new Gauge<Long>() {
        public Long getValue() {
            return cfs.getTracker().getView().getCurrentMemtable().getLiveDataSize();
        }
    });
    allMemtablesOnHeapSize = createColumnFamilyGauge("AllMemtablesHeapSize", new Gauge<Long>() {
        public Long getValue() {
            long size = 0;
            for (ColumnFamilyStore cfs2 : cfs.concatWithIndexes())
                size += cfs2.getTracker().getView().getCurrentMemtable().getAllocator().onHeap().owns();
            return size;
        }
    });
    allMemtablesOffHeapSize = createColumnFamilyGauge("AllMemtablesOffHeapSize", new Gauge<Long>() {
        public Long getValue() {
            long size = 0;
            for (ColumnFamilyStore cfs2 : cfs.concatWithIndexes())
                size += cfs2.getTracker().getView().getCurrentMemtable().getAllocator().offHeap().owns();
            return size;
        }
    });
    allMemtablesLiveDataSize = createColumnFamilyGauge("AllMemtablesLiveDataSize", new Gauge<Long>() {
        public Long getValue() {
            long size = 0;
            for (ColumnFamilyStore cfs2 : cfs.concatWithIndexes())
                size += cfs2.getTracker().getView().getCurrentMemtable().getLiveDataSize();
            return size;
        }
    });
    memtableSwitchCount = createColumnFamilyCounter("MemtableSwitchCount");
    estimatedRowSizeHistogram = Metrics.register(factory.createMetricName("EstimatedRowSizeHistogram"),
            new Gauge<long[]>() {
                public long[] getValue() {
                    return combineHistograms(cfs.getSSTables(), new GetHistogram() {
                        public EstimatedHistogram getHistogram(SSTableReader reader) {
                            return reader.getEstimatedRowSize();
                        }
                    });
                }
            });
    estimatedRowCount = Metrics.register(factory.createMetricName("EstimatedRowCount"), new Gauge<Long>() {
        public Long getValue() {
            long memtablePartitions = 0;
            for (Memtable memtable : cfs.getTracker().getView().getAllMemtables())
                memtablePartitions += memtable.partitionCount();
            return SSTableReader.getApproximateKeyCount(cfs.getSSTables()) + memtablePartitions;
        }
    });
    estimatedColumnCountHistogram = Metrics.register(factory.createMetricName("EstimatedColumnCountHistogram"),
            new Gauge<long[]>() {
                public long[] getValue() {
                    return combineHistograms(cfs.getSSTables(), new GetHistogram() {
                        public EstimatedHistogram getHistogram(SSTableReader reader) {
                            return reader.getEstimatedColumnCount();
                        }
                    });
                }
            });
    sstablesPerReadHistogram = createColumnFamilyHistogram("SSTablesPerReadHistogram",
            cfs.keyspace.metric.sstablesPerReadHistogram, true);
    compressionRatio = createColumnFamilyGauge("CompressionRatio", new Gauge<Double>() {
        public Double getValue() {
            double sum = 0;
            int total = 0;
            for (SSTableReader sstable : cfs.getSSTables()) {
                if (sstable.getCompressionRatio() != MetadataCollector.NO_COMPRESSION_RATIO) {
                    sum += sstable.getCompressionRatio();
                    total++;
                }
            }
            return total != 0 ? sum / total : 0;
        }
    }, new Gauge<Double>() // global gauge
    {
        public Double getValue() {
            double sum = 0;
            int total = 0;
            for (Keyspace keyspace : Keyspace.all()) {
                for (SSTableReader sstable : keyspace.getAllSSTables()) {
                    if (sstable.getCompressionRatio() != MetadataCollector.NO_COMPRESSION_RATIO) {
                        sum += sstable.getCompressionRatio();
                        total++;
                    }
                }
            }
            return total != 0 ? sum / total : 0;
        }
    });
    readLatency = new LatencyMetrics(factory, "Read", cfs.keyspace.metric.readLatency, globalReadLatency);
    writeLatency = new LatencyMetrics(factory, "Write", cfs.keyspace.metric.writeLatency, globalWriteLatency);
    rangeLatency = new LatencyMetrics(factory, "Range", cfs.keyspace.metric.rangeLatency, globalRangeLatency);
    pendingFlushes = createColumnFamilyCounter("PendingFlushes");
    pendingCompactions = createColumnFamilyGauge("PendingCompactions", new Gauge<Integer>() {
        public Integer getValue() {
            return cfs.getCompactionStrategy().getEstimatedRemainingTasks();
        }
    });
    liveSSTableCount = createColumnFamilyGauge("LiveSSTableCount", new Gauge<Integer>() {
        public Integer getValue() {
            return cfs.getTracker().getSSTables().size();
        }
    });
    liveDiskSpaceUsed = createColumnFamilyCounter("LiveDiskSpaceUsed");
    totalDiskSpaceUsed = createColumnFamilyCounter("TotalDiskSpaceUsed");
    minRowSize = createColumnFamilyGauge("MinRowSize", new Gauge<Long>() {
        public Long getValue() {
            long min = 0;
            for (SSTableReader sstable : cfs.getSSTables()) {
                if (min == 0 || sstable.getEstimatedRowSize().min() < min)
                    min = sstable.getEstimatedRowSize().min();
            }
            return min;
        }
    }, new Gauge<Long>() // global gauge
    {
        public Long getValue() {
            long min = Long.MAX_VALUE;
            for (Metric cfGauge : allColumnFamilyMetrics.get("MinRowSize")) {
                min = Math.min(min, ((Gauge<? extends Number>) cfGauge).getValue().longValue());
            }
            return min;
        }
    });
    maxRowSize = createColumnFamilyGauge("MaxRowSize", new Gauge<Long>() {
        public Long getValue() {
            long max = 0;
            for (SSTableReader sstable : cfs.getSSTables()) {
                if (sstable.getEstimatedRowSize().max() > max)
                    max = sstable.getEstimatedRowSize().max();
            }
            return max;
        }
    }, new Gauge<Long>() // global gauge
    {
        public Long getValue() {
            long max = 0;
            for (Metric cfGauge : allColumnFamilyMetrics.get("MaxRowSize")) {
                max = Math.max(max, ((Gauge<? extends Number>) cfGauge).getValue().longValue());
            }
            return max;
        }
    });
    meanRowSize = createColumnFamilyGauge("MeanRowSize", new Gauge<Long>() {
        public Long getValue() {
            long sum = 0;
            long count = 0;
            for (SSTableReader sstable : cfs.getSSTables()) {
                long n = sstable.getEstimatedRowSize().count();
                sum += sstable.getEstimatedRowSize().mean() * n;
                count += n;
            }
            return count > 0 ? sum / count : 0;
        }
    }, new Gauge<Long>() // global gauge
    {
        public Long getValue() {
            long sum = 0;
            long count = 0;
            for (Keyspace keyspace : Keyspace.all()) {
                for (SSTableReader sstable : keyspace.getAllSSTables()) {
                    long n = sstable.getEstimatedRowSize().count();
                    sum += sstable.getEstimatedRowSize().mean() * n;
                    count += n;
                }
            }
            return count > 0 ? sum / count : 0;
        }
    });
    bloomFilterFalsePositives = createColumnFamilyGauge("BloomFilterFalsePositives", new Gauge<Long>() {
        public Long getValue() {
            long count = 0L;
            for (SSTableReader sstable : cfs.getSSTables())
                count += sstable.getBloomFilterFalsePositiveCount();
            return count;
        }
    });
    recentBloomFilterFalsePositives = createColumnFamilyGauge("RecentBloomFilterFalsePositives",
            new Gauge<Long>() {
                public Long getValue() {
                    long count = 0L;
                    for (SSTableReader sstable : cfs.getSSTables())
                        count += sstable.getRecentBloomFilterFalsePositiveCount();
                    return count;
                }
            });
    bloomFilterFalseRatio = createColumnFamilyGauge("BloomFilterFalseRatio", new Gauge<Double>() {
        public Double getValue() {
            long falseCount = 0L;
            long trueCount = 0L;
            for (SSTableReader sstable : cfs.getSSTables()) {
                falseCount += sstable.getBloomFilterFalsePositiveCount();
                trueCount += sstable.getBloomFilterTruePositiveCount();
            }
            if (falseCount == 0L && trueCount == 0L)
                return 0d;
            return (double) falseCount / (trueCount + falseCount);
        }
    }, new Gauge<Double>() // global gauge
    {
        public Double getValue() {
            long falseCount = 0L;
            long trueCount = 0L;
            for (Keyspace keyspace : Keyspace.all()) {
                for (SSTableReader sstable : keyspace.getAllSSTables()) {
                    falseCount += sstable.getBloomFilterFalsePositiveCount();
                    trueCount += sstable.getBloomFilterTruePositiveCount();
                }
            }
            if (falseCount == 0L && trueCount == 0L)
                return 0d;
            return (double) falseCount / (trueCount + falseCount);
        }
    });
    recentBloomFilterFalseRatio = createColumnFamilyGauge("RecentBloomFilterFalseRatio", new Gauge<Double>() {
        public Double getValue() {
            long falseCount = 0L;
            long trueCount = 0L;
            for (SSTableReader sstable : cfs.getSSTables()) {
                falseCount += sstable.getRecentBloomFilterFalsePositiveCount();
                trueCount += sstable.getRecentBloomFilterTruePositiveCount();
            }
            if (falseCount == 0L && trueCount == 0L)
                return 0d;
            return (double) falseCount / (trueCount + falseCount);
        }
    }, new Gauge<Double>() // global gauge
    {
        public Double getValue() {
            long falseCount = 0L;
            long trueCount = 0L;
            for (Keyspace keyspace : Keyspace.all()) {
                for (SSTableReader sstable : keyspace.getAllSSTables()) {
                    falseCount += sstable.getRecentBloomFilterFalsePositiveCount();
                    trueCount += sstable.getRecentBloomFilterTruePositiveCount();
                }
            }
            if (falseCount == 0L && trueCount == 0L)
                return 0d;
            return (double) falseCount / (trueCount + falseCount);
        }
    });
    bloomFilterDiskSpaceUsed = createColumnFamilyGauge("BloomFilterDiskSpaceUsed", new Gauge<Long>() {
        public Long getValue() {
            long total = 0;
            for (SSTableReader sst : cfs.getSSTables())
                total += sst.getBloomFilterSerializedSize();
            return total;
        }
    });
    bloomFilterOffHeapMemoryUsed = createColumnFamilyGauge("BloomFilterOffHeapMemoryUsed", new Gauge<Long>() {
        public Long getValue() {
            long total = 0;
            for (SSTableReader sst : cfs.getSSTables())
                total += sst.getBloomFilterOffHeapSize();
            return total;
        }
    });
    indexSummaryOffHeapMemoryUsed = createColumnFamilyGauge("IndexSummaryOffHeapMemoryUsed", new Gauge<Long>() {
        public Long getValue() {
            long total = 0;
            for (SSTableReader sst : cfs.getSSTables())
                total += sst.getIndexSummaryOffHeapSize();
            return total;
        }
    });
    compressionMetadataOffHeapMemoryUsed = createColumnFamilyGauge("CompressionMetadataOffHeapMemoryUsed",
            new Gauge<Long>() {
                public Long getValue() {
                    long total = 0;
                    for (SSTableReader sst : cfs.getSSTables())
                        total += sst.getCompressionMetadataOffHeapSize();
                    return total;
                }
            });
    speculativeRetries = createColumnFamilyCounter("SpeculativeRetries");
    keyCacheHitRate = Metrics.register(factory.createMetricName("KeyCacheHitRate"), new RatioGauge() {
        @Override
        public Ratio getRatio() {
            return Ratio.of(getNumerator(), getDenominator());
        }

        protected double getNumerator() {
            long hits = 0L;
            for (SSTableReader sstable : cfs.getSSTables())
                hits += sstable.getKeyCacheHit();
            return hits;
        }

        protected double getDenominator() {
            long requests = 0L;
            for (SSTableReader sstable : cfs.getSSTables())
                requests += sstable.getKeyCacheRequest();
            return Math.max(requests, 1); // to avoid NaN.
        }
    });
    tombstoneScannedHistogram = createColumnFamilyHistogram("TombstoneScannedHistogram",
            cfs.keyspace.metric.tombstoneScannedHistogram, false);
    liveScannedHistogram = createColumnFamilyHistogram("LiveScannedHistogram",
            cfs.keyspace.metric.liveScannedHistogram, false);
    colUpdateTimeDeltaHistogram = createColumnFamilyHistogram("ColUpdateTimeDeltaHistogram",
            cfs.keyspace.metric.colUpdateTimeDeltaHistogram, false);
    coordinatorReadLatency = Metrics.timer(factory.createMetricName("CoordinatorReadLatency"));
    coordinatorScanLatency = Metrics.timer(factory.createMetricName("CoordinatorScanLatency"));
    waitingOnFreeMemtableSpace = Metrics.histogram(factory.createMetricName("WaitingOnFreeMemtableSpace"),
            false);

    trueSnapshotsSize = createColumnFamilyGauge("SnapshotsSize", new Gauge<Long>() {
        public Long getValue() {
            return cfs.trueSnapshotsSize();
        }
    });
    rowCacheHitOutOfRange = createColumnFamilyCounter("RowCacheHitOutOfRange");
    rowCacheHit = createColumnFamilyCounter("RowCacheHit");
    rowCacheMiss = createColumnFamilyCounter("RowCacheMiss");

    casPrepare = new LatencyMetrics(factory, "CasPrepare", cfs.keyspace.metric.casPrepare);
    casPropose = new LatencyMetrics(factory, "CasPropose", cfs.keyspace.metric.casPropose);
    casCommit = new LatencyMetrics(factory, "CasCommit", cfs.keyspace.metric.casCommit);
}
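
Note the shape of the global gauges above: each per-table gauge walks only cfs.getSSTables(), while its node-wide counterpart uses Keyspace.all() to fold the same statistic over every SSTable in every keyspace. Condensed to its core (using the bloom filter counter as a representative metric from the example), the aggregation is:

// Node-wide aggregation sketch: sum one SSTable statistic across all keyspaces.
long falsePositives = 0L;
for (Keyspace keyspace : Keyspace.all()) {
    for (SSTableReader sstable : keyspace.getAllSSTables()) {
        falsePositives += sstable.getBloomFilterFalsePositiveCount();
    }
}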

From source file: org.wildfly.extension.cassandra.WildflyCassandraDaemon.java

License: Apache License

/**
 * This is a hook for concrete daemons to initialize themselves suitably.
 *
 * Subclasses should override this to finish the job (listening on ports, etc.)
 *
 * @throws IOException
 */
protected void setup() {
    // log warnings for different kinds of sub-optimal JVMs.  tldr use 64-bit Oracle >= 1.6u32
    if (!DatabaseDescriptor.hasLargeAddressSpace())
        CassandraLogger.LOGGER.infof(
                "32bit JVM detected.  It is recommended to run Cassandra on a 64bit JVM for better performance.");
    String javaVersion = System.getProperty("java.version");
    String javaVmName = System.getProperty("java.vm.name");
    CassandraLogger.LOGGER.infof("JVM vendor/version: {}/{}", javaVmName, javaVersion);
    if (javaVmName.contains("OpenJDK")) {
        // There is essentially no QA done on OpenJDK builds, and
        // clusters running OpenJDK have seen many heap and load issues.
        CassandraLogger.LOGGER
                .warn("OpenJDK is not recommended. Please upgrade to the newest Oracle Java release");
    } else if (!javaVmName.contains("HotSpot")) {
        CassandraLogger.LOGGER.warn(
                "Non-Oracle JVM detected.  Some features, such as immediate unmap of compacted SSTables, may not work as intended");
    }
    /* else {
        String[] java_version = javaVersion.split("_");
        String java_major = java_version[0];
        int java_minor;
        try {
            java_minor = (java_version.length > 1) ? Integer.parseInt(java_version[1]) : 0;
        } catch (NumberFormatException e) {
            // have only seen this with java7 so far but no doubt there are other ways to break this
            CassandraLogger.LOGGER.infof("Unable to parse java version %s", Arrays.toString(java_version));
            java_minor = 32;
        }
    } */
    CassandraLogger.LOGGER.infof("Heap size: {}/{}", Runtime.getRuntime().totalMemory(),
            Runtime.getRuntime().maxMemory());
    for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans())
        CassandraLogger.LOGGER.infof("{} {}: {}", pool.getName(), pool.getType(), pool.getPeakUsage());
    CassandraLogger.LOGGER.infof("Classpath: {}", System.getProperty("java.class.path"));
    CLibrary.tryMlockall();

    Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        public void uncaughtException(Thread t, Throwable e) {
            StorageMetrics.exceptions.inc();
            CassandraLogger.LOGGER.error("Exception in thread " + t, e);
            Tracing.trace("Exception in thread " + t, e);
            for (Throwable e2 = e; e2 != null; e2 = e2.getCause()) {
                // some code, like FileChannel.map, will wrap an OutOfMemoryError in another exception
                if (e2 instanceof OutOfMemoryError)
                    exitThread.start();

                if (e2 instanceof FSError) {
                    if (e2 != e) // make sure FSError gets logged exactly once.
                        CassandraLogger.LOGGER.error("Exception in thread " + t, e2);
                    FileUtils.handleFSError((FSError) e2);
                }
            }
        }
    });

    // check all directories (data, commitlog, saved caches) for existence and permissions
    Iterable<String> dirs = Iterables.concat(Arrays.asList(DatabaseDescriptor.getAllDataFileLocations()), Arrays
            .asList(DatabaseDescriptor.getCommitLogLocation(), DatabaseDescriptor.getSavedCachesLocation()));
    for (String dataDir : dirs) {
        CassandraLogger.LOGGER.debugf("Checking directory %s", dataDir);
        File dir = new File(dataDir);
        if (dir.exists())
            assert dir.isDirectory() && dir.canRead() && dir.canWrite() && dir.canExecute() : String
                    .format("Directory %s is not accessible.", dataDir);
    }

    if (CacheService.instance == null) // should never happen
        throw new RuntimeException("Failed to initialize Cache Service.");

    // check the system keyspace to keep the user from shooting themselves in the foot by changing the partitioner, cluster name, etc.
    // we do a one-off scrub of the system keyspace first; we can't load the list of the rest of the keyspaces
    // until the system keyspace is opened.
    for (CFMetaData cfm : Schema.instance.getKeyspaceMetaData(Keyspace.SYSTEM_KS).values())
        ColumnFamilyStore.scrubDataDirectories(Keyspace.SYSTEM_KS, cfm.cfName);
    try {
        SystemKeyspace.checkHealth();
    } catch (ConfigurationException e) {
        throw new RuntimeException("Fatal exception during initialization", e);
    }

    // load keyspace descriptions.
    DatabaseDescriptor.loadSchemas();

    try {
        LeveledManifest.maybeMigrateManifests();
    } catch (IOException e) {
        throw new RuntimeException(
                "Could not migrate old leveled manifest. Move away the .json file in the data directory", e);
    }

    // clean up compaction leftovers
    Map<Pair<String, String>, Map<Integer, UUID>> unfinishedCompactions = SystemKeyspace
            .getUnfinishedCompactions();
    for (Pair<String, String> kscf : unfinishedCompactions.keySet())
        ColumnFamilyStore.removeUnfinishedCompactionLeftovers(kscf.left, kscf.right,
                unfinishedCompactions.get(kscf));
    SystemKeyspace.discardCompactionsInProgress();

    // clean up debris in the rest of the keyspaces
    for (String keyspaceName : Schema.instance.getKeyspaces()) {
        // Skip system as we've already cleaned it
        if (keyspaceName.equals(Keyspace.SYSTEM_KS))
            continue;

        for (CFMetaData cfm : Schema.instance.getKeyspaceMetaData(keyspaceName).values())
            ColumnFamilyStore.scrubDataDirectories(keyspaceName, cfm.cfName);
    }

    // initialize keyspaces
    for (String keyspaceName : Schema.instance.getKeyspaces()) {
        if (CassandraLogger.LOGGER.isDebugEnabled())
            CassandraLogger.LOGGER.debug("opening keyspace " + keyspaceName);
        // disable auto compaction until commit log replay ends
        for (ColumnFamilyStore cfs : Keyspace.open(keyspaceName).getColumnFamilyStores()) {
            for (ColumnFamilyStore store : cfs.concatWithIndexes()) {
                store.disableAutoCompaction();
            }
        }
    }

    if (CacheService.instance.keyCache.size() > 0)
        CassandraLogger.LOGGER.infof("completed pre-loading ({} keys) key cache.",
                CacheService.instance.keyCache.size());

    if (CacheService.instance.rowCache.size() > 0)
        CassandraLogger.LOGGER.infof("completed pre-loading ({} keys) row cache.",
                CacheService.instance.rowCache.size());

    try {
        GCInspector.instance.start();
    } catch (Throwable t) {
        CassandraLogger.LOGGER.warn("Unable to start GCInspector (currently only supported on the Sun JVM)");
    }

    // MeteredFlusher can block if flush queue fills up, so don't put on scheduledTasks
    // Start it before commit log, so memtables can flush during commit log replay
    StorageService.optionalTasks.scheduleWithFixedDelay(new MeteredFlusher(), 1000, 1000,
            TimeUnit.MILLISECONDS);

    // replay the log if necessary
    try {
        CommitLog.instance.recover();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    // enable auto compaction
    for (Keyspace keyspace : Keyspace.all()) {
        for (ColumnFamilyStore cfs : keyspace.getColumnFamilyStores()) {
            for (final ColumnFamilyStore store : cfs.concatWithIndexes()) {
                store.enableAutoCompaction();
            }
        }
    }
    // start background compactions in five minutes, in case no flush has triggered them by then
    Runnable runnable = new Runnable() {
        public void run() {
            for (Keyspace keyspace : Keyspace.all()) {
                for (ColumnFamilyStore cfs : keyspace.getColumnFamilyStores()) {
                    for (ColumnFamilyStore store : cfs.concatWithIndexes())
                        CompactionManager.instance.submitBackground(store);
                }
            }
        }
    };
    StorageService.optionalTasks.schedule(runnable, 5 * 60, TimeUnit.SECONDS);

    SystemKeyspace.finishStartup();

    // start server internals
    StorageService.instance.registerDaemon(this);
    try {
        StorageService.instance.initServer();
    } catch (ConfigurationException e) {
        throw new RuntimeException("Fatal configuration error", e);
    }

    Mx4jTool.maybeLoad();

    // Metrics
    String metricsReporterConfigFile = System.getProperty("cassandra.metricsReporterConfigFile");
    if (metricsReporterConfigFile != null) {
        CassandraLogger.LOGGER.infof("Trying to load metrics-reporter-config from file: {}",
                metricsReporterConfigFile);
        try {
            String reportFileLocation = WildflyCassandraDaemon.class.getClassLoader()
                    .getResource(metricsReporterConfigFile).getFile();
            ReporterConfig.loadFromFile(reportFileLocation).enableAll();
        } catch (Exception e) {
            CassandraLogger.LOGGER
                    .warn("Failed to load metrics-reporter-config, metric sinks will not be activated", e);
        }
    }

    if (!FBUtilities.getBroadcastAddress().equals(InetAddress.getLoopbackAddress()))
        waitForGossipToSettle();

    // Thrift
    InetAddress rpcAddr = DatabaseDescriptor.getRpcAddress();
    int rpcPort = DatabaseDescriptor.getRpcPort();
    thriftServer = new ThriftServer(rpcAddr, rpcPort);
    StorageService.instance.registerThriftServer(thriftServer);

    // Native transport
    InetAddress nativeAddr = DatabaseDescriptor.getNativeTransportAddress();
    int nativePort = DatabaseDescriptor.getNativeTransportPort();
    nativeServer = new org.apache.cassandra.transport.Server(nativeAddr, nativePort);
    StorageService.instance.registerNativeServer(nativeServer);
}
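
This daemon calls Keyspace.all() twice with the same nested traversal: once to re-enable auto compaction after commit log replay, and again in the delayed task that submits background compactions. The traversal generalizes to any node-wide walk; as an illustrative sketch that is not part of the original source, the same loop can tally the live SSTables on the node:

// Illustrative only: reuse the Keyspace.all() traversal to count live SSTables.
long liveSSTables = 0;
for (Keyspace keyspace : Keyspace.all()) {
    for (SSTableReader sstable : keyspace.getAllSSTables()) {
        liveSSTables++;
    }
}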