Example usage for org.apache.cassandra.config DatabaseDescriptor getRpcAddress

Introduction

On this page you can find usage examples for org.apache.cassandra.config DatabaseDescriptor getRpcAddress.

Prototype

public static InetAddress getRpcAddress() 

Document

This is the address to which the native protocol binds in order to communicate with clients.
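
All of the examples below read this address from a running embedded daemon. A minimal, self-contained sketch of that shared pattern, assuming cassandra.yaml is resolvable on the classpath (the class and calls mirror the snippets below):

import java.net.InetAddress;

import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.service.CassandraDaemon;

public class GetRpcAddressExample {
    public static void main(String[] args) throws Exception {
        // Starting the daemon loads cassandra.yaml into DatabaseDescriptor.
        CassandraDaemon daemon = new CassandraDaemon();
        daemon.init(null);
        daemon.start();

        // The address client connections bind to, per the rpc_address setting.
        InetAddress rpcAddress = DatabaseDescriptor.getRpcAddress();
        System.out.println("Client address: " + rpcAddress.getHostAddress());
    }
}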

Usage

From source file: com.meteogroup.cassandra.embedded.EmbeddedCassandraLoader.java

License: Open Source License

public static void setupCassandra() throws Exception {
    System.setProperty("cassandra.config.loader", EmbeddedConfigurationLoader.class.getCanonicalName());
    cassandraDaemon = new CassandraDaemon();
    cassandraDaemon.init(null);
    cassandraDaemon.start();

    cassandraHost = DatabaseDescriptor.getRpcAddress().getHostName();
    cassandraNativePort = DatabaseDescriptor.getNativeTransportPort();
}
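
The two fields captured above are exactly what a CQL client needs. A hedged sketch of connecting to the embedded node, assuming the DataStax Java driver 3.x (com.datastax.driver.core, not part of this example's source) is on the classpath:

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;

// Open a CQL session against the embedded node started in setupCassandra().
try (Cluster cluster = Cluster.builder()
        .addContactPoint(cassandraHost)    // from DatabaseDescriptor.getRpcAddress().getHostName()
        .withPort(cassandraNativePort)     // from DatabaseDescriptor.getNativeTransportPort()
        .build();
     Session session = cluster.connect()) {
    ResultSet rs = session.execute("SELECT release_version FROM system.local");
    System.out.println("Connected to Cassandra " + rs.one().getString("release_version"));
}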

From source file: com.savoirtech.bundles.cassandra.AbstractCassandraDaemon.java

License: Apache License

/**
 * This is a hook for concrete daemons to initialize themselves suitably.
 * <p/>
 * Subclasses should override this to finish the job (listening on ports, etc.)
 *
 * @throws java.io.IOException
 */
protected void setup() {
    logger.info("JVM vendor/version: {}/{}", System.getProperty("java.vm.name"),
            System.getProperty("java.version"));
    logger.info("Heap size: {}/{}", Runtime.getRuntime().totalMemory(), Runtime.getRuntime().maxMemory());
    logger.info("Classpath: {}", System.getProperty("java.class.path"));
    CLibrary.tryMlockall();

    listenPort = DatabaseDescriptor.getRpcPort();
    listenAddr = DatabaseDescriptor.getRpcAddress();

    Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        public void uncaughtException(Thread t, Throwable e) {
            exceptions.incrementAndGet();
            logger.error("Exception in thread " + t, e);
            for (Throwable e2 = e; e2 != null; e2 = e2.getCause()) {
                // some code, like FileChannel.map, will wrap an OutOfMemoryError in another exception
                if (e2 instanceof OutOfMemoryError) {
                    System.exit(100);
                }
            }
        }
    });

    // check all directories(data, commitlog, saved cache) for existence and permission
    Iterable<String> dirs = Iterables.concat(Arrays.asList(DatabaseDescriptor.getAllDataFileLocations()),
            Arrays.asList(new String[] { DatabaseDescriptor.getCommitLogLocation(),
                    DatabaseDescriptor.getSavedCachesLocation() }));
    for (String dataDir : dirs) {
        logger.debug("Checking directory {}", dataDir);
        File dir = new File(dataDir);
        if (dir.exists()) {
            assert dir.isDirectory() && dir.canRead() && dir.canWrite() && dir.canExecute() : String
                    .format("Directory %s is not accessible.", dataDir);
        }
    }

    // Migrate sstables from pre-#2749 to the correct location
    if (Directories.sstablesNeedsMigration()) {
        Directories.migrateSSTables();
    }

    if (CacheService.instance == null) // should never happen
    {
        throw new RuntimeException("Failed to initialize Cache Service.");
    }

    // check the system table to keep user from shooting self in foot by changing partitioner, cluster name, etc.
    // we do a one-off scrub of the system table first; we can't load the list of the rest of the tables,
    // until system table is opened.
    for (CFMetaData cfm : Schema.instance.getTableMetaData(Table.SYSTEM_KS).values()) {
        ColumnFamilyStore.scrubDataDirectories(Table.SYSTEM_KS, cfm.cfName);
    }
    try {
        SystemTable.checkHealth();
    } catch (ConfigurationException e) {
        logger.error("Fatal exception during initialization", e);
        System.exit(100);
    }

    // load keyspace descriptions.
    try {
        DatabaseDescriptor.loadSchemas();
    } catch (IOException e) {
        logger.error("Fatal exception during initialization", e);
        System.exit(100);
    }

    // clean up debris in the rest of the tables
    for (String table : Schema.instance.getTables()) {
        for (CFMetaData cfm : Schema.instance.getTableMetaData(table).values()) {
            ColumnFamilyStore.scrubDataDirectories(table, cfm.cfName);
        }
    }

    // initialize keyspaces
    for (String table : Schema.instance.getTables()) {
        if (logger.isDebugEnabled()) {
            logger.debug("opening keyspace " + table);
        }
        Table.open(table);
    }

    if (CacheService.instance.keyCache.size() > 0) {
        logger.info("completed pre-loading ({} keys) key cache.", CacheService.instance.keyCache.size());
    }

    if (CacheService.instance.rowCache.size() > 0) {
        logger.info("completed pre-loading ({} keys) row cache.", CacheService.instance.rowCache.size());
    }

    try {
        GCInspector.instance.start();
    } catch (Throwable t) {
        logger.warn("Unable to start GCInspector (currently only supported on the Sun JVM)");
    }

    // replay the log if necessary
    try {
        CommitLog.instance.recover();
    } catch (IOException e) {
        logger.error("Fatal configuration error", e);
        System.err.println(e.getMessage()
                + "\nFatal configuration error; unable to start server.  See log for stacktrace.");
        System.exit(1);
    }

    SystemTable.finishStartup();

    // start server internals
    StorageService.instance.registerDaemon(this);
    try {
        StorageService.instance.initServer();
    } catch (ConfigurationException e) {
        logger.error("Fatal configuration error", e);
        System.err.println(e.getMessage()
                + "\nFatal configuration error; unable to start server.  See log for stacktrace.");
        System.exit(1);
    }

    Mx4jTool.maybeLoad();
}
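
One detail of the setup above is easy to miss: the uncaught-exception handler walks the cause chain because some code, such as FileChannel.map, wraps an OutOfMemoryError in another exception. The same idiom in isolation, as a minimal sketch:

// Walk the cause chain so a wrapped OutOfMemoryError is still detected.
Thread.setDefaultUncaughtExceptionHandler((t, e) -> {
    System.err.println("Exception in thread " + t + ": " + e);
    for (Throwable cause = e; cause != null; cause = cause.getCause()) {
        if (cause instanceof OutOfMemoryError) {
            // Mirrors the daemon above: treat OOM as fatal rather than limping on.
            System.exit(100);
        }
    }
});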

From source file: org.apache.storm.cassandra.testtools.EmbeddedCassandraResource.java

License: Open Source License

public EmbeddedCassandraResource() {
    try {
        prepare();
        cassandraDaemon = new CassandraDaemon();
        cassandraDaemon.init(null);
        host = DatabaseDescriptor.getRpcAddress().getHostName();
        nativeTransportPort = DatabaseDescriptor.getNativeTransportPort();
    } catch (Exception e) {
        throw new RuntimeException(e.getMessage(), e);
    }
}
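
Since the class is written as a test fixture, it is presumably wired into tests as a JUnit rule. A hedged usage sketch, assuming EmbeddedCassandraResource extends org.junit.rules.ExternalResource and exposes getters for the fields set above (both are assumptions, not shown in the snippet):

import org.junit.ClassRule;
import org.junit.Test;

public class EmbeddedCassandraResourceTest {
    // Assumption: EmbeddedCassandraResource extends org.junit.rules.ExternalResource.
    @ClassRule
    public static final EmbeddedCassandraResource cassandra = new EmbeddedCassandraResource();

    @Test
    public void exposesEndpoint() {
        // Hypothetical accessors for the host and nativeTransportPort fields set above.
        System.out.println(cassandra.getHost() + ":" + cassandra.getNativeTransportPort());
    }
}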

From source file: org.meteogroup.cassandra.embedded.EmbeddedCassandraLoader.java

License: Open Source License

public static void setupCassandra() throws Exception {
    System.setProperty("cassandra.config.loader", EmbeddedConfigurationLoader.class.getCanonicalName());
    cassandraDaemon = new CassandraDaemon(true);
    cassandraDaemon.activate();
    cassandraDaemon.start();

    cassandraHost = DatabaseDescriptor.getRpcAddress().getHostName();
    cassandraRpcPort = DatabaseDescriptor.getRpcPort();
    cassandraNativePort = DatabaseDescriptor.getNativeTransportPort();
    cassandraNativeSSLPort = DatabaseDescriptor.getNativeTransportPort();
    cassandraStoragePort = DatabaseDescriptor.getStoragePort();
    cassandraStorageSSLPort = DatabaseDescriptor.getSSLStoragePort();
}
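
Since this loader captures several ports, a quick TCP probe is a cheap way to verify that the endpoints are actually listening before tests run. A minimal sketch using only java.net:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

// True if something accepts TCP connections at host:port within one second.
static boolean isListening(String host, int port) {
    try (Socket socket = new Socket()) {
        socket.connect(new InetSocketAddress(host, port), 1000);
        return true;
    } catch (IOException e) {
        return false;
    }
}

// e.g. isListening(cassandraHost, cassandraNativePort) after setupCassandra()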

From source file: org.springframework.cassandra.test.integration.EmbeddedCassandraServerHelper.java

License: Apache License

/**
 * Get the embedded Cassandra host.
 *
 * @return the cassandra host
 */
public static String getHost() {
    return DatabaseDescriptor.getRpcAddress().getHostName();
}
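
Note that InetAddress.getHostName() may perform a reverse DNS lookup, which can be slow or surprising in test environments, whereas getHostAddress() returns the literal IP without a lookup:

import java.net.InetAddress;
import org.apache.cassandra.config.DatabaseDescriptor;

InetAddress addr = DatabaseDescriptor.getRpcAddress();
String name = addr.getHostName();    // may do a reverse DNS lookup, e.g. "localhost"
String ip = addr.getHostAddress();   // literal address, e.g. "127.0.0.1", no lookup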

From source file: org.wildfly.extension.cassandra.WildflyCassandraDaemon.java

License: Apache License

/**
 * This is a hook for concrete daemons to initialize themselves suitably.
 *
 * Subclasses should override this to finish the job (listening on ports, etc.)
 *
 * @throws IOException
 */
protected void setup() {
    // log warnings for different kinds of sub-optimal JVMs.  tldr use 64-bit Oracle >= 1.6u32
    if (!DatabaseDescriptor.hasLargeAddressSpace())
        CassandraLogger.LOGGER.infof(
                "32bit JVM detected.  It is recommended to run Cassandra on a 64bit JVM for better performance.");
    String javaVersion = System.getProperty("java.version");
    String javaVmName = System.getProperty("java.vm.name");
    CassandraLogger.LOGGER.infof("JVM vendor/version: {}/{}", javaVmName, javaVersion);
    if (javaVmName.contains("OpenJDK")) {
        // There is essentially no QA done on OpenJDK builds, and
        // clusters running OpenJDK have seen many heap and load issues.
        CassandraLogger.LOGGER
                .warn("OpenJDK is not recommended. Please upgrade to the newest Oracle Java release");
    } else if (!javaVmName.contains("HotSpot")) {
        CassandraLogger.LOGGER.warn(
                "Non-Oracle JVM detected.  Some features, such as immediate unmap of compacted SSTables, may not work as intended");
    }
    /*   else
       {
    String[] java_version = javaVersion.split("_");
    String java_major = java_version[0];
    int java_minor;
    try
    {
        java_minor = (java_version.length > 1) ? Integer.parseInt(java_version[1]) : 0;
    }
    catch (NumberFormatException e)
    {
        // have only seen this with java7 so far but no doubt there are other ways to break this
        CassandraLogger.LOGGER.infof("Unable to parse java version {}", Arrays.toString(java_version));
        java_minor = 32;
    }
       }
    */
    CassandraLogger.LOGGER.infof("Heap size: {}/{}", Runtime.getRuntime().totalMemory(),
            Runtime.getRuntime().maxMemory());
    for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans())
        CassandraLogger.LOGGER.infof("{} {}: {}", pool.getName(), pool.getType(), pool.getPeakUsage());
    CassandraLogger.LOGGER.infof("Classpath: {}", System.getProperty("java.class.path"));
    CLibrary.tryMlockall();

    Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        public void uncaughtException(Thread t, Throwable e) {
            StorageMetrics.exceptions.inc();
            CassandraLogger.LOGGER.error("Exception in thread " + t, e);
            Tracing.trace("Exception in thread " + t, e);
            for (Throwable e2 = e; e2 != null; e2 = e2.getCause()) {
                // some code, like FileChannel.map, will wrap an OutOfMemoryError in another exception
                if (e2 instanceof OutOfMemoryError)
                    exitThread.start();

                if (e2 instanceof FSError) {
                    if (e2 != e) // make sure FSError gets logged exactly once.
                        CassandraLogger.LOGGER.error("Exception in thread " + t, e2);
                    FileUtils.handleFSError((FSError) e2);
                }
            }
        }
    });

    // check all directories(data, commitlog, saved cache) for existence and permission
    Iterable<String> dirs = Iterables.concat(Arrays.asList(DatabaseDescriptor.getAllDataFileLocations()), Arrays
            .asList(DatabaseDescriptor.getCommitLogLocation(), DatabaseDescriptor.getSavedCachesLocation()));
    for (String dataDir : dirs) {
        CassandraLogger.LOGGER.debugf("Checking directory {}", dataDir);
        File dir = new File(dataDir);
        if (dir.exists())
            assert dir.isDirectory() && dir.canRead() && dir.canWrite() && dir.canExecute() : String
                    .format("Directory %s is not accessible.", dataDir);
    }

    if (CacheService.instance == null) // should never happen
        throw new RuntimeException("Failed to initialize Cache Service.");

    // check the system keyspace to keep user from shooting self in foot by changing partitioner, cluster name, etc.
    // we do a one-off scrub of the system keyspace first; we can't load the list of the rest of the keyspaces,
    // until system keyspace is opened.
    for (CFMetaData cfm : Schema.instance.getKeyspaceMetaData(Keyspace.SYSTEM_KS).values())
        ColumnFamilyStore.scrubDataDirectories(Keyspace.SYSTEM_KS, cfm.cfName);
    try {
        SystemKeyspace.checkHealth();
    } catch (ConfigurationException e) {
        throw new RuntimeException("Fatal exception during initialization", e);
    }

    // load keyspace descriptions.
    DatabaseDescriptor.loadSchemas();

    try {
        LeveledManifest.maybeMigrateManifests();
    } catch (IOException e) {
        throw new RuntimeException(
                "Could not migrate old leveled manifest. Move away the .json file in the data directory", e);
    }

    // clean up compaction leftovers
    Map<Pair<String, String>, Map<Integer, UUID>> unfinishedCompactions = SystemKeyspace
            .getUnfinishedCompactions();
    for (Pair<String, String> kscf : unfinishedCompactions.keySet())
        ColumnFamilyStore.removeUnfinishedCompactionLeftovers(kscf.left, kscf.right,
                unfinishedCompactions.get(kscf));
    SystemKeyspace.discardCompactionsInProgress();

    // clean up debris in the rest of the keyspaces
    for (String keyspaceName : Schema.instance.getKeyspaces()) {
        // Skip system as we've already cleaned it
        if (keyspaceName.equals(Keyspace.SYSTEM_KS))
            continue;

        for (CFMetaData cfm : Schema.instance.getKeyspaceMetaData(keyspaceName).values())
            ColumnFamilyStore.scrubDataDirectories(keyspaceName, cfm.cfName);
    }

    // initialize keyspaces
    for (String keyspaceName : Schema.instance.getKeyspaces()) {
        if (CassandraLogger.LOGGER.isDebugEnabled())
            CassandraLogger.LOGGER.debug("opening keyspace " + keyspaceName);
        // disable auto compaction until commit log replay ends
        for (ColumnFamilyStore cfs : Keyspace.open(keyspaceName).getColumnFamilyStores()) {
            for (ColumnFamilyStore store : cfs.concatWithIndexes()) {
                store.disableAutoCompaction();
            }
        }
    }

    if (CacheService.instance.keyCache.size() > 0)
        CassandraLogger.LOGGER.infof("completed pre-loading ({} keys) key cache.",
                CacheService.instance.keyCache.size());

    if (CacheService.instance.rowCache.size() > 0)
        CassandraLogger.LOGGER.infof("completed pre-loading ({} keys) row cache.",
                CacheService.instance.rowCache.size());

    try {
        GCInspector.instance.start();
    } catch (Throwable t) {
        CassandraLogger.LOGGER.warn("Unable to start GCInspector (currently only supported on the Sun JVM)");
    }

    // MeteredFlusher can block if flush queue fills up, so don't put on scheduledTasks
    // Start it before commit log, so memtables can flush during commit log replay
    StorageService.optionalTasks.scheduleWithFixedDelay(new MeteredFlusher(), 1000, 1000,
            TimeUnit.MILLISECONDS);

    // replay the log if necessary
    try {
        CommitLog.instance.recover();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    // enable auto compaction
    for (Keyspace keyspace : Keyspace.all()) {
        for (ColumnFamilyStore cfs : keyspace.getColumnFamilyStores()) {
            for (final ColumnFamilyStore store : cfs.concatWithIndexes()) {
                store.enableAutoCompaction();
            }
        }
    }
    // start compactions in five minutes (if no flushes have occurred by then to do so)
    Runnable runnable = new Runnable() {
        public void run() {
            for (Keyspace keyspaceName : Keyspace.all()) {
                for (ColumnFamilyStore cf : keyspaceName.getColumnFamilyStores()) {
                    for (ColumnFamilyStore store : cf.concatWithIndexes())
                        CompactionManager.instance.submitBackground(store);
                }
            }
        }
    };
    StorageService.optionalTasks.schedule(runnable, 5 * 60, TimeUnit.SECONDS);

    SystemKeyspace.finishStartup();

    // start server internals
    StorageService.instance.registerDaemon(this);
    try {
        StorageService.instance.initServer();
    } catch (ConfigurationException e) {
        throw new RuntimeException("Fatal configuration error", e);
    }

    Mx4jTool.maybeLoad();

    // Metrics
    String metricsReporterConfigFile = System.getProperty("cassandra.metricsReporterConfigFile");
    if (metricsReporterConfigFile != null) {
        CassandraLogger.LOGGER.infof("Trying to load metrics-reporter-config from file: {}",
                metricsReporterConfigFile);
        try {
            String reportFileLocation = WildflyCassandraDaemon.class.getClassLoader()
                    .getResource(metricsReporterConfigFile).getFile();
            ReporterConfig.loadFromFile(reportFileLocation).enableAll();
        } catch (Exception e) {
            CassandraLogger.LOGGER
                    .warn("Failed to load metrics-reporter-config, metric sinks will not be activated", e);
        }
    }

    if (!FBUtilities.getBroadcastAddress().equals(InetAddress.getLoopbackAddress()))
        waitForGossipToSettle();

    // Thrift
    InetAddress rpcAddr = DatabaseDescriptor.getRpcAddress();
    int rpcPort = DatabaseDescriptor.getRpcPort();
    thriftServer = new ThriftServer(rpcAddr, rpcPort);
    StorageService.instance.registerThriftServer(thriftServer);

    // Native transport
    InetAddress nativeAddr = DatabaseDescriptor.getNativeTransportAddress();
    int nativePort = DatabaseDescriptor.getNativeTransportPort();
    nativeServer = new org.apache.cassandra.transport.Server(nativeAddr, nativePort);
    StorageService.instance.registerNativeServer(nativeServer);
}
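
For completeness, a client of the Thrift endpoint wired up at the end of setup() would use that same address/port pair. A hedged sketch using the classic libthrift API, assuming libthrift and the Cassandra Thrift bindings are on the classpath:

import java.net.InetAddress;

import org.apache.cassandra.thrift.Cassandra;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TFramedTransport;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

// Ask the ThriftServer registered above for the cluster name.
static String clusterName(InetAddress rpcAddr, int rpcPort) throws Exception {
    TTransport transport = new TFramedTransport(new TSocket(rpcAddr.getHostAddress(), rpcPort));
    transport.open();
    try {
        Cassandra.Client client = new Cassandra.Client(new TBinaryProtocol(transport));
        return client.describe_cluster_name();
    } finally {
        transport.close();
    }
}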