Example usage for org.apache.cassandra.config DatabaseDescriptor getAllDataFileLocations

Introduction

On this page you can find example usages of org.apache.cassandra.config.DatabaseDescriptor.getAllDataFileLocations(), drawn from open-source projects.

Prototype

public static String[] getAllDataFileLocations() 
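
Before the project examples below, here is a minimal sketch of the pattern they all share: wiping every configured data file location before (re)starting an embedded Cassandra instance. The class and method names are hypothetical, and the sketch assumes the "cassandra.config" system property already points at a loadable cassandra.yaml so that DatabaseDescriptor can resolve its locations.

import java.io.File;

import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.io.util.FileUtils;

public class DataDirectoryCleaner {
    /**
     * Deletes every configured data directory (if present) so that a test
     * run starts from an empty state.
     */
    public static void wipeDataDirectories() {
        for (String location : DatabaseDescriptor.getAllDataFileLocations()) {
            File dir = new File(location);
            if (dir.exists()) {
                FileUtils.deleteRecursive(dir);
            }
        }
    }
}

The examples below vary the deletion mechanism (commons-io FileUtils.deleteDirectory, Files.walkFileTree, or Cassandra's own FileUtils.deleteRecursive), but the iteration over getAllDataFileLocations() is the same.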

Usage

From source file: org.kiji.schema.cassandra.TestingCassandraFactory.java

License: Apache License

/**
 * Ensure that the EmbeddedCassandraService for unit tests is running.  If it is not, then start
 * it.
 */
private void startEmbeddedCassandraServiceIfNotRunningAndOpenSession() throws Exception {
    LOG.debug("Ready to start a C* service if necessary...");
    if (null != mCassandraSession) {
        LOG.debug("C* is already running, no need to start the service.");
        //Preconditions.checkNotNull(mCassandraSession);
        return;
    }

    LOG.debug("Starting EmbeddedCassandra!");
    try {
        LOG.info("Starting EmbeddedCassandraService...");
        // Use a custom YAML file that specifies nonstandard ports (storage, SSL storage, native transport, RPC).
        InputStream yamlStream = getClass().getResourceAsStream("/cassandra.yaml");
        LOG.debug("Checking that we can load cassandra.yaml as a stream...");
        Preconditions.checkNotNull(yamlStream, "Unable to load resource /cassandra.yaml as a stream");
        LOG.debug("Looks good to load it as a stream!");

        // Update cassandra.yaml to use available ports.
        String cassandraYaml = IOUtils.toString(yamlStream);

        final int storagePort = findOpenPort(); // Normally 7000.
        final int sslStoragePort = findOpenPort(); // Normally 7001.
        final int nativeTransportPort = findOpenPort(); // Normally 9042.
        final int rpcPort = findOpenPort(); // Normally 9160.

        cassandraYaml = updateCassandraYamlWithPort(cassandraYaml, "__STORAGE_PORT__", storagePort);

        cassandraYaml = updateCassandraYamlWithPort(cassandraYaml, "__SSL_STORAGE_PORT__", sslStoragePort);

        cassandraYaml = updateCassandraYamlWithPort(cassandraYaml, "__NATIVE_TRANSPORT_PORT__",
                nativeTransportPort);

        cassandraYaml = updateCassandraYamlWithPort(cassandraYaml, "__RPC_PORT__", rpcPort);

        // Write out the YAML contents to a temp file.
        File yamlFile = File.createTempFile("cassandra", ".yaml");
        LOG.info("Writing cassandra.yaml to {}", yamlFile);
        final BufferedWriter bw = new BufferedWriter(new FileWriter(yamlFile));
        try {
            bw.write(cassandraYaml);
        } finally {
            bw.close();
        }

        Preconditions.checkArgument(yamlFile.exists());
        System.setProperty("cassandra.config", "file:" + yamlFile.getAbsolutePath());
        System.setProperty("cassandra-foreground", "true");

        // Make sure that all of the directories for the commit log, data, and caches are empty.
        // Thank goodness there are methods to get this information (versus parsing the YAML
        // directly).
        ArrayList<String> directoriesToDelete = new ArrayList<String>(
                Arrays.asList(DatabaseDescriptor.getAllDataFileLocations()));
        directoriesToDelete.add(DatabaseDescriptor.getCommitLogLocation());
        directoriesToDelete.add(DatabaseDescriptor.getSavedCachesLocation());
        for (String dirName : directoriesToDelete) {
            FileUtils.deleteDirectory(new File(dirName));
        }
        EmbeddedCassandraService embeddedCassandraService = new EmbeddedCassandraService();
        embeddedCassandraService.start();

    } catch (IOException ioe) {
        throw new KijiIOException("Cannot start embedded C* service!");
    }

    try {
        // Use a different port from normal here to avoid conflicts with any locally-running C* cluster.
        // Port settings are controlled in "cassandra.yaml" in the test resources.
        // Also change the timeouts and retry policies.  Since we have only a single thread for this
        // test process, it can slow down dramatically if it has to do a compaction (see SCHEMA-959
        // and SCHEMA-969 for examples of the flakiness this can cause in unit tests).

        // No builder for `SocketOptions`:
        final SocketOptions socketOptions = new SocketOptions();
        // Setting this to 0 disables read timeouts.
        socketOptions.setReadTimeoutMillis(0);
        // This defaults to 5 s.  Increase to a minute.
        socketOptions.setConnectTimeoutMillis(60 * 1000);

        Cluster cluster = Cluster.builder().addContactPoints(DatabaseDescriptor.getListenAddress())
                .withPort(DatabaseDescriptor.getNativeTransportPort()).withSocketOptions(socketOptions)
                // Let's at least log all of the retries so we can see what is happening.
                .withRetryPolicy(new LoggingRetryPolicy(Policies.defaultRetryPolicy()))
                // The default reconnection policy (exponential) looks fine.
                .build();
        mCassandraSession = cluster.connect();
    } catch (Exception exc) {
        throw new KijiIOException("Started embedded C* service, but cannot connect to cluster. " + exc);
    }
}

From source file: org.meteogroup.cassandra.embedded.EmbeddedCassandraLoader.java

License: Open Source License

public static void tearDownCassandra() throws IOException {
    cassandraHost = null;
    cassandraNativePort = -1;
    if (cassandraDaemon != null) {
        cassandraDaemon.deactivate();
    }

    EmbeddedConfigurationLoader.reset();
    for (String dataFileLocation : DatabaseDescriptor.getAllDataFileLocations()) {
        Files.walkFileTree(Paths.get(dataFileLocation), new Deleter());
    }
}

From source file: org.normandra.CassandraTestUtil.java

License: Apache License

private static void clear() throws IOException {
    final String[] directoryNames = { DatabaseDescriptor.getCommitLogLocation(), };
    for (final String dirName : directoryNames) {
        final File dir = new File(dirName).getCanonicalFile();
        logger.info("Creating commit log at [" + dir + "].");
        if (dir.exists()) {
            FileUtils.deleteDirectory(dir);
        }
    }

    for (final String dirName : DatabaseDescriptor.getAllDataFileLocations()) {
        final File dir = new File(dirName).getCanonicalFile();
        logger.info("Creating data location at [" + dir + "].");
        if (dir.exists()) {
            FileUtils.deleteDirectory(dir);
        }
    }
}

From source file: org.springframework.cassandra.test.integration.EmbeddedCassandraServerHelper.java

License: Apache License

private static void cleanup() throws IOException {

    // clean up commitlog and data locations
    rmdirs(DatabaseDescriptor.getCommitLogLocation());
    rmdirs(DatabaseDescriptor.getAllDataFileLocations());
}

From source file: org.springframework.data.cassandra.test.util.EmbeddedCassandraServerHelper.java

License: Apache License

private static void cleanup() throws IOException {

    // clean up commit log and data locations
    rmdirs(DatabaseDescriptor.getCommitLogLocation());
    rmdirs(DatabaseDescriptor.getAllDataFileLocations());
}

From source file: org.usergrid.standalone.cassandra.EmbeddedServerHelper.java

License: Apache License

public static void cleanup() throws IOException {
    // clean up commitlog
    String[] directoryNames = { DatabaseDescriptor.getCommitLogLocation(), };
    for (String dirName : directoryNames) {
        File dir = new File(dirName);
        if (!dir.exists()) {
            throw new RuntimeException("No such directory: " + dir.getAbsolutePath());
        }
        FileUtils.deleteRecursive(dir);
    }

    // clean up data directories, which are laid out as <data directory>/<table>/<data files>
    for (String dirName : DatabaseDescriptor.getAllDataFileLocations()) {
        File dir = new File(dirName);
        if (!dir.exists()) {
            throw new RuntimeException("No such directory: " + dir.getAbsolutePath());
        }
        FileUtils.deleteRecursive(dir);
    }
}

From source file: org.wildfly.extension.cassandra.WildflyCassandraDaemon.java

License: Apache License

/**
 * This is a hook for concrete daemons to initialize themselves suitably.
 *
 * Subclasses should override this to finish the job (listening on ports, etc.)
 */
protected void setup() {
    // log warnings for different kinds of sub-optimal JVMs.  tldr use 64-bit Oracle >= 1.6u32
    if (!DatabaseDescriptor.hasLargeAddressSpace())
        CassandraLogger.LOGGER.infof(
                "32bit JVM detected.  It is recommended to run Cassandra on a 64bit JVM for better performance.");
    String javaVersion = System.getProperty("java.version");
    String javaVmName = System.getProperty("java.vm.name");
    CassandraLogger.LOGGER.infof("JVM vendor/version: {}/{}", javaVmName, javaVersion);
    if (javaVmName.contains("OpenJDK")) {
        // There is essentially no QA done on OpenJDK builds, and
        // clusters running OpenJDK have seen many heap and load issues.
        CassandraLogger.LOGGER
                .warn("OpenJDK is not recommended. Please upgrade to the newest Oracle Java release");
    } else if (!javaVmName.contains("HotSpot")) {
        CassandraLogger.LOGGER.warn(
                "Non-Oracle JVM detected.  Some features, such as immediate unmap of compacted SSTables, may not work as intended");
    }
    /*
    else
    {
        String[] java_version = javaVersion.split("_");
        String java_major = java_version[0];
        int java_minor;
        try
        {
            java_minor = (java_version.length > 1) ? Integer.parseInt(java_version[1]) : 0;
        }
        catch (NumberFormatException e)
        {
            // have only seen this with java7 so far but no doubt there are other ways to break this
            CassandraLogger.LOGGER.infof("Unable to parse java version %s", Arrays.toString(java_version));
            java_minor = 32;
        }
    }
    */
    CassandraLogger.LOGGER.infof("Heap size: {}/{}", Runtime.getRuntime().totalMemory(),
            Runtime.getRuntime().maxMemory());
    for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans())
        CassandraLogger.LOGGER.infof("{} {}: {}", pool.getName(), pool.getType(), pool.getPeakUsage());
    CassandraLogger.LOGGER.infof("Classpath: {}", System.getProperty("java.class.path"));
    CLibrary.tryMlockall();

    Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        public void uncaughtException(Thread t, Throwable e) {
            StorageMetrics.exceptions.inc();
            CassandraLogger.LOGGER.error("Exception in thread " + t, e);
            Tracing.trace("Exception in thread " + t, e);
            for (Throwable e2 = e; e2 != null; e2 = e2.getCause()) {
                // some code, like FileChannel.map, will wrap an OutOfMemoryError in another exception
                if (e2 instanceof OutOfMemoryError)
                    exitThread.start();

                if (e2 instanceof FSError) {
                    if (e2 != e) // make sure FSError gets logged exactly once.
                        CassandraLogger.LOGGER.error("Exception in thread " + t, e2);
                    FileUtils.handleFSError((FSError) e2);
                }
            }
        }
    });

    // check all directories (data, commit log, saved caches) for existence and permissions
    Iterable<String> dirs = Iterables.concat(Arrays.asList(DatabaseDescriptor.getAllDataFileLocations()), Arrays
            .asList(DatabaseDescriptor.getCommitLogLocation(), DatabaseDescriptor.getSavedCachesLocation()));
    for (String dataDir : dirs) {
        CassandraLogger.LOGGER.debugf("Checking directory %s", dataDir);
        File dir = new File(dataDir);
        if (dir.exists())
            assert dir.isDirectory() && dir.canRead() && dir.canWrite() && dir.canExecute() : String
                    .format("Directory %s is not accessible.", dataDir);
    }

    if (CacheService.instance == null) // should never happen
        throw new RuntimeException("Failed to initialize Cache Service.");

    // Check the system keyspace to keep the user from shooting themselves in the foot by changing the
    // partitioner, cluster name, etc.  We do a one-off scrub of the system keyspace first, since we
    // can't load the list of the remaining keyspaces until the system keyspace is opened.
    for (CFMetaData cfm : Schema.instance.getKeyspaceMetaData(Keyspace.SYSTEM_KS).values())
        ColumnFamilyStore.scrubDataDirectories(Keyspace.SYSTEM_KS, cfm.cfName);
    try {
        SystemKeyspace.checkHealth();
    } catch (ConfigurationException e) {
        throw new RuntimeException("Fatal exception during initialization", e);
    }

    // load keyspace descriptions.
    DatabaseDescriptor.loadSchemas();

    try {
        LeveledManifest.maybeMigrateManifests();
    } catch (IOException e) {
        throw new RuntimeException(
                "Could not migrate old leveled manifest. Move away the .json file in the data directory", e);
    }

    // clean up compaction leftovers
    Map<Pair<String, String>, Map<Integer, UUID>> unfinishedCompactions = SystemKeyspace
            .getUnfinishedCompactions();
    for (Pair<String, String> kscf : unfinishedCompactions.keySet())
        ColumnFamilyStore.removeUnfinishedCompactionLeftovers(kscf.left, kscf.right,
                unfinishedCompactions.get(kscf));
    SystemKeyspace.discardCompactionsInProgress();

    // clean up debris in the rest of the keyspaces
    for (String keyspaceName : Schema.instance.getKeyspaces()) {
        // Skip system as we've already cleaned it
        if (keyspaceName.equals(Keyspace.SYSTEM_KS))
            continue;

        for (CFMetaData cfm : Schema.instance.getKeyspaceMetaData(keyspaceName).values())
            ColumnFamilyStore.scrubDataDirectories(keyspaceName, cfm.cfName);
    }

    // initialize keyspaces
    for (String keyspaceName : Schema.instance.getKeyspaces()) {
        if (CassandraLogger.LOGGER.isDebugEnabled())
            CassandraLogger.LOGGER.debug("opening keyspace " + keyspaceName);
        // disable auto compaction until commit log replay ends
        for (ColumnFamilyStore cfs : Keyspace.open(keyspaceName).getColumnFamilyStores()) {
            for (ColumnFamilyStore store : cfs.concatWithIndexes()) {
                store.disableAutoCompaction();
            }
        }
    }

    if (CacheService.instance.keyCache.size() > 0)
        CassandraLogger.LOGGER.infof("completed pre-loading (%d keys) key cache.",
                CacheService.instance.keyCache.size());

    if (CacheService.instance.rowCache.size() > 0)
        CassandraLogger.LOGGER.infof("completed pre-loading (%d keys) row cache.",
                CacheService.instance.rowCache.size());

    try {
        GCInspector.instance.start();
    } catch (Throwable t) {
        CassandraLogger.LOGGER.warn("Unable to start GCInspector (currently only supported on the Sun JVM)");
    }

    // MeteredFlusher can block if flush queue fills up, so don't put on scheduledTasks
    // Start it before commit log, so memtables can flush during commit log replay
    StorageService.optionalTasks.scheduleWithFixedDelay(new MeteredFlusher(), 1000, 1000,
            TimeUnit.MILLISECONDS);

    // replay the log if necessary
    try {
        CommitLog.instance.recover();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    // enable auto compaction
    for (Keyspace keyspace : Keyspace.all()) {
        for (ColumnFamilyStore cfs : keyspace.getColumnFamilyStores()) {
            for (final ColumnFamilyStore store : cfs.concatWithIndexes()) {
                store.enableAutoCompaction();
            }
        }
    }
    // start compactions in five minutes (if no flushes have occurred by then to trigger them)
    Runnable runnable = new Runnable() {
        public void run() {
            for (Keyspace keyspaceName : Keyspace.all()) {
                for (ColumnFamilyStore cf : keyspaceName.getColumnFamilyStores()) {
                    for (ColumnFamilyStore store : cf.concatWithIndexes())
                        CompactionManager.instance.submitBackground(store);
                }
            }
        }
    };
    StorageService.optionalTasks.schedule(runnable, 5 * 60, TimeUnit.SECONDS);

    SystemKeyspace.finishStartup();

    // start server internals
    StorageService.instance.registerDaemon(this);
    try {
        StorageService.instance.initServer();
    } catch (ConfigurationException e) {
        throw new RuntimeException("Fatal configuration error", e);
    }

    Mx4jTool.maybeLoad();

    // Metrics
    String metricsReporterConfigFile = System.getProperty("cassandra.metricsReporterConfigFile");
    if (metricsReporterConfigFile != null) {
        CassandraLogger.LOGGER.infof("Trying to load metrics-reporter-config from file: {}",
                metricsReporterConfigFile);
        try {
            String reportFileLocation = WildflyCassandraDaemon.class.getClassLoader()
                    .getResource(metricsReporterConfigFile).getFile();
            ReporterConfig.loadFromFile(reportFileLocation).enableAll();
        } catch (Exception e) {
            CassandraLogger.LOGGER
                    .warn("Failed to load metrics-reporter-config, metric sinks will not be activated", e);
        }
    }

    if (!FBUtilities.getBroadcastAddress().equals(InetAddress.getLoopbackAddress()))
        waitForGossipToSettle();

    // Thrift
    InetAddress rpcAddr = DatabaseDescriptor.getRpcAddress();
    int rpcPort = DatabaseDescriptor.getRpcPort();
    thriftServer = new ThriftServer(rpcAddr, rpcPort);
    StorageService.instance.registerThriftServer(thriftServer);

    // Native transport
    InetAddress nativeAddr = DatabaseDescriptor.getNativeTransportAddress();
    int nativePort = DatabaseDescriptor.getNativeTransportPort();
    nativeServer = new org.apache.cassandra.transport.Server(nativeAddr, nativePort);
    StorageService.instance.registerNativeServer(nativeServer);
}