Example usage for org.apache.cassandra.db Keyspace open

List of usage examples for org.apache.cassandra.db Keyspace open

Introduction

In this page you can find the example usage for org.apache.cassandra.db Keyspace open.

Prototype

public static Keyspace open(String keyspaceName) 

Source Link

Usage

From source file:com.cloudian.support.RowFinder.java

License:Apache License

/**
 * Scans every SSTable of the configured column family for the configured row key and
 * prints, per matching SSTable file, the live/deleted/range-tombstone column counts plus
 * any contiguous run of tombstoned columns whose length reaches {@code threshold}.
 *
 * @throws IOException if reading an SSTable data file fails
 */
private void find() throws IOException {

    // get ColumnFamilyStore instance
    System.out.println("Opening keyspace " + ksName + " ...");
    Keyspace keyspace = Keyspace.open(ksName);
    System.out.println("Opened keyspace " + ksName);

    System.out.println("Getting column family " + cfName + " ...");
    ColumnFamilyStore cfStore = keyspace.getColumnFamilyStore(cfName);
    Collection<SSTableReader> ssTables = cfStore.getSSTables();
    System.out.println("Got column family " + cfName);

    ByteBuffer buff = ByteBufferUtil.bytes(this.rowKey);
    IPartitioner<?> partitioner = cfStore.partitioner;

    System.out.println(this.rowKey + " is included in the following files");
    System.out.println("==============================================================================");
    // FIX: header column said "FINE_NAME"; it is the SSTable file name.
    System.out.println("FILE_NAME, COLUMN_INFO, CONTIGUOUS_TOMBSTONED_COLUMNS(over " + threshold + ")");
    System.out.println("==============================================================================");
    for (SSTableReader reader : ssTables) {

        // Bloom filter gives a cheap "definitely not present" answer; false positives
        // are resolved by the index lookup below (entry == null).
        if (!reader.getBloomFilter().isPresent(buff))
            continue;

        // FIX: the data reader was never closed (leaked one file handle per matching
        // SSTable, including on the entry == null path); close it in a finally block.
        RandomAccessReader dfile = reader.openDataReader();
        try {
            // seek to row key
            RowIndexEntry entry = reader.getPosition(partitioner.decorateKey(buff), SSTableReader.Operator.EQ);
            if (entry == null)
                continue;
            dfile.seek(entry.position);

            // skip the key and, for older formats, the row size header
            ByteBufferUtil.readWithShortLength(dfile);
            if (reader.descriptor.version.hasRowSizeAndColumnCount)
                dfile.readLong();
            // Deserializing advances dfile past the row-level deletion info; the
            // value itself is not used.
            DeletionInfo deletionInfo = new DeletionInfo(DeletionTime.serializer.deserialize(dfile));
            int columnCount = reader.descriptor.version.hasRowSizeAndColumnCount ? dfile.readInt()
                    : Integer.MAX_VALUE;

            // get iterator over the row's on-disk atoms
            Iterator<OnDiskAtom> atomIterator = reader.metadata.getOnDiskIterator(dfile, columnCount,
                    reader.descriptor.version);

            // iterate, tracking contiguous runs of tombstoned columns
            System.out.print(new File(reader.getFilename()).getName());
            boolean isContiguous = false;
            int contiguousTombstonedColumns = 0;
            String contiguousTombstonedColumnsStart = null;
            int live = 0;
            int deleted = 0;
            int rangeTombstone = 0;
            StringBuilder sb = new StringBuilder();
            while (atomIterator.hasNext()) {

                OnDiskAtom atom = atomIterator.next();

                if (atom instanceof Column) {

                    if (atom instanceof DeletedColumn) {

                        deleted++;

                        if (!isContiguous) {
                            // start of a new tombstone run
                            isContiguous = true;
                            contiguousTombstonedColumnsStart = ByteBufferUtil.string(atom.name());
                        }

                        contiguousTombstonedColumns++;

                    } else {

                        live++;

                        if (isContiguous) {

                            // a live column ends the run; report it if long enough
                            if (contiguousTombstonedColumns >= this.threshold) {
                                sb.append(", [" + contiguousTombstonedColumnsStart + "|"
                                        + contiguousTombstonedColumns + "]");
                            }

                            // reset run tracking
                            contiguousTombstonedColumns = 0;
                            contiguousTombstonedColumnsStart = null;
                            isContiguous = false;
                        }
                    }

                } else if (atom instanceof RangeTombstone) {

                    rangeTombstone++;

                    int localDeletionTime = atom.getLocalDeletionTime();
                    ByteBuffer min = ((RangeTombstone) atom).min;
                    ByteBuffer max = ((RangeTombstone) atom).max;
                    String minString = ByteBufferUtil.string(min);
                    String maxString = ByteBufferUtil.string(max);

                    sb.append(", [" + minString + ", " + maxString + "(" + localDeletionTime + ")]");
                }
            }

            // FIX: this end-of-row check used to run INSIDE the loop, appending the same
            // run once per tombstone beyond the threshold. It belongs after the loop so a
            // run that reaches the end of the row is reported exactly once.
            if (isContiguous && contiguousTombstonedColumns >= this.threshold) {
                sb.append(", [" + contiguousTombstonedColumnsStart + "|" + contiguousTombstonedColumns + "]");
            }

            System.out.print(", (live, deleted, range tombstone)=(" + live + ", " + deleted + ", "
                    + rangeTombstone + ")");
            System.out.println(sb.toString());

        } finally {
            dfile.close();
        }
    }
}

From source file:com.cloudian.support.SSTableGarbageChecker.java

License:Apache License

/**
 * @param args optional show option (-t or -g) followed by the SSTable file paths to check
 */
/**
 * Entry point for the SSTable garbage checker.
 *
 * <p>Accepts an optional show option ({@code -t} tombstoned / {@code -g} gc-able)
 * followed by one or more SSTable data file paths. All files must belong to the same
 * column family; each is resolved to a currently-loaded {@code SSTableReader} before
 * the garbage check runs. Exits with status 1 on any usage or lookup error, 0 on success.
 *
 * @param args optional show option followed by the SSTable file paths to check
 */
public static void main(String[] args) {

    String usage = "Usage: checksstablegarbage (-t|-g) SSTableFile1 (SSTableFile2 ... SSTableFileN)";
    String showOption = (args.length > 0 && args[0].startsWith("-")) ? args[0] : null;

    if (args.length == 0 || args.length == 1 && showOption != null) {

        // FIX: message typo "At lest" -> "At least"
        System.err.println("At least 1 SSTableFile needs to be provided");
        System.err.println(usage);
        System.exit(1);

    }

    if (showOption != null) {

        if (!showOption.equals(SHOW_OPTION_TOMBSTONED) && !showOption.equals(SHOW_OPTION_GCABLE)) {

            System.err.println(showOption + " is not a valid option");
            System.err.println(usage);
            System.exit(1);

        }

    }

    System.out.println("Loading schema ...");
    DatabaseDescriptor.loadSchemas();
    System.out.println("Loaded schema");
    // NOTE(review): ksName/cfName are only assigned further down from the first
    // descriptor, so this existence check runs against their prior (static) values —
    // confirm whether it should move after the descriptors are parsed.
    if (Schema.instance.getCFMetaData(ksName, cfName) == null) {
        System.err.println(cfName + " in " + ksName + " does not exist");
        System.exit(1);
    }

    // Collect the file arguments (skipping the option slot when present) and keep a
    // sorted copy of their absolute paths.
    File[] ssTableFiles = (showOption != null) ? new File[args.length - 1] : new File[args.length];
    ssTableFilePaths = new String[ssTableFiles.length];
    for (int i = (showOption != null) ? 1 : 0; i < args.length; i++) {
        int j = (showOption != null) ? i - 1 : i;
        ssTableFiles[j] = new File(args[i]);
        ssTableFilePaths[j] = ssTableFiles[j].getAbsolutePath();
    }
    Arrays.sort(ssTableFilePaths);

    descriptors = new Descriptor[ssTableFiles.length];
    Collection<SSTableReader> ssTables = null;
    StringBuffer message = new StringBuffer();
    // Walk backwards so the last file initializes ksName/cfName and the keyspace
    // before the remaining files are validated against them.
    for (int i = ssTableFiles.length - 1; i >= 0; i--) {

        System.out.println("Opening " + ssTableFiles[i].getName() + "...");

        if (!ssTableFiles[i].exists()) {
            // FIX: args[i] was off by one whenever a show option was given; report
            // the file we actually checked.
            System.err.println(ssTableFiles[i].getPath() + " does not exist");
            System.exit(1);
        } else {

            descriptors[i] = Descriptor.fromFilename(ssTableFiles[i].getAbsolutePath());

            if (i == ssTableFiles.length - 1) {

                ksName = descriptors[i].ksname;
                cfName = descriptors[i].cfname;

                // get ColumnFamilyStore instance
                System.out.println("Opening keyspace " + ksName + " ...");
                Keyspace keyspace = Keyspace.open(ksName);
                System.out.println("Opened keyspace " + ksName + " ...");

                System.out.println("Getting column family " + cfName + " ...");
                cfStore = keyspace.getColumnFamilyStore(cfName);
                ssTables = cfStore.getSSTables();
                System.out.println("Got column family " + cfName + " ...");

                // get gc grace seconds
                gcGraceSeconds = cfStore.metadata.getGcGraceSeconds();
                gcBefore = (int) (System.currentTimeMillis() / 1000) - gcGraceSeconds;

                message.append("[KS, CF, gc_grace(gcBefore, now)] = [" + ksName + DELIMITER + cfName + DELIMITER
                        + gcGraceSeconds + "(" + gcBefore + ", " + System.currentTimeMillis() + ")]");

            } else {

                boolean theSameCf = cfName.equals(descriptors[i].cfname);
                if (!theSameCf) {

                    System.err.println("All SSTableFiles have to belong to the same column family");
                    // FIX: grammar ("does not a member of") and the args off-by-one.
                    System.err.println(ssTableFiles[i].getPath() + " is not a member of " + cfName);
                    System.exit(1);

                }

            }

            // Resolve the file to the live SSTableReader loaded for this column family
            SSTableReader found = null;
            for (SSTableReader reader : ssTables) {

                if (reader.getFilename().equals(ssTableFiles[i].getAbsolutePath())) {

                    found = reader;
                    break;

                }

            }

            if (found != null) {

                ssTableReaders.add(found);

            } else {

                System.err.println("Can not find SSTableReader for " + ssTableFiles[i].getAbsolutePath());
                if (!ssTableFiles[i].exists()) {
                    System.err.println(ssTableFiles[i].getName() + " does not exist anymore.");
                }
                System.err.println("Loaded SSTable files are the followings:");
                for (SSTableReader reader : ssTables) {
                    System.err.println(reader.getFilename());
                }
                System.exit(1);

            }

        }

        System.out.println("Opened " + ssTableFiles[i].getName());

    }

    System.out.println(message.toString());

    try {

        SSTableGarbageChecker.checkCompacted(showOption);

    } catch (IOException e) {
        System.err.println("Check failed due to " + e.getMessage());
        e.printStackTrace();
        System.exit(1);
    }

    // Successful
    System.exit(0);

}

From source file:com.stratio.cassandra.lucene.IndexPagingState.java

License:Apache License

/**
 * Picks the expression in the command's row filter that this index should serve:
 * a custom expression wins outright; otherwise the first regular expression that is
 * supported by one of the table's Lucene indexes is returned.
 *
 * @param command the read command whose row filter is inspected
 * @return the matching expression, or {@code null} when none applies
 * @throws ReflectiveOperationException declared for callers; not thrown directly here
 */
private Expression expression(ReadCommand command) throws ReflectiveOperationException {

    // Custom expressions take precedence over everything else
    for (Expression candidate : command.rowFilter().getExpressions()) {
        if (candidate.isCustom()) {
            return candidate;
        }
    }

    // Otherwise look for a regular expression backed by one of this table's Lucene indexes
    String keyspaceName = command.metadata().ksName;
    String tableName = command.metadata().cfName;
    ColumnFamilyStore store = Keyspace.open(keyspaceName).getColumnFamilyStore(tableName);
    for (Expression candidate : command.rowFilter().getExpressions()) {
        for (org.apache.cassandra.index.Index registered : store.indexManager.listIndexes()) {
            boolean isLucene = registered instanceof Index;
            if (isLucene && registered.supportsExpression(candidate.column(), candidate.operator())) {
                return candidate;
            }
        }
    }

    return null;
}

From source file:com.stratio.cassandra.lucene.IndexQueryHandler.java

License:Apache License

/**
 * Executes a SELECT that carries a Lucene custom expression, disabling paging when a
 * top-k search would otherwise be paged with a page size smaller than the limit.
 *
 * @param select     the parsed SELECT statement
 * @param state      the query state
 * @param options    the query options (limit, page size, values)
 * @param expression the Lucene custom expression attached to the statement
 * @return the query result
 * @throws ReflectiveOperationException if reflective execution helpers fail
 */
private ResultMessage process(SelectStatement select, QueryState state, QueryOptions options,
        RowFilter.CustomExpression expression) throws ReflectiveOperationException {

    // Validate the expression against its target Lucene index
    Keyspace keyspace = Keyspace.open(select.keyspace());
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(select.columnFamily());
    Index luceneIndex = (Index) store.indexManager.getIndex(expression.getTargetIndex());
    Search search = luceneIndex.validate(expression);

    // Paging rules for top-k searches
    int limit = select.getLimit(options);
    int pageSize = getPageSize(select, options);
    if (search.isTopK()) {
        if (limit == Integer.MAX_VALUE) { // Avoid unlimited
            throw new InvalidRequestException(
                    "Top-k searches don't support paging, so a cautious LIMIT clause should be provided "
                            + "to prevent excessive memory consumption.");
        }
        if (pageSize < limit) {
            String json = UTF8Type.instance.compose(expression.getValue());
            logger.warn("Disabling paging of {} rows per page for top-k search requesting {} rows: {}",
                    pageSize, limit, json);
            return executeWithoutPaging(select, state, options);
        }
    }

    // Process normally
    return execute(select, state, options);
}

From source file:edu.dprg.morphous.MoveSSTableTest.java

License:Apache License

/**
 * Verifies that swapping SSTables between two column families in the same keyspace
 * exchanges their data: cf1 starts with 500 rows and cf2 with 100; after the swap the
 * range-slice counts must be reversed.
 */
@Test
public void testMoveSSTablesBetweenDifferentColumnFamilies() throws Exception {
    final String ks1 = "testkeyspace_move_sstables";
    final String cfName1 = "table1_copy";
    final String cfName2 = "table1";

    List<KSMetaData> schema = new ArrayList<KSMetaData>();

    // A whole bucket of shorthand
    Class<? extends AbstractReplicationStrategy> simple = SimpleStrategy.class;
    Map<String, String> opts_rf1 = KSMetaData.optsWithRF(1);

    schema.add(KSMetaData.testMetadata(ks1, simple, opts_rf1,
            new CFMetaData(ks1, cfName1, ColumnFamilyType.Standard, BytesType.instance, null),
            new CFMetaData(ks1, cfName2, ColumnFamilyType.Standard, BytesType.instance, null)));

    for (KSMetaData ksm : schema) {
        MigrationManager.announceNewKeyspace(ksm);
    }

    // Start both stores from a clean slate.
    // FIX: cfs1 was truncated twice (once before cfs2 was fetched, once after);
    // a single truncate per store is sufficient.
    Keyspace keyspace = Keyspace.open(ks1);
    ColumnFamilyStore cfs1 = keyspace.getColumnFamilyStore(cfName1);
    ColumnFamilyStore cfs2 = keyspace.getColumnFamilyStore(cfName2);
    cfs1.truncateBlocking();
    cfs2.truncateBlocking();

    // 500 rows into cf1, flushed to disk
    for (int i = 1; i <= 500; i++) {
        ByteBuffer key = ByteBufferUtil.bytes("key-cf1-" + i);
        RowMutation rm = new RowMutation(ks1, key);
        rm.add(cfName1, ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("cf1-value" + i), 0);
        rm.apply();
    }
    cfs1.forceBlockingFlush();

    // 100 rows into cf2, flushed to disk
    for (int i = 1; i <= 100; i++) {
        ByteBuffer key = ByteBufferUtil.bytes("key-cf2-" + i);
        RowMutation rm = new RowMutation(ks1, key);
        rm.add(cfName2, ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("cf2-value" + i), 0);
        rm.apply();
    }
    cfs2.forceBlockingFlush();

    SlicePredicate sp = new SlicePredicate();
    sp.setColumn_names(Arrays.asList(ByteBufferUtil.bytes("Column1")));

    // Pre-swap row counts
    List<Row> rows1 = cfs1.getRangeSlice(Util.range("", ""), null,
            ThriftValidation.asIFilter(sp, cfs1.metadata, null), 1000, System.currentTimeMillis(), true, false);
    assertEquals(500, rows1.size());

    List<Row> rows2 = cfs2.getRangeSlice(Util.range("", ""), null,
            ThriftValidation.asIFilter(sp, cfs2.metadata, null), 1000, System.currentTimeMillis(), true, false);
    assertEquals(100, rows2.size());

    ColumnFamily cf = cfs1.getColumnFamily(
            QueryFilter.getIdentityFilter(Util.dk("key-cf1-1"), cfName1, System.currentTimeMillis()));
    assertNotNull(cf);

    new AtomicSwitchMorphousTaskHandler().swapSSTablesBetweenCfs(cfs1, cfs2);

    // Post-swap: the row counts must be exchanged
    cfs1.reload();
    rows1 = cfs1.getRangeSlice(Util.range("", ""), null, ThriftValidation.asIFilter(sp, cfs1.metadata, null),
            1000, System.currentTimeMillis(), true, false);
    assertEquals(100, rows1.size());

    cfs2.reload();
    rows2 = cfs2.getRangeSlice(Util.range("", ""), null, ThriftValidation.asIFilter(sp, cfs2.metadata, null),
            1000, System.currentTimeMillis(), true, false);
    assertEquals(500, rows2.size());
}

From source file:edu.dprg.morphous.MoveSSTableTest.java

License:Apache License

/**
 * Verifies that after swapping SSTables between cf0 (partition key col0) and cf1
 * (partition key col1) and migrating each CF definition to the other partition key,
 * all rows remain queryable by the new keys.
 */
@Test
public void testMigrateColumnFamilyDefinitionToUseNewPartitonKey() throws Exception {
    String ksName = "testkeyspace_migrate_cf";
    String[] cfName = { "cf0", "cf1" };

    edu.uiuc.dprg.morphous.Util.executeCql3Statement("CREATE KEYSPACE " + ksName
            + " WITH replication = {'class':'SimpleStrategy', 'replication_factor':1};");
    edu.uiuc.dprg.morphous.Util.executeCql3Statement(
            "CREATE TABLE " + ksName + "." + cfName[0] + " ( col0 varchar PRIMARY KEY, col1 varchar);");
    edu.uiuc.dprg.morphous.Util.executeCql3Statement(
            "CREATE TABLE " + ksName + "." + cfName[1] + " ( col0 varchar, col1 varchar PRIMARY KEY);");

    Keyspace ks = Keyspace.open(ksName);
    ColumnFamilyStore cfs0 = ks.getColumnFamilyStore(cfName[0]);
    ColumnFamilyStore cfs1 = ks.getColumnFamilyStore(cfName[1]);

    // 100 rows into each CF
    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 100; j++) {
            edu.uiuc.dprg.morphous.Util.executeCql3Statement(String.format("INSERT INTO " + ksName + "."
                    + cfName[i] + " (col0, col1) VALUES ('cf%d-col0-%03d', 'cf%d-col1-%03d');", i, j, i, j));
        }
    }

    CqlResult selectCf0 = edu.uiuc.dprg.morphous.Util
            .executeCql3Statement("SELECT * FROM " + ksName + "." + cfName[0] + ";");
    assertEquals(100, selectCf0.rows.size());

    CqlResult selectCf1 = edu.uiuc.dprg.morphous.Util
            .executeCql3Statement("SELECT * FROM " + ksName + "." + cfName[1] + ";");
    assertEquals(100, selectCf1.rows.size());

    // Flush Memtables out to SSTables
    cfs0.forceBlockingFlush();
    cfs1.forceBlockingFlush();

    // Sanity check: every cf0 row is reachable by its original partition key
    for (int i = 0; i < 100; i++) {
        String query = "SELECT * FROM " + ksName + "." + cfName[0]
                + String.format(" WHERE col0 = 'cf0-col0-%03d';", i);
        logger.info("Executing query {}", query);
        selectCf0 = edu.uiuc.dprg.morphous.Util.executeCql3Statement(query);
        assertEquals(1, selectCf0.rows.size());
    }

    // Swap the SSTables and migrate each CF definition to the other partition key
    AtomicSwitchMorphousTaskHandler handler = new AtomicSwitchMorphousTaskHandler();
    handler.swapSSTablesBetweenCfs(cfs0, cfs1);
    Morphous.instance().migrateColumnFamilyDefinitionToUseNewPartitonKey(ksName, cfName[1], "col0");
    Morphous.instance().migrateColumnFamilyDefinitionToUseNewPartitonKey(ksName, cfName[0], "col1");

    cfs0.reload();
    cfs1.reload();

    selectCf0 = edu.uiuc.dprg.morphous.Util
            .executeCql3Statement("SELECT * FROM " + ksName + "." + cfName[0] + ";");
    assertEquals(100, selectCf0.rows.size());

    selectCf1 = edu.uiuc.dprg.morphous.Util
            .executeCql3Statement("SELECT * FROM " + ksName + "." + cfName[1] + ";");
    assertEquals(100, selectCf1.rows.size());

    // FIX: the schema_columns query result was bound to an unused local; keep the
    // query (harmless read, handy when debugging) but drop the dead variable.
    edu.uiuc.dprg.morphous.Util.executeCql3Statement(
            "SELECT * FROM system.schema_columns where keyspace_name = '" + ksName + "';");

    // After migration, cf0 holds cf1's rows keyed by col1, and cf1 holds cf0's keyed by col0
    for (int i = 0; i < 100; i++) {
        String query = "SELECT * FROM " + ksName + "." + cfName[0]
                + String.format(" WHERE col1 = 'cf1-col1-%03d';", i);
        logger.info("Executing query {}", query);
        selectCf1 = edu.uiuc.dprg.morphous.Util.executeCql3Statement(query);
        assertEquals(1, selectCf1.rows.size());
    }
    for (int i = 0; i < 100; i++) {
        String query = "SELECT * FROM " + ksName + "." + cfName[1]
                + String.format(" WHERE col0 = 'cf0-col0-%03d';", i);
        logger.info("Executing query {}", query);
        selectCf1 = edu.uiuc.dprg.morphous.Util.executeCql3Statement(query);
        assertEquals(1, selectCf1.rows.size());
    }
}

From source file:edu.dprg.morphous.MoveSSTableTest.java

License:Apache License

/**
 * Same scenario as {@link #testMigrateColumnFamilyDefinitionToUseNewPartitonKey()} but
 * with int partition keys: cf0 rows use col0 = 0..99 and cf1 rows col0 = 1000..1099,
 * with col1 = col0 + 100 in both. After swapping SSTables and migrating each CF
 * definition to the other partition key, every row must be reachable by the new key.
 */
@Test
public void testMigrateColumnFamilyDefinitionToUseNewPartitonKey2() throws Exception {
    String ksName = "testkeyspace_migrate_cf_2";
    String[] cfName = { "cf0", "cf1" };

    edu.uiuc.dprg.morphous.Util.executeCql3Statement("CREATE KEYSPACE " + ksName
            + " WITH replication = {'class':'SimpleStrategy', 'replication_factor':1};");
    edu.uiuc.dprg.morphous.Util.executeCql3Statement(
            "CREATE TABLE " + ksName + "." + cfName[0] + " ( col0 int PRIMARY KEY, col1 int, col2 varchar);");
    edu.uiuc.dprg.morphous.Util.executeCql3Statement(
            "CREATE TABLE " + ksName + "." + cfName[1] + " ( col0 int, col1 int PRIMARY KEY, col2 varchar);");

    Keyspace ks = Keyspace.open(ksName);
    ColumnFamilyStore cfs0 = ks.getColumnFamilyStore(cfName[0]);
    ColumnFamilyStore cfs1 = ks.getColumnFamilyStore(cfName[1]);

    // 100 rows per CF; CF i gets col0 = j + i*1000 and col1 = col0 + 100
    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 100; j++) {
            edu.uiuc.dprg.morphous.Util.executeCql3Statement(String.format(
                    "INSERT INTO " + ksName + "." + cfName[i]
                            + " (col0, col1, col2) VALUES (%d, %d, 'cf%d-col2-%03d');",
                    j + i * 1000, 100 + j + i * 1000, i, j));
        }
    }

    CqlResult selectCf0 = edu.uiuc.dprg.morphous.Util
            .executeCql3Statement("SELECT * FROM " + ksName + "." + cfName[0] + ";");
    assertEquals(100, selectCf0.rows.size());

    CqlResult selectCf1 = edu.uiuc.dprg.morphous.Util
            .executeCql3Statement("SELECT * FROM " + ksName + "." + cfName[1] + ";");
    assertEquals(100, selectCf1.rows.size());

    // Flush Memtables out to SSTables
    cfs0.forceBlockingFlush();
    cfs1.forceBlockingFlush();

    // Sanity check: cf0 rows are reachable by their original partition key
    CqlResult originalResult;
    for (int i = 0; i < 100; i++) {
        String query = "SELECT * FROM " + ksName + "." + cfName[0] + String.format(" WHERE col0 = %d;", i);
        logger.info("Executing query {}", query);
        originalResult = edu.uiuc.dprg.morphous.Util.executeCql3Statement(query);
        assertEquals(1, originalResult.rows.size());
    }

    // Executed for parity with the original flow; the snapshot itself is not asserted on.
    originalResult = edu.uiuc.dprg.morphous.Util
            .executeCql3Statement("SELECT * FROM " + ksName + "." + cfName[0] + ";");

    // Swap the SSTables and migrate each CF definition to the other partition key
    AtomicSwitchMorphousTaskHandler handler = new AtomicSwitchMorphousTaskHandler();
    handler.swapSSTablesBetweenCfs(cfs0, cfs1);
    Morphous.instance().migrateColumnFamilyDefinitionToUseNewPartitonKey(ksName, cfName[1], "col0");
    Morphous.instance().migrateColumnFamilyDefinitionToUseNewPartitonKey(ksName, cfName[0], "col1");

    cfs0.reload();
    cfs1.reload();

    selectCf0 = edu.uiuc.dprg.morphous.Util
            .executeCql3Statement("SELECT * FROM " + ksName + "." + cfName[0] + ";");
    assertEquals(100, selectCf0.rows.size());

    selectCf1 = edu.uiuc.dprg.morphous.Util
            .executeCql3Statement("SELECT * FROM " + ksName + "." + cfName[1] + ";");
    assertEquals(100, selectCf1.rows.size());

    // FIX: the schema_columns query result was bound to an unused local; keep the
    // query (harmless read, handy when debugging) but drop the dead variable.
    edu.uiuc.dprg.morphous.Util.executeCql3Statement(
            "SELECT * FROM system.schema_columns where keyspace_name = '" + ksName + "';");

    // cf0 now holds cf1's data keyed by col1 (values 1100..1199)
    for (int i = 0; i < 100; i++) {
        String query = "SELECT * FROM " + ksName + "." + cfName[0]
                + String.format(" WHERE col1 = %d;", i + 100 + 1000);
        logger.info("Executing query {}", query);
        selectCf1 = edu.uiuc.dprg.morphous.Util.executeCql3Statement(query);
        assertEquals(1, selectCf1.rows.size());
    }
    // cf1 now holds cf0's data keyed by col0 (values 0..99)
    for (int i = 0; i < 100; i++) {
        String query = "SELECT * FROM " + ksName + "." + cfName[1] + String.format(" WHERE col0 = %d;", i);
        logger.info("Executing query {}", query);
        selectCf1 = edu.uiuc.dprg.morphous.Util.executeCql3Statement(query);
        assertEquals(1, selectCf1.rows.size());
    }
}

From source file:edu.dprg.morphous.MoveSSTableTest.java

License:Apache License

/**
 * Creates a temporary column family derived from an existing one's metadata, re-keyed
 * on "col1", inserts 100 rows into each table, and checks that the temporary table
 * answers point queries by its new partition key.
 */
@Test
public void testCreateTempTable() {
    String keyspaceName = "testkeyspace_create_temp_table";
    String[] tables = { "cf0", "cf1" };

    edu.uiuc.dprg.morphous.Util.executeCql3Statement("CREATE KEYSPACE " + keyspaceName
            + " WITH replication = {'class':'SimpleStrategy', 'replication_factor':1};");
    edu.uiuc.dprg.morphous.Util.executeCql3Statement(
            "CREATE TABLE " + keyspaceName + "." + tables[0] + " ( col0 varchar PRIMARY KEY, col1 varchar);");

    Keyspace keyspace = Keyspace.open(keyspaceName);
    ColumnFamilyStore sourceStore = keyspace.getColumnFamilyStore(tables[0]);

    // Derive the temp table's metadata from the source table, re-keyed on col1
    CFMetaData tempMetadata = Morphous.instance()
            .createNewCFMetaDataFromOldCFMetaDataWithNewCFNameAndNewPartitonKey(sourceStore.metadata, tables[1],
                    "col1");
    Morphous.instance().createNewColumnFamilyWithCFMetaData(tempMetadata);

    ColumnFamilyStore tempStore = keyspace.getColumnFamilyStore(tables[1]);

    // 100 rows into each table
    for (int table = 0; table < 2; table++) {
        for (int row = 0; row < 100; row++) {
            edu.uiuc.dprg.morphous.Util.executeCql3Statement(String.format("INSERT INTO " + keyspaceName + "."
                    + tables[table] + " (col0, col1) VALUES ('cf%d-col0-%03d', 'cf%d-col1-%03d');", table, row,
                    table, row));
        }
    }

    CqlResult sourceRows = edu.uiuc.dprg.morphous.Util
            .executeCql3Statement("SELECT * FROM " + keyspaceName + "." + tables[0] + ";");
    assertEquals(100, sourceRows.rows.size());

    CqlResult tempRows = edu.uiuc.dprg.morphous.Util
            .executeCql3Statement("SELECT * FROM " + keyspaceName + "." + tables[1] + ";");
    assertEquals(100, tempRows.rows.size());

    // Point lookups by the temp table's new partition key
    for (int row = 0; row < 100; row++) {
        CqlResult single = edu.uiuc.dprg.morphous.Util.executeCql3Statement("SELECT * FROM " + keyspaceName
                + "." + tables[1] + " WHERE col1 = '" + String.format("cf1-col1-%03d", row) + "';");
        assertEquals(1, single.rows.size());
    }
}

From source file:edu.dprg.morphous.MoveSSTableTest.java

License:Apache License

/**
 * For each given token range, streams the rows of the original column family and
 * re-inserts them into the temporary column family on the node that owns each row's
 * new partition key, sending the mutation directly over messaging.
 *
 * @param ksName         keyspace containing both column families
 * @param originalCfName source column family to read rows from
 * @param tempCfName     temporary column family to write rows into
 * @param ranges         token ranges to migrate
 */
@Deprecated
@SuppressWarnings("rawtypes")
public static void doInsertOnTemporaryCFForRangesChoosingEndpointManually(String ksName, String originalCfName,
        String tempCfName, Collection<Range<Token>> ranges) {
    // FIX: the store lookup is loop-invariant, so open it once. Also removed the unused
    // 'tempCfs' local, which (apparently a copy/paste slip) opened originalCfName
    // instead of tempCfName and was never read.
    ColumnFamilyStore originalCfs = Keyspace.open(ksName).getColumnFamilyStore(originalCfName);
    for (Range<Token> range : ranges) {
        ColumnFamilyStore.AbstractScanIterator iterator = edu.uiuc.dprg.morphous.Util
                .invokePrivateMethodWithReflection(originalCfs, "getSequentialIterator",
                        DataRange.forKeyRange(range), System.currentTimeMillis());

        while (iterator.hasNext()) {
            Row row = iterator.next();
            ColumnFamily data = row.cf;
            // Rebuild the row's columns under the temp CF's metadata
            ColumnFamily tempData = TreeMapBackedSortedColumns.factory.create(ksName, tempCfName);
            tempData.addAll(data, null);

            // The new row key is the value of the temp CF's first partition key column
            ByteBuffer newKey = tempData
                    .getColumn(tempData.metadata().partitionKeyColumns().get(0).name.asReadOnlyBuffer())
                    .value();
            InetAddress destinationNode = edu.uiuc.dprg.morphous.Util.getNthReplicaNodeForKey(ksName, newKey,
                    1);

            RowMutation rm = new RowMutation(newKey, tempData);
            MessageOut<RowMutation> message = rm.createMessage();
            MessagingService.instance().sendRR(message, destinationNode); //TODO Maybe use more robust way to send message
        }
    }
}

From source file:info.archinnov.achilles.embedded.AchillesCassandraDaemon.java

License:Apache License

/**
 * Override the default setup process to speed up bootstrap
 *
 * - disable JMX
 * - disable legacy schema migration
 * - no pre-3.0 hints migration
 * - no pre-3.0 batch entries migration
 * - disable auto compaction on all keyspaces (your test data should fit in memory!!!)
 * - disable metrics
 * - disable GCInspector
 * - disable mlock
 * - disable Thrift server
 * - disable startup checks (Jemalloc, validLaunchDate, JMXPorts, JvmOptions, JnaInitialization, initSigarLibrary, dataDirs, SSTablesFormat, SystemKeyspaceState, Datacenter, Rack)
 * - disable materialized view rebuild (you should clean data folder between each test anyway)
 * - disable the SizeEstimatesRecorder (estimate SSTable size, who cares for unit testing ?)
 */
@Override
protected void setup() {
    // Delete any failed snapshot deletions on Windows - see CASSANDRA-9658
    if (FBUtilities.isWindows())
        WindowsFailedSnapshotTracker.deleteOldSnapshots();

    ThreadAwareSecurityManager.install();

    // Last-resort handler for uncaught exceptions on any thread: record the failure,
    // let JVMStabilityInspector decide whether the JVM must terminate, and route
    // filesystem / SSTable-corruption errors to their dedicated handlers.
    Thread.setDefaultUncaughtExceptionHandler((t, e) -> {
        StorageMetrics.exceptions.inc();
        logger.error("Exception in thread {}", t, e);
        Tracing.trace("Exception in thread {}", t, e);
        for (Throwable e2 = e; e2 != null; e2 = e2.getCause()) {
            JVMStabilityInspector.inspectThrowable(e2);

            if (e2 instanceof FSError) {
                if (e2 != e) // make sure FSError gets logged exactly once.
                    logger.error("Exception in thread {}", t, e2);
                FileUtils.handleFSError((FSError) e2);
            }

            if (e2 instanceof CorruptSSTableException) {
                if (e2 != e)
                    logger.error("Exception in thread " + t, e2);
                FileUtils.handleCorruptSSTable((CorruptSSTableException) e2);
            }
        }
    });

    // Populate token metadata before flushing, for token-aware sstable partitioning (#6696)
    StorageService.instance.populateTokenMetadata();

    // load schema from disk
    Schema.instance.loadFromDisk();

    try {
        // clean up debris in the rest of the keyspaces
        for (String keyspaceName : Schema.instance.getKeyspaces()) {
            // Skip system as we've already cleaned it
            if (keyspaceName.equals(SystemKeyspace.NAME))
                continue;

            for (CFMetaData cfm : Schema.instance.getTablesAndViews(keyspaceName))
                ColumnFamilyStore.scrubDataDirectories(cfm);
        }
    } catch (StartupException startupEx) {
        // Scrub failures are fatal for the embedded daemon; surface them loudly.
        logger.error("***** Startup exception : " + startupEx.getLocalizedMessage());
        throw new RuntimeException(startupEx);
    }

    Keyspace.setInitialized();

    // initialize keyspaces
    for (String keyspaceName : Schema.instance.getKeyspaces()) {
        if (logger.isDebugEnabled())
            logger.debug("opening keyspace {}", keyspaceName);
        // disable auto compaction until commit log replay ends
        for (ColumnFamilyStore cfs : Keyspace.open(keyspaceName).getColumnFamilyStores()) {
            for (ColumnFamilyStore store : cfs.concatWithIndexes()) {
                store.disableAutoCompaction();
            }
        }
    }

    // Warm the key/row caches; failures here are non-fatal for an embedded test daemon.
    try {
        loadRowAndKeyCacheAsync().get();
    } catch (Throwable t) {
        JVMStabilityInspector.inspectThrowable(t);
        logger.warn("Error loading key or row cache", t);
    }

    // replay the log if necessary
    try {
        CommitLog.instance.recover();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    // Re-populate token metadata after commit log recover (new peers might be loaded onto system keyspace #10293)
    StorageService.instance.populateTokenMetadata();

    SystemKeyspace.finishStartup();

    // start server internals
    StorageService.instance.registerDaemon(this);
    try {
        StorageService.instance.initServer();
    } catch (ConfigurationException e) {
        System.err.println(e.getMessage()
                + "\nFatal configuration error; unable to start server.  See log for stacktrace.");
        exitOrFail(1, "Fatal configuration error", e);
    }

    // Native transport
    nativeTransportService = new NativeTransportService();

    completeSetup();
}