Example usage for org.apache.cassandra.db ConsistencyLevel LOCAL_QUORUM

Introduction

This page collects example usages of the org.apache.cassandra.db ConsistencyLevel.LOCAL_QUORUM field from open source projects.

Prototype

ConsistencyLevel LOCAL_QUORUM
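
LOCAL_QUORUM requires acknowledgement from a quorum of replicas in the coordinator's local datacenter. As a minimal sketch (not taken from the examples below), assuming code running inside a Cassandra node and hypothetical keyspace/table names:

import org.apache.cassandra.cql3.QueryProcessor;
import org.apache.cassandra.cql3.UntypedResultSet;
import org.apache.cassandra.db.ConsistencyLevel;

// Minimal sketch: a server-side CQL read at LOCAL_QUORUM.
// "my_ks" and "my_table" are hypothetical names; QueryProcessor is the
// node-internal query path, so this only runs inside a Cassandra JVM.
public static UntypedResultSet readAtLocalQuorum() throws Exception {
    return QueryProcessor.process(
            "SELECT * FROM my_ks.my_table", ConsistencyLevel.LOCAL_QUORUM);
}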

Usage

From source file: com.protectwise.cassandra.retrospect.deletion.RuleBasedDeletionConvictor.java

License: Apache License

/**
 * Keyed by rule name, then by column name; each entry contains a list of 2-element arrays of ranges for that column.
 * This is not typed; everything is byte buffers, and type is collapsed at testing time.
 *
 * @param statement the CQL select statement that returns the rule data
 * @return the parsed rules, keyed by rule name and then column name
 * @throws ConfigurationException if the node is not fully joined or the query fails
 */
public static Map<ByteBuffer, Map<ByteBuffer, List<ByteBuffer[]>>> parseRules(String statement)
        throws ConfigurationException {
    UntypedResultSet rawRuleData = null;
    try {
        if (!QueryHelper.hasStartedCQL()) {
            // Yuck, exceptions for control flow.  This will be caught upstream during compaction as a signal that
            // we should move to spooked mode.  Outside of compaction the exception will bubble up and be presented
            // to the user (though it seems extremely unlikely)
            throw new ConfigurationException(
                    "Node is not fully joined, so we cannot read deletion rules.  Falling back to standard compaction");
        }
        rawRuleData = QueryProcessor.process(statement, ConsistencyLevel.LOCAL_QUORUM);
    } catch (RequestExecutionException e) {
        throw new ConfigurationException(
                "Unable to query for rule data.  The failed statement was " + statement, e);
    }

    Map<String, ColumnSpecification> cols = new HashMap<>();
    for (ColumnSpecification cs : rawRuleData.metadata()) {
        cols.put(cs.name.toString(), cs);
    }

    if (!cols.containsKey("column") || !cols.containsKey("rulename") || !cols.containsKey("range")) {
        throw new ConfigurationException(
                "The select statement must return the columns 'column', 'rulename', and 'range'");
    }

    CQL3Type columnType = cols.get("column").type.asCQL3Type();
    if (!columnType.equals(CQL3Type.Native.TEXT)) {
        throw new ConfigurationException(
                "The 'column' column must be a text type.  Found " + columnType.toString());
    }

    //  Validate that "range" is of type tuple<text,text>, ugh.
    CQL3Type rangeType = cols.get("range").type.asCQL3Type();
    if (!(rangeType instanceof CQL3Type.Tuple)) {
        throw new ConfigurationException("The column 'range' must be of type tuple<text,text>  Found "
                + cols.get("column").type.getSerializer().getType());
    }
    List<AbstractType<?>> subtypes = ((TupleType) ((CQL3Type.Tuple) rangeType).getType()).allTypes();
    if (subtypes.size() != 2) {
        throw new ConfigurationException("The column 'range' must be of type tuple<text,text>.  Found "
                + cols.get("range").type.getSerializer().getType());
    }
    for (AbstractType<?> t : subtypes) {
        if (!t.asCQL3Type().equals(CQL3Type.Native.TEXT)) {
            throw new ConfigurationException("The column 'range' must be of type tuple<text,text>.  Found "
                    + cols.get("range").type.getSerializer().getType());
        }
    }

    Iterator<UntypedResultSet.Row> resultIterator = rawRuleData.iterator();

    Map<ByteBuffer, Map<ByteBuffer, List<ByteBuffer[]>>> rules = new HashMap<>();
    while (resultIterator.hasNext()) {
        UntypedResultSet.Row row = resultIterator.next();
        ByteBuffer rulename = row.getBlob("rulename");
        Map<ByteBuffer, List<ByteBuffer[]>> rule;
        if (!rules.containsKey(rulename)) {
            rule = new HashMap<>();
            rules.put(rulename, rule);
        } else {
            rule = rules.get(rulename);
        }

        ByteBuffer column = row.getBlob("column");
        List<ByteBuffer[]> ranges;
        if (rule.containsKey(column)) {
            ranges = rule.get(column);
        } else {
            ranges = new ArrayList<>();
            rule.put(column, ranges);
        }
        ByteBuffer[] rawRange = ((TupleType) rangeType.getType()).split(row.getBlob("range"));
        ranges.add(rawRange);
        if (logger.isDebugEnabled()) {
            logger.debug("Rule {} on column {} is range {} to {} (now {} ranges on this column)",
                    PrintHelper.bufToString(rulename), PrintHelper.bufToString(column),
                    PrintHelper.bufToString(rawRange[0]), PrintHelper.bufToString(rawRange[1]), ranges.size());
        }
    }

    return rules;
}
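
A hedged usage sketch of the method above; the select statement and table name are hypothetical, chosen to return the three columns parseRules validates:

// Hypothetical invocation: "my_ks.deletion_rules" is an assumed table whose
// 'range' column is a tuple<text,text>, per the validation above.
Map<ByteBuffer, Map<ByteBuffer, List<ByteBuffer[]>>> rules = RuleBasedDeletionConvictor
        .parseRules("SELECT rulename, column, range FROM my_ks.deletion_rules");
// rules: rule name -> column name -> list of [lower, upper] range pairs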

From source file: com.protectwise.cassandra.retrospect.deletion.RuleBasedLateTTLConvictor.java

License: Apache License

/**
 * Keyed by rule name, then by column name; each entry contains a list of 2-element arrays of ranges for that
 * column, paired with the rule's TTL.  This is not typed; everything is byte buffers, and type is collapsed
 * at testing time.
 *
 * @param statement the CQL select statement that returns the rule data
 * @return the parsed rules and their TTLs, keyed by rule name and then column name
 * @throws ConfigurationException if the node is not fully joined or the query fails
 */
public static Map<ByteBuffer, Pair<Map<ByteBuffer, List<ByteBuffer[]>>, Long>> parseRules(String statement)
        throws ConfigurationException {
    UntypedResultSet rawRuleData = null;
    try {
        if (!QueryHelper.hasStartedCQL()) {
            // Yuck, exceptions for control flow.  This will be caught upstream during compaction as a signal that
            // we should move to spooked mode.  Outside of compaction the exception will bubble up and be presented
            // to the user (though it seems extremely unlikely)
            throw new ConfigurationException(
                    "Node is not fully joined, so we cannot read deletion rules.  Falling back to standard compaction");
        }
        rawRuleData = QueryProcessor.process(statement, ConsistencyLevel.LOCAL_QUORUM);
    } catch (RequestExecutionException e) {
        throw new ConfigurationException(
                "Unable to query for rule data.  The failed statement was " + statement, e);
    }

    Map<String, ColumnSpecification> cols = new HashMap<>();
    for (ColumnSpecification cs : rawRuleData.metadata()) {
        cols.put(cs.name.toString(), cs);
    }

    if (!cols.containsKey("column") || !cols.containsKey("rulename") || !cols.containsKey("range_lower")
            || !cols.containsKey("range_upper") || !cols.containsKey("ttl")) {
        throw new ConfigurationException(
                "The select statement must return the columns 'column', 'rulename', 'range_lower', 'range_upper', and 'ttl'");
    }

    CQL3Type columnType = cols.get("column").type.asCQL3Type();
    if (!columnType.equals(CQL3Type.Native.TEXT)) {
        throw new ConfigurationException(
                "The 'column' column must be a text type.  Found " + columnType.toString());
    }

    // The old validation of 'range' as a single tuple<text,text> column was
    // superseded by the separate 'range_lower'/'range_upper' checks below.

    // Validate that 'range_lower' and 'range_upper' are of type text.
    CQL3Type rangeLowerType = cols.get("range_lower").type.asCQL3Type();
    if (!rangeLowerType.equals(CQL3Type.Native.TEXT)) {
        throw new ConfigurationException("The column 'range_lower' must be of type text.  Found "
                + cols.get("range_lower").type.getSerializer().getType());
    }

    CQL3Type rangeUpperType = cols.get("range_upper").type.asCQL3Type();
    if (!rangeUpperType.equals(CQL3Type.Native.TEXT)) {
        throw new ConfigurationException("The column 'range_upper' must be of type text.  Found "
                + cols.get("range_upper").type.getSerializer().getType());
    }

    // Validate that 'ttl' is of type bigint
    CQL3Type ttlType = cols.get("ttl").type.asCQL3Type();
    if (!ttlType.equals(CQL3Type.Native.BIGINT)) {
        throw new ConfigurationException(
                "The 'ttl' column must be a bigint type.  Found " + ttlType.toString());
    }

    Iterator<UntypedResultSet.Row> resultIterator = rawRuleData.iterator();

    Map<ByteBuffer, Pair<Map<ByteBuffer, List<ByteBuffer[]>>, Long>> rules = new HashMap<>();
    while (resultIterator.hasNext()) {
        UntypedResultSet.Row row = resultIterator.next();
        ByteBuffer rulename = row.getBlob("rulename");
        Map<ByteBuffer, List<ByteBuffer[]>> rule;
        Long ttl = row.getLong("ttl");
        if (!rules.containsKey(rulename)) {
            rule = new HashMap<>();
            rules.put(rulename, Pair.create(rule, ttl));
        } else {
            Pair<Map<ByteBuffer, List<ByteBuffer[]>>, Long> p = rules.get(rulename);
            if (!p.right.equals(ttl)) {
                throw new ConfigurationException("The 'ttl' value for rule " + PrintHelper.bufToString(rulename)
                        + " has inconsistent values between the columns and ranges of this rule.  The value of the TTL must be consistent for the entire rule.");
            }
            rule = p.left;
        }

        ByteBuffer column = row.getBlob("column");
        List<ByteBuffer[]> ranges;
        if (rule.containsKey(column)) {
            ranges = rule.get(column);
        } else {
            ranges = new ArrayList<>();
            rule.put(column, ranges);
        }
        ByteBuffer[] rawRange = new ByteBuffer[2];
        rawRange[0] = row.getBlob("range_lower");
        rawRange[1] = row.getBlob("range_upper");
        ranges.add(rawRange);
        if (logger.isDebugEnabled()) {
            logger.debug("Rule {} on column {} is range {} to {} (now {} ranges on this column)",
                    PrintHelper.bufToString(rulename), PrintHelper.bufToString(column),
                    PrintHelper.bufToString(rawRange[0]), PrintHelper.bufToString(rawRange[1]), ranges.size());
        }
    }

    return rules;
}
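
The same kind of hedged sketch for the TTL variant; the table name is again a hypothetical example returning the five columns validated above:

// Hypothetical invocation; "my_ks.ttl_rules" is an assumed table.
Map<ByteBuffer, Pair<Map<ByteBuffer, List<ByteBuffer[]>>, Long>> rules = RuleBasedLateTTLConvictor
        .parseRules("SELECT rulename, column, range_lower, range_upper, ttl FROM my_ks.ttl_rules");
// Each Pair carries the per-column ranges on the left and the rule's TTL on the right.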

From source file: org.elassandra.cluster.InternalCassandraClusterService.java

License: Apache License

public static ConsistencyLevel consistencyLevelFromString(String value) {
    switch (value.toUpperCase(Locale.ROOT)) {
    case "ANY":
        return ConsistencyLevel.ANY;
    case "ONE":
        return ConsistencyLevel.ONE;
    case "TWO":
        return ConsistencyLevel.TWO;
    case "THREE":
        return ConsistencyLevel.THREE;
    case "QUORUM":
        return ConsistencyLevel.QUORUM;
    case "ALL":
        return ConsistencyLevel.ALL;
    case "LOCAL_QUORUM":
        return ConsistencyLevel.LOCAL_QUORUM;
    case "EACH_QUORUM":
        return ConsistencyLevel.EACH_QUORUM;
    case "SERIAL":
        return ConsistencyLevel.SERIAL;
    case "LOCAL_SERIAL":
        return ConsistencyLevel.LOCAL_SERIAL;
    case "LOCAL_ONE":
        return ConsistencyLevel.LOCAL_ONE;
    default:
        throw new IllegalArgumentException("No write consistency match [" + value + "]");
    }
}
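
A hedged usage sketch of the helper above; the lower-case input is an arbitrary example, accepted because the value is upper-cased with Locale.ROOT first:

// Matching is case-insensitive; unknown names throw IllegalArgumentException.
ConsistencyLevel cl = InternalCassandraClusterService.consistencyLevelFromString("local_quorum");
// cl == ConsistencyLevel.LOCAL_QUORUM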

From source file: org.elasticsearch.action.WriteConsistencyLevel.java

License: Apache License

public ConsistencyLevel toCassandraConsistencyLevel() {
    switch (id) {
    case 1:
        return ConsistencyLevel.LOCAL_ONE;
    case 2:
        return ConsistencyLevel.LOCAL_QUORUM;
    case 3:
        return ConsistencyLevel.ALL;
    }
    return ConsistencyLevel.LOCAL_ONE;
}
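
A hedged sketch of the mapping above, assuming the standard Elasticsearch WriteConsistencyLevel ids (ONE=1, QUORUM=2, ALL=3):

// With those ids, an Elasticsearch QUORUM write (id 2) executes at Cassandra
// LOCAL_QUORUM; any unmatched id falls back to LOCAL_ONE.
ConsistencyLevel cl = WriteConsistencyLevel.QUORUM.toCassandraConsistencyLevel();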

From source file: org.elasticsearch.cassandra.cluster.InternalCassandraClusterService.java

License: Apache License

@Override
public MetaData readMetaDataAsRow() throws NoPersistedMetaDataException {
    UntypedResultSet result;
    try {
        result = process(ConsistencyLevel.LOCAL_QUORUM,
                String.format("SELECT metadata FROM \"%s\".\"%s\" WHERE dc = ?", ELASTIC_ADMIN_KEYSPACE,
                        metaDataTableName),
                DatabaseDescriptor.getLocalDataCenter());
    } catch (RequestExecutionException | RequestValidationException e) {
        throw new NoPersistedMetaDataException(e);
    }
    Row row = result.one();
    if (row != null && row.has("metadata")) {
        return parseMetaDataString(row.getString("metadata"));
    }
    throw new NoPersistedMetaDataException();
}

From source file: org.elasticsearch.cassandra.cluster.InternalCassandraClusterService.java

License: Apache License

@Override
public void initializeMetaDataAsComment() {
    MetaData metadata = state().metaData();
    try {
        String metaDataString = MetaData.builder().toXContent(metadata, persistedParams);
        // initialize a first row if needed
        UntypedResultSet result = process(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_SERIAL,
                String.format(
                        "INSERT INTO \"%s\".\"%s\" (dc,owner,version,metadata) VALUES (?,?,?,?) IF NOT EXISTS",
                        ELASTIC_ADMIN_KEYSPACE, metaDataTableName),
                DatabaseDescriptor.getLocalDataCenter(),
                UUID.fromString(StorageService.instance.getLocalHostId()), metadata.version(), metaDataString);

        Row row = result.one();
        boolean applied = false;
        if (row.has("[applied]")) {
            applied = row.getBoolean("[applied]");
        }
        if (applied) {
            logger.debug("Succefully initialize metadata metaData={}", metadata);
            writeMetaDataAsComment(metaDataString);
        }
    } catch (Exception e) {
        logger.error("Failed to initialize persisted metadata", e);
    }
}

From source file: org.elasticsearch.cluster.ClusterService.java

License: Apache License

@SuppressForbidden(reason = "toUpperCase() for consistency level")
public static ConsistencyLevel consistencyLevelFromString(String value) {
    switch (value.toUpperCase()) {
    case "ANY":
        return ConsistencyLevel.ANY;
    case "ONE":
        return ConsistencyLevel.ONE;
    case "TWO":
        return ConsistencyLevel.TWO;
    case "THREE":
        return ConsistencyLevel.THREE;
    case "QUORUM":
        return ConsistencyLevel.QUORUM;
    case "ALL":
        return ConsistencyLevel.ALL;
    case "LOCAL_QUORUM":
        return ConsistencyLevel.LOCAL_QUORUM;
    case "EACH_QUORUM":
        return ConsistencyLevel.EACH_QUORUM;
    case "SERIAL":
        return ConsistencyLevel.SERIAL;
    case "LOCAL_SERIAL":
        return ConsistencyLevel.LOCAL_SERIAL;
    case "LOCAL_ONE":
        return ConsistencyLevel.LOCAL_ONE;
    default:
        throw new IllegalArgumentException("No write consistency match [" + value + "]");
    }
}