Example usage for org.apache.cassandra.cql3 UntypedResultSet metadata


Introduction

This page lists example usage of the metadata() method of org.apache.cassandra.cql3.UntypedResultSet.

Prototype

public abstract List<ColumnSpecification> metadata();

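Before the full examples below, here is a minimal sketch of how metadata() is typically consumed on a node that has started CQL; the MetadataInspector class name, the statement passed in, and the chosen consistency level are illustrative assumptions rather than part of the examples on this page.

import java.util.List;

import org.apache.cassandra.cql3.ColumnSpecification;
import org.apache.cassandra.cql3.QueryProcessor;
import org.apache.cassandra.cql3.UntypedResultSet;
import org.apache.cassandra.db.ConsistencyLevel;
import org.apache.cassandra.exceptions.RequestExecutionException;

public final class MetadataInspector {
    /**
     * Prints the name and CQL type of every column returned by the given statement.
     */
    public static void printColumnSpecs(String statement) {
        try {
            UntypedResultSet rs = QueryProcessor.process(statement, ConsistencyLevel.ONE);
            List<ColumnSpecification> specs = rs.metadata();
            for (ColumnSpecification cs : specs) {
                // Each ColumnSpecification carries the column identifier and its AbstractType.
                System.out.println(cs.name + " : " + cs.type.asCQL3Type());
            }
        } catch (RequestExecutionException e) {
            throw new RuntimeException("Unable to execute statement: " + statement, e);
        }
    }
}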

Usage

From source file: com.protectwise.cassandra.retrospect.deletion.RuleBasedDeletionConvictor.java

License: Apache License

/**
 * Keyed by rulename, then by column name, then contains a list of 2-element arrays of ranges for that column.
 * This is not typed, everything is byte buffers, type is collapsed at testing time.
 *
 * @param statement
 * @return
 * @throws ConfigurationException
 */
public static Map<ByteBuffer, Map<ByteBuffer, List<ByteBuffer[]>>> parseRules(String statement)
        throws ConfigurationException {
    UntypedResultSet rawRuleData = null;
    try {
        if (!QueryHelper.hasStartedCQL()) {
            // Yuck, exceptions for control flow.  This will be caught upstream during compaction as a signal that
            // we should move to spooked mode.  Outside of compaction the exception will bubble up and be presented
            // to the user (though it seems extremely unlikely)
            throw new ConfigurationException(
                    "Node is not fully joined, so we cannot read deletion rules.  Falling back to standard compaction");
        }
        rawRuleData = QueryProcessor.process(statement, ConsistencyLevel.LOCAL_QUORUM);
    } catch (RequestExecutionException e) {
        ConfigurationException ce = new ConfigurationException(
                "Unable to query for rule data.  The failed statement was " + statement, e);
        throw ce;
    }

    Map<String, ColumnSpecification> cols = new HashMap<>();
    for (ColumnSpecification cs : rawRuleData.metadata()) {
        cols.put(cs.name.toString(), cs);
    }

    if (!cols.containsKey("column") || !cols.containsKey("rulename") || !cols.containsKey("range")) {
        throw new ConfigurationException(
                "The select statement must return the columns 'column', 'rulename', and 'range'");
    }

    CQL3Type columnType = cols.get("column").type.asCQL3Type();
    if (!columnType.equals(CQL3Type.Native.TEXT)) {
        throw new ConfigurationException(
                "The 'column' column must be a text type.  Found " + columnType.toString());
    }

    //  Validate that "range" is of type tuple<text,text>, ugh.
    CQL3Type rangeType = cols.get("range").type.asCQL3Type();
    if (!(rangeType instanceof CQL3Type.Tuple)) {
        throw new ConfigurationException("The column 'range' must be of type tuple<text,text>  Found "
                + cols.get("column").type.getSerializer().getType());
    }
    List<AbstractType<?>> subtypes = ((TupleType) ((CQL3Type.Tuple) rangeType).getType()).allTypes();
    if (subtypes.size() != 2) {
        throw new ConfigurationException("The column 'range' must be of type tuple<text,text>  Found "
                + cols.get("column").type.getSerializer().getType());
    }
    for (AbstractType<?> t : subtypes) {
        if (!t.asCQL3Type().equals(CQL3Type.Native.TEXT)) {
            throw new ConfigurationException("The column 'range' must be of type tuple<text,text>  Found "
                    + cols.get("column").type.getSerializer().getType());
        }
    }

    Iterator<UntypedResultSet.Row> resultIterator = rawRuleData.iterator();

    Map<ByteBuffer, Map<ByteBuffer, List<ByteBuffer[]>>> rules = new HashMap<>();
    while (resultIterator.hasNext()) {
        UntypedResultSet.Row row = resultIterator.next();
        ByteBuffer rulename = row.getBlob("rulename");
        Map<ByteBuffer, List<ByteBuffer[]>> rule;
        if (!rules.containsKey(rulename)) {
            rule = new HashMap<>();
            rules.put(rulename, rule);
        } else {
            rule = rules.get(rulename);
        }

        ByteBuffer column = row.getBlob("column");
        List<ByteBuffer[]> ranges;
        if (rule.containsKey(column)) {
            ranges = rule.get(column);
        } else {
            ranges = new ArrayList<>();
            rule.put(column, ranges);
        }
        ByteBuffer[] rawRange = ((TupleType) rangeType.getType()).split(row.getBlob("range"));
        ranges.add(rawRange);
        if (logger.isDebugEnabled()) {
            logger.debug("Rule {} on column {} is range {} to {} (now {} ranges on this column)",
                    PrintHelper.bufToString(rulename), PrintHelper.bufToString(column),
                    PrintHelper.bufToString(rawRange[0]), PrintHelper.bufToString(rawRange[1]), ranges.size());
        }
    }

    return rules;
}

From source file: com.protectwise.cassandra.retrospect.deletion.RuleBasedLateTTLConvictor.java

License: Apache License

/**
 * Keyed by rulename, then by column name, then contains a list of 2-element arrays of ranges for that column.
 * This is not typed, everything is byte buffers, type is collapsed at testing time.
 *
 * @param statement
 * @return
 * @throws ConfigurationException
 */
public static Map<ByteBuffer, Pair<Map<ByteBuffer, List<ByteBuffer[]>>, Long>> parseRules(String statement)
        throws ConfigurationException {
    UntypedResultSet rawRuleData = null;
    try {
        if (!QueryHelper.hasStartedCQL()) {
            // Yuck, exceptions for control flow.  This will be caught upstream during compaction as a signal that
            // we should move to spooked mode.  Outside of compaction the exception will bubble up and be presented
            // to the user (though it seems extremely unlikely)
            throw new ConfigurationException(
                    "Node is not fully joined, so we cannot read deletion rules.  Falling back to standard compaction");
        }
        rawRuleData = QueryProcessor.process(statement, ConsistencyLevel.LOCAL_QUORUM);
    } catch (RequestExecutionException e) {
        ConfigurationException ce = new ConfigurationException(
                "Unable to query for rule data.  The failed statement was " + statement, e);
        throw ce;
    }

    Map<String, ColumnSpecification> cols = new HashMap<>();
    for (ColumnSpecification cs : rawRuleData.metadata()) {
        cols.put(cs.name.toString(), cs);
    }

    if (!cols.containsKey("column") || !cols.containsKey("rulename") || !cols.containsKey("range_lower")
            || !cols.containsKey("range_upper") || !cols.containsKey("ttl")) {
        throw new ConfigurationException(
                "The select statement must return the columns 'column', 'rulename', 'range', and 'ttl'");
    }

    CQL3Type columnType = cols.get("column").type.asCQL3Type();
    if (!columnType.equals(CQL3Type.Native.TEXT)) {
        throw new ConfigurationException(
                "The 'column' column must be a text type.  Found " + columnType.toString());
    }

    //  Validate that "range" is of type tuple<text,text>, ugh.
    /*
    CQL3Type rangeType = cols.get("range").type.asCQL3Type();
    if (!(rangeType instanceof CQL3Type.Tuple)) {
        throw new ConfigurationException("The column 'range' must be of type tuple<text,text>  Found "
                + cols.get("column").type.getSerializer().getType());
    }
    List<AbstractType<?>> subtypes = ((TupleType) ((CQL3Type.Tuple) rangeType).getType()).allTypes();
    if (subtypes.size() != 2) {
        throw new ConfigurationException("The column 'range' must be of type tuple<text,text>  Found "
                + cols.get("column").type.getSerializer().getType());
    }
    for (AbstractType<?> t : subtypes) {
        if (!t.asCQL3Type().equals(CQL3Type.Native.TEXT)) {
            throw new ConfigurationException("The column 'range' must be of type tuple<text,text>  Found "
                    + cols.get("column").type.getSerializer().getType());
        }
    }
    */

    // Validate that 'range_lower' and 'range_upper' are of type text
    CQL3Type rangeLowerType = cols.get("range_lower").type.asCQL3Type();
    if (!rangeLowerType.equals(CQL3Type.Native.TEXT)) {
        throw new ConfigurationException("The column 'range_lower' must be of type text.  Found "
                + cols.get("range_lower").type.getSerializer().getType());
    }

    CQL3Type rangeUpperType = cols.get("range_upper").type.asCQL3Type();
    if (!rangeUpperType.equals(CQL3Type.Native.TEXT)) {
        throw new ConfigurationException("The column 'range_upper' must be of type text.  Found "
                + cols.get("range_upper").type.getSerializer().getType());
    }

    // Validate that 'ttl' is of type bigint
    CQL3Type ttlType = cols.get("ttl").type.asCQL3Type();
    if (!ttlType.equals(CQL3Type.Native.BIGINT)) {
        throw new ConfigurationException(
                "The 'ttl' column must be a bigint type.  Found " + ttlType.toString());
    }

    Iterator<UntypedResultSet.Row> resultIterator = rawRuleData.iterator();

    Map<ByteBuffer, Pair<Map<ByteBuffer, List<ByteBuffer[]>>, Long>> rules = new HashMap<>();
    while (resultIterator.hasNext()) {
        UntypedResultSet.Row row = resultIterator.next();
        ByteBuffer rulename = row.getBlob("rulename");
        Map<ByteBuffer, List<ByteBuffer[]>> rule;
        Long ttl = row.getLong("ttl");
        if (!rules.containsKey(rulename)) {
            rule = new HashMap<>();
            rules.put(rulename, Pair.create(rule, ttl));
        } else {
            Pair<Map<ByteBuffer, List<ByteBuffer[]>>, Long> p = rules.get(rulename);
            if (!p.right.equals(ttl)) {
                throw new ConfigurationException("The 'ttl' value for rule " + PrintHelper.bufToString(rulename)
                        + " has inconsistent values between the columns and ranges of this rule.  The value of the TTL must be consistent for the entire rule.");
            }
            rule = p.left;
        }

        ByteBuffer column = row.getBlob("column");
        List<ByteBuffer[]> ranges;
        if (rule.containsKey(column)) {
            ranges = rule.get(column);
        } else {
            ranges = new ArrayList<>();
            rule.put(column, ranges);
        }
        ByteBuffer[] rawRange = new ByteBuffer[2];
        rawRange[0] = row.getBlob("range_lower");
        rawRange[1] = row.getBlob("range_upper");
        ranges.add(rawRange);
        if (logger.isDebugEnabled()) {
            logger.debug("Rule {} on column {} is range {} to {} (now {} ranges on this column)",
                    PrintHelper.bufToString(rulename), PrintHelper.bufToString(column),
                    PrintHelper.bufToString(rawRange[0]), PrintHelper.bufToString(rawRange[1]), ranges.size());
        }
    }

    return rules;
}
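
A similar hypothetical caller for the TTL variant (the TtlRuleDump class name is an assumption). Here each rule maps to a Pair of its per-column ranges and a single rule-wide TTL value:

import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;

import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.utils.Pair;

import com.protectwise.cassandra.retrospect.deletion.RuleBasedLateTTLConvictor;

public final class TtlRuleDump {
    public static void dumpTtlRules(String statement) throws ConfigurationException {
        // parseRules() enforces that the statement returns 'rulename', 'column',
        // 'range_lower', 'range_upper', and 'ttl'.
        Map<ByteBuffer, Pair<Map<ByteBuffer, List<ByteBuffer[]>>, Long>> rules =
                RuleBasedLateTTLConvictor.parseRules(statement);
        for (Map.Entry<ByteBuffer, Pair<Map<ByteBuffer, List<ByteBuffer[]>>, Long>> rule : rules.entrySet()) {
            Long ttl = rule.getValue().right;                            // rule-wide TTL value
            Map<ByteBuffer, List<ByteBuffer[]>> byColumn = rule.getValue().left;
            System.out.println("rule has ttl=" + ttl + " across " + byColumn.size() + " column(s)");
        }
    }
}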