Example usage for org.apache.cassandra.db.marshal AbstractType fromString

List of usage examples for org.apache.cassandra.db.marshal AbstractType fromString

Introduction

On this page you can find example usage for org.apache.cassandra.db.marshal AbstractType fromString.

Prototype

public abstract ByteBuffer fromString(String source) throws MarshalException;

Document

Get a byte representation of the given string.
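
Before the examples, here is a minimal, self-contained sketch of the fromString/compose round trip. This is an illustration, not code from any of the projects below, and note that the package of MarshalException has moved between Cassandra versions, so adjust the import to your classpath.

import java.nio.ByteBuffer;

import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.db.marshal.Int32Type;
// In older Cassandra releases MarshalException lives in org.apache.cassandra.db.marshal instead.
import org.apache.cassandra.serializers.MarshalException;

public class FromStringDemo {
    public static void main(String[] args) {
        // fromString parses the textual form of a value into its binary (ByteBuffer) representation.
        AbstractType<Integer> intType = Int32Type.instance;
        ByteBuffer buf = intType.fromString("42");

        // compose is the inverse operation: binary back to a Java object.
        Integer value = intType.compose(buf);
        System.out.println(value); // prints 42

        try {
            intType.fromString("not a number"); // malformed input
        } catch (MarshalException e) {
            System.err.println("could not marshal: " + e.getMessage());
        }
    }
}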

Usage

From source file:com.perpetumobile.bit.orm.cassandra.CliMain.java

License:Apache License

/**
 * Converts object represented as string into byte[] according to comparator
 * @param object - object to convert into byte array
 * @param comparator - comparator used to convert object
 * @return byte[] - object in the byte array representation
 */
private ByteBuffer getBytesAccordingToType(String object, AbstractType<?> comparator) {
    if (comparator == null) // default comparator is BytesType
        comparator = BytesType.instance;

    try {
        return comparator.fromString(object);
    } catch (MarshalException e) {
        throw new RuntimeException(e);
    }
}
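
Two details of this helper are worth calling out: a null comparator falls back to BytesType, whose fromString interprets the input as a hexadecimal string, and the checked MarshalException is rewrapped as an unchecked RuntimeException so callers need not declare it.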

From source file:com.perpetumobile.bit.orm.cassandra.CliMain.java

License:Apache License

/**
 * Converts column value into byte[] according to validation class
 * @param columnName - column name to which value belongs
 * @param columnFamilyName - column family name
 * @param columnValue - actual column value
 * @return value in byte array representation
 */
private ByteBuffer columnValueAsBytes(ByteBuffer columnName, String columnFamilyName, String columnValue) {
    CfDef columnFamilyDef = getCfDef(columnFamilyName);
    AbstractType<?> defaultValidator = getFormatType(columnFamilyDef.default_validation_class);

    for (ColumnDef columnDefinition : columnFamilyDef.getColumn_metadata()) {
        byte[] currentColumnName = columnDefinition.getName();

        if (ByteBufferUtil.compare(currentColumnName, columnName) == 0) {
            try {
                String validationClass = columnDefinition.getValidation_class();
                return getBytesAccordingToType(columnValue, getFormatType(validationClass));
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }

    return defaultValidator.fromString(columnValue);
}
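
Note the fallback behavior: only columns declared in the column family metadata are parsed with their configured validation class; any other column name falls through to the default validator, so a value for an undeclared (or misspelled) column is silently parsed with default_validation_class instead.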

From source file:com.protectwise.cassandra.db.compaction.example.ConfigurableDeleter.java

License:Apache License

protected ByteBuffer parseRuleValue(AbstractType<?> type, Object value) {
    if (value == null) {
        return null;
    }
    return type.fromString(value.toString());
}
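
Here fromString serves as a generic parsing hook: the rule value is stringified with toString() first, so numeric or boolean values from a configuration source can be marshalled by whatever AbstractType the target column uses, while null short-circuits to null rather than an empty buffer.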

From source file:com.stratio.deep.cassandra.cql.RangeUtils.java

License:Apache License

/**
 * Recursive function that splits a given token range into a given number of token ranges.
 *
 * @param range        the token range to be split.
 * @param partitioner  the cassandra partitioner.
 * @param bisectFactor the actual number of pieces the original token range will be split into.
 * @param accumulator  a token range accumulator (ne
 */
private static void bisectTokeRange(DeepTokenRange range, final IPartitioner partitioner,
        final int bisectFactor, final List<DeepTokenRange> accumulator) {

    final AbstractType tkValidator = partitioner.getTokenValidator();

    Token leftToken = partitioner.getTokenFactory().fromByteArray(tkValidator.decompose(range.getStartToken()));
    Token rightToken = partitioner.getTokenFactory().fromByteArray(tkValidator.decompose(range.getEndToken()));
    Token midToken = partitioner.midpoint(leftToken, rightToken);

    Comparable midpoint = (Comparable) tkValidator.compose(tkValidator.fromString(midToken.toString()));

    DeepTokenRange left = new DeepTokenRange(range.getStartToken(), midpoint, range.getReplicas());
    DeepTokenRange right = new DeepTokenRange(midpoint, range.getEndToken(), range.getReplicas());

    if (bisectFactor / 2 <= 1) {
        accumulator.add(left);
        accumulator.add(right);
    } else {
        bisectTokeRange(left, partitioner, bisectFactor / 2, accumulator);
        bisectTokeRange(right, partitioner, bisectFactor / 2, accumulator);
    }
}
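
Since bisectFactor is halved on every recursive call and ranges are only accumulated at the base case, a power-of-two bisectFactor of 2^n produces exactly 2^n subranges; for example, bisectFactor = 8 recurses through factors 8, 4, 2, and the four calls that reach factor 2 each add two ranges, for eight in total. Also note the round trip through tkValidator.fromString(midToken.toString()), which converts the partitioner's Token back into the comparable value type stored in DeepTokenRange.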

From source file:kina.cql.RangeUtils.java

License:Apache License

/**
 * Recursive function that splits a given token range into a given number of token ranges.
 *
 * @param range the token range to be split.
 * @param partitioner the cassandra partitioner.
 * @param bisectFactor the actual number of pieces the original token range will be split into.
 * @param accumulator a token range accumulator (ne
 */
private static void bisectTokeRange(Range range, final IPartitioner partitioner, final int bisectFactor,
        final List<Range> accumulator) {

    final AbstractType tkValidator = partitioner.getTokenValidator();

    Token leftToken = partitioner.getTokenFactory().fromByteArray(tkValidator.decompose(range.getStartToken()));
    Token rightToken = partitioner.getTokenFactory().fromByteArray(tkValidator.decompose(range.getEndToken()));
    Token midToken = partitioner.midpoint(leftToken, rightToken);

    Comparable midpoint = (Comparable) tkValidator.compose(tkValidator.fromString(midToken.toString()));

    Range left = new Range(range.getStartToken(), midpoint, range.getReplicas());
    Range right = new Range(midpoint, range.getEndToken(), range.getReplicas());

    if (bisectFactor / 2 <= 1) {
        accumulator.add(left);
        accumulator.add(right);
    } else {
        bisectTokeRange(left, partitioner, bisectFactor / 2, accumulator);
        bisectTokeRange(right, partitioner, bisectFactor / 2, accumulator);
    }
}
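
This is the same bisection algorithm as the com.stratio.deep example above, operating on kina's Range type instead of DeepTokenRange, so the same power-of-two behavior of bisectFactor applies.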

From source file:org.apache.hadoop.hive.cassandra.CassandraPushdownPredicate.java

License:Apache License

private static ByteBuffer getIndexExpressionValue(ExprNodeConstantDesc constantDesc,
        PrimitiveObjectInspector poi, Object writable, AbstractType validator) {
    logger.info("Primitive Category: {}, Validation class: {}, CassandraType: {}",
            new Object[] { poi.getPrimitiveCategory(), validator.getClass().getName(),
                    LazyCassandraUtils.getCassandraType(poi) });
    switch (poi.getPrimitiveCategory()) {
    case TIMESTAMP:
        String dateString = new java.sql.Date(
                ((java.sql.Timestamp) poi.getPrimitiveJavaObject(writable)).getTime()).toString();
        return validator.fromString(dateString);
    case BINARY:
        byte[] bytes = ((ByteArrayRef) poi.getPrimitiveJavaObject(writable)).getData();

        // this will only work if the value has been cast using one of the UDFs
        // UDFHexToBytes, UDFUuid, UDFDecimal, UDFVarint
        return ByteBuffer.wrap(bytes);
    default:
        return validator.fromString(constantDesc.getValue().toString());
    }
}
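
For TIMESTAMP values the Hive Timestamp is first truncated to a java.sql.Date and rendered as yyyy-MM-dd before being passed to fromString, so any sub-day precision is deliberately dropped; BINARY values bypass the validator entirely and are wrapped as-is, which only works if the value was already encoded with one of the listed UDFs.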

From source file:org.apache.hadoop.hive.cassandra.input.HiveCassandraStandardColumnInputFormat.java

License:Apache License

@Override
public RecordReader<BytesWritable, MapWritable> getRecordReader(InputSplit split, JobConf jobConf,
        final Reporter reporter) throws IOException {
    HiveCassandraStandardSplit cassandraSplit = (HiveCassandraStandardSplit) split;

    List<String> columns = CassandraColumnSerDe.parseColumnMapping(cassandraSplit.getColumnMapping());
    isTransposed = CassandraColumnSerDe.isTransposed(columns);

    List<Integer> readColIDs = ColumnProjectionUtils.getReadColumnIDs(jobConf);

    if (columns.size() < readColIDs.size()) {
        throw new IOException("Cannot read more columns than the given table contains.");
    }

    org.apache.cassandra.hadoop.ColumnFamilySplit cfSplit = cassandraSplit.getSplit();
    Job job = new Job(jobConf);

    TaskAttemptContext tac = new TaskAttemptContext(job.getConfiguration(), new TaskAttemptID()) {
        @Override
        public void progress() {
            reporter.progress();
        }
    };

    SlicePredicate predicate = new SlicePredicate();

    if (isTransposed || readColIDs.size() == columns.size() || readColIDs.size() == 0) {
        SliceRange range = new SliceRange();
        AbstractType comparator = BytesType.instance;

        String comparatorType = jobConf.get(AbstractCassandraSerDe.CASSANDRA_SLICE_PREDICATE_RANGE_COMPARATOR);
        if (comparatorType != null && !comparatorType.equals("")) {
            try {
                comparator = TypeParser.parse(comparatorType);
            } catch (ConfigurationException ex) {
                throw new IOException("Comparator class not found.");
            } catch (SyntaxException e) {
                throw new IOException(e);
            }
        }

        String sliceStart = jobConf.get(AbstractCassandraSerDe.CASSANDRA_SLICE_PREDICATE_RANGE_START);
        String sliceEnd = jobConf.get(AbstractCassandraSerDe.CASSANDRA_SLICE_PREDICATE_RANGE_FINISH);
        String reversed = jobConf.get(AbstractCassandraSerDe.CASSANDRA_SLICE_PREDICATE_RANGE_REVERSED);

        range.setStart(comparator.fromString(sliceStart == null ? "" : sliceStart));
        range.setFinish(comparator.fromString(sliceEnd == null ? "" : sliceEnd));
        range.setReversed(reversed == null ? false : reversed.equals("true"));
        range.setCount(cassandraSplit.getSlicePredicateSize());
        predicate.setSlice_range(range);
    } else {
        int iKey = columns.indexOf(CassandraColumnSerDe.CASSANDRA_KEY_COLUMN);
        predicate.setColumn_names(getColumnNames(iKey, columns, readColIDs));
    }

    try {

        boolean wideRows = false;
        if (isTransposed && tac.getConfiguration()
                .getBoolean(CassandraColumnSerDe.CASSANDRA_ENABLE_WIDEROW_ITERATOR, true)) {
            wideRows = true;
        }

        ConfigHelper.setInputColumnFamily(tac.getConfiguration(), cassandraSplit.getKeyspace(),
                cassandraSplit.getColumnFamily(), wideRows);

        ConfigHelper.setInputSlicePredicate(tac.getConfiguration(), predicate);
        ConfigHelper.setRangeBatchSize(tac.getConfiguration(), cassandraSplit.getRangeBatchSize());
        ConfigHelper.setInputRpcPort(tac.getConfiguration(), cassandraSplit.getPort() + "");
        ConfigHelper.setInputInitialAddress(tac.getConfiguration(), cassandraSplit.getHost());
        ConfigHelper.setInputPartitioner(tac.getConfiguration(), cassandraSplit.getPartitioner());
        // Set Split Size
        ConfigHelper.setInputSplitSize(tac.getConfiguration(), cassandraSplit.getSplitSize());

        LOG.info("Validators : " + tac.getConfiguration().get(CassandraColumnSerDe.CASSANDRA_VALIDATOR_TYPE));
        List<IndexExpression> indexExpr = parseFilterPredicate(jobConf);
        if (indexExpr != null) {
            //We have pushed down a filter from the Hive query, we can use this against secondary indexes
            ConfigHelper.setInputRange(tac.getConfiguration(), indexExpr);
        }

        CassandraHiveRecordReader rr = new CassandraHiveRecordReader(new ColumnFamilyRecordReader(),
                isTransposed);

        rr.initialize(cfSplit, tac);

        return rr;

    } catch (Exception ie) {
        throw new IOException(ie);
    }
}
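
When no comparator is configured for the slice predicate, BytesType is used, and a missing start or finish is replaced with the empty string; since fromString("") yields an empty ByteBuffer, the resulting slice range is unbounded on that side, i.e. it covers all columns up to the configured count.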

From source file:org.apache.hadoop.hive.cassandra.serde.TableMapping.java

License:Apache License

protected boolean serialize(Object obj, ObjectInspector objInspector, ObjectInspector declaredObjInspector,
        int level) throws IOException {

    switch (objInspector.getCategory()) {
    case PRIMITIVE: {

        //Marshal to expected cassandra format
        AbstractType validator = LazyCassandraUtils
                .getCassandraType((PrimitiveObjectInspector) declaredObjInspector);

        if (validator instanceof BytesType) {
            BytesWritable bw = ((BinaryObjectInspector) objInspector).getPrimitiveWritableObject(obj);
            serializeStream.write(bw.getBytes(), 0, bw.getLength());
        } else {

            LazyUtils.writePrimitiveUTF8(serializeStream, obj, (PrimitiveObjectInspector) objInspector, escaped,
                    escapeChar, needsEscape);

            //convert from string to cassandra type
            if (!declaredObjInspector.getTypeName()
                    .equals(PrimitiveObjectInspectorUtils.stringTypeEntry.typeName)) {
                ByteBuffer bb = validator.fromString(serializeStream.toString());
                serializeStream.reset();
                serializeStream.write(ByteBufferUtil.getArray(bb));
            }
        }

        return true;
    }
    case LIST: {
        char separator = (char) separators[level];
        ListObjectInspector loi = (ListObjectInspector) objInspector;
        List<?> list = loi.getList(obj);
        ObjectInspector eoi = loi.getListElementObjectInspector();
        if (list == null) {
            return false;
        } else {
            for (int i = 0; i < list.size(); i++) {
                if (i > 0) {
                    serializeStream.write(separator);
                }
                serialize(list.get(i), eoi, PrimitiveObjectInspectorFactory.javaStringObjectInspector,
                        level + 1);
            }
        }
        return true;
    }
    case MAP: {
        char separator = (char) separators[level];
        char keyValueSeparator = (char) separators[level + 1];
        MapObjectInspector moi = (MapObjectInspector) objInspector;
        ObjectInspector koi = moi.getMapKeyObjectInspector();
        ObjectInspector voi = moi.getMapValueObjectInspector();

        Map<?, ?> map = moi.getMap(obj);
        if (map == null) {
            return false;
        } else {
            boolean first = true;
            for (Map.Entry<?, ?> entry : map.entrySet()) {
                if (first) {
                    first = false;
                } else {
                    serializeStream.write(separator);
                }
                serialize(entry.getKey(), koi, PrimitiveObjectInspectorFactory.javaStringObjectInspector,
                        level + 2);
                serializeStream.write(keyValueSeparator);
                serialize(entry.getValue(), voi, PrimitiveObjectInspectorFactory.javaStringObjectInspector,
                        level + 2);
            }
        }
        return true;
    }
    case STRUCT: {
        char separator = (char) separators[level];
        StructObjectInspector soi = (StructObjectInspector) objInspector;
        List<? extends StructField> fields = soi.getAllStructFieldRefs();
        List<Object> list = soi.getStructFieldsDataAsList(obj);
        if (list == null) {
            return false;
        } else {
            for (int i = 0; i < list.size(); i++) {
                if (i > 0) {
                    serializeStream.write(separator);
                }
                serialize(list.get(i), fields.get(i).getFieldObjectInspector(),
                        PrimitiveObjectInspectorFactory.javaStringObjectInspector, level + 1);
            }
        }
        return true;
    }
    }
    throw new RuntimeException("Unknown category type: " + objInspector.getCategory());
}
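
In the PRIMITIVE case the value is always written to serializeStream as UTF-8 text first; only when the declared Hive type is not string is the buffered text re-parsed with validator.fromString so that the bytes handed to Cassandra match the column's validation class. The collection cases recurse with a per-level separator and deliberately declare the element inspector as string, so nested values always take that textual path.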

From source file:org.elassandra.cluster.InternalCassandraClusterService.java

License:Apache License

/**
 * Parse an elastic _id (a value or a JSON array) to build a DocPrimaryKey or populate the given map.
 * @param index
 * @param type
 * @param id
 * @param map
 */
public DocPrimaryKey parseElasticId(final String index, final String type, final String id,
        Map<String, Object> map) throws JsonParseException, JsonMappingException, IOException {
    IndexService indexService = indexServiceSafe(index);
    String ksName = indexService.settingsService().getSettings().get(IndexMetaData.SETTING_KEYSPACE, index);
    String cfName = typeToCfName(type);
    CFMetaData metadata = getCFMetaData(ksName, cfName);

    List<ColumnDefinition> partitionColumns = metadata.partitionKeyColumns();
    List<ColumnDefinition> clusteringColumns = metadata.clusteringColumns();
    int ptLen = partitionColumns.size();

    if (id.startsWith("[") && id.endsWith("]")) {
        // _id is JSON array of values.
        Object[] elements = jsonMapper.readValue(id, Object[].class);
        Object[] values = (map != null) ? null : new Object[elements.length];
        String[] names = (map != null) ? null : new String[elements.length];
        if (elements.length > ptLen + clusteringColumns.size())
            throw new JsonMappingException(
                    "_id=" + id + " longer than the primary key size=" + (ptLen + clusteringColumns.size()));

        for (int i = 0; i < elements.length; i++) {
            ColumnDefinition cd = (i < ptLen) ? partitionColumns.get(i) : clusteringColumns.get(i - ptLen);
            AbstractType<?> atype = cd.type;
            if (map == null) {
                names[i] = cd.name.toString();
                values[i] = atype.compose(atype.fromString(elements[i].toString()));
            } else {
                map.put(cd.name.toString(), atype.compose(atype.fromString(elements[i].toString())));
            }
        }
        return (map != null) ? null
                : new DocPrimaryKey(names, values,
                        (clusteringColumns.size() > 0 && elements.length == partitionColumns.size()));
    } else {
        // _id is a single column; parse its value.
        AbstractType<?> atype = partitionColumns.get(0).type;
        if (map == null) {
            return new DocPrimaryKey(new String[] { partitionColumns.get(0).name.toString() },
                    new Object[] { atype.compose(atype.fromString(id)) }, clusteringColumns.size() != 0);
        } else {
            map.put(partitionColumns.get(0).name.toString(), atype.compose(atype.fromString(id)));
            return null;
        }
    }
}
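
As a worked illustration (with a hypothetical schema): for a table with partition key user (text) and clustering column post_id (int), an _id of ["alice", "7"] would produce names = {"user", "post_id"} and values = {"alice", 7}, each element parsed by its column's AbstractType via fromString; a bare _id of alice would be parsed against the single partition column only.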

From source file:org.elassandra.cluster.InternalCassandraClusterService.java

License:Apache License

public DocPrimaryKey parseElasticRouting(final String index, final String type, final String routing)
        throws JsonParseException, JsonMappingException, IOException {
    IndexService indexService = indexServiceSafe(index);
    String ksName = indexService.settingsService().getSettings().get(IndexMetaData.SETTING_KEYSPACE, index);
    String cfName = typeToCfName(type);
    CFMetaData metadata = getCFMetaData(ksName, cfName);
    List<ColumnDefinition> partitionColumns = metadata.partitionKeyColumns();
    int ptLen = partitionColumns.size();
    if (routing.startsWith("[") && routing.endsWith("]")) {
        // _routing is JSON array of values.
        Object[] elements = jsonMapper.readValue(routing, Object[].class);
        Object[] values = new Object[elements.length];
        String[] names = new String[elements.length];
        if (elements.length != ptLen)
            throw new JsonMappingException(
                    "_routing=" + routing + " does not match the partition key size=" + ptLen);

        for (int i = 0; i < elements.length; i++) {
            ColumnDefinition cd = partitionColumns.get(i);
            AbstractType<?> atype = cd.type;
            names[i] = cd.name.toString();
            values[i] = atype.compose(atype.fromString(elements[i].toString()));
        }
        return new DocPrimaryKey(names, values);
    } else {
        // _routing is a single column; parse its value.
        AbstractType<?> atype = partitionColumns.get(0).type;
        return new DocPrimaryKey(new String[] { partitionColumns.get(0).name.toString() },
                new Object[] { atype.compose(atype.fromString(routing)) });
    }
}
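
Unlike parseElasticId above, a JSON-array _routing must cover exactly the partition key columns (the length check is an exact match against ptLen), since routing determines only data placement and never includes clustering columns.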