Example usage for org.apache.cassandra.utils ByteBufferUtil clone

List of usage examples for org.apache.cassandra.utils ByteBufferUtil clone

Introduction

On this page you can find an example usage for org.apache.cassandra.utils ByteBufferUtil clone.

Prototype

public static ByteBuffer clone(ByteBuffer buffer) 

Source Link

Usage

From source file:com.dse.pig.udfs.AbstractCassandraStorage.java

License:Apache License

/**
 * Fetches column metadata for the configured keyspace/column family.
 *
 * Primary path queries {@code system.schema_columns}; when no rows come back,
 * CqlStorage falls back to the thrift CF definition for classic tables, while
 * CassandraStorage simply returns an empty list.
 *
 * @param client                    connected thrift client used to run the query
 * @param cassandraStorage          when true and no schema rows exist, return an empty list
 * @param includeCompactValueColumn when true, also emit a compact table's "value" column
 * @return column definitions for the column family (possibly empty, never null)
 */
protected List<ColumnDef> getColumnMeta(Cassandra.Client client, boolean cassandraStorage,
        boolean includeCompactValueColumn) throws InvalidRequestException, UnavailableException,
        TimedOutException, SchemaDisagreementException, TException, CharacterCodingException,
        org.apache.cassandra.exceptions.InvalidRequestException, ConfigurationException, NotFoundException {
    String query = "SELECT column_name, " + "       validator, " + "       index_type "
            + "FROM system.schema_columns " + "WHERE keyspace_name = '%s' " + "  AND columnfamily_name = '%s'";

    CqlResult result = client.execute_cql3_query(
            ByteBufferUtil.bytes(String.format(query, keyspace, column_family)), Compression.NONE,
            ConsistencyLevel.ONE);

    List<CqlRow> rows = result.rows;
    List<ColumnDef> columnDefs = new ArrayList<ColumnDef>();
    if (rows == null || rows.isEmpty()) {
        // if CassandraStorage, just return the empty list
        if (cassandraStorage)
            return columnDefs;

        // otherwise for CqlStorage, check metadata for classic thrift tables
        CFDefinition cfDefinition = getCfDefinition(keyspace, column_family, client);
        for (ColumnIdentifier column : cfDefinition.metadata.keySet()) {
            ColumnDef cDef = new ColumnDef();
            String columnName = column.toString();
            String type = cfDefinition.metadata.get(column).type.toString();
            logger.debug("name: {}, type: {} ", columnName, type);
            cDef.name = ByteBufferUtil.bytes(columnName);
            cDef.validation_class = type;
            columnDefs.add(cDef);
        }
        // we may not need to include the value column for compact tables as we
        // could have already processed it as schema_columnfamilies.value_alias
        if (columnDefs.isEmpty() && includeCompactValueColumn) {
            String value = cfDefinition.value != null ? cfDefinition.value.toString() : null;
            if ("value".equals(value)) {
                ColumnDef cDef = new ColumnDef();
                cDef.name = ByteBufferUtil.bytes(value);
                cDef.validation_class = cfDefinition.value.type.toString();
                columnDefs.add(cDef);
            }
        }
        return columnDefs;
    }

    // Schema rows exist: column 0 = name, 1 = validator, 2 = index_type.
    for (CqlRow row : rows) {
        ColumnDef cDef = new ColumnDef();
        // clone so the ColumnDef owns an independent buffer, not the thrift result's
        cDef.setName(ByteBufferUtil.clone(row.getColumns().get(0).value));
        cDef.validation_class = ByteBufferUtil.string(row.getColumns().get(1).value);
        ByteBuffer indexType = row.getColumns().get(2).value;
        if (indexType != null)
            cDef.index_type = getIndexType(ByteBufferUtil.string(indexType));
        columnDefs.add(cDef);
    }
    return columnDefs;
}

From source file:com.spotify.hdfs2cass.cassandra.cql.CrunchCqlBulkRecordWriter.java

License:Apache License

/**
 * Writes one CQL record to the underlying {@link CQLSSTableWriter}, reporting
 * progress to Hadoop/Crunch so the task is not killed as idle.
 *
 * @param ignoredKey partition key from Crunch, unused (the record carries its own values)
 * @param record     the record whose values are appended as a raw row
 * @throws CrunchRuntimeException if the row cannot be added
 */
@Override
public void write(final ByteBuffer ignoredKey, final CQLRecord record) {
    prepareWriter();
    // Defensive copies: Crunch may reuse the record's ByteBuffers after this call,
    // but CQLSSTableWriter holds on to them.
    List<ByteBuffer> bb = Lists.newArrayList();
    for (ByteBuffer v : record.getValues()) {
        bb.add(ByteBufferUtil.clone(v));
    }
    try {
        ((CQLSSTableWriter) writer).rawAddRow(bb);
        if (null != progress)
            progress.progress();
        if (null != context)
            HadoopCompat.progress(context);
    } catch (InvalidRequestException | IOException e) {
        // Log the full stack trace and keep the cause chained; logging/throwing
        // only e.getMessage() would discard the point of failure.
        LOG.error("Error adding row", e);
        throw new CrunchRuntimeException("Error adding row : " + e.getMessage(), e);
    }
}

From source file:com.spotify.hdfs2cass.cassandra.utils.CassandraRecordUtils.java

License:Open Source License

/**
 * Serializes an arbitrary value into a {@code ByteBuffer} suitable for Cassandra.
 *
 * Dispatches on the runtime type of {@code value}: null maps to the empty buffer,
 * scalars use the matching {@code ByteBufferUtil.bytes} overload, existing buffers
 * are cloned, Avro records become composites of their serialized fields, and
 * collections are delegated to the dedicated serializers.
 *
 * @param value the object to serialize; may be null
 * @return the serialized bytes
 * @throws CrunchRuntimeException if the value's type is not supported
 */
public static ByteBuffer toByteBuffer(final Object value) {
    if (value == null) {
        return ByteBufferUtil.EMPTY_BYTE_BUFFER;
    }
    if (value instanceof CharSequence) {
        return ByteBufferUtil.bytes(value.toString());
    }
    if (value instanceof Double) {
        return ByteBufferUtil.bytes((Double) value);
    }
    if (value instanceof Float) {
        return ByteBufferUtil.bytes((Float) value);
    }
    if (value instanceof Integer) {
        return ByteBufferUtil.bytes((Integer) value);
    }
    if (value instanceof Long) {
        return ByteBufferUtil.bytes((Long) value);
    }
    if (value instanceof ByteBuffer) {
        // clone so the caller's buffer position/content stay independent
        return ByteBufferUtil.clone((ByteBuffer) value);
    }
    if (value instanceof GenericData.Array) {
        return serializeList((GenericData.Array) value);
    }
    if (value instanceof SpecificRecord) {
        final SpecificRecord record = (SpecificRecord) value;
        final List<ByteBuffer> fieldBuffers = Lists.newArrayList();
        for (Schema.Field field : record.getSchema().getFields()) {
            fieldBuffers.add(toByteBuffer(record.get(field.pos())));
        }
        return CompositeType.build(fieldBuffers.toArray(new ByteBuffer[0]));
    }
    if (value instanceof Map) {
        return serializeMap((Map<?, ?>) value);
    }
    if (value instanceof Set) {
        return serializeSet((Set<?>) value);
    }
    if (value instanceof List) {
        return serializeList((List<?>) value);
    }
    if (value instanceof UUID) {
        return ByteBufferUtil.bytes((UUID) value);
    }

    throw new CrunchRuntimeException("Can not transform field (class: " + value.getClass() + ") to ByteBuffer");
}

From source file:com.stratio.cassandra.index.util.Base256Serializer.java

License:Apache License

/**
 * Returns the {@code String} representation of the specified {@code ByteBuffer}.
 *
 * The input buffer's position and limit are left untouched.
 *
 * @param byteBuffer The {@code ByteBuffer} to be converted.
 * @return The {@code String} representation of the specified {@code ByteBuffer}.
 */
public static String string(ByteBuffer byteBuffer) {
    // Read through a duplicate so the caller's position/limit are untouched;
    // a deep clone of the contents is unnecessary for a read-only pass.
    ByteBuffer bb = byteBuffer.duplicate();
    byte[] bytes = new byte[bb.remaining()];
    bb.get(bytes);
    return new String(chars(bytes));
}

From source file:com.stratio.cassandra.index.util.ByteBufferUtils.java

License:Apache License

/**
 * Returns the specified {@link java.nio.ByteBuffer} as a byte array.
 *
 * The input buffer's position and limit are left untouched.
 *
 * @param byteBuffer a {@link java.nio.ByteBuffer} to be converted to a byte array.
 * @return the byte array representation of the {@code byteBuffer}.
 */
public static byte[] asArray(ByteBuffer byteBuffer) {
    // Read through a duplicate so the caller's position/limit are untouched;
    // a deep clone of the contents is unnecessary for a read-only pass.
    ByteBuffer bb = byteBuffer.duplicate();
    byte[] bytes = new byte[bb.remaining()];
    bb.get(bytes);
    return bytes;
}

From source file:com.stratio.cassandra.lucene.column.ColumnsMapper.java

License:Apache License

/**
 * Recursively decomposes {@code value} according to {@code type} and adds the
 * resulting leaf values to {@code columns}.
 *
 * Collections are iterated element by element, UDTs and tuples are split into
 * their fields (recursing with a qualified builder name), and any other type is
 * added directly as a decomposed column.
 *
 * @param columns destination column set (mutated)
 * @param builder builder carrying the column name being built up
 * @param type    the Cassandra type describing {@code value}
 * @param value   the serialized value to decompose; may be null for non-collections
 */
private void addColumns(Columns columns, ColumnBuilder builder, AbstractType type, ByteBuffer value) {
    if (type.isCollection()) {
        // Clone first: the serializer read* calls below advance the buffer's
        // position, and the caller's buffer must not be consumed.
        value = ByteBufferUtil.clone(value);
        CollectionType<?> collectionType = (CollectionType<?>) type;
        switch (collectionType.kind) {
        case SET: {
            AbstractType<?> nameType = collectionType.nameComparator();
            int colSize = CollectionSerializer.readCollectionSize(value, Server.CURRENT_VERSION);
            for (int j = 0; j < colSize; j++) {
                // each read advances `value`; order of reads is significant
                ByteBuffer itemValue = CollectionSerializer.readValue(value, Server.CURRENT_VERSION);
                addColumns(columns, builder, nameType, itemValue);
            }
            break;
        }
        case LIST: {
            AbstractType<?> valueType = collectionType.valueComparator();
            int colSize = CollectionSerializer.readCollectionSize(value, Server.CURRENT_VERSION);
            for (int j = 0; j < colSize; j++) {
                ByteBuffer itemValue = CollectionSerializer.readValue(value, Server.CURRENT_VERSION);
                addColumns(columns, builder, valueType, itemValue);
            }
            break;
        }
        case MAP: {
            AbstractType<?> keyType = collectionType.nameComparator();
            AbstractType<?> valueType = collectionType.valueComparator();
            int colSize = MapSerializer.readCollectionSize(value, Server.CURRENT_VERSION);
            for (int j = 0; j < colSize; j++) {
                // entries are serialized as alternating key/value pairs
                ByteBuffer mapKey = MapSerializer.readValue(value, Server.CURRENT_VERSION);
                ByteBuffer mapValue = MapSerializer.readValue(value, Server.CURRENT_VERSION);
                String itemName = keyType.compose(mapKey).toString();
                addColumns(columns, builder.withMapName(itemName), valueType, mapValue);
            }
            break;
        }
        default: {
            throw new IndexException("Unknown collection type %s", collectionType.kind);
        }
        }
    } else if (type instanceof UserType) {
        UserType userType = (UserType) type;
        ByteBuffer[] values = userType.split(value);
        for (int i = 0; i < userType.fieldNames().size(); i++) {
            String itemName = userType.fieldNameAsString(i);
            AbstractType<?> itemType = userType.fieldType(i);
            // This only occurs in UDT not fully composed
            if (values[i] != null) {
                addColumns(columns, builder.withUDTName(itemName), itemType, values[i]);
            }
        }
    } else if (type instanceof TupleType) {
        TupleType tupleType = (TupleType) type;
        ByteBuffer[] values = tupleType.split(value);
        for (Integer i = 0; i < tupleType.size(); i++) {
            String itemName = i.toString();
            AbstractType<?> itemType = tupleType.type(i);
            addColumns(columns, builder.withUDTName(itemName), itemType, values[i]);
        }
    } else {
        if (value != null) {
            columns.add(builder.buildWithDecomposed(value, type));
        }
    }
}

From source file:net.imagini.cassandra.DumpSSTables.SSTableExport.java

License:Apache License

/**
 * Serialize a given column to the JSON format
 * /* w w  w .j ava  2s . com*/
 * @param column
 *            column presentation
 * @param comparator
 *            columns comparator
 * @param cfMetaData
 *            Column Family metadata (to get validator)
 * 
 * @return column as serialized list
 */
private static List<Object> serializeColumn(IColumn column, AbstractType<?> comparator, CFMetaData cfMetaData) {
    ArrayList<Object> serializedColumn = new ArrayList<Object>();

    ByteBuffer name = ByteBufferUtil.clone(column.name());
    ByteBuffer value = ByteBufferUtil.clone(column.value());

    serializedColumn.add(comparator.getString(name));
    if (column instanceof DeletedColumn) {
        serializedColumn.add(ByteBufferUtil.bytesToHex(value));
    } else {
        AbstractType<?> validator = cfMetaData
                .getValueValidator(cfMetaData.getColumnDefinitionFromColumnName(name));
        serializedColumn.add(validator.getString(value));
    }
    serializedColumn.add(column.timestamp());

    if (column instanceof DeletedColumn) {
        serializedColumn.add("d");
    } else if (column instanceof ExpiringColumn) {
        serializedColumn.add("e");
        serializedColumn.add(((ExpiringColumn) column).getTimeToLive());
        serializedColumn.add(column.getLocalDeletionTime());
    } else if (column instanceof CounterColumn) {
        serializedColumn.add("c");
        serializedColumn.add(((CounterColumn) column).timestampOfLastDelete());
    }

    return serializedColumn;
}