Example usage for org.apache.cassandra.db ConsistencyLevel LOCAL_SERIAL

List of usage examples for org.apache.cassandra.db ConsistencyLevel LOCAL_SERIAL

Introduction

In this page you can find the example usage for org.apache.cassandra.db ConsistencyLevel LOCAL_SERIAL.

Prototype

ConsistencyLevel LOCAL_SERIAL

To view the source code for org.apache.cassandra.db ConsistencyLevel LOCAL_SERIAL, click the Source Link below.

Click Source Link

Usage

From source file:org.elassandra.cluster.InternalCassandraClusterService.java

License:Apache License

/**
 * Indexes (inserts or updates) an Elasticsearch document by writing it as a row into the
 * backing Cassandra table (keyspace taken from the index settings, table derived from the
 * mapping type).
 *
 * Flow: parse the source (triggering a blocking mapping/schema update if dynamic mapping
 * produced one), rebuild the row primary key from the _id and/or source columns, serialize
 * every mapped field to its CQL value, then either INSERT (optionally as a LWT "IF NOT
 * EXISTS" when a unique id must be enforced) or DELETE+INSERT for update operations.
 *
 * @param indicesService used to resolve the index service and shard
 * @param request        the index request (source, id, routing, parent, timestamp, ttl, ...)
 * @param indexMetaData  metadata of the target index
 * @param updateOperation true when this is an update rather than a create
 * @throws Exception on mapping, serialization or CQL execution failure; a
 *         DocumentAlreadyExistsException is thrown when a conditional insert is not applied
 */
public void upsertDocument(final IndicesService indicesService, final IndexRequest request,
        final IndexMetaData indexMetaData, boolean updateOperation) throws Exception {
    final IndexService indexService = indicesService.indexServiceSafe(request.index());
    final IndexShard indexShard = indexService.shardSafe(0);

    // Build the source to parse, propagating the optional routing/parent/timestamp/ttl.
    final SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source())
            .type(request.type()).id(request.id());
    if (request.routing() != null)
        sourceToParse.routing(request.routing());
    if (request.parent() != null)
        sourceToParse.parent(request.parent());
    if (request.timestamp() != null)
        sourceToParse.timestamp(request.timestamp());
    if (request.ttl() != null)
        sourceToParse.ttl(request.ttl());

    // Keyspace defaults to the index name unless overridden in the index settings.
    final String keyspaceName = indexService.indexSettings().get(IndexMetaData.SETTING_KEYSPACE,
            request.index());
    final String cfName = typeToCfName(request.type());

    final Engine.IndexingOperation operation = indexShard.prepareIndexOnPrimary(sourceToParse,
            request.version(), request.versionType(), request.canHaveDuplicates());
    final Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
    final boolean dynamicMappingEnable = indexService.indexSettings().getAsBoolean("index.mapper.dynamic",
            true);
    if (update != null && dynamicMappingEnable) {
        if (logger.isDebugEnabled())
            logger.debug("Document source={} require a blocking mapping update of [{}]", request.sourceAsMap(),
                    indexService.index().name());
        // blocking Elasticsearch mapping update (required to update cassandra schema before inserting a row, this is the cost of dynamic mapping)
        blockingMappingUpdate(indexService, request.type(), update.toString());
    }

    // get the docMapper after a potential mapping update
    final DocumentMapper docMapper = indexShard.mapperService().documentMapperWithAutoCreate(request.type())
            .getDocumentMapper();

    // insert document into cassandra keyspace=index, table = type
    final Map<String, Object> sourceMap = request.sourceAsMap();
    final Map<String, ObjectMapper> objectMappers = docMapper.objectMappers();
    final DocumentFieldMappers fieldMappers = docMapper.mappers();

    Long timestamp = null;
    if (docMapper.timestampFieldMapper().enabled() && request.timestamp() != null) {
        timestamp = docMapper.timestampFieldMapper().fieldType().value(request.timestamp());
    }

    if (logger.isTraceEnabled())
        logger.trace(
                "Insert metadata.version={} index=[{}] table=[{}] id=[{}] source={} fieldMappers={} objectMappers={} consistency={} ttl={}",
                state().metaData().version(), indexService.index().name(), cfName, request.id(), sourceMap,
                Lists.newArrayList(fieldMappers.iterator()), objectMappers,
                request.consistencyLevel().toCassandraConsistencyLevel(), request.ttl());

    final CFMetaData metadata = getCFMetaData(keyspaceName, cfName);

    String id = request.id();
    Map<String, ByteBuffer> map = new HashMap<String, ByteBuffer>();
    if (request.parent() != null)
        sourceMap.put(ParentFieldMapper.NAME, request.parent());

    // normalize the _id and may find some column value in _id.
    // if the provided columns does not contains all the primary key columns, parse the _id to populate the columns in map.
    boolean buildId = true;
    ArrayNode array = jsonMapper.createArrayNode();
    for (ColumnDefinition cd : Iterables.concat(metadata.partitionKeyColumns(), metadata.clusteringColumns())) {
        if (cd.name.toString().equals("_id")) {
            sourceMap.put("_id", request.id());
        }
        Object value = sourceMap.get(cd.name.toString());
        if (value != null) {
            addToJsonArray(cd.type, value, array);
        } else {
            // a PK column is missing from the source: derive it from the elastic _id instead.
            buildId = false;
            parseElasticId(request.index(), cfName, request.id(), sourceMap);
        }
    }
    if (buildId) {
        id = writeValueAsString(array);
    }

    // workaround because ParentFieldMapper.value() and UidFieldMapper.value() create an Uid.
    if (sourceMap.get(ParentFieldMapper.NAME) != null
            && ((String) sourceMap.get(ParentFieldMapper.NAME)).indexOf(Uid.DELIMITER) < 0) {
        sourceMap.put(ParentFieldMapper.NAME,
                request.type() + Uid.DELIMITER + sourceMap.get(ParentFieldMapper.NAME));
    }

    if (docMapper.sourceMapper().enabled()) {
        sourceMap.put(SourceFieldMapper.NAME, request.source());
    }

    // Serialize every source field to its CQL-typed ByteBuffer value.
    for (String field : sourceMap.keySet()) {
        FieldMapper fieldMapper = fieldMappers.getMapper(field);
        // fieldMapper may be null when the field is mapped by an ObjectMapper.
        Mapper mapper = (fieldMapper != null) ? fieldMapper : objectMappers.get(field);
        ByteBuffer colName;
        if (mapper == null) {
            if (dynamicMappingEnable)
                throw new MapperException("Unmapped field [" + field + "]");
            colName = ByteBufferUtil.bytes(field);
        } else {
            colName = mapper.cqlName(); // cached ByteBuffer column name.
        }
        final ColumnDefinition cd = metadata.getColumnDefinition(colName);
        if (cd != null) {
            // we got a CQL column.
            Object fieldValue = sourceMap.get(field);
            try {
                if (fieldValue == null) {
                    if (cd.type.isCollection()) {
                        switch (((CollectionType<?>) cd.type).kind) {
                        case LIST:
                        case SET:
                            // null lists/sets are written as empty collections.
                            map.put(field,
                                    CollectionSerializer.pack(Collections.emptyList(), 0, Server.VERSION_3));
                            break;
                        case MAP:
                            break;
                        }
                    } else {
                        map.put(field, null);
                    }
                    continue;
                }

                if (mapper != null && mapper.cqlCollection().equals(CqlCollection.SINGLETON)
                        && (fieldValue instanceof Collection)) {
                    // FIX: use mapper.name() rather than fieldMapper.name() — fieldMapper is
                    // null when the field is handled by an ObjectMapper, and dereferencing it
                    // here would throw NPE instead of the intended MapperParsingException.
                    throw new MapperParsingException(
                            "field " + mapper.name() + " should be a single value");
                }

                // hack to store percolate query as a string while mapper is an object mapper.
                if (metadata.cfName.equals("_percolator") && field.equals("query")) {
                    if (cd.type.isCollection()) {
                        switch (((CollectionType<?>) cd.type).kind) {
                        case LIST:
                            if (((ListType) cd.type).getElementsType().asCQL3Type().equals(CQL3Type.Native.TEXT)
                                    && !(fieldValue instanceof String)) {
                                // opaque list of objects serialized to JSON text 
                                fieldValue = Collections.singletonList(stringify(fieldValue));
                            }
                            break;
                        case SET:
                            if (((SetType) cd.type).getElementsType().asCQL3Type().equals(CQL3Type.Native.TEXT)
                                    && !(fieldValue instanceof String)) {
                                // opaque set of objects serialized to JSON text 
                                fieldValue = Collections.singleton(stringify(fieldValue));
                            }
                            break;
                        }
                    } else {
                        if (cd.type.asCQL3Type().equals(CQL3Type.Native.TEXT)
                                && !(fieldValue instanceof String)) {
                            // opaque singleton object serialized to JSON text 
                            fieldValue = stringify(fieldValue);
                        }
                    }
                }

                map.put(field, serializeType(request.index(), cfName, cd.type, field, fieldValue, mapper));
            } catch (Exception e) {
                logger.error("[{}].[{}] failed to parse field {}={}", e, request.index(), cfName, field,
                        fieldValue);
                throw e;
            }
        }
    }

    String query;
    ByteBuffer[] values;
    if (request.autoGeneratedId() || request.opType() == OpType.CREATE) {
        // Creation path: optionally enforce uniqueness with a LWT (IF NOT EXISTS) at
        // LOCAL_SERIAL; auto-generated ids are unique by construction so the check is off.
        boolean checkUniqueId = Booleans.parseBoolean(request.checkUniqueId(),
                (request.autoGeneratedId()) ? false : true);
        values = new ByteBuffer[map.size()];
        query = buildInsertQuery(keyspaceName, cfName, map, id, checkUniqueId,
                (request.ttl() != null) ? request.ttl().getSeconds() : null, // ttl
                timestamp, values, 0);
        final boolean applied = processConditional(request.consistencyLevel().toCassandraConsistencyLevel(),
                (checkUniqueId) ? ConsistencyLevel.LOCAL_SERIAL : null, query, values);
        if (!applied)
            throw new DocumentAlreadyExistsException(indexShard.shardId(), cfName, request.id());
    } else {
        // Update path: delete the existing row (with a timestamp just below the insert's,
        // so the insert wins), then insert the new row.
        values = new ByteBuffer[metadata.partitionKeyColumns().size() + metadata.clusteringColumns().size()];
        int i = 0;
        for (ColumnDefinition cd : metadata.partitionKeyColumns())
            values[i++] = map.get(cd.name.toString());
        for (ColumnDefinition cd : metadata.clusteringColumns())
            values[i++] = map.get(cd.name.toString());

        query = String.format((Locale) null, "DELETE FROM \"%s\".\"%s\" WHERE %s %s", keyspaceName, cfName,
                metadata.getCqlFragments().pkWhere,
                (timestamp != null) ? "USING TIMESTAMP " + Long.toString(timestamp * 1000 - 1) : "");
        process(request.consistencyLevel().toCassandraConsistencyLevel(), query, values);

        values = new ByteBuffer[map.size()];
        query = buildInsertQuery(keyspaceName, cfName, map, id, false,
                (request.ttl() != null) ? request.ttl().getSeconds() : null, timestamp, values, 0);
        process(request.consistencyLevel().toCassandraConsistencyLevel(), null, query, values);
    }
}

From source file:org.elassandra.cluster.InternalCassandraClusterService.java

License:Apache License

/**
 * Parses a consistency level name into a Cassandra {@link ConsistencyLevel}.
 * Matching is case-insensitive; the input is normalized with {@code Locale.ROOT}
 * so the result does not depend on the default locale.
 *
 * @param value the consistency level name, e.g. "LOCAL_QUORUM"
 * @return the matching {@code ConsistencyLevel} constant
 * @throws IllegalArgumentException if {@code value} matches no known level
 */
public static ConsistencyLevel consistencyLevelFromString(String value) {
    final String name = value.toUpperCase(Locale.ROOT);
    if ("ANY".equals(name))
        return ConsistencyLevel.ANY;
    if ("ONE".equals(name))
        return ConsistencyLevel.ONE;
    if ("TWO".equals(name))
        return ConsistencyLevel.TWO;
    if ("THREE".equals(name))
        return ConsistencyLevel.THREE;
    if ("QUORUM".equals(name))
        return ConsistencyLevel.QUORUM;
    if ("ALL".equals(name))
        return ConsistencyLevel.ALL;
    if ("LOCAL_QUORUM".equals(name))
        return ConsistencyLevel.LOCAL_QUORUM;
    if ("EACH_QUORUM".equals(name))
        return ConsistencyLevel.EACH_QUORUM;
    if ("SERIAL".equals(name))
        return ConsistencyLevel.SERIAL;
    if ("LOCAL_SERIAL".equals(name))
        return ConsistencyLevel.LOCAL_SERIAL;
    if ("LOCAL_ONE".equals(name))
        return ConsistencyLevel.LOCAL_ONE;
    throw new IllegalArgumentException("No write consistency match [" + value + "]");
}

From source file:org.elasticsearch.cassandra.cluster.InternalCassandraClusterService.java

License:Apache License

@Override
public String insertRow(final String ksName, final String cfName, Map<String, Object> map, String id,
        final boolean ifNotExists, final long ttl, final ConsistencyLevel cl, Long writetime, Boolean applied)
        throws Exception {

    CFMetaData metadata = getCFMetaData(ksName, cfName);
    // if the provided columns does not contains all the primary key columns, parse the _id to populate the columns in map.
    boolean buildId = true;
    ArrayNode array = SchemaService.Utils.jsonMapper.createArrayNode();
    for (ColumnDefinition cd : Iterables.concat(metadata.partitionKeyColumns(), metadata.clusteringColumns())) {
        if (map.keySet().contains(cd.name.toString())) {
            SchemaService.Utils.addToJsonArray(cd.type, map.get(cd.name.toString()), array);
        } else {/*from  w  w w.java  2s  . c  om*/
            buildId = false;
            parseElasticId(ksName, cfName, id, map);
        }
    }
    if (buildId) {
        id = SchemaService.Utils.writeValueAsString(array);
    }

    StringBuilder questionsMarks = new StringBuilder();
    StringBuilder columnNames = new StringBuilder();
    Object[] values = new Object[map.size()];
    int i = 0;
    for (Entry<String, Object> entry : map.entrySet()) {
        if (entry.getKey().equals("_token"))
            continue;
        if (columnNames.length() > 0) {
            columnNames.append(',');
            questionsMarks.append(',');
        }
        columnNames.append("\"").append(entry.getKey()).append("\"");
        questionsMarks.append('?');
        values[i++] = entry.getValue();
    }

    StringBuilder query = new StringBuilder();
    query.append("INSERT INTO \"").append(ksName).append("\".\"").append(cfName).append("\" (")
            .append(columnNames.toString()).append(") VALUES (").append(questionsMarks.toString()).append(") ");
    if (ifNotExists)
        query.append("IF NOT EXISTS ");
    if (ttl > 0 || writetime > 0)
        query.append("USING ");
    if (ttl > 0)
        query.append("TTL ").append(Long.toString(ttl));
    if (ttl > 0 && writetime > 0)
        query.append(" AND ");
    if (writetime > 0)
        query.append("TIMESTAMP ").append(Long.toString(writetime));

    try {
        UntypedResultSet result = process(cl, (ifNotExists) ? ConsistencyLevel.LOCAL_SERIAL : null,
                query.toString(), values);
        if (ifNotExists) {
            if (!result.isEmpty()) {
                Row row = result.one();
                if (row.has("[applied]")) {
                    applied = row.getBoolean("[applied]");
                }
            }
        } else {
            applied = true;
        }
        return id;
    } catch (Exception e) {
        logger.error("Failed to process query=" + query + " values=" + Arrays.toString(values), e);
        throw e;
    }
}

From source file:org.elasticsearch.cassandra.cluster.InternalCassandraClusterService.java

License:Apache License

@Override
public void initializeMetaDataAsComment() {
    MetaData metadata = state().metaData();
    try {/*from   w w  w .  j  a  v a 2 s . c o  m*/
        String metaDataString = MetaData.builder().toXContent(metadata, persistedParams);
        // initialize a first row if needed
        UntypedResultSet result = process(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_SERIAL,
                String.format(
                        "INSERT INTO \"%s\".\"%s\" (dc,owner,version,metadata) VALUES (?,?,?,?) IF NOT EXISTS",
                        ELASTIC_ADMIN_KEYSPACE, metaDataTableName),
                DatabaseDescriptor.getLocalDataCenter(),
                UUID.fromString(StorageService.instance.getLocalHostId()), metadata.version(), metaDataString);

        Row row = result.one();
        boolean applied = false;
        if (row.has("[applied]")) {
            applied = row.getBoolean("[applied]");
        }
        if (applied) {
            logger.debug("Succefully initialize metadata metaData={}", metadata);
            writeMetaDataAsComment(metaDataString);
        }
    } catch (Exception e) {
        logger.error("Failed to initialize persisted metadata", e);
    }
}

From source file:org.elasticsearch.cassandra.cluster.InternalCassandraClusterService.java

License:Apache License

@Override
public void persistMetaData(MetaData oldMetaData, MetaData newMetaData, String source)
        throws IOException, InvalidRequestException, RequestExecutionException, RequestValidationException {
    if (!newMetaData.uuid().equals(localNode().id())) {
        logger.error("should not push metadata updated from another node {}/{}", newMetaData.uuid(),
                newMetaData.version());//  ww  w .  ja va  2s  . com
        return;
    }
    if (newMetaData.uuid().equals(state().metaData().uuid())
            && newMetaData.version() < state().metaData().version()) {
        logger.warn("don't push obsolete metadata uuid={} version {} < {}", newMetaData.uuid(),
                newMetaData.version(), state().metaData().version());
        return;
    }
    String metaDataString = MetaData.builder().toXContent(newMetaData,
            InternalCassandraClusterService.persistedParams);
    UntypedResultSet result = process(ConsistencyLevel.LOCAL_ONE, ConsistencyLevel.LOCAL_SERIAL,
            updateMetaDataQuery,
            new Object[] { UUID.fromString(localNode().id()), newMetaData.version(), metaDataString,
                    DatabaseDescriptor.getLocalDataCenter(), UUID.fromString(oldMetaData.uuid()),
                    oldMetaData.version() });
    Row row = result.one();
    boolean applied = false;
    if (row.has("[applied]")) {
        applied = row.getBoolean("[applied]");
    }
    if (applied) {
        logger.debug("Succefully update metadata source={} newMetaData={}", source, metaDataString);
        writeMetaDataAsComment(metaDataString);
        return;
    } else {
        logger.warn("Failed to update metadata source={} oldMetadata={}/{} currentMetaData={}/{}", source,
                oldMetaData.uuid(), oldMetaData.version(), row.getUUID("owner"), row.getLong("version"));
        throw new ConcurrentMetaDataUpdateException(row.getUUID("owner"), row.getLong("version"));
    }
}

From source file:org.elasticsearch.cluster.ClusterService.java

License:Apache License

@SuppressForbidden(reason = "toUpperCase() for consistency level")
public static ConsistencyLevel consistencyLevelFromString(String value) {
    switch (value.toUpperCase()) {
    case "ANY":
        return ConsistencyLevel.ANY;
    case "ONE":
        return ConsistencyLevel.ONE;
    case "TWO":
        return ConsistencyLevel.TWO;
    case "THREE":
        return ConsistencyLevel.THREE;
    case "QUORUM":
        return ConsistencyLevel.QUORUM;
    case "ALL":
        return ConsistencyLevel.ALL;
    case "LOCAL_QUORUM":
        return ConsistencyLevel.LOCAL_QUORUM;
    case "EACH_QUORUM":
        return ConsistencyLevel.EACH_QUORUM;
    case "SERIAL":
        return ConsistencyLevel.SERIAL;
    case "LOCAL_SERIAL":
        return ConsistencyLevel.LOCAL_SERIAL;
    case "LOCAL_ONE":
        return ConsistencyLevel.LOCAL_ONE;
    default://from  w  w  w  .j a va  2 s .  c  om
        throw new IllegalArgumentException("No write consistency match [" + value + "]");
    }
}