Example usage for org.apache.cassandra.cql3 UntypedResultSet isEmpty

List of usage examples for org.apache.cassandra.cql3 UntypedResultSet isEmpty

Introduction

This page lists example usages of org.apache.cassandra.cql3.UntypedResultSet.isEmpty().

Prototype

public boolean isEmpty() 
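
isEmpty() returns true when the statement produced no rows, so it is the usual guard before reading rows back. Below is a minimal sketch of that guard; it is not taken from the sources on this page, and the system.peers query is only an illustration, but it goes through the same QueryProcessor.executeInternal entry point as the examples that follow.

public static void printPeers() {
    // system.peers may hold no rows (e.g. on a single-node cluster);
    // isEmpty() lets us report that case instead of iterating over nothing.
    UntypedResultSet peers = QueryProcessor.executeInternal("SELECT peer FROM system.peers");
    if (peers.isEmpty()) {
        System.out.println("no peers: single-node cluster");
        return;
    }
    for (UntypedResultSet.Row row : peers) {
        System.out.println("peer: " + row.getInetAddress("peer"));
    }
}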

Usage

From source file:org.elassandra.cluster.InternalCassandraClusterService.java

License:Apache License
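
This example uses isEmpty() to guard the read of the [applied] column returned by a conditional (lightweight transaction) statement; when no serial consistency level is given, the statement is unconditional and simply reported as applied.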

public boolean processConditional(final ConsistencyLevel cl, final ConsistencyLevel serialCl,
        final String query, Object... values)
        throws RequestExecutionException, RequestValidationException, InvalidRequestException {
    try {
        UntypedResultSet result = process(cl, serialCl, query, values);
        if (serialCl != null) {
            if (!result.isEmpty()) {
                Row row = result.one();
                if (row.has("[applied]")) {
                    return row.getBoolean("[applied]");
                }
            }
            return false;
        }
        return true;
    } catch (Exception e) {
        logger.error("Failed to process query=" + query + " values=" + Arrays.toString(values), e);
        throw e;
    }
}

From source file:org.elassandra.cluster.InternalCassandraClusterService.java

License:Apache License
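
Here isEmpty() decides between building a Translog.Source from the fetched row and returning a not-found Engine.GetResult.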

public Engine.GetResult fetchSourceInternal(final String ksName, String index, String type, String id)
        throws IOException {
    DocPrimaryKey docPk = parseElasticId(index, type, id);
    String[] columns = mappedColumns(index, type, docPk.isStaticDocument, true);
    UntypedResultSet result = fetchRowInternal(ksName, index, type, docPk, columns);
    if (!result.isEmpty()) {
        Map<String, Object> sourceMap = rowAsMap(index, type, result.one());
        BytesReference source = XContentFactory.contentBuilder(XContentType.JSON).map(sourceMap).bytes();
        Long timestamp = 0L;
        if (sourceMap.get(TimestampFieldMapper.NAME) != null) {
            timestamp = (Long) sourceMap.get(TimestampFieldMapper.NAME);
        }
        Long ttl = 0L;
        if (sourceMap.get(TTLFieldMapper.NAME) != null) {
            ttl = (Long) sourceMap.get(TTLFieldMapper.NAME);
        }
        Translog.Source translogSource = new Translog.Source(source,
                (String) sourceMap.get(RoutingFieldMapper.NAME), (String) sourceMap.get(ParentFieldMapper.NAME),
                timestamp, ttl);
        return new Engine.GetResult(true, 1L, translogSource);

    }
    return new Engine.GetResult(false, -1, null);
}

From source file:org.elassandra.cluster.InternalCassandraClusterService.java

License:Apache License
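
Here isEmpty() skips the iteration over the percolator table when it holds no stored queries.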

/**
 * Load percolator queries from the percolator table.
 * @param indexService the index service whose keyspace holds the percolator table
 * @return a map of percolator query ids to parsed queries
 */
@Override
public Map<BytesRef, Query> loadQueries(final IndexService indexService, PercolatorQueriesRegistry percolator) {
    final String ksName = indexService.indexSettings().get(IndexMetaData.SETTING_KEYSPACE,
            indexService.index().name());
    final String cql = String.format((Locale) null, "SELECT \"_id\", query FROM \"%s\".\"%s\"", ksName,
            PERCOLATOR_TABLE);
    UntypedResultSet results = QueryProcessor.executeInternal(cql);
    Map<BytesRef, Query> queries = new HashMap<BytesRef, Query>();
    if (!results.isEmpty()) {
        for (Row row : results) {
            String query = row.getString("query");
            String id = row.getString("_id");
            try {
                // id is only used for logging, if we fail we log the id in the catch statement
                /*
                final Query parseQuery = percolator.parsePercolatorDocument(null, new BytesArray(query.getBytes("UTF-8")));
                if (parseQuery != null) {
                    queries.put(new BytesRef(id), parseQuery);
                } else {
                    logger.warn("failed to add query [{}] - parser returned null", id);
                }
                */
            } catch (Exception e) {
                logger.warn("failed to add query [{}]", e, id);
            }
        }
    }
    return queries;
}

From source file:org.elassandra.cluster.InternalCassandraClusterService.java

License:Apache License
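
An empty result means the metadata comment is missing, so isEmpty() is translated into a NoPersistedMetaDataException.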

/**
 * Should only be used after a SCHEMA change.
 * @throws NoPersistedMetaDataException if the metadata comment cannot be read
 */
@Override
public MetaData readMetaDataAsComment() throws NoPersistedMetaDataException {
    try {
        String query = String.format((Locale) null,
                "SELECT comment FROM system.schema_columnfamilies WHERE keyspace_name='%s' AND columnfamily_name='%s'",
                this.elasticAdminKeyspaceName, ELASTIC_ADMIN_METADATA_TABLE);
        UntypedResultSet result = QueryProcessor.executeInternal(query);
        if (result.isEmpty())
            throw new NoPersistedMetaDataException("Failed to read comment from " + elasticAdminKeyspaceName
                    + "+" + ELASTIC_ADMIN_METADATA_TABLE);

        String metadataString = result.one().getString(0);
        logger.debug("Recover metadata from {}.{} = {}", elasticAdminKeyspaceName, ELASTIC_ADMIN_METADATA_TABLE,
                metadataString);
        return parseMetaDataString(metadataString);
    } catch (RequestValidationException | RequestExecutionException e) {
        throw new NoPersistedMetaDataException(
                "Failed to read comment from " + elasticAdminKeyspaceName + "+" + ELASTIC_ADMIN_METADATA_TABLE,
                e);
    }
}

From source file:org.elassandra.cluster.InternalCassandraClusterService.java

License:Apache License
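
isEmpty() distinguishes the first-time creation of the elastic_admin keyspace from an update of its replication settings.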

/**
 * Create or update the elastic_admin keyspace.
 */
@Override
public void createOrUpdateElasticAdminKeyspace() {
    UntypedResultSet result = QueryProcessor.executeInternal(String.format((Locale) null,
            "SELECT strategy_class, strategy_options FROM system.schema_keyspaces WHERE keyspace_name='%s'",
            elasticAdminKeyspaceName));
    logger.info("elasticAdminMetadata exists={}", !result.isEmpty());
    if (result.isEmpty()) {
        MetaData metadata = state().metaData();
        try {
            String metaDataString = MetaData.Builder.toXContent(metadata);

            JSONObject replication = new JSONObject();
            replication.put("class", NetworkTopologyStrategy.class.getName());
            replication.put(DatabaseDescriptor.getLocalDataCenter(),
                    Integer.toString(getLocalDataCenterSize()));

            String createKeyspace = String.format((Locale) null,
                    "CREATE KEYSPACE IF NOT EXISTS \"%s\" WITH replication = %s;", elasticAdminKeyspaceName,
                    replication.toJSONString().replaceAll("\"", "'"));
            logger.info(createKeyspace);
            process(ConsistencyLevel.LOCAL_ONE, createKeyspace);

            String createTable = String.format((Locale) null,
                    "CREATE TABLE IF NOT EXISTS \"%s\".%s ( cluster_name text PRIMARY KEY, owner uuid, version bigint, metadata text) WITH comment='%s';",
                    elasticAdminKeyspaceName, ELASTIC_ADMIN_METADATA_TABLE,
                    MetaData.Builder.toXContent(metadata));
            logger.info(createTable);
            process(ConsistencyLevel.LOCAL_ONE, createTable);

            // initialize a first row if needed
            process(ConsistencyLevel.LOCAL_ONE, insertMetadataQuery, DatabaseDescriptor.getClusterName(),
                    UUID.fromString(StorageService.instance.getLocalHostId()), metadata.version(),
                    metaDataString);
            logger.info("Successfully initialized {}.{} = {}", elasticAdminKeyspaceName,
                    ELASTIC_ADMIN_METADATA_TABLE, metaDataString);
            writeMetaDataAsComment(metaDataString);
        } catch (Throwable e) {
            logger.error("Failed to initialize table {}.{}", e, elasticAdminKeyspaceName,
                    ELASTIC_ADMIN_METADATA_TABLE);
        }
    } else {
        Row row = result.one();
        if (!NetworkTopologyStrategy.class.getName().equals(row.getString("strategy_class"))) {
            throw new ConfigurationException("Keyspace [" + this.elasticAdminKeyspaceName + "] should use "
                    + NetworkTopologyStrategy.class.getName() + " replication strategy");
        }

        JSONObject replication;
        try {
            replication = (JSONObject) new JSONParser().parse(row.getString("strategy_options"));
            int currentRF = -1;
            if (replication.get(DatabaseDescriptor.getLocalDataCenter()) != null) {
                currentRF = Integer
                        .valueOf(replication.get(DatabaseDescriptor.getLocalDataCenter()).toString());
            }
            int targetRF = getLocalDataCenterSize();
            if (targetRF != currentRF) {
                replication.put(DatabaseDescriptor.getLocalDataCenter(), Integer.toString(targetRF));
                replication.put("class", NetworkTopologyStrategy.class.getName());
                try {
                    String query = String.format((Locale) null, "ALTER KEYSPACE \"%s\" WITH replication = %s",
                            elasticAdminKeyspaceName, replication.toJSONString().replaceAll("\"", "'"));
                    process(ConsistencyLevel.LOCAL_ONE, query);
                    logger.info(query);
                } catch (Throwable e) {
                    logger.error("Failed to alter keyspace [{}]", e, this.elasticAdminKeyspaceName);
                    throw e;
                }
            } else {
                logger.info("Keep unchanged keyspace={} datacenter={} RF={}", elasticAdminKeyspaceName,
                        DatabaseDescriptor.getLocalDataCenter(), targetRF);
            }
        } catch (ParseException e1) {
            throw new ConfigurationException("Failed to update " + elasticAdminKeyspaceName, e1);
        }

    }
}

From source file:org.elasticsearch.cassandra.cluster.InternalCassandraClusterService.java

License:Apache License
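
As in processConditional above, isEmpty() guards the read of the [applied] column after an INSERT ... IF NOT EXISTS.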

@Override
public String insertRow(final String ksName, final String cfName, Map<String, Object> map, String id,
        final boolean ifNotExists, final long ttl, final ConsistencyLevel cl, Long writetime, Boolean applied)
        throws Exception {

    CFMetaData metadata = getCFMetaData(ksName, cfName);
    // if the provided columns do not contain all the primary key columns, parse the _id to populate them in map.
    boolean buildId = true;
    ArrayNode array = SchemaService.Utils.jsonMapper.createArrayNode();
    for (ColumnDefinition cd : Iterables.concat(metadata.partitionKeyColumns(), metadata.clusteringColumns())) {
        if (map.keySet().contains(cd.name.toString())) {
            SchemaService.Utils.addToJsonArray(cd.type, map.get(cd.name.toString()), array);
        } else {
            buildId = false;
            parseElasticId(ksName, cfName, id, map);
        }
    }
    if (buildId) {
        id = SchemaService.Utils.writeValueAsString(array);
    }

    StringBuilder questionsMarks = new StringBuilder();
    StringBuilder columnNames = new StringBuilder();
    Object[] values = new Object[map.size()];
    int i = 0;
    for (Entry<String, Object> entry : map.entrySet()) {
        if (entry.getKey().equals("_token"))
            continue;
        if (columnNames.length() > 0) {
            columnNames.append(',');
            questionsMarks.append(',');
        }
        columnNames.append("\"").append(entry.getKey()).append("\"");
        questionsMarks.append('?');
        values[i++] = entry.getValue();
    }

    StringBuilder query = new StringBuilder();
    query.append("INSERT INTO \"").append(ksName).append("\".\"").append(cfName).append("\" (")
            .append(columnNames.toString()).append(") VALUES (").append(questionsMarks.toString()).append(") ");
    if (ifNotExists)
        query.append("IF NOT EXISTS ");
    if (ttl > 0 || writetime > 0)
        query.append("USING ");
    if (ttl > 0)
        query.append("TTL ").append(Long.toString(ttl));
    if (ttl > 0 && writetime > 0)
        query.append(" AND ");
    if (writetime > 0)
        query.append("TIMESTAMP ").append(Long.toString(writetime));

    try {
        UntypedResultSet result = process(cl, (ifNotExists) ? ConsistencyLevel.LOCAL_SERIAL : null,
                query.toString(), values);
        if (ifNotExists) {
            if (!result.isEmpty()) {
                Row row = result.one();
                if (row.has("[applied]")) {
                    applied = row.getBoolean("[applied]");
                }
            }
        } else {
            applied = true;
        }
        return id;
    } catch (Exception e) {
        logger.error("Failed to process query=" + query + " values=" + Arrays.toString(values), e);
        throw e;
    }
}

From source file:org.elasticsearch.index.get.ShardGetService.java

License:Apache License
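
When the row fetched from Cassandra is empty, the method returns a not-found GetResult instead of mapping the source.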

private GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version,
        VersionType versionType, FetchSourceContext fetchSourceContext, boolean ignoreErrorsOnGeneratedFields) {
    fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, gFields);

    //Engine.GetResult get = null;
    if (type == null || type.equals("_all")) {
        try {
            for (String typeX : mapperService.types()) {
                // search for the matching type (table)
                if (clusterService.rowExists(shardId.index().name(), typeX, id)) {
                    type = typeX;
                    break;
                }
            }
        } catch (RequestExecutionException | RequestValidationException | IOException e1) {
            throw new ElasticsearchException("Cannot fetch source type [" + type + "] and id [" + id + "]", e1);
        }
    }
    if (type == null || type.equals("_all")) {
        return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
    }

    DocumentMapper docMapper = mapperService.documentMapper(type);
    if (docMapper == null) {
        //get.release();
        return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
    }

    Set<String> columns = new HashSet<String>();
    if ((gFields != null) && (!fetchSourceContext.fetchSource())) {
        for (String field : gFields) {
            int i = field.indexOf('.');
            String colName = (i > 0) ? field.substring(0, i) : field;
            if (!columns.contains(colName))
                columns.add(colName);
        }
    } else {
        try {
            for (String s : clusterService.mappedColumns(mapperService.index().name(), new Uid(type, id)))
                columns.add(s);
        } catch (IOException e) {
            throw new ElasticsearchException("Cannot parse id for type [" + type + "] and id [" + id + "]", e);
        }
    }

    if (docMapper.parentFieldMapper().active()) {
        columns.add(ParentFieldMapper.NAME);
    }
    if (docMapper.timestampFieldMapper().enabled()) {
        columns.add(TimestampFieldMapper.NAME);
    }
    if (docMapper.TTLFieldMapper().enabled()) {
        columns.add(TTLFieldMapper.NAME);
    }
    if (docMapper.sourceMapper().enabled()) {
        columns.add(SourceFieldMapper.NAME);
    }

    Map<String, GetField> fields = null;
    /*
    SearchLookup searchLookup = null;
    try {
    // break between having loaded it from translog (so we only have _source), and having a document to load
    if (get.docIdAndVersion() != null) {
        return innerGetLoadFromStoredFields(type, id, gFields, fetchSourceContext, get, docMapper, ignoreErrorsOnGeneratedFields);
    } else {
        Translog.Source source = get.source();
            
        Map<String, GetField> fields = null;
        SearchLookup searchLookup = null;
            
        // we can only load scripts that can run against the source
        Set<String> neededFields = new HashSet<>();
        // add meta fields
        neededFields.add(RoutingFieldMapper.NAME);
        if (docMapper.parentFieldMapper().active()) {
            neededFields.add(ParentFieldMapper.NAME);
        }
        if (docMapper.timestampFieldMapper().enabled()) {
            neededFields.add(TimestampFieldMapper.NAME);
        }
        if (docMapper.TTLFieldMapper().enabled()) {
            neededFields.add(TTLFieldMapper.NAME);
        }
        // add requested fields
        if (gFields != null) {
            neededFields.addAll(Arrays.asList(gFields));
        }
        for (String field : neededFields) {
            if (SourceFieldMapper.NAME.equals(field)) {
                // dealt with when normalizing fetchSourceContext.
                continue;
            }
            Object value = null;
            if (field.equals(RoutingFieldMapper.NAME)) {
                value = source.routing;
            } else if (field.equals(ParentFieldMapper.NAME) && docMapper.parentFieldMapper().active()) {
                value = source.parent;
            } else if (field.equals(TimestampFieldMapper.NAME) && docMapper.timestampFieldMapper().enabled()) {
                value = source.timestamp;
            } else if (field.equals(TTLFieldMapper.NAME) && docMapper.TTLFieldMapper().enabled()) {
                // Call value for search with timestamp + ttl here to display the live remaining ttl value and be consistent with the search result display
                if (source.ttl > 0) {
                    value = docMapper.TTLFieldMapper().valueForSearch(source.timestamp + source.ttl);
                }
            } else {
                if (searchLookup == null) {
                    searchLookup = new SearchLookup(mapperService, null, new String[]{type});
                    searchLookup.source().setSource(source.source);
                }
            
                FieldMapper fieldMapper = docMapper.mappers().smartNameFieldMapper(field);
                if (fieldMapper == null) {
                    if (docMapper.objectMappers().get(field) != null) {
                        // Only fail if we know it is a object field, missing paths / fields shouldn't fail.
                        throw new IllegalArgumentException("field [" + field + "] isn't a leaf field");
                    }
                } else if (shouldGetFromSource(ignoreErrorsOnGeneratedFields, docMapper, fieldMapper)) {
                    List<Object> values = searchLookup.source().extractRawValues(field);
                    if (!values.isEmpty()) {
                        for (int i = 0; i < values.size(); i++) {
                            values.set(i, fieldMapper.fieldType().valueForSearch(values.get(i)));
                        }
                        value = values;
                    }
            
                }
            }
            if (value != null) {
                if (fields == null) {
                    fields = newHashMapWithExpectedSize(2);
                }
                if (value instanceof List) {
                    fields.put(field, new GetField(field, (List) value));
                } else {
                    fields.put(field, new GetField(field, Collections.singletonList(value)));
                }
            }
        }
    */
    // deal with source, but only if it's enabled (we always have it from the translog)
    Map<String, Object> sourceAsMap = null;
    BytesReference sourceToBeReturned = null;
    SourceFieldMapper sourceFieldMapper = docMapper.sourceMapper();

    // In Elassandra, the Engine no longer stores the source; it is fetched from Cassandra instead.
    try {
        UntypedResultSet result = clusterService.fetchRow(
                clusterService.state().metaData().index(shardId.index().name()).keyspace(),
                shardId.index().name(), type, id, columns.toArray(new String[columns.size()]));
        if (result.isEmpty()) {
            return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
        }
        sourceAsMap = clusterService.rowAsMap(shardId.index().name(), type, result.one());
        if (fetchSourceContext.fetchSource()) {
            sourceToBeReturned = clusterService.source(docMapper, sourceAsMap, shardId.index().name(), type,
                    id);
        }
    } catch (RequestExecutionException | RequestValidationException | IOException e1) {
        throw new ElasticsearchException("Cannot fetch source type [" + type + "] and id [" + id + "]", e1);
    }

    if (gFields != null && gFields.length > 0) {
        fields = new HashMap<String, GetField>();
        clusterService.flattenGetField(gFields, "", sourceAsMap, fields);
    }

    if (fetchSourceContext.fetchSource() && sourceFieldMapper.enabled()) {
        // Cater for source excludes/includes at the cost of performance
        // We must first apply the field mapper filtering to make sure we get correct results
        // in the case that the fetchSourceContext white lists something that's not included by the field mapper

        boolean sourceFieldFiltering = sourceFieldMapper.includes().length > 0
                || sourceFieldMapper.excludes().length > 0;
        boolean sourceFetchFiltering = fetchSourceContext.includes().length > 0
                || fetchSourceContext.excludes().length > 0;
        if (fetchSourceContext.transformSource() || sourceFieldFiltering || sourceFetchFiltering) {
            // TODO: the source might already be parsed and available in the sourceLookup, but that one uses unordered maps, so it may differ. Do we care?
            XContentType sourceContentType = XContentType.JSON;
            if (fetchSourceContext.transformSource()) {
                sourceAsMap = docMapper.transformSourceAsMap(sourceAsMap);
            }
            if (sourceFieldFiltering) {
                sourceAsMap = XContentMapValues.filter(sourceAsMap, sourceFieldMapper.includes(),
                        sourceFieldMapper.excludes());
            }
            if (sourceFetchFiltering) {
                sourceAsMap = XContentMapValues.filter(sourceAsMap, fetchSourceContext.includes(),
                        fetchSourceContext.excludes());
            }
            try {
                sourceToBeReturned = XContentFactory.contentBuilder(sourceContentType).map(sourceAsMap).bytes();
            } catch (IOException e) {
                throw new ElasticsearchException(
                        "Failed to get type [" + type + "] and id [" + id + "] with includes/excludes set", e);
            }
        }
    }

    return new GetResult(shardId.index().name(), type, id, 1L, true, sourceToBeReturned, fields);
}

From source file:org.elasticsearch.search.fetch.FetchPhase.java

License:Apache License
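
Here isEmpty() prevents mapping an absent row while stored fields are loaded from Cassandra.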

private void loadStoredFields(SearchContext searchContext, LeafReaderContext readerContext,
        FieldsVisitor fieldVisitor, int docId) {
    fieldVisitor.reset();
    try {
        readerContext.reader().document(docId, fieldVisitor);
    } catch (IOException e) {
        throw new FetchPhaseExecutionException(searchContext, "Failed to fetch doc id [" + docId + "]", e);
    }

    // load field from cassandra
    if (!(fieldVisitor instanceof JustUidFieldsVisitor)) {
        try {
            DocPrimaryKey docPk = clusterService.parseElasticId(searchContext.request().index(),
                    fieldVisitor.uid().type(), fieldVisitor.uid().id());
            String typeKey = fieldVisitor.uid().type();
            if (docPk.isStaticDocument)
                typeKey += "_static";

            String cqlQuery = searchContext.getCqlFetchQuery(typeKey);
            if (cqlQuery == null) {
                Set<String> requiredColumns = fieldVisitor.requiredColumns(clusterService, searchContext);
                if (requiredColumns.size() > 0) {
                    IndexMetaData indexMetaData = clusterService.state().metaData()
                            .index(searchContext.request().index());
                    if (requiredColumns.contains(NodeFieldMapper.NAME)) {
                        searchContext.includeNode(indexMetaData.getSettings().getAsBoolean(
                                IndexMetaData.SETTING_INCLUDE_NODE_ID,
                                clusterService.settings().getAsBoolean(
                                        InternalCassandraClusterService.SETTING_CLUSTER_DEFAULT_INCLUDE_NODE_ID,
                                        false)));
                        requiredColumns.remove(NodeFieldMapper.NAME);
                    }
                    if (fieldVisitor.loadSource() && searchContext.mapperService()
                            .documentMapper(fieldVisitor.uid().type()).sourceMapper().enabled()) {
                        requiredColumns.add(SourceFieldMapper.NAME);
                    }
                    if (requiredColumns.size() > 0) {
                        cqlQuery = clusterService.buildFetchQuery(indexMetaData.keyspace(),
                                searchContext.request().index(), fieldVisitor.uid().type(),
                                requiredColumns.toArray(new String[requiredColumns.size()]),
                                docPk.isStaticDocument);
                        searchContext.putFetchQuery(typeKey, cqlQuery);
                    }
                }
            }

            if (cqlQuery != null) {
                UntypedResultSet result = QueryProcessor.executeInternal(cqlQuery, docPk.values);
                if (!result.isEmpty()) {
                    Map<String, Object> mapObject = clusterService.rowAsMap(searchContext.request().index(),
                            fieldVisitor.uid().type(), result.one());
                    if (searchContext.includeNode()) {
                        mapObject.put(NodeFieldMapper.NAME, clusterService.state().nodes().localNodeId());
                    }
                    if (fieldVisitor.requestedFields() != null && fieldVisitor.requestedFields().size() > 0) {
                        Map<String, List<Object>> flatMap = new HashMap<String, List<Object>>();
                        clusterService.flattenTree(fieldVisitor.requestedFields(), "", mapObject, flatMap);
                        for (String field : fieldVisitor.requestedFields()) {
                            if (flatMap.get(field) != null && !IdFieldMapper.NAME.equals(field))
                                fieldVisitor.setValues(field, flatMap.get(field));
                        }
                    }
                    if (fieldVisitor.loadSource()) {
                        fieldVisitor.source(clusterService.source(
                                searchContext.mapperService().documentMapper(fieldVisitor.uid().type()),
                                mapObject, searchContext.request().index(), fieldVisitor.uid()));
                    }
                }
            } else {
                // only the _node field was requested
                if (searchContext.includeNode()) {
                    List<Object> values = new ArrayList<Object>(1);
                    values.add(clusterService.state().nodes().localNodeId());
                    fieldVisitor.setValues(NodeFieldMapper.NAME, values);
                }
            }
        } catch (Exception e) {
            Logger.getLogger(FetchPhase.class).error("Fetch failed id=" + fieldVisitor.uid().id(), e);
            throw new FetchPhaseExecutionException(searchContext,
                    "Failed to fetch doc id [" + fieldVisitor.uid().id() + "] from cassandra", e);
        }
    }
}