Example usage for com.google.common.collect Sets newLinkedHashSetWithExpectedSize

Introduction

On this page you can find example usages of com.google.common.collect Sets newLinkedHashSetWithExpectedSize, collected from open-source projects.

Prototype

public static <E> LinkedHashSet<E> newLinkedHashSetWithExpectedSize(int expectedSize) 

Document

Creates a LinkedHashSet instance, with a high enough "initial capacity" that it should hold expectedSize elements without growth.
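
A minimal, self-contained sketch of typical usage (the class name is invented for illustration, not taken from the examples below): the expected size gives the set a large enough initial capacity that copying that many elements causes no growth, while the LinkedHashSet still deduplicates and preserves insertion order.

import java.util.LinkedHashSet;
import java.util.List;

import com.google.common.collect.Sets;

public class NewLinkedHashSetExample {
    public static void main(String[] args) {
        List<String> input = List.of("a", "b", "a", "c");
        // Sized to the input, so no resizing happens while copying.
        LinkedHashSet<String> set = Sets.newLinkedHashSetWithExpectedSize(input.size());
        set.addAll(input);
        System.out.println(set); // prints [a, b, c]
    }
}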

Usage

From source file: org.terasology.world.propagation.StandardBatchPropagator.java

private void processIncrease() {
    int depth = 0;
    while (depth < rules.getMaxValue() - 1) {
        byte value = (byte) (rules.getMaxValue() - depth);
        Set<Vector3i> toProcess = increaseQueues[depth];
        if (!toProcess.isEmpty()) {
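            // Swap in a fresh, equally sized queue before iterating, so that any
            // positions pushed back to this depth accumulate in the new set and are
            // picked up by the isEmpty() check below.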
            increaseQueues[depth] = Sets.newLinkedHashSetWithExpectedSize(toProcess.size());

            for (Vector3i pos : toProcess) {
                push(pos, value);
            }
            if (increaseQueues[depth].isEmpty()) {
                depth++;
            }
        } else {
            depth++;
        }
    }
}

From source file: com.google.gerrit.server.git.GroupCollector.java

public void visit(RevCommit c) {
    checkState(!done, "visit() called after getGroups()");
    Set<RevCommit> interestingParents = getInterestingParents(c);

    if (interestingParents.size() == 0) {
        // All parents are uninteresting: treat this commit as the root of a new
        // group of related changes.
        groups.put(c, c.name());
        return;
    } else if (interestingParents.size() == 1) {
        // Only one parent is new in this push. If it is the only parent, just use
        // that parent's group. If there are multiple parents, perhaps this commit
        // is a merge of a side branch. This commit belongs in that parent's group
        // in that case.
        groups.putAll(c, groups.get(interestingParents.iterator().next()));
        return;
    }

    // Multiple parents, merging at least two branches containing new commits in
    // this push.
    Set<String> thisCommitGroups = new TreeSet<>();
    Set<String> parentGroupsNewInThisPush = Sets.newLinkedHashSetWithExpectedSize(interestingParents.size());
    for (RevCommit p : interestingParents) {
        Collection<String> parentGroups = groups.get(p);
        if (parentGroups.isEmpty()) {
            throw new IllegalStateException(
                    String.format("no group assigned to parent %s of commit %s", p.name(), c.name()));
        }

        for (String parentGroup : parentGroups) {
            if (isGroupFromExistingPatchSet(p, parentGroup)) {
                // This parent's group is from an existing patch set, i.e. the parent
                // is not new in this push. Use this group for the commit.
                thisCommitGroups.add(parentGroup);
            } else {
                // This parent's group is new in this push.
                parentGroupsNewInThisPush.add(parentGroup);
            }
        }
    }

    Iterable<String> toAlias;
    if (thisCommitGroups.isEmpty()) {
        // All parent groups were new in this push. Pick the first one and alias
        // other parents' groups to this first parent.
        String firstParentGroup = parentGroupsNewInThisPush.iterator().next();
        thisCommitGroups = ImmutableSet.of(firstParentGroup);
        toAlias = Iterables.skip(parentGroupsNewInThisPush, 1);
    } else {
        // For each parent group that was new in this push, alias it to the actual
        // computed group(s) for this commit.
        toAlias = parentGroupsNewInThisPush;
    }
    groups.putAll(c, thisCommitGroups);
    for (String pg : toAlias) {
        groupAliases.putAll(pg, thisCommitGroups);
    }
}

From source file: org.apache.phoenix.expression.InListExpression.java

@Override
public void readFields(DataInput input) throws IOException {
    super.readFields(input);
    input.readBoolean(); // Unused, but left for b/w compat. TODO: remove in next major release
    fixedWidth = WritableUtils.readVInt(input);
    byte[] valuesBytes = Bytes.readByteArray(input);
    valuesByteLength = valuesBytes.length;
    int len = fixedWidth == -1 ? WritableUtils.readVInt(input) : valuesByteLength / fixedWidth;
    // TODO: consider using a regular HashSet as we never serialize from the server-side
    values = Sets.newLinkedHashSetWithExpectedSize(len);
    int offset = 0;
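    // The values are evidently serialized in sorted order: the first entry read
    // seeds minValue, the last seeds maxValue, and a single entry serves as both.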
    int i = 0;
    if (i < len) {
        offset = readValue(input, valuesBytes, offset, minValue = new ImmutableBytesPtr());
        while (++i < len - 1) {
            offset = readValue(input, valuesBytes, offset, new ImmutableBytesPtr());
        }
        if (i < len) {
            offset = readValue(input, valuesBytes, offset, maxValue = new ImmutableBytesPtr());
        } else {
            maxValue = minValue;
        }
    } else {
        minValue = maxValue = new ImmutableBytesPtr(ByteUtil.EMPTY_BYTE_ARRAY);
    }
}

From source file: org.terasology.module.ResolutionAttempt.java

/**
 * Taking the already constrained moduleVersionPool, this method works through the remaining possibilities, restricting them down to the latest possible versions.
 * <p>
 * Root modules are restricted first and in order, to keep their versions as recent as possible.
 * Dependencies are then followed, restricting them to the latest versions as needed.
 * As dependencies are followed, any modules that aren't required by the finally selected versions will not be present in the final result.
 * </p>
 *
 * @return The final set of compatible modules.
 */
private Set<Module> finaliseModules() {
    Set<Module> finalModuleSet = Sets.newLinkedHashSetWithExpectedSize(moduleVersionPool.keySet().size());
    Deque<Module> moduleQueue = Queues.newArrayDeque();
    for (Name rootModule : rootModules) {
        Version latestVersion = reduceToFinalVersion(rootModule, true).get();
        Module module = registry.getModule(rootModule, latestVersion);
        finalModuleSet.add(module);
        moduleQueue.push(module);
    }

    while (!moduleQueue.isEmpty()) {
        Module module = moduleQueue.pop();
        for (DependencyInfo dependency : module.getMetadata().getDependencies()) {
            Optional<Version> latestVersion = reduceToFinalVersion(dependency.getId(),
                    optionalStrategy.isDesired());
            if (latestVersion.isPresent()) {
                Module dependencyModule = registry.getModule(dependency.getId(), latestVersion.get());
                if (finalModuleSet.add(dependencyModule)) {
                    moduleQueue.push(dependencyModule);
                }
            }
        }
    }
    return finalModuleSet;
}

From source file: org.eclipse.elk.alg.layered.networksimplex.NetworkSimplex.java

/**
 * Helper method for the network simplex layerer. It instantiates all necessary attributes for
 * the execution of the network simplex layerer and initializes them with their default values.
 * All edges in the connected component given by the input argument will be determined, as well
 * as the number of incoming and outgoing edges of each node ({@code inDegree}, respectively
 * {@code outDegree}). All sinks and source nodes in the connected component identified in this
 * step will be added to {@code sinks}, respectively {@code sources}.
 *
 */
private void initialize() {
    // initialize node attributes
    int numNodes = graph.nodes.size();
    for (NNode n : graph.nodes) {
        n.treeNode = false;
    }
    poID = new int[numNodes];
    lowestPoID = new int[numNodes];
    sources = Lists.newArrayList();

    // determine edges and re-index nodes
    int index = 0;
    List<NEdge> theEdges = Lists.newArrayList();
    for (NNode node : graph.nodes) {
        node.id = index++;
        // add node to sinks, resp. sources
        if (node.getIncomingEdges().size() == 0) {
            sources.add(node);
        }
        theEdges.addAll(node.getOutgoingEdges());
    }
    // re-index edges
    int counter = 0;
    for (NEdge edge : theEdges) {
        edge.id = counter++;
        edge.treeEdge = false;
    }
    // initialize edge attributes
    int numEdges = theEdges.size();
    if (cutvalue == null || cutvalue.length < numEdges) {
        cutvalue = new double[numEdges];
        edgeVisited = new boolean[numEdges];
    } else {
        Arrays.fill(edgeVisited, false);
    }
    edges = theEdges;
    // we iterate over this set, thus we have to use a linked hash set 
    // to get a deterministic iteration order
    treeEdges = Sets.newLinkedHashSetWithExpectedSize(edges.size());
    postOrder = 1;
}

From source file: org.eclipse.xtext.resource.impl.ResourceDescriptionsData.java

@SuppressWarnings("unchecked")
protected void registerDescription(IResourceDescription description, Map<QualifiedName, Object> target) {
    for (IEObjectDescription object : description.getExportedObjects()) {
        QualifiedName lowerCase = object.getName().toLowerCase();
        Object existing = target.put(lowerCase, description);
        if (existing != null && existing != description) {
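            // A second description maps to the same name: upgrade the single stored
            // value to a small set (expected size 2 covers the common collision case).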
            Set<IResourceDescription> set = null;
            if (existing instanceof IResourceDescription) {
                set = Sets.newLinkedHashSetWithExpectedSize(2);
                set.add((IResourceDescription) existing);
            } else {
                set = (Set<IResourceDescription>) existing;
            }
            set.add(description);
            target.put(lowerCase, set);
        }
    }
}

From source file: com.google.gerrit.server.git.GroupCollector.java

private Set<RevCommit> getInterestingParents(RevCommit commit) {
    Set<RevCommit> result = Sets.newLinkedHashSetWithExpectedSize(commit.getParentCount());
    for (RevCommit p : commit.getParents()) {
        if (!p.has(UNINTERESTING)) {
            result.add(p);
        }
    }
    return result;
}

From source file: com.cinchapi.common.reflect.Reflection.java

/**
 * Return a collection containing the type arguments for the provided
 * {@code field}. If there are no type arguments, the collection that is
 * returned is empty.
 * 
 * @param field
 * @return the type arguments
 */
@Nonnull
public static Collection<Class<?>> getTypeArguments(Field field) {
    try {
        ParameterizedType parameterized = (ParameterizedType) field.getGenericType();
        Type[] types = parameterized.getActualTypeArguments();
        Set<Class<?>> typeArgs = Sets.newLinkedHashSetWithExpectedSize(types.length);
        for (Type type : types) {
            typeArgs.add((Class<?>) type);
        }
        return typeArgs;
    } catch (ClassCastException e) {
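        // The field either is not generic (the outer cast fails) or has a type
        // argument that is not a concrete Class, e.g. a wildcard (the inner cast fails).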
        return ImmutableSet.of();
    }
}

From source file: org.apache.phoenix.index.IndexMaintainer.java

private IndexMaintainer(PTable dataTable, PTable index, PhoenixConnection connection) {
    this(dataTable.getRowKeySchema(), dataTable.getBucketNum() != null);
    this.rowKeyOrderOptimizable = index.rowKeyOrderOptimizable();
    this.isMultiTenant = dataTable.isMultiTenant();
    this.viewIndexId = index.getViewIndexId() == null ? null
            : MetaDataUtil.getViewIndexIdDataType().toBytes(index.getViewIndexId());
    this.isLocalIndex = index.getIndexType() == IndexType.LOCAL;

    byte[] indexTableName = index.getPhysicalName().getBytes();
    // Use this for the nDataSaltBuckets as we need this for local indexes
    // TODO: persist nDataSaltBuckets separately, but maintain b/w compat.
    Integer nIndexSaltBuckets = isLocalIndex ? dataTable.getBucketNum() : index.getBucketNum();
    boolean indexWALDisabled = index.isWALDisabled();
    int indexPosOffset = (index.getBucketNum() == null ? 0 : 1) + (this.isMultiTenant ? 1 : 0)
            + (this.viewIndexId == null ? 0 : 1);
    //        int indexPosOffset = !isLocalIndex && nIndexSaltBuckets > 0 ? 1 : 0;
    int nIndexColumns = index.getColumns().size() - indexPosOffset;
    int nIndexPKColumns = index.getPKColumns().size() - indexPosOffset;
    // number of expressions that are indexed that are not present in the row key of the data table
    int indexedExpressionCount = 0;
    for (int i = indexPosOffset; i < index.getPKColumns().size(); i++) {
        PColumn indexColumn = index.getPKColumns().get(i);
        String indexColumnName = indexColumn.getName().getString();
        String dataFamilyName = IndexUtil.getDataColumnFamilyName(indexColumnName);
        String dataColumnName = IndexUtil.getDataColumnName(indexColumnName);
        try {
            PColumn dataColumn = dataFamilyName.equals("") ? dataTable.getColumn(dataColumnName)
                    : dataTable.getColumnFamily(dataFamilyName).getColumn(dataColumnName);
            if (SchemaUtil.isPKColumn(dataColumn))
                continue;
        } catch (ColumnNotFoundException e) {
            // This column must be an expression
        } catch (Exception e) {
            throw new IllegalArgumentException(e);
        }
        indexedExpressionCount++;
    }
    int indexPkColumnCount = this.dataRowKeySchema.getFieldCount() + indexedExpressionCount
            - (this.isDataTableSalted ? 1 : 0) - (this.isMultiTenant ? 1 : 0);
    this.rowKeyMetaData = newRowKeyMetaData(indexPkColumnCount);
    BitSet bitSet = this.rowKeyMetaData.getViewConstantColumnBitSet();

    int dataPosOffset = (isDataTableSalted ? 1 : 0) + (this.isMultiTenant ? 1 : 0);
    int nDataPKColumns = dataRowKeySchema.getFieldCount() - dataPosOffset;
    // For indexes on views, we need to remember which data columns are "constants"
    // These are the values in a VIEW's WHERE clause. We don't put them in the
    // index, as they're the same for every row in the index.
    if (dataTable.getType() == PTableType.VIEW) {
        List<PColumn> dataPKColumns = dataTable.getPKColumns();
        for (int i = dataPosOffset; i < dataPKColumns.size(); i++) {
            PColumn dataPKColumn = dataPKColumns.get(i);
            if (dataPKColumn.getViewConstant() != null) {
                bitSet.set(i);
                nDataPKColumns--;
            }
        }
    }
    this.indexTableName = indexTableName;
    this.indexedColumnTypes = Lists.<PDataType>newArrayListWithExpectedSize(nIndexPKColumns - nDataPKColumns);
    this.indexedExpressions = Lists.newArrayListWithExpectedSize(nIndexPKColumns - nDataPKColumns);
    this.coveredColumns = Sets.newLinkedHashSetWithExpectedSize(nIndexColumns - nIndexPKColumns);
    this.nIndexSaltBuckets = nIndexSaltBuckets == null ? 0 : nIndexSaltBuckets;
    this.dataEmptyKeyValueCF = SchemaUtil.getEmptyColumnFamily(dataTable);
    this.emptyKeyValueCFPtr = SchemaUtil.getEmptyColumnFamilyPtr(index);
    this.nDataCFs = dataTable.getColumnFamilies().size();
    this.indexWALDisabled = indexWALDisabled;
    // TODO: check whether the index is immutable or not. Currently it's always false, so we
    // check whether the data table has immutable rows instead.
    this.immutableRows = dataTable.isImmutableRows();
    int indexColByteSize = 0;
    ColumnResolver resolver = null;
    List<ParseNode> parseNodes = new ArrayList<ParseNode>(1);
    UDFParseNodeVisitor visitor = new UDFParseNodeVisitor();
    for (int i = indexPosOffset; i < index.getPKColumns().size(); i++) {
        PColumn indexColumn = index.getPKColumns().get(i);
        String expressionStr = IndexUtil.getIndexColumnExpressionStr(indexColumn);
        try {
            ParseNode parseNode = SQLParser.parseCondition(expressionStr);
            parseNode.accept(visitor);
            parseNodes.add(parseNode);
        } catch (SQLException e) {
            throw new RuntimeException(e);
        }
    }
    try {
        resolver = FromCompiler.getResolver(connection, new TableRef(dataTable), visitor.getUdfParseNodes());
    } catch (SQLException e) {
        throw new RuntimeException(e); // Impossible
    }
    StatementContext context = new StatementContext(new PhoenixStatement(connection), resolver);
    IndexExpressionCompiler expressionIndexCompiler = new IndexExpressionCompiler(context);
    for (int i = indexPosOffset; i < index.getPKColumns().size(); i++) {
        PColumn indexColumn = index.getPKColumns().get(i);
        int indexPos = i - indexPosOffset;
        Expression expression = null;
        try {
            expressionIndexCompiler.reset();
            expression = parseNodes.get(indexPos).accept(expressionIndexCompiler);
        } catch (SQLException e) {
            throw new RuntimeException(e); // Impossible
        }
        if (expressionIndexCompiler.getColumnRef() != null) {
            // get the column of the data table that corresponds to this index column
            PColumn column = IndexUtil.getDataColumn(dataTable, indexColumn.getName().getString());
            boolean isPKColumn = SchemaUtil.isPKColumn(column);
            if (isPKColumn) {
                int dataPkPos = dataTable.getPKColumns().indexOf(column)
                        - (dataTable.getBucketNum() == null ? 0 : 1) - (this.isMultiTenant ? 1 : 0);
                this.rowKeyMetaData.setIndexPkPosition(dataPkPos, indexPos);
            } else {
                indexColByteSize += column.getDataType().isFixedWidth() ? SchemaUtil.getFixedByteSize(column)
                        : ValueSchema.ESTIMATED_VARIABLE_LENGTH_SIZE;
                this.indexedExpressions.add(expression);
            }
        } else {
            indexColByteSize += expression.getDataType().isFixedWidth()
                    ? SchemaUtil.getFixedByteSize(expression)
                    : ValueSchema.ESTIMATED_VARIABLE_LENGTH_SIZE;
            this.indexedExpressions.add(expression);
        }
        // set the sort order of the expression correctly
        if (indexColumn.getSortOrder() == SortOrder.DESC) {
            this.rowKeyMetaData.getDescIndexColumnBitSet().set(indexPos);
        }
    }
    this.estimatedExpressionSize = expressionIndexCompiler.getTotalNodeCount() * ESTIMATED_EXPRESSION_SIZE;
    for (int i = 0; i < index.getColumnFamilies().size(); i++) {
        PColumnFamily family = index.getColumnFamilies().get(i);
        for (PColumn indexColumn : family.getColumns()) {
            PColumn column = IndexUtil.getDataColumn(dataTable, indexColumn.getName().getString());
            this.coveredColumns
                    .add(new ColumnReference(column.getFamilyName().getBytes(), column.getName().getBytes()));
        }
    }
    this.estimatedIndexRowKeyBytes = estimateIndexRowKeyByteSize(indexColByteSize);
    initCachedState();
}

From source file: org.apache.phoenix.compile.UpsertCompiler.java

public MutationPlan compile(UpsertStatement upsert) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,
            QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    List<ColumnName> columnNodes = upsert.getColumns();
    TableRef tableRefToBe = null;
    PTable table = null;
    Set<PColumn> addViewColumnsToBe = Collections.emptySet();
    Set<PColumn> overlapViewColumnsToBe = Collections.emptySet();
    List<PColumn> allColumnsToBe = Collections.emptyList();
    boolean isTenantSpecific = false;
    boolean isSharedViewIndex = false;
    String tenantIdStr = null;
    ColumnResolver resolver = null;
    int[] columnIndexesToBe;
    int nColumnsToSet = 0;
    int[] pkSlotIndexesToBe;
    List<ParseNode> valueNodes = upsert.getValues();
    List<PColumn> targetColumns;
    NamedTableNode tableNode = upsert.getTable();
    String tableName = tableNode.getName().getTableName();
    String schemaName = tableNode.getName().getSchemaName();
    QueryPlan queryPlanToBe = null;
    int nValuesToSet;
    boolean sameTable = false;
    boolean runOnServer = false;
    UpsertingParallelIteratorFactory parallelIteratorFactoryToBe = null;
    // Retry once if auto commit is off, as the meta data may
    // be out of date. We do not retry if auto commit is on, as we
    // update the cache up front when we create the resolver in that case.
    boolean retryOnce = !connection.getAutoCommit();
    boolean useServerTimestampToBe = false;
    while (true) {
        try {
            resolver = FromCompiler.getResolverForMutation(upsert, connection);
            tableRefToBe = resolver.getTables().get(0);
            table = tableRefToBe.getTable();
            if (table.getType() == PTableType.VIEW) {
                if (table.getViewType().isReadOnly()) {
                    throw new ReadOnlyTableException(schemaName, tableName);
                }
            }
            boolean isSalted = table.getBucketNum() != null;
            isTenantSpecific = table.isMultiTenant() && connection.getTenantId() != null;
            isSharedViewIndex = table.getViewIndexId() != null;
            tenantIdStr = isTenantSpecific ? connection.getTenantId().getString() : null;
            int posOffset = isSalted ? 1 : 0;
            // Setup array of column indexes parallel to values that are going to be set
            allColumnsToBe = table.getColumns();

            nColumnsToSet = 0;
            if (table.getViewType() == ViewType.UPDATABLE) {
                addViewColumnsToBe = Sets.newLinkedHashSetWithExpectedSize(allColumnsToBe.size());
                for (PColumn column : allColumnsToBe) {
                    if (column.getViewConstant() != null) {
                        addViewColumnsToBe.add(column);
                    }
                }
            }
            ImmutableBytesWritable ptr = new ImmutableBytesWritable();
            // Allow a full-row upsert if no columns, or only dynamic ones, are specified and the value count matches
            if (columnNodes.isEmpty() || columnNodes.size() == upsert.getTable().getDynamicColumns().size()) {
                nColumnsToSet = allColumnsToBe.size() - posOffset;
                columnIndexesToBe = new int[nColumnsToSet];
                pkSlotIndexesToBe = new int[columnIndexesToBe.length];
                targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
                targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
                int minPKPos = 0;
                if (isTenantSpecific) {
                    PColumn tenantColumn = table.getPKColumns().get(minPKPos);
                    columnIndexesToBe[minPKPos] = tenantColumn.getPosition();
                    targetColumns.set(minPKPos, tenantColumn);
                    minPKPos++;
                }
                if (isSharedViewIndex) {
                    PColumn indexIdColumn = table.getPKColumns().get(minPKPos);
                    columnIndexesToBe[minPKPos] = indexIdColumn.getPosition();
                    targetColumns.set(minPKPos, indexIdColumn);
                    minPKPos++;
                }
                for (int i = posOffset, j = 0; i < allColumnsToBe.size(); i++) {
                    PColumn column = allColumnsToBe.get(i);
                    if (SchemaUtil.isPKColumn(column)) {
                        pkSlotIndexesToBe[i - posOffset] = j + posOffset;
                        if (j++ < minPKPos) { // Skip, as it's already been set above
                            continue;
                        }
                        minPKPos = 0;
                    }
                    columnIndexesToBe[i - posOffset + minPKPos] = i;
                    targetColumns.set(i - posOffset + minPKPos, column);
                }
                if (!addViewColumnsToBe.isEmpty()) {
                    // All view columns overlap in this case
                    overlapViewColumnsToBe = addViewColumnsToBe;
                    addViewColumnsToBe = Collections.emptySet();
                }
            } else {
                // Size for the worst case
                int numColsInUpsert = columnNodes.size();
                nColumnsToSet = numColsInUpsert + addViewColumnsToBe.size() + (isTenantSpecific ? 1 : 0)
                        + (isSharedViewIndex ? 1 : 0);
                columnIndexesToBe = new int[nColumnsToSet];
                pkSlotIndexesToBe = new int[columnIndexesToBe.length];
                targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
                targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
                Arrays.fill(columnIndexesToBe, -1); // TODO: necessary? So we'll get an AIOB exception if it's not replaced
                Arrays.fill(pkSlotIndexesToBe, -1); // TODO: necessary? So we'll get an AIOB exception if it's not replaced
                BitSet pkColumnsSet = new BitSet(table.getPKColumns().size());
                int i = 0;
                // Add the tenant column directly, as we don't want to resolve it; resolution would fail
                if (isTenantSpecific) {
                    PColumn tenantColumn = table.getPKColumns().get(i + posOffset);
                    columnIndexesToBe[i] = tenantColumn.getPosition();
                    pkColumnsSet.set(pkSlotIndexesToBe[i] = i + posOffset);
                    targetColumns.set(i, tenantColumn);
                    i++;
                }
                if (isSharedViewIndex) {
                    PColumn indexIdColumn = table.getPKColumns().get(i + posOffset);
                    columnIndexesToBe[i] = indexIdColumn.getPosition();
                    pkColumnsSet.set(pkSlotIndexesToBe[i] = i + posOffset);
                    targetColumns.set(i, indexIdColumn);
                    i++;
                }
                for (ColumnName colName : columnNodes) {
                    ColumnRef ref = resolver.resolveColumn(null, colName.getFamilyName(),
                            colName.getColumnName());
                    PColumn column = ref.getColumn();
                    if (IndexUtil.getViewConstantValue(column, ptr)) {
                        if (overlapViewColumnsToBe.isEmpty()) {
                            overlapViewColumnsToBe = Sets.newHashSetWithExpectedSize(addViewColumnsToBe.size());
                        }
                        nColumnsToSet--;
                        overlapViewColumnsToBe.add(column);
                        addViewColumnsToBe.remove(column);
                    }
                    columnIndexesToBe[i] = ref.getColumnPosition();
                    targetColumns.set(i, column);
                    if (SchemaUtil.isPKColumn(column)) {
                        pkColumnsSet.set(pkSlotIndexesToBe[i] = ref.getPKSlotPosition());
                    }
                    i++;
                }
                for (PColumn column : addViewColumnsToBe) {
                    columnIndexesToBe[i] = column.getPosition();
                    targetColumns.set(i, column);
                    if (SchemaUtil.isPKColumn(column)) {
                        pkColumnsSet.set(pkSlotIndexesToBe[i] = SchemaUtil.getPKPosition(table, column));
                    }
                    i++;
                }
                // If a table has rowtimestamp col, then we always set it.
                useServerTimestampToBe = table.getRowTimestampColPos() != -1
                        && !isRowTimestampSet(pkSlotIndexesToBe, table);
                if (useServerTimestampToBe) {
                    PColumn rowTimestampCol = table.getPKColumns().get(table.getRowTimestampColPos());
                    // Need to resize columnIndexesToBe and pkSlotIndexesToBe to include this extra column.
                    columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, columnIndexesToBe.length + 1);
                    pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, pkSlotIndexesToBe.length + 1);
                    columnIndexesToBe[i] = rowTimestampCol.getPosition();
                    pkColumnsSet.set(pkSlotIndexesToBe[i] = table.getRowTimestampColPos());
                    targetColumns.add(rowTimestampCol);
                    if (valueNodes != null && !valueNodes.isEmpty()) {
                        valueNodes.add(getNodeForRowTimestampColumn(rowTimestampCol));
                    }
                    nColumnsToSet++;
                }
                for (i = posOffset; i < table.getPKColumns().size(); i++) {
                    PColumn pkCol = table.getPKColumns().get(i);
                    if (!pkColumnsSet.get(i)) {
                        if (!pkCol.isNullable()) {
                            throw new ConstraintViolationException(table.getName().getString() + "."
                                    + pkCol.getName().getString() + " may not be null");
                        }
                    }
                }
            }
            boolean isAutoCommit = connection.getAutoCommit();
            if (valueNodes == null) {
                SelectStatement select = upsert.getSelect();
                assert (select != null);
                select = SubselectRewriter.flatten(select, connection);
                ColumnResolver selectResolver = FromCompiler.getResolverForQuery(select, connection);
                select = StatementNormalizer.normalize(select, selectResolver);
                select = prependTenantAndViewConstants(table, select, tenantIdStr, addViewColumnsToBe,
                        useServerTimestampToBe);
                SelectStatement transformedSelect = SubqueryRewriter.transform(select, selectResolver,
                        connection);
                if (transformedSelect != select) {
                    selectResolver = FromCompiler.getResolverForQuery(transformedSelect, connection);
                    select = StatementNormalizer.normalize(transformedSelect, selectResolver);
                }
                sameTable = !select.isJoin() && tableRefToBe.equals(selectResolver.getTables().get(0));
                tableRefToBe = adjustTimestampToMinOfSameTable(tableRefToBe, selectResolver.getTables());
                /* We can run the upsert in a coprocessor if:
                 * 1) from has only 1 table and the into table matches from table
                 * 2) the select query isn't doing aggregation (which requires a client-side final merge)
                 * 3) autoCommit is on
                 * 4) the table is not immutable with indexes, as the client is the one that figures out the additional
                 *    puts for index tables.
                 * 5) no limit clause, as the limit clause requires client-side post processing
                 * 6) no sequences, as sequences imply that the order of upsert must match the order of
                 *    selection.
                 * Otherwise, run the query to pull the data from the server
                 * and populate the MutationState (up to a limit).
                */
                if (!(select.isAggregate() || select.isDistinct() || select.getLimit() != null
                        || select.hasSequence())) {
                    // We can pipeline the upsert select instead of spooling everything to disk first,
                    // if we don't have any post processing that's required.
                    parallelIteratorFactoryToBe = new UpsertingParallelIteratorFactory(connection, tableRefToBe,
                            useServerTimestampToBe);
                    // If we're in the else, then it's not an aggregate, distinct, limited, or sequence using query,
                    // so we might be able to run it entirely on the server side.
                    // For a table with row timestamp column, we can't guarantee that the row key will reside in the
                    // region space managed by region servers. So we bail out on executing on server side.
                    runOnServer = sameTable && isAutoCommit
                            && !(table.isImmutableRows() && !table.getIndexes().isEmpty())
                            && table.getRowTimestampColPos() == -1;
                }
                // If we may be able to run on the server, add a hint that favors using the data table
                // if all else is equal.
                // TODO: it'd be nice if we could figure out in advance if the PK is potentially changing,
                // as this would disallow running on the server. We currently use the row projector we
                // get back to figure this out.
                HintNode hint = upsert.getHint();
                if (!upsert.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
                    hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
                }
                select = SelectStatement.create(select, hint);
                // Pass scan through if same table in upsert and select so that projection is computed correctly
                // Use optimizer to choose the best plan
                try {
                    QueryCompiler compiler = new QueryCompiler(statement, select, selectResolver, targetColumns,
                            parallelIteratorFactoryToBe, new SequenceManager(statement), false);
                    queryPlanToBe = compiler.compile();
                    // This is post-fix: if the tableRef is a projected table, this means there are post-processing 
                    // steps and parallelIteratorFactory did not take effect.
                    if (queryPlanToBe.getTableRef().getTable().getType() == PTableType.PROJECTED
                            || queryPlanToBe.getTableRef().getTable().getType() == PTableType.SUBQUERY) {
                        parallelIteratorFactoryToBe = null;
                    }
                } catch (MetaDataEntityNotFoundException e) {
                    retryOnce = false; // don't retry if select clause has meta data entities that aren't found, as we already updated the cache
                    throw e;
                }
                nValuesToSet = queryPlanToBe.getProjector().getColumnCount();
                // Cannot auto commit if doing aggregation or topN or salted
                // Salted causes problems because the row may end up living on a different region
            } else {
                nValuesToSet = valueNodes.size() + addViewColumnsToBe.size() + (isTenantSpecific ? 1 : 0)
                        + (isSharedViewIndex ? 1 : 0);
            }
            // Resize down to allow a subset of columns to be specifiable
            if (columnNodes.isEmpty() && columnIndexesToBe.length >= nValuesToSet) {
                nColumnsToSet = nValuesToSet;
                columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, nValuesToSet);
                pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, nValuesToSet);
            }

            if (nValuesToSet != nColumnsToSet) {
                // We might have added columns, so refresh cache and try again if stale.
                // Note that this check is not really sufficient, as a column could have
                // been removed and the added back and we wouldn't detect that here.
                if (retryOnce) {
                    retryOnce = false;
                    if (new MetaDataClient(connection).updateCache(schemaName, tableName).wasUpdated()) {
                        continue;
                    }
                }
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.UPSERT_COLUMN_NUMBERS_MISMATCH)
                        .setMessage(
                                "Numbers of columns: " + nColumnsToSet + ". Number of values: " + nValuesToSet)
                        .build().buildException();
            }
        } catch (MetaDataEntityNotFoundException e) {
            // Catch column/column family not found exception, as our meta data may
            // be out of sync. Update the cache once and retry if we were out of sync.
            // Otherwise throw, as we'll just get the same error next time.
            if (retryOnce) {
                retryOnce = false;
                if (new MetaDataClient(connection).updateCache(schemaName, tableName).wasUpdated()) {
                    continue;
                }
            }
            throw e;
        }
        break;
    }

    RowProjector projectorToBe = null;
    // Optimize only after all checks have been performed
    if (valueNodes == null) {
        queryPlanToBe = new QueryOptimizer(services).optimize(queryPlanToBe, statement, targetColumns,
                parallelIteratorFactoryToBe);
        projectorToBe = queryPlanToBe.getProjector();
        runOnServer &= queryPlanToBe.getTableRef().equals(tableRefToBe);
    }
    final List<PColumn> allColumns = allColumnsToBe;
    final RowProjector projector = projectorToBe;
    final QueryPlan queryPlan = queryPlanToBe;
    final TableRef tableRef = tableRefToBe;
    final Set<PColumn> addViewColumns = addViewColumnsToBe;
    final Set<PColumn> overlapViewColumns = overlapViewColumnsToBe;
    final UpsertingParallelIteratorFactory parallelIteratorFactory = parallelIteratorFactoryToBe;
    final int[] columnIndexes = columnIndexesToBe;
    final int[] pkSlotIndexes = pkSlotIndexesToBe;
    final boolean useServerTimestamp = useServerTimestampToBe;
    if (table.getRowTimestampColPos() == -1 && useServerTimestamp) {
        throw new IllegalStateException(
                "For a table without row timestamp column, useServerTimestamp cannot be true");
    }
    // TODO: break this up into multiple functions
    ////////////////////////////////////////////////////////////////////
    // UPSERT SELECT
    /////////////////////////////////////////////////////////////////////
    if (valueNodes == null) {
        // Before we re-order, check that for updatable view columns
        // the projected expression either matches the column name or
        // is a constant with the same required value.
        throwIfNotUpdatable(tableRef, overlapViewColumnsToBe, targetColumns, projector, sameTable);

        ////////////////////////////////////////////////////////////////////
        // UPSERT SELECT run server-side (maybe)
        /////////////////////////////////////////////////////////////////////
        if (runOnServer) {
            // At most this array will grow bigger by the number of PK columns
            int[] allColumnsIndexes = Arrays.copyOf(columnIndexes, columnIndexes.length + nValuesToSet);
            int[] reverseColumnIndexes = new int[table.getColumns().size()];
            List<Expression> projectedExpressions = Lists
                    .newArrayListWithExpectedSize(reverseColumnIndexes.length);
            Arrays.fill(reverseColumnIndexes, -1);
            for (int i = 0; i < nValuesToSet; i++) {
                projectedExpressions.add(projector.getColumnProjector(i).getExpression());
                reverseColumnIndexes[columnIndexes[i]] = i;
            }
            /*
             * Order projected columns and projected expressions with PK columns
             * leading order by slot position
             */
            int offset = table.getBucketNum() == null ? 0 : 1;
            for (int i = 0; i < table.getPKColumns().size() - offset; i++) {
                PColumn column = table.getPKColumns().get(i + offset);
                int pos = reverseColumnIndexes[column.getPosition()];
                if (pos == -1) {
                    // Last PK column may be fixed width and nullable
                    // We don't want to insert a null expression b/c
                    // it's not valid to set a fixed width type to null.
                    if (column.getDataType().isFixedWidth()) {
                        continue;
                    }
                    // Add literal null for missing PK columns
                    pos = projectedExpressions.size();
                    Expression literalNull = LiteralExpression.newConstant(null, column.getDataType(),
                            Determinism.ALWAYS);
                    projectedExpressions.add(literalNull);
                    allColumnsIndexes[pos] = column.getPosition();
                }
                // Swap select expression at pos with i
                Collections.swap(projectedExpressions, i, pos);
                // Swap column indexes and reverse column indexes too
                int tempPos = allColumnsIndexes[i];
                allColumnsIndexes[i] = allColumnsIndexes[pos];
                allColumnsIndexes[pos] = tempPos;
                reverseColumnIndexes[tempPos] = reverseColumnIndexes[i];
                reverseColumnIndexes[i] = i;
            }
            // If any pk slots are changing, be conservative and don't run this server side.
            // If the row ends up living in a different region, we'll get an error otherwise.
            for (int i = 0; i < table.getPKColumns().size(); i++) {
                PColumn column = table.getPKColumns().get(i);
                Expression source = projectedExpressions.get(i);
                if (source == null || !source
                        .equals(new ColumnRef(tableRef, column.getPosition()).newColumnExpression())) {
                    // TODO: we could check the region boundaries to see if the pk will still be in it.
                    runOnServer = false; // bail on running server side, since PK may be changing
                    break;
                }
            }

            ////////////////////////////////////////////////////////////////////
            // UPSERT SELECT run server-side
            /////////////////////////////////////////////////////////////////////
            if (runOnServer) {
                // Iterate through columns being projected
                List<PColumn> projectedColumns = Lists
                        .newArrayListWithExpectedSize(projectedExpressions.size());
                for (int i = 0; i < projectedExpressions.size(); i++) {
                    // Must make new column if position has changed
                    PColumn column = allColumns.get(allColumnsIndexes[i]);
                    projectedColumns.add(column.getPosition() == i ? column : new PColumnImpl(column, i));
                }
                // Build table from projectedColumns
                PTable projectedTable = PTableImpl.makePTable(table, projectedColumns);

                SelectStatement select = SelectStatement.create(SelectStatement.COUNT_ONE, upsert.getHint());
                final RowProjector aggProjector = ProjectionCompiler.compile(queryPlan.getContext(), select,
                        GroupBy.EMPTY_GROUP_BY);
                /*
                 * Transfer over PTable representing subset of columns selected, but all PK columns.
                 * Move columns setting PK first in pkSlot order, adding LiteralExpression of null for any missing ones.
                 * Transfer over List<Expression> for projection.
                 * In the region scan, evaluate expressions in order, collecting the first n columns for the PK and the non-PK columns in the mutation Map.
                 * Create the PRow and get the mutations, adding them to the batch
                 */
                final StatementContext context = queryPlan.getContext();
                final Scan scan = context.getScan();
                scan.setAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE,
                        UngroupedAggregateRegionObserver.serialize(projectedTable));
                scan.setAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS,
                        UngroupedAggregateRegionObserver.serialize(projectedExpressions));

                // Ignore order by - it has no impact
                final QueryPlan aggPlan = new AggregatePlan(context, select, tableRef, aggProjector, null,
                        OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
                return new MutationPlan() {

                    @Override
                    public PhoenixConnection getConnection() {
                        return connection;
                    }

                    @Override
                    public ParameterMetaData getParameterMetaData() {
                        return queryPlan.getContext().getBindManager().getParameterMetaData();
                    }

                    @Override
                    public StatementContext getContext() {
                        return queryPlan.getContext();
                    }

                    @Override
                    public MutationState execute() throws SQLException {
                        ImmutableBytesWritable ptr = context.getTempPtr();
                        tableRef.getTable().getIndexMaintainers(ptr, context.getConnection());
                        ServerCache cache = null;
                        try {
                            if (ptr.getLength() > 0) {
                                IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection,
                                        tableRef);
                                cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
                                byte[] uuidValue = cache.getId();
                                scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                            }
                            ResultIterator iterator = aggPlan.iterator();
                            try {
                                Tuple row = iterator.next();
                                final long mutationCount = (Long) aggProjector.getColumnProjector(0)
                                        .getValue(row, PLong.INSTANCE, ptr);
                                return new MutationState(maxSize, connection) {
                                    @Override
                                    public long getUpdateCount() {
                                        return mutationCount;
                                    }
                                };
                            } finally {
                                iterator.close();
                            }
                        } finally {
                            if (cache != null) {
                                cache.close();
                            }
                        }
                    }

                    @Override
                    public ExplainPlan getExplainPlan() throws SQLException {
                        List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
                        List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
                        planSteps.add("UPSERT ROWS");
                        planSteps.addAll(queryPlanSteps);
                        return new ExplainPlan(planSteps);
                    }
                };
            }
        }
        ////////////////////////////////////////////////////////////////////
        // UPSERT SELECT run client-side
        /////////////////////////////////////////////////////////////////////
        return new MutationPlan() {

            @Override
            public PhoenixConnection getConnection() {
                return connection;
            }

            @Override
            public ParameterMetaData getParameterMetaData() {
                return queryPlan.getContext().getBindManager().getParameterMetaData();
            }

            @Override
            public StatementContext getContext() {
                return queryPlan.getContext();
            }

            @Override
            public MutationState execute() throws SQLException {
                ResultIterator iterator = queryPlan.iterator();
                if (parallelIteratorFactory == null) {
                    return upsertSelect(new StatementContext(statement), tableRef, projector, iterator,
                            columnIndexes, pkSlotIndexes, useServerTimestamp);
                }
                try {
                    parallelIteratorFactory.setRowProjector(projector);
                    parallelIteratorFactory.setColumnIndexes(columnIndexes);
                    parallelIteratorFactory.setPkSlotIndexes(pkSlotIndexes);
                    Tuple tuple;
                    long totalRowCount = 0;
                    StatementContext context = queryPlan.getContext();
                    while ((tuple = iterator.next()) != null) {// Runs query
                        Cell kv = tuple.getValue(0);
                        totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(),
                                kv.getValueOffset(), SortOrder.getDefault());
                    }
                    // Return total number of rows that have been updated. In the case of auto commit being off
                    // the mutations will all be in the mutation state of the current connection.
                    MutationState mutationState = new MutationState(maxSize, statement.getConnection(),
                            totalRowCount);
                    /*
                     *  All the metrics collected for measuring the reads done by the parallel mutating iterators
                     *  are included in the ReadMetricHolder of the statement context. Include these metrics in the
                     *  returned mutation state so they can be published on commit. 
                     */
                    mutationState.setReadMetricQueue(context.getReadMetricsQueue());
                    return mutationState;
                } finally {
                    iterator.close();
                }
            }

            @Override
            public ExplainPlan getExplainPlan() throws SQLException {
                List<String> queryPlanSteps = queryPlan.getExplainPlan().getPlanSteps();
                List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
                planSteps.add("UPSERT SELECT");
                planSteps.addAll(queryPlanSteps);
                return new ExplainPlan(planSteps);
            }

        };
    }

    ////////////////////////////////////////////////////////////////////
    // UPSERT VALUES
    /////////////////////////////////////////////////////////////////////
    final byte[][] values = new byte[nValuesToSet][];
    int nodeIndex = 0;
    if (isTenantSpecific) {
        PName tenantId = connection.getTenantId();
        values[nodeIndex++] = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null,
                tenantId);
    }
    if (isSharedViewIndex) {
        values[nodeIndex++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
    }

    final int nodeIndexOffset = nodeIndex;
    // Allocate array based on size of all columns in table,
    // since some values may not be set (if they're nullable).
    final StatementContext context = new StatementContext(statement, resolver, new Scan(),
            new SequenceManager(statement));
    UpsertValuesCompiler expressionBuilder = new UpsertValuesCompiler(context);
    final List<Expression> constantExpressions = Lists.newArrayListWithExpectedSize(valueNodes.size());
    // First build all the expressions, as with sequences we want to collect them all first
    // and initialize them in one batch
    for (ParseNode valueNode : valueNodes) {
        if (!valueNode.isStateless()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.VALUE_IN_UPSERT_NOT_CONSTANT).build()
                    .buildException();
        }
        PColumn column = allColumns.get(columnIndexes[nodeIndex]);
        expressionBuilder.setColumn(column);
        Expression expression = valueNode.accept(expressionBuilder);
        if (expression.getDataType() != null && !expression.getDataType().isCastableTo(column.getDataType())) {
            throw TypeMismatchException.newException(expression.getDataType(), column.getDataType(),
                    "expression: " + expression.toString() + " in column " + column);
        }
        constantExpressions.add(expression);
        nodeIndex++;
    }
    return new MutationPlan() {

        @Override
        public PhoenixConnection getConnection() {
            return connection;
        }

        @Override
        public ParameterMetaData getParameterMetaData() {
            return context.getBindManager().getParameterMetaData();
        }

        @Override
        public StatementContext getContext() {
            return context;
        }

        @Override
        public MutationState execute() throws SQLException {
            ImmutableBytesWritable ptr = context.getTempPtr();
            final SequenceManager sequenceManager = context.getSequenceManager();
            // Next evaluate all the expressions
            int nodeIndex = nodeIndexOffset;
            PTable table = tableRef.getTable();
            Tuple tuple = sequenceManager.getSequenceCount() == 0 ? null
                    : sequenceManager.newSequenceTuple(null);
            for (Expression constantExpression : constantExpressions) {
                PColumn column = allColumns.get(columnIndexes[nodeIndex]);
                constantExpression.evaluate(tuple, ptr);
                Object value = null;
                if (constantExpression.getDataType() != null) {
                    value = constantExpression.getDataType().toObject(ptr, constantExpression.getSortOrder(),
                            constantExpression.getMaxLength(), constantExpression.getScale());
                    if (!constantExpression.getDataType().isCoercibleTo(column.getDataType(), value)) {
                        throw TypeMismatchException.newException(constantExpression.getDataType(),
                                column.getDataType(),
                                "expression: " + constantExpression.toString() + " in column " + column);
                    }
                    if (!column.getDataType().isSizeCompatible(ptr, value, constantExpression.getDataType(),
                            constantExpression.getMaxLength(), constantExpression.getScale(),
                            column.getMaxLength(), column.getScale())) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY)
                                .setColumnName(column.getName().getString())
                                .setMessage("value=" + constantExpression.toString()).build().buildException();
                    }
                }
                column.getDataType().coerceBytes(ptr, value, constantExpression.getDataType(),
                        constantExpression.getMaxLength(), constantExpression.getScale(),
                        constantExpression.getSortOrder(), column.getMaxLength(), column.getScale(),
                        column.getSortOrder(), table.rowKeyOrderOptimizable());
                if (overlapViewColumns.contains(column)
                        && Bytes.compareTo(ptr.get(), ptr.getOffset(), ptr.getLength(),
                                column.getViewConstant(), 0, column.getViewConstant().length - 1) != 0) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_UPDATE_VIEW_COLUMN)
                            .setColumnName(column.getName().getString())
                            .setMessage("value=" + constantExpression.toString()).build().buildException();
                }
                values[nodeIndex] = ByteUtil.copyKeyBytesIfNecessary(ptr);
                nodeIndex++;
            }
            // Add columns based on view
            for (PColumn column : addViewColumns) {
                if (IndexUtil.getViewConstantValue(column, ptr)) {
                    values[nodeIndex++] = ByteUtil.copyKeyBytesIfNecessary(ptr);
                } else {
                    throw new IllegalStateException();
                }
            }
            Map<ImmutableBytesPtr, RowMutationState> mutation = Maps.newHashMapWithExpectedSize(1);
            setValues(values, pkSlotIndexes, columnIndexes, table, mutation, statement, useServerTimestamp);
            return new MutationState(tableRef, mutation, 0, maxSize, connection);
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            List<String> planSteps = Lists.newArrayListWithExpectedSize(2);
            if (context.getSequenceManager().getSequenceCount() > 0) {
                planSteps.add(
                        "CLIENT RESERVE " + context.getSequenceManager().getSequenceCount() + " SEQUENCES");
            }
            planSteps.add("PUT SINGLE ROW");
            return new ExplainPlan(planSteps);
        }

    };
}