Example usage for com.google.common.collect Sets newHashSetWithExpectedSize

List of usage examples for com.google.common.collect Sets newHashSetWithExpectedSize

Introduction

On this page you can find example usages of com.google.common.collect.Sets#newHashSetWithExpectedSize.

Prototype

public static <E> HashSet<E> newHashSetWithExpectedSize(int expectedSize) 

Source Link

Document

Creates a HashSet instance, with a high enough initial table size that it should hold expectedSize elements without resizing.

Usage

From source file:org.summer.dsl.xbase.typesystem.internal.AbstractLinkingCandidate.java

/**
 * Collects type-parameter hints for {@code type} against the expected type of the given
 * expectation, then returns {@code type} unchanged.
 *
 * <p>If the expectation carries no expected type, this is a no-op. Otherwise a
 * {@link ExpectationTypeParameterHintCollector} walks the pair (expectedType, type) and
 * records hints on unbound type references; the traverser is customized so that
 * constraint-derived hints are propagated transitively (see inline comments).
 *
 * @param expectation the expectation whose expected type (if any) drives hint collection
 * @param type the actual type reference; returned as-is
 * @return {@code type}, unmodified
 */
protected LightweightTypeReference deferredBindTypeArgument(ITypeExpectation expectation,
        LightweightTypeReference type) {
    LightweightTypeReference expectedType = expectation.getExpectedType();
    if (expectedType != null) {
        ExpectationTypeParameterHintCollector collector = new ExpectationTypeParameterHintCollector(
                state.getReferenceOwner()) {
            @Override
            protected UnboundTypeReferenceTraverser createUnboundTypeReferenceTraverser() {
                return new UnboundTypeParameterHintCollector() {
                    // Guards the transitive hint propagation below: each unbound parameter's
                    // constraint hints are re-visited at most once per traversal, which also
                    // prevents infinite recursion on self-referential bounds.
                    Set<Object> seenParameters = Sets.newHashSetWithExpectedSize(3);

                    @Override
                    protected void doVisitTypeReference(LightweightTypeReference reference,
                            UnboundTypeReference declaration) {
                        if (declaration.internalIsResolved()
                                || getOwner().isResolved(declaration.getHandle())) {
                            // Already resolved (or resolvable): resolve and recurse into the
                            // outer traversal with the current variance context.
                            declaration.tryResolve();
                            outerVisit(declaration, reference, declaration, getExpectedVariance(),
                                    getActualVariance());
                        } else if (reference.isValidHint()) {
                            addHint(declaration, reference);
                            if (seenParameters.add(declaration.getHandle())) {
                                /*
                                 * If we add hints like CharIterable extends Iterable<Character> 
                                 * for a type parameter V in <T, V extends Iterable<T>>, we want to
                                 * add a hint for the type parameter T, too.
                                 */
                                List<LightweightBoundTypeArgument> hints = getState().getResolvedTypes()
                                        .getHints(declaration.getHandle());
                                for (int i = 0; i < hints.size(); i++) {
                                    LightweightBoundTypeArgument hint = hints.get(i);
                                    if (hint.getSource() == BoundTypeArgumentSource.CONSTRAINT) {
                                        outerVisit(hint.getTypeReference(), reference);
                                    }
                                }
                            }
                        }
                    }

                    @Override
                    protected void doVisitUnboundTypeReference(UnboundTypeReference reference,
                            UnboundTypeReference declaration) {
                        // Treat an unbound reference like a plain type reference so the
                        // hint-collection logic above applies to it as well.
                        super.doVisitTypeReference(reference, declaration);
                    }
                };
            }
        };
        collector.processPairedReferences(expectedType, type);
    }
    return type;
}

From source file:com.google.devtools.build.android.ResourceShrinker.java

/**
 * Marks resources as reachable when their names may be referenced indirectly through
 * {@code Resources#getIdentifier(String, String, String)}.
 *
 * <p>Only runs when a {@code getIdentifier} call was seen ({@code mFoundGetIdentifier}) and
 * string pool constants were collected ({@code mStrings}). Each string constant is matched
 * against known resource names in three forms: a bare name ({@code foo}), a relative name
 * ({@code layout/foo}), or a fully qualified name ({@code package:layout/foo}). Pure numeric
 * strings are treated as possible raw resource ids.
 */
private void keepPossiblyReferencedResources() {
    if (!mFoundGetIdentifier || mStrings == null) {
        // No calls to android.content.res.Resources#getIdentifier; no need
        // to worry about string references to resources
        return;
    }
    List<String> strings = new ArrayList<String>(mStrings);
    Collections.sort(strings);
    logger.fine(String.format("android.content.res.Resources#getIdentifier present: %s", mFoundGetIdentifier));
    logger.fine("Referenced Strings:");
    for (String s : strings) {
        s = s.trim().replace("\n", "\\n");
        if (s.length() > 40) {
            // Truncate long strings for readable log output
            s = s.substring(0, 37) + "...";
        } else if (s.isEmpty()) {
            continue;
        }
        logger.fine("  " + s);
    }

    // Union of all known resource names across all resource types
    Set<String> names = Sets.newHashSetWithExpectedSize(50);
    for (Map<String, Resource> map : typeToName.values()) {
        names.addAll(map.keySet());
    }
    for (String string : mStrings) {
        // Check whether the string looks relevant
        // We consider three types of strings:
        //  (1) simple resource names, e.g. "foo" from @layout/foo
        //      These might be the parameter to a getIdentifier() call, or could
        //      be composed into a fully qualified resource name for the getIdentifier()
        //      method. We match these for *all* resource types.
        //  (2) Relative source names, e.g. layout/foo, from @layout/foo
        //      These might be composed into a fully qualified resource name for
        //      getIdentifier().
        //  (3) Fully qualified resource names of the form package:type/name.
        int n = string.length();
        boolean justName = true;
        boolean haveSlash = false;
        for (int i = 0; i < n; i++) {
            char c = string.charAt(i);
            if (c == '/') {
                haveSlash = true;
                justName = false;
            } else if (c == '.' || c == ':') {
                justName = false;
            } else if (!Character.isJavaIdentifierPart(c)) {
                // This shouldn't happen; we've filtered out these strings in
                // the {@link #referencedString} method
                assert false : string;
                break;
            }
        }
        String name;
        if (justName) {
            // Check name (below)
            name = string;
        } else if (!haveSlash) {
            // If we have more than just a symbol name, we expect to also see a slash
            //noinspection UnnecessaryContinue
            continue;
        } else {
            // Try to pick out the resource name pieces; if we can find the
            // resource type unambiguously; if not, just match on names
            int slash = string.indexOf('/');
            assert slash != -1; // checked with haveSlash above
            name = string.substring(slash + 1);
            if (name.isEmpty() || !names.contains(name)) {
                continue;
            }
            // See if have a known specific resource type
            if (slash > 0) {
                int colon = string.indexOf(':');
                String typeName = string.substring(colon != -1 ? colon + 1 : 0, slash);
                ResourceType type = ResourceType.getEnum(typeName);
                if (type == null) {
                    continue;
                }
                Resource resource = getResource(type, name);
                if (resource != null) {
                    logger.fine("Marking " + resource + " used because it " + "matches string pool constant "
                            + string);
                }
                markReachable(resource);
                continue;
            }
            // fall through and check the name
        }
        if (names.contains(name)) {
            for (Map<String, Resource> map : typeToName.values()) {
                // BUGFIX: look up by the extracted resource name, not the raw string.
                // In the fall-through case above (string starts with '/'), name is a
                // substring of string, so map.get(string) could never match.
                Resource resource = map.get(name);
                if (resource != null) {
                    logger.fine("Marking " + resource + " used because it " + "matches string pool constant "
                            + string);
                }
                markReachable(resource);
            }
        } else if (Character.isDigit(name.charAt(0))) {
            // Just a number? There are cases where it calls getIdentifier by
            // a String number; see for example SuggestionsAdapter in the support
            // library which reports supporting a string like "2130837524" and
            // "android.resource://com.android.alarmclock/2130837524".
            try {
                int id = Integer.parseInt(name);
                if (id != 0) {
                    markReachable(valueToResource.get(id));
                }
            } catch (NumberFormatException e) {
                // pass
            }
        }
    }
}

From source file:com.android.tools.lint.checks.ResourceCycleDetector.java

/**
 * Detects reference cycles of the given resource type and records any chains found.
 *
 * <p>Runs a DFS from every key in {@code map}, skipping nodes that already belong to a
 * previously recorded chain. The first time a chain is found, the chain/location maps are
 * lazily created and a re-run over resource files is requested from the lint driver.
 *
 * @param context the lint context used to request a repeat pass
 * @param type the resource type whose reference graph {@code map} describes
 * @param map reference edges: resource name to the names it references
 */
private void findCycles(@NonNull Context context, @NonNull ResourceType type,
        @NonNull Multimap<String, String> map) {
    // Nodes on the current DFS path, and nodes already covered by a recorded chain.
    Set<String> inProgress = Sets.newHashSetWithExpectedSize(map.size());
    Set<String> recorded = Sets.newHashSetWithExpectedSize(map.size());
    for (String start : map.keySet()) {
        if (recorded.contains(start)) {
            continue;
        }
        List<String> cycle = dfs(map, start, inProgress);
        // size 1 chains are handled directly elsewhere
        if (cycle == null || cycle.size() <= 2) {
            continue;
        }
        recorded.addAll(cycle);
        Collections.reverse(cycle);
        if (mChains == null) {
            // First cycle found: allocate the bookkeeping maps and ask the driver
            // for another pass so locations can be attached to the reports.
            mChains = Maps.newEnumMap(ResourceType.class);
            mLocations = Maps.newEnumMap(ResourceType.class);
            context.getDriver().requestRepeat(this, Scope.RESOURCE_FILE_SCOPE);
        }
        List<List<String>> cyclesForType = mChains.get(type);
        if (cyclesForType == null) {
            cyclesForType = Lists.newArrayList();
            mChains.put(type, cyclesForType);
        }
        cyclesForType.add(cycle);
    }
}

From source file:com.google.gerrit.server.project.ChangeControl.java

/**
 * Creates an empty mutable set suitable for holding elements drawn from {@code permSet}.
 *
 * <p>When the input is an {@code EnumSet}, an empty {@code EnumSet} over the same enum type
 * is produced by cloning and clearing (cheaper and more compact than a hash set); otherwise
 * a {@code HashSet} presized for {@code permSet.size()} elements is returned.
 *
 * @param permSet the collection whose element universe the new set should accommodate
 * @return a new empty set; never contains any of {@code permSet}'s elements
 */
static <T extends ChangePermissionOrLabel> Set<T> newSet(Collection<T> permSet) {
    if (!(permSet instanceof EnumSet)) {
        return Sets.newHashSetWithExpectedSize(permSet.size());
    }
    // clone() preserves the enum universe; clear() empties the copy without
    // touching the original. EnumSet.copyOf can't be used here: it fails on
    // an empty non-EnumSet source and we want the EnumSet representation.
    @SuppressWarnings({ "unchecked", "rawtypes" })
    Set<T> copy = ((EnumSet) permSet).clone();
    copy.clear();
    return copy;
}

From source file:org.apache.phoenix.compile.UpsertCompiler.java

public MutationPlan compile(UpsertStatement upsert) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,
            QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    List<ColumnName> columnNodes = upsert.getColumns();
    TableRef tableRefToBe = null;//from www. j  a va2s  . c o m
    PTable table = null;
    Set<PColumn> addViewColumnsToBe = Collections.emptySet();
    Set<PColumn> overlapViewColumnsToBe = Collections.emptySet();
    List<PColumn> allColumnsToBe = Collections.emptyList();
    boolean isTenantSpecific = false;
    boolean isSharedViewIndex = false;
    String tenantIdStr = null;
    ColumnResolver resolver = null;
    int[] columnIndexesToBe;
    int nColumnsToSet = 0;
    int[] pkSlotIndexesToBe;
    List<ParseNode> valueNodes = upsert.getValues();
    List<PColumn> targetColumns;
    NamedTableNode tableNode = upsert.getTable();
    String tableName = tableNode.getName().getTableName();
    String schemaName = tableNode.getName().getSchemaName();
    QueryPlan queryPlanToBe = null;
    int nValuesToSet;
    boolean sameTable = false;
    boolean runOnServer = false;
    UpsertingParallelIteratorFactory parallelIteratorFactoryToBe = null;
    // Retry once if auto commit is off, as the meta data may
    // be out of date. We do not retry if auto commit is on, as we
    // update the cache up front when we create the resolver in that case.
    boolean retryOnce = !connection.getAutoCommit();
    boolean useServerTimestampToBe = false;
    while (true) {
        try {
            resolver = FromCompiler.getResolverForMutation(upsert, connection);
            tableRefToBe = resolver.getTables().get(0);
            table = tableRefToBe.getTable();
            if (table.getType() == PTableType.VIEW) {
                if (table.getViewType().isReadOnly()) {
                    throw new ReadOnlyTableException(schemaName, tableName);
                }
            }
            boolean isSalted = table.getBucketNum() != null;
            isTenantSpecific = table.isMultiTenant() && connection.getTenantId() != null;
            isSharedViewIndex = table.getViewIndexId() != null;
            tenantIdStr = isTenantSpecific ? connection.getTenantId().getString() : null;
            int posOffset = isSalted ? 1 : 0;
            // Setup array of column indexes parallel to values that are going to be set
            allColumnsToBe = table.getColumns();

            nColumnsToSet = 0;
            if (table.getViewType() == ViewType.UPDATABLE) {
                addViewColumnsToBe = Sets.newLinkedHashSetWithExpectedSize(allColumnsToBe.size());
                for (PColumn column : allColumnsToBe) {
                    if (column.getViewConstant() != null) {
                        addViewColumnsToBe.add(column);
                    }
                }
            }
            ImmutableBytesWritable ptr = new ImmutableBytesWritable();
            // Allow full row upsert if no columns or only dynamic ones are specified and values count match
            if (columnNodes.isEmpty() || columnNodes.size() == upsert.getTable().getDynamicColumns().size()) {
                nColumnsToSet = allColumnsToBe.size() - posOffset;
                columnIndexesToBe = new int[nColumnsToSet];
                pkSlotIndexesToBe = new int[columnIndexesToBe.length];
                targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
                targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
                int minPKPos = 0;
                if (isTenantSpecific) {
                    PColumn tenantColumn = table.getPKColumns().get(minPKPos);
                    columnIndexesToBe[minPKPos] = tenantColumn.getPosition();
                    targetColumns.set(minPKPos, tenantColumn);
                    minPKPos++;
                }
                if (isSharedViewIndex) {
                    PColumn indexIdColumn = table.getPKColumns().get(minPKPos);
                    columnIndexesToBe[minPKPos] = indexIdColumn.getPosition();
                    targetColumns.set(minPKPos, indexIdColumn);
                    minPKPos++;
                }
                for (int i = posOffset, j = 0; i < allColumnsToBe.size(); i++) {
                    PColumn column = allColumnsToBe.get(i);
                    if (SchemaUtil.isPKColumn(column)) {
                        pkSlotIndexesToBe[i - posOffset] = j + posOffset;
                        if (j++ < minPKPos) { // Skip, as it's already been set above
                            continue;
                        }
                        minPKPos = 0;
                    }
                    columnIndexesToBe[i - posOffset + minPKPos] = i;
                    targetColumns.set(i - posOffset + minPKPos, column);
                }
                if (!addViewColumnsToBe.isEmpty()) {
                    // All view columns overlap in this case
                    overlapViewColumnsToBe = addViewColumnsToBe;
                    addViewColumnsToBe = Collections.emptySet();
                }
            } else {
                // Size for worse case
                int numColsInUpsert = columnNodes.size();
                nColumnsToSet = numColsInUpsert + addViewColumnsToBe.size() + (isTenantSpecific ? 1 : 0)
                        + +(isSharedViewIndex ? 1 : 0);
                columnIndexesToBe = new int[nColumnsToSet];
                pkSlotIndexesToBe = new int[columnIndexesToBe.length];
                targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
                targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
                Arrays.fill(columnIndexesToBe, -1); // TODO: necessary? So we'll get an AIOB exception if it's not replaced
                Arrays.fill(pkSlotIndexesToBe, -1); // TODO: necessary? So we'll get an AIOB exception if it's not replaced
                BitSet pkColumnsSet = new BitSet(table.getPKColumns().size());
                int i = 0;
                // Add tenant column directly, as we don't want to resolve it as this will fail
                if (isTenantSpecific) {
                    PColumn tenantColumn = table.getPKColumns().get(i + posOffset);
                    columnIndexesToBe[i] = tenantColumn.getPosition();
                    pkColumnsSet.set(pkSlotIndexesToBe[i] = i + posOffset);
                    targetColumns.set(i, tenantColumn);
                    i++;
                }
                if (isSharedViewIndex) {
                    PColumn indexIdColumn = table.getPKColumns().get(i + posOffset);
                    columnIndexesToBe[i] = indexIdColumn.getPosition();
                    pkColumnsSet.set(pkSlotIndexesToBe[i] = i + posOffset);
                    targetColumns.set(i, indexIdColumn);
                    i++;
                }
                for (ColumnName colName : columnNodes) {
                    ColumnRef ref = resolver.resolveColumn(null, colName.getFamilyName(),
                            colName.getColumnName());
                    PColumn column = ref.getColumn();
                    if (IndexUtil.getViewConstantValue(column, ptr)) {
                        if (overlapViewColumnsToBe.isEmpty()) {
                            overlapViewColumnsToBe = Sets.newHashSetWithExpectedSize(addViewColumnsToBe.size());
                        }
                        nColumnsToSet--;
                        overlapViewColumnsToBe.add(column);
                        addViewColumnsToBe.remove(column);
                    }
                    columnIndexesToBe[i] = ref.getColumnPosition();
                    targetColumns.set(i, column);
                    if (SchemaUtil.isPKColumn(column)) {
                        pkColumnsSet.set(pkSlotIndexesToBe[i] = ref.getPKSlotPosition());
                    }
                    i++;
                }
                for (PColumn column : addViewColumnsToBe) {
                    columnIndexesToBe[i] = column.getPosition();
                    targetColumns.set(i, column);
                    if (SchemaUtil.isPKColumn(column)) {
                        pkColumnsSet.set(pkSlotIndexesToBe[i] = SchemaUtil.getPKPosition(table, column));
                    }
                    i++;
                }
                // If a table has rowtimestamp col, then we always set it.
                useServerTimestampToBe = table.getRowTimestampColPos() != -1
                        && !isRowTimestampSet(pkSlotIndexesToBe, table);
                if (useServerTimestampToBe) {
                    PColumn rowTimestampCol = table.getPKColumns().get(table.getRowTimestampColPos());
                    // Need to resize columnIndexesToBe and pkSlotIndexesToBe to include this extra column.
                    columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, columnIndexesToBe.length + 1);
                    pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, pkSlotIndexesToBe.length + 1);
                    columnIndexesToBe[i] = rowTimestampCol.getPosition();
                    pkColumnsSet.set(pkSlotIndexesToBe[i] = table.getRowTimestampColPos());
                    targetColumns.add(rowTimestampCol);
                    if (valueNodes != null && !valueNodes.isEmpty()) {
                        valueNodes.add(getNodeForRowTimestampColumn(rowTimestampCol));
                    }
                    nColumnsToSet++;
                }
                for (i = posOffset; i < table.getPKColumns().size(); i++) {
                    PColumn pkCol = table.getPKColumns().get(i);
                    if (!pkColumnsSet.get(i)) {
                        if (!pkCol.isNullable()) {
                            throw new ConstraintViolationException(table.getName().getString() + "."
                                    + pkCol.getName().getString() + " may not be null");
                        }
                    }
                }
            }
            boolean isAutoCommit = connection.getAutoCommit();
            if (valueNodes == null) {
                SelectStatement select = upsert.getSelect();
                assert (select != null);
                select = SubselectRewriter.flatten(select, connection);
                ColumnResolver selectResolver = FromCompiler.getResolverForQuery(select, connection);
                select = StatementNormalizer.normalize(select, selectResolver);
                select = prependTenantAndViewConstants(table, select, tenantIdStr, addViewColumnsToBe,
                        useServerTimestampToBe);
                SelectStatement transformedSelect = SubqueryRewriter.transform(select, selectResolver,
                        connection);
                if (transformedSelect != select) {
                    selectResolver = FromCompiler.getResolverForQuery(transformedSelect, connection);
                    select = StatementNormalizer.normalize(transformedSelect, selectResolver);
                }
                sameTable = !select.isJoin() && tableRefToBe.equals(selectResolver.getTables().get(0));
                tableRefToBe = adjustTimestampToMinOfSameTable(tableRefToBe, selectResolver.getTables());
                /* We can run the upsert in a coprocessor if:
                 * 1) from has only 1 table and the into table matches from table
                 * 2) the select query isn't doing aggregation (which requires a client-side final merge)
                 * 3) autoCommit is on
                 * 4) the table is not immutable with indexes, as the client is the one that figures out the additional
                 *    puts for index tables.
                 * 5) no limit clause, as the limit clause requires client-side post processing
                 * 6) no sequences, as sequences imply that the order of upsert must match the order of
                 *    selection.
                 * Otherwise, run the query to pull the data from the server
                 * and populate the MutationState (upto a limit).
                */
                if (!(select.isAggregate() || select.isDistinct() || select.getLimit() != null
                        || select.hasSequence())) {
                    // We can pipeline the upsert select instead of spooling everything to disk first,
                    // if we don't have any post processing that's required.
                    parallelIteratorFactoryToBe = new UpsertingParallelIteratorFactory(connection, tableRefToBe,
                            useServerTimestampToBe);
                    // If we're in the else, then it's not an aggregate, distinct, limited, or sequence using query,
                    // so we might be able to run it entirely on the server side.
                    // For a table with row timestamp column, we can't guarantee that the row key will reside in the
                    // region space managed by region servers. So we bail out on executing on server side.
                    runOnServer = sameTable && isAutoCommit
                            && !(table.isImmutableRows() && !table.getIndexes().isEmpty())
                            && table.getRowTimestampColPos() == -1;
                }
                // If we may be able to run on the server, add a hint that favors using the data table
                // if all else is equal.
                // TODO: it'd be nice if we could figure out in advance if the PK is potentially changing,
                // as this would disallow running on the server. We currently use the row projector we
                // get back to figure this out.
                HintNode hint = upsert.getHint();
                if (!upsert.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
                    hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
                }
                select = SelectStatement.create(select, hint);
                // Pass scan through if same table in upsert and select so that projection is computed correctly
                // Use optimizer to choose the best plan
                try {
                    QueryCompiler compiler = new QueryCompiler(statement, select, selectResolver, targetColumns,
                            parallelIteratorFactoryToBe, new SequenceManager(statement), false);
                    queryPlanToBe = compiler.compile();
                    // This is post-fix: if the tableRef is a projected table, this means there are post-processing 
                    // steps and parallelIteratorFactory did not take effect.
                    if (queryPlanToBe.getTableRef().getTable().getType() == PTableType.PROJECTED
                            || queryPlanToBe.getTableRef().getTable().getType() == PTableType.SUBQUERY) {
                        parallelIteratorFactoryToBe = null;
                    }
                } catch (MetaDataEntityNotFoundException e) {
                    retryOnce = false; // don't retry if select clause has meta data entities that aren't found, as we already updated the cache
                    throw e;
                }
                nValuesToSet = queryPlanToBe.getProjector().getColumnCount();
                // Cannot auto commit if doing aggregation or topN or salted
                // Salted causes problems because the row may end up living on a different region
            } else {
                nValuesToSet = valueNodes.size() + addViewColumnsToBe.size() + (isTenantSpecific ? 1 : 0)
                        + (isSharedViewIndex ? 1 : 0);
            }
            // Resize down to allow a subset of columns to be specifiable
            if (columnNodes.isEmpty() && columnIndexesToBe.length >= nValuesToSet) {
                nColumnsToSet = nValuesToSet;
                columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, nValuesToSet);
                pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, nValuesToSet);
            }

            if (nValuesToSet != nColumnsToSet) {
                // We might have added columns, so refresh cache and try again if stale.
                // Note that this check is not really sufficient, as a column could have
                // been removed and the added back and we wouldn't detect that here.
                if (retryOnce) {
                    retryOnce = false;
                    if (new MetaDataClient(connection).updateCache(schemaName, tableName).wasUpdated()) {
                        continue;
                    }
                }
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.UPSERT_COLUMN_NUMBERS_MISMATCH)
                        .setMessage(
                                "Numbers of columns: " + nColumnsToSet + ". Number of values: " + nValuesToSet)
                        .build().buildException();
            }
        } catch (MetaDataEntityNotFoundException e) {
            // Catch column/column family not found exception, as our meta data may
            // be out of sync. Update the cache once and retry if we were out of sync.
            // Otherwise throw, as we'll just get the same error next time.
            if (retryOnce) {
                retryOnce = false;
                if (new MetaDataClient(connection).updateCache(schemaName, tableName).wasUpdated()) {
                    continue;
                }
            }
            throw e;
        }
        break;
    }

    RowProjector projectorToBe = null;
    // Optimize only after all checks have been performed
    if (valueNodes == null) {
        queryPlanToBe = new QueryOptimizer(services).optimize(queryPlanToBe, statement, targetColumns,
                parallelIteratorFactoryToBe);
        projectorToBe = queryPlanToBe.getProjector();
        runOnServer &= queryPlanToBe.getTableRef().equals(tableRefToBe);
    }
    final List<PColumn> allColumns = allColumnsToBe;
    final RowProjector projector = projectorToBe;
    final QueryPlan queryPlan = queryPlanToBe;
    final TableRef tableRef = tableRefToBe;
    final Set<PColumn> addViewColumns = addViewColumnsToBe;
    final Set<PColumn> overlapViewColumns = overlapViewColumnsToBe;
    final UpsertingParallelIteratorFactory parallelIteratorFactory = parallelIteratorFactoryToBe;
    final int[] columnIndexes = columnIndexesToBe;
    final int[] pkSlotIndexes = pkSlotIndexesToBe;
    final boolean useServerTimestamp = useServerTimestampToBe;
    if (table.getRowTimestampColPos() == -1 && useServerTimestamp) {
        throw new IllegalStateException(
                "For a table without row timestamp column, useServerTimestamp cannot be true");
    }
    // TODO: break this up into multiple functions
    ////////////////////////////////////////////////////////////////////
    // UPSERT SELECT
    /////////////////////////////////////////////////////////////////////
    if (valueNodes == null) {
        // Before we re-order, check that for updatable view columns
        // the projected expression either matches the column name or
        // is a constant with the same required value.
        throwIfNotUpdatable(tableRef, overlapViewColumnsToBe, targetColumns, projector, sameTable);

        ////////////////////////////////////////////////////////////////////
        // UPSERT SELECT run server-side (maybe)
        /////////////////////////////////////////////////////////////////////
        if (runOnServer) {
            // At most this array will grow bigger by the number of PK columns
            int[] allColumnsIndexes = Arrays.copyOf(columnIndexes, columnIndexes.length + nValuesToSet);
            int[] reverseColumnIndexes = new int[table.getColumns().size()];
            List<Expression> projectedExpressions = Lists
                    .newArrayListWithExpectedSize(reverseColumnIndexes.length);
            Arrays.fill(reverseColumnIndexes, -1);
            for (int i = 0; i < nValuesToSet; i++) {
                projectedExpressions.add(projector.getColumnProjector(i).getExpression());
                reverseColumnIndexes[columnIndexes[i]] = i;
            }
            /*
             * Order projected columns and projected expressions with PK columns
             * leading order by slot position
             */
            int offset = table.getBucketNum() == null ? 0 : 1;
            for (int i = 0; i < table.getPKColumns().size() - offset; i++) {
                PColumn column = table.getPKColumns().get(i + offset);
                int pos = reverseColumnIndexes[column.getPosition()];
                if (pos == -1) {
                    // Last PK column may be fixed width and nullable
                    // We don't want to insert a null expression b/c
                    // it's not valid to set a fixed width type to null.
                    if (column.getDataType().isFixedWidth()) {
                        continue;
                    }
                    // Add literal null for missing PK columns
                    pos = projectedExpressions.size();
                    Expression literalNull = LiteralExpression.newConstant(null, column.getDataType(),
                            Determinism.ALWAYS);
                    projectedExpressions.add(literalNull);
                    allColumnsIndexes[pos] = column.getPosition();
                }
                // Swap select expression at pos with i
                Collections.swap(projectedExpressions, i, pos);
                // Swap column indexes and reverse column indexes too
                int tempPos = allColumnsIndexes[i];
                allColumnsIndexes[i] = allColumnsIndexes[pos];
                allColumnsIndexes[pos] = tempPos;
                reverseColumnIndexes[tempPos] = reverseColumnIndexes[i];
                reverseColumnIndexes[i] = i;
            }
            // If any pk slots are changing, be conservative and don't run this server side.
            // If the row ends up living in a different region, we'll get an error otherwise.
            for (int i = 0; i < table.getPKColumns().size(); i++) {
                PColumn column = table.getPKColumns().get(i);
                Expression source = projectedExpressions.get(i);
                if (source == null || !source
                        .equals(new ColumnRef(tableRef, column.getPosition()).newColumnExpression())) {
                    // TODO: we could check the region boundaries to see if the pk will still be in it.
                    runOnServer = false; // bail on running server side, since PK may be changing
                    break;
                }
            }

            ////////////////////////////////////////////////////////////////////
            // UPSERT SELECT run server-side
            /////////////////////////////////////////////////////////////////////
            if (runOnServer) {
                // Iterate through columns being projected
                List<PColumn> projectedColumns = Lists
                        .newArrayListWithExpectedSize(projectedExpressions.size());
                for (int i = 0; i < projectedExpressions.size(); i++) {
                    // Must make new column if position has changed
                    PColumn column = allColumns.get(allColumnsIndexes[i]);
                    projectedColumns.add(column.getPosition() == i ? column : new PColumnImpl(column, i));
                }
                // Build table from projectedColumns
                PTable projectedTable = PTableImpl.makePTable(table, projectedColumns);

                SelectStatement select = SelectStatement.create(SelectStatement.COUNT_ONE, upsert.getHint());
                final RowProjector aggProjector = ProjectionCompiler.compile(queryPlan.getContext(), select,
                        GroupBy.EMPTY_GROUP_BY);
                /*
                 * Transfer over PTable representing subset of columns selected, but all PK columns.
                 * Move columns setting PK first in pkSlot order, adding LiteralExpression of null for any missing ones.
                 * Transfer over List<Expression> for projection.
                 * In region scan, evaluate expressions in order, collecting first n columns for PK and collection non PK in mutation Map
                 * Create the PRow and get the mutations, adding them to the batch
                 */
                final StatementContext context = queryPlan.getContext();
                final Scan scan = context.getScan();
                scan.setAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE,
                        UngroupedAggregateRegionObserver.serialize(projectedTable));
                scan.setAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS,
                        UngroupedAggregateRegionObserver.serialize(projectedExpressions));

                // Ignore order by - it has no impact
                final QueryPlan aggPlan = new AggregatePlan(context, select, tableRef, aggProjector, null,
                        OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
                // Server-side execution plan: the aggregate COUNT(1) scan carries the
                // projected table/expressions as scan attributes so the region observer
                // performs the upserts; only the row count travels back to the client.
                return new MutationPlan() {

                    @Override
                    public PhoenixConnection getConnection() {
                        return connection;
                    }

                    @Override
                    public ParameterMetaData getParameterMetaData() {
                        return queryPlan.getContext().getBindManager().getParameterMetaData();
                    }

                    @Override
                    public StatementContext getContext() {
                        return queryPlan.getContext();
                    }

                    @Override
                    public MutationState execute() throws SQLException {
                        ImmutableBytesWritable ptr = context.getTempPtr();
                        // Serializes the table's index maintainers into ptr; a non-empty
                        // result means index metadata must be shipped to the servers.
                        tableRef.getTable().getIndexMaintainers(ptr, context.getConnection());
                        ServerCache cache = null;
                        try {
                            if (ptr.getLength() > 0) {
                                IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection,
                                        tableRef);
                                cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
                                byte[] uuidValue = cache.getId();
                                // Tag the scan so servers can locate the cached index metadata.
                                scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                            }
                            ResultIterator iterator = aggPlan.iterator();
                            try {
                                // Single aggregate row: column 0 is the count of rows upserted.
                                Tuple row = iterator.next();
                                final long mutationCount = (Long) aggProjector.getColumnProjector(0)
                                        .getValue(row, PLong.INSTANCE, ptr);
                                return new MutationState(maxSize, connection) {
                                    @Override
                                    public long getUpdateCount() {
                                        return mutationCount;
                                    }
                                };
                            } finally {
                                iterator.close();
                            }
                        } finally {
                            // Release the server cache even if the scan fails.
                            if (cache != null) {
                                cache.close();
                            }
                        }
                    }

                    @Override
                    public ExplainPlan getExplainPlan() throws SQLException {
                        List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
                        List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
                        planSteps.add("UPSERT ROWS");
                        planSteps.addAll(queryPlanSteps);
                        return new ExplainPlan(planSteps);
                    }
                };
            }
        }
        ////////////////////////////////////////////////////////////////////
        // UPSERT SELECT run client-side
        /////////////////////////////////////////////////////////////////////
        // Client-side execution plan: the SELECT runs on the client and each row is
        // turned into an upsert, either inline (no parallel factory) or via the
        // parallel mutating iterators which report per-chunk row counts.
        return new MutationPlan() {

            @Override
            public PhoenixConnection getConnection() {
                return connection;
            }

            @Override
            public ParameterMetaData getParameterMetaData() {
                return queryPlan.getContext().getBindManager().getParameterMetaData();
            }

            @Override
            public StatementContext getContext() {
                return queryPlan.getContext();
            }

            @Override
            public MutationState execute() throws SQLException {
                ResultIterator iterator = queryPlan.iterator();
                if (parallelIteratorFactory == null) {
                    // Serial path: consume the iterator directly and upsert row by row.
                    return upsertSelect(new StatementContext(statement), tableRef, projector, iterator,
                            columnIndexes, pkSlotIndexes, useServerTimestamp);
                }
                try {
                    // Parallel path: the factory's iterators perform the upserts; each
                    // tuple they emit carries the count of rows mutated by that chunk.
                    parallelIteratorFactory.setRowProjector(projector);
                    parallelIteratorFactory.setColumnIndexes(columnIndexes);
                    parallelIteratorFactory.setPkSlotIndexes(pkSlotIndexes);
                    Tuple tuple;
                    long totalRowCount = 0;
                    StatementContext context = queryPlan.getContext();
                    while ((tuple = iterator.next()) != null) {// Runs query
                        Cell kv = tuple.getValue(0);
                        totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(),
                                kv.getValueOffset(), SortOrder.getDefault());
                    }
                    // Return total number of rows that have been updated. In the case of auto commit being off
                    // the mutations will all be in the mutation state of the current connection.
                    MutationState mutationState = new MutationState(maxSize, statement.getConnection(),
                            totalRowCount);
                    /*
                     *  All the metrics collected for measuring the reads done by the parallel mutating iterators
                     *  is included in the ReadMetricHolder of the statement context. Include these metrics in the
                     *  returned mutation state so they can be published on commit. 
                     */
                    mutationState.setReadMetricQueue(context.getReadMetricsQueue());
                    return mutationState;
                } finally {
                    iterator.close();
                }
            }

            @Override
            public ExplainPlan getExplainPlan() throws SQLException {
                List<String> queryPlanSteps = queryPlan.getExplainPlan().getPlanSteps();
                List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
                planSteps.add("UPSERT SELECT");
                planSteps.addAll(queryPlanSteps);
                return new ExplainPlan(planSteps);
            }

        };
    }

    ////////////////////////////////////////////////////////////////////
    // UPSERT VALUES
    /////////////////////////////////////////////////////////////////////
    final byte[][] values = new byte[nValuesToSet][];
    int nodeIndex = 0;
    if (isTenantSpecific) {
        PName tenantId = connection.getTenantId();
        values[nodeIndex++] = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null,
                tenantId);
    }
    if (isSharedViewIndex) {
        values[nodeIndex++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
    }

    final int nodeIndexOffset = nodeIndex;
    // Allocate array based on size of all columns in table,
    // since some values may not be set (if they're nullable).
    final StatementContext context = new StatementContext(statement, resolver, new Scan(),
            new SequenceManager(statement));
    UpsertValuesCompiler expressionBuilder = new UpsertValuesCompiler(context);
    final List<Expression> constantExpressions = Lists.newArrayListWithExpectedSize(valueNodes.size());
    // First build all the expressions, as with sequences we want to collect them all first
    // and initialize them in one batch
    for (ParseNode valueNode : valueNodes) {
        if (!valueNode.isStateless()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.VALUE_IN_UPSERT_NOT_CONSTANT).build()
                    .buildException();
        }
        PColumn column = allColumns.get(columnIndexes[nodeIndex]);
        expressionBuilder.setColumn(column);
        Expression expression = valueNode.accept(expressionBuilder);
        if (expression.getDataType() != null && !expression.getDataType().isCastableTo(column.getDataType())) {
            throw TypeMismatchException.newException(expression.getDataType(), column.getDataType(),
                    "expression: " + expression.toString() + " in column " + column);
        }
        constantExpressions.add(expression);
        nodeIndex++;
    }
    // UPSERT VALUES plan: evaluates the pre-built constant expressions at execute
    // time (after sequences are reserved), coerces each value to its target column
    // type, and produces a single-row mutation.
    return new MutationPlan() {

        @Override
        public PhoenixConnection getConnection() {
            return connection;
        }

        @Override
        public ParameterMetaData getParameterMetaData() {
            return context.getBindManager().getParameterMetaData();
        }

        @Override
        public StatementContext getContext() {
            return context;
        }

        @Override
        public MutationState execute() throws SQLException {
            ImmutableBytesWritable ptr = context.getTempPtr();
            final SequenceManager sequenceManager = context.getSequenceManager();
            // Next evaluate all the expressions
            int nodeIndex = nodeIndexOffset;
            PTable table = tableRef.getTable();
            // Sequence values (if any) are resolved once and exposed through a tuple
            // that the expressions evaluate against.
            Tuple tuple = sequenceManager.getSequenceCount() == 0 ? null
                    : sequenceManager.newSequenceTuple(null);
            for (Expression constantExpression : constantExpressions) {
                PColumn column = allColumns.get(columnIndexes[nodeIndex]);
                constantExpression.evaluate(tuple, ptr);
                Object value = null;
                if (constantExpression.getDataType() != null) {
                    value = constantExpression.getDataType().toObject(ptr, constantExpression.getSortOrder(),
                            constantExpression.getMaxLength(), constantExpression.getScale());
                    // Reject values whose runtime type cannot be coerced to the column type.
                    if (!constantExpression.getDataType().isCoercibleTo(column.getDataType(), value)) {
                        throw TypeMismatchException.newException(constantExpression.getDataType(),
                                column.getDataType(),
                                "expression: " + constantExpression.toString() + " in column " + column);
                    }
                    // Reject values that exceed the column's declared length/scale.
                    if (!column.getDataType().isSizeCompatible(ptr, value, constantExpression.getDataType(),
                            constantExpression.getMaxLength(), constantExpression.getScale(),
                            column.getMaxLength(), column.getScale())) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY)
                                .setColumnName(column.getName().getString())
                                .setMessage("value=" + constantExpression.toString()).build().buildException();
                    }
                }
                // Rewrite ptr in place into the column's type, length, scale and sort order.
                column.getDataType().coerceBytes(ptr, value, constantExpression.getDataType(),
                        constantExpression.getMaxLength(), constantExpression.getScale(),
                        constantExpression.getSortOrder(), column.getMaxLength(), column.getScale(),
                        column.getSortOrder(), table.rowKeyOrderOptimizable());
                // A column shared with a view constant may only be set to that constant value.
                if (overlapViewColumns.contains(column)
                        && Bytes.compareTo(ptr.get(), ptr.getOffset(), ptr.getLength(),
                                column.getViewConstant(), 0, column.getViewConstant().length - 1) != 0) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_UPDATE_VIEW_COLUMN)
                            .setColumnName(column.getName().getString())
                            .setMessage("value=" + constantExpression.toString()).build().buildException();
                }
                values[nodeIndex] = ByteUtil.copyKeyBytesIfNecessary(ptr);
                nodeIndex++;
            }
            // Add columns based on view
            for (PColumn column : addViewColumns) {
                if (IndexUtil.getViewConstantValue(column, ptr)) {
                    values[nodeIndex++] = ByteUtil.copyKeyBytesIfNecessary(ptr);
                } else {
                    // View columns in addViewColumns are expected to carry a constant;
                    // a miss here indicates inconsistent metadata.
                    throw new IllegalStateException();
                }
            }
            Map<ImmutableBytesPtr, RowMutationState> mutation = Maps.newHashMapWithExpectedSize(1);
            setValues(values, pkSlotIndexes, columnIndexes, table, mutation, statement, useServerTimestamp);
            return new MutationState(tableRef, mutation, 0, maxSize, connection);
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            List<String> planSteps = Lists.newArrayListWithExpectedSize(2);
            if (context.getSequenceManager().getSequenceCount() > 0) {
                planSteps.add(
                        "CLIENT RESERVE " + context.getSequenceManager().getSequenceCount() + " SEQUENCES");
            }
            planSteps.add("PUT SINGLE ROW");
            return new ExplainPlan(planSteps);
        }

    };
}

From source file:com.android.tools.klint.checks.SupportAnnotationDetector.java

/**
 * Lazily builds and caches the permission lookup for the main project:
 * permissions are gathered from every manifest file of the main project
 * and of all libraries it depends on.
 *
 * @param context the UAST context used to reach the lint project model
 * @return the cached {@code PermissionHolder} for this detector instance
 */
private PermissionHolder getPermissions(@NonNull UastAndroidContext context) {
    if (mPermissions != null) {
        return mPermissions;
    }
    // Expected sizes are rough upper bounds for a typical app manifest.
    Set<String> declared = Sets.newHashSetWithExpectedSize(30);
    Set<String> revocable = Sets.newHashSetWithExpectedSize(4);
    JavaContext lintContext = context.getLintContext();
    LintClient client = lintContext.getClient();
    // Permissions may be contributed by the app itself or by any library it uses.
    Project mainProject = lintContext.getMainProject();
    for (File manifest : mainProject.getManifestFiles()) {
        addPermissions(client, declared, revocable, manifest);
    }
    for (Project library : mainProject.getAllLibraries()) {
        for (File manifest : library.getManifestFiles()) {
            addPermissions(client, declared, revocable, manifest);
        }
    }
    mPermissions = new PermissionHolder.SetPermissionLookup(declared, revocable);
    return mPermissions;
}

From source file:org.n52.sos.ds.hibernate.HibernateFeatureQueryHandler.java

/**
 * Inserts the given sampling feature if no matching persistent feature
 * exists yet, recursively inserting its sampled ("parent") features first,
 * and returns the persistent entity.
 *
 * @param samplingFeature the feature to persist
 * @param session         the Hibernate session to use
 * @return the existing or newly persisted {@code FeatureOfInterest}
 * @throws OwsExceptionReport if the datasource is not spatial or persistence fails
 */
protected FeatureOfInterest insertFeatureOfInterest(final SamplingFeature samplingFeature,
        final Session session) throws OwsExceptionReport {
    // Full encoded feature insertion is only supported for spatial datasources.
    if (!GeometryHandler.getInstance().isSpatialDatasource()) {
        throw new NotYetSupportedException("Insertion of full encoded features for non spatial datasources");
    }
    FeatureOfInterestDAO featureOfInterestDAO = new FeatureOfInterestDAO();
    final String newId = samplingFeature.getIdentifierCodeWithAuthority().getValue();
    // Look up by identifier/geometry; only insert when nothing matches.
    FeatureOfInterest feature = getFeatureOfInterest(newId, samplingFeature.getGeometry(), session);
    if (feature == null) {
        feature = new TFeatureOfInterest();
        featureOfInterestDAO.addIdentifierNameDescription(samplingFeature, feature, session);
        processGeometryPreSave(samplingFeature, feature, session);
        if (samplingFeature.isSetXmlDescription()) {
            feature.setDescriptionXml(samplingFeature.getXmlDescription());
        }
        if (samplingFeature.isSetFeatureType()) {
            feature.setFeatureOfInterestType(new FeatureOfInterestTypeDAO()
                    .getOrInsertFeatureOfInterestType(samplingFeature.getFeatureType(), session));
        }
        if (samplingFeature.isSetSampledFeatures()) {
            // Recursively persist sampled features and link them as parents;
            // features with the UNKNOWN identifier are skipped.
            Set<FeatureOfInterest> parents = Sets
                    .newHashSetWithExpectedSize(samplingFeature.getSampledFeatures().size());
            for (AbstractFeature sampledFeature : samplingFeature.getSampledFeatures()) {
                if (!OGCConstants.UNKNOWN.equals(sampledFeature.getIdentifierCodeWithAuthority().getValue())) {
                    if (sampledFeature instanceof SamplingFeature) {
                        parents.add(insertFeatureOfInterest((SamplingFeature) sampledFeature, session));
                    } else {
                        // Non-sampling features are persisted as identifier-only stubs.
                        parents.add(insertFeatureOfInterest(
                                new SamplingFeature(sampledFeature.getIdentifierCodeWithAuthority()), session));
                    }
                }
            }
            ((TFeatureOfInterest) feature).setParents(parents);
        }
        // Save, flush and refresh so generated state is visible before the
        // name/description rows (which reference the feature) are inserted.
        session.save(feature);
        session.flush();
        session.refresh(feature);
        featureOfInterestDAO.insertNameAndDescription(feature, samplingFeature, session);
    }
    return feature;
}

From source file:architecture.user.MultiProviderUserManager.java

/**
 * Moves the given users to the supplied company in its own transaction,
 * then evicts each affected user from the cache so stale company
 * associations are not served.
 *
 * @param company the company the users should belong to
 * @param users   the users to move
 */
@Transactional(readOnly = false, propagation = Propagation.REQUIRES_NEW)
public void switchCompanies(Company company, List<User> users) {
    Set<Long> userIds = Sets.newHashSetWithExpectedSize(users.size());
    for (User user : users) {
        userIds.add(user.getUserId());
    }
    userDao.switchCompanies(company.getCompanyId(), userIds);
    // Invalidate cache entries only after the database update has been issued.
    for (User user : users) {
        userCache.remove(user.getUserId());
    }
}

From source file:com.palantir.atlasdb.keyvalue.cassandra.CassandraKeyValueService.java

/**
 * Reads the explicitly selected columns for each of the given rows as of
 * {@code startTs}, by expanding (row, column) pairs into cells and loading
 * them in one timestamped batch.
 *
 * @param tableName the table to read from
 * @param rows      the row keys to fetch
 * @param selection the specific columns to fetch; must not be "all columns"
 * @param startTs   the timestamp to read at
 * @return the values found, keyed by cell
 */
private Map<Cell, Value> getRowsForSpecificColumns(final String tableName, final Iterable<byte[]> rows,
        ColumnSelection selection, final long startTs) {
    Preconditions.checkArgument(!selection.allColumnsSelected(), "Must select specific columns");

    // Build the cross product of requested rows and columns up front.
    Collection<byte[]> columns = selection.getSelectedColumns();
    Set<Cell> cells = Sets.newHashSetWithExpectedSize(columns.size() * Iterables.size(rows));
    for (byte[] row : rows) {
        for (byte[] column : columns) {
            cells.add(Cell.create(row, column));
        }
    }

    try {
        StartTsResultsCollector collector = new StartTsResultsCollector(startTs);
        loadWithTs(tableName, cells, startTs, false, collector, readConsistency);
        return collector.collectedResults;
    } catch (Exception e) {
        // Preserve the original failure while avoiding a checked signature.
        throw Throwables.throwUncheckedException(e);
    }
}

From source file:com.opengamma.engine.view.worker.ParallelRecompilationViewProcessWorker.java

/**
 * Decides whether a secondary (parallel recompilation) worker should be
 * started for the primary worker's remaining execution sequence, and keeps
 * the target-resolver change listener registered or removed according to
 * whether the next cycle resolves at LATEST.
 *
 * @param primary  the context of the currently active primary worker
 * @param compiled the compiled view definition whose resolved identifiers
 *                 should be watched for changes
 */
protected void checkForRecompilation(final AbstractViewProcessWorkerContext primary,
        CompiledViewDefinitionWithGraphs compiled) {
    // Only copy the remaining sequence when no secondary worker exists yet;
    // a null tailSequence below means "secondary already running".
    final ViewCycleExecutionSequence tailSequence = (getSecondary() == null) ? primary.getSequence().copy()
            : null;
    final ViewCycleExecutionOptions nextCycle = primary.getSequence().poll(getDefaultExecutionOptions());
    if (nextCycle != null) {
        final VersionCorrection vc = nextCycle.getResolverVersionCorrection();
        boolean changes = false;
        if ((vc == null) || VersionCorrection.LATEST.equals(vc)) {
            // Resolving at LATEST: watch the resolved targets for changes.
            if (_resolverChanges == null) {
                _resolverChanges = new TargetResolverChangeListener() {
                    @Override
                    protected void onChanged() {
                        // Something has changed; request a cycle on the primary and that may then do the necessary
                        ViewProcessWorker worker = null;
                        synchronized (this) {
                            if (!_terminated && (getPrimary() != null)) {
                                worker = getPrimary()._worker;
                            }
                        }
                        // Request the cycle outside the lock to avoid calling alien code while synchronized.
                        if (worker != null) {
                            worker.requestCycle();
                        }
                    }
                };
                getContext().getProcessContext().getFunctionCompilationService().getFunctionCompilationContext()
                        .getRawComputationTargetResolver().changeManager().addChangeListener(_resolverChanges);
            }
            // Narrow the watch set to the identifiers resolved by this compilation,
            // noting (when we might start a secondary) whether any already changed.
            final Collection<UniqueId> uids = compiled.getResolvedIdentifiers().values();
            final Set<ObjectId> oids = Sets.newHashSetWithExpectedSize(uids.size());
            for (UniqueId uid : uids) {
                final ObjectId oid = uid.getObjectId();
                if (tailSequence != null) {
                    changes |= _resolverChanges.isChanged(oid);
                }
                oids.add(oid);
            }
            _resolverChanges.watchOnly(oids);
        } else {
            // Fixed version/correction: change notifications are irrelevant.
            if (_resolverChanges != null) {
                getContext().getProcessContext().getFunctionCompilationService().getFunctionCompilationContext()
                        .getRawComputationTargetResolver().changeManager()
                        .removeChangeListener(_resolverChanges);
                _resolverChanges = null;
            }
        }
        if (tailSequence == null) {
            // Already got a secondary worker; just went this far to update any change listeners
            s_logger.debug("Secondary worker already active");
            return;
        }
        if ((_resolverChanges == null) || changes) {
            startSecondaryWorker(primary, tailSequence);
        }
    }
}