Example usage for com.google.common.collect ImmutableSortedMap copyOf

Introduction

On this page you can find example usages of com.google.common.collect.ImmutableSortedMap.copyOf, collected from open-source projects.

Prototype

public static <K, V> ImmutableSortedMap<K, V> copyOf(
            Iterable<? extends Entry<? extends K, ? extends V>> entries) 
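
A minimal sketch of the prototype above, assuming a Guava version recent enough to ship the Iterable-of-entries overload: copying a mutable map's entry set yields an immutable map sorted by the keys' natural ordering. The keys must be mutually comparable, and duplicate keys make copyOf throw an IllegalArgumentException.

import com.google.common.collect.ImmutableSortedMap;

import java.util.HashMap;
import java.util.Map;

public class CopyOfEntriesDemo {
    public static void main(String[] args) {
        Map<String, Object> context = new HashMap<>();
        context.put("timeout", 30_000);
        context.put("skipEmptyBuckets", true);

        // Sorted by the keys' natural ordering; keys must be Comparable and
        // must not repeat, or copyOf throws IllegalArgumentException.
        ImmutableSortedMap<String, Object> sorted = ImmutableSortedMap.copyOf(context.entrySet());

        System.out.println(sorted.keySet()); // [skipEmptyBuckets, timeout]
    }
}

The same method also accepts a Map directly (ImmutableSortedMap.copyOf(map)), which is the form most of the examples below use.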

Usage

From source file:io.druid.sql.calcite.rel.DruidQuery.java

/**
 * Return this query as a Timeseries query, or null if this query is not compatible with Timeseries.
 *
 * @return query
 */
@Nullable
public TimeseriesQuery toTimeseriesQuery() {
    if (grouping == null || grouping.getHavingFilter() != null) {
        return null;
    }

    final Granularity queryGranularity;
    final boolean descending;

    if (grouping.getDimensions().isEmpty()) {
        queryGranularity = Granularities.ALL;
        descending = false;
    } else if (grouping.getDimensions().size() == 1) {
        final DimensionSpec dimensionSpec = Iterables.getOnlyElement(grouping.getDimensions())
                .toDimensionSpec();
        final Granularity gran = ExtractionFns.toQueryGranularity(dimensionSpec.getExtractionFn());

        if (gran == null || !dimensionSpec.getDimension().equals(Column.TIME_COLUMN_NAME)) {
            // Timeseries only applies if the single dimension is granular __time.
            return null;
        } else {
            queryGranularity = gran;
        }

        if (limitSpec != null) {
            // If there is a limit spec, timeseries cannot LIMIT; and must be ORDER BY time (or nothing).

            if (limitSpec.isLimited()) {
                return null;
            }

            if (limitSpec.getColumns().isEmpty()) {
                descending = false;
            } else {
                // We're ok if the first order by is time (since every time value is distinct, the rest of the columns
                // wouldn't matter anyway).
                final OrderByColumnSpec firstOrderBy = limitSpec.getColumns().get(0);

                if (firstOrderBy.getDimension().equals(dimensionSpec.getOutputName())) {
                    // Order by time.
                    descending = firstOrderBy.getDirection() == OrderByColumnSpec.Direction.DESCENDING;
                } else {
                    // Order by something else.
                    return null;
                }
            }
        } else {
            // No limitSpec.
            descending = false;
        }
    } else {
        // Timeseries cannot handle more than one dimension.
        return null;
    }

    final Filtration filtration = Filtration.create(filter).optimize(sourceRowSignature);
    final Map<String, Object> theContext = Maps.newHashMap();
    theContext.put("skipEmptyBuckets", true);
    theContext.putAll(plannerContext.getQueryContext());

    return new TimeseriesQuery(dataSource, filtration.getQuerySegmentSpec(), descending,
            getVirtualColumns(plannerContext.getExprMacroTable()), filtration.getDimFilter(), queryGranularity,
            grouping.getAggregatorFactories(), grouping.getPostAggregators(),
            ImmutableSortedMap.copyOf(theContext));
}
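
Note the context-handling pattern above: the "skipEmptyBuckets" default goes into a mutable HashMap first, plannerContext.getQueryContext() is overlaid so caller-supplied settings win, and the result is snapshotted with ImmutableSortedMap.copyOf before the TimeseriesQuery is built. Presumably the sorted, immutable copy gives the query a context with deterministic iteration order and equality, independent of the source map's implementation.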

From source file:com.palantir.atlasdb.keyvalue.cassandra.CQLKeyValueService.java

@Override
public void multiPut(Map<String, ? extends Map<Cell, byte[]>> valuesByTable, final long timestamp)
        throws KeyAlreadyExistsException {
    Map<ResultSetFuture, String> resultSetFutures = Maps.newHashMap();
    for (Entry<String, ? extends Map<Cell, byte[]>> e : valuesByTable.entrySet()) {
        final String table = e.getKey();
        // We sort here because some key value stores are more efficient if you store adjacent keys together.
        NavigableMap<Cell, byte[]> sortedMap = ImmutableSortedMap.copyOf(e.getValue());

        Iterable<List<Entry<Cell, byte[]>>> partitions = partitionByCountAndBytes(sortedMap.entrySet(),
                getMultiPutBatchCount(), getMultiPutBatchSizeBytes(), table,
                CQLKeyValueServices.MULTIPUT_ENTRY_SIZING_FUNCTION);

        for (final List<Entry<Cell, byte[]>> p : partitions) {
            List<Entry<Cell, Value>> partition = Lists.transform(p,
                    new Function<Entry<Cell, byte[]>, Entry<Cell, Value>>() {
                        @Override
                        public Entry<Cell, Value> apply(Entry<Cell, byte[]> input) {
                            return Maps.immutableEntry(input.getKey(),
                                    Value.create(input.getValue(), timestamp));
                        }
                    });
            resultSetFutures.put(getPutPartitionResultSetFuture(table, partition, TransactionType.NONE), table);
        }
    }

    for (Entry<ResultSetFuture, String> result : resultSetFutures.entrySet()) {
        ResultSet resultSet;
        try {
            resultSet = result.getKey().getUninterruptibly();
            resultSet.all();
        } catch (Throwable t) {
            throw Throwables.throwUncheckedException(t);
        }
        CQLKeyValueServices.logTracedQuery(getPutQuery(result.getValue(), CassandraConstants.NO_TTL), resultSet,
                session, cqlStatementCache.NORMAL_QUERY);
    }
}
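
The multiPut example relies on copyOf(Map) returning a map ordered by the keys' natural ordering, which is what lets the subsequent partitioning see adjacent keys together. A minimal sketch of that sorted-snapshot step, with String keys standing in for Cell:

import com.google.common.collect.ImmutableSortedMap;

import java.util.HashMap;
import java.util.Map;
import java.util.NavigableMap;

public class SortedSnapshotDemo {
    public static void main(String[] args) {
        Map<String, byte[]> valuesByKey = new HashMap<>();
        valuesByKey.put("row-b", new byte[] { 2 });
        valuesByKey.put("row-a", new byte[] { 1 });

        // ImmutableSortedMap implements NavigableMap, so the copy can be held
        // as a NavigableMap and iterated in ascending key order.
        NavigableMap<String, byte[]> sorted = ImmutableSortedMap.copyOf(valuesByKey);

        for (Map.Entry<String, byte[]> e : sorted.entrySet()) {
            System.out.println(e.getKey()); // row-a, then row-b
        }
    }
}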

From source file:io.druid.sql.calcite.rel.DruidQuery.java

/**
 * Return this query as a TopN query, or null if this query is not compatible with TopN.
 *
 * @return query or null
 */
@Nullable
public TopNQuery toTopNQuery() {
    // Must have GROUP BY one column, ORDER BY zero or one column, limit less than maxTopNLimit, and no HAVING.
    final boolean topNOk = grouping != null && grouping.getDimensions().size() == 1 && limitSpec != null
            && (limitSpec.getColumns().size() <= 1
                    && limitSpec.getLimit() <= plannerContext.getPlannerConfig().getMaxTopNLimit())
            && grouping.getHavingFilter() == null;

    if (!topNOk) {
        return null;
    }

    final DimensionSpec dimensionSpec = Iterables.getOnlyElement(grouping.getDimensions()).toDimensionSpec();
    final OrderByColumnSpec limitColumn;
    if (limitSpec.getColumns().isEmpty()) {
        limitColumn = new OrderByColumnSpec(dimensionSpec.getOutputName(),
                OrderByColumnSpec.Direction.ASCENDING,
                Calcites.getStringComparatorForValueType(dimensionSpec.getOutputType()));
    } else {
        limitColumn = Iterables.getOnlyElement(limitSpec.getColumns());
    }
    final TopNMetricSpec topNMetricSpec;

    if (limitColumn.getDimension().equals(dimensionSpec.getOutputName())) {
        // DimensionTopNMetricSpec is exact; always return it even if allowApproximate is false.
        final DimensionTopNMetricSpec baseMetricSpec = new DimensionTopNMetricSpec(null,
                limitColumn.getDimensionComparator());
        topNMetricSpec = limitColumn.getDirection() == OrderByColumnSpec.Direction.ASCENDING ? baseMetricSpec
                : new InvertedTopNMetricSpec(baseMetricSpec);
    } else if (plannerContext.getPlannerConfig().isUseApproximateTopN()) {
        // ORDER BY metric
        final NumericTopNMetricSpec baseMetricSpec = new NumericTopNMetricSpec(limitColumn.getDimension());
        topNMetricSpec = limitColumn.getDirection() == OrderByColumnSpec.Direction.ASCENDING
                ? new InvertedTopNMetricSpec(baseMetricSpec)
                : baseMetricSpec;
    } else {
        return null;
    }

    final Filtration filtration = Filtration.create(filter).optimize(sourceRowSignature);

    return new TopNQuery(dataSource, getVirtualColumns(plannerContext.getExprMacroTable()), dimensionSpec,
            topNMetricSpec, limitSpec.getLimit(), filtration.getQuerySegmentSpec(), filtration.getDimFilter(),
            Granularities.ALL, grouping.getAggregatorFactories(), grouping.getPostAggregators(),
            ImmutableSortedMap.copyOf(plannerContext.getQueryContext()));
}

From source file:io.druid.sql.calcite.rel.DruidQuery.java

/**
 * Return this query as a GroupBy query, or null if this query is not compatible with GroupBy.
 *
 * @return query or null
 */
@Nullable
public GroupByQuery toGroupByQuery() {
    if (grouping == null) {
        return null;
    }

    final Filtration filtration = Filtration.create(filter).optimize(sourceRowSignature);

    return new GroupByQuery(dataSource, filtration.getQuerySegmentSpec(),
            getVirtualColumns(plannerContext.getExprMacroTable()), filtration.getDimFilter(), Granularities.ALL,
            grouping.getDimensionSpecs(), grouping.getAggregatorFactories(), grouping.getPostAggregators(),
            grouping.getHavingFilter() != null ? new DimFilterHavingSpec(grouping.getHavingFilter(), true)
                    : null,
            limitSpec, ImmutableSortedMap.copyOf(plannerContext.getQueryContext()));
}

From source file:org.apache.druid.sql.calcite.rel.DruidQuery.java

/**
 * Return this query as a Timeseries query, or null if this query is not compatible with Timeseries.
 *
 * @return query
 */
@Nullable
public TimeseriesQuery toTimeseriesQuery() {
    if (grouping == null || grouping.getHavingFilter() != null) {
        return null;
    }

    final Granularity queryGranularity;
    final boolean descending;
    int timeseriesLimit = 0;
    if (grouping.getDimensions().isEmpty()) {
        queryGranularity = Granularities.ALL;
        descending = false;
    } else if (grouping.getDimensions().size() == 1) {
        final DimensionExpression dimensionExpression = Iterables.getOnlyElement(grouping.getDimensions());
        queryGranularity = Expressions.toQueryGranularity(dimensionExpression.getDruidExpression(),
                plannerContext.getExprMacroTable());

        if (queryGranularity == null) {
            // Timeseries only applies if the single dimension is granular __time.
            return null;
        }
        if (limitSpec != null) {
            // If there is a limit spec, set timeseriesLimit to the given value if it is less than Integer.MAX_VALUE.
            if (limitSpec.isLimited()) {
                timeseriesLimit = limitSpec.getLimit();
            }

            if (limitSpec.getColumns().isEmpty()) {
                descending = false;
            } else {
                // We're ok if the first order by is time (since every time value is distinct, the rest of the columns
                // wouldn't matter anyway).
                final OrderByColumnSpec firstOrderBy = limitSpec.getColumns().get(0);

                if (firstOrderBy.getDimension().equals(dimensionExpression.getOutputName())) {
                    // Order by time.
                    descending = firstOrderBy.getDirection() == OrderByColumnSpec.Direction.DESCENDING;
                } else {
                    // Order by something else.
                    return null;
                }
            }
        } else {
            // No limitSpec.
            descending = false;
        }
    } else {
        // Timeseries cannot handle more than one dimension.
        return null;
    }

    final Filtration filtration = Filtration.create(filter).optimize(sourceRowSignature);

    final List<PostAggregator> postAggregators = new ArrayList<>(grouping.getPostAggregators());
    if (sortProject != null) {
        postAggregators.addAll(sortProject.getPostAggregators());
    }

    final Map<String, Object> theContext = Maps.newHashMap();
    theContext.put("skipEmptyBuckets", true);
    theContext.putAll(plannerContext.getQueryContext());

    return new TimeseriesQuery(dataSource, filtration.getQuerySegmentSpec(), descending,
            getVirtualColumns(plannerContext.getExprMacroTable(), false), filtration.getDimFilter(),
            queryGranularity, grouping.getAggregatorFactories(), postAggregators, timeseriesLimit,
            ImmutableSortedMap.copyOf(theContext));
}

From source file:org.apache.druid.sql.calcite.rel.DruidQuery.java

/**
 * Return this query as a TopN query, or null if this query is not compatible with TopN.
 *
 * @return query or null
 */
@Nullable
public TopNQuery toTopNQuery() {
    // Must have GROUP BY one column, ORDER BY zero or one column, limit less than maxTopNLimit, and no HAVING.
    final boolean topNOk = grouping != null && grouping.getDimensions().size() == 1 && limitSpec != null
            && (limitSpec.getColumns().size() <= 1
                    && limitSpec.getLimit() <= plannerContext.getPlannerConfig().getMaxTopNLimit())
            && grouping.getHavingFilter() == null;

    if (!topNOk) {
        return null;
    }

    final DimensionSpec dimensionSpec = Iterables.getOnlyElement(grouping.getDimensions()).toDimensionSpec();
    final OrderByColumnSpec limitColumn;
    if (limitSpec.getColumns().isEmpty()) {
        limitColumn = new OrderByColumnSpec(dimensionSpec.getOutputName(),
                OrderByColumnSpec.Direction.ASCENDING,
                Calcites.getStringComparatorForValueType(dimensionSpec.getOutputType()));
    } else {
        limitColumn = Iterables.getOnlyElement(limitSpec.getColumns());
    }
    final TopNMetricSpec topNMetricSpec;

    if (limitColumn.getDimension().equals(dimensionSpec.getOutputName())) {
        // DimensionTopNMetricSpec is exact; always return it even if allowApproximate is false.
        final DimensionTopNMetricSpec baseMetricSpec = new DimensionTopNMetricSpec(null,
                limitColumn.getDimensionComparator());
        topNMetricSpec = limitColumn.getDirection() == OrderByColumnSpec.Direction.ASCENDING ? baseMetricSpec
                : new InvertedTopNMetricSpec(baseMetricSpec);
    } else if (plannerContext.getPlannerConfig().isUseApproximateTopN()) {
        // ORDER BY metric
        final NumericTopNMetricSpec baseMetricSpec = new NumericTopNMetricSpec(limitColumn.getDimension());
        topNMetricSpec = limitColumn.getDirection() == OrderByColumnSpec.Direction.ASCENDING
                ? new InvertedTopNMetricSpec(baseMetricSpec)
                : baseMetricSpec;
    } else {
        return null;
    }

    final Filtration filtration = Filtration.create(filter).optimize(sourceRowSignature);

    final List<PostAggregator> postAggregators = new ArrayList<>(grouping.getPostAggregators());
    if (sortProject != null) {
        postAggregators.addAll(sortProject.getPostAggregators());
    }

    return new TopNQuery(dataSource, getVirtualColumns(plannerContext.getExprMacroTable(), true), dimensionSpec,
            topNMetricSpec, limitSpec.getLimit(), filtration.getQuerySegmentSpec(), filtration.getDimFilter(),
            Granularities.ALL, grouping.getAggregatorFactories(), postAggregators,
            ImmutableSortedMap.copyOf(plannerContext.getQueryContext()));
}

From source file:org.apache.druid.sql.calcite.rel.DruidQuery.java

/**
 * Return this query as a GroupBy query, or null if this query is not compatible with GroupBy.
 *
 * @return query or null
 */
@Nullable
public GroupByQuery toGroupByQuery() {
    if (grouping == null) {
        return null;
    }

    final Filtration filtration = Filtration.create(filter).optimize(sourceRowSignature);

    final List<PostAggregator> postAggregators = new ArrayList<>(grouping.getPostAggregators());
    if (sortProject != null) {
        postAggregators.addAll(sortProject.getPostAggregators());
    }

    return new GroupByQuery(dataSource, filtration.getQuerySegmentSpec(),
            getVirtualColumns(plannerContext.getExprMacroTable(), true), filtration.getDimFilter(),
            Granularities.ALL, grouping.getDimensionSpecs(), grouping.getAggregatorFactories(), postAggregators,
            grouping.getHavingFilter() != null ? new DimFilterHavingSpec(grouping.getHavingFilter(), true)
                    : null,
            limitSpec, null, ImmutableSortedMap.copyOf(plannerContext.getQueryContext()));
}

From source file:io.druid.sql.calcite.rel.DruidQuery.java

/**
 * Return this query as a Select query, or null if this query is not compatible with Select.
 *
 * @return query or null
 */
@Nullable
public SelectQuery toSelectQuery() {
    if (grouping != null) {
        return null;
    }

    final Filtration filtration = Filtration.create(filter).optimize(sourceRowSignature);
    final boolean descending;
    final int threshold;

    if (limitSpec != null) {
        // Safe to assume limitSpec has zero or one entry; DruidSelectSortRule wouldn't push in anything else.
        if (limitSpec.getColumns().size() == 0) {
            descending = false;
        } else if (limitSpec.getColumns().size() == 1) {
            final OrderByColumnSpec orderBy = Iterables.getOnlyElement(limitSpec.getColumns());
            if (!orderBy.getDimension().equals(Column.TIME_COLUMN_NAME)) {
                // Select cannot handle sorting on anything other than __time.
                return null;
            }
            descending = orderBy.getDirection() == OrderByColumnSpec.Direction.DESCENDING;
        } else {
            // Select cannot handle sorting on more than one column.
            return null;
        }

        threshold = limitSpec.getLimit();
    } else {
        descending = false;
        threshold = 0;
    }

    // We need to ask for dummy columns to prevent Select from returning all of them.
    String dummyColumn = "dummy";
    while (sourceRowSignature.getColumnType(dummyColumn) != null
            || outputRowSignature.getRowOrder().contains(dummyColumn)) {
        dummyColumn = dummyColumn + "_";
    }

    final List<String> metrics = new ArrayList<>();

    if (selectProjection != null) {
        metrics.addAll(selectProjection.getDirectColumns());
        metrics.addAll(selectProjection.getVirtualColumns().stream().map(VirtualColumn::getOutputName)
                .collect(Collectors.toList()));
    } else {
        // No projection, rowOrder should reference direct columns.
        metrics.addAll(outputRowSignature.getRowOrder());
    }

    if (metrics.isEmpty()) {
        metrics.add(dummyColumn);
    }

    // Not used for actual queries (will be replaced by QueryMaker) but the threshold is important for the planner.
    final PagingSpec pagingSpec = new PagingSpec(null, threshold);

    return new SelectQuery(dataSource, filtration.getQuerySegmentSpec(), descending, filtration.getDimFilter(),
            Granularities.ALL, ImmutableList.of(new DefaultDimensionSpec(dummyColumn, dummyColumn)),
            metrics.stream().sorted().distinct().collect(Collectors.toList()),
            getVirtualColumns(plannerContext.getExprMacroTable()), pagingSpec,
            ImmutableSortedMap.copyOf(plannerContext.getQueryContext()));
}

From source file:org.apache.druid.sql.calcite.rel.DruidQuery.java

/**
 * Return this query as a Scan query, or null if this query is not compatible with Scan.
 *
 * @return query or null
 */
@Nullable
public ScanQuery toScanQuery() {
    if (grouping != null) {
        // Scan cannot GROUP BY.
        return null;
    }

    if (limitSpec != null && limitSpec.getColumns().size() > 0) {
        // Scan cannot ORDER BY.
        return null;
    }

    if (outputRowSignature.getRowOrder().isEmpty()) {
        // Should never do a scan query without any columns that we're interested in. This is probably a planner bug.
        throw new ISE("WTF?! Attempting to convert to Scan query without any columns?");
    }

    final Filtration filtration = Filtration.create(filter).optimize(sourceRowSignature);

    // DefaultLimitSpec (which we use to "remember" limits) is int typed, and Integer.MAX_VALUE means "no limit".
    final long scanLimit = limitSpec == null || limitSpec.getLimit() == Integer.MAX_VALUE ? 0L
            : (long) limitSpec.getLimit();

    return new ScanQuery(dataSource, filtration.getQuerySegmentSpec(),
            selectProjection != null ? VirtualColumns.create(selectProjection.getVirtualColumns())
                    : VirtualColumns.EMPTY,
            ScanQuery.RESULT_FORMAT_COMPACTED_LIST, 0, scanLimit, filtration.getDimFilter(),
            Ordering.natural().sortedCopy(ImmutableSet.copyOf(outputRowSignature.getRowOrder())), false,
            ImmutableSortedMap.copyOf(plannerContext.getQueryContext()));
}

From source file:org.apache.druid.sql.calcite.rel.DruidQuery.java

/**
 * Return this query as a Select query, or null if this query is not compatible with Select.
 *
 * @return query or null
 */
@Nullable
public SelectQuery toSelectQuery() {
    if (grouping != null) {
        return null;
    }

    final Filtration filtration = Filtration.create(filter).optimize(sourceRowSignature);
    final boolean descending;
    final int threshold;

    if (limitSpec != null) {
        // Safe to assume limitSpec has zero or one entry; DruidSelectSortRule wouldn't push in anything else.
        if (limitSpec.getColumns().size() == 0) {
            descending = false;
        } else if (limitSpec.getColumns().size() == 1) {
            final OrderByColumnSpec orderBy = Iterables.getOnlyElement(limitSpec.getColumns());
            if (!orderBy.getDimension().equals(ColumnHolder.TIME_COLUMN_NAME)) {
                // Select cannot handle sorting on anything other than __time.
                return null;
            }
            descending = orderBy.getDirection() == OrderByColumnSpec.Direction.DESCENDING;
        } else {
            // Select cannot handle sorting on more than one column.
            return null;
        }

        threshold = limitSpec.getLimit();
    } else {
        descending = false;
        threshold = 0;
    }

    // We need to ask for dummy columns to prevent Select from returning all of them.
    String dummyColumn = "dummy";
    while (sourceRowSignature.getColumnType(dummyColumn) != null
            || outputRowSignature.getRowOrder().contains(dummyColumn)) {
        dummyColumn = dummyColumn + "_";
    }

    final List<String> metrics = new ArrayList<>();

    if (selectProjection != null) {
        metrics.addAll(selectProjection.getDirectColumns());
        metrics.addAll(selectProjection.getVirtualColumns().stream().map(VirtualColumn::getOutputName)
                .collect(Collectors.toList()));
    } else {
        // No projection, rowOrder should reference direct columns.
        metrics.addAll(outputRowSignature.getRowOrder());
    }

    if (metrics.isEmpty()) {
        metrics.add(dummyColumn);
    }

    // Not used for actual queries (will be replaced by QueryMaker) but the threshold is important for the planner.
    final PagingSpec pagingSpec = new PagingSpec(null, threshold);

    return new SelectQuery(dataSource, filtration.getQuerySegmentSpec(), descending, filtration.getDimFilter(),
            Granularities.ALL, ImmutableList.of(new DefaultDimensionSpec(dummyColumn, dummyColumn)),
            metrics.stream().sorted().distinct().collect(Collectors.toList()),
            getVirtualColumns(plannerContext.getExprMacroTable(), true), pagingSpec,
            ImmutableSortedMap.copyOf(plannerContext.getQueryContext()));
}