Example usage for com.fasterxml.jackson.dataformat.smile SmileFactory SmileFactory

Introduction

This page collects example usages of the default constructor of com.fasterxml.jackson.dataformat.smile.SmileFactory, drawn from open-source projects.

Prototype

public SmileFactory() 

Document

Default constructor used to create factory instances.
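
Before the collected usages below, here is a minimal, self-contained sketch of the constructor in action (the class name SmileRoundTrip and the sample map are illustrative, not taken from the examples): a default-constructed SmileFactory plugs into ObjectMapper exactly like the default JsonFactory, switching the wire format to the binary Smile encoding while the data-binding API stays the same.

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.smile.SmileFactory;

import java.util.Map;

public class SmileRoundTrip {
    public static void main(String[] args) throws Exception {
        // Wrap the default-constructed factory in a regular ObjectMapper
        ObjectMapper smileMapper = new ObjectMapper(new SmileFactory());

        // writeValueAsBytes now produces binary Smile instead of JSON text
        byte[] smile = smileMapper.writeValueAsBytes(Map.of("name", "smile", "version", 1));

        // Reading back uses the same data-binding API as plain JSON
        Map<?, ?> decoded = smileMapper.readValue(smile, Map.class);
        System.out.println(decoded);
    }
}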

Usage

From source file:io.airlift.jaxrs.SmileMapper.java

@Override
public Object readFrom(Class<Object> type, Type genericType, Annotation[] annotations, MediaType mediaType,
        MultivaluedMap<String, String> httpHeaders, InputStream inputStream) throws IOException {
    Object object;
    try {
        JsonParser jsonParser = new SmileFactory().createParser(inputStream);

        // Important: we are NOT to close the underlying stream after
        // mapping, so we need to instruct parser:
        jsonParser.disable(JsonParser.Feature.AUTO_CLOSE_SOURCE);

        object = objectMapper.readValue(jsonParser, objectMapper.getTypeFactory().constructType(genericType));
    } catch (Exception e) {
        // we want to return a 400 for bad JSON but not for a real IO exception
        if (e instanceof IOException && !(e instanceof JsonProcessingException)
                && !(e instanceof EOFException)) {
            throw (IOException) e;
        }

        // log the exception at debug so it can be viewed during development
        // Note: we are not logging at a higher level because this could cause a denial of service
        log.debug(e, "Invalid json for Java type %s", type);

        // invalid json request
        throw new WebApplicationException(Response.status(Response.Status.BAD_REQUEST)
                .entity("Invalid json for Java type " + type).build());
    }

    // validate object using the bean validation framework
    Set<ConstraintViolation<Object>> violations = VALIDATOR.validate(object);
    if (!violations.isEmpty()) {
        throw new WebApplicationException(
                Response.status(Response.Status.BAD_REQUEST).entity(messagesFor(violations)).build());
    }

    return object;
}
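
The example above disables AUTO_CLOSE_SOURCE on each parser it creates. A brief sketch of an alternative (not from the source above): the feature can be disabled once on the factory, so every parser created from it inherits the setting.

SmileFactory smileFactory = new SmileFactory();
// Disable at the factory level: all parsers created from this factory
// will leave the underlying InputStream open after parsing.
smileFactory.disable(JsonParser.Feature.AUTO_CLOSE_SOURCE);
JsonParser jsonParser = smileFactory.createParser(inputStream);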

From source file:com.proofpoint.jaxrs.SmileMapper.java

@Override
public void writeTo(Object value, Class<?> type, Type genericType, Annotation[] annotations,
        MediaType mediaType, MultivaluedMap<String, Object> httpHeaders, OutputStream outputStream)
        throws IOException {
    JsonGenerator jsonGenerator = new SmileFactory().createGenerator(outputStream);

    // Important: we are NOT to close the underlying stream after
    // mapping, so we need to instruct generator:
    jsonGenerator.disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET);

    // 04-Mar-2010, tatu: How about type we were given? (if any)
    JavaType rootType = null;
    if (genericType != null && value != null) {
        // 10-Jan-2011, tatu: as per [JACKSON-456], it's not safe to just force root
        // type since it prevents polymorphic type serialization. Since we really
        // just need this for generics, let's only use generic type if it's truly
        // generic.
        if (genericType.getClass() != Class.class) { // generic types are other implementations of 'java.lang.reflect.Type'
            // This is still not exactly right; should root type be further
            // specialized with 'value.getClass()'? Let's see how well this works before
            // trying to come up with more complete solution.
            rootType = objectMapper.getTypeFactory().constructType(genericType);
            // 26-Feb-2011, tatu: To help with [JACKSON-518], we better recognize cases where
            // type degenerates back into "Object.class" (as is the case with plain TypeVariable,
            // for example), and not use that.
            //
            if (rootType.getRawClass() == Object.class) {
                rootType = null;
            }
        }
    }

    if (rootType != null) {
        objectMapper.writerWithType(rootType).writeValue(jsonGenerator, value);
    } else {
        objectMapper.writeValue(jsonGenerator, value);
    }
}
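
A note on writerWithType: since Jackson 2.5 it is deprecated in favor of writerFor, so on current versions the final branch would read (a drop-in sketch, same arguments):

    objectMapper.writerFor(rootType).writeValue(jsonGenerator, value);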


From source file:org.restlet.ext.jackson.JacksonRepresentation.java

/**
 * Creates a Jackson object mapper based on a media type. It supports JSON,
 * JSON Smile, XML, YAML and CSV.
 * 
 * @return The Jackson object mapper.
 */
protected ObjectMapper createObjectMapper() {
    ObjectMapper result = null;

    if (MediaType.APPLICATION_JSON.isCompatible(getMediaType())) {
        JsonFactory jsonFactory = new JsonFactory();
        jsonFactory.configure(Feature.AUTO_CLOSE_TARGET, false);
        result = new ObjectMapper(jsonFactory);
    } else if (MediaType.APPLICATION_JSON_SMILE.isCompatible(getMediaType())) {
        SmileFactory smileFactory = new SmileFactory();
        smileFactory.configure(Feature.AUTO_CLOSE_TARGET, false);
        result = new ObjectMapper(smileFactory);
        // [ifndef android]
    } else if (MediaType.APPLICATION_XML.isCompatible(getMediaType())
            || MediaType.TEXT_XML.isCompatible(getMediaType())) {
        javax.xml.stream.XMLInputFactory xif = XmlFactoryProvider.newInputFactory();
        xif.setProperty(javax.xml.stream.XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES,
                isExpandingEntityRefs());
        xif.setProperty(javax.xml.stream.XMLInputFactory.SUPPORT_DTD, isExpandingEntityRefs());
        xif.setProperty(javax.xml.stream.XMLInputFactory.IS_VALIDATING, isValidatingDtd());
        javax.xml.stream.XMLOutputFactory xof = XmlFactoryProvider.newOutputFactory();
        XmlFactory xmlFactory = new XmlFactory(xif, xof);
        xmlFactory.configure(Feature.AUTO_CLOSE_TARGET, false);
        result = new XmlMapper(xmlFactory);
        // [enddef]
    } else if (MediaType.APPLICATION_YAML.isCompatible(getMediaType())
            || MediaType.TEXT_YAML.isCompatible(getMediaType())) {
        YAMLFactory yamlFactory = new YAMLFactory();
        yamlFactory.configure(Feature.AUTO_CLOSE_TARGET, false);
        result = new ObjectMapper(yamlFactory);
    } else if (MediaType.TEXT_CSV.isCompatible(getMediaType())) {
        CsvFactory csvFactory = new CsvFactory();
        csvFactory.configure(Feature.AUTO_CLOSE_TARGET, false);
        result = new CsvMapper(csvFactory);
    } else {
        JsonFactory jsonFactory = new JsonFactory();
        jsonFactory.configure(Feature.AUTO_CLOSE_TARGET, false);
        result = new ObjectMapper(jsonFactory);
    }

    return result;
}
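
This method trusts the declared media type to pick a factory. When the payload's format is uncertain, jackson-core's format-detection API can sniff the leading bytes instead; a sketch, assuming the payload is available as a byte array named payloadBytes (a hypothetical variable):

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.format.DataFormatDetector;
import com.fasterxml.jackson.core.format.DataFormatMatcher;
import com.fasterxml.jackson.dataformat.smile.SmileFactory;

// Probe the payload bytes against the candidate factories; Smile's
// ":)\n" header makes it cheap to distinguish from JSON text.
DataFormatDetector detector = new DataFormatDetector(new JsonFactory(), new SmileFactory());
DataFormatMatcher match = detector.findFormat(payloadBytes);
if (match.hasMatch()) {
    System.out.println("Detected format: " + match.getMatchedFormatName());
}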

From source file:io.druid.query.groupby.GroupByMultiSegmentTest.java

private void setupGroupByFactory() {
    executorService = Execs.multiThreaded(2, "GroupByThreadPool[%d]");

    NonBlockingPool<ByteBuffer> bufferPool = new StupidPool<>("GroupByBenchmark-computeBufferPool",
            new OffheapBufferGenerator("compute", 10_000_000), 0, Integer.MAX_VALUE);

    // limit of 2 is required since we simulate both historical merge and broker merge in the same process
    BlockingPool<ByteBuffer> mergePool = new DefaultBlockingPool<>(
            new OffheapBufferGenerator("merge", 10_000_000), 2);
    final GroupByQueryConfig config = new GroupByQueryConfig() {
        @Override
        public String getDefaultStrategy() {
            return "v2";
        }

        @Override
        public int getBufferGrouperInitialBuckets() {
            return -1;
        }

        @Override
        public long getMaxOnDiskStorage() {
            return 1_000_000_000L;
        }
    };
    config.setSingleThreaded(false);
    config.setMaxIntermediateRows(Integer.MAX_VALUE);
    config.setMaxResults(Integer.MAX_VALUE);

    DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {
        @Override
        public int getNumThreads() {
            // Used by "v2" strategy for concurrencyHint
            return 2;
        }

        @Override
        public String getFormatString() {
            return null;
        }
    };

    final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
    final GroupByStrategySelector strategySelector = new GroupByStrategySelector(configSupplier,
            new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool),
                    NOOP_QUERYWATCHER, bufferPool),
            new GroupByStrategyV2(druidProcessingConfig, configSupplier, bufferPool, mergePool,
                    new ObjectMapper(new SmileFactory()), NOOP_QUERYWATCHER));

    groupByFactory = new GroupByQueryRunnerFactory(strategySelector,
            new GroupByQueryQueryToolChest(strategySelector, NoopIntervalChunkingQueryRunnerDecorator()));
}
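
In these Druid tests, new ObjectMapper(new SmileFactory()) hands GroupByStrategyV2 a mapper backed by a default-constructed SmileFactory, so intermediate data can be encoded in the compact binary Smile format. A quick sketch (illustrative, not part of the test) of what makes such output recognizably Smile, its ":)" header:

ObjectMapper smileMapper = new ObjectMapper(new SmileFactory());
byte[] bytes = smileMapper.writeValueAsBytes(Collections.singletonMap("k", "v"));
// Smile payloads start with the header bytes ':' ')' '\n' plus a version byte
assert bytes[0] == ':' && bytes[1] == ')' && bytes[2] == '\n';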

From source file:io.druid.query.groupby.GroupByLimitPushDownMultiNodeMergeTest.java

private void setupGroupByFactory() {
    executorService = Execs.multiThreaded(3, "GroupByThreadPool[%d]");

    NonBlockingPool<ByteBuffer> bufferPool = new StupidPool<>("GroupByBenchmark-computeBufferPool",
            new OffheapBufferGenerator("compute", 10_000_000), 0, Integer.MAX_VALUE);

    // limit of 2 is required since we simulate both historical merge and broker merge in the same process
    BlockingPool<ByteBuffer> mergePool = new DefaultBlockingPool<>(
            new OffheapBufferGenerator("merge", 10_000_000), 2);
    // limit of 2 is required since we simulate both historical merge and broker merge in the same process
    BlockingPool<ByteBuffer> mergePool2 = new DefaultBlockingPool<>(
            new OffheapBufferGenerator("merge", 10_000_000), 2);

    final GroupByQueryConfig config = new GroupByQueryConfig() {
        @Override
        public String getDefaultStrategy() {
            return "v2";
        }

        @Override
        public int getBufferGrouperInitialBuckets() {
            return -1;
        }

        @Override
        public long getMaxOnDiskStorage() {
            return 1_000_000_000L;
        }
    };
    config.setSingleThreaded(false);
    config.setMaxIntermediateRows(Integer.MAX_VALUE);
    config.setMaxResults(Integer.MAX_VALUE);

    DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {
        @Override
        public int getNumThreads() {
            // Used by "v2" strategy for concurrencyHint
            return 2;
        }

        @Override
        public String getFormatString() {
            return null;
        }
    };

    final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
    final GroupByStrategySelector strategySelector = new GroupByStrategySelector(configSupplier,
            new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool),
                    NOOP_QUERYWATCHER, bufferPool),
            new GroupByStrategyV2(druidProcessingConfig, configSupplier, bufferPool, mergePool,
                    new ObjectMapper(new SmileFactory()), NOOP_QUERYWATCHER));

    final GroupByStrategySelector strategySelector2 = new GroupByStrategySelector(configSupplier,
            new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool),
                    NOOP_QUERYWATCHER, bufferPool),
            new GroupByStrategyV2(druidProcessingConfig, configSupplier, bufferPool, mergePool2,
                    new ObjectMapper(new SmileFactory()), NOOP_QUERYWATCHER));

    groupByFactory = new GroupByQueryRunnerFactory(strategySelector,
            new GroupByQueryQueryToolChest(strategySelector, NoopIntervalChunkingQueryRunnerDecorator()));

    groupByFactory2 = new GroupByQueryRunnerFactory(strategySelector2,
            new GroupByQueryQueryToolChest(strategySelector2, NoopIntervalChunkingQueryRunnerDecorator()));
}

From source file:io.druid.query.groupby.GroupByLimitPushDownInsufficientBufferTest.java

private void setupGroupByFactory() {
    executorService = Execs.multiThreaded(3, "GroupByThreadPool[%d]");

    NonBlockingPool<ByteBuffer> bufferPool = new StupidPool<>("GroupByBenchmark-computeBufferPool",
            new OffheapBufferGenerator("compute", 10_000_000), 0, Integer.MAX_VALUE);

    // limit of 2 is required since we simulate both historical merge and broker merge in the same process
    BlockingPool<ByteBuffer> mergePool = new DefaultBlockingPool<>(
            new OffheapBufferGenerator("merge", 10_000_000), 2);
    // limit of 2 is required since we simulate both historical merge and broker merge in the same process
    BlockingPool<ByteBuffer> tooSmallMergePool = new DefaultBlockingPool<>(
            new OffheapBufferGenerator("merge", 255), 2);

    final GroupByQueryConfig config = new GroupByQueryConfig() {
        @Override
        public String getDefaultStrategy() {
            return "v2";
        }

        @Override
        public int getBufferGrouperInitialBuckets() {
            return -1;
        }

        @Override
        public long getMaxOnDiskStorage() {
            return 1_000_000_000L;
        }
    };
    config.setSingleThreaded(false);
    config.setMaxIntermediateRows(Integer.MAX_VALUE);
    config.setMaxResults(Integer.MAX_VALUE);

    DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {
        @Override
        public int getNumThreads() {
            // Used by "v2" strategy for concurrencyHint
            return 2;
        }

        @Override
        public String getFormatString() {
            return null;
        }
    };

    DruidProcessingConfig tooSmallDruidProcessingConfig = new DruidProcessingConfig() {
        @Override
        public int intermediateComputeSizeBytes() {
            return 255;
        }

        @Override
        public int getNumThreads() {
            // Used by "v2" strategy for concurrencyHint
            return 2;
        }

        @Override
        public String getFormatString() {
            return null;
        }
    };

    final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
    final GroupByStrategySelector strategySelector = new GroupByStrategySelector(configSupplier,
            new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool),
                    NOOP_QUERYWATCHER, bufferPool),
            new GroupByStrategyV2(druidProcessingConfig, configSupplier, bufferPool, mergePool,
                    new ObjectMapper(new SmileFactory()), NOOP_QUERYWATCHER));

    final GroupByStrategySelector tooSmallStrategySelector = new GroupByStrategySelector(configSupplier,
            new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool),
                    NOOP_QUERYWATCHER, bufferPool),
            new GroupByStrategyV2(tooSmallDruidProcessingConfig, configSupplier, bufferPool, tooSmallMergePool,
                    new ObjectMapper(new SmileFactory()), NOOP_QUERYWATCHER));

    groupByFactory = new GroupByQueryRunnerFactory(strategySelector,
            new GroupByQueryQueryToolChest(strategySelector, NoopIntervalChunkingQueryRunnerDecorator()));

    tooSmallGroupByFactory = new GroupByQueryRunnerFactory(tooSmallStrategySelector,
            new GroupByQueryQueryToolChest(tooSmallStrategySelector,
                    NoopIntervalChunkingQueryRunnerDecorator()));
}

From source file:io.druid.benchmark.query.GroupByBenchmark.java

@Setup(Level.Trial)
public void setup() throws IOException {
    log.info("SETUP CALLED AT " + +System.currentTimeMillis());

    if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
        ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(Hashing.murmur3_128()));
    }
    executorService = Execs.multiThreaded(numProcessingThreads, "GroupByThreadPool[%d]");

    setupQueries();

    String[] schemaQuery = schemaAndQuery.split("\\.");
    String schemaName = schemaQuery[0];
    String queryName = schemaQuery[1];

    schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schemaName);
    query = SCHEMA_QUERY_MAP.get(schemaName).get(queryName);

    final BenchmarkDataGenerator dataGenerator = new BenchmarkDataGenerator(schemaInfo.getColumnSchemas(),
            RNG_SEED + 1, schemaInfo.getDataInterval(), rowsPerSegment);

    tmpDir = Files.createTempDir();
    log.info("Using temp dir: %s", tmpDir.getAbsolutePath());

    // queryableIndexes   -> numSegments worth of on-disk segments
    // anIncrementalIndex -> the last incremental index
    anIncrementalIndex = null;
    queryableIndexes = new ArrayList<>(numSegments);

    for (int i = 0; i < numSegments; i++) {
        log.info("Generating rows for segment %d/%d", i + 1, numSegments);

        final IncrementalIndex index = makeIncIndex();

        for (int j = 0; j < rowsPerSegment; j++) {
            final InputRow row = dataGenerator.nextRow();
            if (j % 20000 == 0) {
                log.info("%,d/%,d rows generated.", i * rowsPerSegment + j, rowsPerSegment * numSegments);
            }
            index.add(row);
        }

        log.info("%,d/%,d rows generated, persisting segment %d/%d.", (i + 1) * rowsPerSegment,
                rowsPerSegment * numSegments, i + 1, numSegments);

        final File file = INDEX_MERGER_V9.persist(index, new File(tmpDir, String.valueOf(i)), new IndexSpec());

        queryableIndexes.add(INDEX_IO.loadIndex(file));

        if (i == numSegments - 1) {
            anIncrementalIndex = index;
        } else {
            index.close();
        }
    }

    StupidPool<ByteBuffer> bufferPool = new StupidPool<>(new OffheapBufferGenerator("compute", 250_000_000), 0,
            Integer.MAX_VALUE);

    // limit of 2 is required since we simulate both historical merge and broker merge in the same process
    BlockingPool<ByteBuffer> mergePool = new BlockingPool<>(new OffheapBufferGenerator("merge", 250_000_000),
            2);
    final GroupByQueryConfig config = new GroupByQueryConfig() {
        @Override
        public String getDefaultStrategy() {
            return defaultStrategy;
        }

        @Override
        public int getBufferGrouperInitialBuckets() {
            return initialBuckets;
        }

        @Override
        public long getMaxOnDiskStorage() {
            return 0L;
        }
    };
    config.setSingleThreaded(false);
    config.setMaxIntermediateRows(Integer.MAX_VALUE);
    config.setMaxResults(Integer.MAX_VALUE);

    DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {
        @Override
        public int getNumThreads() {
            // Used by "v2" strategy for concurrencyHint
            return numProcessingThreads;
        }

        @Override
        public String getFormatString() {
            return null;
        }
    };

    final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
    final GroupByStrategySelector strategySelector = new GroupByStrategySelector(configSupplier,
            new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool),
                    QueryBenchmarkUtil.NOOP_QUERYWATCHER, bufferPool),
            new GroupByStrategyV2(druidProcessingConfig, configSupplier, bufferPool, mergePool,
                    new ObjectMapper(new SmileFactory()), QueryBenchmarkUtil.NOOP_QUERYWATCHER));

    factory = new GroupByQueryRunnerFactory(strategySelector, new GroupByQueryQueryToolChest(configSupplier,
            strategySelector, bufferPool, QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator()));
}

From source file:io.druid.benchmark.GroupByTypeInterfaceBenchmark.java

@Setup(Level.Trial)
public void setup() throws IOException {
    log.info("SETUP CALLED AT %d", System.currentTimeMillis());

    if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
        ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
    }
    executorService = Execs.multiThreaded(numProcessingThreads, "GroupByThreadPool[%d]");

    setupQueries();

    String schemaName = "basic";

    schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schemaName);
    stringQuery = SCHEMA_QUERY_MAP.get(schemaName).get("string");
    longFloatQuery = SCHEMA_QUERY_MAP.get(schemaName).get("longFloat");
    longQuery = SCHEMA_QUERY_MAP.get(schemaName).get("long");
    floatQuery = SCHEMA_QUERY_MAP.get(schemaName).get("float");

    final BenchmarkDataGenerator dataGenerator = new BenchmarkDataGenerator(schemaInfo.getColumnSchemas(),
            RNG_SEED + 1, schemaInfo.getDataInterval(), rowsPerSegment);

    tmpDir = Files.createTempDir();
    log.info("Using temp dir: %s", tmpDir.getAbsolutePath());

    // queryableIndexes   -> numSegments worth of on-disk segments
    // anIncrementalIndex -> the last incremental index
    anIncrementalIndex = null;
    queryableIndexes = new ArrayList<>(numSegments);

    for (int i = 0; i < numSegments; i++) {
        log.info("Generating rows for segment %d/%d", i + 1, numSegments);

        final IncrementalIndex index = makeIncIndex();

        for (int j = 0; j < rowsPerSegment; j++) {
            final InputRow row = dataGenerator.nextRow();
            if (j % 20000 == 0) {
                log.info("%,d/%,d rows generated.", i * rowsPerSegment + j, rowsPerSegment * numSegments);
            }
            index.add(row);
        }

        log.info("%,d/%,d rows generated, persisting segment %d/%d.", (i + 1) * rowsPerSegment,
                rowsPerSegment * numSegments, i + 1, numSegments);

        final File file = INDEX_MERGER_V9.persist(index, new File(tmpDir, String.valueOf(i)), new IndexSpec());

        queryableIndexes.add(INDEX_IO.loadIndex(file));

        if (i == numSegments - 1) {
            anIncrementalIndex = index;
        } else {
            index.close();
        }
    }

    NonBlockingPool<ByteBuffer> bufferPool = new StupidPool<>("GroupByBenchmark-computeBufferPool",
            new OffheapBufferGenerator("compute", 250_000_000), 0, Integer.MAX_VALUE);

    // limit of 2 is required since we simulate both historical merge and broker merge in the same process
    BlockingPool<ByteBuffer> mergePool = new DefaultBlockingPool<>(
            new OffheapBufferGenerator("merge", 250_000_000), 2);
    final GroupByQueryConfig config = new GroupByQueryConfig() {
        @Override
        public String getDefaultStrategy() {
            return defaultStrategy;
        }

        @Override
        public int getBufferGrouperInitialBuckets() {
            return initialBuckets;
        }

        @Override
        public long getMaxOnDiskStorage() {
            return 1_000_000_000L;
        }
    };
    config.setSingleThreaded(false);
    config.setMaxIntermediateRows(Integer.MAX_VALUE);
    config.setMaxResults(Integer.MAX_VALUE);

    DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {
        @Override
        public int getNumThreads() {
            // Used by "v2" strategy for concurrencyHint
            return numProcessingThreads;
        }

        @Override
        public String getFormatString() {
            return null;
        }
    };

    final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
    final GroupByStrategySelector strategySelector = new GroupByStrategySelector(configSupplier,
            new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool),
                    QueryBenchmarkUtil.NOOP_QUERYWATCHER, bufferPool),
            new GroupByStrategyV2(druidProcessingConfig, configSupplier, bufferPool, mergePool,
                    new ObjectMapper(new SmileFactory()), QueryBenchmarkUtil.NOOP_QUERYWATCHER));

    factory = new GroupByQueryRunnerFactory(strategySelector, new GroupByQueryQueryToolChest(strategySelector,
            QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator()));
}