Example usage for com.fasterxml.jackson.dataformat.smile SmileFactory SmileFactory

Introduction

On this page you can find example usage of the default constructor SmileFactory() from com.fasterxml.jackson.dataformat.smile.SmileFactory.

Prototype

public SmileFactory() 

Document

Default constructor used to create factory instances.
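
Before the usage excerpts below, a minimal self-contained sketch (not taken from those sources) of what the default constructor gives you: handing a SmileFactory to an ObjectMapper yields a mapper that reads and writes Smile, Jackson's binary JSON encoding, through the ordinary databind API.

import java.util.Collections;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.smile.SmileFactory;

public class SmileFactoryExample {
    public static void main(String[] args) throws Exception {
        // A default-constructed SmileFactory wrapped in an ObjectMapper;
        // all reads and writes now use the Smile binary encoding.
        ObjectMapper smileMapper = new ObjectMapper(new SmileFactory());

        byte[] bytes = smileMapper.writeValueAsBytes(Collections.singletonMap("key", "value"));
        Map<?, ?> decoded = smileMapper.readValue(bytes, Map.class);

        System.out.println(decoded); // prints {key=value}
    }
}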

Usage

From source file: org.apache.druid.benchmark.GroupByTypeInterfaceBenchmark.java

@Setup(Level.Trial)
public void setup() throws IOException {
    log.info("SETUP CALLED AT %d", System.currentTimeMillis());

    if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
        ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
    }

    setupQueries();

    String schemaName = "basic";

    schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schemaName);
    stringQuery = SCHEMA_QUERY_MAP.get(schemaName).get("string");
    longFloatQuery = SCHEMA_QUERY_MAP.get(schemaName).get("longFloat");
    longQuery = SCHEMA_QUERY_MAP.get(schemaName).get("long");
    floatQuery = SCHEMA_QUERY_MAP.get(schemaName).get("float");

    final BenchmarkDataGenerator dataGenerator = new BenchmarkDataGenerator(schemaInfo.getColumnSchemas(),
            RNG_SEED + 1, schemaInfo.getDataInterval(), rowsPerSegment);

    tmpDir = Files.createTempDir();
    log.info("Using temp dir: %s", tmpDir.getAbsolutePath());

    // queryableIndexes   -> numSegments worth of on-disk segments
    // anIncrementalIndex -> the last incremental index
    anIncrementalIndex = null;
    queryableIndexes = new ArrayList<>(numSegments);

    for (int i = 0; i < numSegments; i++) {
        log.info("Generating rows for segment %d/%d", i + 1, numSegments);

        final IncrementalIndex index = makeIncIndex();

        for (int j = 0; j < rowsPerSegment; j++) {
            final InputRow row = dataGenerator.nextRow();
            if (j % 20000 == 0) {
                log.info("%,d/%,d rows generated.", i * rowsPerSegment + j, rowsPerSegment * numSegments);
            }
            index.add(row);
        }

        log.info("%,d/%,d rows generated, persisting segment %d/%d.", (i + 1) * rowsPerSegment,
                rowsPerSegment * numSegments, i + 1, numSegments);

        final File file = INDEX_MERGER_V9.persist(index, new File(tmpDir, String.valueOf(i)), new IndexSpec(),
                null);

        queryableIndexes.add(INDEX_IO.loadIndex(file));

        if (i == numSegments - 1) {
            anIncrementalIndex = index;
        } else {
            index.close();
        }
    }

    NonBlockingPool<ByteBuffer> bufferPool = new StupidPool<>("GroupByBenchmark-computeBufferPool",
            new OffheapBufferGenerator("compute", 250_000_000), 0, Integer.MAX_VALUE);

    // limit of 2 is required since we simulate both historical merge and broker merge in the same process
    BlockingPool<ByteBuffer> mergePool = new DefaultBlockingPool<>(
            new OffheapBufferGenerator("merge", 250_000_000), 2);
    final GroupByQueryConfig config = new GroupByQueryConfig() {
        @Override
        public String getDefaultStrategy() {
            return defaultStrategy;
        }

        @Override
        public int getBufferGrouperInitialBuckets() {
            return initialBuckets;
        }

        @Override
        public long getMaxOnDiskStorage() {
            return 1_000_000_000L;
        }
    };
    config.setSingleThreaded(false);
    config.setMaxIntermediateRows(Integer.MAX_VALUE);
    config.setMaxResults(Integer.MAX_VALUE);

    DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {
        @Override
        public int getNumThreads() {
            // Used by "v2" strategy for concurrencyHint
            return numProcessingThreads;
        }

        @Override
        public String getFormatString() {
            return null;
        }
    };

    final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
    final GroupByStrategySelector strategySelector = new GroupByStrategySelector(configSupplier,
            new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool),
                    QueryBenchmarkUtil.NOOP_QUERYWATCHER, bufferPool),
            new GroupByStrategyV2(druidProcessingConfig, configSupplier, bufferPool, mergePool,
                    new ObjectMapper(new SmileFactory()), QueryBenchmarkUtil.NOOP_QUERYWATCHER));

    factory = new GroupByQueryRunnerFactory(strategySelector, new GroupByQueryQueryToolChest(strategySelector,
            QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator()));
}
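
A note on the pattern above (repeated in the excerpts below): the ObjectMapper built on a SmileFactory is passed to GroupByStrategyV2, which, in the Druid versions these examples come from, uses it to serialize intermediate grouping state spilled to disk; the examples make such spilling possible by returning a large value from getMaxOnDiskStorage(). Smile's compact binary encoding suits that role better than plain JSON.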

From source file: org.apache.druid.benchmark.query.GroupByBenchmark.java

@Setup(Level.Trial)
public void setup() throws IOException {
    log.info("SETUP CALLED AT " + +System.currentTimeMillis());

    if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
        ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
    }
    executorService = Execs.multiThreaded(numProcessingThreads, "GroupByThreadPool[%d]");

    setupQueries();

    String[] schemaQuery = schemaAndQuery.split("\\.");
    String schemaName = schemaQuery[0];
    String queryName = schemaQuery[1];

    schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schemaName);
    query = SCHEMA_QUERY_MAP.get(schemaName).get(queryName);

    final BenchmarkDataGenerator dataGenerator = new BenchmarkDataGenerator(schemaInfo.getColumnSchemas(),
            RNG_SEED + 1, schemaInfo.getDataInterval(), rowsPerSegment);

    tmpDir = Files.createTempDir();
    log.info("Using temp dir: %s", tmpDir.getAbsolutePath());

    // queryableIndexes   -> numSegments worth of on-disk segments
    // anIncrementalIndex -> the last incremental index
    anIncrementalIndex = null;
    queryableIndexes = new ArrayList<>(numSegments);

    for (int i = 0; i < numSegments; i++) {
        log.info("Generating rows for segment %d/%d", i + 1, numSegments);

        final IncrementalIndex index = makeIncIndex(schemaInfo.isWithRollup());

        for (int j = 0; j < rowsPerSegment; j++) {
            final InputRow row = dataGenerator.nextRow();
            if (j % 20000 == 0) {
                log.info("%,d/%,d rows generated.", i * rowsPerSegment + j, rowsPerSegment * numSegments);
            }
            index.add(row);
        }

        log.info("%,d/%,d rows generated, persisting segment %d/%d.", (i + 1) * rowsPerSegment,
                rowsPerSegment * numSegments, i + 1, numSegments);

        final File file = INDEX_MERGER_V9.persist(index, new File(tmpDir, String.valueOf(i)), new IndexSpec(),
                null);

        queryableIndexes.add(INDEX_IO.loadIndex(file));

        if (i == numSegments - 1) {
            anIncrementalIndex = index;
        } else {
            index.close();
        }
    }

    NonBlockingPool<ByteBuffer> bufferPool = new StupidPool<>("GroupByBenchmark-computeBufferPool",
            new OffheapBufferGenerator("compute", 250_000_000), 0, Integer.MAX_VALUE);

    // limit of 2 is required since we simulate both historical merge and broker merge in the same process
    BlockingPool<ByteBuffer> mergePool = new DefaultBlockingPool<>(
            new OffheapBufferGenerator("merge", 250_000_000), 2);
    final GroupByQueryConfig config = new GroupByQueryConfig() {
        @Override
        public String getDefaultStrategy() {
            return defaultStrategy;
        }

        @Override
        public int getBufferGrouperInitialBuckets() {
            return initialBuckets;
        }

        @Override
        public long getMaxOnDiskStorage() {
            return 1_000_000_000L;
        }
    };
    config.setSingleThreaded(false);
    config.setMaxIntermediateRows(Integer.MAX_VALUE);
    config.setMaxResults(Integer.MAX_VALUE);

    DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {
        @Override
        public int getNumThreads() {
            // Used by "v2" strategy for concurrencyHint
            return numProcessingThreads;
        }

        @Override
        public String getFormatString() {
            return null;
        }
    };

    final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
    final GroupByStrategySelector strategySelector = new GroupByStrategySelector(configSupplier,
            new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool),
                    QueryBenchmarkUtil.NOOP_QUERYWATCHER, bufferPool),
            new GroupByStrategyV2(druidProcessingConfig, configSupplier, bufferPool, mergePool,
                    new ObjectMapper(new SmileFactory()), QueryBenchmarkUtil.NOOP_QUERYWATCHER));

    factory = new GroupByQueryRunnerFactory(strategySelector, new GroupByQueryQueryToolChest(strategySelector,
            QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator()));
}

From source file: org.apache.druid.benchmark.query.GroupByBenchmark.java

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void queryMultiQueryableIndexWithSerde(Blackhole blackhole) {
    QueryToolChest<Row, GroupByQuery> toolChest = factory.getToolchest();
    QueryRunner<Row> theRunner = new FinalizeResultsQueryRunner<>(
            toolChest.mergeResults(
                    new SerializingQueryRunner<>(new DefaultObjectMapper(new SmileFactory()), Row.class,
                            toolChest.mergeResults(factory.mergeRunners(executorService, makeMultiRunners())))),
            (QueryToolChest) toolChest);

    Sequence<Row> queryResult = theRunner.run(QueryPlus.wrap(query), Maps.newHashMap());
    List<Row> results = queryResult.toList();

    for (Row result : results) {
        blackhole.consume(result);
    }
}
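
Unlike the other excerpts, where the Smile mapper is only handed to GroupByStrategyV2, this benchmark also routes every result row through a DefaultObjectMapper built on a SmileFactory, so the measured time includes per-row serde overhead. A rough self-contained sketch of that round-trip pattern, using Guava maps as stand-ins for Druid's Row objects (the class and payload below are illustrative, not Druid code):

import java.util.Arrays;
import java.util.List;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.smile.SmileFactory;
import com.google.common.collect.ImmutableMap;

public class SmileSerdeRoundTrip {
    public static void main(String[] args) throws Exception {
        ObjectMapper smileMapper = new ObjectMapper(new SmileFactory());

        // Stand-ins for query result rows.
        List<Map<String, Object>> rows = Arrays.asList(
                ImmutableMap.<String, Object>of("dim", "a", "count", 1),
                ImmutableMap.<String, Object>of("dim", "b", "count", 2));

        for (Map<String, Object> row : rows) {
            // Serialize and immediately deserialize each row before
            // consuming it, as the serde-simulating runner does.
            byte[] bytes = smileMapper.writeValueAsBytes(row);
            Map<?, ?> decoded = smileMapper.readValue(bytes, Map.class);
            System.out.println(decoded);
        }
    }
}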

From source file: org.apache.druid.query.groupby.GroupByLimitPushDownInsufficientBufferTest.java

private void setupGroupByFactory() {
    executorService = Execs.multiThreaded(3, "GroupByThreadPool[%d]");

    final CloseableStupidPool<ByteBuffer> bufferPool = new CloseableStupidPool<>(
            "GroupByBenchmark-computeBufferPool", new OffheapBufferGenerator("compute", 10_000_000), 0,
            Integer.MAX_VALUE);

    // limit of 2 is required since we simulate both historical merge and broker merge in the same process
    final CloseableDefaultBlockingPool<ByteBuffer> mergePool = new CloseableDefaultBlockingPool<>(
            new OffheapBufferGenerator("merge", 10_000_000), 2);
    // limit of 2 is required since we simulate both historical merge and broker merge in the same process
    final CloseableDefaultBlockingPool<ByteBuffer> tooSmallMergePool = new CloseableDefaultBlockingPool<>(
            new OffheapBufferGenerator("merge", 255), 2);

    resourceCloser.register(bufferPool);
    resourceCloser.register(mergePool);
    resourceCloser.register(tooSmallMergePool);

    final GroupByQueryConfig config = new GroupByQueryConfig() {
        @Override
        public String getDefaultStrategy() {
            return "v2";
        }

        @Override
        public int getBufferGrouperInitialBuckets() {
            return -1;
        }

        @Override
        public long getMaxOnDiskStorage() {
            return 1_000_000_000L;
        }
    };
    config.setSingleThreaded(false);
    config.setMaxIntermediateRows(Integer.MAX_VALUE);
    config.setMaxResults(Integer.MAX_VALUE);

    DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {
        @Override
        public int getNumThreads() {
            // Used by "v2" strategy for concurrencyHint
            return 2;
        }

        @Override
        public String getFormatString() {
            return null;
        }
    };

    DruidProcessingConfig tooSmallDruidProcessingConfig = new DruidProcessingConfig() {
        @Override
        public int intermediateComputeSizeBytes() {
            return 255;
        }

        @Override
        public int getNumThreads() {
            // Used by "v2" strategy for concurrencyHint
            return 2;
        }

        @Override
        public String getFormatString() {
            return null;
        }
    };

    final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
    final GroupByStrategySelector strategySelector = new GroupByStrategySelector(configSupplier,
            new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool),
                    NOOP_QUERYWATCHER, bufferPool),
            new GroupByStrategyV2(druidProcessingConfig, configSupplier, bufferPool, mergePool,
                    new ObjectMapper(new SmileFactory()), NOOP_QUERYWATCHER));

    final GroupByStrategySelector tooSmallStrategySelector = new GroupByStrategySelector(configSupplier,
            new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool),
                    NOOP_QUERYWATCHER, bufferPool),
            new GroupByStrategyV2(tooSmallDruidProcessingConfig, configSupplier, bufferPool, tooSmallMergePool,
                    new ObjectMapper(new SmileFactory()), NOOP_QUERYWATCHER));

    groupByFactory = new GroupByQueryRunnerFactory(strategySelector,
            new GroupByQueryQueryToolChest(strategySelector, NoopIntervalChunkingQueryRunnerDecorator()));

    tooSmallGroupByFactory = new GroupByQueryRunnerFactory(tooSmallStrategySelector,
            new GroupByQueryQueryToolChest(tooSmallStrategySelector,
                    NoopIntervalChunkingQueryRunnerDecorator()));
}

From source file: org.apache.druid.query.groupby.GroupByLimitPushDownMultiNodeMergeTest.java

private void setupGroupByFactory() {
    executorService = Execs.multiThreaded(3, "GroupByThreadPool[%d]");

    final CloseableStupidPool<ByteBuffer> bufferPool = new CloseableStupidPool<>(
            "GroupByBenchmark-computeBufferPool", new OffheapBufferGenerator("compute", 10_000_000), 0,
            Integer.MAX_VALUE);

    // limit of 2 is required since we simulate both historical merge and broker merge in the same process
    final CloseableDefaultBlockingPool<ByteBuffer> mergePool = new CloseableDefaultBlockingPool<>(
            new OffheapBufferGenerator("merge", 10_000_000), 2);
    // limit of 2 is required since we simulate both historical merge and broker merge in the same process
    final CloseableDefaultBlockingPool<ByteBuffer> mergePool2 = new CloseableDefaultBlockingPool<>(
            new OffheapBufferGenerator("merge", 10_000_000), 2);

    resourceCloser.register(bufferPool);
    resourceCloser.register(mergePool);
    resourceCloser.register(mergePool2);

    final GroupByQueryConfig config = new GroupByQueryConfig() {
        @Override
        public String getDefaultStrategy() {
            return "v2";
        }

        @Override
        public int getBufferGrouperInitialBuckets() {
            return -1;
        }

        @Override
        public long getMaxOnDiskStorage() {
            return 1_000_000_000L;
        }
    };
    config.setSingleThreaded(false);
    config.setMaxIntermediateRows(Integer.MAX_VALUE);
    config.setMaxResults(Integer.MAX_VALUE);

    DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {
        @Override
        public int getNumThreads() {
            // Used by "v2" strategy for concurrencyHint
            return 2;
        }

        @Override
        public String getFormatString() {
            return null;
        }
    };

    final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
    final GroupByStrategySelector strategySelector = new GroupByStrategySelector(configSupplier,
            new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool),
                    NOOP_QUERYWATCHER, bufferPool),
            new GroupByStrategyV2(druidProcessingConfig, configSupplier, bufferPool, mergePool,
                    new ObjectMapper(new SmileFactory()), NOOP_QUERYWATCHER));

    final GroupByStrategySelector strategySelector2 = new GroupByStrategySelector(configSupplier,
            new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool),
                    NOOP_QUERYWATCHER, bufferPool),
            new GroupByStrategyV2(druidProcessingConfig, configSupplier, bufferPool, mergePool2,
                    new ObjectMapper(new SmileFactory()), NOOP_QUERYWATCHER));

    groupByFactory = new GroupByQueryRunnerFactory(strategySelector,
            new GroupByQueryQueryToolChest(strategySelector, NoopIntervalChunkingQueryRunnerDecorator()));

    groupByFactory2 = new GroupByQueryRunnerFactory(strategySelector2,
            new GroupByQueryQueryToolChest(strategySelector2, NoopIntervalChunkingQueryRunnerDecorator()));
}

From source file: org.apache.druid.query.groupby.GroupByMultiSegmentTest.java

private void setupGroupByFactory() {
    executorService = Execs.multiThreaded(2, "GroupByThreadPool[%d]");

    final CloseableStupidPool<ByteBuffer> bufferPool = new CloseableStupidPool<>(
            "GroupByBenchmark-computeBufferPool", new OffheapBufferGenerator("compute", 10_000_000), 0,
            Integer.MAX_VALUE);

    // limit of 2 is required since we simulate both historical merge and broker merge in the same process
    final CloseableDefaultBlockingPool<ByteBuffer> mergePool = new CloseableDefaultBlockingPool<>(
            new OffheapBufferGenerator("merge", 10_000_000), 2);

    resourceCloser.register(bufferPool);
    resourceCloser.register(mergePool);
    final GroupByQueryConfig config = new GroupByQueryConfig() {
        @Override
        public String getDefaultStrategy() {
            return "v2";
        }

        @Override
        public int getBufferGrouperInitialBuckets() {
            return -1;
        }

        @Override
        public long getMaxOnDiskStorage() {
            return 1_000_000_000L;
        }
    };
    config.setSingleThreaded(false);
    config.setMaxIntermediateRows(Integer.MAX_VALUE);
    config.setMaxResults(Integer.MAX_VALUE);

    DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {
        @Override
        public int getNumThreads() {
            // Used by "v2" strategy for concurrencyHint
            return 2;
        }

        @Override
        public String getFormatString() {
            return null;
        }
    };

    final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
    final GroupByStrategySelector strategySelector = new GroupByStrategySelector(configSupplier,
            new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool),
                    NOOP_QUERYWATCHER, bufferPool),
            new GroupByStrategyV2(druidProcessingConfig, configSupplier, bufferPool, mergePool,
                    new ObjectMapper(new SmileFactory()), NOOP_QUERYWATCHER));

    groupByFactory = new GroupByQueryRunnerFactory(strategySelector,
            new GroupByQueryQueryToolChest(strategySelector, NoopIntervalChunkingQueryRunnerDecorator()));
}

From source file: org.apache.druid.query.groupby.NestedQueryPushDownTest.java

private void setupGroupByFactory() {
    executorService = Execs.multiThreaded(3, "GroupByThreadPool[%d]");

    NonBlockingPool<ByteBuffer> bufferPool = new StupidPool<>("GroupByBenchmark-computeBufferPool",
            new OffheapBufferGenerator("compute", 10_000_000), 0, Integer.MAX_VALUE);

    // limit of 3 is required since we simulate a historical running the nested query and a broker doing the final merge
    BlockingPool<ByteBuffer> mergePool = new DefaultBlockingPool<>(
            new OffheapBufferGenerator("merge", 10_000_000), 10);
    // limit of 3 is required since we simulate a historical running the nested query and a broker doing the final merge
    BlockingPool<ByteBuffer> mergePool2 = new DefaultBlockingPool<>(
            new OffheapBufferGenerator("merge", 10_000_000), 10);

    final GroupByQueryConfig config = new GroupByQueryConfig() {
        @Override
        public String getDefaultStrategy() {
            return "v2";
        }

        @Override
        public int getBufferGrouperInitialBuckets() {
            return -1;
        }

        @Override
        public long getMaxOnDiskStorage() {
            return 1_000_000_000L;
        }
    };
    config.setSingleThreaded(false);
    config.setMaxIntermediateRows(Integer.MAX_VALUE);
    config.setMaxResults(Integer.MAX_VALUE);

    DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {
        @Override
        public int getNumThreads() {
            // Used by "v2" strategy for concurrencyHint
            return 2;
        }

        @Override
        public String getFormatString() {
            return null;
        }
    };

    final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
    final GroupByStrategySelector strategySelector = new GroupByStrategySelector(configSupplier,
            new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool),
                    NOOP_QUERYWATCHER, bufferPool),
            new GroupByStrategyV2(druidProcessingConfig, configSupplier, bufferPool, mergePool,
                    new ObjectMapper(new SmileFactory()), NOOP_QUERYWATCHER));

    final GroupByStrategySelector strategySelector2 = new GroupByStrategySelector(configSupplier,
            new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool),
                    NOOP_QUERYWATCHER, bufferPool),
            new GroupByStrategyV2(druidProcessingConfig, configSupplier, bufferPool, mergePool2,
                    new ObjectMapper(new SmileFactory()), NOOP_QUERYWATCHER));

    groupByFactory = new GroupByQueryRunnerFactory(strategySelector,
            new GroupByQueryQueryToolChest(strategySelector, NoopIntervalChunkingQueryRunnerDecorator()));

    groupByFactory2 = new GroupByQueryRunnerFactory(strategySelector2,
            new GroupByQueryQueryToolChest(strategySelector2, NoopIntervalChunkingQueryRunnerDecorator()));
}