Example usage for org.hibernate.cfg AvailableSettings BATCH_VERSIONED_DATA

List of usage examples for org.hibernate.cfg AvailableSettings BATCH_VERSIONED_DATA

Introduction

On this page you can find example usage for org.hibernate.cfg AvailableSettings BATCH_VERSIONED_DATA.

Prototype

String BATCH_VERSIONED_DATA

To view the source code for org.hibernate.cfg AvailableSettings BATCH_VERSIONED_DATA, click the Source link below.

Click Source Link

Document

Should versioned data be included in batching?

Usage

From source file: nl.rivm.cib.morphine.household.HHSimulator.java

License:Apache License

/**
 * Entry point: configures logging, wires the simulation binder, runs the
 * injected household model and — when database persistence is enabled —
 * streams the generated statistics to a database via JPA/Hibernate using
 * JDBC batching.
 *
 * @param args arguments from the command line
 * @throws IOException if configuration resources cannot be read
 * @throws InterruptedException if the wait for database persistence is interrupted
 */
public static void main(final String[] args) throws IOException, InterruptedException {
    final HHConfig hhConfig = HHConfig.getOrCreate(args);

    // Bootstrap log4j2 from <configBase>/log4j2.yaml, unless an explicit
    // configuration file was already supplied via the system property.
    if (System.getProperty(ConfigurationFactory.CONFIGURATION_FILE_PROPERTY) == null)
        try (final InputStream is = FileUtil.toInputStream(hhConfig.configBase() + "log4j2.yaml")) {
            // see https://stackoverflow.com/a/42524443
            final LoggerContext ctx = LoggerContext.getContext(false);
            ctx.start(new YamlConfiguration(ctx, new ConfigurationSource(is)));
        } catch (final IOException ignore) {
            // best-effort: fall back to log4j2's default configuration
        }

    final Logger LOG = LogUtil.getLogger(HHSimulator.class);
    LOG.info("Starting {}, args: {} -> config: {}", HHSimulator.class.getSimpleName(), args,
            hhConfig.toJSON(HHConfig.MORPHINE_BASE));

    // FIXME move binder configuration to morphine.yaml
    final LocalConfig binderConfig = LocalConfig.builder().withId(hhConfig.setupName()) // replication name, sets random seeds

            // configure event scheduler
            .withProvider(Scheduler.class, Dsol3Scheduler.class)

            // configure randomness
            .withProvider(ProbabilityDistribution.Parser.class, DistributionParser.class)

            // FIXME skip until work-around is no longer needed
            //            .withProvider( ProbabilityDistribution.Factory.class,
            //                  Math3ProbabilityDistribution.class )
            //            .withProvider( PseudoRandom.Factory.class,
            //                  Math3PseudoRandom.MersenneTwisterFactory.class )

            .build();

    // FIXME workaround until scheduler becomes configurable in coala binder
    final ZonedDateTime offset = hhConfig.offset().atStartOfDay(TimeUtil.NL_TZ);
    final long durationDays = Duration.between(offset, offset.plus(hhConfig.duration())).toDays();
    ConfigCache.getOrCreate(ReplicateConfig.class,
            MapBuilder.unordered().put(ReplicateConfig.ID_KEY, "" + binderConfig.rawId())
                    .put(ReplicateConfig.OFFSET_KEY, "" + offset)
                    .put(ReplicateConfig.DURATION_KEY, "" + durationDays).build());

    // FIXME workaround until seed becomes configurable in coala
    final LocalBinder binder = binderConfig.createBinder(MapBuilder.<Class<?>, Object>unordered()
            .put(ProbabilityDistribution.Factory.class,
                    new Math3ProbabilityDistribution.Factory(new Math3PseudoRandom.MersenneTwisterFactory()
                            .create(PseudoRandom.Config.NAME_DEFAULT, hhConfig.randomSeed())))
            .build());

    final HHModel model = binder.inject(HHModel.class);

    // persist statistics; dbLatch reaches zero once the DB stream completes
    // (or immediately, when persistence is disabled)
    final boolean jpa = hhConfig.dbEnabled();
    final CountDownLatch dbLatch = new CountDownLatch(jpa ? 1 : 0);
    if (jpa)
        try {
            // trade-off; see https://stackoverflow.com/a/30347287/1418999
            final int jdbcBatchSize = 25;
            // trade-off; 50K+ are postponed until sim ends, flooding the stack
            final int rowsPerTx = 10000;

            // export only the javax.persistence.* keys from the simulation config
            final Pattern pattern = Pattern.compile("^(" + Pattern.quote("javax.persistence") + ").*");
            // JPA config with vendor (i.e. Hibernate)-specific settings
            final EntityManagerFactory emf = ConfigFactory
                    .create(HikariHibernateJPAConfig.class,
                            ConfigUtil.join(hhConfig.export(pattern),
                                    MapBuilder.unordered()
                                            .put(AvailableSettings.STATEMENT_BATCH_SIZE, "" + jdbcBatchSize)
                                            // include versioned entities in JDBC batches
                                            .put(AvailableSettings.BATCH_VERSIONED_DATA, "" + true)
                                            // group inserts/updates per entity so batches stay contiguous
                                            .put(AvailableSettings.ORDER_INSERTS, "" + true)
                                            .put(AvailableSettings.ORDER_UPDATES, "" + true).build()))
                    .createEMF();

            // shared between threads generating (sim) and flushing (db) rows
            final AtomicLong rowsPending = new AtomicLong();

            model.statistics().doOnNext(dao -> rowsPending.incrementAndGet())
                    // drain every 10s or whenever rowsPerTx rows have accumulated
                    .buffer(10, TimeUnit.SECONDS, rowsPerTx).observeOn(
                            // Schedulers.from( Executors.newFixedThreadPool( 4 ) )
                            Schedulers.io()) // TODO is (unlimited) I/O smart?
                    .subscribe(buffer -> {
                        // TODO hold simulator while pending exceeds a maximum ?
                        final long start = System.currentTimeMillis();
                        final long n = rowsPending.addAndGet(-buffer.size());
                        JPAUtil.session(emf).subscribe(em -> {
                            final AtomicLong it = new AtomicLong();
                            buffer.forEach(dao -> {
                                dao.persist(em, binder.id());
                                // flush once per JDBC batch to bound the persistence context
                                if (it.incrementAndGet() % jdbcBatchSize == 0) {
                                    em.flush();
                                    //                           em.clear();
                                }
                            });
                        }, e -> LOG.error("Problem persisting stats", e),
                                () -> LOG.trace("Persisted {} rows in {}s, {} pending", buffer.size(),
                                        Pretty.of(() -> DecimalUtil
                                                .toScale((System.currentTimeMillis() - start) / 1000., 1)),
                                        n));
                    }, e -> {
                        LOG.error("Problem generating household stats", e);
                        emf.close(); // clean up connections
                        dbLatch.countDown();
                    }, () -> {
                        LOG.trace("Database persistence completed");
                        emf.close(); // clean up connections
                        dbLatch.countDown();
                    });
        } catch (final Exception e) {
            LOG.error("Could not start database", e);
            dbLatch.countDown(); // don't block shutdown when DB setup fails
        }
    //      model.network().subscribe( e -> LOG.trace( "change: {}", e ) );

    // run injected (Singleton) model; start generating the statistics
    model.run();
    LOG.info("{} completed...", model.scheduler().getClass().getSimpleName());

    // wait until all statistics persisted
    dbLatch.await();

    LOG.info("Completed {}", model.getClass().getSimpleName());
}

From source file: nl.rivm.cib.pilot.PilotSimulator.java

License:Apache License

/**
 * Entry point: merges CLI arguments with the YAML configuration, configures
 * logging, wires the simulation binder, runs the injected pilot scenario,
 * optionally writes SIR transitions to a tab-separated file, and — when
 * database persistence is enabled — streams the generated statistics to a
 * database via JPA/Hibernate using JDBC batching.
 *
 * @param args arguments from the command line
 * @throws IOException if configuration resources or the output file cannot be accessed
 * @throws InterruptedException if waiting for persistence or file output is interrupted
 */
public static void main(final String[] args) throws IOException, InterruptedException {
    // convert command-line arguments to map
    final Map<String, String> argMap = ConfigUtil.cliArgMap(args);

    // set default configuration data file base directory/url
    final String fileName = argMap.computeIfAbsent(CONF_ARG,
            confArg -> System.getProperty(CONF_ARG, ConfigUtil.cliConfBase(argMap, PilotConfig.CONFIG_BASE_KEY,
                    PilotConfig.CONFIG_BASE_DIR, PilotConfig.CONFIG_YAML_FILE)));

    // merge arguments into configuration imported from YAML file
    final PilotConfig hhConfig = ConfigCache.getOrCreate(PilotConfig.class,
            // CLI args added first: override config resource and defaults 
            argMap, YamlUtil.flattenYaml(FileUtil.toInputStream(fileName)));

    // Bootstrap log4j2 from <configBase>/log4j2.yaml, unless an explicit
    // configuration file was already supplied via the system property.
    if (System.getProperty(ConfigurationFactory.CONFIGURATION_FILE_PROPERTY) == null)
        try (final InputStream is = FileUtil.toInputStream(hhConfig.configBase() + "log4j2.yaml")) {
            // see https://stackoverflow.com/a/42524443
            final LoggerContext ctx = LoggerContext.getContext(false);
            ctx.start(new YamlConfiguration(ctx, new ConfigurationSource(is)));
        } catch (final IOException ignore) {
            // best-effort: fall back to log4j2's default configuration
        }

    final Logger LOG = LogUtil.getLogger(PilotSimulator.class);
    LOG.info("Starting {}, args: {} -> config: {}", PilotSimulator.class.getSimpleName(), args,
            hhConfig.toJSON(PilotConfig.SCENARIO_BASE));

    // FIXME move binder configuration to sim.yaml
    final LocalConfig binderConfig = LocalConfig.builder().withId(hhConfig.setupName()) // replication name, sets random seeds

            // configure event scheduler
            .withProvider(Scheduler.class, Dsol3Scheduler.class)

            // configure randomness
            .withProvider(ProbabilityDistribution.Parser.class, DistributionParser.class)

            // FIXME skip until work-around is no longer needed
            //            .withProvider( ProbabilityDistribution.Factory.class,
            //                  Math3ProbabilityDistribution.class )
            //            .withProvider( PseudoRandom.Factory.class,
            //                  Math3PseudoRandom.MersenneTwisterFactory.class )

            .build();

    // FIXME workaround until scheduler becomes configurable in coala binder
    final ZonedDateTime offset = hhConfig.offset().atStartOfDay(TimeUtil.NL_TZ);
    final long durationDays = Duration.between(offset, offset.plus(hhConfig.duration())).toDays();
    ConfigCache.getOrCreate(SchedulerConfig.class,
            MapBuilder.unordered().put(SchedulerConfig.ID_KEY, "" + binderConfig.rawId())
                    .put(SchedulerConfig.OFFSET_KEY, "" + offset)
                    .put(SchedulerConfig.DURATION_KEY, "" + durationDays).build());

    // FIXME workaround until seed becomes configurable in coala
    final LocalBinder binder = binderConfig.createBinder(MapBuilder.<Class<?>, Object>unordered()
            .put(ProbabilityDistribution.Factory.class,
                    new Math3ProbabilityDistribution.Factory(new Math3PseudoRandom.MersenneTwisterFactory()
                            .create(PseudoRandom.Config.NAME_DEFAULT, hhConfig.randomSeed())))
            .build());

    final PilotScenario model = binder.inject(PilotScenario.class);

    // optional SIR-transition output file; set to null to disable
    final File file = //null;
            new File("pilot-sir-" + model.seed() + ".txt");
    // outFile counts down once the writer Observable has been disposed
    final CountDownLatch outFile = new CountDownLatch(file != null && file.createNewFile() ? 1 : 0);

    final String sep = "\t";
    if (outFile.getCount() > 0)
        // Observable.using ties the PrintWriter's lifetime to the subscription
        Observable.using(() -> new PrintWriter(FileUtil.toOutputStream(file, false)), pw -> {
            return model.sirTransitions().map(sir -> {
                // one tab-separated line per transition: t(days) + 4 SIR values
                final String line = DecimalUtil.toScale(model.now().toQuantity(TimeUnits.DAYS).getValue(), 4)
                        + sep + sir[0] + sep + sir[1] + sep + sir[2] + sep + sir[3];
                pw.println(line);
                return line;
            });
        }, out -> {
            out.close();
            outFile.countDown();
        }).subscribe(line -> {
        }, Exceptions::propagate);

    // persist statistics; dbLatch reaches zero once the DB stream completes
    // (or immediately, when persistence is disabled)
    final boolean jpa = hhConfig.dbEnabled();
    final CountDownLatch dbLatch = new CountDownLatch(jpa ? 1 : 0);
    if (jpa)
        try {
            // trade-off; see https://stackoverflow.com/a/30347287/1418999
            final int jdbcBatchSize = 25;
            // trade-off; 50K+ are postponed until sim ends, flooding the stack
            final int rowsPerTx = 10000;

            final EntityManagerFactory emf = hhConfig.toJPAConfig(HibernateJPAConfig.class,
                    // add vendor-specific JPA settings (i.e. Hibernate)
                    MapBuilder.unordered().put(AvailableSettings.STATEMENT_BATCH_SIZE, "" + jdbcBatchSize)
                            // include versioned entities in JDBC batches
                            .put(AvailableSettings.BATCH_VERSIONED_DATA, "" + true)
                            // group inserts/updates per entity so batches stay contiguous
                            .put(AvailableSettings.ORDER_INSERTS, "" + true)
                            .put(AvailableSettings.ORDER_UPDATES, "" + true).build())
                    .createEMF();

            // shared between threads generating (sim) and flushing (db) rows
            final AtomicLong rowsPending = new AtomicLong();

            model.statistics().doOnNext(dao -> rowsPending.incrementAndGet())
                    // drain every 10s or whenever rowsPerTx rows have accumulated
                    .buffer(10, TimeUnit.SECONDS, rowsPerTx).observeOn(
                            // Schedulers.from( Executors.newFixedThreadPool( 4 ) )
                            Schedulers.io()) // TODO is (unlimited) I/O smart?
                    .subscribe(buffer -> {
                        // TODO hold simulator while pending exceeds a maximum ?
                        final long start = System.currentTimeMillis();
                        final long n = rowsPending.addAndGet(-buffer.size());
                        JPAUtil.session(emf).subscribe(em -> {
                            final AtomicLong it = new AtomicLong();
                            buffer.forEach(dao -> {
                                dao.persist(em, binder.id());
                                // flush once per JDBC batch to bound the persistence context
                                if (it.incrementAndGet() % jdbcBatchSize == 0) {
                                    em.flush();
                                    //                           em.clear();
                                }
                            });
                        }, e -> LOG.error("Problem persisting stats", e),
                                () -> LOG.trace("Persisted {} rows in {}s, {} pending", buffer.size(),
                                        Pretty.of(() -> DecimalUtil
                                                .toScale((System.currentTimeMillis() - start) / 1000., 1)),
                                        n));
                    }, e -> {
                        LOG.error("Problem generating household stats", e);
                        emf.close(); // clean up connections
                        dbLatch.countDown();
                    }, () -> {
                        LOG.trace("Database persistence completed");
                        emf.close(); // clean up connections
                        dbLatch.countDown();
                    });
        } catch (final Exception e) {
            LOG.error("Could not start database", e);
            dbLatch.countDown(); // don't block shutdown when DB setup fails
        }
    //      model.network().subscribe( e -> LOG.trace( "change: {}", e ) );

    // run injected (Singleton) model; start generating the statistics
    model.run();
    LOG.info("{} ready, finalizing...", model.scheduler().getClass().getSimpleName());

    // wait until all statistics persisted
    dbLatch.await();

    // wait until the SIR-transition writer has flushed and closed the file
    if (outFile.getCount() > 0) {
        LOG.trace("Waiting for output to: {}", file);
        outFile.await();
        LOG.trace("Output written to: {}", file);
    }

    LOG.info("Completed {}!", model.getClass().getSimpleName());
}