Example usage for org.joda.time Duration getMillis

Introduction

On this page you can find example usages of org.joda.time Duration.getMillis().

Prototype

public long getMillis() 

Document

Gets the length of this duration in milliseconds.
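
A minimal, self-contained sketch of the call (the class name and values are illustrative only):

import org.joda.time.Duration;

public class GetMillisExample {
    public static void main(String[] args) {
        // A fixed-length duration of 90 seconds
        Duration duration = Duration.standardSeconds(90);

        // getMillis() returns the exact length of the duration in milliseconds
        long millis = duration.getMillis(); // 90000
        System.out.println("Duration length: " + millis + " ms");
    }
}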

Usage

From source file:org.apache.druid.java.util.common.concurrent.ScheduledExecutors.java

License:Apache License

/**
 * Run callable repeatedly with the given delay between calls, until it
 * returns Signal.STOP. Exceptions are caught and logged as errors.
 */
public static void scheduleWithFixedDelay(final ScheduledExecutorService exec, final Duration initialDelay,
        final Duration delay, final Callable<Signal> callable) {
    log.debug("Scheduling repeatedly: %s with delay %s", callable, delay);
    exec.schedule(new Runnable() {
        @Override
        public void run() {
            try {
                log.trace("Running %s (delay %s)", callable, delay);
                if (callable.call() == Signal.REPEAT) {
                    log.trace("Rescheduling %s (delay %s)", callable, delay);
                    exec.schedule(this, delay.getMillis(), TimeUnit.MILLISECONDS);
                } else {
                    log.debug("Stopped rescheduling %s (delay %s)", callable, delay);
                }
            } catch (Throwable e) {
                log.error(e, "Uncaught exception.");
            }
        }
    }, initialDelay.getMillis(), TimeUnit.MILLISECONDS);
}
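
A hypothetical call site for this helper is sketched below. The single-threaded executor, the counter, and the one-second/ten-second delays are illustrative only; Signal is the same enum returned by the callable above.

ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();
AtomicInteger runsLeft = new AtomicInteger(3); // illustrative stop condition

ScheduledExecutors.scheduleWithFixedDelay(exec, Duration.standardSeconds(1), // initial delay
        Duration.standardSeconds(10), // delay between completions
        () -> runsLeft.decrementAndGet() > 0 ? Signal.REPEAT : Signal.STOP);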

From source file:org.apache.druid.java.util.common.concurrent.ScheduledExecutors.java

License:Apache License

/**
 * Run callable periodically at the given rate. Rescheduling stops once the
 * previous invocation returned Signal.STOP; exceptions are caught and logged as errors.
 */
public static void scheduleAtFixedRate(final ScheduledExecutorService exec, final Duration initialDelay,
        final Duration rate, final Callable<Signal> callable) {
    log.debug("Scheduling periodically: %s with period %s", callable, rate);
    exec.schedule(new Runnable() {
        private volatile Signal prevSignal = null;

        @Override
        public void run() {
            if (prevSignal == null || prevSignal == Signal.REPEAT) {
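                // Schedule the next run before invoking the callable, so the spacing
                // approximates a fixed rate rather than a fixed delay between completions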
                exec.schedule(this, rate.getMillis(), TimeUnit.MILLISECONDS);
            }

            try {
                log.trace("Running %s (period %s)", callable, rate);
                prevSignal = callable.call();
            } catch (Throwable e) {
                log.error(e, "Uncaught exception.");
            }
        }
    }, initialDelay.getMillis(), TimeUnit.MILLISECONDS);
}

From source file:org.apache.pig.Main.java

License:Apache License

private static void printScriptRunTime(DateTime startTime) {
    DateTime endTime = new DateTime();
    Duration duration = new Duration(startTime, endTime);
    Period period = duration.toPeriod().normalizedStandard(PeriodType.time());
    log.info("Pig script completed in " + PeriodFormat.getDefault().print(period) + " (" + duration.getMillis()
            + " ms)");
}
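
For example, a script that ran for 83,000 ms would be reported roughly as follows (the wording comes from Joda-Time's default word-based period formatter):

Pig script completed in 1 minute and 23 seconds (83000 ms)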

From source file:org.axonframework.eventhandling.scheduling.java.SimpleEventScheduler.java

License:Apache License

@Override
public ScheduleToken schedule(Duration triggerDuration, Object event) {
    String tokenId = IdentifierFactory.getInstance().generateIdentifier();
    ScheduledFuture<?> future = executorService.schedule(new PublishEventTask(event, tokenId),
            triggerDuration.getMillis(), TimeUnit.MILLISECONDS);
    tokens.put(tokenId, future);
    return new SimpleScheduleToken(tokenId);
}
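
A hypothetical call site, assuming an already constructed SimpleEventScheduler (eventScheduler) and an application-defined event object (orderExpiredEvent), both illustrative names:

// Publish the event roughly 15 minutes from now; keep the token to allow cancellation
ScheduleToken token = eventScheduler.schedule(Duration.standardMinutes(15), orderExpiredEvent);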

From source file:org.cook_e.data.TimeLearner.java

License:Open Source License

/**
 * Learns the actual time of a step.
 *
 * @param r the recipe to record the time for
 * @param s the step whose actual time is being recorded
 * @param time the actual time the user took to finish this step
 * @throws IllegalArgumentException when the actual time is negative
 * @throws SQLException if accessing or updating the stored learner data fails
 */
@Override
public void learnStep(@NonNull Recipe r, Step s, @NonNull Duration time)
        throws IllegalArgumentException, SQLException {
    Objects.requireNonNull(r, "recipe must not be null");
    Objects.requireNonNull(time, "time must not be null");
    long actualTime = time.getMillis();
    if (actualTime < 0)
        throw new IllegalArgumentException("time must not be negative");

    LearningWeight lw = accessOrCreateLearningWeight(r, s);

    // calculate new weight
    long oldEstimatedTime = (long) (s.getTime().getMillis() * lw.getTimeWeight());
    double weightChange;
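    // Clamp the relative change so a single observation cannot scale the estimate
    // by more than LEARNING_LIMIT in either direction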
    if (actualTime >= oldEstimatedTime * LEARNING_LIMIT)
        weightChange = LEARNING_LIMIT - 1;
    else if (actualTime * LEARNING_LIMIT <= oldEstimatedTime)
        weightChange = (1 / LEARNING_LIMIT) - 1;
    else {
        weightChange = (actualTime * 1.0 / oldEstimatedTime) - 1;
    }

    lw.setTimeWeight(lw.getTimeWeight() + lw.getTimeWeight() * weightChange * lw.getLearnRate());
    lw.setLearnRate(lw.getLearnRate() * LEARN_RATE_DECAY_RATE);
    mStorageAccessor.updateLearnerData(r, lw);
}

From source file:org.datanucleus.store.types.jodatime.converters.JodaDurationLongConverter.java

License:Open Source License

public Long toDatastoreType(Duration dur) {
    return Long.valueOf(dur.getMillis());
}
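
The matching conversion back to the member type is presumably just Joda-Time's millisecond constructor; a minimal sketch (the toMemberType name mirrors the naming used above and is an assumption here):

public Duration toMemberType(Long value) {
    if (value == null) {
        return null;
    }
    // Duration(long) interprets its argument as a length in milliseconds
    return new Duration(value.longValue());
}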

From source file:org.graylog2.lookup.LookupDataAdapterRefreshService.java

License:Open Source License

/**
 * Add the given {@link LookupDataAdapter} to the refresh service.
 * <p>
 * The {@link LookupDataAdapter#doRefresh(LookupCachePurge) refresh method} will be called periodically
 * according to the {@link LookupDataAdapter#refreshInterval() refresh interval} of the data adapter.
 * @param dataAdapter the adapter to be added
 */
public void add(LookupDataAdapter dataAdapter) {
    if (state() == State.STOPPING || state() == State.TERMINATED) {
        LOG.debug("Service is in state <{}> - not adding new job for <{}/{}/@{}>", state(), dataAdapter.name(),
                dataAdapter.id(), objectId(dataAdapter));
        return;
    }

    final Duration interval = dataAdapter.refreshInterval();

    // No need to schedule the data adapter refresh if it doesn't implement a refresh
    if (!interval.equals(Duration.ZERO)) {
        // Using the adapter object ID here to make it possible to have multiple jobs for the same adapter
        final String instanceId = objectId(dataAdapter);

        // Manually synchronize here to avoid overwriting an existing refresh job for the given data adapter.
        // ConcurrentMap#computeIfAbsent() does not work here because scheduling a job is not idempotent.
        synchronized (futures) {
            if (!futures.containsKey(instanceId)) {
                LOG.info("Adding job for <{}/{}/@{}> [interval={}ms]", dataAdapter.name(), dataAdapter.id(),
                        instanceId, interval.getMillis());
                futures.put(instanceId, schedule(dataAdapter, interval));
            } else {
                LOG.warn("Job for <{}/{}/@{}> already exists, not adding it again.", dataAdapter.name(),
                        dataAdapter.id(), instanceId);
            }
        }
    }
}

From source file:org.graylog2.lookup.LookupDataAdapterRefreshService.java

License:Open Source License

private ScheduledFuture<?> schedule(LookupDataAdapter dataAdapter, Duration interval) {
    final CachePurge cachePurge = new CachePurge(liveTables, dataAdapter);

    return scheduler.scheduleAtFixedRate(() -> {
        try {
            dataAdapter.refresh(cachePurge);
        } catch (Exception e) {
            LOG.warn("Unhandled error while refreshing <{}/{}/@{}>", dataAdapter.name(), dataAdapter.id(),
                    objectId(dataAdapter), e);
        }
    }, interval.getMillis(), interval.getMillis(), TimeUnit.MILLISECONDS);
}

From source file:org.graylog2.shared.journal.KafkaJournal.java

License:Open Source License

@Inject
public KafkaJournal(@Named("message_journal_dir") File journalDirectory,
        @Named("scheduler") ScheduledExecutorService scheduler,
        @Named("message_journal_segment_size") Size segmentSize,
        @Named("message_journal_segment_age") Duration segmentAge,
        @Named("message_journal_max_size") Size retentionSize,
        @Named("message_journal_max_age") Duration retentionAge,
        @Named("message_journal_flush_interval") long flushInterval,
        @Named("message_journal_flush_age") Duration flushAge, MetricRegistry metricRegistry) {
    this.scheduler = scheduler;

    this.messagesWritten = metricRegistry.meter(name(this.getClass(), "messagesWritten"));
    this.messagesRead = metricRegistry.meter(name(this.getClass(), "messagesRead"));

    registerUncommittedGauge(metricRegistry, name(this.getClass(), "uncommittedMessages"));

    // the registerHdrTimer helper doesn't throw on existing metrics
    this.writeTime = registerHdrTimer(metricRegistry, name(this.getClass(), "writeTime"));
    this.readTime = registerHdrTimer(metricRegistry, name(this.getClass(), "readTime"));

    // these are the default values as per kafka 0.8.1.1
    final LogConfig defaultConfig = new LogConfig(
            // segmentSize: The soft maximum for the size of a segment file in the log
            Ints.saturatedCast(segmentSize.toBytes()),
            // segmentMs: The soft maximum on the amount of time before a new log segment is rolled
            segmentAge.getMillis(),
            // flushInterval: The number of messages that can be written to the log before a flush is forced
            flushInterval,
            // flushMs: The amount of time the log can have dirty data before a flush is forced
            flushAge.getMillis(),
            // retentionSize: The approximate total number of bytes this log can use
            retentionSize.toBytes(),
            // retentionMs: The approximate maximum age of the last segment that is retained
            retentionAge.getMillis(),
            // maxMessageSize: The maximum size of a message in the log
            Integer.MAX_VALUE,
            // maxIndexSize: The maximum size of an index file
            Ints.saturatedCast(megabytes(1l).toBytes()),
            // indexInterval: The approximate number of bytes between index entries
            4096,
            // fileDeleteDelayMs: The time to wait before deleting a file from the filesystem
            MINUTES.toMillis(1l),
            // deleteRetentionMs: The time to retain delete markers in the log. Only applicable for logs that are being compacted.
            DAYS.toMillis(1l),
            // minCleanableRatio: The ratio of bytes that are available for cleaning to the bytes already cleaned
            0.5,
            // compact: Should old segments in this log be deleted or de-duplicated?
            false);
    // these are the default values as per kafka 0.8.1.1, except we don't turn on the cleaner
    // Cleaner really is log compaction with respect to "deletes" in the log.
    // we never insert a message twice, at least not on purpose, so we do not "clean" logs, ever.
    final CleanerConfig cleanerConfig = new CleanerConfig(1, megabytes(4l).toBytes(), 0.9d,
            Ints.saturatedCast(megabytes(1l).toBytes()), Ints.saturatedCast(megabytes(32l).toBytes()),
            Ints.saturatedCast(megabytes(5l).toBytes()), SECONDS.toMillis(15l), false, "MD5");

    if (!journalDirectory.exists() && !journalDirectory.mkdirs()) {
        LOG.error("Cannot create journal directory at {}, please check the permissions",
                journalDirectory.getAbsolutePath());
        Throwables.propagate(new AccessDeniedException(journalDirectory.getAbsolutePath(), null,
                "Could not create journal directory."));
    }

    // TODO add check for directory, etc
    committedReadOffsetFile = new File(journalDirectory, "graylog2-committed-read-offset");
    try {
        if (!committedReadOffsetFile.createNewFile()) {
            final String line = Files.readFirstLine(committedReadOffsetFile, Charsets.UTF_8);
            // the file contains the last offset graylog2 has successfully processed.
            // thus the nextReadOffset is one beyond that number
            if (line != null) {
                committedOffset.set(Long.parseLong(line.trim()));
                nextReadOffset = committedOffset.get() + 1;
            }
        }
    } catch (IOException e) {
        LOG.error("Cannot access offset file: {}", e.getMessage());
        Throwables.propagate(
                new AccessDeniedException(committedReadOffsetFile.getAbsolutePath(), null, e.getMessage()));
    }
    try {
        kafkaScheduler = new KafkaScheduler(2, "kafka-journal-scheduler-", false); // TODO make thread count configurable
        kafkaScheduler.startup();
        logManager = new LogManager(new File[] { journalDirectory }, Map$.MODULE$.<String, LogConfig>empty(),
                defaultConfig, cleanerConfig, SECONDS.toMillis(60l), SECONDS.toMillis(60l),
                SECONDS.toMillis(60l), kafkaScheduler, JODA_TIME);

        final TopicAndPartition topicAndPartition = new TopicAndPartition("messagejournal", 0);
        final Option<Log> messageLog = logManager.getLog(topicAndPartition);
        if (messageLog.isEmpty()) {
            kafkaLog = logManager.createLog(topicAndPartition, logManager.defaultConfig());
        } else {
            kafkaLog = messageLog.get();
        }
        LOG.info("Initialized Kafka based journal at {}", journalDirectory);
        setupKafkaLogMetrics(metricRegistry);

        offsetFlusher = new OffsetFileFlusher();
        dirtyLogFlusher = new DirtyLogFlusher();
        recoveryCheckpointFlusher = new RecoveryCheckpointFlusher();
        logRetentionCleaner = new LogRetentionCleaner();
    } catch (KafkaException e) {
        // most likely failed to grab lock
        LOG.error("Unable to start logmanager.", e);
        throw new RuntimeException(e);
    }

}

From source file:org.jadira.usertype.dateandtime.joda.columnmapper.BigIntegerColumnDurationMapper.java

License:Apache License

@Override
public BigInteger toNonNullValue(Duration value) {
    return BigInteger.valueOf(value.getMillis()).multiply(BigInteger.valueOf(1000000L));
}
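
Since the millisecond count is multiplied by 1,000,000, the column effectively stores nanoseconds. The inverse mapping would presumably divide by the same factor before handing the result to Joda-Time; a hedged sketch (the fromNonNullValue name mirrors the toNonNullValue convention above and is an assumption):

@Override
public Duration fromNonNullValue(BigInteger value) {
    // Convert the stored nanosecond count back to milliseconds for Joda-Time's Duration
    return new Duration(value.divide(BigInteger.valueOf(1000000L)).longValue());
}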