Example usage for org.joda.time Period toStandardDuration

Introduction

This page collects example usages of org.joda.time.Period#toStandardDuration from open-source projects.

Prototype

public Duration toStandardDuration() 

Document

Converts this period to a duration assuming a 7-day week, 24-hour day, 60-minute hour, and 60-second minute. The conversion throws UnsupportedOperationException if the period contains years or months, since those fields have no fixed length in milliseconds.
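
A minimal standalone sketch of the conversion (illustrative code, not drawn from the projects below):

import org.joda.time.Duration;
import org.joda.time.Period;

public class ToStandardDurationExample {
    public static void main(String[] args) {
        // 2 days, 3 hours, 30 minutes -> 185,400,000 fixed-length milliseconds
        Period period = new Period(0, 0, 0, 2, 3, 30, 0, 0);
        Duration duration = period.toStandardDuration();
        System.out.println(duration.getMillis()); // prints 185400000

        // Years and months have no fixed millisecond length, so this throws
        // UnsupportedOperationException:
        // Period.months(1).toStandardDuration();
    }
}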

Usage

From source file: org.apache.druid.segment.realtime.appenderator.AppenderatorPlumber.java

License: Apache License

private void startPersistThread() {
    final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
    final Period windowPeriod = config.getWindowPeriod();

    final DateTime truncatedNow = segmentGranularity.bucketStart(DateTimes.nowUtc());
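    // Convert the configured window period to fixed-length milliseconds; this assumes
    // the window has no year or month fields (toStandardDuration would throw for those).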
    final long windowMillis = windowPeriod.toStandardDuration().getMillis();

    log.info("Expect to run at [%s]", DateTimes.nowUtc().plus(new Duration(System.currentTimeMillis(),
            segmentGranularity.increment(truncatedNow).getMillis() + windowMillis)));

    String threadName = StringUtils.format("%s-overseer-%d", schema.getDataSource(),
            config.getShardSpec().getPartitionNum());
    ThreadRenamingCallable<ScheduledExecutors.Signal> threadRenamingCallable = new ThreadRenamingCallable<ScheduledExecutors.Signal>(
            threadName) {
        @Override
        public ScheduledExecutors.Signal doCall() {
            if (stopped) {
                log.info("Stopping merge-n-push overseer thread");
                return ScheduledExecutors.Signal.STOP;
            }

            mergeAndPush();

            if (stopped) {
                log.info("Stopping merge-n-push overseer thread");
                return ScheduledExecutors.Signal.STOP;
            } else {
                return ScheduledExecutors.Signal.REPEAT;
            }
        }
    };
    Duration initialDelay = new Duration(System.currentTimeMillis(),
            segmentGranularity.increment(truncatedNow).getMillis() + windowMillis);
    Duration rate = new Duration(truncatedNow, segmentGranularity.increment(truncatedNow));
    ScheduledExecutors.scheduleAtFixedRate(scheduledExecutor, initialDelay, rate, threadRenamingCallable);
}

From source file: org.apache.druid.segment.realtime.appenderator.AppenderatorPlumber.java

License: Apache License

private void mergeAndPush() {
    final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
    final Period windowPeriod = config.getWindowPeriod();

    final long windowMillis = windowPeriod.toStandardDuration().getMillis();
    log.info("Starting merge and push.");
    DateTime minTimestampAsDate = segmentGranularity.bucketStart(
            DateTimes.utc(Math.max(windowMillis, rejectionPolicy.getCurrMaxTime().getMillis()) - windowMillis));
    long minTimestamp = minTimestampAsDate.getMillis();

    final List<SegmentIdWithShardSpec> appenderatorSegments = appenderator.getSegments();
    final List<SegmentIdWithShardSpec> segmentsToPush = new ArrayList<>();

    if (shuttingDown) {
        log.info("Found [%,d] segments. Attempting to hand off all of them.", appenderatorSegments.size());
        segmentsToPush.addAll(appenderatorSegments);
    } else {
        log.info("Found [%,d] segments. Attempting to hand off segments that start before [%s].",
                appenderatorSegments.size(), minTimestampAsDate);

        for (SegmentIdWithShardSpec segment : appenderatorSegments) {
            final Long intervalStart = segment.getInterval().getStartMillis();
            if (intervalStart < minTimestamp) {
                log.info("Adding entry [%s] for merge and push.", segment);
                segmentsToPush.add(segment);
            } else {
                log.info(
                        "Skipping persist and merge for entry [%s] : Start time [%s] >= [%s] min timestamp required in this run. Segment will be picked up in a future run.",
                        segment, DateTimes.utc(intervalStart), minTimestampAsDate);
            }
        }
    }

    log.info("Found [%,d] segments to persist and merge", segmentsToPush.size());

    final Function<Throwable, Void> errorHandler = new Function<Throwable, Void>() {
        @Override
        public Void apply(Throwable throwable) {
            final List<String> segmentIdentifierStrings = Lists.transform(segmentsToPush,
                    SegmentIdWithShardSpec::toString);

            log.makeAlert(throwable, "Failed to publish merged indexes[%s]", schema.getDataSource())
                    .addData("segments", segmentIdentifierStrings).emit();

            if (shuttingDown) {
                // We're trying to shut down, and these segments failed to push. Let's just get rid of them.
                // This call will also delete possibly-partially-written files, so we don't need to do it explicitly.
                cleanShutdown = false;
                for (SegmentIdWithShardSpec identifier : segmentsToPush) {
                    dropSegment(identifier);
                }
            }

            return null;
        }
    };

    // WARNING: Committers.nil() here means that on-disk data can get out of sync with committing.
    Futures.addCallback(appenderator.push(segmentsToPush, Committers.nil(), false),
            new FutureCallback<SegmentsAndMetadata>() {
                @Override
                public void onSuccess(SegmentsAndMetadata result) {
                    // Immediately publish after pushing
                    for (DataSegment pushedSegment : result.getSegments()) {
                        try {
                            segmentPublisher.publishSegment(pushedSegment);
                        } catch (Exception e) {
                            errorHandler.apply(e);
                        }
                    }

                    log.info("Published [%,d] sinks.", segmentsToPush.size());
                }

                @Override
                public void onFailure(Throwable e) {
                    log.warn(e, "Failed to push [%,d] segments.", segmentsToPush.size());
                    errorHandler.apply(e);
                }
            });
}

From source file: org.apache.druid.segment.realtime.plumber.MessageTimeRejectionPolicyFactory.java

License: Apache License

@Override
public RejectionPolicy create(final Period windowPeriod) {
    final long windowMillis = windowPeriod.toStandardDuration().getMillis();
    return new MessageTimeRejectionPolicy(windowMillis, windowPeriod);
}

From source file: org.apache.druid.segment.realtime.plumber.RealtimePlumber.java

License: Apache License

protected void startPersistThread() {
    final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
    final Period windowPeriod = config.getWindowPeriod();

    final DateTime truncatedNow = segmentGranularity.bucketStart(DateTimes.nowUtc());
    final long windowMillis = windowPeriod.toStandardDuration().getMillis();

    log.info("Expect to run at [%s]", DateTimes.nowUtc().plus(new Duration(System.currentTimeMillis(),
            segmentGranularity.increment(truncatedNow).getMillis() + windowMillis)));

    String threadName = StringUtils.format("%s-overseer-%d", schema.getDataSource(),
            config.getShardSpec().getPartitionNum());
    ThreadRenamingCallable<ScheduledExecutors.Signal> threadRenamingCallable = new ThreadRenamingCallable<ScheduledExecutors.Signal>(
            threadName) {
        @Override
        public ScheduledExecutors.Signal doCall() {
            if (stopped) {
                log.info("Stopping merge-n-push overseer thread");
                return ScheduledExecutors.Signal.STOP;
            }

            mergeAndPush();

            if (stopped) {
                log.info("Stopping merge-n-push overseer thread");
                return ScheduledExecutors.Signal.STOP;
            } else {
                return ScheduledExecutors.Signal.REPEAT;
            }
        }
    };
    Duration initialDelay = new Duration(System.currentTimeMillis(),
            segmentGranularity.increment(truncatedNow).getMillis() + windowMillis);
    Duration rate = new Duration(truncatedNow, segmentGranularity.increment(truncatedNow));
    ScheduledExecutors.scheduleAtFixedRate(scheduledExecutor, initialDelay, rate, threadRenamingCallable);
}

From source file: org.apache.druid.segment.realtime.plumber.RealtimePlumber.java

License: Apache License

private DateTime getAllowedMinTime() {
    final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
    final Period windowPeriod = config.getWindowPeriod();

    final long windowMillis = windowPeriod.toStandardDuration().getMillis();
    return segmentGranularity.bucketStart(
            DateTimes.utc(Math.max(windowMillis, rejectionPolicy.getCurrMaxTime().getMillis()) - windowMillis));
}

From source file: org.apache.druid.segment.realtime.plumber.ServerTimeRejectionPolicyFactory.java

License: Apache License

@Override
public RejectionPolicy create(final Period windowPeriod) {
    final long windowMillis = windowPeriod.toStandardDuration().getMillis();

    return new RejectionPolicy() {
        @Override
        public DateTime getCurrMaxTime() {
            return DateTimes.nowUtc();
        }

        @Override
        public boolean accept(long timestamp) {
            long now = System.currentTimeMillis();

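            // Accept only timestamps within one window of the server clock,
            // in either direction.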
            boolean notTooOld = timestamp >= (now - windowMillis);
            boolean notTooYoung = timestamp <= (now + windowMillis);

            return notTooOld && notTooYoung;
        }

        @Override
        public String toString() {
            return StringUtils.format("serverTime-%s", windowPeriod);
        }
    };
}

From source file: org.apache.hadoop.gateway.config.impl.GatewayConfigImpl.java

License: Apache License

@Override
public long getGatewayDeploymentsBackupAgeLimit() {
    PeriodFormatter f = new PeriodFormatterBuilder().appendDays().toFormatter();
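    // The formatter parses a bare integer as a number of days; -1 (or any
    // unparsable value) disables the age limit.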
    String s = get(DEPLOYMENTS_BACKUP_AGE_LIMIT, "-1");
    long d;
    try {
        Period p = Period.parse(s, f);
        d = p.toStandardDuration().getMillis();
        if (d < 0) {
            d = -1;
        }
    } catch (Exception e) {
        d = -1;
    }
    return d;
}

From source file: org.apache.hadoop.gateway.config.impl.GatewayConfigImpl.java

License: Apache License

private static long parseNetworkTimeout(String s) {
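    // Parses timeouts such as "5m", "30s", "5m30s", or a bare number of milliseconds.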
    PeriodFormatter f = new PeriodFormatterBuilder().appendMinutes().appendSuffix("m", " min").appendSeconds()
            .appendSuffix("s", " sec").appendMillis().toFormatter();
    Period p = Period.parse(s, f);
    return p.toStandardDuration().getMillis();
}

From source file: org.apache.hadoop.gateway.dispatch.DefaultHttpClientFactory.java

License: Apache License

private static long parseTimeout(String s) {
    PeriodFormatter f = new PeriodFormatterBuilder().appendMinutes().appendSuffix("m", " min").appendSeconds()
            .appendSuffix("s", " sec").appendMillis().toFormatter();
    Period p = Period.parse(s, f);
    return p.toStandardDuration().getMillis();
}

From source file: org.apache.hadoop.hive.druid.DruidStorageHandler.java

License: Apache License

private static HttpClient makeHttpClient(Lifecycle lifecycle) {
    final int numConnection = HiveConf.getIntVar(SessionState.getSessionConf(),
            HiveConf.ConfVars.HIVE_DRUID_NUM_HTTP_CONNECTION);
    final Period readTimeout = new Period(
            HiveConf.getVar(SessionState.getSessionConf(), HiveConf.ConfVars.HIVE_DRUID_HTTP_READ_TIMEOUT));
    LOG.info("Creating Druid HTTP client with {} max parallel connections and {}ms read timeout", numConnection,
            readTimeout.toStandardDuration().getMillis());

    return HttpClientInit.createClient(HttpClientConfig.builder().withNumConnections(numConnection)
            .withReadTimeout(new Period(readTimeout).toStandardDuration()).build(), lifecycle);
}