Example usage for org.joda.time Period toStandardDuration

Introduction

On this page you can find example usages of org.joda.time.Period.toStandardDuration(), collected from open-source projects.

Prototype

public Duration toStandardDuration() 

Document

Converts this period to a duration, assuming a 7-day week, 24-hour day, 60-minute hour, and 60-second minute.
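
A minimal, self-contained sketch of the conversion (not taken from the sources below). Note that toStandardDuration() throws UnsupportedOperationException when the period contains a non-zero years or months field, since those have no fixed length in milliseconds:

import org.joda.time.Duration;
import org.joda.time.Period;

public class ToStandardDurationSketch {
    public static void main(String[] args) {
        // 1 day, 2 hours, 30 minutes.
        Period period = new Period(0, 0, 0, 1, 2, 30, 0, 0);

        // Under the standard assumptions, 1 day = 24 hours.
        Duration duration = period.toStandardDuration();
        System.out.println(duration.getMillis()); // 95400000

        // Years and months are rejected: their length is not fixed.
        try {
            Period.months(1).toStandardDuration();
        } catch (UnsupportedOperationException expected) {
            System.out.println("months have no standard duration");
        }
    }
}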

Usage

From source file: io.druid.segment.realtime.appenderator.AppenderatorPlumber.java

License: Apache License

private void startPersistThread() {
    final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
    final Period windowPeriod = config.getWindowPeriod();

    final DateTime truncatedNow = segmentGranularity.bucketStart(new DateTime());
    final long windowMillis = windowPeriod.toStandardDuration().getMillis();

    log.info("Expect to run at [%s]", new DateTime().plus(new Duration(System.currentTimeMillis(),
            segmentGranularity.increment(truncatedNow).getMillis() + windowMillis)));

    ScheduledExecutors.scheduleAtFixedRate(scheduledExecutor,
            new Duration(System.currentTimeMillis(),
                    segmentGranularity.increment(truncatedNow).getMillis() + windowMillis),
            new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)),
            new ThreadRenamingCallable<ScheduledExecutors.Signal>(StringUtils.format("%s-overseer-%d",
                    schema.getDataSource(), config.getShardSpec().getPartitionNum())) {
                @Override
                public ScheduledExecutors.Signal doCall() {
                    if (stopped) {
                        log.info("Stopping merge-n-push overseer thread");
                        return ScheduledExecutors.Signal.STOP;
                    }

                    mergeAndPush();

                    if (stopped) {
                        log.info("Stopping merge-n-push overseer thread");
                        return ScheduledExecutors.Signal.STOP;
                    } else {
                        return ScheduledExecutors.Signal.REPEAT;
                    }
                }
            });
}
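
The scheduling arithmetic above reduces the configured window period to a millisecond count once, then offsets the next segment boundary by it to compute the initial delay. A hypothetical standalone sketch, assuming hourly segments and a PT10M window (neither value comes from the source):

import org.joda.time.DateTime;
import org.joda.time.Period;

public class InitialDelaySketch {
    public static void main(String[] args) {
        // Stand-in for config.getWindowPeriod().
        long windowMillis = Period.parse("PT10M").toStandardDuration().getMillis(); // 600000

        // Stand-in for segmentGranularity.increment(truncatedNow): the next hour boundary.
        DateTime nextBoundary = new DateTime().hourOfDay().roundCeilingCopy();

        System.out.println("Expect to run at "
                + new DateTime(nextBoundary.getMillis() + windowMillis));
    }
}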

From source file: io.druid.segment.realtime.appenderator.AppenderatorPlumber.java

License: Apache License

private void mergeAndPush() {
    final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
    final Period windowPeriod = config.getWindowPeriod();

    final long windowMillis = windowPeriod.toStandardDuration().getMillis();
    log.info("Starting merge and push.");
    DateTime minTimestampAsDate = segmentGranularity.bucketStart(
            new DateTime(Math.max(windowMillis, rejectionPolicy.getCurrMaxTime().getMillis()) - windowMillis));
    long minTimestamp = minTimestampAsDate.getMillis();

    final List<SegmentIdentifier> appenderatorSegments = appenderator.getSegments();
    final List<SegmentIdentifier> segmentsToPush = Lists.newArrayList();

    if (shuttingDown) {
        log.info("Found [%,d] segments. Attempting to hand off all of them.", appenderatorSegments.size());
        segmentsToPush.addAll(appenderatorSegments);
    } else {
        log.info("Found [%,d] segments. Attempting to hand off segments that start before [%s].",
                appenderatorSegments.size(), minTimestampAsDate);

        for (SegmentIdentifier segment : appenderatorSegments) {
            final Long intervalStart = segment.getInterval().getStartMillis();
            if (intervalStart < minTimestamp) {
                log.info("Adding entry [%s] for merge and push.", segment);
                segmentsToPush.add(segment);
            } else {
                log.info(
                        "Skipping persist and merge for entry [%s] : Start time [%s] >= [%s] min timestamp required in this run. Segment will be picked up in a future run.",
                        segment, new DateTime(intervalStart), minTimestampAsDate);
            }
        }
    }

    log.info("Found [%,d] segments to persist and merge", segmentsToPush.size());

    final Function<Throwable, Void> errorHandler = new Function<Throwable, Void>() {
        @Override
        public Void apply(Throwable throwable) {
            final List<String> segmentIdentifierStrings = Lists.transform(segmentsToPush,
                    new Function<SegmentIdentifier, String>() {
                        @Override
                        public String apply(SegmentIdentifier input) {
                            return input.getIdentifierAsString();
                        }
                    });

            log.makeAlert(throwable, "Failed to publish merged indexes[%s]", schema.getDataSource())
                    .addData("segments", segmentIdentifierStrings).emit();

            if (shuttingDown) {
                // We're trying to shut down, and these segments failed to push. Let's just get rid of them.
                // This call will also delete possibly-partially-written files, so we don't need to do it explicitly.
                cleanShutdown = false;
                for (SegmentIdentifier identifier : segmentsToPush) {
                    dropSegment(identifier);
                }
            }

            return null;
        }
    };

    // WARNING: Committers.nil() here means that on-disk data can get out of sync with committing.
    Futures.addCallback(appenderator.push(segmentsToPush, Committers.nil()),
            new FutureCallback<SegmentsAndMetadata>() {
                @Override
                public void onSuccess(SegmentsAndMetadata result) {
                    // Immediately publish after pushing
                    for (DataSegment pushedSegment : result.getSegments()) {
                        try {
                            segmentPublisher.publishSegment(pushedSegment);
                        } catch (Exception e) {
                            errorHandler.apply(e);
                        }
                    }

                    log.info("Published [%,d] sinks.", segmentsToPush.size());
                }

                @Override
                public void onFailure(Throwable e) {
                    log.warn(e, "Failed to push [%,d] segments.", segmentsToPush.size());
                    errorHandler.apply(e);
                }
            });
}

From source file: io.druid.segment.realtime.plumber.MessageTimeRejectionPolicyFactory.java

License: Apache License

@Override
public RejectionPolicy create(final Period windowPeriod) {
    final long windowMillis = windowPeriod.toStandardDuration().getMillis();

    return new RejectionPolicy() {
        private volatile long maxTimestamp = JodaUtils.MIN_INSTANT;

        @Override
        public DateTime getCurrMaxTime() {
            return new DateTime(maxTimestamp);
        }

        @Override
        public boolean accept(long timestamp) {
            maxTimestamp = Math.max(maxTimestamp, timestamp);

            return timestamp >= (maxTimestamp - windowMillis);
        }

        @Override
        public String toString() {
            return String.format("messageTime-%s", windowPeriod);
        }
    };
}
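
The policy keeps a high-water mark of event time and accepts anything within one window of it. A hypothetical walk-through of the arithmetic, assuming a PT10M window:

import org.joda.time.Period;

public class MessageTimeWindowSketch {
    public static void main(String[] args) {
        long windowMillis = Period.parse("PT10M").toStandardDuration().getMillis(); // 600000

        long maxTimestamp = 10_000_000L;          // newest event time seen so far
        long candidate = maxTimestamp - 300_000L; // 5 minutes behind the high-water mark

        // Accepted: within one 10-minute window of the max.
        System.out.println(candidate >= maxTimestamp - windowMillis); // true

        // Rejected: 15 minutes behind falls outside the window.
        System.out.println(maxTimestamp - 900_000L >= maxTimestamp - windowMillis); // false
    }
}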

From source file: io.druid.segment.realtime.plumber.RealtimePlumber.java

License: Apache License

protected void startPersistThread() {
    final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
    final Period windowPeriod = config.getWindowPeriod();

    final DateTime truncatedNow = segmentGranularity.truncate(new DateTime());
    final long windowMillis = windowPeriod.toStandardDuration().getMillis();

    log.info("Expect to run at [%s]", new DateTime().plus(new Duration(System.currentTimeMillis(),
            segmentGranularity.increment(truncatedNow).getMillis() + windowMillis)));

    ScheduledExecutors.scheduleAtFixedRate(scheduledExecutor,
            new Duration(System.currentTimeMillis(),
                    segmentGranularity.increment(truncatedNow).getMillis() + windowMillis),
            new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)),
            new ThreadRenamingCallable<ScheduledExecutors.Signal>(String.format("%s-overseer-%d",
                    schema.getDataSource(), config.getShardSpec().getPartitionNum())) {
                @Override
                public ScheduledExecutors.Signal doCall() {
                    if (stopped) {
                        log.info("Stopping merge-n-push overseer thread");
                        return ScheduledExecutors.Signal.STOP;
                    }

                    mergeAndPush();

                    if (stopped) {
                        log.info("Stopping merge-n-push overseer thread");
                        return ScheduledExecutors.Signal.STOP;
                    } else {
                        return ScheduledExecutors.Signal.REPEAT;
                    }
                }
            });
}

From source file: io.druid.segment.realtime.plumber.RealtimePlumber.java

License: Apache License

private void mergeAndPush() {
    final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
    final Period windowPeriod = config.getWindowPeriod();

    final long windowMillis = windowPeriod.toStandardDuration().getMillis();
    log.info("Starting merge and push.");
    DateTime minTimestampAsDate = segmentGranularity.truncate(
            new DateTime(Math.max(windowMillis, rejectionPolicy.getCurrMaxTime().getMillis()) - windowMillis));
    long minTimestamp = minTimestampAsDate.getMillis();

    log.info("Found [%,d] segments. Attempting to hand off segments that start before [%s].", sinks.size(),
            minTimestampAsDate);

    List<Map.Entry<Long, Sink>> sinksToPush = Lists.newArrayList();
    for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
        final Long intervalStart = entry.getKey();
        if (intervalStart < minTimestamp) {
            log.info("Adding entry [%s] for merge and push.", entry);
            sinksToPush.add(entry);
        } else {
            log.info(
                    "Skipping persist and merge for entry [%s] : Start time [%s] >= [%s] min timestamp required in this run. Segment will be picked up in a future run.",
                    entry, new DateTime(intervalStart), minTimestampAsDate);
        }
    }

    log.info("Found [%,d] sinks to persist and merge", sinksToPush.size());

    for (final Map.Entry<Long, Sink> entry : sinksToPush) {
        persistAndMerge(entry.getKey(), entry.getValue());
    }
}

From source file: io.druid.segment.realtime.plumber.ServerTimeRejectionPolicyFactory.java

License: Apache License

@Override
public RejectionPolicy create(final Period windowPeriod) {
    final long windowMillis = windowPeriod.toStandardDuration().getMillis();

    return new RejectionPolicy() {
        @Override
        public DateTime getCurrMaxTime() {
            return new DateTime();
        }

        @Override
        public boolean accept(long timestamp) {
            long now = System.currentTimeMillis();

            boolean notTooOld = timestamp >= (now - windowMillis);
            boolean notTooYoung = timestamp <= (now + windowMillis);

            return notTooOld && notTooYoung;
        }

        @Override
        public String toString() {
            return String.format("serverTime-%s", windowPeriod);
        }
    };
}
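
Here the window is centered on the server clock instead, rejecting events that are either too old or too far in the future. A hypothetical check, again assuming a PT10M window:

import org.joda.time.Period;

public class ServerTimeWindowSketch {
    public static void main(String[] args) {
        long windowMillis = Period.parse("PT10M").toStandardDuration().getMillis();
        long now = System.currentTimeMillis();

        long fiveMinutesAgo = now - 300_000L;
        boolean notTooOld = fiveMinutesAgo >= (now - windowMillis);   // true
        boolean notTooYoung = fiveMinutesAgo <= (now + windowMillis); // true
        System.out.println(notTooOld && notTooYoung);                 // accepted
    }
}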

From source file: microsoft.exchange.webservices.data.core.EwsUtilities.java

License: Open Source License

/**
 * Takes an xs:duration string as defined by the W3 Consortium's
 * Recommendation "XML Schema Part 2: Datatypes Second Edition",
 * http://www.w3.org/TR/xmlschema-2/#duration, and converts it into a
 * System.TimeSpan structure. This method uses the following approximations:
 * 1 year = 365 days, 1 month = 30 days. Additionally, it only allows four
 * decimal places of seconds precision.
 *
 * @param xsDuration xs:duration string to convert
 * @return System.TimeSpan structure
 */
public static TimeSpan getXSDurationToTimeSpan(String xsDuration) {
    // TODO: Need to check whether this should be the equivalent or not
    Matcher m = PATTERN_TIME_SPAN.matcher(xsDuration);
    boolean negative = false;
    if (m.find()) {
        negative = true;
    }

    // Removing leading '-'
    if (negative) {
        xsDuration = xsDuration.replace("-P", "P");
    }

    Period period = Period.parse(xsDuration, ISOPeriodFormat.standard());

    long retval = period.toStandardDuration().getMillis();

    if (negative) {
        retval = -retval;
    }

    return new TimeSpan(retval);

}
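
The TimeSpan type is specific to the EWS library, but the Joda-Time half of the conversion can be sketched on its own (hypothetical input value):

import org.joda.time.Period;
import org.joda.time.format.ISOPeriodFormat;

public class XsDurationSketch {
    public static void main(String[] args) {
        // Day/hour/minute/second components convert directly.
        Period p = Period.parse("P1DT2H", ISOPeriodFormat.standard());
        System.out.println(p.toStandardDuration().getMillis()); // 93600000

        // Note that toStandardDuration() itself rejects non-zero year or
        // month fields, so an input such as P1Y would need the documented
        // 365-day/30-day approximations applied before this step.
    }
}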

From source file: net.sf.jacclog.service.importer.commands.internal.ImportStatsShellCommand.java

License: Apache License

private void renderEntries(final LogFileImporterStatistic statistic) {
    if (statistic.getEntries() != null && !statistic.getEntries().isEmpty()) {
        final int size = (statistic.getEntries().get(0).getFile() != null)
                ? statistic.getEntries().get(0).getFile().getFile().getPath().length() + 8
                : 32;
        final String format = "%-" + size + "s%10s%18s";
        final StringBuilder builder = new StringBuilder();
        builder.append('\n');
        final Formatter formatter = new Formatter(builder);
        formatter.format(format, "Path", "Count", "Elapsed time");
        builder.append('\n');

        String path;
        Period p;
        int totalCount = 0;
        Duration totalElapsedTime = new Duration(0);
        for (final Entry entry : statistic.getEntries()) {
            path = entry.getFile().getFile().getPath();
            p = entry.getElapsedTime();
            totalElapsedTime = totalElapsedTime.plus(p.toStandardDuration());
            totalCount += entry.getCount();
            formatter.format(format, path, entry.getCount(), p.toString(FORMATTER));
            builder.append('\n');
        }

        builder.append('\n');
        builder.append("Total imported entries: " + totalCount);
        builder.append('\n');
        builder.append("Total processing time: " + totalElapsedTime.toPeriod().toString(FORMATTER));
        builder.append('\n');

        System.out.println(builder);
    } else {
        System.out.println("No files have been recently imported.");
    }
}
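
Summing the per-file periods works because each one is first normalized to a fixed-millisecond Duration. A minimal sketch with hypothetical elapsed times:

import java.util.Arrays;
import org.joda.time.Duration;
import org.joda.time.Period;

public class ElapsedTimeTotalSketch {
    public static void main(String[] args) {
        Duration total = new Duration(0);
        for (Period elapsed : Arrays.asList(Period.parse("PT1M30S"), Period.parse("PT45S"))) {
            total = total.plus(elapsed.toStandardDuration());
        }
        System.out.println(total.toPeriod()); // PT2M15S
    }
}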

From source file: org.apache.druid.data.input.parquet.simple.ParquetGroupConverter.java

License: Apache License

/**
 * Convert a primitive group field to an "ingestion friendly" Java object.
 *
 * @return "ingestion ready" Java object, or null
 */
@Nullable
private static Object convertPrimitiveField(Group g, int fieldIndex, int index, boolean binaryAsString) {
    PrimitiveType pt = (PrimitiveType) g.getType().getFields().get(fieldIndex);
    OriginalType ot = pt.getOriginalType();

    try {
        if (ot != null) {
            // convert logical types
            switch (ot) {
            case DATE:
                long ts = g.getInteger(fieldIndex, index) * MILLIS_IN_DAY;
                return ts;
            case TIME_MICROS:
                return g.getLong(fieldIndex, index);
            case TIME_MILLIS:
                return g.getInteger(fieldIndex, index);
            case TIMESTAMP_MICROS:
                return TimeUnit.MILLISECONDS.convert(g.getLong(fieldIndex, index), TimeUnit.MICROSECONDS);
            case TIMESTAMP_MILLIS:
                return g.getLong(fieldIndex, index);
            case INTERVAL:
                /*
                INTERVAL is used for an interval of time. It must annotate a fixed_len_byte_array of length 12.
                This array stores three little-endian unsigned integers that represent durations at different
                granularities of time. The first stores a number in months, the second stores a number in days,
                and the third stores a number in milliseconds. This representation is independent of any particular
                timezone or date.
                        
                Each component in this representation is independent of the others. For example, there is no
                requirement that a large number of days should be expressed as a mix of months and days because there is
                not a constant conversion from days to months.
                        
                The sort order used for INTERVAL is undefined. When writing data, no min/max statistics should be
                saved for this type and if such non-compliant statistics are found during reading, they must be ignored.
                */
                Binary intervalVal = g.getBinary(fieldIndex, index);
                IntBuffer intBuf = intervalVal.toByteBuffer().order(ByteOrder.LITTLE_ENDIAN).asIntBuffer();
                int months = intBuf.get(0);
                int days = intBuf.get(1);
                int millis = intBuf.get(2);
                StringBuilder periodBuilder = new StringBuilder("P");
                if (months > 0) {
                    periodBuilder.append(months).append("M");
                }
                if (days > 0) {
                    periodBuilder.append(days).append("D");
                }
                if (periodBuilder.length() > 1) {
                    Period p = Period.parse(periodBuilder.toString());
                    Duration d = p.toStandardDuration().plus(millis);
                    return d;
                } else {
                    return new Duration(millis);
                }
            case INT_8:
            case INT_16:
            case INT_32:
                return g.getInteger(fieldIndex, index);
            case INT_64:
                return g.getLong(fieldIndex, index);
            // TODO: unclear how to handle unsigned types; they are read as signed values for now
            case UINT_8:
            case UINT_16:
            case UINT_32:
                return g.getInteger(fieldIndex, index);
            case UINT_64:
                return g.getLong(fieldIndex, index);
            case DECIMAL:
                /*
                  DECIMAL can be used to annotate the following types:
                    int32: for 1 <= precision <= 9
                    int64: for 1 <= precision <= 18; precision < 10 will produce a warning
                    fixed_len_byte_array: precision is limited by the array size. Length n can
                      store <= floor(log_10(2^(8*n - 1) - 1)) base-10 digits
                    binary: precision is not limited, but is required. The minimum number of bytes to store
                      the unscaled value should be used.
                 */
                int precision = pt.asPrimitiveType().getDecimalMetadata().getPrecision();
                int scale = pt.asPrimitiveType().getDecimalMetadata().getScale();
                switch (pt.getPrimitiveTypeName()) {
                case INT32:
                    return new BigDecimal(g.getInteger(fieldIndex, index));
                case INT64:
                    return new BigDecimal(g.getLong(fieldIndex, index));
                case FIXED_LEN_BYTE_ARRAY:
                case BINARY:
                    Binary value = g.getBinary(fieldIndex, index);
                    return convertBinaryToDecimal(value, precision, scale);
                default:
                    throw new RE(
                            "Unknown 'DECIMAL' type supplied to primitive conversion: %s (this should never happen)",
                            pt.getPrimitiveTypeName());
                }
            case UTF8:
            case ENUM:
            case JSON:
                return g.getString(fieldIndex, index);
            case LIST:
            case MAP:
            case MAP_KEY_VALUE:
            case BSON:
            default:
                throw new RE("Non-primitive supplied to primitive conversion: %s (this should never happen)",
                        ot.name());
            }
        } else {
            // fallback to handling the raw primitive type if no logical type mapping
            switch (pt.getPrimitiveTypeName()) {
            case BOOLEAN:
                return g.getBoolean(fieldIndex, index);
            case INT32:
                return g.getInteger(fieldIndex, index);
            case INT64:
                return g.getLong(fieldIndex, index);
            case FLOAT:
                return g.getFloat(fieldIndex, index);
            case DOUBLE:
                return g.getDouble(fieldIndex, index);
            case INT96:
                Binary tsBin = g.getInt96(fieldIndex, index);
                return convertInt96BinaryToTimestamp(tsBin);
            case FIXED_LEN_BYTE_ARRAY:
            case BINARY:
                Binary bin = g.getBinary(fieldIndex, index);
                byte[] bytes = bin.getBytes();
                if (binaryAsString) {
                    return StringUtils.fromUtf8(bytes);
                } else {
                    return bytes;
                }
            default:
                throw new RE("Unknown primitive conversion: %s", pt.getPrimitiveTypeName());
            }
        }
    } catch (Exception ex) {
        return null;
    }
}
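
The INTERVAL branch above can be exercised in isolation; a hypothetical sketch decoding a 12-byte little-endian (months, days, millis) triple:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.IntBuffer;
import org.joda.time.Duration;
import org.joda.time.Period;

public class ParquetIntervalSketch {
    public static void main(String[] args) {
        // 0 months, 2 days, 500 ms; a non-zero months component would make
        // toStandardDuration() throw (caught and mapped to null above).
        byte[] raw = ByteBuffer.allocate(12).order(ByteOrder.LITTLE_ENDIAN)
                .putInt(0).putInt(2).putInt(500).array();

        IntBuffer intBuf = ByteBuffer.wrap(raw).order(ByteOrder.LITTLE_ENDIAN).asIntBuffer();
        Period p = Period.days(intBuf.get(1));
        Duration d = p.toStandardDuration().plus(intBuf.get(2));
        System.out.println(d.getMillis()); // 172800500
    }
}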

From source file: org.apache.druid.indexing.seekablestream.supervisor.SeekableStreamSupervisorIOConfig.java

License: Apache License

public SeekableStreamSupervisorIOConfig(String stream, Integer replicas, Integer taskCount, Period taskDuration,
        Period startDelay, Period period, Boolean useEarliestSequenceNumber, Period completionTimeout,
        Period lateMessageRejectionPeriod, Period earlyMessageRejectionPeriod) {
    this.stream = Preconditions.checkNotNull(stream, "stream cannot be null");
    this.replicas = replicas != null ? replicas : 1;
    this.taskCount = taskCount != null ? taskCount : 1;
    this.taskDuration = defaultDuration(taskDuration, "PT1H");
    this.startDelay = defaultDuration(startDelay, "PT5S");
    this.period = defaultDuration(period, "PT30S");
    this.useEarliestSequenceNumber = useEarliestSequenceNumber != null ? useEarliestSequenceNumber : false;
    this.completionTimeout = defaultDuration(completionTimeout, "PT30M");
    this.lateMessageRejectionPeriod = lateMessageRejectionPeriod == null ? Optional.absent()
            : Optional.of(lateMessageRejectionPeriod.toStandardDuration());
    this.earlyMessageRejectionPeriod = earlyMessageRejectionPeriod == null ? Optional.absent()
            : Optional.of(earlyMessageRejectionPeriod.toStandardDuration());
}
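
The defaultDuration helper is not shown in this excerpt; a plausible sketch of its contract, assuming it substitutes an ISO-8601 default when the configured period is null (hypothetical, not taken from the source):

import org.joda.time.Duration;
import org.joda.time.Period;

class DefaultDurationSketch {
    // Hypothetical body matching the calls above.
    static Duration defaultDuration(Period period, String theDefault) {
        return (period != null ? period : Period.parse(theDefault)).toStandardDuration();
    }

    public static void main(String[] args) {
        System.out.println(defaultDuration(null, "PT1H")); // PT3600S
    }
}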