Example usage for org.joda.time Interval getStartMillis

List of usage examples for org.joda.time Interval getStartMillis

Introduction

On this page you can find example usage for org.joda.time Interval getStartMillis.

Prototype

public long getStartMillis() 

Document

Gets the start of this time interval, which is inclusive.
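
A minimal standalone sketch of the call (plain Joda-Time; the dates are arbitrary):

import org.joda.time.DateTime;
import org.joda.time.Interval;

// Build an interval and read its inclusive start as milliseconds since the epoch
Interval interval = new Interval(new DateTime(2020, 1, 1, 0, 0), new DateTime(2020, 1, 2, 0, 0));
long startMillis = interval.getStartMillis(); // inclusive start, millis since 1970-01-01T00:00:00Z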

Usage

From source file:de.fraunhofer.iosb.ilt.sta.persistence.postgres.PgExpressionHandler.java

License:Open Source License

@Override
public Expression<?> visit(IntervalConstant node) {
    Interval value = node.getValue();
    return new TimeIntervalExpression(
            new ConstantDateTimeExpression(new Timestamp(value.getStartMillis()), true),
            new ConstantDateTimeExpression(new Timestamp(value.getEndMillis()), true));
}

From source file:de.hpi.bpmn2_0.replay.TimeUtilities.java

License:Open Source License

public static ArrayList<Interval> divide(Interval v1, Interval v2) {
    ArrayList<Interval> divide = new ArrayList<>();
    Interval overlap = v1.overlap(v2);

    if (overlap != null) {
        long overlapStart = overlap.getStartMillis();
        long overlapEnd = overlap.getEndMillis();

        long v1Start = v1.getStartMillis();
        long v1End = v1.getEndMillis();

        long v2Start = v2.getStartMillis();
        long v2End = v2.getEndMillis();

        long minStart = Math.min(v1Start, v2Start);
        long maxEnd = Math.max(v1End, v2End);

        divide.add(new Interval(minStart, overlapStart));
        divide.add(overlap);
        divide.add(new Interval(overlapEnd, maxEnd));
    }
    return divide;
}
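
A rough illustration of the partition produced by divide above (a hypothetical invocation; the dates are made up):

// Two overlapping two-hour intervals on the same day
Interval v1 = new Interval(new DateTime(2020, 1, 1, 10, 0), new DateTime(2020, 1, 1, 12, 0));
Interval v2 = new Interval(new DateTime(2020, 1, 1, 11, 0), new DateTime(2020, 1, 1, 13, 0));

// Expected pieces: [10:00, 11:00), the overlap [11:00, 12:00), and [12:00, 13:00)
for (Interval part : TimeUtilities.divide(v1, v2)) {
    System.out.println(part.getStartMillis() + " -> " + part.getEndMillis());
}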

From source file:de.javakaffee.kryoserializers.jodatime.JodaIntervalSerializer.java

License:Apache License

@Override
public void write(final Kryo kryo, final Output output, final Interval obj) {
    final long startMillis = obj.getStartMillis();
    final long endMillis = obj.getEndMillis();
    final String chronologyId = IdentifiableChronology.getChronologyId(obj.getChronology());

    output.writeLong(startMillis, true);
    output.writeLong(endMillis, true);
    output.writeString(chronologyId == null ? "" : chronologyId);
}

From source file:dk.dma.ais.store.AisStoreQueryBuilder.java

License:Apache License

public AisStoreQueryBuilder setInterval(Interval interval) {
    return setInterval(Instant.ofEpochMilli(interval.getStartMillis()),
            Instant.ofEpochMilli(interval.getEndMillis()));
}

From source file:io.coala.dsol.util.DsolUtil.java

License:Apache License

/**
 * @return the overlap of the specified interval with the replication run period
 *         (after warm-up and before run length)
 */
public static Interval crop(final Interval interval, final Treatment treatment) {
    final Interval runPeriod = getRunInterval(treatment);
    if (interval.overlaps(runPeriod)) {
        final long croppedStart = Math.max(interval.getStartMillis(), runPeriod.getStartMillis());
        final long croppedEnd = Math.min(interval.getEndMillis(), runPeriod.getEndMillis());
        return new Interval(croppedStart, croppedEnd);
    }
    return interval;
}

From source file:io.druid.benchmark.datagen.BenchmarkDataGenerator.java

License:Apache License

public BenchmarkDataGenerator(List<BenchmarkColumnSchema> columnSchemas, final long seed, Interval interval,
        int numRows) {
    this.columnSchemas = columnSchemas;
    this.seed = seed;

    this.startTime = interval.getStartMillis();
    this.endTime = interval.getEndMillis() - 1;

    Preconditions.checkArgument(endTime >= startTime, "endTime >= startTime");

    long timeDelta = endTime - startTime;
    this.timestampIncrement = timeDelta / (numRows * 1.0);
    this.numConsecutiveTimestamps = 0;

    init();
}

From source file:io.druid.client.CacheUtil.java

License:Apache License

public static Cache.NamedKey computeSegmentCacheKey(String segmentIdentifier, SegmentDescriptor descriptor,
        byte[] queryCacheKey) {
    final Interval segmentQueryInterval = descriptor.getInterval();
    final byte[] versionBytes = com.metamx.common.StringUtils.toUtf8(descriptor.getVersion());

    return new Cache.NamedKey(segmentIdentifier,
            ByteBuffer.allocate(16 + versionBytes.length + 4 + queryCacheKey.length)
                    .putLong(segmentQueryInterval.getStartMillis()).putLong(segmentQueryInterval.getEndMillis())
                    .put(versionBytes).putInt(descriptor.getPartitionNumber()).put(queryCacheKey).array());
}

From source file:io.druid.indexing.materializedview.MaterializedViewSupervisor.java

License:Apache License

/**
 * Check whether the start millis of the target interval lags at least minDataLagMs behind maxInterval's start.
 * minDataLag is required to prevent repeatedly rebuilding data because of late-arriving data.
 *
 * @param target
 * @param maxInterval
 * @return true if the start millis of the target interval lags at least minDataLagMs behind maxInterval's start
 */
private boolean hasEnoughLag(Interval target, Interval maxInterval) {
    return minDataLagMs <= (maxInterval.getStartMillis() - target.getStartMillis());
}
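
To make the check concrete, a sketch with made-up values (assuming a minDataLagMs of 24 hours):

long minDataLagMs = 24L * 60 * 60 * 1000; // 24 hours
Interval target = new Interval(0L, 60 * 60 * 1000L);                              // starts at t = 0h
Interval maxInterval = new Interval(30L * 60 * 60 * 1000, 31L * 60 * 60 * 1000);  // starts at t = 30h

// 30h - 0h = 30h, which is at least 24h, so target lags far enough behind maxInterval
boolean enoughLag = minDataLagMs <= (maxInterval.getStartMillis() - target.getStartMillis());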

From source file:io.druid.query.aggregation.atomcube.AtomCubeQuery.java

License:Apache License

private Cache.NamedKey getKey(Query _query) {
    Cache.NamedKey key = null;
    StringBuffer sb = new StringBuffer();
    List<String> dsNames = _query.getDataSource().getNames();
    Collections.sort(dsNames, Ordering.natural());
    for (String s : dsNames) {
        sb.append(s);
    }
    List<Interval> intervals = _query.getIntervals();
    for (Interval interval : sortIntervals(intervals)) {
        long start = interval.getStartMillis();
        long end = interval.getEndMillis();
        sb.append(start).append(end);
    }
    byte[] filterKey = new byte[0];
    if (_query.hasFilters()) {
        if (_query instanceof TimeseriesQuery) {
            TimeseriesQuery query = (TimeseriesQuery) _query;
            filterKey = query.getDimensionsFilter().getCacheKey();
        } else if (_query instanceof TopNQuery) {
            TopNQuery query = (TopNQuery) _query;
            filterKey = query.getDimensionsFilter().getCacheKey();

        } else if (_query instanceof GroupByQuery) {
            GroupByQuery query = (GroupByQuery) _query;
            filterKey = query.getDimFilter().getCacheKey();
        }
    }
    String queryId = Integer.toHexString(sb.toString().hashCode());
    key = new Cache.NamedKey(queryId, ByteBuffer.allocate(queryId.getBytes().length + filterKey.length)
            .put(queryId.getBytes()).put(filterKey).array());
    return key;
}

From source file:io.druid.query.aggregation.atomcube.AtomCubeQuery.java

License:Apache License

private static List<Interval> sortIntervals(List<Interval> intervals) {
    List<Interval> _intervals = Lists.newCopyOnWriteArrayList();
    List<Interval> ret = Lists.newCopyOnWriteArrayList();
    for (Interval interval : intervals) {
        _intervals.add(interval);
    }
    while (!_intervals.isEmpty()) {
        Interval aInterval = null;
        for (Interval interval : _intervals) {
            if (aInterval == null) {
                aInterval = interval;
            } else {
                if (aInterval.getStartMillis() > interval.getStartMillis()) {
                    aInterval = interval;
                }
            }
        }
        ret.add(aInterval);
        _intervals.remove(aInterval);
    }
    return ret;
}
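
The selection-style loop above can also be written as a sort keyed on the start millis; a minimal sketch, assuming Java 8+ (java.util.Comparator and java.util.ArrayList):

// Equivalent ordering by ascending start millis
List<Interval> sorted = new ArrayList<>(intervals);
sorted.sort(Comparator.comparingLong(Interval::getStartMillis));
return sorted;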