Example usage for org.joda.time Interval getEndMillis

Introduction

On this page you can find usage examples for org.joda.time Interval getEndMillis.

Prototype

public long getEndMillis() 

Document

Gets the end of this time interval, which is exclusive.
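
Because the end is exclusive, the instant returned by getEndMillis() is itself not contained in the interval. A minimal, self-contained sketch (class name is illustrative):

import org.joda.time.DateTime;
import org.joda.time.Interval;

public class GetEndMillisDemo {
    public static void main(String[] args) {
        Interval interval = new Interval(new DateTime(2020, 1, 1, 0, 0), new DateTime(2020, 1, 2, 0, 0));

        long endMillis = interval.getEndMillis();
        System.out.println(interval.contains(endMillis - 1)); // true: the last instant inside
        System.out.println(interval.contains(endMillis));     // false: the end is exclusive
    }
}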

Usage

From source file:com.github.dbourdette.otto.web.util.RandomDateUtils.java

License:Apache License

public static DateTime in(Interval interval) {
    Random random = new Random();

    // nextDouble() returns a value in [0, 1), so the offset stays strictly
    // below the interval length and the exclusive end is never reached.
    long millis = (long) ((interval.getEndMillis() - interval.getStartMillis()) * random.nextDouble());

    return interval.getStart().plus(millis);
}
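
A brief usage sketch for the helper above (the interval bounds are illustrative). Since Random.nextDouble() returns a value in [0, 1), the result always lies on or after the start and strictly before the exclusive end:

Interval day = new Interval(new DateTime(2020, 1, 1, 0, 0), new DateTime(2020, 1, 2, 0, 0));
DateTime randomInstant = RandomDateUtils.in(day);
// day.contains(randomInstant) always holds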

From source file:com.jeklsoft.cassandraclient.astyanax.AstyanaxProtocolBufferWithStandardColumnExample.java

License:Apache License

@Override
public List<Reading> querySensorReadingsByInterval(UUID sensorId, Interval interval, int maxToReturn) {

    List<Reading> readings = new ArrayList<Reading>();

    ByteBufferRange range = new RangeBuilder().setLimit(maxToReturn).setStart(interval.getStartMillis())
            .setEnd(interval.getEndMillis()).build();

    RowQuery<UUID, DateTime> query = keyspace.prepareQuery(columnFamilyInfo).getKey(sensorId).autoPaginate(true)
            .withColumnRange(range);

    try {
        // Again, query.execute() is available here if synchronous behavior is desired.

        Future<OperationResult<ColumnList<DateTime>>> future = query.executeAsync();

        // time passes...

        OperationResult<ColumnList<DateTime>> result = future.get();

        ColumnList<DateTime> columns = result.getResult();

        for (Column<DateTime> column : columns) {
            DateTime timestamp = column.getName();
            Reading reading = new Reading(sensorId, timestamp,
                    (Reading) column.getValue(ReadingSerializer.get()));
            readings.add(reading);
        }
    } catch (ConnectionException | InterruptedException | ExecutionException e) {
        throw new RuntimeException("Query failed", e);
    }

    return readings;
}

From source file:com.jeklsoft.cassandraclient.hector.HectorHeterogeneousSuperColumnExample.java

License:Apache License

@Override
public List<Reading> querySensorReadingsByInterval(UUID sensorId, Interval interval, int maxToReturn) {
    SuperSliceQuery query = HFactory.createSuperSliceQuery(keyspace, us, ls, ss, ByteBufferSerializer.get());

    query.setColumnFamily(columnFamilyName).setKey(sensorId).setRange(interval.getStartMillis(),
            interval.getEndMillis(), false, maxToReturn);

    QueryResult<SuperSlice<UUID, String, ByteBuffer>> result = query.execute();

    List<HSuperColumn<UUID, String, ByteBuffer>> rows = result.get().getSuperColumns();

    List<Reading> readings = new ArrayList<Reading>();

    for (HSuperColumn row : rows) {
        Reading reading = getReadingFromSuperColumn(sensorId, row);
        readings.add(reading);
    }
    return readings;
}

From source file:com.jeklsoft.cassandraclient.hector.HectorProtocolBufferWithStandardColumnExample.java

License:Apache License

@Override
public List<Reading> querySensorReadingsByInterval(UUID sensorId, Interval interval, int maxToReturn) {
    SliceQuery<UUID, DateTime, Reading> query = HFactory.createSliceQuery(keyspace, us, ds, rs);

    query.setColumnFamily(columnFamilyName).setKey(sensorId).setRange(new DateTime(interval.getStartMillis()),
            new DateTime(interval.getEndMillis()), false, maxToReturn);

    QueryResult<ColumnSlice<DateTime, Reading>> result = query.execute();

    List<HColumn<DateTime, Reading>> columns = result.get().getColumns();

    List<Reading> readings = new ArrayList<Reading>();

    for (HColumn<DateTime, Reading> column : columns) {
        DateTime timestamp = column.getName();
        Reading reading = new Reading(sensorId, timestamp, column.getValue());
        readings.add(reading);
    }

    return readings;
}

From source file:com.linkedin.pinot.controller.api.resources.PinotSegmentUploadRestletResource.java

License:Apache License

/**
 * Returns true if:
 * - Segment does not have a start/end time, OR
 * - The start/end time are in a valid range (Jan 01 1971 - Jan 01, 2071)
 * @param metadata Segment metadata
 * @return true if the segment time is absent or within the valid range
 */
private boolean isSegmentTimeValid(SegmentMetadata metadata) {
    Interval interval = metadata.getTimeInterval();
    if (interval == null) {
        return true;
    }

    long startMillis = interval.getStartMillis();
    long endMillis = interval.getEndMillis();

    if (!TimeUtils.timeValueInValidRange(startMillis) || !TimeUtils.timeValueInValidRange(endMillis)) {
        Date minDate = new Date(TimeUtils.getValidMinTimeMillis());
        Date maxDate = new Date(TimeUtils.getValidMaxTimeMillis());

        LOGGER.error(
                "Invalid start time '{}ms' or end time '{}ms' for segment {}, must be between '{}' and '{}' (timecolumn {}, timeunit {})",
                startMillis, endMillis, metadata.getName(), minDate, maxDate,
                metadata.getTimeColumn(), metadata.getTimeUnit().toString());
        return false;
    }

    return true;
}

From source file:com.linkedin.pinot.controller.api.restlet.resources.PinotSegmentUploadRestletResource.java

License:Apache License

/**
 * Returns true if:
 * - Segment does not have a start/end time, OR
 * - The start/end time are in a valid range (Jan 01 1971 - Jan 01, 2071)
 * @param metadata Segment metadata
 * @return true if the segment time is absent or within the valid range
 */
private boolean isSegmentTimeValid(SegmentMetadata metadata) {
    Interval interval = metadata.getTimeInterval();
    if (interval == null) {
        return true;
    }

    long startMillis = interval.getStartMillis();
    long endMillis = interval.getEndMillis();

    if (!TimeUtils.timeValueInValidRange(startMillis) || !TimeUtils.timeValueInValidRange(endMillis)) {
        Date startDate = new Date(interval.getStartMillis());
        Date endDate = new Date(interval.getEndMillis());

        Date minDate = new Date(TimeUtils.getValidMinTimeMillis());
        Date maxDate = new Date(TimeUtils.getValidMaxTimeMillis());

        LOGGER.error("Invalid start time '{}' or end time '{}' for segment, must be between '{}' and '{}'",
                startDate, endDate, minDate, maxDate);
        return false;
    }

    return true;
}

From source file:com.linkedin.pinot.controller.api.restlet.resources.PinotSegmentUploadRestletResource.java

License:Apache License

/**
 * Returns true if the segment start and end times are within a valid range, or if
 * the segment does not have a time interval.
 * The current valid range is between 1971 and 2071.
 * @param metadata Segment metadata
 * @return true if the segment has no time interval or its time range is valid
 */
private boolean validateSegmentTimeRange(SegmentMetadata metadata) {
    Interval timeInterval = metadata.getTimeInterval();
    return timeInterval == null || (TimeUtils.timeValueInValidRange(timeInterval.getStartMillis())
            && TimeUtils.timeValueInValidRange(timeInterval.getEndMillis()));
}
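
For readers without the Pinot classes at hand, a framework-free sketch of the same check. The bounds minMillis and maxMillis are illustrative stand-ins for Pinot's TimeUtils, which defines the real range and its inclusivity:

// minMillis/maxMillis are illustrative stand-ins for Pinot's valid range
static boolean segmentTimeRangeValid(Interval timeInterval, long minMillis, long maxMillis) {
    return timeInterval == null
            || (minMillis <= timeInterval.getStartMillis() && timeInterval.getEndMillis() <= maxMillis);
}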

From source file:com.linkedin.pinot.controller.validation.ValidationManager.java

License:Apache License

/**
 * Runs a validation pass over the currently loaded tables.
 */
public void runValidation() {
    if (!_pinotHelixResourceManager.isLeader()) {
        LOGGER.info("Skipping validation, not leader!");
        return;
    }

    LOGGER.info("Starting validation");
    // Fetch the list of tables
    List<String> allTableNames = _pinotHelixResourceManager.getAllPinotTableNames();
    ZkHelixPropertyStore<ZNRecord> propertyStore = _pinotHelixResourceManager.getPropertyStore();
    for (String tableName : allTableNames) {
        // For each table, fetch the metadata for all its segments
        if (TableNameBuilder.getTableTypeFromTableName(tableName) != TableType.OFFLINE) {
            continue;
        }
        List<OfflineSegmentZKMetadata> offlineSegmentZKMetadatas = ZKMetadataProvider
                .getOfflineSegmentZKMetadataListForTable(propertyStore, tableName);
        List<SegmentMetadata> segmentMetadataList = new ArrayList<SegmentMetadata>();
        for (OfflineSegmentZKMetadata offlineSegmentZKMetadata : offlineSegmentZKMetadatas) {
            SegmentMetadata segmentMetadata = new SegmentMetadataImpl(offlineSegmentZKMetadata);
            segmentMetadataList.add(segmentMetadata);
        }

        int missingSegmentCount = 0;

        // Compute the missing segments if there are more than two
        if (2 < segmentMetadataList.size()) {
            List<Interval> segmentIntervals = new ArrayList<Interval>();
            for (SegmentMetadata segmentMetadata : segmentMetadataList) {
                Interval timeInterval = segmentMetadata.getTimeInterval();
                if (timeInterval != null) {
                    segmentIntervals.add(timeInterval);
                }
            }

            List<Interval> missingIntervals = computeMissingIntervals(segmentIntervals,
                    segmentMetadataList.get(0).getTimeGranularity());
            missingSegmentCount = missingIntervals.size();
        }

        // Update the gauge that contains the number of missing segments
        _validationMetrics.updateMissingSegmentsGauge(tableName, missingSegmentCount);

        // Compute the max segment end time and max segment push time
        long maxSegmentEndTime = Long.MIN_VALUE;
        long maxSegmentPushTime = Long.MIN_VALUE;

        for (SegmentMetadata segmentMetadata : segmentMetadataList) {
            Interval segmentInterval = segmentMetadata.getTimeInterval();

            if (segmentInterval != null && maxSegmentEndTime < segmentInterval.getEndMillis()) {
                maxSegmentEndTime = segmentInterval.getEndMillis();
            }

            long segmentPushTime = segmentMetadata.getPushTime();
            long segmentRefreshTime = segmentMetadata.getRefreshTime();
            long segmentUpdateTime = Math.max(segmentPushTime, segmentRefreshTime);

            if (maxSegmentPushTime < segmentUpdateTime) {
                maxSegmentPushTime = segmentUpdateTime;
            }
        }

        // Update the gauges that contain the delay between the current time and last segment end time
        _validationMetrics.updateOfflineSegmentDelayGauge(tableName, maxSegmentEndTime);
        _validationMetrics.updateLastPushTimeGauge(tableName, maxSegmentPushTime);
    }
    LOGGER.info("Validation completed");
}

From source file:com.linkedin.pinot.controller.validation.ValidationManager.java

License:Apache License

/**
 * Computes a list of missing intervals, given a list of existing intervals and the expected frequency of the
 * intervals.
 *
 * @param segmentIntervals The list of existing intervals
 * @param frequency The expected interval frequency
 * @return The list of missing intervals
 */
public static List<Interval> computeMissingIntervals(List<Interval> segmentIntervals, Duration frequency) {
    // Sanity check for frequency
    if (frequency == null) {
        return Collections.emptyList();
    }

    // Default the segment granularity to day level if it is smaller than one hour.
    if (frequency.getMillis() < Duration.standardHours(1).getMillis()) {
        frequency = Duration.standardDays(1);
    }

    // If there are less than two segments, none can be missing
    if (segmentIntervals.size() < 2) {
        return Collections.emptyList();
    }

    // Sort the intervals by ascending starting time
    List<Interval> sortedSegmentIntervals = new ArrayList<Interval>(segmentIntervals);
    Collections.sort(sortedSegmentIntervals, new Comparator<Interval>() {
        @Override
        public int compare(Interval first, Interval second) {
            return Long.compare(first.getStartMillis(), second.getStartMillis());
        }
    });

    // Find the minimum starting time and maximum ending time
    final long startTime = sortedSegmentIntervals.get(0).getStartMillis();
    long endTime = Long.MIN_VALUE;
    for (Interval sortedSegmentInterval : sortedSegmentIntervals) {
        if (endTime < sortedSegmentInterval.getEndMillis()) {
            endTime = sortedSegmentInterval.getEndMillis();
        }
    }

    final long frequencyMillis = frequency.getMillis();
    int lastEndIntervalCount = 0;
    List<Interval> missingIntervals = new ArrayList<Interval>(10);
    for (Interval segmentInterval : sortedSegmentIntervals) {
        int startIntervalCount = (int) ((segmentInterval.getStartMillis() - startTime) / frequencyMillis);
        int endIntervalCount = (int) ((segmentInterval.getEndMillis() - startTime) / frequencyMillis);

        // If there is at least one complete missing interval between the end of the previous interval and the start of
        // the current interval, then mark the missing interval(s) as missing
        if (lastEndIntervalCount < startIntervalCount - 1) {
            for (int missingIntervalIndex = lastEndIntervalCount + 1;
                    missingIntervalIndex < startIntervalCount; ++missingIntervalIndex) {
                missingIntervals.add(new Interval(startTime + frequencyMillis * missingIntervalIndex,
                        startTime + frequencyMillis * (missingIntervalIndex + 1) - 1));
            }
        }

        lastEndIntervalCount = Math.max(lastEndIntervalCount, endIntervalCount);
    }

    return missingIntervals;
}
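
A hedged usage sketch for the method above: two daily segments with a two-day hole between them. The first segment occupies bucket 0, and because its exclusive end falls exactly on the bucket-1 boundary, bucket 1 also counts as covered; the second segment starts at bucket 3, so only bucket 2 (2020-01-03) is reported as missing:

List<Interval> segments = Arrays.asList(
        new Interval(new DateTime(2020, 1, 1, 0, 0), new DateTime(2020, 1, 2, 0, 0)),
        new Interval(new DateTime(2020, 1, 4, 0, 0), new DateTime(2020, 1, 5, 0, 0)));

List<Interval> missing = computeMissingIntervals(segments, Duration.standardDays(1));
// missing holds one interval covering 2020-01-03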

From source file:com.metamx.druid.client.CachingClusteredClient.java

License:Open Source License

private Cache.NamedKey computeSegmentCacheKey(String segmentIdentifier, SegmentDescriptor descriptor,
        byte[] queryCacheKey) {
    final Interval segmentQueryInterval = descriptor.getInterval();
    final byte[] versionBytes = descriptor.getVersion().getBytes();

    return new Cache.NamedKey(segmentIdentifier,
            ByteBuffer.allocate(16 + versionBytes.length + 4 + queryCacheKey.length)
                    .putLong(segmentQueryInterval.getStartMillis()).putLong(segmentQueryInterval.getEndMillis())
                    .put(versionBytes).putInt(descriptor.getPartitionNumber()).put(queryCacheKey).array());
}
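
The key packs the interval endpoints as two big-endian longs at the front of the buffer, so they can be read back in the same order. A minimal sketch using only the JDK and Joda-Time (values are illustrative):

Interval interval = new Interval(new DateTime(2020, 1, 1, 0, 0), new DateTime(2020, 1, 2, 0, 0));

ByteBuffer buf = ByteBuffer.allocate(16)
        .putLong(interval.getStartMillis())
        .putLong(interval.getEndMillis());
buf.flip(); // switch from writing to reading

long start = buf.getLong(); // == interval.getStartMillis()
long end = buf.getLong();   // == interval.getEndMillis()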