List of usage examples for org.joda.time Interval toDurationMillis
public long toDurationMillis()
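Interval.toDurationMillis() returns the length of the interval in milliseconds, i.e. the end instant minus the start instant. Because a Joda-Time Interval requires the end to be equal to or after the start, the result is never negative. Below is a minimal, self-contained sketch of the basic call, assuming Joda-Time is on the classpath; the class and variable names are illustrative only and do not come from the projects listed further down.

import org.joda.time.DateTime;
import org.joda.time.Interval;

public class ToDurationMillisExample {
    public static void main(String[] args) {
        DateTime start = new DateTime(2024, 1, 1, 10, 0);
        DateTime end = start.plusMinutes(90);

        // The interval spans from start (inclusive) to end (exclusive).
        Interval interval = new Interval(start, end);

        // 90 minutes = 90 * 60 * 1000 = 5400000 milliseconds
        long millis = interval.toDurationMillis();
        System.out.println(millis); // prints 5400000
    }
}

The examples below follow the same pattern: build an Interval from two instants (or obtain one from another API), then call toDurationMillis() to get its length for comparison, arithmetic, or validation.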
From source file:eu.hydrologis.jgrass.geonotes.GeonotesHandler.java
License:Open Source License
/**
 * Fetches a gps coordinate from the database nearest to a supplied time and date.
 *
 * @param dateTime the time to search for.
 * @return the coordinate of the nearest time.
 * @throws Exception
 */
public static Coordinate getGpsCoordinateForTimeStamp(DateTime dateTime, int minutesThreshold) throws Exception {
    DateTime from = dateTime.minusMinutes(minutesThreshold);
    DateTime to = dateTime.plusMinutes(minutesThreshold);
    Session session = null;
    try {
        session = DatabasePlugin.getDefault().getActiveDatabaseConnection().openSession();
        Criteria criteria = session.createCriteria(GpsLogTable.class);
        String utcTimeStr = "utcTime";
        criteria.add(between(utcTimeStr, from, to));
        criteria.addOrder(asc(utcTimeStr));
        List<GpsLogTable> resultsList = criteria.list();
        for (int i = 0; i < resultsList.size() - 1; i++) {
            GpsLogTable gpsLog1 = resultsList.get(i);
            GpsLogTable gpsLog2 = resultsList.get(i + 1);
            DateTime utcTimeBefore = gpsLog1.getUtcTime();
            DateTime utcTimeAfter = gpsLog2.getUtcTime();
            Interval interval = new Interval(utcTimeBefore, utcTimeAfter);
            if (interval.contains(dateTime)) {
                // take the nearest
                Interval intervalBefore = new Interval(utcTimeBefore, dateTime);
                Interval intervalAfter = new Interval(dateTime, utcTimeAfter);
                long beforeMillis = intervalBefore.toDurationMillis();
                long afterMillis = intervalAfter.toDurationMillis();
                if (beforeMillis < afterMillis) {
                    Coordinate coord = new Coordinate(gpsLog1.getEast(), gpsLog1.getNorth());
                    return coord;
                } else {
                    Coordinate coord = new Coordinate(gpsLog2.getEast(), gpsLog2.getNorth());
                    return coord;
                }
            }
        }
    } finally {
        session.close();
    }
    return null;
}
From source file:google.registry.monitoring.metrics.stackdriver.StackdriverWriter.java
License:Open Source License
private static TimeInterval encodeTimeInterval(Interval nativeInterval, Kind metricKind) {
    TimeInterval encodedInterval = new TimeInterval()
            .setStartTime(DATETIME_FORMATTER.print(nativeInterval.getStart()));
    DateTime endTimestamp = nativeInterval.toDurationMillis() == 0 && metricKind != Kind.GAUGE
            ? nativeInterval.getEnd().plusMillis(1)
            : nativeInterval.getEnd();
    return encodedInterval.setEndTime(DATETIME_FORMATTER.print(endTimestamp));
}
From source file:io.coala.dsol.util.DsolAccumulator.java
License:Apache License
/** helper method */
protected void updateValue() {
    final DateTime changeTime = getDateTime();
    if (!changeTime.isAfter(this.lastChangeTime))
        return;
    final Interval interval = DsolUtil.crop(new Interval(this.lastChangeTime, changeTime), getTreatment());
    if (interval.getEndMillis() != changeTime.getMillis())
        LOG.warn(String.format("Cropped interval end time %s to %s", changeTime, interval));
    this.lastChangeTime = changeTime;
    final double oldRate = getRate().doubleValue();
    double deltaRate = oldRate;
    if (this.integrateMin != null && this.integrateMin.doubleValue() > deltaRate) {
        // LOG.trace("Integrating " + getRateTitle() + " with minimum "
        // + this.integrateMin);
        deltaRate = this.integrateMin.doubleValue();
    } else if (this.integrateMax != null && deltaRate > this.integrateMax.doubleValue()) {
        // LOG.trace("Integrating " + getRateTitle() + " with maximum "
        // + this.integrateMax);
        deltaRate = this.integrateMax.doubleValue();
    }
    final double deltaValue = DsolUtil
            .toTimeUnit(this.timeUnit, deltaRate * interval.toDurationMillis(), TimeUnitInterface.MILLISECOND)
            .doubleValue();
    addValue(deltaValue);
}
From source file:io.druid.indexing.common.task.AbstractFixedIntervalTask.java
License:Apache License
protected AbstractFixedIntervalTask(String id, String groupId, TaskResource taskResource, String dataSource,
        Interval interval, Map<String, Object> context) {
    super(id, groupId, taskResource, dataSource, context);
    this.interval = Preconditions.checkNotNull(interval, "interval");
    Preconditions.checkArgument(interval.toDurationMillis() > 0, "interval empty");
}
From source file:io.druid.indexing.overlord.TaskLockbox.java
License:Apache License
/**
 * Attempt to lock a task, without removing it from the queue. Can safely be called multiple times on the same task.
 * This method will attempt to assign version strings that obey the invariant that every version string is
 * lexicographically greater than any other version string previously assigned to the same interval. This invariant
 * is only mostly guaranteed, however; we assume clock monotonicity and we assume that callers specifying
 * {@code preferredVersion} are doing the right thing.
 *
 * @param task             task that wants a lock
 * @param interval         interval to lock
 * @param preferredVersion use this version string if one has not yet been assigned
 *
 * @return lock version if lock was acquired, absent otherwise
 * @throws IllegalStateException if the task is not a valid active task
 */
private Optional<TaskLock> tryLock(final Task task, final Interval interval, final Optional<String> preferredVersion) {
    giant.lock();
    try {
        if (!activeTasks.contains(task.getId())) {
            throw new ISE("Unable to grant lock to inactive Task [%s]", task.getId());
        }
        Preconditions.checkArgument(interval.toDurationMillis() > 0, "interval empty");
        final String dataSource = task.getDataSource();
        final List<TaskLockPosse> foundPosses = findLockPossesForInterval(dataSource, interval);
        final TaskLockPosse posseToUse;
        if (foundPosses.size() > 1) {
            // Too many existing locks.
            return Optional.absent();
        } else if (foundPosses.size() == 1) {
            // One existing lock -- check if we can add to it.
            final TaskLockPosse foundPosse = Iterables.getOnlyElement(foundPosses);
            if (foundPosse.getTaskLock().getInterval().contains(interval)
                    && foundPosse.getTaskLock().getGroupId().equals(task.getGroupId())) {
                posseToUse = foundPosse;
            } else {
                return Optional.absent();
            }
        } else {
            // No existing locks. We can make a new one.
            if (!running.containsKey(dataSource)) {
                running.put(dataSource,
                        new TreeMap<Interval, TaskLockPosse>(Comparators.intervalsByStartThenEnd()));
            }
            // Create new TaskLock and assign it a version.
            // Assumption: We'll choose a version that is greater than any previously-chosen version for our
            // interval. (This may not always be true, unfortunately. See below.)
            final String version;
            if (preferredVersion.isPresent()) {
                // We have a preferred version. We'll trust our caller to not break our ordering assumptions
                // and just use it.
                version = preferredVersion.get();
            } else {
                // We are running under an interval lock right now, so just using the current time works as long
                // as we can trust our clock to be monotonic and have enough resolution since the last time we
                // created a TaskLock for the same interval. This may not always be true; to assure it we would
                // need to use some method of timekeeping other than the wall clock.
                version = new DateTime().toString();
            }
            posseToUse = new TaskLockPosse(new TaskLock(task.getGroupId(), dataSource, interval, version));
            running.get(dataSource).put(interval, posseToUse);
            log.info("Created new TaskLockPosse: %s", posseToUse);
        }
        // Add to existing TaskLockPosse, if necessary
        if (posseToUse.getTaskIds().add(task.getId())) {
            log.info("Added task[%s] to TaskLock[%s]", task.getId(), posseToUse.getTaskLock().getGroupId());
            // Update task storage facility. If it fails, revoke the lock.
            try {
                taskStorage.addLock(task.getId(), posseToUse.getTaskLock());
                return Optional.of(posseToUse.getTaskLock());
            } catch (Exception e) {
                log.makeAlert("Failed to persist lock in storage")
                        .addData("task", task.getId())
                        .addData("dataSource", posseToUse.getTaskLock().getDataSource())
                        .addData("interval", posseToUse.getTaskLock().getInterval())
                        .addData("version", posseToUse.getTaskLock().getVersion())
                        .emit();
                unlock(task, interval);
                return Optional.absent();
            }
        } else {
            log.info("Task[%s] already present in TaskLock[%s]", task.getId(),
                    posseToUse.getTaskLock().getGroupId());
            return Optional.of(posseToUse.getTaskLock());
        }
    } finally {
        giant.unlock();
    }
}
From source file:io.druid.server.coordinator.CostBalancerStrategy.java
License:Apache License
/**
 * This defines the unnormalized cost function between two segments. There is a base cost given by
 * the minimum size of the two segments and additional penalties.
 * recencyPenalty: it is more likely that recent segments will be queried together
 * dataSourcePenalty: if two segments belong to the same data source, they are more likely to be involved
 * in the same queries
 * gapPenalty: it is more likely that segments close together in time will be queried together
 *
 * @param segment1 The first DataSegment.
 * @param segment2 The second DataSegment.
 *
 * @return The joint cost of placing the two DataSegments together on one node.
 */
public double computeJointSegmentCosts(final DataSegment segment1, final DataSegment segment2) {
    final Interval gap = segment1.getInterval().gap(segment2.getInterval());
    final double baseCost = Math.min(segment1.getSize(), segment2.getSize());
    double recencyPenalty = 1;
    double dataSourcePenalty = 1;
    double gapPenalty = 1;
    if (segment1.getDataSource().equals(segment2.getDataSource())) {
        dataSourcePenalty = 2;
    }
    double segment1diff = referenceTimestamp - segment1.getInterval().getEndMillis();
    double segment2diff = referenceTimestamp - segment2.getInterval().getEndMillis();
    if (segment1diff < SEVEN_DAYS_IN_MILLIS && segment2diff < SEVEN_DAYS_IN_MILLIS) {
        recencyPenalty = (2 - segment1diff / SEVEN_DAYS_IN_MILLIS) * (2 - segment2diff / SEVEN_DAYS_IN_MILLIS);
    }
    /** gap is null if the two segment intervals overlap or if they're adjacent */
    if (gap == null) {
        gapPenalty = 2;
    } else {
        long gapMillis = gap.toDurationMillis();
        if (gapMillis < THIRTY_DAYS_IN_MILLIS) {
            gapPenalty = 2 - gapMillis / THIRTY_DAYS_IN_MILLIS;
        }
    }
    final double cost = baseCost * recencyPenalty * dataSourcePenalty * gapPenalty;
    return cost;
}
From source file:li.klass.fhem.accesibility.MyAccessibilityService.java
License:Open Source License
@Override
public void onAccessibilityEvent(AccessibilityEvent accessibilityEvent) {
    DateTime now = new DateTime();
    Interval interval = new Interval(lastCommandTime, now);
    long millis = interval.toDurationMillis();
    if (millis < 3000)
        return;
    lastCommandTime = now;
    List<CharSequence> texts = accessibilityEvent.getText();
    if (texts.isEmpty())
        return;
    String command = texts.get(0).toString();
    command = command.toLowerCase(Locale.getDefault());
    startService(new Intent(Actions.RECOGNIZE_VOICE_COMMAND).setClass(this, VoiceCommandIntentService.class)
            .putExtra(BundleExtraKeys.COMMAND, command));
    Log.d(MyAccessibilityService.class.getName(), command);
}
From source file:net.eledge.android.europeana.search.task.RecordTask.java
License:Apache License
@Override
protected void onPostExecute(RecordObject result) {
    DateTime endTime = DateTime.now();
    Interval interval = new Interval(startTime, endTime);
    Tracker tracker = ((EuropeanaApplication) mActivity.getApplication()).getAnalyticsTracker();
    tracker.send(new HitBuilders.TimingBuilder().setCategory("Tasks").setValue(interval.toDurationMillis())
            .setVariable("RecordTask").setLabel(recordId).build());
    recordController.record = result;
    mActivity.runOnUiThread(new ListenerNotifier<>(recordController.listeners.values(), result));
}
From source file:net.sourceforge.fenixedu.presentationTier.Action.academicAdministration.executionCourseManagement.CourseLoadOverviewBean.java
License:Open Source License
private BigDecimal getShiftCourseLoad(final Shift shift) {
    BigDecimal result = BigDecimal.ZERO;
    for (final Lesson lesson : shift.getAssociatedLessonsSet()) {
        for (final Interval interval : lesson.getAllLessonIntervals()) {
            final BigDecimal duration = new BigDecimal(interval.toDurationMillis());
            result = result.add(duration.divide(MILIS_TO_HOURS_DIVOSOR));
        }
    }
    return result;
}
From source file:org.apache.beam.runners.dataflow.worker.DataflowWorkUnitClient.java
License:Apache License
/** Reports the status of the most recently requested work item. */
@Override
public WorkItemServiceState reportWorkItemStatus(WorkItemStatus workItemStatus) throws IOException {
    DateTime endTime = DateTime.now();
    workItemStatus.setFactory(Transport.getJsonFactory());
    logger.debug("Reporting work status: {}", workItemStatus);
    // Log the stage execution time of finished stages that have a stage name. This will not be set
    // in the event this status is associated with a dummy work item.
    if (firstNonNull(workItemStatus.getCompleted(), Boolean.FALSE)
            && DataflowWorkerLoggingMDC.getStageName() != null) {
        DateTime startTime = stageStartTime.get();
        if (startTime != null) {
            // This thread should have been tagged with the stage start time during getWorkItem(),
            Interval elapsed = new Interval(startTime, endTime);
            int numErrors = workItemStatus.getErrors() == null ? 0 : workItemStatus.getErrors().size();
            logger.info("Finished processing stage {} with {} errors in {} seconds ",
                    DataflowWorkerLoggingMDC.getStageName(), numErrors,
                    (double) elapsed.toDurationMillis() / 1000);
        }
    }
    shortIdCache.shortenIdsIfAvailable(workItemStatus.getCounterUpdates());
    ReportWorkItemStatusRequest request = new ReportWorkItemStatusRequest().setWorkerId(options.getWorkerId())
            .setWorkItemStatuses(Collections.singletonList(workItemStatus))
            .setCurrentWorkerTime(toCloudTime(endTime));
    ReportWorkItemStatusResponse result = dataflow.projects().locations().jobs().workItems()
            .reportStatus(options.getProject(), options.getRegion(), options.getJobId(), request).execute();
    if (result == null) {
        logger.warn("Report work item status response: null");
        throw new IOException("Got null work item status response");
    }
    if (result.getWorkItemServiceStates() == null) {
        logger.warn("Report work item status response: {}", result);
        throw new IOException("Report work item status contained no work item service states");
    }
    if (result.getWorkItemServiceStates().size() != 1) {
        logger.warn("Report work item status response: {}", result);
        throw new IOException(
                "This version of the SDK expects exactly one work item service state from the service "
                        + "but got " + result.getWorkItemServiceStates().size() + " states");
    }
    shortIdCache.storeNewShortIds(request, result);
    WorkItemServiceState state = result.getWorkItemServiceStates().get(0);
    logger.debug("ReportWorkItemStatus result: {}", state);
    return state;
}