List of usage examples for org.joda.time Duration millis
public static Duration millis(long millis)
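Duration.millis obtains a Duration of exactly the given number of milliseconds. Before the framework-heavy examples below, a minimal standalone sketch in plain Joda-Time (class name and values are mine, for illustration only):

import org.joda.time.Duration;

public class DurationMillisDemo {
  public static void main(String[] args) {
    Duration d = Duration.millis(1500); // exactly 1500 ms
    System.out.println(d.getMillis());          // 1500
    System.out.println(d.getStandardSeconds()); // 1
    // Negative values are legal; Beam's waitUntilFinish below uses
    // Duration.millis(-1) as an "unbounded wait" sentinel.
    System.out.println(Duration.millis(-1).isShorterThan(Duration.ZERO)); // true
  }
}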
From source file:org.apache.beam.runners.dataflow.DataflowPipelineJob.java
License:Apache License
@Override
@Nullable
public State waitUntilFinish() {
  return waitUntilFinish(Duration.millis(-1));
}
From source file:org.apache.beam.runners.dataflow.DataflowPipelineJob.java
License:Apache License
/**
 * Reset backoff. If duration is limited, calculate time remaining, otherwise just reset retry
 * count.
 *
 * <p>If a total duration for all backoff has been set, update the new cumulative sleep time to be
 * the remaining total backoff duration, stopping if we have already exceeded the allotted time.
 */
private static BackOff resetBackoff(Duration duration, NanoClock nanoClock, long startNanos) {
  BackOff backoff;
  if (duration.isLongerThan(Duration.ZERO)) {
    long nanosConsumed = nanoClock.nanoTime() - startNanos;
    Duration consumed = Duration.millis((nanosConsumed + 999999) / 1000000);
    Duration remaining = duration.minus(consumed);
    if (remaining.isLongerThan(Duration.ZERO)) {
      backoff = getMessagesBackoff(remaining);
    } else {
      backoff = BackOff.STOP_BACKOFF;
    }
  } else {
    backoff = getMessagesBackoff(duration);
  }
  return backoff;
}
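The conversion (nanosConsumed + 999999) / 1000000 rounds elapsed nanoseconds up to whole milliseconds, so a partially elapsed millisecond still counts against the backoff budget. A small self-contained check (the helper name is mine, not Beam's):

import org.joda.time.Duration;

class CeilingMillisDemo {
  // Round a nanosecond count up to the nearest whole millisecond, as resetBackoff does.
  static Duration nanosToMillisCeiling(long nanos) {
    return Duration.millis((nanos + 999999) / 1000000);
  }

  public static void main(String[] args) {
    System.out.println(nanosToMillisCeiling(1).getMillis());       // 1, not 0
    System.out.println(nanosToMillisCeiling(1000000).getMillis()); // 1
    System.out.println(nanosToMillisCeiling(1000001).getMillis()); // 2
  }
}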
From source file:org.apache.beam.runners.dataflow.util.TimeUtil.java
License:Apache License
/**
 * Converts a Dataflow API duration string into a {@link Duration}.
 *
 * @return the parsed duration, or null if a parse error occurs
 */
@Nullable
public static Duration fromCloudDuration(String duration) {
  Matcher matcher = DURATION_PATTERN.matcher(duration);
  if (!matcher.matches()) {
    return null;
  }
  long millis = Long.parseLong(matcher.group(1)) * 1000;
  String frac = matcher.group(2);
  if (frac != null) {
    long fracs = Long.parseLong(frac);
    if (frac.length() == 3) { // millisecond resolution
      millis += fracs;
    } else if (frac.length() == 6) { // microsecond resolution
      millis += fracs / 1000;
    } else if (frac.length() == 9) { // nanosecond resolution
      millis += fracs / 1000000;
    } else {
      return null;
    }
  }
  return Duration.millis(millis);
}
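From the branches above, the fraction after the decimal point must be exactly 3, 6, or 9 digits, and sub-millisecond precision is truncated. Assuming the usual "<seconds>.<fraction>s" wire format matched by DURATION_PATTERN, the behavior would be:

// Illustrative calls; the input strings are assumptions about the wire format.
TimeUtil.fromCloudDuration("3s");           // Duration.millis(3000)
TimeUtil.fromCloudDuration("3.250s");       // Duration.millis(3250) - millisecond fraction
TimeUtil.fromCloudDuration("3.250000s");    // Duration.millis(3250) - microsecond fraction
TimeUtil.fromCloudDuration("3.250000000s"); // Duration.millis(3250) - nanosecond fraction
TimeUtil.fromCloudDuration("3.25s");        // null - fraction length is not 3, 6, or 9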
From source file:org.apache.beam.runners.dataflow.worker.DataflowBatchWorkerHarness.java
License:Apache License
/** Helper for initializing the BackOff used for retries. */
private static BackOff createBackOff() {
  return FluentBackoff.DEFAULT
      .withInitialBackoff(Duration.millis(BACKOFF_INITIAL_INTERVAL_MILLIS))
      .withMaxBackoff(Duration.millis(BACKOFF_MAX_INTERVAL_MILLIS))
      .backoff();
}
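A BackOff built this way is typically drained with BackOffUtils.next, which sleeps for the next interval and returns false once the policy is exhausted (the getConfig example further down uses exactly this pattern). A sketch with placeholder durations and a hypothetical doWork():

import java.io.IOException;
import org.apache.beam.sdk.util.BackOff;
import org.apache.beam.sdk.util.BackOffUtils;
import org.apache.beam.sdk.util.FluentBackoff;
import org.apache.beam.sdk.util.Sleeper;
import org.joda.time.Duration;

class BackoffRetryDemo {
  static void runWithRetries() throws IOException, InterruptedException {
    BackOff backoff = FluentBackoff.DEFAULT
        .withInitialBackoff(Duration.millis(100))     // placeholder, not Beam's constant
        .withMaxBackoff(Duration.standardSeconds(30)) // placeholder
        .backoff();
    while (true) {
      try {
        doWork(); // hypothetical operation that may fail transiently
        return;
      } catch (IOException e) {
        // Sleeps for the next backoff interval; false means retries are exhausted.
        if (!BackOffUtils.next(Sleeper.DEFAULT, backoff)) {
          throw e;
        }
      }
    }
  }

  static void doWork() throws IOException {} // hypothetical
}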
From source file:org.apache.beam.runners.dataflow.worker.DataflowWorkProgressUpdater.java
License:Apache License
@Override
protected void reportProgressHelper() throws Exception {
  WorkItemServiceState result =
      workItemStatusClient.reportUpdate(
          dynamicSplitResultToReport, Duration.millis(requestedLeaseDurationMs));
  if (result != null) {
    if (result.getHotKeyDetection() != null
        && result.getHotKeyDetection().getUserStepName() != null) {
      HotKeyDetection hotKeyDetection = result.getHotKeyDetection();
      hotKeyLogger.logHotKeyDetection(
          hotKeyDetection.getUserStepName(),
          TimeUtil.fromCloudDuration(hotKeyDetection.getHotKeyAge()));
    }
    // Resets state after a successful progress report.
    dynamicSplitResultToReport = null;
    progressReportIntervalMs =
        nextProgressReportInterval(
            fromCloudDuration(result.getReportStatusInterval()).getMillis(),
            leaseRemainingTime(getLeaseExpirationTimestamp(result)));
    ApproximateSplitRequest suggestedStopPoint = result.getSplitRequest();
    if (suggestedStopPoint != null) {
      LOG.info("Proposing dynamic split of work unit {} at {}", workString(), suggestedStopPoint);
      dynamicSplitResultToReport =
          worker.requestDynamicSplit(
              SourceTranslationUtils.toDynamicSplitRequest(suggestedStopPoint));
    }
  }
}
From source file:org.apache.beam.runners.dataflow.worker.DataflowWorkUnitClient.java
License:Apache License
private Optional<WorkItem> getWorkItemInternal(List<String> workItemTypes, List<String> capabilities)
    throws IOException {
  LeaseWorkItemRequest request = new LeaseWorkItemRequest();
  request.setFactory(Transport.getJsonFactory());
  request.setWorkItemTypes(workItemTypes);
  request.setWorkerCapabilities(capabilities);
  request.setWorkerId(options.getWorkerId());
  request.setCurrentWorkerTime(toCloudTime(DateTime.now()));
  // This shouldn't be necessary, but a valid cloud duration string is
  // required by the Google API parsing framework. TODO: Fix the framework
  // so that an empty or not-present string can be used as a default value.
  request.setRequestedLeaseDuration(
      toCloudDuration(Duration.millis(WorkProgressUpdater.DEFAULT_LEASE_DURATION_MILLIS)));
  logger.debug("Leasing work: {}", request);
  LeaseWorkItemResponse response =
      dataflow
          .projects()
          .locations()
          .jobs()
          .workItems()
          .lease(options.getProject(), options.getRegion(), options.getJobId(), request)
          .execute();
  logger.debug("Lease work response: {}", response);
  List<WorkItem> workItems = response.getWorkItems();
  if (workItems == null || workItems.isEmpty()) {
    // We didn't lease any work.
    return Optional.absent();
  } else if (workItems.size() > 1) {
    throw new IOException(
        "This version of the SDK expects no more than one work item from the service: " + response);
  }
  WorkItem work = response.getWorkItems().get(0);
  // Looks like the work's a'ight.
  return Optional.of(work);
}
From source file:org.apache.beam.runners.dataflow.worker.StreamingDataflowWorker.java
License:Apache License
private void scheduleWorkItem(
    final ComputationState computationState,
    final Instant inputDataWatermark,
    final Instant synchronizedProcessingTime,
    final Windmill.WorkItem workItem) {
  Preconditions.checkNotNull(inputDataWatermark);
  // May be null if output watermark not yet known.
  @Nullable
  final Instant outputDataWatermark =
      WindmillTimeUtils.windmillToHarnessWatermark(workItem.getOutputDataWatermark());
  Preconditions.checkState(
      outputDataWatermark == null || !outputDataWatermark.isAfter(inputDataWatermark));
  SdkWorkerHarness worker = sdkHarnessRegistry.getAvailableWorkerAndAssignWork();
  if (workItem.hasHotKeyInfo()) {
    Windmill.HotKeyInfo hotKeyInfo = workItem.getHotKeyInfo();
    Duration hotKeyAge = Duration.millis(hotKeyInfo.getHotKeyAgeUsec() / 1000);
    // The MapTask instruction is ordered by dependencies, such that the first element is
    // always going to be the shuffle task.
    String stepName = computationState.getMapTask().getInstructions().get(0).getName();
    hotKeyLogger.logHotKeyDetection(stepName, hotKeyAge);
  }
  Work work =
      new Work(workItem) {
        @Override
        public void run() {
          try {
            process(
                worker,
                computationState,
                inputDataWatermark,
                outputDataWatermark,
                synchronizedProcessingTime,
                this);
          } finally {
            // Reduce the work associated with the worker
            sdkHarnessRegistry.completeWork(worker);
          }
        }
      };
  if (!computationState.activateWork(workItem.getKey(), work)) {
    // Free worker if the work was not activated.
    // This can happen if it's duplicate work or some other reason.
    sdkHarnessRegistry.completeWork(worker);
  }
}
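Note the unit conversion: getHotKeyAgeUsec() / 1000 truncates microseconds down to whole milliseconds, unlike the rounded-up conversion in resetBackoff earlier. A one-class illustration (sample value invented):

import org.joda.time.Duration;

class UsecToMillisDemo {
  public static void main(String[] args) {
    long hotKeyAgeUsec = 1999; // invented sample value
    // Integer division truncates: 1999 microseconds become 1 ms, not 2.
    Duration hotKeyAge = Duration.millis(hotKeyAgeUsec / 1000);
    System.out.println(hotKeyAge.getMillis()); // 1
  }
}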
From source file:org.apache.beam.runners.dataflow.worker.StreamingDataflowWorker.java
License:Apache License
private void getConfig(String computation) {
  BackOff backoff =
      FluentBackoff.DEFAULT
          .withInitialBackoff(Duration.millis(100))
          .withMaxBackoff(Duration.standardMinutes(1))
          .withMaxCumulativeBackoff(Duration.standardMinutes(5))
          .backoff();
  while (running.get()) {
    try {
      if (windmillServiceEnabled) {
        getConfigFromDataflowService(computation);
      } else {
        getConfigFromWindmill(computation);
      }
      return;
    } catch (IllegalArgumentException | IOException e) {
      LOG.warn("Error fetching config: ", e);
      try {
        if (!BackOffUtils.next(Sleeper.DEFAULT, backoff)) {
          return;
        }
      } catch (IOException ioe) {
        LOG.warn("Error backing off, will not retry: ", ioe);
        return;
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        return;
      }
    }
  }
}
From source file:org.apache.beam.runners.dataflow.worker.StreamingDataflowWorker.java
License:Apache License
/**
 * Sends a GetData request to Windmill for all sufficiently old active work.
 *
 * <p>This informs Windmill that processing is ongoing and the work should not be retried. The age
 * threshold is determined by {@link
 * StreamingDataflowWorkerOptions#getActiveWorkRefreshPeriodMillis}.
 */
private void refreshActiveWork() {
  Map<String, List<Windmill.KeyedGetDataRequest>> active = new HashMap<>();
  Instant refreshDeadline =
      Instant.now().minus(Duration.millis(options.getActiveWorkRefreshPeriodMillis()));
  for (Map.Entry<String, ComputationState> entry : computationMap.entrySet()) {
    active.put(entry.getKey(), entry.getValue().getKeysToRefresh(refreshDeadline));
  }
  metricTrackingWindmillServer.refreshActiveWork(active);
}
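The deadline arithmetic is plain Joda-Time: subtracting a millisecond Duration from the current Instant yields the age cutoff. A self-contained sketch (the 10000 ms period is an arbitrary stand-in for getActiveWorkRefreshPeriodMillis()):

import org.joda.time.Duration;
import org.joda.time.Instant;

class RefreshDeadlineDemo {
  public static void main(String[] args) {
    long refreshPeriodMillis = 10000; // stand-in for options.getActiveWorkRefreshPeriodMillis()
    Instant refreshDeadline = Instant.now().minus(Duration.millis(refreshPeriodMillis));
    // Work that started at or before this instant is old enough to refresh.
    System.out.println("Refresh work started before: " + refreshDeadline);
  }
}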
From source file:org.apache.beam.runners.dataflow.worker.StreamingModeExecutionContext.java
License:Apache License
public void start(
    @Nullable Object key,
    Windmill.WorkItem work,
    Instant inputDataWatermark,
    @Nullable Instant outputDataWatermark,
    @Nullable Instant synchronizedProcessingTime,
    WindmillStateReader stateReader,
    StateFetcher stateFetcher,
    Windmill.WorkItemCommitRequest.Builder outputBuilder) {
  this.key = key;
  this.work = work;
  this.stateFetcher = stateFetcher;
  this.outputBuilder = outputBuilder;
  this.sideInputCache.clear();
  clearSinkFullHint();
  Instant processingTime = Instant.now();
  // Ensure that the processing time is greater than any fired processing time
  // timers. Otherwise a trigger could ignore the timer and orphan the window.
  for (Windmill.Timer timer : work.getTimers().getTimersList()) {
    if (timer.getType() == Windmill.Timer.Type.REALTIME) {
      Instant inferredFiringTime =
          WindmillTimeUtils.windmillToHarnessTimestamp(timer.getTimestamp())
              .plus(Duration.millis(1));
      if (inferredFiringTime.isAfter(processingTime)) {
        processingTime = inferredFiringTime;
      }
    }
  }
  for (StepContext stepContext : getAllStepContexts()) {
    stepContext.start(
        stateReader, inputDataWatermark, processingTime, outputDataWatermark, synchronizedProcessingTime);
  }
}
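The timer loop relies on Instant.plus(Duration.millis(1)) to push processing time strictly past each fired REALTIME timer. The comparison in isolation (timer value invented):

import org.joda.time.Duration;
import org.joda.time.Instant;

class ProcessingTimeBumpDemo {
  public static void main(String[] args) {
    Instant processingTime = Instant.now();
    Instant timerTimestamp = processingTime; // invented: a timer that fired "now"
    Instant inferredFiringTime = timerTimestamp.plus(Duration.millis(1));
    if (inferredFiringTime.isAfter(processingTime)) {
      processingTime = inferredFiringTime; // now strictly greater than the timer
    }
    System.out.println(processingTime);
  }
}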