Example usage for org.joda.time Duration standardMinutes

Introduction

On this page you can find example usages of org.joda.time Duration.standardMinutes.

Prototype

public static Duration standardMinutes(long minutes) 

Document

Create a duration with the specified number of minutes assuming that there are the standard number of milliseconds in a minute.
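
A minimal standalone sketch (not taken from any of the projects below, assuming only the Joda-Time library on the classpath) showing what the factory returns:

import org.joda.time.Duration;

public class StandardMinutesExample {
    public static void main(String[] args) {
        // Five minutes, using the standard 60,000 milliseconds per minute.
        Duration timeout = Duration.standardMinutes(5);

        System.out.println(timeout.getMillis());            // 300000
        System.out.println(timeout.getStandardSeconds());   // 300
        System.out.println(timeout.isLongerThan(Duration.standardSeconds(90))); // true
    }
}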

Usage

From source file: org.apache.beam.runners.dataflow.worker.ReaderCache.java

License: Apache License

/** ReaderCache with default 1 minute expiration for readers. */
ReaderCache() {
    this(Duration.standardMinutes(1));
}

From source file: org.apache.beam.runners.dataflow.worker.StreamingDataflowWorker.java

License: Apache License

private void getConfig(String computation) {
    // Retry config fetches with exponential backoff: 100 ms initial delay,
    // capped at 1 minute per attempt and 5 minutes cumulatively.
    BackOff backoff = FluentBackoff.DEFAULT.withInitialBackoff(Duration.millis(100))
            .withMaxBackoff(Duration.standardMinutes(1)).withMaxCumulativeBackoff(Duration.standardMinutes(5))
            .backoff();
    while (running.get()) {
        try {
            if (windmillServiceEnabled) {
                getConfigFromDataflowService(computation);
            } else {
                getConfigFromWindmill(computation);
            }
            return;
        } catch (IllegalArgumentException | IOException e) {
            LOG.warn("Error fetching config: ", e);
            try {
                if (!BackOffUtils.next(Sleeper.DEFAULT, backoff)) {
                    return;
                }
            } catch (IOException ioe) {
                LOG.warn("Error backing off, will not retry: ", ioe);
                return;
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                return;
            }
        }
    }
}

From source file: org.apache.beam.runners.flink.streaming.TopWikipediaSessionsITCase.java

License: Apache License

@Override
protected void testProgram() throws Exception {

    Pipeline p = FlinkTestPipeline.createForStreaming();

    Long now = (System.currentTimeMillis() + 10000) / 1000;

    PCollection<KV<String, Long>> output = p
            .apply(Create
                    .of(Arrays.asList(new TableRow().set("timestamp", now).set("contributor_username", "user1"),
                            new TableRow().set("timestamp", now + 10).set("contributor_username", "user3"),
                            new TableRow().set("timestamp", now).set("contributor_username", "user2"),
                            new TableRow().set("timestamp", now).set("contributor_username", "user1"),
                            new TableRow().set("timestamp", now + 2).set("contributor_username", "user1"),
                            new TableRow().set("timestamp", now).set("contributor_username", "user2"),
                            new TableRow().set("timestamp", now + 1).set("contributor_username", "user2"),
                            new TableRow().set("timestamp", now + 5).set("contributor_username", "user2"),
                            new TableRow().set("timestamp", now + 7).set("contributor_username", "user2"),
                            new TableRow().set("timestamp", now + 8).set("contributor_username", "user2"),
                            new TableRow().set("timestamp", now + 200).set("contributor_username", "user2"),
                            new TableRow().set("timestamp", now + 230).set("contributor_username", "user1"),
                            new TableRow().set("timestamp", now + 230).set("contributor_username", "user2"),
                            new TableRow().set("timestamp", now + 240).set("contributor_username", "user2"),
                            new TableRow().set("timestamp", now + 245).set("contributor_username", "user3"),
                            new TableRow().set("timestamp", now + 235).set("contributor_username", "user3"),
                            new TableRow().set("timestamp", now + 236).set("contributor_username", "user3"),
                            new TableRow().set("timestamp", now + 237).set("contributor_username", "user3"),
                            new TableRow().set("timestamp", now + 238).set("contributor_username", "user3"),
                            new TableRow().set("timestamp", now + 239).set("contributor_username", "user3"),
                            new TableRow().set("timestamp", now + 240).set("contributor_username", "user3"),
                            new TableRow().set("timestamp", now + 241).set("contributor_username", "user2"),
                            new TableRow().set("timestamp", now).set("contributor_username", "user3"))))

            .apply(ParDo.of(new DoFn<TableRow, String>() {
                @ProcessElement
                public void processElement(ProcessContext c) throws Exception {
                    TableRow row = c.element();
                    long timestamp = (Integer) row.get("timestamp");
                    String userName = (String) row.get("contributor_username");
                    if (userName != null) {
                        // Sets the timestamp field to be used in windowing.
                        c.outputWithTimestamp(userName, new Instant(timestamp * 1000L));
                    }
                }
            }))

            // Group each user's edits into sessions that close after a one-minute gap with no events.
            .apply(Window.<String>into(Sessions.withGapDuration(Duration.standardMinutes(1))))

            .apply(Count.<String>perElement());

    PCollection<String> format = output.apply(ParDo.of(new DoFn<KV<String, Long>, String>() {
        @ProcessElement
        public void processElement(ProcessContext c) throws Exception {
            KV<String, Long> el = c.element();
            String out = "user: " + el.getKey() + " value:" + el.getValue();
            c.output(out);
        }
    }));

    format.apply(TextIO.Write.to(resultPath));

    p.run();
}

From source file: org.apache.beam.sdk.io.gcp.bigquery.WriteRename.java

License: Apache License

private void copy(JobService jobService, DatasetService datasetService, String jobIdPrefix, TableReference ref,
        List<TableReference> tempTables, WriteDisposition writeDisposition, CreateDisposition createDisposition,
        @Nullable String tableDescription) throws InterruptedException, IOException {
    JobConfigurationTableCopy copyConfig = new JobConfigurationTableCopy().setSourceTables(tempTables)
            .setDestinationTable(ref).setWriteDisposition(writeDisposition.name())
            .setCreateDisposition(createDisposition.name());

    String projectId = ref.getProjectId();
    Job lastFailedCopyJob = null;
    RetryJobId jobId = new RetryJobId(jobIdPrefix, 0);
    String bqLocation = BigQueryHelpers.getDatasetLocation(datasetService, ref.getProjectId(),
            ref.getDatasetId());
    BackOff backoff = BackOffAdapter.toGcpBackOff(
            FluentBackoff.DEFAULT.withMaxRetries(maxRetryJobs).withInitialBackoff(Duration.standardSeconds(1))
                    .withMaxBackoff(Duration.standardMinutes(1)).backoff());
    Sleeper sleeper = Sleeper.DEFAULT;
    int i = 0;
    do {
        ++i;
        JobReference jobRef = new JobReference().setProjectId(projectId).setJobId(jobId.getJobId())
                .setLocation(bqLocation);
        LOG.info("Starting copy job for table {} using  {}, attempt {}", ref, jobRef, i);
        try {
            jobService.startCopyJob(jobRef, copyConfig);
        } catch (IOException e) {
            LOG.warn("Copy job {} failed with {}", jobRef, e);
            // It's possible that the job actually made it to BQ even though we got a failure here.
            // For example, the response from BQ may have timed out returning. getRetryJobId will
            // return the correct job id to use on retry, or a job id to continue polling (if it turns
            // out that the job has not actually failed yet).
            RetryJobIdResult result = BigQueryHelpers.getRetryJobId(jobId, projectId, bqLocation, jobService);
            jobId = result.jobId;
            if (result.shouldRetry) {
                // Try the load again with the new job id.
                continue;
            }
            // Otherwise, the job has reached BigQuery and is in either the PENDING state or has
            // completed successfully.
        }
        Job copyJob = jobService.pollJob(jobRef, BatchLoads.LOAD_JOB_POLL_MAX_RETRIES);
        Status jobStatus = BigQueryHelpers.parseStatus(copyJob);
        switch (jobStatus) {
        case SUCCEEDED:
            if (tableDescription != null) {
                datasetService.patchTableDescription(ref, tableDescription);
            }
            return;
        case UNKNOWN:
            // This might happen if BigQuery's job listing is slow. Retry with the same
            // job id.
            LOG.info("Copy job {} finished in unknown state: {}: {}", jobRef, copyJob.getStatus(),
                    (i < maxRetryJobs - 1) ? "will retry" : "will not retry");
            lastFailedCopyJob = copyJob;
            continue;
        case FAILED:
            lastFailedCopyJob = copyJob;
            jobId = BigQueryHelpers.getRetryJobId(jobId, projectId, bqLocation, jobService).jobId;
            continue;
        default:
            throw new IllegalStateException(String.format("Unexpected status [%s] of load job: %s.", jobStatus,
                    BigQueryHelpers.jobToPrettyString(copyJob)));
        }
    } while (nextBackOff(sleeper, backoff));
    throw new RuntimeException(String.format(
            "Failed to create copy job with id prefix %s, "
                    + "reached max retries: %d, last failed copy job: %s.",
            jobIdPrefix, maxRetryJobs, BigQueryHelpers.jobToPrettyString(lastFailedCopyJob)));
}

From source file: org.apache.beam.sdk.io.gcp.bigquery.WriteTables.java

License: Apache License

private void load(JobService jobService, DatasetService datasetService, String jobIdPrefix, TableReference ref,
        TimePartitioning timePartitioning, @Nullable TableSchema schema, List<String> gcsUris,
        WriteDisposition writeDisposition, CreateDisposition createDisposition,
        @Nullable String tableDescription) throws InterruptedException, IOException {
    JobConfigurationLoad loadConfig = new JobConfigurationLoad().setDestinationTable(ref).setSchema(schema)
            .setSourceUris(gcsUris).setWriteDisposition(writeDisposition.name())
            .setCreateDisposition(createDisposition.name()).setSourceFormat("NEWLINE_DELIMITED_JSON")
            .setIgnoreUnknownValues(ignoreUnknownValues);
    if (timePartitioning != null) {
        loadConfig.setTimePartitioning(timePartitioning);
    }
    String projectId = loadJobProjectId == null ? ref.getProjectId() : loadJobProjectId.get();
    Job lastFailedLoadJob = null;
    String bqLocation = BigQueryHelpers.getDatasetLocation(datasetService, ref.getProjectId(),
            ref.getDatasetId());

    BackOff backoff = BackOffAdapter.toGcpBackOff(
            FluentBackoff.DEFAULT.withMaxRetries(maxRetryJobs).withInitialBackoff(Duration.standardSeconds(1))
                    .withMaxBackoff(Duration.standardMinutes(1)).backoff());
    Sleeper sleeper = Sleeper.DEFAULT;
    // First attempt is always jobIdPrefix-0.
    RetryJobId jobId = new RetryJobId(jobIdPrefix, 0);
    int i = 0;
    do {
        ++i;
        JobReference jobRef = new JobReference().setProjectId(projectId).setJobId(jobId.getJobId())
                .setLocation(bqLocation);

        LOG.info("Loading {} files into {} using job {}, attempt {}", gcsUris.size(), ref, jobRef, i);
        try {
            jobService.startLoadJob(jobRef, loadConfig);
        } catch (IOException e) {
            LOG.warn("Load job {} failed with {}", jobRef, e);
            // It's possible that the job actually made it to BQ even though we got a failure here.
            // For example, the response from BQ may have timed out returning. getRetryJobId will
            // return the correct job id to use on retry, or a job id to continue polling (if it turns
            // out that the job has not actually failed yet).
            RetryJobIdResult result = BigQueryHelpers.getRetryJobId(jobId, projectId, bqLocation, jobService);
            jobId = result.jobId;
            if (result.shouldRetry) {
                // Try the load again with the new job id.
                continue;
            }
            // Otherwise, the job has reached BigQuery and is in either the PENDING state or has
            // completed successfully.
        }
        LOG.info("Load job {} started", jobRef);
        // Try to wait until the job is done (succeeded or failed).
        Job loadJob = jobService.pollJob(jobRef, BatchLoads.LOAD_JOB_POLL_MAX_RETRIES);

        Status jobStatus = BigQueryHelpers.parseStatus(loadJob);
        switch (jobStatus) {
        case SUCCEEDED:
            LOG.info("Load job {} succeeded. Statistics: {}", jobRef, loadJob.getStatistics());
            if (tableDescription != null) {
                datasetService.patchTableDescription(
                        ref.clone().setTableId(BigQueryHelpers.stripPartitionDecorator(ref.getTableId())),
                        tableDescription);
            }
            return;
        case UNKNOWN:
            // This might happen if BigQuery's job listing is slow. Retry with the same
            // job id.
            LOG.info("Load job {} finished in unknown state: {}: {}", jobRef, loadJob.getStatus(),
                    (i < maxRetryJobs - 1) ? "will retry" : "will not retry");
            lastFailedLoadJob = loadJob;
            continue;
        case FAILED:
            lastFailedLoadJob = loadJob;
            jobId = BigQueryHelpers.getRetryJobId(jobId, projectId, bqLocation, jobService).jobId;
            LOG.info("Load job {} failed, {}: {}. Next job id {}", jobRef,
                    (i < maxRetryJobs - 1) ? "will retry" : "will not retry", loadJob.getStatus(), jobId);
            continue;
        default:
            throw new IllegalStateException(String.format("Unexpected status [%s] of load job: %s.",
                    loadJob.getStatus(), BigQueryHelpers.jobToPrettyString(loadJob)));
        }
    } while (nextBackOff(sleeper, backoff));
    throw new RuntimeException(String.format(
            "Failed to create load job with id prefix %s, "
                    + "reached max retries: %d, last failed load job: %s.",
            jobIdPrefix, maxRetryJobs, BigQueryHelpers.jobToPrettyString(lastFailedLoadJob)));
}

From source file: org.apache.beam.sdk.nexmark.NexmarkLauncher.java

License: Apache License

/**
 * Monitor the performance and progress of a running job. Return final performance if it was
 * measured.
 */
@Nullable
private NexmarkPerf monitor(NexmarkQuery query) {
    if (!options.getMonitorJobs()) {
        return null;
    }

    if (configuration.debug) {
        NexmarkUtils.console("Waiting for main pipeline to 'finish'");
    } else {
        NexmarkUtils.console("--debug=false, so job will not self-cancel");
    }

    PipelineResult job = mainResult;
    PipelineResult publisherJob = publisherResult;
    List<NexmarkPerf.ProgressSnapshot> snapshots = new ArrayList<>();
    long startMsSinceEpoch = System.currentTimeMillis();
    long endMsSinceEpoch = -1;
    if (options.getRunningTimeMinutes() != null) {
        endMsSinceEpoch = startMsSinceEpoch
                + Duration.standardMinutes(options.getRunningTimeMinutes()).getMillis()
                - Duration.standardSeconds(configuration.preloadSeconds).getMillis();
    }
    long lastActivityMsSinceEpoch = -1;
    NexmarkPerf perf = null;
    boolean waitingForShutdown = false;
    boolean cancelJob = false;
    boolean publisherCancelled = false;
    List<String> errors = new ArrayList<>();

    while (true) {
        long now = System.currentTimeMillis();
        if (endMsSinceEpoch >= 0 && now > endMsSinceEpoch && !waitingForShutdown) {
            NexmarkUtils.console("Reached end of test, cancelling job");
            try {
                cancelJob = true;
                job.cancel();
            } catch (IOException e) {
                throw new RuntimeException("Unable to cancel main job: ", e);
            }
            if (publisherResult != null) {
                try {
                    publisherJob.cancel();
                } catch (IOException e) {
                    throw new RuntimeException("Unable to cancel publisher job: ", e);
                }
                publisherCancelled = true;
            }
            waitingForShutdown = true;
        }

        PipelineResult.State state = job.getState();
        NexmarkUtils.console("%s %s%s", state, queryName, waitingForShutdown ? " (waiting for shutdown)" : "");

        NexmarkPerf currPerf;
        if (configuration.debug) {
            currPerf = currentPerf(startMsSinceEpoch, now, job, snapshots, query.eventMonitor,
                    query.resultMonitor);
        } else {
            currPerf = null;
        }

        if (perf == null || perf.anyActivity(currPerf)) {
            lastActivityMsSinceEpoch = now;
        }

        if (options.isStreaming() && !waitingForShutdown) {
            Duration quietFor = new Duration(lastActivityMsSinceEpoch, now);
            long fatalCount = new MetricsReader(job, query.getName()).getCounterMetric("fatal");

            if (fatalCount == -1) {
                fatalCount = 0;
            }

            if (fatalCount > 0) {
                NexmarkUtils.console("ERROR: job has fatal errors, cancelling.");
                errors.add(String.format("Pipeline reported %s fatal errors", fatalCount));
                waitingForShutdown = true;
                cancelJob = true;
            } else if (configuration.debug && configuration.numEvents > 0
                    && currPerf.numEvents == configuration.numEvents && currPerf.numResults >= 0
                    && quietFor.isLongerThan(DONE_DELAY)) {
                NexmarkUtils.console("streaming query appears to have finished waiting for completion.");
                waitingForShutdown = true;
            } else if (quietFor.isLongerThan(STUCK_TERMINATE_DELAY)) {
                NexmarkUtils.console(
                        "ERROR: streaming query appears to have been stuck for %d minutes, cancelling job.",
                        quietFor.getStandardMinutes());
                errors.add(String.format("Cancelling streaming job since it appeared stuck for %d min.",
                        quietFor.getStandardMinutes()));
                waitingForShutdown = true;
                cancelJob = true;
            } else if (quietFor.isLongerThan(STUCK_WARNING_DELAY)) {
                NexmarkUtils.console("WARNING: streaming query appears to have been stuck for %d min.",
                        quietFor.getStandardMinutes());
            }

            if (cancelJob) {
                try {
                    job.cancel();
                } catch (IOException e) {
                    throw new RuntimeException("Unable to cancel main job: ", e);
                }
            }
        }

        perf = currPerf;

        boolean running = true;
        switch (state) {
        case UNKNOWN:
        case UNRECOGNIZED:
        case STOPPED:
        case RUNNING:
            // Keep going.
            break;
        case DONE:
            // All done.
            running = false;
            break;
        case CANCELLED:
            running = false;
            if (!cancelJob) {
                errors.add("Job was unexpectedly cancelled");
            }
            break;
        case FAILED:
        case UPDATED:
            // Abnormal termination.
            running = false;
            errors.add("Job was unexpectedly updated");
            break;
        }

        if (!running) {
            break;
        }

        if (lastActivityMsSinceEpoch == now) {
            NexmarkUtils.console("new perf %s", perf);
        } else {
            NexmarkUtils.console("no activity");
        }

        try {
            Thread.sleep(PERF_DELAY.getMillis());
        } catch (InterruptedException e) {
            Thread.interrupted();
            NexmarkUtils.console("Interrupted: pipeline is still running");
        }
    }

    perf.errors = errors;
    perf.snapshots = snapshots;

    if (publisherResult != null) {
        NexmarkUtils.console("Shutting down publisher pipeline.");
        try {
            if (!publisherCancelled) {
                publisherJob.cancel();
            }
            publisherJob.waitUntilFinish(Duration.standardMinutes(5));
        } catch (IOException e) {
            throw new RuntimeException("Unable to cancel publisher job: ", e);
        }
    }

    return perf;
}

From source file: org.apache.hadoop.dynamodb.DynamoDBClient.java

License: Open Source License

private DynamoDBFibonacciRetryer getRetryDriver() {
    return new DynamoDBFibonacciRetryer(Duration.standardMinutes(DEFAULT_RETRY_DURATION));
}

From source file: org.apache.hadoop.dynamodb.IopsController.java

License: Open Source License

/**
 * This method generates a random duration between 5 and 10 minutes. This is the duration used
 * to get the updated capacity unit information from the table.
 */
private Duration getUpdateDuration() {
    Random random = new Random(System.currentTimeMillis());
    long randomDuration = random.nextInt(5 * 60 * 1000);
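    // Duration.plus(long) treats its argument as milliseconds, so the result is between 5 and 10 minutes.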
    return Duration.standardMinutes(5).plus(randomDuration);
}

From source file: org.atlasapi.equiv.EquivModule.java

License: Apache License

private ContentEquivalenceUpdater.Builder<Item> standardItemUpdater(Set<Publisher> acceptablePublishers,
        Set<? extends EquivalenceScorer<Item>> scorers, Predicate<? super Broadcast> filter) {
    return ContentEquivalenceUpdater.<Item>builder()
            .withGenerators(ImmutableSet.<EquivalenceGenerator<Item>>of(
                    new BroadcastMatchingItemEquivalenceGenerator(scheduleResolver, channelResolver,
                            acceptablePublishers, Duration.standardMinutes(10), filter)))
            .withScorers(scorers).withCombiner(new NullScoreAwareAveragingCombiner<Item>())
            .withFilter(this.<Item>standardFilter())
            .withExtractor(PercentThresholdEquivalenceExtractor.<Item>moreThanPercent(90))
            .withHandler((EquivalenceResultHandler<Item>) new BroadcastingEquivalenceResultHandler<Item>(
                    ImmutableList.of(
                            EpisodeFilteringEquivalenceResultHandler
                                    .relaxed(new LookupWritingEquivalenceHandler<Item>(lookupWriter,
                                            acceptablePublishers), equivSummaryStore),
                            new ResultWritingEquivalenceHandler<Item>(equivalenceResultStore()),
                            new EquivalenceSummaryWritingHandler<Item>(equivSummaryStore),
                            new MessageQueueingResultHandler<Item>(equivAssertDestination(),
                                    acceptablePublishers))));
}

From source file: org.atlasapi.equiv.EquivModule.java

License: Apache License

private SourceSpecificEquivalenceUpdater roviUpdater(Publisher roviSource,
        ImmutableSet<Publisher> roviMatchPublishers) {
    SourceSpecificEquivalenceUpdater roviUpdater = SourceSpecificEquivalenceUpdater.builder(roviSource)
            .withItemUpdater(ContentEquivalenceUpdater.<Item>builder()
                    .withGenerators(ImmutableSet.of(
                            new BroadcastMatchingItemEquivalenceGenerator(scheduleResolver, channelResolver,
                                    roviMatchPublishers, Duration.standardMinutes(10)),
                            new ContainerCandidatesItemEquivalenceGenerator(contentResolver, equivSummaryStore),
                            new FilmEquivalenceGenerator(searchResolver, roviMatchPublishers, true)))
                    .withScorers(ImmutableSet.of(new TitleMatchingItemScorer(), new SequenceItemScorer()))
                    .withCombiner(new RequiredScoreFilteringCombiner<Item>(
                            new NullScoreAwareAveragingCombiner<Item>(), TitleMatchingItemScorer.NAME))
                    .withFilter(this.<Item>standardFilter())
                    .withExtractor(PercentThresholdEquivalenceExtractor.<Item>moreThanPercent(90))
                    .withHandler(new BroadcastingEquivalenceResultHandler<Item>(ImmutableList.of(
                            EpisodeFilteringEquivalenceResultHandler
                                    .strict(new LookupWritingEquivalenceHandler<Item>(lookupWriter,
                                            roviMatchPublishers), equivSummaryStore),
                            new ResultWritingEquivalenceHandler<Item>(equivalenceResultStore()),
                            new EquivalenceSummaryWritingHandler<Item>(equivSummaryStore))))
                    .build())
            .withNonTopLevelContainerUpdater(NullEquivalenceUpdater.<Container>get())
            .withTopLevelContainerUpdater(topLevelContainerUpdater(roviMatchPublishers)).build();
    return roviUpdater;
}