Example usage for org.joda.time.Duration.isLongerThan

List of usage examples for org.joda.time.Duration.isLongerThan

Introduction

On this page you can find example usages of org.joda.time.Duration.isLongerThan, collected from open-source projects.

Prototype

public boolean isLongerThan(ReadableDuration duration) 

Document

Checks whether the length of this duration is longer than that of the duration passed in.
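
A minimal sketch of the call in isolation (plain Joda-Time; the values are illustrative):

import org.joda.time.Duration;

public class IsLongerThanDemo {
    public static void main(String[] args) {
        Duration timeout = Duration.standardSeconds(30);
        Duration elapsed = Duration.millis(45000);

        // true: 45000 ms is longer than 30 s
        System.out.println(elapsed.isLongerThan(timeout));

        // Per the Joda-Time Javadoc, null is treated as a zero-length duration,
        // so any positive duration is longer than null.
        System.out.println(timeout.isLongerThan(null)); // true
    }
}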

Usage

From source file: com.google.cloud.dataflow.sdk.util.FluentBackoff.java

License: Apache License

/**
 * Returns a copy of this {@link FluentBackoff} that limits the total time spent in backoff
 * returned across all calls to {@link BackOff#nextBackOffMillis()}.
 *
 * <p>Does not modify this object.
 *
 * @see FluentBackoff
 */
public FluentBackoff withMaxCumulativeBackoff(Duration maxCumulativeBackoff) {
    checkArgument(maxCumulativeBackoff.isLongerThan(Duration.ZERO),
            "maxCumulativeBackoff %s must be at least 1 millisecond", maxCumulativeBackoff);
    return new FluentBackoff(exponent, initialBackoff, maxBackoff, maxCumulativeBackoff, maxRetries);
}
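
In calling code, the bounded backoff is consumed through BackOff.nextBackOffMillis(). A sketch assuming the FluentBackoff.DEFAULT instance and backoff() factory from the same SDK, with tryOperation() as a hypothetical transiently-failing action:

import java.io.IOException;
import com.google.api.client.util.BackOff;
import org.joda.time.Duration;

static void retryWithBoundedBackoff() throws IOException, InterruptedException {
    BackOff backOff = FluentBackoff.DEFAULT
            .withInitialBackoff(Duration.millis(250))
            .withMaxCumulativeBackoff(Duration.standardMinutes(5))
            .backoff();
    long sleepMillis;
    // nextBackOffMillis() returns BackOff.STOP once a limit is exhausted.
    while ((sleepMillis = backOff.nextBackOffMillis()) != BackOff.STOP) {
        if (tryOperation()) { // hypothetical; returns true on success
            return;
        }
        Thread.sleep(sleepMillis); // total sleep bounded by the cumulative cap
    }
}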

From source file: com.google.cloud.pubsub.PollingSubscriberConnection.java

License: Open Source License

private void pullMessages(final Duration backoff) {
    ListenableFuture<PullResponse> pullResult = stub
            .withDeadlineAfter(DEFAULT_TIMEOUT.getMillis(), TimeUnit.MILLISECONDS)
            .pull(PullRequest.newBuilder().setSubscription(subscription).setMaxMessages(DEFAULT_MAX_MESSAGES)
                    .setReturnImmediately(true).build());

    Futures.addCallback(pullResult, new FutureCallback<PullResponse>() {
        @Override
        public void onSuccess(PullResponse pullResponse) {
            processReceivedMessages(pullResponse.getReceivedMessagesList());
            if (pullResponse.getReceivedMessagesCount() == 0) {
                // No messages in the response; we may have caught up with the backlog,
                // so back off to avoid hammering the server.
                executor.schedule(new Runnable() {
                    @Override
                    public void run() {
                        Duration newBackoff = backoff.multipliedBy(2);
                        if (newBackoff.isLongerThan(MAX_BACKOFF)) {
                            newBackoff = MAX_BACKOFF;
                        }
                        pullMessages(newBackoff);
                    }
                }, backoff.getMillis(), TimeUnit.MILLISECONDS);
                return;
            }
            pullMessages(INITIAL_BACKOFF);
        }

        @Override
        public void onFailure(Throwable cause) {
            if (!(cause instanceof StatusRuntimeException)
                    || isRetryable(((StatusRuntimeException) cause).getStatus())) {
                logger.error("Failed to pull messages (recoverable): " + cause.getMessage(), cause);
                executor.schedule(new Runnable() {
                    @Override
                    public void run() {
                        Duration newBackoff = backoff.multipliedBy(2);
                        if (newBackoff.isLongerThan(MAX_BACKOFF)) {
                            newBackoff = MAX_BACKOFF;
                        }
                        pullMessages(newBackoff);
                    }
                }, backoff.getMillis(), TimeUnit.MILLISECONDS);
                return;
            }
            notifyFailed(cause);
        }
    });
}
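
This listing and the next both double the delay after every empty or failed pull and use isLongerThan to clamp it. The clamping step in isolation, as a minimal sketch (MAX_BACKOFF is an assumed constant):

import org.joda.time.Duration;

static final Duration MAX_BACKOFF = Duration.standardSeconds(10); // assumed cap

// Doubles the backoff without ever exceeding MAX_BACKOFF.
static Duration nextBackoff(Duration current) {
    Duration doubled = current.multipliedBy(2);
    return doubled.isLongerThan(MAX_BACKOFF) ? MAX_BACKOFF : doubled;
}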

From source file: com.google.cloud.pubsub.spi.v1.PollingSubscriberConnection.java

License: Open Source License

private void pullMessages(final Duration backoff) {
    ListenableFuture<PullResponse> pullResult = stub
            .withDeadlineAfter(DEFAULT_TIMEOUT.getMillis(), TimeUnit.MILLISECONDS)
            .pull(PullRequest.newBuilder().setSubscription(subscription).setMaxMessages(DEFAULT_MAX_MESSAGES)
                    .setReturnImmediately(true).build());

    Futures.addCallback(pullResult, new FutureCallback<PullResponse>() {
        @Override
        public void onSuccess(PullResponse pullResponse) {
            messageDispatcher.processReceivedMessages(pullResponse.getReceivedMessagesList());
            if (pullResponse.getReceivedMessagesCount() == 0) {
                // No messages in the response; we may have caught up with the backlog,
                // so back off to avoid hammering the server.
                executor.schedule(new Runnable() {
                    @Override
                    public void run() {
                        Duration newBackoff = backoff.multipliedBy(2);
                        if (newBackoff.isLongerThan(MAX_BACKOFF)) {
                            newBackoff = MAX_BACKOFF;
                        }
                        pullMessages(newBackoff);
                    }
                }, backoff.getMillis(), TimeUnit.MILLISECONDS);
                return;
            }
            pullMessages(INITIAL_BACKOFF);
        }

        @Override
        public void onFailure(Throwable cause) {
            if (!(cause instanceof StatusRuntimeException)
                    || isRetryable(((StatusRuntimeException) cause).getStatus())) {
                logger.log(Level.SEVERE, "Failed to pull messages (recoverable): ", cause);
                executor.schedule(new Runnable() {
                    @Override
                    public void run() {
                        Duration newBackoff = backoff.multipliedBy(2);
                        if (newBackoff.isLongerThan(MAX_BACKOFF)) {
                            newBackoff = MAX_BACKOFF;
                        }
                        pullMessages(newBackoff);
                    }
                }, backoff.getMillis(), TimeUnit.MILLISECONDS);
                return;
            }
            notifyFailed(cause);
        }
    });
}

From source file: com.mastfrog.acteur.util.CacheControl.java

License: Open Source License

public boolean isExpired() {
    if (contains(CacheControlTypes.no_cache) || contains(CacheControlTypes.no_store)) {
        return true;
    }
    Long maxAgeSeconds = get(CacheControlTypes.max_age);
    if (maxAgeSeconds != null) {
        // Elapsed time since creation; Duration takes (start, end), so creationTime comes first
        Duration age = new Duration(creationTime, new DateTime());
        Duration target = Duration.standardSeconds(maxAgeSeconds);
        if (age.isLongerThan(target)) {
            return true;
        }
    }
    return false;
}
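
The same freshness test, reduced to a hypothetical helper: an entry is stale once its age exceeds the max-age directive.

import org.joda.time.DateTime;
import org.joda.time.Duration;

static boolean isStale(DateTime createdAt, long maxAgeSeconds) {
    Duration age = new Duration(createdAt, new DateTime());
    return age.isLongerThan(Duration.standardSeconds(maxAgeSeconds));
}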

From source file: com.metamx.druid.indexing.coordinator.scaling.SimpleResourceManagementStrategy.java

License: Open Source License

@Override
public boolean doProvision(Collection<RemoteTaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers) {
    final WorkerSetupData workerSetupData = workerSetupdDataRef.get();

    final String minVersion = workerSetupData.getMinVersion() == null ? config.getWorkerVersion()
            : workerSetupData.getMinVersion();
    int maxNumWorkers = workerSetupData.getMaxNumWorkers();

    int currValidWorkers = 0;
    for (ZkWorker zkWorker : zkWorkers) {
        if (zkWorker.isValidVersion(minVersion)) {
            currValidWorkers++;
        }
    }

    if (currValidWorkers >= maxNumWorkers) {
        log.debug("Cannot scale anymore. Num workers = %d, Max num workers = %d", zkWorkers.size(),
                workerSetupdDataRef.get().getMaxNumWorkers());
        return false;
    }

    List<String> workerNodeIds = autoScalingStrategy
            .ipToIdLookup(Lists.newArrayList(Iterables.transform(zkWorkers, new Function<ZkWorker, String>() {
                @Override
                public String apply(ZkWorker input) {
                    return input.getWorker().getIp();
                }
            })));

    currentlyProvisioning.removeAll(workerNodeIds);
    boolean nothingProvisioning = currentlyProvisioning.isEmpty();

    if (nothingProvisioning) {
        if (hasTaskPendingBeyondThreshold(pendingTasks)) {
            AutoScalingData provisioned = autoScalingStrategy.provision();

            if (provisioned != null) {
                currentlyProvisioning.addAll(provisioned.getNodeIds());
                lastProvisionTime = new DateTime();
                scalingStats.addProvisionEvent(provisioned);

                return true;
            }
        }
    } else {
        Duration durSinceLastProvision = new Duration(lastProvisionTime, new DateTime());

        log.info(
                "%s still provisioning. Wait for all provisioned nodes to complete before requesting new worker. Current wait time: %s",
                currentlyProvisioning, durSinceLastProvision);

        if (durSinceLastProvision.isLongerThan(config.getMaxScalingDuration())) {
            log.makeAlert("Worker node provisioning taking too long!")
                    .addData("millisSinceLastProvision", durSinceLastProvision.getMillis())
                    .addData("provisioningCount", currentlyProvisioning.size()).emit();

            List<String> nodeIps = autoScalingStrategy.idToIpLookup(Lists.newArrayList(currentlyProvisioning));
            autoScalingStrategy.terminate(nodeIps);
            currentlyProvisioning.clear();
        }
    }

    return false;
}
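
Here isLongerThan acts as a watchdog: once nodes have been provisioning for longer than the configured ceiling, the strategy alerts, terminates the stragglers, and clears its bookkeeping. The core of the check, as a minimal sketch with assumed names:

import org.joda.time.DateTime;
import org.joda.time.Duration;

// lastProvisionTime and maxScalingDuration mirror the fields used above.
static boolean scalingStuck(DateTime lastProvisionTime, Duration maxScalingDuration) {
    Duration waited = new Duration(lastProvisionTime, new DateTime());
    return waited.isLongerThan(maxScalingDuration);
}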

From source file: com.metamx.druid.indexing.coordinator.scaling.SimpleResourceManagementStrategy.java

License: Open Source License

@Override
public boolean doTerminate(Collection<RemoteTaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers) {
    Set<String> workerNodeIds = Sets.newHashSet(autoScalingStrategy
            .ipToIdLookup(Lists.newArrayList(Iterables.transform(zkWorkers, new Function<ZkWorker, String>() {
                @Override
                public String apply(ZkWorker input) {
                    return input.getWorker().getIp();
                }
            }))));

    Set<String> stillExisting = Sets.newHashSet();
    for (String s : currentlyTerminating) {
        if (workerNodeIds.contains(s)) {
            stillExisting.add(s);
        }
    }
    currentlyTerminating.clear();
    currentlyTerminating.addAll(stillExisting);
    boolean nothingTerminating = currentlyTerminating.isEmpty();

    if (nothingTerminating) {
        final int minNumWorkers = workerSetupdDataRef.get().getMinNumWorkers();
        if (zkWorkers.size() <= minNumWorkers) {
            log.info("Only [%d <= %d] nodes in the cluster, not terminating anything.", zkWorkers.size(),
                    minNumWorkers);
            return false;
        }

        List<ZkWorker> thoseLazyWorkers = Lists
                .newArrayList(FunctionalIterable.create(zkWorkers).filter(new Predicate<ZkWorker>() {
                    @Override
                    public boolean apply(ZkWorker input) {
                        return input.getRunningTasks().isEmpty() && System.currentTimeMillis()
                                - input.getLastCompletedTaskTime().getMillis() >= config
                                        .getMaxWorkerIdleTimeMillisBeforeDeletion();
                    }
                }));

        int maxPossibleNodesTerminated = zkWorkers.size() - minNumWorkers;
        int numNodesToTerminate = Math.min(maxPossibleNodesTerminated, thoseLazyWorkers.size());
        if (numNodesToTerminate <= 0) {
            log.info("Found no nodes to terminate.");
            return false;
        }

        AutoScalingData terminated = autoScalingStrategy.terminate(Lists
                .transform(thoseLazyWorkers.subList(0, numNodesToTerminate), new Function<ZkWorker, String>() {
                    @Override
                    public String apply(ZkWorker input) {
                        return input.getWorker().getIp();
                    }
                }));

        if (terminated != null) {
            currentlyTerminating.addAll(terminated.getNodeIds());
            lastTerminateTime = new DateTime();
            scalingStats.addTerminateEvent(terminated);

            return true;
        }
    } else {
        Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());

        log.info("%s still terminating. Wait for all nodes to terminate before trying again.",
                currentlyTerminating);

        if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration())) {
            log.makeAlert("Worker node termination taking too long!")
                    .addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
                    .addData("terminatingCount", currentlyTerminating.size()).emit();

            currentlyTerminating.clear();
        }
    }

    return false;
}

From source file: com.metamx.druid.merger.coordinator.RemoteTaskRunner.java

License: Open Source License

@LifecycleStart
public void start() {
    try {
        workerPathCache.getListenable().addListener(new PathChildrenCacheListener() {
            @Override
            public void childEvent(CuratorFramework client, final PathChildrenCacheEvent event)
                    throws Exception {
                if (event.getType().equals(PathChildrenCacheEvent.Type.CHILD_ADDED)) {
                    final Worker worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    log.info("New worker[%s] found!", worker.getHost());
                    addWorker(worker);
                } else if (event.getType().equals(PathChildrenCacheEvent.Type.CHILD_REMOVED)) {
                    final Worker worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    log.info("Worker[%s] removed!", worker.getHost());
                    removeWorker(worker);
                }
            }
        });
        workerPathCache.start();

        // Schedule termination of worker nodes periodically
        Period period = new Period(config.getTerminateResourcesDuration());
        PeriodGranularity granularity = new PeriodGranularity(period,
                config.getTerminateResourcesOriginDateTime(), null);
        final long startTime = granularity.next(granularity.truncate(new DateTime().getMillis()));

        ScheduledExecutors.scheduleAtFixedRate(scheduledExec,
                new Duration(System.currentTimeMillis(), startTime), config.getTerminateResourcesDuration(),
                new Runnable() {
                    @Override
                    public void run() {
                        if (currentlyTerminating.isEmpty()) {
                            if (zkWorkers.size() <= workerSetupManager.getWorkerSetupData()
                                    .getMinNumWorkers()) {
                                return;
                            }

                            int workerCount = 0;
                            List<WorkerWrapper> thoseLazyWorkers = Lists.newArrayList();
                            for (WorkerWrapper workerWrapper : zkWorkers.values()) {
                                workerCount++;

                                if (workerCount > workerSetupManager.getWorkerSetupData().getMinNumWorkers()
                                        && workerWrapper.getRunningTasks().isEmpty()
                                        && System.currentTimeMillis()
                                                - workerWrapper.getLastCompletedTaskTime().getMillis() > config
                                                        .getMaxWorkerIdleTimeMillisBeforeDeletion()) {
                                    thoseLazyWorkers.add(workerWrapper);
                                }
                            }

                            AutoScalingData terminated = strategy.terminate(
                                    Lists.transform(thoseLazyWorkers, new Function<WorkerWrapper, String>() {
                                        @Override
                                        public String apply(WorkerWrapper input) {
                                            return input.getWorker().getIp();
                                        }
                                    }));

                            if (terminated != null) {
                                currentlyTerminating.addAll(terminated.getNodeIds());
                                lastTerminateTime = new DateTime();
                            }
                        } else {
                            // Measure from the last terminate request to now (start, end order)
                            Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());
                            if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration())) {
                                log.makeAlert("Worker node termination taking too long")
                                        .addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
                                        .addData("terminatingCount", currentlyTerminating.size()).emit();
                            }

                            log.info(
                                    "%s still terminating. Wait for all nodes to terminate before trying again.",
                                    currentlyTerminating);
                        }
                    }
                });
        started = true;
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}

From source file: com.metamx.druid.merger.coordinator.RemoteTaskRunner.java

License: Open Source License

private WorkerWrapper findWorkerForTask() {
    try {
        final MinMaxPriorityQueue<WorkerWrapper> workerQueue = MinMaxPriorityQueue
                .<WorkerWrapper>orderedBy(new Comparator<WorkerWrapper>() {
                    @Override
                    public int compare(WorkerWrapper w1, WorkerWrapper w2) {
                        return -Ints.compare(w1.getRunningTasks().size(), w2.getRunningTasks().size());
                    }
                }).create(FunctionalIterable.create(zkWorkers.values()).filter(new Predicate<WorkerWrapper>() {
                    @Override
                    public boolean apply(WorkerWrapper input) {
                        return (!input.isAtCapacity() && input.getWorker().getVersion()
                                .compareTo(workerSetupManager.getWorkerSetupData().getMinVersion()) >= 0);
                    }
                }));

        if (workerQueue.isEmpty()) {
            log.info("Worker nodes do not have capacity to run any more tasks!");

            if (currentlyProvisioning.isEmpty()) {
                AutoScalingData provisioned = strategy.provision();
                if (provisioned != null) {
                    currentlyProvisioning.addAll(provisioned.getNodeIds());
                    lastProvisionTime = new DateTime();
                }
            } else {
                // Measure from the last provision request to now (start, end order)
                Duration durSinceLastProvision = new Duration(lastProvisionTime, new DateTime());
                if (durSinceLastProvision.isLongerThan(config.getMaxScalingDuration())) {
                    log.makeAlert("Worker node provisioning taking too long")
                            .addData("millisSinceLastProvision", durSinceLastProvision.getMillis())
                            .addData("provisioningCount", currentlyProvisioning.size()).emit();
                }

                log.info(
                        "%s still provisioning. Wait for all provisioned nodes to complete before requesting new worker.",
                        currentlyProvisioning);
            }
            return null;
        }

        return workerQueue.peek();
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}

From source file: com.metamx.druid.merger.coordinator.scaling.SimpleResourceManagementStrategy.java

License: Open Source License

@Override
public boolean doProvision(Collection<TaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers) {
    if (zkWorkers.size() >= workerSetupdDataRef.get().getMaxNumWorkers()) {
        log.info("Cannot scale anymore. Num workers = %d, Max num workers = %d", zkWorkers.size(),
                workerSetupdDataRef.get().getMaxNumWorkers());
        return false;
    }

    List<String> workerNodeIds = autoScalingStrategy
            .ipToIdLookup(Lists.newArrayList(Iterables.transform(zkWorkers, new Function<ZkWorker, String>() {
                @Override
                public String apply(ZkWorker input) {
                    return input.getWorker().getIp();
                }
            })));

    currentlyProvisioning.removeAll(workerNodeIds);
    boolean nothingProvisioning = currentlyProvisioning.isEmpty();

    if (nothingProvisioning) {
        if (hasTaskPendingBeyondThreshold(pendingTasks)) {
            AutoScalingData provisioned = autoScalingStrategy.provision();

            if (provisioned != null) {
                currentlyProvisioning.addAll(provisioned.getNodeIds());
                lastProvisionTime = new DateTime();
                scalingStats.addProvisionEvent(provisioned);

                return true;
            }
        }
    } else {
        Duration durSinceLastProvision = new Duration(lastProvisionTime, new DateTime());

        log.info(
                "%s still provisioning. Wait for all provisioned nodes to complete before requesting new worker. Current wait time: %s",
                currentlyProvisioning, durSinceLastProvision);

        if (durSinceLastProvision.isLongerThan(config.getMaxScalingDuration())) {
            log.makeAlert("Worker node provisioning taking too long!")
                    .addData("millisSinceLastProvision", durSinceLastProvision.getMillis())
                    .addData("provisioningCount", currentlyProvisioning.size()).emit();

            currentlyProvisioning.clear();
        }
    }

    return false;
}

From source file: com.metamx.druid.merger.coordinator.scaling.SimpleResourceManagementStrategy.java

License: Open Source License

@Override
public boolean doTerminate(Collection<TaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers) {
    Set<String> workerNodeIds = Sets.newHashSet(autoScalingStrategy
            .ipToIdLookup(Lists.newArrayList(Iterables.transform(zkWorkers, new Function<ZkWorker, String>() {
                @Override
                public String apply(ZkWorker input) {
                    return input.getWorker().getIp();
                }
            }))));

    Set<String> stillExisting = Sets.newHashSet();
    for (String s : currentlyTerminating) {
        if (workerNodeIds.contains(s)) {
            stillExisting.add(s);
        }
    }
    currentlyTerminating.clear();
    currentlyTerminating.addAll(stillExisting);
    boolean nothingTerminating = currentlyTerminating.isEmpty();

    if (nothingTerminating) {
        final int minNumWorkers = workerSetupdDataRef.get().getMinNumWorkers();
        if (zkWorkers.size() <= minNumWorkers) {
            log.info("Only [%d <= %d] nodes in the cluster, not terminating anything.", zkWorkers.size(),
                    minNumWorkers);
            return false;
        }

        List<ZkWorker> thoseLazyWorkers = Lists
                .newArrayList(FunctionalIterable.create(zkWorkers).filter(new Predicate<ZkWorker>() {
                    @Override
                    public boolean apply(ZkWorker input) {
                        return input.getRunningTasks().isEmpty() && System.currentTimeMillis()
                                - input.getLastCompletedTaskTime().getMillis() >= config
                                        .getMaxWorkerIdleTimeMillisBeforeDeletion();
                    }
                }));

        int maxPossibleNodesTerminated = zkWorkers.size() - minNumWorkers;
        int numNodesToTerminate = Math.min(maxPossibleNodesTerminated, thoseLazyWorkers.size());
        if (numNodesToTerminate <= 0) {
            log.info("Found no nodes to terminate.");
            return false;
        }

        AutoScalingData terminated = autoScalingStrategy.terminate(Lists
                .transform(thoseLazyWorkers.subList(0, numNodesToTerminate), new Function<ZkWorker, String>() {
                    @Override
                    public String apply(ZkWorker input) {
                        return input.getWorker().getIp();
                    }
                }));

        if (terminated != null) {
            currentlyTerminating.addAll(terminated.getNodeIds());
            lastTerminateTime = new DateTime();
            scalingStats.addTerminateEvent(terminated);

            return true;
        }
    } else {
        Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());

        log.info("%s still terminating. Wait for all nodes to terminate before trying again.",
                currentlyTerminating);

        if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration())) {
            log.makeAlert("Worker node termination taking too long!")
                    .addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
                    .addData("terminatingCount", currentlyTerminating.size()).emit();

            currentlyTerminating.clear();
        }
    }

    return false;
}