Example usage for org.joda.time Duration isLongerThan

List of usage examples for org.joda.time Duration isLongerThan

Introduction

On this page you can find usage examples for org.joda.time.Duration.isLongerThan.

Prototype

public boolean isLongerThan(ReadableDuration duration) 

Source Link

Document

Is the length of this duration longer than the duration passed in.
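
A minimal, self-contained sketch of the method in isolation (the class name IsLongerThanDemo and the values below are made up for illustration). The comparison considers only the millisecond length of each duration; per the Joda-Time Javadoc, a null argument is treated as a duration of zero length.

import org.joda.time.Duration;

public class IsLongerThanDemo {
    public static void main(String[] args) {
        Duration twoMinutes = Duration.standardMinutes(2);
        Duration ninetySeconds = Duration.standardSeconds(90);

        // Compares only the millisecond lengths of the two durations
        System.out.println(twoMinutes.isLongerThan(ninetySeconds)); // true
        System.out.println(ninetySeconds.isLongerThan(twoMinutes)); // false
        System.out.println(twoMinutes.isLongerThan(twoMinutes));    // false: equal is not longer

        // null is treated as a zero-length duration
        System.out.println(twoMinutes.isLongerThan(null));          // true
    }
}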

Usage

From source file:com.mirth.connect.server.util.PasswordRequirementsChecker.java

License:Open Source License

private String checkReusePeriod(List<Credentials> previousCredentials, String plainPassword, int reusePeriod) {
    // Return without checking if the reuse policies are off
    if (reusePeriod == 0) {
        return null;
    }

    UserController userController = ControllerFactory.getFactory().createUserController();

    // Let -1 mean the duration is infinite
    Duration reusePeriodDuration = null;
    if (reusePeriod != -1) {
        reusePeriodDuration = Duration.standardDays(reusePeriod);
    }

    for (Credentials credentials : previousCredentials) {
        boolean checkPassword = false;
        if (reusePeriodDuration == null) {
            checkPassword = true;
        } else {
            checkPassword = reusePeriodDuration.isLongerThan(
                    new Duration(credentials.getPasswordDate().getTimeInMillis(), System.currentTimeMillis()));
        }

        if (checkPassword && userController.checkPassword(plainPassword, credentials.getPassword())) {
            if (reusePeriod == -1) {
                return "You cannot reuse the same password.";
            }
            return "You cannot reuse the same password within " + reusePeriod + " days.";
        }
    }

    return null;
}

From source file:com.tomtom.speedtools.metrics.MetricsCollector.java

License:Apache License

public MetricsCollector(@Nonnull final Duration totalMetricDuration, final int maxEntries) {
    assert totalMetricDuration != null;
    assert totalMetricDuration.isLongerThan(Duration.millis(1));
    assert maxEntries > 0;
    this.totalMetricDuration = totalMetricDuration;
    this.timeSlotDuration = Duration.millis(totalMetricDuration.getMillis() / maxEntries);
    this.values = new ArrayDeque<>();
}

From source file:com.vaushell.superpipes.nodes.buffer.N_Buffer.java

License:Open Source License

private Duration getTimeToWait(final DateTime from) {
    // Best slot
    Duration minDuration;
    if (slots.isEmpty()) {
        minDuration = new Duration(0L);
    } else {
        minDuration = null;
        for (final Slot slot : slots) {
            final Duration duration = slot.getSmallestDiff(from);
            if (minDuration == null || duration.isShorterThan(minDuration)) {
                minDuration = duration;

                if (minDuration.getMillis() <= 0L) {
                    break;
                }
            }
        }
    }

    // Anti burst
    if (getProperties().containsKey("flow-limit") && lastWrite != null) {
        final Duration diff = new Duration(lastWrite, from);

        final Duration toAdd = getProperties().getConfigDuration("flow-limit").minus(diff);
        if (toAdd.isLongerThan(minDuration)) {
            minDuration = toAdd;
        }
    }

    // First message
    if (!messageIDs.isEmpty()) {
        final long firstID = messageIDs.first();
        final DateTime first = new DateTime(firstID);

        if (first.isAfter(from)) {
            final Duration diff = new Duration(from, first);
            if (diff.isLongerThan(minDuration)) {
                minDuration = diff;
            }
        }
    }

    // Result
    return minDuration;
}

From source file:de.sub.goobi.helper.tasks.TaskSitter.java

License:Open Source License

/**
 * The function run() examines the task list, deletes threads that have
 * died, replaces threads that are to be restarted by new copies of
 * themselves and finally starts new threads up to the given limit.
 *
 * <p>
 * Several limits are configurable: There are both limits in number and in
 * time for successfully finished or erroneous threads which can be set in
 * the configuration. There are internal default values for these settings
 * too, which will be applied in case of missing configuration entries.
 * Since zombie processes will still occupy all their resources and aren't
 * available for garbage collection, these values have been chosen to be
 * rather restrictive. For the limit on auto-starting threads, see
 * {@link #setAutoRunningThreads(boolean)}.
 * </p>
 *
 * <p>
 * If the task list is empty, the method will exit without further delay,
 * otherwise it will initialise its variables and read the configuration.
 * Reading the configuration is done again in each iteration so
 * configuration changes will propagate here.
 * </p>
 *
 * <p>
 * Then the function iterates over the task list and takes care of each
 * task. To be able to modify the list in passing, we need a
 * {@link java.util.ListIterator} here.
 * </p>
 *
 * <p>
 * Running tasks reduce the clearance to run new tasks. (However, the
 * clearance must not become negative.) New tasks will be added to the
 * launch list, except if they have already been marked for removal, of
 * course. If a task has terminated, it is handled as specified by its
 * behaviour variable: All tasks that are marked DELETE_IMMEDIATELY will
 * instantly be disposed of; otherwise, they will be kept for as long as
 * configured and only be removed once their dead body is older than that. Tasks
 * marked PREPARE_FOR_RESTART will be replaced (because a
 * {@link java.lang.Thread} cannot be started a second time) by a copy of
 * them.
 * </p>
 *
 * <p>
 * If a ConcurrentModificationException arises during list examination, the
 * method will behave like a polite servant and retire silently until the
 * lordship has scarpered. This is no great loss, because the method will be
 * started again every few seconds.
 * </p>
 *
 * <p>
 * After having finished iterating, the method will reduce the absolute
 * number of expired threads as configured. (Since new threads will be added
 * to the bottom of the list and we therefore want to remove older ones
 * top-down, we cannot do this before we know their count, and thus we cannot
 * do it while iterating.) Last, new threads will be started up to the
 * remaining available clearance.
 * </p>
 *
 * @see java.lang.Runnable#run()
 */
@Override
public void run() {
    TaskManager taskManager = TaskManager.singleton();
    if (taskManager.taskList.size() == 0) {
        return;
    }

    LinkedList<EmptyTask> launchableThreads = new LinkedList<>();
    LinkedList<EmptyTask> finishedThreads = new LinkedList<>();
    LinkedList<EmptyTask> failedThreads = new LinkedList<>();
    int availableClearance = autoRunLimit;

    int successfulMaxCount = ConfigCore.getIntParameter("taskManager.keepThreads.successful.count",
            KEEP_SUCCESSFUL);
    int failedMaxCount = ConfigCore.getIntParameter("taskManager.keepThreads.failed.count", KEEP_FAILED);
    Duration successfulMaxAge = ConfigCore.getDurationParameter("taskManager.keepThreads.successful.minutes",
            TimeUnit.MINUTES, KEEP_SUCCESSFUL_MINS);
    Duration failedMaxAge = ConfigCore.getDurationParameter("taskManager.keepThreads.failed.minutes",
            TimeUnit.MINUTES, KEEP_FAILED_MINS);

    ListIterator<EmptyTask> position = taskManager.taskList.listIterator();
    EmptyTask task;
    try {
        while (position.hasNext()) {
            task = position.next();
            switch (task.getTaskState()) {
            case WORKING:
            case STOPPING:
                availableClearance = Math.max(availableClearance - 1, 0);
                break;
            case NEW:
                if (Behaviour.DELETE_IMMEDIATELY.equals(task.getBehaviour())) {
                    position.remove();
                } else {
                    launchableThreads.addLast(task);
                }
                break;
            default: // cases STOPPED, FINISHED, CRASHED
                switch (task.getBehaviour()) {
                case DELETE_IMMEDIATELY:
                    position.remove();
                    break;
                default: // case KEEP_FOR_A_WHILE
                    boolean taskFinishedSuccessfully = task.getException() == null;
                    Duration durationDead = task.getDurationDead();
                    if (durationDead == null) {
                        task.setTimeOfDeath();
                    } else if (durationDead
                            .isLongerThan(taskFinishedSuccessfully ? successfulMaxAge : failedMaxAge)) {
                        position.remove();
                        break;
                    }
                    if (taskFinishedSuccessfully) {
                        finishedThreads.add(task);
                    } else {
                        failedThreads.add(task);
                    }
                    break;
                case PREPARE_FOR_RESTART:
                    EmptyTask replacement = task.replace();
                    if (replacement != null) {
                        position.set(replacement);
                        launchableThreads.addLast(replacement);
                    }
                    break;
                }
            }
        }
    } catch (ConcurrentModificationException e) {
        return;
    }

    while (finishedThreads.size() > successfulMaxCount && (task = finishedThreads.pollFirst()) != null) {
        taskManager.taskList.remove(task);
    }

    while (failedThreads.size() > failedMaxCount && (task = failedThreads.pollFirst()) != null) {
        taskManager.taskList.remove(task);
    }

    while (launchableThreads.size() > availableClearance) {
        launchableThreads.removeLast();
    }
    while ((task = launchableThreads.pollFirst()) != null) {
        task.start();
    }
}

From source file:google.registry.rde.PendingDepositChecker.java

License:Open Source License

private ImmutableSetMultimap<String, PendingDeposit> getTldsAndWatermarksPendingDeposit(RdeMode mode,
        CursorType cursorType, Duration interval, DateTime startingPoint) {
    checkArgument(interval.isLongerThan(Duration.ZERO));
    ImmutableSetMultimap.Builder<String, PendingDeposit> builder = new ImmutableSetMultimap.Builder<>();
    DateTime now = clock.nowUtc();
    for (String tld : Registries.getTldsOfType(TldType.REAL)) {
        Registry registry = Registry.get(tld);
        if (!registry.getEscrowEnabled()) {
            continue;
        }
        // Avoid creating a transaction unless absolutely necessary.
        Cursor cursor = ofy().load().key(Cursor.createKey(cursorType, registry)).now();
        DateTime cursorValue = (cursor != null ? cursor.getCursorTime() : startingPoint);
        if (isBeforeOrAt(cursorValue, now)) {
            DateTime watermark = (cursor != null ? cursor.getCursorTime()
                    : transactionallyInitializeCursor(registry, cursorType, startingPoint));
            if (isBeforeOrAt(watermark, now)) {
                builder.put(tld, PendingDeposit.create(tld, watermark, mode, cursorType, interval));
            }
        }
    }
    return builder.build();
}

From source file:io.druid.indexing.coordinator.scaling.SimpleResourceManagementStrategy.java

License:Open Source License

@Override
public boolean doProvision(Collection<RemoteTaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers) {
    final WorkerSetupData workerSetupData = workerSetupdDataRef.get();

    final String minVersion = workerSetupData.getMinVersion() == null ? config.getWorkerVersion()
            : workerSetupData.getMinVersion();
    int maxNumWorkers = workerSetupData.getMaxNumWorkers();

    int currValidWorkers = 0;
    for (ZkWorker zkWorker : zkWorkers) {
        if (zkWorker.isValidVersion(minVersion)) {
            currValidWorkers++;
        }
    }

    if (currValidWorkers >= maxNumWorkers) {
        log.debug("Cannot scale anymore. Num workers = %d, Max num workers = %d", zkWorkers.size(),
                workerSetupdDataRef.get().getMaxNumWorkers());
        return false;
    }

    List<String> workerNodeIds = autoScalingStrategy.ipToIdLookup(Lists
            .newArrayList(Iterables.<ZkWorker, String>transform(zkWorkers, new Function<ZkWorker, String>() {
                @Override
                public String apply(ZkWorker input) {
                    return input.getWorker().getIp();
                }
            })));

    currentlyProvisioning.removeAll(workerNodeIds);
    boolean nothingProvisioning = currentlyProvisioning.isEmpty();

    if (nothingProvisioning) {
        if (hasTaskPendingBeyondThreshold(pendingTasks)) {
            AutoScalingData provisioned = autoScalingStrategy.provision();

            if (provisioned != null) {
                currentlyProvisioning.addAll(provisioned.getNodeIds());
                lastProvisionTime = new DateTime();
                scalingStats.addProvisionEvent(provisioned);

                return true;
            }
        }
    } else {
        Duration durSinceLastProvision = new Duration(lastProvisionTime, new DateTime());

        log.info(
                "%s still provisioning. Wait for all provisioned nodes to complete before requesting new worker. Current wait time: %s",
                currentlyProvisioning, durSinceLastProvision);

        if (durSinceLastProvision.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
            log.makeAlert("Worker node provisioning taking too long!")
                    .addData("millisSinceLastProvision", durSinceLastProvision.getMillis())
                    .addData("provisioningCount", currentlyProvisioning.size()).emit();

            List<String> nodeIps = autoScalingStrategy.idToIpLookup(Lists.newArrayList(currentlyProvisioning));
            autoScalingStrategy.terminate(nodeIps);
            currentlyProvisioning.clear();
        }
    }

    return false;
}

From source file:io.druid.indexing.coordinator.scaling.SimpleResourceManagementStrategy.java

License:Open Source License

@Override
public boolean doTerminate(Collection<RemoteTaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers) {
    Set<String> workerNodeIds = Sets.newHashSet(autoScalingStrategy
            .ipToIdLookup(Lists.newArrayList(Iterables.transform(zkWorkers, new Function<ZkWorker, String>() {
                @Override
                public String apply(ZkWorker input) {
                    return input.getWorker().getIp();
                }
            }))));

    Set<String> stillExisting = Sets.newHashSet();
    for (String s : currentlyTerminating) {
        if (workerNodeIds.contains(s)) {
            stillExisting.add(s);
        }
    }
    currentlyTerminating.clear();
    currentlyTerminating.addAll(stillExisting);
    boolean nothingTerminating = currentlyTerminating.isEmpty();

    if (nothingTerminating) {
        final int minNumWorkers = workerSetupdDataRef.get().getMinNumWorkers();
        if (zkWorkers.size() <= minNumWorkers) {
            log.info("Only [%d <= %d] nodes in the cluster, not terminating anything.", zkWorkers.size(),
                    minNumWorkers);
            return false;
        }

        List<ZkWorker> thoseLazyWorkers = Lists
                .newArrayList(FunctionalIterable.create(zkWorkers).filter(new Predicate<ZkWorker>() {
                    @Override
                    public boolean apply(ZkWorker input) {
                        return input.getRunningTasks().isEmpty() && System.currentTimeMillis()
                                - input.getLastCompletedTaskTime().getMillis() >= config.getWorkerIdleTimeout()
                                        .getMillis();
                    }
                }));

        int maxPossibleNodesTerminated = zkWorkers.size() - minNumWorkers;
        int numNodesToTerminate = Math.min(maxPossibleNodesTerminated, thoseLazyWorkers.size());
        if (numNodesToTerminate <= 0) {
            log.info("Found no nodes to terminate.");
            return false;
        }

        AutoScalingData terminated = autoScalingStrategy.terminate(Lists
                .transform(thoseLazyWorkers.subList(0, numNodesToTerminate), new Function<ZkWorker, String>() {
                    @Override
                    public String apply(ZkWorker input) {
                        return input.getWorker().getIp();
                    }
                }));

        if (terminated != null) {
            currentlyTerminating.addAll(terminated.getNodeIds());
            lastTerminateTime = new DateTime();
            scalingStats.addTerminateEvent(terminated);

            return true;
        }
    } else {
        Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());

        log.info("%s still terminating. Wait for all nodes to terminate before trying again.",
                currentlyTerminating);

        if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
            log.makeAlert("Worker node termination taking too long!")
                    .addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
                    .addData("terminatingCount", currentlyTerminating.size()).emit();

            currentlyTerminating.clear();
        }
    }

    return false;
}

From source file:io.druid.indexing.coordinator.scaling.SimpleResourceManagementStrategy.java

License:Open Source License

private boolean hasTaskPendingBeyondThreshold(Collection<RemoteTaskRunnerWorkItem> pendingTasks) {
    long now = System.currentTimeMillis();
    for (TaskRunnerWorkItem pendingTask : pendingTasks) {
        final Duration durationSinceInsertion = new Duration(pendingTask.getQueueInsertionTime().getMillis(),
                now);
        final Duration timeoutDuration = config.getPendingTaskTimeout().toStandardDuration();
        if (durationSinceInsertion.isEqual(timeoutDuration)
                || durationSinceInsertion.isLongerThan(timeoutDuration)) {
            return true;
        }
    }
    return false;
}

From source file:io.druid.indexing.overlord.autoscaling.PendingTaskBasedWorkerResourceManagementStrategy.java

License:Apache License

@Override
public boolean doProvision(WorkerTaskRunner runner) {
    Collection<Task> pendingTasks = runner.getPendingTaskPayloads();
    Collection<ImmutableWorkerInfo> workers = runner.getWorkers();
    synchronized (lock) {
        boolean didProvision = false;
        final WorkerBehaviorConfig workerConfig = workerConfigRef.get();
        if (workerConfig == null || workerConfig.getAutoScaler() == null) {
            log.error("No workerConfig available, cannot provision new workers.");
            return false;
        }

        final Collection<String> workerNodeIds = getWorkerNodeIDs(
                Collections2.transform(workers, new Function<ImmutableWorkerInfo, Worker>() {
                    @Override
                    public Worker apply(ImmutableWorkerInfo input) {
                        return input.getWorker();
                    }
                }), workerConfig);
        currentlyProvisioning.removeAll(workerNodeIds);
        if (currentlyProvisioning.isEmpty()) {
            int want = getScaleUpNodeCount(runner.getConfig(), workerConfig, pendingTasks, workers);
            while (want > 0) {
                final AutoScalingData provisioned = workerConfig.getAutoScaler().provision();
                final List<String> newNodes = provisioned == null ? ImmutableList.<String>of()
                        : provisioned.getNodeIds();
                if (newNodes.isEmpty()) {
                    log.warn("NewNodes is empty, returning from provision loop");
                    break;
                } else {
                    currentlyProvisioning.addAll(newNodes);
                    lastProvisionTime = new DateTime();
                    scalingStats.addProvisionEvent(provisioned);
                    want -= provisioned.getNodeIds().size();
                    didProvision = true;
                }
            }
        } else {
            Duration durSinceLastProvision = new Duration(lastProvisionTime, new DateTime());
            log.info("%s provisioning. Current wait time: %s", currentlyProvisioning, durSinceLastProvision);
            if (durSinceLastProvision.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node provisioning taking too long!")
                        .addData("millisSinceLastProvision", durSinceLastProvision.getMillis())
                        .addData("provisioningCount", currentlyProvisioning.size()).emit();

                workerConfig.getAutoScaler().terminateWithIds(Lists.newArrayList(currentlyProvisioning));
                currentlyProvisioning.clear();
            }
        }

        return didProvision;
    }
}

From source file:io.druid.indexing.overlord.autoscaling.PendingTaskBasedWorkerResourceManagementStrategy.java

License:Apache License

@Override
public boolean doTerminate(WorkerTaskRunner runner) {
    Collection<ImmutableWorkerInfo> zkWorkers = runner.getWorkers();
    synchronized (lock) {
        final WorkerBehaviorConfig workerConfig = workerConfigRef.get();
        if (workerConfig == null) {
            log.warn("No workerConfig available, cannot terminate workers.");
            return false;
        }

        if (!currentlyProvisioning.isEmpty()) {
            log.debug("Already provisioning nodes, Not Terminating any nodes.");
            return false;
        }

        boolean didTerminate = false;
        final Collection<String> workerNodeIds = getWorkerNodeIDs(runner.getLazyWorkers(), workerConfig);
        final Set<String> stillExisting = Sets.newHashSet();
        for (String s : currentlyTerminating) {
            if (workerNodeIds.contains(s)) {
                stillExisting.add(s);
            }
        }
        currentlyTerminating.clear();
        currentlyTerminating.addAll(stillExisting);

        if (currentlyTerminating.isEmpty()) {
            final int maxWorkersToTerminate = maxWorkersToTerminate(zkWorkers, workerConfig);
            final Predicate<ImmutableWorkerInfo> isLazyWorker = ResourceManagementUtil
                    .createLazyWorkerPredicate(config);
            final List<String> laziestWorkerIps = Lists.newArrayList(
                    Collections2.transform(runner.markWorkersLazy(isLazyWorker, maxWorkersToTerminate),
                            new Function<Worker, String>() {
                                @Override
                                public String apply(Worker zkWorker) {
                                    return zkWorker.getIp();
                                }
                            }));
            if (laziestWorkerIps.isEmpty()) {
                log.debug("Found no lazy workers");
            } else {
                log.info("Terminating %,d lazy workers: %s", laziestWorkerIps.size(),
                        Joiner.on(", ").join(laziestWorkerIps));

                final AutoScalingData terminated = workerConfig.getAutoScaler().terminate(laziestWorkerIps);
                if (terminated != null) {
                    currentlyTerminating.addAll(terminated.getNodeIds());
                    lastTerminateTime = new DateTime();
                    scalingStats.addTerminateEvent(terminated);
                    didTerminate = true;
                }
            }
        } else {
            Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());

            log.info("%s terminating. Current wait time: %s", currentlyTerminating, durSinceLastTerminate);

            if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node termination taking too long!")
                        .addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
                        .addData("terminatingCount", currentlyTerminating.size()).emit();

                currentlyTerminating.clear();
            }
        }

        return didTerminate;
    }
}