Example usage for org.joda.time Duration Duration

List of usage examples for org.joda.time Duration Duration

Introduction

On this page you can find example usage for org.joda.time Duration Duration.

Prototype

public Duration(ReadableInstant start, ReadableInstant end) 

Source Link

Document

Creates a duration from the given interval endpoints.

Usage

From source file:io.cassandrareaper.resources.view.RepairRunStatus.java

License:Apache License

/**
 * Builds a REST-facing view of a repair run's state.
 *
 * <p>Besides copying the raw fields, this derives two presentation values:
 * {@code duration} (human-readable elapsed time of the run) and
 * {@code estimatedTimeOfArrival} (projected completion time, extrapolated
 * from the average time spent per repaired segment).
 */
public RepairRunStatus(UUID runId, String clusterName, String keyspaceName, Collection<String> columnFamilies,
        int segmentsRepaired, int totalSegments, RepairRun.RunState state, DateTime startTime, DateTime endTime,
        String cause, String owner, String lastEvent, DateTime creationTime, DateTime pauseTime,
        double intensity, boolean incrementalRepair, RepairParallelism repairParallelism,
        Collection<String> nodes, Collection<String> datacenters, Collection<String> blacklistedTables,
        int repairThreadCount) {

    this.id = runId;
    this.cause = cause;
    this.owner = owner;
    this.clusterName = clusterName;
    this.columnFamilies = columnFamilies;
    this.keyspaceName = keyspaceName;
    this.state = state;
    this.creationTime = creationTime;
    this.startTime = startTime;
    this.endTime = endTime;
    this.pauseTime = pauseTime;
    this.currentTime = DateTime.now();
    this.intensity = roundDoubleNicely(intensity);
    this.incrementalRepair = incrementalRepair;
    this.totalSegments = totalSegments;
    this.repairParallelism = repairParallelism;
    this.segmentsRepaired = segmentsRepaired;
    this.lastEvent = lastEvent;

    this.nodes = nodes;
    this.datacenters = datacenters;
    this.blacklistedTables = blacklistedTables;
    this.repairThreadCount = repairThreadCount;

    // Duration is only meaningful once the run has started.
    if (startTime == null) {
        duration = null;
    } else if (state == RepairRun.RunState.RUNNING || state == RepairRun.RunState.PAUSED) {
        // Run still in progress: measure elapsed time against "now".
        duration = DurationFormatUtils.formatDurationWords(
                new Duration(startTime.toInstant(), currentTime.toInstant()).getMillis(), true, false);
    } else if (state == RepairRun.RunState.ABORTED) {
        // FIX: pauseTime may be null for an aborted run that was never
        // paused; the original dereferenced it unconditionally (NPE).
        duration = pauseTime == null ? null
                : DurationFormatUtils.formatDurationWords(
                        new Duration(startTime.toInstant(), pauseTime.toInstant()).getMillis(), true, false);
    } else if (endTime != null) {
        duration = DurationFormatUtils.formatDurationWords(
                new Duration(startTime.toInstant(), endTime.toInstant()).getMillis(), true, false);
    } else {
        duration = null;
    }

    // ETA: extrapolate from the average milliseconds per repaired segment.
    if (startTime == null || state == RepairRun.RunState.ERROR || state == RepairRun.RunState.DELETED
            || state == RepairRun.RunState.ABORTED || segmentsRepaired == 0) {
        // No basis for an estimate (not started, failed, or nothing repaired
        // yet -- the last check also prevents a divide-by-zero below).
        estimatedTimeOfArrival = null;
    } else {
        long now = DateTime.now().getMillis();
        long currentDuration = now - startTime.getMillis();
        long millisecondsPerSegment = currentDuration / segmentsRepaired;
        int segmentsLeft = totalSegments - segmentsRepaired;
        estimatedTimeOfArrival = new DateTime(now + millisecondsPerSegment * segmentsLeft);
    }
}

From source file:io.druid.indexing.coordinator.scaling.ResourceManagementScheduler.java

License:Open Source License

@LifecycleStart
public void start() {
    synchronized (lock) {
        if (started) {
            return;
        }//from   ww w.  jav  a  2  s .c  om

        log.info("Started Resource Management Scheduler");

        ScheduledExecutors.scheduleAtFixedRate(exec, config.getProvisionPeriod().toStandardDuration(),
                new Runnable() {
                    @Override
                    public void run() {
                        resourceManagementStrategy.doProvision(taskRunner.getPendingTasks(),
                                taskRunner.getWorkers());
                    }
                });

        // Schedule termination of worker nodes periodically
        Period period = config.getTerminatePeriod();
        PeriodGranularity granularity = new PeriodGranularity(period, config.getOriginTime(), null);
        final long startTime = granularity.next(granularity.truncate(new DateTime().getMillis()));

        ScheduledExecutors.scheduleAtFixedRate(exec, new Duration(System.currentTimeMillis(), startTime),
                config.getTerminatePeriod().toStandardDuration(), new Runnable() {
                    @Override
                    public void run() {
                        resourceManagementStrategy.doTerminate(taskRunner.getPendingTasks(),
                                taskRunner.getWorkers());
                    }
                });

        started = true;
    }
}

From source file:io.druid.indexing.coordinator.scaling.SimpleResourceManagementStrategy.java

License:Open Source License

@Override
public boolean doProvision(Collection<RemoteTaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers) {
    final WorkerSetupData workerSetupData = workerSetupdDataRef.get();

    final String minVersion = workerSetupData.getMinVersion() == null ? config.getWorkerVersion()
            : workerSetupData.getMinVersion();
    int maxNumWorkers = workerSetupData.getMaxNumWorkers();

    int currValidWorkers = 0;
    for (ZkWorker zkWorker : zkWorkers) {
        if (zkWorker.isValidVersion(minVersion)) {
            currValidWorkers++;/*from w  ww  . j av  a 2s .c o  m*/
        }
    }

    if (currValidWorkers >= maxNumWorkers) {
        log.debug("Cannot scale anymore. Num workers = %d, Max num workers = %d", zkWorkers.size(),
                workerSetupdDataRef.get().getMaxNumWorkers());
        return false;
    }

    List<String> workerNodeIds = autoScalingStrategy.ipToIdLookup(Lists
            .newArrayList(Iterables.<ZkWorker, String>transform(zkWorkers, new Function<ZkWorker, String>() {
                @Override
                public String apply(ZkWorker input) {
                    return input.getWorker().getIp();
                }
            })));

    currentlyProvisioning.removeAll(workerNodeIds);
    boolean nothingProvisioning = currentlyProvisioning.isEmpty();

    if (nothingProvisioning) {
        if (hasTaskPendingBeyondThreshold(pendingTasks)) {
            AutoScalingData provisioned = autoScalingStrategy.provision();

            if (provisioned != null) {
                currentlyProvisioning.addAll(provisioned.getNodeIds());
                lastProvisionTime = new DateTime();
                scalingStats.addProvisionEvent(provisioned);

                return true;
            }
        }
    } else {
        Duration durSinceLastProvision = new Duration(lastProvisionTime, new DateTime());

        log.info(
                "%s still provisioning. Wait for all provisioned nodes to complete before requesting new worker. Current wait time: %s",
                currentlyProvisioning, durSinceLastProvision);

        if (durSinceLastProvision.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
            log.makeAlert("Worker node provisioning taking too long!")
                    .addData("millisSinceLastProvision", durSinceLastProvision.getMillis())
                    .addData("provisioningCount", currentlyProvisioning.size()).emit();

            List<String> nodeIps = autoScalingStrategy.idToIpLookup(Lists.newArrayList(currentlyProvisioning));
            autoScalingStrategy.terminate(nodeIps);
            currentlyProvisioning.clear();
        }
    }

    return false;
}

From source file:io.druid.indexing.coordinator.scaling.SimpleResourceManagementStrategy.java

License:Open Source License

@Override
public boolean doTerminate(Collection<RemoteTaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers) {
    Set<String> workerNodeIds = Sets.newHashSet(autoScalingStrategy
            .ipToIdLookup(Lists.newArrayList(Iterables.transform(zkWorkers, new Function<ZkWorker, String>() {
                @Override// ww w. j ava2  s  .  co m
                public String apply(ZkWorker input) {
                    return input.getWorker().getIp();
                }
            }))));

    Set<String> stillExisting = Sets.newHashSet();
    for (String s : currentlyTerminating) {
        if (workerNodeIds.contains(s)) {
            stillExisting.add(s);
        }
    }
    currentlyTerminating.clear();
    currentlyTerminating.addAll(stillExisting);
    boolean nothingTerminating = currentlyTerminating.isEmpty();

    if (nothingTerminating) {
        final int minNumWorkers = workerSetupdDataRef.get().getMinNumWorkers();
        if (zkWorkers.size() <= minNumWorkers) {
            log.info("Only [%d <= %d] nodes in the cluster, not terminating anything.", zkWorkers.size(),
                    minNumWorkers);
            return false;
        }

        List<ZkWorker> thoseLazyWorkers = Lists
                .newArrayList(FunctionalIterable.create(zkWorkers).filter(new Predicate<ZkWorker>() {
                    @Override
                    public boolean apply(ZkWorker input) {
                        return input.getRunningTasks().isEmpty() && System.currentTimeMillis()
                                - input.getLastCompletedTaskTime().getMillis() >= config.getWorkerIdleTimeout()
                                        .getMillis();
                    }
                }));

        int maxPossibleNodesTerminated = zkWorkers.size() - minNumWorkers;
        int numNodesToTerminate = Math.min(maxPossibleNodesTerminated, thoseLazyWorkers.size());
        if (numNodesToTerminate <= 0) {
            log.info("Found no nodes to terminate.");
            return false;
        }

        AutoScalingData terminated = autoScalingStrategy.terminate(Lists
                .transform(thoseLazyWorkers.subList(0, numNodesToTerminate), new Function<ZkWorker, String>() {
                    @Override
                    public String apply(ZkWorker input) {
                        return input.getWorker().getIp();
                    }
                }));

        if (terminated != null) {
            currentlyTerminating.addAll(terminated.getNodeIds());
            lastTerminateTime = new DateTime();
            scalingStats.addTerminateEvent(terminated);

            return true;
        }
    } else {
        Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());

        log.info("%s still terminating. Wait for all nodes to terminate before trying again.",
                currentlyTerminating);

        if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
            log.makeAlert("Worker node termination taking too long!")
                    .addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
                    .addData("terminatingCount", currentlyTerminating.size()).emit();

            currentlyTerminating.clear();
        }
    }

    return false;
}

From source file:io.druid.indexing.coordinator.scaling.SimpleResourceManagementStrategy.java

License:Open Source License

/**
 * Returns true when at least one pending task has been sitting in the
 * queue for at least the configured pending-task timeout.
 */
private boolean hasTaskPendingBeyondThreshold(Collection<RemoteTaskRunnerWorkItem> pendingTasks) {
    long now = System.currentTimeMillis();
    // Hoisted out of the loop: the timeout is loop-invariant, and the
    // original recomputed it for every pending task.
    final Duration timeoutDuration = config.getPendingTaskTimeout().toStandardDuration();
    for (TaskRunnerWorkItem pendingTask : pendingTasks) {
        final Duration durationSinceInsertion = new Duration(pendingTask.getQueueInsertionTime().getMillis(),
                now);
        // "not shorter than" == "equal to or longer than" (Joda compareTo >= 0).
        if (!durationSinceInsertion.isShorterThan(timeoutDuration)) {
            return true;
        }
    }
    return false;
}

From source file:io.druid.indexing.overlord.autoscaling.AbstractWorkerResourceManagementStrategy.java

License:Apache License

@Override
public void startManagement(final WorkerTaskRunner runner) {
    if (!lifecycleLock.canStart()) {
        return;//from  w  w  w .j  a  va 2 s.  c om
    }
    try {

        log.info("Started Resource Management Scheduler");

        ScheduledExecutors.scheduleAtFixedRate(exec,
                resourceManagementSchedulerConfig.getProvisionPeriod().toStandardDuration(), new Runnable() {
                    @Override
                    public void run() {
                        // Any Errors are caught by ScheduledExecutors
                        doProvision(runner);
                    }
                });

        // Schedule termination of worker nodes periodically
        Period period = resourceManagementSchedulerConfig.getTerminatePeriod();
        PeriodGranularity granularity = new PeriodGranularity(period,
                resourceManagementSchedulerConfig.getOriginTime(), null);
        final long startTime = granularity.bucketEnd(new DateTime()).getMillis();

        ScheduledExecutors.scheduleAtFixedRate(exec, new Duration(System.currentTimeMillis(), startTime),
                resourceManagementSchedulerConfig.getTerminatePeriod().toStandardDuration(), new Runnable() {
                    @Override
                    public void run() {
                        // Any Errors are caught by ScheduledExecutors
                        doTerminate(runner);
                    }
                });
        lifecycleLock.started();
    } finally {
        lifecycleLock.exitStart();
    }
}

From source file:io.druid.indexing.overlord.autoscaling.PendingTaskBasedWorkerResourceManagementStrategy.java

License:Apache License

@Override
public boolean doProvision(WorkerTaskRunner runner) {
    Collection<Task> pendingTasks = runner.getPendingTaskPayloads();
    Collection<ImmutableWorkerInfo> workers = runner.getWorkers();
    synchronized (lock) {
        boolean didProvision = false;
        final WorkerBehaviorConfig workerConfig = workerConfigRef.get();
        if (workerConfig == null || workerConfig.getAutoScaler() == null) {
            log.error("No workerConfig available, cannot provision new workers.");
            return false;
        }// www .  j av  a2 s . c o m

        final Collection<String> workerNodeIds = getWorkerNodeIDs(
                Collections2.transform(workers, new Function<ImmutableWorkerInfo, Worker>() {
                    @Override
                    public Worker apply(ImmutableWorkerInfo input) {
                        return input.getWorker();
                    }
                }), workerConfig);
        currentlyProvisioning.removeAll(workerNodeIds);
        if (currentlyProvisioning.isEmpty()) {
            int want = getScaleUpNodeCount(runner.getConfig(), workerConfig, pendingTasks, workers);
            while (want > 0) {
                final AutoScalingData provisioned = workerConfig.getAutoScaler().provision();
                final List<String> newNodes = provisioned == null ? ImmutableList.<String>of()
                        : provisioned.getNodeIds();
                if (newNodes.isEmpty()) {
                    log.warn("NewNodes is empty, returning from provision loop");
                    break;
                } else {
                    currentlyProvisioning.addAll(newNodes);
                    lastProvisionTime = new DateTime();
                    scalingStats.addProvisionEvent(provisioned);
                    want -= provisioned.getNodeIds().size();
                    didProvision = true;
                }
            }
        } else {
            Duration durSinceLastProvision = new Duration(lastProvisionTime, new DateTime());
            log.info("%s provisioning. Current wait time: %s", currentlyProvisioning, durSinceLastProvision);
            if (durSinceLastProvision.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node provisioning taking too long!")
                        .addData("millisSinceLastProvision", durSinceLastProvision.getMillis())
                        .addData("provisioningCount", currentlyProvisioning.size()).emit();

                workerConfig.getAutoScaler().terminateWithIds(Lists.newArrayList(currentlyProvisioning));
                currentlyProvisioning.clear();
            }
        }

        return didProvision;
    }
}

From source file:io.druid.indexing.overlord.autoscaling.PendingTaskBasedWorkerResourceManagementStrategy.java

License:Apache License

@Override
public boolean doTerminate(WorkerTaskRunner runner) {
    Collection<ImmutableWorkerInfo> zkWorkers = runner.getWorkers();
    synchronized (lock) {
        final WorkerBehaviorConfig workerConfig = workerConfigRef.get();
        if (workerConfig == null) {
            log.warn("No workerConfig available, cannot terminate workers.");
            return false;
        }/*from w w w  .  j  a v  a 2  s.com*/

        if (!currentlyProvisioning.isEmpty()) {
            log.debug("Already provisioning nodes, Not Terminating any nodes.");
            return false;
        }

        boolean didTerminate = false;
        final Collection<String> workerNodeIds = getWorkerNodeIDs(runner.getLazyWorkers(), workerConfig);
        final Set<String> stillExisting = Sets.newHashSet();
        for (String s : currentlyTerminating) {
            if (workerNodeIds.contains(s)) {
                stillExisting.add(s);
            }
        }
        currentlyTerminating.clear();
        currentlyTerminating.addAll(stillExisting);

        if (currentlyTerminating.isEmpty()) {
            final int maxWorkersToTerminate = maxWorkersToTerminate(zkWorkers, workerConfig);
            final Predicate<ImmutableWorkerInfo> isLazyWorker = ResourceManagementUtil
                    .createLazyWorkerPredicate(config);
            final List<String> laziestWorkerIps = Lists.newArrayList(
                    Collections2.transform(runner.markWorkersLazy(isLazyWorker, maxWorkersToTerminate),
                            new Function<Worker, String>() {
                                @Override
                                public String apply(Worker zkWorker) {
                                    return zkWorker.getIp();
                                }
                            }));
            if (laziestWorkerIps.isEmpty()) {
                log.debug("Found no lazy workers");
            } else {
                log.info("Terminating %,d lazy workers: %s", laziestWorkerIps.size(),
                        Joiner.on(", ").join(laziestWorkerIps));

                final AutoScalingData terminated = workerConfig.getAutoScaler().terminate(laziestWorkerIps);
                if (terminated != null) {
                    currentlyTerminating.addAll(terminated.getNodeIds());
                    lastTerminateTime = new DateTime();
                    scalingStats.addTerminateEvent(terminated);
                    didTerminate = true;
                }
            }
        } else {
            Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());

            log.info("%s terminating. Current wait time: %s", currentlyTerminating, durSinceLastTerminate);

            if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node termination taking too long!")
                        .addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
                        .addData("terminatingCount", currentlyTerminating.size()).emit();

                currentlyTerminating.clear();
            }
        }

        return didTerminate;
    }
}

From source file:io.druid.indexing.overlord.autoscaling.ResourceManagementScheduler.java

License:Apache License

@LifecycleStart
public void start() {
    synchronized (lock) {
        if (started) {
            return;
        }//from   ww  w  . j a  v a2s . c o m

        log.info("Started Resource Management Scheduler");

        ScheduledExecutors.scheduleAtFixedRate(exec, config.getProvisionPeriod().toStandardDuration(),
                new Runnable() {
                    @Override
                    public void run() {
                        resourceManagementStrategy.doProvision(taskRunner);
                    }
                });

        // Schedule termination of worker nodes periodically
        Period period = config.getTerminatePeriod();
        PeriodGranularity granularity = new PeriodGranularity(period, config.getOriginTime(), null);
        final long startTime = granularity.next(granularity.truncate(new DateTime().getMillis()));

        ScheduledExecutors.scheduleAtFixedRate(exec, new Duration(System.currentTimeMillis(), startTime),
                config.getTerminatePeriod().toStandardDuration(), new Runnable() {
                    @Override
                    public void run() {
                        resourceManagementStrategy.doTerminate(taskRunner);
                    }
                });

        started = true;
    }
}

From source file:io.druid.indexing.overlord.autoscaling.SimpleResourceManagementStrategy.java

License:Apache License

@Override
public boolean doProvision(RemoteTaskRunner runner) {
    Collection<RemoteTaskRunnerWorkItem> pendingTasks = runner.getPendingTasks();
    Collection<ZkWorker> zkWorkers = runner.getWorkers();
    synchronized (lock) {
        boolean didProvision = false;
        final WorkerBehaviorConfig workerConfig = workerConfigRef.get();
        if (workerConfig == null || workerConfig.getAutoScaler() == null) {
            log.warn("No workerConfig available, cannot provision new workers.");
            return false;
        }/*from   w  w w  .j  a v  a  2 s  . co  m*/
        final Predicate<ZkWorker> isValidWorker = createValidWorkerPredicate(config);
        final int currValidWorkers = Collections2.filter(zkWorkers, isValidWorker).size();

        final List<String> workerNodeIds = workerConfig.getAutoScaler().ipToIdLookup(Lists.newArrayList(
                Iterables.<ZkWorker, String>transform(zkWorkers, new Function<ZkWorker, String>() {
                    @Override
                    public String apply(ZkWorker input) {
                        return input.getWorker().getIp();
                    }
                })));
        currentlyProvisioning.removeAll(workerNodeIds);

        updateTargetWorkerCount(workerConfig, pendingTasks, zkWorkers);

        int want = targetWorkerCount - (currValidWorkers + currentlyProvisioning.size());
        while (want > 0) {
            final AutoScalingData provisioned = workerConfig.getAutoScaler().provision();
            final List<String> newNodes;
            if (provisioned == null || (newNodes = provisioned.getNodeIds()).isEmpty()) {
                break;
            } else {
                currentlyProvisioning.addAll(newNodes);
                lastProvisionTime = new DateTime();
                scalingStats.addProvisionEvent(provisioned);
                want -= provisioned.getNodeIds().size();
                didProvision = true;
            }
        }

        if (!currentlyProvisioning.isEmpty()) {
            Duration durSinceLastProvision = new Duration(lastProvisionTime, new DateTime());

            log.info("%s provisioning. Current wait time: %s", currentlyProvisioning, durSinceLastProvision);

            if (durSinceLastProvision.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node provisioning taking too long!")
                        .addData("millisSinceLastProvision", durSinceLastProvision.getMillis())
                        .addData("provisioningCount", currentlyProvisioning.size()).emit();

                workerConfig.getAutoScaler().terminateWithIds(Lists.newArrayList(currentlyProvisioning));
                currentlyProvisioning.clear();
            }
        }

        return didProvision;
    }
}