Example usage for org.joda.time Duration Duration

Introduction

On this page you can find example usage of the org.joda.time Duration(ReadableInstant start, ReadableInstant end) constructor.

Prototype

public Duration(ReadableInstant start, ReadableInstant end) 

Document

Creates a duration from the given interval endpoints.
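
As a quick illustration, here is a minimal, self-contained sketch of this constructor on its own; the class name and the ten-minute gap are illustrative, not taken from the examples below.

import org.joda.time.DateTime;
import org.joda.time.Duration;

public class DurationFromInstantsExample {
    public static void main(String[] args) {
        // Two instants ten minutes apart; any ReadableInstant (DateTime, Instant, ...) works.
        DateTime start = new DateTime().minusMinutes(10);
        DateTime end = new DateTime();

        // Duration(ReadableInstant start, ReadableInstant end): the span between the two instants.
        Duration elapsed = new Duration(start, end);

        System.out.println(elapsed.getMillis());          // roughly 600000
        System.out.println(elapsed.getStandardMinutes()); // 10
    }
}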

Usage

From source file: io.druid.indexing.overlord.autoscaling.SimpleResourceManagementStrategy.java

License: Apache License

@Override
public boolean doTerminate(RemoteTaskRunner runner) {
    Collection<RemoteTaskRunnerWorkItem> pendingTasks = runner.getPendingTasks();
    synchronized (lock) {
        final WorkerBehaviorConfig workerConfig = workerConfigRef.get();
        if (workerConfig == null) {
            log.warn("No workerConfig available, cannot terminate workers.");
            return false;
        }

        boolean didTerminate = false;
        final Set<String> workerNodeIds = Sets.newHashSet(workerConfig.getAutoScaler().ipToIdLookup(Lists
                .newArrayList(Iterables.transform(runner.getLazyWorkers(), new Function<ZkWorker, String>() {
                    @Override
                    public String apply(ZkWorker input) {
                        return input.getWorker().getIp();
                    }
                }))));

        final Set<String> stillExisting = Sets.newHashSet();
        for (String s : currentlyTerminating) {
            if (workerNodeIds.contains(s)) {
                stillExisting.add(s);
            }
        }
        currentlyTerminating.clear();
        currentlyTerminating.addAll(stillExisting);

        Collection<ZkWorker> workers = runner.getWorkers();
        updateTargetWorkerCount(workerConfig, pendingTasks, workers);

        if (currentlyTerminating.isEmpty()) {

            final int excessWorkers = (workers.size() + currentlyProvisioning.size()) - targetWorkerCount;
            if (excessWorkers > 0) {
                final Predicate<ZkWorker> isLazyWorker = createLazyWorkerPredicate(config);
                final List<String> laziestWorkerIps = Lists.transform(
                        runner.markWorkersLazy(isLazyWorker, excessWorkers), new Function<ZkWorker, String>() {
                            @Override
                            public String apply(ZkWorker zkWorker) {
                                return zkWorker.getWorker().getIp();
                            }
                        });
                if (laziestWorkerIps.isEmpty()) {
                    log.info("Wanted to terminate %,d workers, but couldn't find any lazy ones!",
                            excessWorkers);
                } else {
                    log.info("Terminating %,d workers (wanted %,d): %s", laziestWorkerIps.size(), excessWorkers,
                            Joiner.on(", ").join(laziestWorkerIps));

                    final AutoScalingData terminated = workerConfig.getAutoScaler().terminate(laziestWorkerIps);
                    if (terminated != null) {
                        currentlyTerminating.addAll(terminated.getNodeIds());
                        lastTerminateTime = new DateTime();
                        scalingStats.addTerminateEvent(terminated);
                        didTerminate = true;
                    }
                }
            }
        } else {
            Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());

            log.info("%s terminating. Current wait time: %s", currentlyTerminating, durSinceLastTerminate);

            if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node termination taking too long!")
                        .addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
                        .addData("terminatingCount", currentlyTerminating.size()).emit();

                currentlyTerminating.clear();
            }
        }

        return didTerminate;
    }
}
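
The scaling examples on this page all follow the same elapsed-time pattern: build a Duration from the timestamp of the last scaling action and the current instant, then compare it against a configured maximum. Boiled down to a sketch with illustrative values (lastTerminateTime and maxScalingDuration stand in for the strategy's state and config):

import org.joda.time.DateTime;
import org.joda.time.Duration;

public class ScalingWaitCheckExample {
    public static void main(String[] args) {
        // Illustrative values; in the example above they come from the strategy's state and config.
        DateTime lastTerminateTime = new DateTime().minusMinutes(20);
        Duration maxScalingDuration = Duration.standardMinutes(15);

        // Elapsed time since the last terminate call, measured up to the current instant.
        Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());

        if (durSinceLastTerminate.isLongerThan(maxScalingDuration)) {
            System.out.println("Termination taking too long: " + durSinceLastTerminate.getMillis() + " ms");
        }
    }
}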

From source file: io.druid.indexing.overlord.autoscaling.SimpleResourceManagementStrategy.java

License: Apache License

private boolean hasTaskPendingBeyondThreshold(Collection<RemoteTaskRunnerWorkItem> pendingTasks) {
    synchronized (lock) {
        long now = System.currentTimeMillis();
        for (TaskRunnerWorkItem pendingTask : pendingTasks) {
            final Duration durationSinceInsertion = new Duration(
                    pendingTask.getQueueInsertionTime().getMillis(), now);
            final Duration timeoutDuration = config.getPendingTaskTimeout().toStandardDuration();
            if (durationSinceInsertion.isEqual(timeoutDuration)
                    || durationSinceInsertion.isLongerThan(timeoutDuration)) {
                return true;
            }
        }
        return false;
    }
}
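
This check uses the millisecond-based Duration(long startInstant, long endInstant) overload rather than the two-instant form documented above; the threshold test itself reduces to the following sketch, with illustrative values standing in for the work item's insertion time and the configured timeout.

import org.joda.time.DateTime;
import org.joda.time.Duration;

public class PendingTaskTimeoutExample {
    public static void main(String[] args) {
        // Illustrative values; in the example above they come from the work item and the config.
        DateTime queueInsertionTime = new DateTime().minusMinutes(5);
        Duration pendingTaskTimeout = Duration.standardMinutes(3);

        Duration durationSinceInsertion = new Duration(queueInsertionTime, new DateTime());

        // "At least as long as the timeout", spelled out as isEqual || isLongerThan above.
        boolean beyondThreshold = durationSinceInsertion.isEqual(pendingTaskTimeout)
                || durationSinceInsertion.isLongerThan(pendingTaskTimeout);
        System.out.println(beyondThreshold); // true for these values
    }
}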

From source file: io.druid.indexing.overlord.autoscaling.SimpleWorkerResourceManagementStrategy.java

License: Apache License

protected boolean doProvision(WorkerTaskRunner runner) {
    Collection<? extends TaskRunnerWorkItem> pendingTasks = runner.getPendingTasks();
    Collection<ImmutableWorkerInfo> workers = getWorkers(runner);
    synchronized (lock) {
        boolean didProvision = false;
        final WorkerBehaviorConfig workerConfig = workerConfigRef.get();
        if (workerConfig == null || workerConfig.getAutoScaler() == null) {
            log.warn("No workerConfig available, cannot provision new workers.");
            return false;
        }

        final Predicate<ImmutableWorkerInfo> isValidWorker = ResourceManagementUtil
                .createValidWorkerPredicate(config);
        final int currValidWorkers = Collections2.filter(workers, isValidWorker).size();

        final List<String> workerNodeIds = workerConfig.getAutoScaler().ipToIdLookup(
                Lists.newArrayList(Iterables.transform(workers, new Function<ImmutableWorkerInfo, String>() {
                    @Override
                    public String apply(ImmutableWorkerInfo input) {
                        return input.getWorker().getIp();
                    }
                })));
        currentlyProvisioning.removeAll(workerNodeIds);

        updateTargetWorkerCount(workerConfig, pendingTasks, workers);

        int want = targetWorkerCount - (currValidWorkers + currentlyProvisioning.size());
        while (want > 0) {
            final AutoScalingData provisioned = workerConfig.getAutoScaler().provision();
            final List<String> newNodes;
            if (provisioned == null || (newNodes = provisioned.getNodeIds()).isEmpty()) {
                break;
            } else {
                currentlyProvisioning.addAll(newNodes);
                lastProvisionTime = new DateTime();
                scalingStats.addProvisionEvent(provisioned);
                want -= provisioned.getNodeIds().size();
                didProvision = true;
            }
        }

        if (!currentlyProvisioning.isEmpty()) {
            Duration durSinceLastProvision = new Duration(lastProvisionTime, new DateTime());

            log.info("%s provisioning. Current wait time: %s", currentlyProvisioning, durSinceLastProvision);

            if (durSinceLastProvision.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node provisioning taking too long!")
                        .addData("millisSinceLastProvision", durSinceLastProvision.getMillis())
                        .addData("provisioningCount", currentlyProvisioning.size()).emit();

                workerConfig.getAutoScaler().terminateWithIds(Lists.newArrayList(currentlyProvisioning));
                currentlyProvisioning.clear();
            }
        }

        return didProvision;
    }
}

From source file: io.druid.indexing.overlord.autoscaling.SimpleWorkerResourceManagementStrategy.java

License: Apache License

boolean doTerminate(WorkerTaskRunner runner) {
    Collection<? extends TaskRunnerWorkItem> pendingTasks = runner.getPendingTasks();
    synchronized (lock) {
        final WorkerBehaviorConfig workerConfig = workerConfigRef.get();
        if (workerConfig == null) {
            log.warn("No workerConfig available, cannot terminate workers.");
            return false;
        }

        boolean didTerminate = false;
        final Set<String> workerNodeIds = Sets.newHashSet(workerConfig.getAutoScaler().ipToIdLookup(
                Lists.newArrayList(Iterables.transform(runner.getLazyWorkers(), new Function<Worker, String>() {
                    @Override
                    public String apply(Worker input) {
                        return input.getIp();
                    }
                }))));

        final Set<String> stillExisting = Sets.newHashSet();
        for (String s : currentlyTerminating) {
            if (workerNodeIds.contains(s)) {
                stillExisting.add(s);
            }
        }
        currentlyTerminating.clear();
        currentlyTerminating.addAll(stillExisting);

        Collection<ImmutableWorkerInfo> workers = getWorkers(runner);
        updateTargetWorkerCount(workerConfig, pendingTasks, workers);

        if (currentlyTerminating.isEmpty()) {

            final int excessWorkers = (workers.size() + currentlyProvisioning.size()) - targetWorkerCount;
            if (excessWorkers > 0) {
                final Predicate<ImmutableWorkerInfo> isLazyWorker = ResourceManagementUtil
                        .createLazyWorkerPredicate(config);
                final Collection<String> laziestWorkerIps = Collections2.transform(
                        runner.markWorkersLazy(isLazyWorker, excessWorkers), new Function<Worker, String>() {
                            @Override
                            public String apply(Worker worker) {
                                return worker.getIp();
                            }
                        });
                if (laziestWorkerIps.isEmpty()) {
                    log.info("Wanted to terminate %,d workers, but couldn't find any lazy ones!",
                            excessWorkers);
                } else {
                    log.info("Terminating %,d workers (wanted %,d): %s", laziestWorkerIps.size(), excessWorkers,
                            Joiner.on(", ").join(laziestWorkerIps));

                    final AutoScalingData terminated = workerConfig.getAutoScaler()
                            .terminate(ImmutableList.copyOf(laziestWorkerIps));
                    if (terminated != null) {
                        currentlyTerminating.addAll(terminated.getNodeIds());
                        lastTerminateTime = new DateTime();
                        scalingStats.addTerminateEvent(terminated);
                        didTerminate = true;
                    }
                }
            }
        } else {
            Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());

            log.info("%s terminating. Current wait time: %s", currentlyTerminating, durSinceLastTerminate);

            if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node termination taking too long!")
                        .addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
                        .addData("terminatingCount", currentlyTerminating.size()).emit();

                currentlyTerminating.clear();
            }
        }

        return didTerminate;
    }
}

From source file: io.druid.indexing.overlord.autoscaling.SimpleWorkerResourceManagementStrategy.java

License: Apache License

private boolean hasTaskPendingBeyondThreshold(Collection<? extends TaskRunnerWorkItem> pendingTasks) {
    synchronized (lock) {
        long now = System.currentTimeMillis();
        for (TaskRunnerWorkItem pendingTask : pendingTasks) {
            final Duration durationSinceInsertion = new Duration(
                    pendingTask.getQueueInsertionTime().getMillis(), now);
            final Duration timeoutDuration = config.getPendingTaskTimeout().toStandardDuration();
            if (durationSinceInsertion.isEqual(timeoutDuration)
                    || durationSinceInsertion.isLongerThan(timeoutDuration)) {
                return true;
            }
        }
        return false;
    }
}

From source file: io.druid.indexing.overlord.scaling.SimpleResourceManagementStrategy.java

License: Open Source License

@Override
public boolean doProvision(Collection<RemoteTaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers) {
    synchronized (lock) {
        boolean didProvision = false;
        final WorkerSetupData workerSetupData = workerSetupDataRef.get();
        if (workerSetupData == null) {
            log.warn("No workerSetupData available, cannot provision new workers.");
            return false;
        }
        final Predicate<ZkWorker> isValidWorker = createValidWorkerPredicate(config, workerSetupData);
        final int currValidWorkers = Collections2.filter(zkWorkers, isValidWorker).size();

        final List<String> workerNodeIds = autoScalingStrategy.ipToIdLookup(Lists.newArrayList(
                Iterables.<ZkWorker, String>transform(zkWorkers, new Function<ZkWorker, String>() {
                    @Override
                    public String apply(ZkWorker input) {
                        return input.getWorker().getIp();
                    }
                })));
        currentlyProvisioning.removeAll(workerNodeIds);

        updateTargetWorkerCount(workerSetupData, pendingTasks, zkWorkers);

        int want = targetWorkerCount - (currValidWorkers + currentlyProvisioning.size());
        while (want > 0) {
            final AutoScalingData provisioned = autoScalingStrategy.provision();
            final List<String> newNodes;
            if (provisioned == null || (newNodes = provisioned.getNodeIds()).isEmpty()) {
                break;
            } else {
                currentlyProvisioning.addAll(newNodes);
                lastProvisionTime = new DateTime();
                scalingStats.addProvisionEvent(provisioned);
                want -= provisioned.getNodeIds().size();
                didProvision = true;
            }
        }

        if (!currentlyProvisioning.isEmpty()) {
            Duration durSinceLastProvision = new Duration(lastProvisionTime, new DateTime());

            log.info("%s provisioning. Current wait time: %s", currentlyProvisioning, durSinceLastProvision);

            if (durSinceLastProvision.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node provisioning taking too long!")
                        .addData("millisSinceLastProvision", durSinceLastProvision.getMillis())
                        .addData("provisioningCount", currentlyProvisioning.size()).emit();

                autoScalingStrategy.terminateWithIds(Lists.newArrayList(currentlyProvisioning));
                currentlyProvisioning.clear();
            }
        }

        return didProvision;
    }
}

From source file: io.druid.indexing.overlord.scaling.SimpleResourceManagementStrategy.java

License: Open Source License

@Override
public boolean doTerminate(Collection<RemoteTaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers) {
    synchronized (lock) {
        final WorkerSetupData workerSetupData = workerSetupDataRef.get();
        if (workerSetupData == null) {
            log.warn("No workerSetupData available, cannot terminate workers.");
            return false;
        }

        boolean didTerminate = false;
        final Set<String> workerNodeIds = Sets.newHashSet(autoScalingStrategy.ipToIdLookup(
                Lists.newArrayList(Iterables.transform(zkWorkers, new Function<ZkWorker, String>() {
                    @Override
                    public String apply(ZkWorker input) {
                        return input.getWorker().getIp();
                    }
                }))));

        final Set<String> stillExisting = Sets.newHashSet();
        for (String s : currentlyTerminating) {
            if (workerNodeIds.contains(s)) {
                stillExisting.add(s);
            }
        }
        currentlyTerminating.clear();
        currentlyTerminating.addAll(stillExisting);

        updateTargetWorkerCount(workerSetupData, pendingTasks, zkWorkers);

        final Predicate<ZkWorker> isLazyWorker = createLazyWorkerPredicate(config, workerSetupData);
        if (currentlyTerminating.isEmpty()) {
            final int excessWorkers = (zkWorkers.size() + currentlyProvisioning.size()) - targetWorkerCount;
            if (excessWorkers > 0) {
                final List<String> laziestWorkerIps = FluentIterable.from(zkWorkers).filter(isLazyWorker)
                        .limit(excessWorkers).transform(new Function<ZkWorker, String>() {
                            @Override
                            public String apply(ZkWorker zkWorker) {
                                return zkWorker.getWorker().getIp();
                            }
                        }).toList();

                if (laziestWorkerIps.isEmpty()) {
                    log.info("Wanted to terminate %,d workers, but couldn't find any lazy ones!",
                            excessWorkers);
                } else {
                    log.info("Terminating %,d workers (wanted %,d): %s", laziestWorkerIps.size(), excessWorkers,
                            Joiner.on(", ").join(laziestWorkerIps));

                    final AutoScalingData terminated = autoScalingStrategy.terminate(laziestWorkerIps);
                    if (terminated != null) {
                        currentlyTerminating.addAll(terminated.getNodeIds());
                        lastTerminateTime = new DateTime();
                        scalingStats.addTerminateEvent(terminated);
                        didTerminate = true;
                    }
                }
            }
        } else {
            Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());

            log.info("%s terminating. Current wait time: %s", currentlyTerminating, durSinceLastTerminate);

            if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node termination taking too long!")
                        .addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
                        .addData("terminatingCount", currentlyTerminating.size()).emit();

                currentlyTerminating.clear();
            }
        }

        return didTerminate;
    }
}

From source file: io.druid.segment.realtime.appenderator.AppenderatorPlumber.java

License: Apache License

private void startPersistThread() {
    final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
    final Period windowPeriod = config.getWindowPeriod();

    final DateTime truncatedNow = segmentGranularity.bucketStart(new DateTime());
    final long windowMillis = windowPeriod.toStandardDuration().getMillis();

    log.info("Expect to run at [%s]", new DateTime().plus(new Duration(System.currentTimeMillis(),
            segmentGranularity.increment(truncatedNow).getMillis() + windowMillis)));

    ScheduledExecutors.scheduleAtFixedRate(scheduledExecutor,
            new Duration(System.currentTimeMillis(),
                    segmentGranularity.increment(truncatedNow).getMillis() + windowMillis),
            new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)),
            new ThreadRenamingCallable<ScheduledExecutors.Signal>(StringUtils.format("%s-overseer-%d",
                    schema.getDataSource(), config.getShardSpec().getPartitionNum())) {
                @Override
                public ScheduledExecutors.Signal doCall() {
                    if (stopped) {
                        log.info("Stopping merge-n-push overseer thread");
                        return ScheduledExecutors.Signal.STOP;
                    }

                    mergeAndPush();

                    if (stopped) {
                        log.info("Stopping merge-n-push overseer thread");
                        return ScheduledExecutors.Signal.STOP;
                    } else {
                        return ScheduledExecutors.Signal.REPEAT;
                    }
                }
            });
}
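
In this and the following plumber examples, the two-instant constructor supplies the repeat period for ScheduledExecutors.scheduleAtFixedRate: the initial delay runs from now until the end of the current segment bucket plus the window, and the period is the span between two consecutive bucket boundaries. A stripped-down sketch of that arithmetic, using plain Joda-Time in place of Druid's Granularity (hourly buckets and a ten-minute window are illustrative):

import org.joda.time.DateTime;
import org.joda.time.Duration;
import org.joda.time.Period;

public class SegmentScheduleExample {
    public static void main(String[] args) {
        // Illustrative stand-ins for segmentGranularity (hourly) and windowPeriod (ten minutes).
        Period windowPeriod = Period.minutes(10);
        DateTime truncatedNow = new DateTime().hourOfDay().roundFloorCopy(); // start of the current hour
        DateTime nextBucket = truncatedNow.plusHours(1);                     // start of the next hour

        long windowMillis = windowPeriod.toStandardDuration().getMillis();

        // Initial delay: from now until the next bucket boundary plus the window.
        Duration initialDelay = new Duration(System.currentTimeMillis(),
                nextBucket.getMillis() + windowMillis);

        // Repeat period: the span between two consecutive bucket boundaries.
        Duration period = new Duration(truncatedNow, nextBucket);

        System.out.println("initial delay: " + initialDelay + ", period: " + period);
    }
}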

From source file: io.druid.segment.realtime.plumber.FlushingPlumber.java

License: Apache License

private void startFlushThread() {
    final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
    final DateTime truncatedNow = segmentGranularity.truncate(new DateTime());
    final long windowMillis = config.getWindowPeriod().toStandardDuration().getMillis();

    log.info("Expect to run at [%s]",
            new DateTime().plus(new Duration(System.currentTimeMillis(),
                    schema.getGranularitySpec().getSegmentGranularity().increment(truncatedNow).getMillis()
                            + windowMillis)));

    ScheduledExecutors.scheduleAtFixedRate(flushScheduledExec,
            new Duration(System.currentTimeMillis(),
                    schema.getGranularitySpec().getSegmentGranularity().increment(truncatedNow).getMillis()
                            + windowMillis),
            new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)),
            new ThreadRenamingCallable<ScheduledExecutors.Signal>(String.format("%s-flusher-%d",
                    getSchema().getDataSource(), getConfig().getShardSpec().getPartitionNum())) {
                @Override
                public ScheduledExecutors.Signal doCall() {
                    if (stopped) {
                        log.info("Stopping flusher thread");
                        return ScheduledExecutors.Signal.STOP;
                    }

                    long minTimestamp = segmentGranularity
                            .truncate(getRejectionPolicy().getCurrMaxTime().minus(windowMillis)).getMillis();

                    List<Map.Entry<Long, Sink>> sinksToPush = Lists.newArrayList();
                    for (Map.Entry<Long, Sink> entry : getSinks().entrySet()) {
                        final Long intervalStart = entry.getKey();
                        if (intervalStart < minTimestamp) {
                            log.info("Adding entry[%s] to flush.", entry);
                            sinksToPush.add(entry);
                        }
                    }

                    for (final Map.Entry<Long, Sink> entry : sinksToPush) {
                        flushAfterDuration(entry.getKey(), entry.getValue());
                    }

                    if (stopped) {
                        log.info("Stopping flusher thread");
                        return ScheduledExecutors.Signal.STOP;
                    } else {
                        return ScheduledExecutors.Signal.REPEAT;
                    }
                }
            });
}

From source file: io.druid.segment.realtime.plumber.RealtimePlumber.java

License: Apache License

protected void startPersistThread() {
    final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
    final Period windowPeriod = config.getWindowPeriod();

    final DateTime truncatedNow = segmentGranularity.truncate(new DateTime());
    final long windowMillis = windowPeriod.toStandardDuration().getMillis();

    log.info("Expect to run at [%s]", new DateTime().plus(new Duration(System.currentTimeMillis(),
            segmentGranularity.increment(truncatedNow).getMillis() + windowMillis)));

    ScheduledExecutors.scheduleAtFixedRate(scheduledExecutor,
            new Duration(System.currentTimeMillis(),
                    segmentGranularity.increment(truncatedNow).getMillis() + windowMillis),
            new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)),
            new ThreadRenamingCallable<ScheduledExecutors.Signal>(String.format("%s-overseer-%d",
                    schema.getDataSource(), config.getShardSpec().getPartitionNum())) {
                @Override
                public ScheduledExecutors.Signal doCall() {
                    if (stopped) {
                        log.info("Stopping merge-n-push overseer thread");
                        return ScheduledExecutors.Signal.STOP;
                    }

                    mergeAndPush();

                    if (stopped) {
                        log.info("Stopping merge-n-push overseer thread");
                        return ScheduledExecutors.Signal.STOP;
                    } else {
                        return ScheduledExecutors.Signal.REPEAT;
                    }
                }
            });
}