Example usage for org.joda.time Duration getMillis

List of usage examples for org.joda.time Duration getMillis

Introduction

On this page you can find example usages of the org.joda.time Duration getMillis method.

Prototype

public long getMillis() 

Source Link

Document

Gets the length of this duration in milliseconds.

Usage

From source file:io.druid.indexing.materializedview.MaterializedViewSupervisor.java

License: Apache License

/**
 * Starts this supervisor: ensures derivative-datasource metadata exists in the
 * metadata store, then schedules a recurring task that re-validates the stored
 * metadata against this supervisor's spec and, when it matches, checks segments
 * and submits indexing tasks.
 *
 * <p>Runs under {@code stateLock}; calling it twice throws IllegalStateException.
 */
@Override
public void start() {
    synchronized (stateLock) {
        Preconditions.checkState(!started, "already started");

        DataSourceMetadata metadata = metadataStorageCoordinator.getDataSourceMetadata(dataSource);
        if (null == metadata) {
            // First start for this datasource: persist derivative metadata built from the spec.
            metadataStorageCoordinator.insertDataSourceMetadata(dataSource, new DerivativeDataSourceMetadata(
                    spec.getBaseDataSource(), spec.getDimensions(), spec.getMetrics()));
        }
        exec = MoreExecutors.listeningDecorator(Execs.scheduledSingleThreaded(supervisorId));
        final Duration delay = config.getTaskCheckDuration().toStandardDuration();
        future = exec.scheduleWithFixedDelay(new Runnable() {
            @Override
            public void run() {
                try {
                    // Re-read metadata every cycle; only proceed when the stored derivative
                    // metadata still agrees with this supervisor's spec on base datasource,
                    // dimensions, and metrics.
                    DataSourceMetadata metadata = metadataStorageCoordinator.getDataSourceMetadata(dataSource);
                    if (metadata instanceof DerivativeDataSourceMetadata
                            && spec.getBaseDataSource()
                                    .equals(((DerivativeDataSourceMetadata) metadata).getBaseDataSource())
                            && spec.getDimensions()
                                    .equals(((DerivativeDataSourceMetadata) metadata).getDimensions())
                            && spec.getMetrics()
                                    .equals(((DerivativeDataSourceMetadata) metadata).getMetrics())) {
                        checkSegmentsAndSubmitTasks();
                    } else {
                        log.error(
                                "Failed to start %s. Metadata in database(%s) is different from new dataSource metadata(%s)",
                                supervisorId, metadata, spec);
                    }
                } catch (Exception e) {
                    // Never let an exception kill the scheduled task; raise an alert instead.
                    log.makeAlert(e, StringUtils.format("uncaught exception in %s.", supervisorId)).emit();
                }
            }
        }, 0, delay.getMillis(), TimeUnit.MILLISECONDS);
        started = true;
    }
}

From source file:io.druid.indexing.overlord.autoscaling.PendingTaskBasedWorkerResourceManagementStrategy.java

License: Apache License

/**
 * Scales the worker pool up when pending tasks require it and no provisioning
 * is already in flight.
 *
 * <p>Nodes that have shown up as live workers are dropped from the in-flight
 * provisioning set. If provisioning has been pending longer than the configured
 * maximum, an alert is emitted and the stuck nodes are terminated and forgotten.
 *
 * @param runner task runner providing pending task payloads and current workers
 * @return true if at least one new node was requested from the autoscaler
 */
@Override
public boolean doProvision(WorkerTaskRunner runner) {
    Collection<Task> pendingTasks = runner.getPendingTaskPayloads();
    Collection<ImmutableWorkerInfo> workers = runner.getWorkers();
    synchronized (lock) {
        boolean didProvision = false;
        final WorkerBehaviorConfig workerConfig = workerConfigRef.get();
        if (workerConfig == null || workerConfig.getAutoScaler() == null) {
            // warn (not error) for consistency with the other resource-management
            // strategies' handling of a missing workerConfig.
            log.warn("No workerConfig available, cannot provision new workers.");
            return false;
        }

        final Collection<String> workerNodeIds = getWorkerNodeIDs(
                Collections2.transform(workers, new Function<ImmutableWorkerInfo, Worker>() {
                    @Override
                    public Worker apply(ImmutableWorkerInfo input) {
                        return input.getWorker();
                    }
                }), workerConfig);
        // Nodes that are now live workers are no longer "provisioning".
        currentlyProvisioning.removeAll(workerNodeIds);
        if (currentlyProvisioning.isEmpty()) {
            int want = getScaleUpNodeCount(runner.getConfig(), workerConfig, pendingTasks, workers);
            while (want > 0) {
                final AutoScalingData provisioned = workerConfig.getAutoScaler().provision();
                final List<String> newNodes = provisioned == null ? ImmutableList.<String>of()
                        : provisioned.getNodeIds();
                if (newNodes.isEmpty()) {
                    log.warn("NewNodes is empty, returning from provision loop");
                    break;
                } else {
                    currentlyProvisioning.addAll(newNodes);
                    lastProvisionTime = new DateTime();
                    scalingStats.addProvisionEvent(provisioned);
                    want -= provisioned.getNodeIds().size();
                    didProvision = true;
                }
            }
        } else {
            // Provisioning already in flight: don't scale further; alert and abandon
            // nodes that have been pending longer than the configured maximum.
            Duration durSinceLastProvision = new Duration(lastProvisionTime, new DateTime());
            log.info("%s provisioning. Current wait time: %s", currentlyProvisioning, durSinceLastProvision);
            if (durSinceLastProvision.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node provisioning taking too long!")
                        .addData("millisSinceLastProvision", durSinceLastProvision.getMillis())
                        .addData("provisioningCount", currentlyProvisioning.size()).emit();

                workerConfig.getAutoScaler().terminateWithIds(Lists.newArrayList(currentlyProvisioning));
                currentlyProvisioning.clear();
            }
        }

        return didProvision;
    }
}

From source file:io.druid.indexing.overlord.autoscaling.PendingTaskBasedWorkerResourceManagementStrategy.java

License: Apache License

/**
 * Terminates lazy workers, but only when no provisioning is in flight and no
 * prior termination is still pending.
 *
 * <p>If an earlier termination has been pending longer than the configured
 * maximum, an alert is emitted and the pending set is reset.
 *
 * @param runner task runner providing current and lazy workers
 * @return true if a termination request was issued to the autoscaler
 */
@Override
public boolean doTerminate(WorkerTaskRunner runner) {
    Collection<ImmutableWorkerInfo> zkWorkers = runner.getWorkers();
    synchronized (lock) {
        final WorkerBehaviorConfig workerConfig = workerConfigRef.get();
        if (workerConfig == null) {
            log.warn("No workerConfig available, cannot terminate workers.");
            return false;
        }

        if (!currentlyProvisioning.isEmpty()) {
            // Never scale down while a scale-up is still in flight.
            log.debug("Already provisioning nodes, Not Terminating any nodes.");
            return false;
        }

        boolean didTerminate = false;
        final Collection<String> workerNodeIds = getWorkerNodeIDs(runner.getLazyWorkers(), workerConfig);
        // Keep only in-flight terminations whose nodes still exist.
        final Set<String> stillExisting = Sets.newHashSet();
        for (String s : currentlyTerminating) {
            if (workerNodeIds.contains(s)) {
                stillExisting.add(s);
            }
        }
        currentlyTerminating.clear();
        currentlyTerminating.addAll(stillExisting);

        if (currentlyTerminating.isEmpty()) {
            // Nothing pending: mark up to maxWorkersToTerminate workers lazy and
            // terminate them by IP.
            final int maxWorkersToTerminate = maxWorkersToTerminate(zkWorkers, workerConfig);
            final Predicate<ImmutableWorkerInfo> isLazyWorker = ResourceManagementUtil
                    .createLazyWorkerPredicate(config);
            final List<String> laziestWorkerIps = Lists.newArrayList(
                    Collections2.transform(runner.markWorkersLazy(isLazyWorker, maxWorkersToTerminate),
                            new Function<Worker, String>() {
                                @Override
                                public String apply(Worker zkWorker) {
                                    return zkWorker.getIp();
                                }
                            }));
            if (laziestWorkerIps.isEmpty()) {
                log.debug("Found no lazy workers");
            } else {
                log.info("Terminating %,d lazy workers: %s", laziestWorkerIps.size(),
                        Joiner.on(", ").join(laziestWorkerIps));

                final AutoScalingData terminated = workerConfig.getAutoScaler().terminate(laziestWorkerIps);
                if (terminated != null) {
                    currentlyTerminating.addAll(terminated.getNodeIds());
                    lastTerminateTime = new DateTime();
                    scalingStats.addTerminateEvent(terminated);
                    didTerminate = true;
                }
            }
        } else {
            // Termination already pending: alert and reset if it is taking too long.
            Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());

            log.info("%s terminating. Current wait time: %s", currentlyTerminating, durSinceLastTerminate);

            if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node termination taking too long!")
                        .addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
                        .addData("terminatingCount", currentlyTerminating.size()).emit();

                currentlyTerminating.clear();
            }
        }

        return didTerminate;
    }
}

From source file:io.druid.indexing.overlord.autoscaling.SimpleResourceManagementStrategy.java

License: Apache License

/**
 * Scales the worker pool up toward the target count. Nodes that have appeared
 * as live workers are removed from the in-flight provisioning set; additional
 * nodes are provisioned while the target is unmet; if a provision has been
 * pending longer than the configured maximum, an alert is emitted and the
 * stuck nodes are terminated and forgotten.
 *
 * @param runner remote task runner holding pending tasks and known workers
 * @return true if at least one new node was requested from the autoscaler
 */
@Override
public boolean doProvision(RemoteTaskRunner runner) {
    Collection<RemoteTaskRunnerWorkItem> pendingTasks = runner.getPendingTasks();
    Collection<ZkWorker> zkWorkers = runner.getWorkers();
    synchronized (lock) {
        final WorkerBehaviorConfig workerConfig = workerConfigRef.get();
        if (workerConfig == null || workerConfig.getAutoScaler() == null) {
            log.warn("No workerConfig available, cannot provision new workers.");
            return false;
        }

        boolean didProvision = false;
        final Predicate<ZkWorker> isValidWorker = createValidWorkerPredicate(config);
        final int validWorkerCount = Collections2.filter(zkWorkers, isValidWorker).size();

        // Map live worker IPs back to autoscaler node IDs; those are no longer pending.
        final List<String> knownNodeIds = workerConfig.getAutoScaler().ipToIdLookup(Lists.newArrayList(
                Iterables.<ZkWorker, String>transform(zkWorkers, new Function<ZkWorker, String>() {
                    @Override
                    public String apply(ZkWorker zkWorker) {
                        return zkWorker.getWorker().getIp();
                    }
                })));
        currentlyProvisioning.removeAll(knownNodeIds);

        updateTargetWorkerCount(workerConfig, pendingTasks, zkWorkers);

        int remaining = targetWorkerCount - (validWorkerCount + currentlyProvisioning.size());
        while (remaining > 0) {
            final AutoScalingData provisioned = workerConfig.getAutoScaler().provision();
            if (provisioned == null) {
                break;
            }
            final List<String> freshNodes = provisioned.getNodeIds();
            if (freshNodes.isEmpty()) {
                break;
            }
            currentlyProvisioning.addAll(freshNodes);
            lastProvisionTime = new DateTime();
            scalingStats.addProvisionEvent(provisioned);
            remaining -= provisioned.getNodeIds().size();
            didProvision = true;
        }

        if (!currentlyProvisioning.isEmpty()) {
            final Duration pendingFor = new Duration(lastProvisionTime, new DateTime());

            log.info("%s provisioning. Current wait time: %s", currentlyProvisioning, pendingFor);

            if (pendingFor.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node provisioning taking too long!")
                        .addData("millisSinceLastProvision", pendingFor.getMillis())
                        .addData("provisioningCount", currentlyProvisioning.size()).emit();

                workerConfig.getAutoScaler().terminateWithIds(Lists.newArrayList(currentlyProvisioning));
                currentlyProvisioning.clear();
            }
        }

        return didProvision;
    }
}

From source file:io.druid.indexing.overlord.autoscaling.SimpleResourceManagementStrategy.java

License: Apache License

/**
 * Scales the worker pool down toward the target count by marking excess
 * workers lazy and terminating them, but only when no prior termination is
 * still pending. A pending termination that exceeds the configured maximum
 * duration triggers an alert and is abandoned.
 *
 * @param runner remote task runner holding pending tasks, workers, lazy workers
 * @return true if a termination request was issued to the autoscaler
 */
@Override
public boolean doTerminate(RemoteTaskRunner runner) {
    Collection<RemoteTaskRunnerWorkItem> pendingTasks = runner.getPendingTasks();
    synchronized (lock) {
        final WorkerBehaviorConfig workerConfig = workerConfigRef.get();
        if (workerConfig == null) {
            log.warn("No workerConfig available, cannot terminate workers.");
            return false;
        }

        boolean didTerminate = false;
        // Resolve lazy-worker IPs to autoscaler node IDs.
        final Set<String> workerNodeIds = Sets.newHashSet(workerConfig.getAutoScaler().ipToIdLookup(Lists
                .newArrayList(Iterables.transform(runner.getLazyWorkers(), new Function<ZkWorker, String>() {
                    @Override
                    public String apply(ZkWorker input) {
                        return input.getWorker().getIp();
                    }
                }))));

        // Keep only in-flight terminations whose nodes still exist.
        final Set<String> stillExisting = Sets.newHashSet();
        for (String s : currentlyTerminating) {
            if (workerNodeIds.contains(s)) {
                stillExisting.add(s);
            }
        }
        currentlyTerminating.clear();
        currentlyTerminating.addAll(stillExisting);

        Collection<ZkWorker> workers = runner.getWorkers();
        updateTargetWorkerCount(workerConfig, pendingTasks, workers);

        if (currentlyTerminating.isEmpty()) {

            // In-flight provisions count toward capacity so we don't over-terminate.
            final int excessWorkers = (workers.size() + currentlyProvisioning.size()) - targetWorkerCount;
            if (excessWorkers > 0) {
                final Predicate<ZkWorker> isLazyWorker = createLazyWorkerPredicate(config);
                final List<String> laziestWorkerIps = Lists.transform(
                        runner.markWorkersLazy(isLazyWorker, excessWorkers), new Function<ZkWorker, String>() {
                            @Override
                            public String apply(ZkWorker zkWorker) {
                                return zkWorker.getWorker().getIp();
                            }
                        });
                if (laziestWorkerIps.isEmpty()) {
                    log.info("Wanted to terminate %,d workers, but couldn't find any lazy ones!",
                            excessWorkers);
                } else {
                    log.info("Terminating %,d workers (wanted %,d): %s", laziestWorkerIps.size(), excessWorkers,
                            Joiner.on(", ").join(laziestWorkerIps));

                    final AutoScalingData terminated = workerConfig.getAutoScaler().terminate(laziestWorkerIps);
                    if (terminated != null) {
                        currentlyTerminating.addAll(terminated.getNodeIds());
                        lastTerminateTime = new DateTime();
                        scalingStats.addTerminateEvent(terminated);
                        didTerminate = true;
                    }
                }
            }
        } else {
            // Termination already pending: alert and reset if it is taking too long.
            Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());

            log.info("%s terminating. Current wait time: %s", currentlyTerminating, durSinceLastTerminate);

            if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node termination taking too long!")
                        .addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
                        .addData("terminatingCount", currentlyTerminating.size()).emit();

                currentlyTerminating.clear();
            }
        }

        return didTerminate;
    }
}

From source file:io.druid.indexing.overlord.autoscaling.SimpleWorkerResourceManagementStrategy.java

License: Apache License

/**
 * Scales the worker pool up toward the target count. Live workers are removed
 * from the in-flight provisioning set, the target is refreshed from pending
 * tasks, and new nodes are provisioned to cover the shortfall. Provisions
 * pending longer than the configured maximum raise an alert and are abandoned.
 *
 * @param runner task runner providing pending tasks and current workers
 * @return true if at least one new node was requested from the autoscaler
 */
protected boolean doProvision(WorkerTaskRunner runner) {
    Collection<? extends TaskRunnerWorkItem> pendingTasks = runner.getPendingTasks();
    Collection<ImmutableWorkerInfo> workers = getWorkers(runner);
    synchronized (lock) {
        final WorkerBehaviorConfig workerConfig = workerConfigRef.get();
        if (workerConfig == null || workerConfig.getAutoScaler() == null) {
            log.warn("No workerConfig available, cannot provision new workers.");
            return false;
        }

        final Predicate<ImmutableWorkerInfo> isValidWorker = ResourceManagementUtil
                .createValidWorkerPredicate(config);
        final int healthyCount = Collections2.filter(workers, isValidWorker).size();

        // Workers visible to the runner are no longer "provisioning".
        final List<String> resolvedIds = workerConfig.getAutoScaler().ipToIdLookup(
                Lists.newArrayList(Iterables.transform(workers, new Function<ImmutableWorkerInfo, String>() {
                    @Override
                    public String apply(ImmutableWorkerInfo workerInfo) {
                        return workerInfo.getWorker().getIp();
                    }
                })));
        currentlyProvisioning.removeAll(resolvedIds);

        updateTargetWorkerCount(workerConfig, pendingTasks, workers);

        boolean didProvision = false;
        int shortfall = targetWorkerCount - (healthyCount + currentlyProvisioning.size());
        while (shortfall > 0) {
            final AutoScalingData provisioned = workerConfig.getAutoScaler().provision();
            final List<String> nodeIds = provisioned == null ? null : provisioned.getNodeIds();
            if (nodeIds == null || nodeIds.isEmpty()) {
                break;
            }
            currentlyProvisioning.addAll(nodeIds);
            lastProvisionTime = new DateTime();
            scalingStats.addProvisionEvent(provisioned);
            shortfall -= provisioned.getNodeIds().size();
            didProvision = true;
        }

        if (!currentlyProvisioning.isEmpty()) {
            final Duration waitTime = new Duration(lastProvisionTime, new DateTime());

            log.info("%s provisioning. Current wait time: %s", currentlyProvisioning, waitTime);

            if (waitTime.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node provisioning taking too long!")
                        .addData("millisSinceLastProvision", waitTime.getMillis())
                        .addData("provisioningCount", currentlyProvisioning.size()).emit();

                workerConfig.getAutoScaler().terminateWithIds(Lists.newArrayList(currentlyProvisioning));
                currentlyProvisioning.clear();
            }
        }

        return didProvision;
    }
}

From source file:io.druid.indexing.overlord.autoscaling.SimpleWorkerResourceManagementStrategy.java

License: Apache License

/**
 * Scales the worker pool down toward the target count by marking excess
 * workers lazy and terminating them, but only when no prior termination is
 * still pending. A pending termination that exceeds the configured maximum
 * duration triggers an alert and is abandoned.
 *
 * @param runner task runner providing pending tasks, workers, and lazy workers
 * @return true if a termination request was issued to the autoscaler
 */
boolean doTerminate(WorkerTaskRunner runner) {
    Collection<? extends TaskRunnerWorkItem> pendingTasks = runner.getPendingTasks();
    synchronized (lock) {
        final WorkerBehaviorConfig workerConfig = workerConfigRef.get();
        if (workerConfig == null) {
            log.warn("No workerConfig available, cannot terminate workers.");
            return false;
        }

        boolean didTerminate = false;
        // Resolve lazy-worker IPs to autoscaler node IDs.
        final Set<String> workerNodeIds = Sets.newHashSet(workerConfig.getAutoScaler().ipToIdLookup(
                Lists.newArrayList(Iterables.transform(runner.getLazyWorkers(), new Function<Worker, String>() {
                    @Override
                    public String apply(Worker input) {
                        return input.getIp();
                    }
                }))));

        // Keep only in-flight terminations whose nodes still exist.
        final Set<String> stillExisting = Sets.newHashSet();
        for (String s : currentlyTerminating) {
            if (workerNodeIds.contains(s)) {
                stillExisting.add(s);
            }
        }
        currentlyTerminating.clear();
        currentlyTerminating.addAll(stillExisting);

        Collection<ImmutableWorkerInfo> workers = getWorkers(runner);
        updateTargetWorkerCount(workerConfig, pendingTasks, workers);

        if (currentlyTerminating.isEmpty()) {

            // In-flight provisions count toward capacity so we don't over-terminate.
            final int excessWorkers = (workers.size() + currentlyProvisioning.size()) - targetWorkerCount;
            if (excessWorkers > 0) {
                final Predicate<ImmutableWorkerInfo> isLazyWorker = ResourceManagementUtil
                        .createLazyWorkerPredicate(config);
                final Collection<String> laziestWorkerIps = Collections2.transform(
                        runner.markWorkersLazy(isLazyWorker, excessWorkers), new Function<Worker, String>() {
                            @Override
                            public String apply(Worker worker) {
                                return worker.getIp();
                            }
                        });
                if (laziestWorkerIps.isEmpty()) {
                    log.info("Wanted to terminate %,d workers, but couldn't find any lazy ones!",
                            excessWorkers);
                } else {
                    log.info("Terminating %,d workers (wanted %,d): %s", laziestWorkerIps.size(), excessWorkers,
                            Joiner.on(", ").join(laziestWorkerIps));

                    final AutoScalingData terminated = workerConfig.getAutoScaler()
                            .terminate(ImmutableList.copyOf(laziestWorkerIps));
                    if (terminated != null) {
                        currentlyTerminating.addAll(terminated.getNodeIds());
                        lastTerminateTime = new DateTime();
                        scalingStats.addTerminateEvent(terminated);
                        didTerminate = true;
                    }
                }
            }
        } else {
            // Termination already pending: alert and reset if it is taking too long.
            Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());

            log.info("%s terminating. Current wait time: %s", currentlyTerminating, durSinceLastTerminate);

            if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node termination taking too long!")
                        .addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
                        .addData("terminatingCount", currentlyTerminating.size()).emit();

                currentlyTerminating.clear();
            }
        }

        return didTerminate;
    }
}

From source file:io.druid.indexing.overlord.scaling.SimpleResourceManagementStrategy.java

License: Open Source License

/**
 * Scales the worker pool up toward the target count using the configured
 * autoscaling strategy. Live workers are dropped from the in-flight
 * provisioning set; nodes are provisioned to cover the deficit; provisions
 * pending longer than the configured maximum raise an alert and are abandoned.
 *
 * @param pendingTasks work items waiting to run
 * @param zkWorkers    workers currently registered in ZooKeeper
 * @return true if at least one new node was requested from the autoscaler
 */
@Override
public boolean doProvision(Collection<RemoteTaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers) {
    synchronized (lock) {
        final WorkerSetupData workerSetupData = workerSetupDataRef.get();
        if (workerSetupData == null) {
            log.warn("No workerSetupData available, cannot provision new workers.");
            return false;
        }

        final Predicate<ZkWorker> isValidWorker = createValidWorkerPredicate(config, workerSetupData);
        final int usableWorkers = Collections2.filter(zkWorkers, isValidWorker).size();

        // Map live worker IPs back to autoscaler node IDs; those are no longer pending.
        final List<String> liveNodeIds = autoScalingStrategy.ipToIdLookup(Lists.newArrayList(
                Iterables.<ZkWorker, String>transform(zkWorkers, new Function<ZkWorker, String>() {
                    @Override
                    public String apply(ZkWorker worker) {
                        return worker.getWorker().getIp();
                    }
                })));
        currentlyProvisioning.removeAll(liveNodeIds);

        updateTargetWorkerCount(workerSetupData, pendingTasks, zkWorkers);

        boolean didProvision = false;
        int deficit = targetWorkerCount - (usableWorkers + currentlyProvisioning.size());
        while (deficit > 0) {
            final AutoScalingData provisioned = autoScalingStrategy.provision();
            if (provisioned == null) {
                break;
            }
            final List<String> added = provisioned.getNodeIds();
            if (added.isEmpty()) {
                break;
            }
            currentlyProvisioning.addAll(added);
            lastProvisionTime = new DateTime();
            scalingStats.addProvisionEvent(provisioned);
            deficit -= provisioned.getNodeIds().size();
            didProvision = true;
        }

        if (!currentlyProvisioning.isEmpty()) {
            final Duration pendingFor = new Duration(lastProvisionTime, new DateTime());

            log.info("%s provisioning. Current wait time: %s", currentlyProvisioning, pendingFor);

            if (pendingFor.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node provisioning taking too long!")
                        .addData("millisSinceLastProvision", pendingFor.getMillis())
                        .addData("provisioningCount", currentlyProvisioning.size()).emit();

                autoScalingStrategy.terminateWithIds(Lists.newArrayList(currentlyProvisioning));
                currentlyProvisioning.clear();
            }
        }

        return didProvision;
    }
}

From source file:io.druid.indexing.overlord.scaling.SimpleResourceManagementStrategy.java

License: Open Source License

/**
 * Scales the worker pool down toward the target count by terminating lazy
 * workers, but only when no prior termination is still pending. A pending
 * termination that exceeds the configured maximum duration triggers an alert
 * and is abandoned.
 *
 * @param pendingTasks work items waiting to run
 * @param zkWorkers    workers currently registered in ZooKeeper
 * @return true if a termination request was issued to the autoscaler
 */
@Override
public boolean doTerminate(Collection<RemoteTaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers) {
    synchronized (lock) {
        final WorkerSetupData workerSetupData = workerSetupDataRef.get();
        if (workerSetupData == null) {
            log.warn("No workerSetupData available, cannot terminate workers.");
            return false;
        }

        boolean didTerminate = false;
        // Resolve worker IPs to autoscaler node IDs.
        final Set<String> workerNodeIds = Sets.newHashSet(autoScalingStrategy.ipToIdLookup(
                Lists.newArrayList(Iterables.transform(zkWorkers, new Function<ZkWorker, String>() {
                    @Override
                    public String apply(ZkWorker input) {
                        return input.getWorker().getIp();
                    }
                }))));

        // Keep only in-flight terminations whose nodes still exist.
        final Set<String> stillExisting = Sets.newHashSet();
        for (String s : currentlyTerminating) {
            if (workerNodeIds.contains(s)) {
                stillExisting.add(s);
            }
        }
        currentlyTerminating.clear();
        currentlyTerminating.addAll(stillExisting);

        updateTargetWorkerCount(workerSetupData, pendingTasks, zkWorkers);

        final Predicate<ZkWorker> isLazyWorker = createLazyWorkerPredicate(config, workerSetupData);
        if (currentlyTerminating.isEmpty()) {
            // In-flight provisions count toward capacity so we don't over-terminate.
            final int excessWorkers = (zkWorkers.size() + currentlyProvisioning.size()) - targetWorkerCount;
            if (excessWorkers > 0) {
                final List<String> laziestWorkerIps = FluentIterable.from(zkWorkers).filter(isLazyWorker)
                        .limit(excessWorkers).transform(new Function<ZkWorker, String>() {
                            @Override
                            public String apply(ZkWorker zkWorker) {
                                return zkWorker.getWorker().getIp();
                            }
                        }).toList();

                if (laziestWorkerIps.isEmpty()) {
                    log.info("Wanted to terminate %,d workers, but couldn't find any lazy ones!",
                            excessWorkers);
                } else {
                    log.info("Terminating %,d workers (wanted %,d): %s", laziestWorkerIps.size(), excessWorkers,
                            Joiner.on(", ").join(laziestWorkerIps));

                    final AutoScalingData terminated = autoScalingStrategy.terminate(laziestWorkerIps);
                    if (terminated != null) {
                        currentlyTerminating.addAll(terminated.getNodeIds());
                        lastTerminateTime = new DateTime();
                        scalingStats.addTerminateEvent(terminated);
                        didTerminate = true;
                    }
                }
            }
        } else {
            // Termination already pending: alert and reset if it is taking too long.
            Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());

            log.info("%s terminating. Current wait time: %s", currentlyTerminating, durSinceLastTerminate);

            if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
                log.makeAlert("Worker node termination taking too long!")
                        .addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
                        .addData("terminatingCount", currentlyTerminating.size()).emit();

                currentlyTerminating.clear();
            }
        }

        return didTerminate;
    }
}

From source file:io.druid.java.util.http.client.NettyHttpClient.java

License: Apache License

/**
 * Creates a Netty-backed HTTP client.
 *
 * @param pool               channel pool keyed by host; must not be null
 * @param defaultReadTimeout read timeout applied when a request specifies none;
 *                           may be null or non-positive, meaning no default timeout
 * @param compressionCodec   compression codec for responses; must not be null
 * @param timer              used to enforce read timeouts; required (non-null)
 *                           whenever a positive defaultReadTimeout is configured
 */
NettyHttpClient(ResourcePool<String, ChannelFuture> pool, Duration defaultReadTimeout,
        HttpClientConfig.CompressionCodec compressionCodec, Timer timer) {
    this.pool = Preconditions.checkNotNull(pool, "pool");
    this.defaultReadTimeout = defaultReadTimeout;
    // Name the argument in the NPE message, consistent with the pool check above.
    this.compressionCodec = Preconditions.checkNotNull(compressionCodec, "compressionCodec");
    this.timer = timer;

    // A positive default read timeout can only be enforced if a timer exists.
    if (defaultReadTimeout != null && defaultReadTimeout.getMillis() > 0) {
        Preconditions.checkNotNull(timer, "timer");
    }
}