Example usage for org.joda.time Duration getMillis

Introduction

On this page you can find example usages of org.joda.time Duration.getMillis.

Prototype

public long getMillis() 

Document

Gets the length of this duration in milliseconds.
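
Before the per-project examples, a minimal, self-contained sketch of the method itself: getMillis() flattens a Joda-Time Duration into its total length in milliseconds, which is the form most JDK scheduling and timing APIs expect.

import org.joda.time.Duration;

public class GetMillisDemo {
    public static void main(String[] args) {
        Duration thirtySeconds = Duration.standardSeconds(30);
        // getMillis() returns the total length as a long: 30000
        System.out.println(thirtySeconds.getMillis());
    }
}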

Usage

From source file: com.mastfrog.giulius.settings.etcd.EtcdSettings.java

License: Open Source License

/**
 * Create a Settings which takes its values from an etcd subnode.
 *
 * @param client An etcd client
 * @param namespace The subnode under which to find settings, like /mystuff
 * @param svc A thread pool to use for refreshing data
 * @param interval The frequency with which to refresh data
 * @param errs An error handler to log exceptions - don't want to dictate
 * how that is handled here
 */
@Inject
public EtcdSettings(MetaEtcdClient client, @Named(SETTINGS_KEY_ETCD_NAMESPACE) String namespace,
        @Named(GUICE_BINDING_ETCD_REFRESH_THREAD_POOL) ScheduledExecutorService svc,
        @Named(GUICE_BINDING_ETCD_REFRESH_INTERVAL) Duration interval, EtcdErrorHandler errs) {
    this.namespace = namespace.startsWith("/") ? namespace : '/' + namespace;
    pairs.set(new HashMap<String, String>());
    Refresher r = new Refresher(pairs, namespace, client, errs);
    r.run();
    svc.scheduleAtFixedRate(r, interval.getMillis(), interval.getMillis(), TimeUnit.MILLISECONDS);
}
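
The constructor runs one synchronous refresh, then hands off to a JDK ScheduledExecutorService, which only accepts primitive delays; getMillis() does the bridging. A reduced sketch of that conversion pattern, with illustrative names that are not from the EtcdSettings source:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.joda.time.Duration;

public class RefreshScheduleSketch {
    public static void main(String[] args) {
        ScheduledExecutorService svc = Executors.newSingleThreadScheduledExecutor();
        Duration interval = Duration.standardSeconds(30); // hypothetical refresh interval
        Runnable refresher = () -> System.out.println("refreshing settings...");
        // Same pattern as above: Joda Duration -> long millis -> JDK scheduler
        svc.scheduleAtFixedRate(refresher, interval.getMillis(), interval.getMillis(),
                TimeUnit.MILLISECONDS);
    }
}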

From source file: com.mastfrog.netty.http.client.HttpClient.java

License: Open Source License

private void submit(final URL url, HttpRequest rq, final AtomicBoolean cancelled, final ResponseFuture handle,
        final ResponseHandler<?> r, RequestInfo info, Duration timeout, boolean noAggregate) {
    if (info != null && info.isExpired()) {
        cancelled.set(true);
    }
    if (cancelled.get()) {
        handle.event(new State.Cancelled());
        return;
    }
    try {
        for (RequestInterceptor i : interceptors) {
            rq = i.intercept(rq);
        }
        final HttpRequest req = rq;
        Bootstrap bootstrap;
        if (url.getProtocol().isSecure()) {
            bootstrap = startSsl(url.getHostAndPort());
        } else {
            bootstrap = start(url.getHostAndPort());
        }
        if (!url.isValid()) {
            throw new IllegalArgumentException(url.getProblems() + "");
        }
        TimeoutTimerTask tt = null;
        if (info == null) {
            info = new RequestInfo(url, req, cancelled, handle, r, timeout, tt, noAggregate);
            if (timeout != null) {
                tt = new TimeoutTimerTask(cancelled, handle, r, info);
                timer.schedule(tt, timeout.getMillis());
            }
            info.timer = tt;
        }
        if (info.isExpired()) {
            handle.event(new State.Timeout(info.age()));
            return;
        }
        handle.event(new State.Connecting());
        //XXX who is escaping this?
        req.setUri(req.getUri().replaceAll("%5f", "_"));
        ChannelFuture fut = bootstrap.connect(url.getHost().toString(), url.getPort().intValue());
        if (tt != null) {
            fut.channel().closeFuture().addListener(tt);
        }
        fut.channel().attr(KEY).set(info);
        handle.setFuture(fut);
        if (!monitors.isEmpty()) {
            for (ActivityMonitor m : monitors) {
                m.onStartRequest(url);
            }
            fut.channel().closeFuture().addListener(new AdapterCloseNotifier(url));
        }

        fut.addListener(new ChannelFutureListener() {

            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (!future.isSuccess()) {
                    Throwable cause = future.cause();
                    if (cause == null) {
                        cause = new ConnectException(url.getHost().toString());
                    }
                    handle.event(new State.Error(cause));
                    if (r != null) {
                        r.onError(cause);
                    }
                    cancelled.set(true);
                }
                if (cancelled.get()) {
                    future.cancel(true);
                    if (future.channel().isOpen()) {
                        future.channel().close();
                    }
                    for (ActivityMonitor m : monitors) {
                        m.onEndRequest(url);
                    }
                    return;
                }
                handle.event(new State.Connected(future.channel()));
                handle.event(new State.SendRequest(req));
                future = future.channel().writeAndFlush(req);
                future.addListener(new ChannelFutureListener() {

                    @Override
                    public void operationComplete(ChannelFuture future) throws Exception {
                        if (cancelled.get()) {
                            future.cancel(true);
                            future.channel().close();
                        }
                        handle.event(new State.AwaitingResponse());
                    }

                });
            }

        });
    } catch (Exception ex) {
        Exceptions.chuck(ex);
    }
}
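
In submit(), getMillis() feeds a java.util.Timer: the Joda timeout Duration becomes a one-shot delay after which the request is marked cancelled. A reduced sketch of that timeout pattern, with a plain TimerTask standing in for the client's TimeoutTimerTask:

import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.atomic.AtomicBoolean;

import org.joda.time.Duration;

public class TimeoutSketch {
    public static void main(String[] args) throws InterruptedException {
        Timer timer = new Timer();
        AtomicBoolean cancelled = new AtomicBoolean();
        Duration timeout = Duration.millis(100); // hypothetical request timeout
        timer.schedule(new TimerTask() {
            @Override
            public void run() {
                cancelled.set(true); // fires once, timeout.getMillis() ms from now
                timer.cancel();
            }
        }, timeout.getMillis());
        Thread.sleep(200);
        System.out.println("cancelled: " + cancelled.get());
    }
}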

From source file: com.mastfrog.netty.http.client.RequestBuilder.java

License: Open Source License

@Override
public HttpRequestBuilder setTimeout(Duration timeout) {
    if (timeout != null && timeout.getMillis() == 0) {
        throw new IllegalArgumentException("Cannot set timeout to 0");
    }
    this.timeout = timeout;
    return this;
}
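
The guard distinguishes "no timeout" (null, which is accepted) from a zero-length timeout, which is rejected because every request would expire immediately. A minimal sketch of the values involved:

import org.joda.time.Duration;

public class TimeoutValidationSketch {
    public static void main(String[] args) {
        System.out.println(Duration.standardSeconds(10).getMillis()); // 10000: passes the check
        System.out.println(Duration.ZERO.getMillis()); // 0: would trigger the IllegalArgumentException
    }
}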

From source file: com.metamx.common.concurrent.ScheduledExecutors.java

License: Apache License

/**
 * Run callable repeatedly with the given delay between calls, until it
 * returns Signal.STOP. Exceptions are caught and logged as errors.
 */
public static void scheduleWithFixedDelay(final ScheduledExecutorService exec, final Duration initialDelay,
        final Duration delay, final Callable<Signal> callable) {
    log.debug("Scheduling repeatedly: %s with delay %s", callable, delay);
    exec.schedule(new Runnable() {
        @Override
        public void run() {
            try {
                log.debug("Running %s (delay %s)", callable, delay);
                if (callable.call() == Signal.REPEAT) {
                    log.debug("Rescheduling %s (delay %s)", callable, delay);
                    exec.schedule(this, delay.getMillis(), TimeUnit.MILLISECONDS);
                } else {
                    log.debug("Stopped rescheduling %s (delay %s)", callable, delay);
                }
            } catch (Throwable e) {
                log.error(e, "Uncaught exception.");
            }
        }
    }, initialDelay.getMillis(), TimeUnit.MILLISECONDS);
}
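
A hedged usage sketch of the helper above (Signal is assumed to be the nested REPEAT/STOP enum on ScheduledExecutors, matching the unqualified references in the source): the caller stays in Joda types, and getMillis() is applied only at the JDK boundary.

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

import org.joda.time.Duration;

import com.metamx.common.concurrent.ScheduledExecutors;

public class FixedDelaySketch {
    public static void main(String[] args) {
        ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();
        // First run after 1 second, then 5 seconds after each completion,
        // until the callable returns STOP
        ScheduledExecutors.scheduleWithFixedDelay(exec, Duration.standardSeconds(1),
                Duration.standardSeconds(5), new Callable<ScheduledExecutors.Signal>() {
                    private int runs = 0;

                    @Override
                    public ScheduledExecutors.Signal call() {
                        return ++runs < 3 ? ScheduledExecutors.Signal.REPEAT
                                : ScheduledExecutors.Signal.STOP;
                    }
                });
    }
}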

From source file: com.metamx.common.concurrent.ScheduledExecutors.java

License: Apache License

public static void scheduleAtFixedRate(final ScheduledExecutorService exec, final Duration initialDelay,
        final Duration rate, final Callable<Signal> callable) {
    log.debug("Scheduling periodically: %s with period %s", callable, rate);
    exec.schedule(new Runnable() {
        private volatile Signal prevSignal = null;

        @Override
        public void run() {
            if (prevSignal == null || prevSignal == Signal.REPEAT) {
                exec.schedule(this, rate.getMillis(), TimeUnit.MILLISECONDS);
            }

            try {
                log.debug("Running %s (period %s)", callable, rate);
                prevSignal = callable.call();
            } catch (Throwable e) {
                log.error(e, "Uncaught exception.");
            }
        }
    }, initialDelay.getMillis(), TimeUnit.MILLISECONDS);
}
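
Note the contrast with scheduleWithFixedDelay above: here the next run is scheduled before the callable executes, so a slow callable does not push the schedule back, and prevSignal (the result of the previous run) decides whether rescheduling continues. In both variants getMillis() appears exactly where Joda Durations meet the JDK executor.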

From source file: com.metamx.druid.indexing.coordinator.scaling.SimpleResourceManagementStrategy.java

License: Open Source License

@Override
public boolean doProvision(Collection<RemoteTaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers) {
    final WorkerSetupData workerSetupData = workerSetupdDataRef.get();

    final String minVersion = workerSetupData.getMinVersion() == null ? config.getWorkerVersion()
            : workerSetupData.getMinVersion();
    int maxNumWorkers = workerSetupData.getMaxNumWorkers();

    int currValidWorkers = 0;
    for (ZkWorker zkWorker : zkWorkers) {
        if (zkWorker.isValidVersion(minVersion)) {
            currValidWorkers++;
        }
    }

    if (currValidWorkers >= maxNumWorkers) {
        log.debug("Cannot scale anymore. Num workers = %d, Max num workers = %d", zkWorkers.size(),
                workerSetupdDataRef.get().getMaxNumWorkers());
        return false;
    }

    List<String> workerNodeIds = autoScalingStrategy
            .ipToIdLookup(Lists.newArrayList(Iterables.transform(zkWorkers, new Function<ZkWorker, String>() {
                @Override
                public String apply(ZkWorker input) {
                    return input.getWorker().getIp();
                }
            })));

    currentlyProvisioning.removeAll(workerNodeIds);
    boolean nothingProvisioning = currentlyProvisioning.isEmpty();

    if (nothingProvisioning) {
        if (hasTaskPendingBeyondThreshold(pendingTasks)) {
            AutoScalingData provisioned = autoScalingStrategy.provision();

            if (provisioned != null) {
                currentlyProvisioning.addAll(provisioned.getNodeIds());
                lastProvisionTime = new DateTime();
                scalingStats.addProvisionEvent(provisioned);

                return true;
            }
        }
    } else {
        Duration durSinceLastProvision = new Duration(lastProvisionTime, new DateTime());

        log.info(
                "%s still provisioning. Wait for all provisioned nodes to complete before requesting new worker. Current wait time: %s",
                currentlyProvisioning, durSinceLastProvision);

        if (durSinceLastProvision.isLongerThan(config.getMaxScalingDuration())) {
            log.makeAlert("Worker node provisioning taking too long!")
                    .addData("millisSinceLastProvision", durSinceLastProvision.getMillis())
                    .addData("provisioningCount", currentlyProvisioning.size()).emit();

            List<String> nodeIps = autoScalingStrategy.idToIpLookup(Lists.newArrayList(currentlyProvisioning));
            autoScalingStrategy.terminate(nodeIps);
            currentlyProvisioning.clear();
        }
    }

    return false;
}
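
The provisioning-timeout branch combines three Duration idioms worth isolating: the two-instant constructor measures elapsed time, isLongerThan() compares it against a configured ceiling, and getMillis() flattens it to a long for the alert payload. A minimal sketch, with maxScalingDuration standing in for config.getMaxScalingDuration():

import org.joda.time.DateTime;
import org.joda.time.Duration;

public class ElapsedTimeSketch {
    public static void main(String[] args) throws InterruptedException {
        DateTime lastProvisionTime = new DateTime(); // pretend a provision just happened
        Thread.sleep(50);

        // Interval constructor: elapsed time between two instants (start first, then end)
        Duration sinceLastProvision = new Duration(lastProvisionTime, new DateTime());
        Duration maxScalingDuration = Duration.millis(10); // hypothetical config ceiling

        if (sinceLastProvision.isLongerThan(maxScalingDuration)) {
            // getMillis() turns the elapsed Duration into a loggable long
            System.out.println("millisSinceLastProvision=" + sinceLastProvision.getMillis());
        }
    }
}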

From source file: com.metamx.druid.indexing.coordinator.scaling.SimpleResourceManagementStrategy.java

License: Open Source License

@Override
public boolean doTerminate(Collection<RemoteTaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers) {
    Set<String> workerNodeIds = Sets.newHashSet(autoScalingStrategy
            .ipToIdLookup(Lists.newArrayList(Iterables.transform(zkWorkers, new Function<ZkWorker, String>() {
                @Override
                public String apply(ZkWorker input) {
                    return input.getWorker().getIp();
                }
            }))));

    Set<String> stillExisting = Sets.newHashSet();
    for (String s : currentlyTerminating) {
        if (workerNodeIds.contains(s)) {
            stillExisting.add(s);
        }
    }
    currentlyTerminating.clear();
    currentlyTerminating.addAll(stillExisting);
    boolean nothingTerminating = currentlyTerminating.isEmpty();

    if (nothingTerminating) {
        final int minNumWorkers = workerSetupdDataRef.get().getMinNumWorkers();
        if (zkWorkers.size() <= minNumWorkers) {
            log.info("Only [%d <= %d] nodes in the cluster, not terminating anything.", zkWorkers.size(),
                    minNumWorkers);
            return false;
        }

        List<ZkWorker> thoseLazyWorkers = Lists
                .newArrayList(FunctionalIterable.create(zkWorkers).filter(new Predicate<ZkWorker>() {
                    @Override
                    public boolean apply(ZkWorker input) {
                        return input.getRunningTasks().isEmpty() && System.currentTimeMillis()
                                - input.getLastCompletedTaskTime().getMillis() >= config
                                        .getMaxWorkerIdleTimeMillisBeforeDeletion();
                    }
                }));

        int maxPossibleNodesTerminated = zkWorkers.size() - minNumWorkers;
        int numNodesToTerminate = Math.min(maxPossibleNodesTerminated, thoseLazyWorkers.size());
        if (numNodesToTerminate <= 0) {
            log.info("Found no nodes to terminate.");
            return false;
        }

        AutoScalingData terminated = autoScalingStrategy.terminate(Lists
                .transform(thoseLazyWorkers.subList(0, numNodesToTerminate), new Function<ZkWorker, String>() {
                    @Override
                    public String apply(ZkWorker input) {
                        return input.getWorker().getIp();
                    }
                }));

        if (terminated != null) {
            currentlyTerminating.addAll(terminated.getNodeIds());
            lastTerminateTime = new DateTime();
            scalingStats.addTerminateEvent(terminated);

            return true;
        }
    } else {
        Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());

        log.info("%s still terminating. Wait for all nodes to terminate before trying again.",
                currentlyTerminating);

        if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration())) {
            log.makeAlert("Worker node termination taking too long!")
                    .addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
                    .addData("terminatingCount", currentlyTerminating.size()).emit();

            currentlyTerminating.clear();
        }
    }

    return false;
}

From source file: com.metamx.druid.merger.coordinator.RemoteTaskRunner.java

License: Open Source License

@LifecycleStart
public void start() {
    try {
        workerPathCache.getListenable().addListener(new PathChildrenCacheListener() {
            @Override
            public void childEvent(CuratorFramework client, final PathChildrenCacheEvent event)
                    throws Exception {
                if (event.getType().equals(PathChildrenCacheEvent.Type.CHILD_ADDED)) {
                    final Worker worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    log.info("New worker[%s] found!", worker.getHost());
                    addWorker(worker);
                } else if (event.getType().equals(PathChildrenCacheEvent.Type.CHILD_REMOVED)) {
                    final Worker worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    log.info("Worker[%s] removed!", worker.getHost());
                    removeWorker(worker);
                }
            }
        });
        workerPathCache.start();

        // Schedule termination of worker nodes periodically
        Period period = new Period(config.getTerminateResourcesDuration());
        PeriodGranularity granularity = new PeriodGranularity(period,
                config.getTerminateResourcesOriginDateTime(), null);
        final long startTime = granularity.next(granularity.truncate(new DateTime().getMillis()));

        ScheduledExecutors.scheduleAtFixedRate(scheduledExec,
                new Duration(System.currentTimeMillis(), startTime), config.getTerminateResourcesDuration(),
                new Runnable() {
                    @Override
                    public void run() {
                        if (currentlyTerminating.isEmpty()) {
                            if (zkWorkers.size() <= workerSetupManager.getWorkerSetupData()
                                    .getMinNumWorkers()) {
                                return;
                            }

                            int workerCount = 0;
                            List<WorkerWrapper> thoseLazyWorkers = Lists.newArrayList();
                            for (WorkerWrapper workerWrapper : zkWorkers.values()) {
                                workerCount++;

                                if (workerCount > workerSetupManager.getWorkerSetupData().getMinNumWorkers()
                                        && workerWrapper.getRunningTasks().isEmpty()
                                        && System.currentTimeMillis()
                                                - workerWrapper.getLastCompletedTaskTime().getMillis() > config
                                                        .getMaxWorkerIdleTimeMillisBeforeDeletion()) {
                                    thoseLazyWorkers.add(workerWrapper);
                                }
                            }

                            AutoScalingData terminated = strategy.terminate(
                                    Lists.transform(thoseLazyWorkers, new Function<WorkerWrapper, String>() {
                                        @Override
                                        public String apply(WorkerWrapper input) {
                                            return input.getWorker().getIp();
                                        }
                                    }));

                            if (terminated != null) {
                                currentlyTerminating.addAll(terminated.getNodeIds());
                                lastTerminateTime = new DateTime();
                            }
                        } else {
                            // elapsed time since lastTerminateTime: start instant first, then end
                            Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());
                            if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration())) {
                                log.makeAlert("Worker node termination taking too long")
                                        .addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
                                        .addData("terminatingCount", currentlyTerminating.size()).emit();
                            }

                            log.info(
                                    "%s still terminating. Wait for all nodes to terminate before trying again.",
                                    currentlyTerminating);
                        }
                    }
                });
        started = true;
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
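
One detail worth noting in start(): the initial delay is itself a Duration built from two instant millis, spanning "now" to the next period boundary computed by the granularity, so the periodic termination check fires on aligned boundaries rather than relative to process startup. A reduced sketch of that alignment pattern, with a plain round-up standing in for PeriodGranularity:

import org.joda.time.Duration;

public class AlignedStartSketch {
    public static void main(String[] args) {
        long periodMillis = Duration.standardHours(1).getMillis();
        long now = System.currentTimeMillis();
        // Round up to the next period boundary, as granularity.next(truncate(now)) does above
        long startTime = ((now / periodMillis) + 1) * periodMillis;

        // Duration between two instant millis: the initial delay until that boundary
        Duration initialDelay = new Duration(now, startTime);
        System.out.println("first run in " + initialDelay.getMillis() + " ms");
    }
}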

From source file: com.metamx.druid.merger.coordinator.RemoteTaskRunner.java

License: Open Source License

private WorkerWrapper findWorkerForTask() {
    try {
        final MinMaxPriorityQueue<WorkerWrapper> workerQueue = MinMaxPriorityQueue
                .<WorkerWrapper>orderedBy(new Comparator<WorkerWrapper>() {
                    @Override
                    public int compare(WorkerWrapper w1, WorkerWrapper w2) {
                        return -Ints.compare(w1.getRunningTasks().size(), w2.getRunningTasks().size());
                    }
                }).create(FunctionalIterable.create(zkWorkers.values()).filter(new Predicate<WorkerWrapper>() {
                    @Override
                    public boolean apply(WorkerWrapper input) {
                        return (!input.isAtCapacity() && input.getWorker().getVersion()
                                .compareTo(workerSetupManager.getWorkerSetupData().getMinVersion()) >= 0);
                    }
                }));

        if (workerQueue.isEmpty()) {
            log.info("Worker nodes do not have capacity to run any more tasks!");

            if (currentlyProvisioning.isEmpty()) {
                AutoScalingData provisioned = strategy.provision();
                if (provisioned != null) {
                    currentlyProvisioning.addAll(provisioned.getNodeIds());
                    lastProvisionTime = new DateTime();
                }
            } else {
                // elapsed time since lastProvisionTime: start instant first, then end
                Duration durSinceLastProvision = new Duration(lastProvisionTime, new DateTime());
                if (durSinceLastProvision.isLongerThan(config.getMaxScalingDuration())) {
                    log.makeAlert("Worker node provisioning taking too long")
                            .addData("millisSinceLastProvision", durSinceLastProvision.getMillis())
                            .addData("provisioningCount", currentlyProvisioning.size()).emit();
                }

                log.info(
                        "%s still provisioning. Wait for all provisioned nodes to complete before requesting new worker.",
                        currentlyProvisioning);
            }
            return null;
        }

        return workerQueue.peek();
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}

From source file: com.metamx.druid.merger.coordinator.scaling.SimpleResourceManagementStrategy.java

License: Open Source License

@Override
public boolean doProvision(Collection<TaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers) {
    if (zkWorkers.size() >= workerSetupdDataRef.get().getMaxNumWorkers()) {
        log.info("Cannot scale anymore. Num workers = %d, Max num workers = %d", zkWorkers.size(),
                workerSetupdDataRef.get().getMaxNumWorkers());
        return false;
    }

    List<String> workerNodeIds = autoScalingStrategy
            .ipToIdLookup(Lists.newArrayList(Iterables.transform(zkWorkers, new Function<ZkWorker, String>() {
                @Override
                public String apply(ZkWorker input) {
                    return input.getWorker().getIp();
                }
            })));

    currentlyProvisioning.removeAll(workerNodeIds);
    boolean nothingProvisioning = currentlyProvisioning.isEmpty();

    if (nothingProvisioning) {
        if (hasTaskPendingBeyondThreshold(pendingTasks)) {
            AutoScalingData provisioned = autoScalingStrategy.provision();

            if (provisioned != null) {
                currentlyProvisioning.addAll(provisioned.getNodeIds());
                lastProvisionTime = new DateTime();
                scalingStats.addProvisionEvent(provisioned);

                return true;
            }
        }
    } else {
        Duration durSinceLastProvision = new Duration(lastProvisionTime, new DateTime());

        log.info(
                "%s still provisioning. Wait for all provisioned nodes to complete before requesting new worker. Current wait time: %s",
                currentlyProvisioning, durSinceLastProvision);

        if (durSinceLastProvision.isLongerThan(config.getMaxScalingDuration())) {
            log.makeAlert("Worker node provisioning taking too long!")
                    .addData("millisSinceLastProvision", durSinceLastProvision.getMillis())
                    .addData("provisioningCount", currentlyProvisioning.size()).emit();

            currentlyProvisioning.clear();
        }
    }

    return false;
}