Example usage for org.joda.time Duration getMillis

Introduction

On this page you can find example usages of org.joda.time.Duration#getMillis().

Prototype

public long getMillis() 

Document

Gets the length of this duration in milliseconds.
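As a quick illustration before the indexed sources below, here is a minimal, self-contained sketch (not taken from any of the listed files; the class name GetMillisExample is purely illustrative) showing that getMillis() returns the duration's length as a long count of milliseconds:

import org.joda.time.DateTime;
import org.joda.time.Duration;

public class GetMillisExample {
    public static void main(String[] args) {
        // A fixed three-second duration.
        Duration fixed = Duration.standardSeconds(3);
        System.out.println(fixed.getMillis()); // 3000

        // A duration measured between two instants.
        DateTime start = new DateTime(2015, 1, 1, 0, 0);
        DateTime end = start.plusMinutes(2);
        Duration elapsed = new Duration(start, end);
        System.out.println(elapsed.getMillis()); // 120000
    }
}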

Usage

From source file:io.coala.dsol.util.DsolUtil.java

License:Apache License

/** @return the specified duration in the replication's time unit */
public static double simTime(final TimeUnitInterface timeUnit, final Duration duration) {
    return toTimeUnit(timeUnit, duration.getMillis(), TimeUnitInterface.MILLISECOND).doubleValue();
}

From source file:io.coala.dsol.util.TreatmentBuilder.java

License:Apache License

public TreatmentBuilder withRunLength(final Duration runLength) {
    final TimeUnitInterface toTimeUnit = getTimeUnit();
    return withRunLength(DsolUtil.toTimeUnit(toTimeUnit, runLength.getMillis(), TimeUnitInterface.MILLISECOND)
            .doubleValue());
}

From source file:io.coala.dsol.util.TreatmentBuilder.java

License:Apache License

public TreatmentBuilder withWarmupPeriod(final Duration warmupPeriod) {
    final TimeUnitInterface toTimeUnit = getTimeUnit();
    return withWarmupPeriod(DsolUtil
            .toTimeUnit(toTimeUnit, warmupPeriod.getMillis(), TimeUnitInterface.MILLISECOND).doubleValue());
}

From source file:io.druid.indexing.common.actions.RemoteTaskActionClient.java

License:Apache License

@Override
public <RetType> RetType submit(TaskAction<RetType> taskAction) throws IOException {
    log.info("Performing action for task[%s]: %s", task.getId(), taskAction);

    byte[] dataToSend = jsonMapper.writeValueAsBytes(new TaskActionHolder(task, taskAction));

    final RetryPolicy retryPolicy = retryPolicyFactory.makeRetryPolicy();

    while (true) {
        try {
            final Server server;
            final URI serviceUri;
            try {
                server = getServiceInstance();
                serviceUri = makeServiceUri(server);
            } catch (Exception e) {
                // Want to retry, so throw an IOException.
                throw new IOException("Failed to locate service uri", e);
            }

            final StatusResponseHolder response;

            log.info("Submitting action for task[%s] to overlord[%s]: %s", task.getId(), serviceUri,
                    taskAction);

            try {
                response = httpClient.go(new Request(HttpMethod.POST, serviceUri.toURL()).setContent(
                        MediaType.APPLICATION_JSON, dataToSend), new StatusResponseHandler(Charsets.UTF_8))
                        .get();
            } catch (Exception e) {
                Throwables.propagateIfInstanceOf(e.getCause(), IOException.class);
                Throwables.propagateIfInstanceOf(e.getCause(), ChannelException.class);
                throw Throwables.propagate(e);
            }

            if (response.getStatus().getCode() / 200 == 1) {
                final Map<String, Object> responseDict = jsonMapper.readValue(response.getContent(),
                        new TypeReference<Map<String, Object>>() {
                        });
                return jsonMapper.convertValue(responseDict.get("result"), taskAction.getReturnTypeReference());
            } else {
                // Want to retry, so throw an IOException.
                throw new IOException(String.format(
                        "Scary HTTP status returned: %s. Check your overlord[%s] logs for exceptions.",
                        response.getStatus(), server.getHost()));
            }
        } catch (IOException | ChannelException e) {
            log.warn(e, "Exception submitting action for task[%s]", task.getId());

            final Duration delay = retryPolicy.getAndIncrementRetryDelay();
            if (delay == null) {
                throw e;
            } else {
                try {
                    final long sleepTime = jitter(delay.getMillis());
                    log.info("Will try again in [%s].", new Duration(sleepTime).toString());
                    Thread.sleep(sleepTime);
                } catch (InterruptedException e2) {
                    throw Throwables.propagate(e2);
                }
            }
        }
    }
}

From source file:io.druid.indexing.coordinator.scaling.SimpleResourceManagementStrategy.java

License:Open Source License

@Override
public boolean doProvision(Collection<RemoteTaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers) {
    final WorkerSetupData workerSetupData = workerSetupdDataRef.get();

    final String minVersion = workerSetupData.getMinVersion() == null ? config.getWorkerVersion()
            : workerSetupData.getMinVersion();
    int maxNumWorkers = workerSetupData.getMaxNumWorkers();

    int currValidWorkers = 0;
    for (ZkWorker zkWorker : zkWorkers) {
        if (zkWorker.isValidVersion(minVersion)) {
            currValidWorkers++;
        }
    }

    if (currValidWorkers >= maxNumWorkers) {
        log.debug("Cannot scale anymore. Num workers = %d, Max num workers = %d", zkWorkers.size(),
                workerSetupdDataRef.get().getMaxNumWorkers());
        return false;
    }

    List<String> workerNodeIds = autoScalingStrategy.ipToIdLookup(Lists
            .newArrayList(Iterables.<ZkWorker, String>transform(zkWorkers, new Function<ZkWorker, String>() {
                @Override
                public String apply(ZkWorker input) {
                    return input.getWorker().getIp();
                }
            })));

    currentlyProvisioning.removeAll(workerNodeIds);
    boolean nothingProvisioning = currentlyProvisioning.isEmpty();

    if (nothingProvisioning) {
        if (hasTaskPendingBeyondThreshold(pendingTasks)) {
            AutoScalingData provisioned = autoScalingStrategy.provision();

            if (provisioned != null) {
                currentlyProvisioning.addAll(provisioned.getNodeIds());
                lastProvisionTime = new DateTime();
                scalingStats.addProvisionEvent(provisioned);

                return true;
            }
        }
    } else {
        Duration durSinceLastProvision = new Duration(lastProvisionTime, new DateTime());

        log.info(
                "%s still provisioning. Wait for all provisioned nodes to complete before requesting new worker. Current wait time: %s",
                currentlyProvisioning, durSinceLastProvision);

        if (durSinceLastProvision.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
            log.makeAlert("Worker node provisioning taking too long!")
                    .addData("millisSinceLastProvision", durSinceLastProvision.getMillis())
                    .addData("provisioningCount", currentlyProvisioning.size()).emit();

            List<String> nodeIps = autoScalingStrategy.idToIpLookup(Lists.newArrayList(currentlyProvisioning));
            autoScalingStrategy.terminate(nodeIps);
            currentlyProvisioning.clear();
        }
    }

    return false;
}

From source file:io.druid.indexing.coordinator.scaling.SimpleResourceManagementStrategy.java

License:Open Source License

@Override
public boolean doTerminate(Collection<RemoteTaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers) {
    Set<String> workerNodeIds = Sets.newHashSet(autoScalingStrategy
            .ipToIdLookup(Lists.newArrayList(Iterables.transform(zkWorkers, new Function<ZkWorker, String>() {
                @Override
                public String apply(ZkWorker input) {
                    return input.getWorker().getIp();
                }
            }))));

    Set<String> stillExisting = Sets.newHashSet();
    for (String s : currentlyTerminating) {
        if (workerNodeIds.contains(s)) {
            stillExisting.add(s);
        }
    }
    currentlyTerminating.clear();
    currentlyTerminating.addAll(stillExisting);
    boolean nothingTerminating = currentlyTerminating.isEmpty();

    if (nothingTerminating) {
        final int minNumWorkers = workerSetupdDataRef.get().getMinNumWorkers();
        if (zkWorkers.size() <= minNumWorkers) {
            log.info("Only [%d <= %d] nodes in the cluster, not terminating anything.", zkWorkers.size(),
                    minNumWorkers);
            return false;
        }

        List<ZkWorker> thoseLazyWorkers = Lists
                .newArrayList(FunctionalIterable.create(zkWorkers).filter(new Predicate<ZkWorker>() {
                    @Override
                    public boolean apply(ZkWorker input) {
                        return input.getRunningTasks().isEmpty() && System.currentTimeMillis()
                                - input.getLastCompletedTaskTime().getMillis() >= config.getWorkerIdleTimeout()
                                        .getMillis();
                    }
                }));

        int maxPossibleNodesTerminated = zkWorkers.size() - minNumWorkers;
        int numNodesToTerminate = Math.min(maxPossibleNodesTerminated, thoseLazyWorkers.size());
        if (numNodesToTerminate <= 0) {
            log.info("Found no nodes to terminate.");
            return false;
        }

        AutoScalingData terminated = autoScalingStrategy.terminate(Lists
                .transform(thoseLazyWorkers.subList(0, numNodesToTerminate), new Function<ZkWorker, String>() {
                    @Override
                    public String apply(ZkWorker input) {
                        return input.getWorker().getIp();
                    }
                }));

        if (terminated != null) {
            currentlyTerminating.addAll(terminated.getNodeIds());
            lastTerminateTime = new DateTime();
            scalingStats.addTerminateEvent(terminated);

            return true;
        }
    } else {
        Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());

        log.info("%s still terminating. Wait for all nodes to terminate before trying again.",
                currentlyTerminating);

        if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
            log.makeAlert("Worker node termination taking too long!")
                    .addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
                    .addData("terminatingCount", currentlyTerminating.size()).emit();

            currentlyTerminating.clear();
        }
    }

    return false;
}

From source file:io.druid.indexing.jdbc.JDBCIndexTaskClient.java

License:Apache License

public Map<Integer, Long> pause(final String id, final long timeout) {
    log.debug("Pause task[%s] timeout[%d]", id, timeout);

    try {
        final FullResponseHolder response = submitRequest(id, HttpMethod.POST, "pause",
                timeout > 0 ? String.format("timeout=%d", timeout) : null, true);

        if (response.getStatus().equals(HttpResponseStatus.OK)) {
            log.info("Task [%s] paused successfully", id);
            return jsonMapper.readValue(response.getContent(), new TypeReference<Map<Integer, Long>>() {
            });
        }

        final RetryPolicy retryPolicy = retryPolicyFactory.makeRetryPolicy();
        while (true) {
            if (getStatus(id) == JDBCIndexTask.Status.PAUSED) {
                return getCurrentOffsets(id, true);
            }

            final Duration delay = retryPolicy.getAndIncrementRetryDelay();
            if (delay == null) {
                log.error("Task [%s] failed to pause, aborting", id);
                throw new ISE("Task [%s] failed to pause, aborting", id);
            } else {
                final long sleepTime = delay.getMillis();
                log.info("Still waiting for task [%s] to pause; will try again in [%s]", id,
                        new Duration(sleepTime).toString());
                Thread.sleep(sleepTime);
            }
        }
    } catch (NoTaskLocationException e) {
        log.error("Exception [%s] while pausing Task [%s]", e.getMessage(), id);
        return ImmutableMap.of();
    } catch (IOException | InterruptedException e) {
        log.error("Exception [%s] while pausing Task [%s]", e.getMessage(), id);
        throw Throwables.propagate(e);
    }
}

From source file:io.druid.indexing.jdbc.JDBCIndexTaskClient.java

License:Apache License

private FullResponseHolder submitRequest(String id, HttpMethod method, String pathSuffix, String query,
        byte[] content, boolean retry) {
    final RetryPolicy retryPolicy = retryPolicyFactory.makeRetryPolicy();
    while (true) {
        FullResponseHolder response = null;
        Request request = null;
        TaskLocation location = TaskLocation.unknown();
        String path = String.format("%s/%s/%s", BASE_PATH, id, pathSuffix);

        Optional<TaskStatus> status = taskInfoProvider.getTaskStatus(id);
        if (!status.isPresent() || !status.get().isRunnable()) {
            throw new TaskNotRunnableException(
                    String.format("Aborting request because task [%s] is not runnable", id));
        }

        try {
            location = taskInfoProvider.getTaskLocation(id);
            if (location.equals(TaskLocation.unknown())) {
                throw new NoTaskLocationException(String.format("No TaskLocation available for task [%s]", id));
            }

            // Netty throws some annoying exceptions if a connection can't be opened, which happens relatively frequently
            // for tasks that happen to still be starting up, so test the connection first to keep the logs clean.
            checkConnection(location.getHost(), location.getPort());

            try {
                URI serviceUri = new URI("http", null, location.getHost(), location.getPort(), path, query,
                        null);
                request = new Request(method, serviceUri.toURL());

                // used to validate that we are talking to the correct worker
                request.addHeader(ChatHandlerResource.TASK_ID_HEADER, id);

                if (content.length > 0) {
                    request.setContent(MediaType.APPLICATION_JSON, content);
                }

                log.debug("HTTP %s: %s", method.getName(), serviceUri.toString());
                response = httpClient.go(request, new FullResponseHandler(Charsets.UTF_8), httpTimeout).get();
            } catch (Exception e) {
                Throwables.propagateIfInstanceOf(e.getCause(), IOException.class);
                Throwables.propagateIfInstanceOf(e.getCause(), ChannelException.class);
                throw Throwables.propagate(e);
            }

            int responseCode = response.getStatus().getCode();
            if (responseCode / 100 == 2) {
                return response;
            } else if (responseCode == 400) { // don't bother retrying if it's a bad request
                throw new IAE("Received 400 Bad Request with body: %s", response.getContent());
            } else {
                throw new IOException(String.format("Received status [%d]", responseCode));
            }
        } catch (IOException | ChannelException e) {

            // Since workers are free to move tasks around to different ports, there is a chance that a task may have been
            // moved but our view of its location has not been updated yet from ZK. To detect this case, we send a header
            // identifying our expected recipient in the request; if this doesn't correspond to the worker we messaged, the
            // worker will return an HTTP 404 with its ID in the response header. If we get a mismatching task ID, then
            // we will wait for a short period then retry the request indefinitely, expecting the task's location to
            // eventually be updated.

            final Duration delay;
            if (response != null && response.getStatus().equals(HttpResponseStatus.NOT_FOUND)) {
                String headerId = response.getResponse().headers().get(ChatHandlerResource.TASK_ID_HEADER);
                if (headerId != null && !headerId.equals(id)) {
                    log.warn("Expected worker to have taskId [%s] but has taskId [%s], will retry in [%d]s", id,
                            headerId, TASK_MISMATCH_RETRY_DELAY_SECONDS);
                    delay = Duration.standardSeconds(TASK_MISMATCH_RETRY_DELAY_SECONDS);
                } else {
                    delay = retryPolicy.getAndIncrementRetryDelay();
                }
            } else {
                delay = retryPolicy.getAndIncrementRetryDelay();
            }

            String urlForLog = (request != null ? request.getUrl().toString()
                    : String.format("http://%s:%d%s", location.getHost(), location.getPort(), path));
            if (!retry) {
                // if retry=false, we probably aren't too concerned if the operation doesn't succeed (i.e. the request was
                // for informational purposes only) so don't log a scary stack trace
                log.info("submitRequest failed for [%s], with message [%s]", urlForLog, e.getMessage());
                Throwables.propagate(e);
            } else if (delay == null) {
                log.warn(e, "Retries exhausted for [%s], last exception:", urlForLog);
                Throwables.propagate(e);
            } else {
                try {
                    final long sleepTime = delay.getMillis();
                    log.debug("Bad response HTTP [%s] from [%s]; will try again in [%s] (body/exception: [%s])",
                            (response != null ? response.getStatus().getCode() : "no response"), urlForLog,
                            new Duration(sleepTime).toString(),
                            (response != null ? response.getContent() : e.getMessage()));
                    Thread.sleep(sleepTime);
                } catch (InterruptedException e2) {
                    Throwables.propagate(e2);
                }
            }
        } catch (NoTaskLocationException e) {
            log.info(
                    "No TaskLocation available for task [%s], this task may not have been assigned to a worker yet or "
                            + "may have already completed",
                    id);
            throw e;
        } catch (Exception e) {
            log.warn(e, "Exception while sending request");
            throw e;
        }
    }
}

From source file:io.druid.indexing.kafka.KafkaIndexTaskClient.java

License:Apache License

public Map<Integer, Long> pause(final String id, final long timeout) {
    log.debug("Pause task[%s] timeout[%d]", id, timeout);

    try {
        final FullResponseHolder response = submitRequest(id, HttpMethod.POST, "pause",
                timeout > 0 ? StringUtils.format("timeout=%d", timeout) : null, true);

        if (response.getStatus().equals(HttpResponseStatus.OK)) {
            log.info("Task [%s] paused successfully", id);
            return jsonMapper.readValue(response.getContent(), new TypeReference<Map<Integer, Long>>() {
            });
        }

        final RetryPolicy retryPolicy = retryPolicyFactory.makeRetryPolicy();
        while (true) {
            if (getStatus(id) == KafkaIndexTask.Status.PAUSED) {
                return getCurrentOffsets(id, true);
            }

            final Duration delay = retryPolicy.getAndIncrementRetryDelay();
            if (delay == null) {
                log.error("Task [%s] failed to pause, aborting", id);
                throw new ISE("Task [%s] failed to pause, aborting", id);
            } else {
                final long sleepTime = delay.getMillis();
                log.info("Still waiting for task [%s] to pause; will try again in [%s]", id,
                        new Duration(sleepTime).toString());
                Thread.sleep(sleepTime);
            }
        }
    } catch (NoTaskLocationException e) {
        log.error("Exception [%s] while pausing Task [%s]", e.getMessage(), id);
        return ImmutableMap.of();
    } catch (IOException | InterruptedException e) {
        log.error("Exception [%s] while pausing Task [%s]", e.getMessage(), id);
        throw Throwables.propagate(e);
    }
}

From source file:io.druid.indexing.kafka.KafkaIndexTaskClient.java

License:Apache License

private FullResponseHolder submitRequest(String id, HttpMethod method, String pathSuffix, String query,
        byte[] content, boolean retry) {
    final RetryPolicy retryPolicy = retryPolicyFactory.makeRetryPolicy();
    while (true) {
        FullResponseHolder response = null;
        Request request = null;
        TaskLocation location = TaskLocation.unknown();
        String path = StringUtils.format("%s/%s/%s", BASE_PATH, id, pathSuffix);

        Optional<TaskStatus> status = taskInfoProvider.getTaskStatus(id);
        if (!status.isPresent() || !status.get().isRunnable()) {
            throw new TaskNotRunnableException(
                    StringUtils.format("Aborting request because task [%s] is not runnable", id));
        }

        String host = location.getHost();
        String scheme = "";
        int port = -1;

        try {
            location = taskInfoProvider.getTaskLocation(id);
            if (location.equals(TaskLocation.unknown())) {
                throw new NoTaskLocationException(
                        StringUtils.format("No TaskLocation available for task [%s]", id));
            }

            host = location.getHost();
            scheme = location.getTlsPort() >= 0 ? "https" : "http";
            port = location.getTlsPort() >= 0 ? location.getTlsPort() : location.getPort();

            // Netty throws some annoying exceptions if a connection can't be opened, which happens relatively frequently
            // for tasks that happen to still be starting up, so test the connection first to keep the logs clean.
            checkConnection(host, port);

            try {
                URI serviceUri = new URI(scheme, null, host, port, path, query, null);
                request = new Request(method, serviceUri.toURL());

                // used to validate that we are talking to the correct worker
                request.addHeader(ChatHandlerResource.TASK_ID_HEADER, id);

                if (content.length > 0) {
                    request.setContent(MediaType.APPLICATION_JSON, content);
                }

                log.debug("HTTP %s: %s", method.getName(), serviceUri.toString());
                response = httpClient.go(request, new FullResponseHandler(Charsets.UTF_8), httpTimeout).get();
            } catch (Exception e) {
                Throwables.propagateIfInstanceOf(e.getCause(), IOException.class);
                Throwables.propagateIfInstanceOf(e.getCause(), ChannelException.class);
                throw Throwables.propagate(e);
            }

            int responseCode = response.getStatus().getCode();
            if (responseCode / 100 == 2) {
                return response;
            } else if (responseCode == 400) { // don't bother retrying if it's a bad request
                throw new IAE("Received 400 Bad Request with body: %s", response.getContent());
            } else {
                throw new IOE("Received status [%d]", responseCode);
            }
        } catch (IOException | ChannelException e) {

            // Since workers are free to move tasks around to different ports, there is a chance that a task may have been
            // moved but our view of its location has not been updated yet from ZK. To detect this case, we send a header
            // identifying our expected recipient in the request; if this doesn't correspond to the worker we messaged, the
            // worker will return an HTTP 404 with its ID in the response header. If we get a mismatching task ID, then
            // we will wait for a short period then retry the request indefinitely, expecting the task's location to
            // eventually be updated.

            final Duration delay;
            if (response != null && response.getStatus().equals(HttpResponseStatus.NOT_FOUND)) {
                String headerId = response.getResponse().headers().get(ChatHandlerResource.TASK_ID_HEADER);
                if (headerId != null && !headerId.equals(id)) {
                    log.warn("Expected worker to have taskId [%s] but has taskId [%s], will retry in [%d]s", id,
                            headerId, TASK_MISMATCH_RETRY_DELAY_SECONDS);
                    delay = Duration.standardSeconds(TASK_MISMATCH_RETRY_DELAY_SECONDS);
                } else {
                    delay = retryPolicy.getAndIncrementRetryDelay();
                }
            } else {
                delay = retryPolicy.getAndIncrementRetryDelay();
            }
            String urlForLog = (request != null ? request.getUrl().toString()
                    : StringUtils.format("%s://%s:%d%s", scheme, host, port, path));
            if (!retry) {
                // if retry=false, we probably aren't too concerned if the operation doesn't succeed (i.e. the request was
                // for informational purposes only) so don't log a scary stack trace
                log.info("submitRequest failed for [%s], with message [%s]", urlForLog, e.getMessage());
                Throwables.propagate(e);
            } else if (delay == null) {
                log.warn(e, "Retries exhausted for [%s], last exception:", urlForLog);
                Throwables.propagate(e);
            } else {
                try {
                    final long sleepTime = delay.getMillis();
                    log.debug("Bad response HTTP [%s] from [%s]; will try again in [%s] (body/exception: [%s])",
                            (response != null ? response.getStatus().getCode() : "no response"), urlForLog,
                            new Duration(sleepTime).toString(),
                            (response != null ? response.getContent() : e.getMessage()));
                    Thread.sleep(sleepTime);
                } catch (InterruptedException e2) {
                    Throwables.propagate(e2);
                }
            }
        } catch (NoTaskLocationException e) {
            log.info(
                    "No TaskLocation available for task [%s], this task may not have been assigned to a worker yet or "
                            + "may have already completed",
                    id);
            throw e;
        } catch (Exception e) {
            log.warn(e, "Exception while sending request");
            throw e;
        }
    }
}