Example usage for org.joda.time Duration Duration

Introduction

On this page you can find usage examples for the org.joda.time.Duration constructor.

Prototype

public Duration(Object duration) 

Document

Creates a duration from the specified object using the org.joda.time.convert.ConverterManager.
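
For orientation, here is a minimal, self-contained sketch of what this constructor accepts, assuming the default Joda-Time converter set is registered. Note that Joda's StringConverter only parses the seconds-based ISO-8601 form (e.g. "PT72.345S"); strings with other period fields are rejected.

import org.joda.time.Duration;

public class DurationFromObjectExample {
    public static void main(String[] args) {
        // A Long is converted as a count of milliseconds.
        Duration fromMillis = new Duration((Object) 1500L); // PT1.500S

        // Another ReadableDuration is copied.
        Duration copied = new Duration((Object) fromMillis);

        // Strings are parsed as ISO-8601, seconds field only.
        Duration parsed = new Duration((Object) "PT72.345S");

        // null converts to a zero-length duration.
        Duration zero = new Duration((Object) null);

        System.out.println(fromMillis + " " + copied + " " + parsed + " " + zero);
    }
}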

Usage

From source file: org.apache.cloudstack.outofbandmanagement.driver.OutOfBandManagementDriverCommand.java

License: Apache License

public OutOfBandManagementDriverCommand(final ImmutableMap<OutOfBandManagement.Option, String> options,
        final Long timeoutSeconds) {
    this.options = options;
    if (timeoutSeconds != null && timeoutSeconds > 0) {
        this.timeout = new Duration(timeoutSeconds * 1000);
    } else {
        this.timeout = Duration.ZERO;
    }
}
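
As a side note, Duration.standardSeconds expresses the same conversion without the hand-written multiplication; a sketch of an equivalent computation (null and non-positive handling unchanged):

    // Duration.standardSeconds(n) multiplies by 1000 internally, so this
    // matches new Duration(timeoutSeconds * 1000) for positive inputs.
    Duration timeout = (timeoutSeconds != null && timeoutSeconds > 0)
            ? Duration.standardSeconds(timeoutSeconds)
            : Duration.ZERO;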

From source file: org.apache.druid.data.input.parquet.simple.ParquetGroupConverter.java

License: Apache License

/**
 * Convert a primitive group field to an "ingestion friendly" java object
 *
 * @return "ingestion ready" java object, or null
 */
@Nullable
private static Object convertPrimitiveField(Group g, int fieldIndex, int index, boolean binaryAsString) {
    PrimitiveType pt = (PrimitiveType) g.getType().getFields().get(fieldIndex);
    OriginalType ot = pt.getOriginalType();

    try {
        if (ot != null) {
            // convert logical types
            switch (ot) {
            case DATE:
                long ts = g.getInteger(fieldIndex, index) * MILLIS_IN_DAY;
                return ts;
            case TIME_MICROS:
                return g.getLong(fieldIndex, index);
            case TIME_MILLIS:
                return g.getInteger(fieldIndex, index);
            case TIMESTAMP_MICROS:
                return TimeUnit.MILLISECONDS.convert(g.getLong(fieldIndex, index), TimeUnit.MICROSECONDS);
            case TIMESTAMP_MILLIS:
                return g.getLong(fieldIndex, index);
            case INTERVAL:
                /*
                INTERVAL is used for an interval of time. It must annotate a fixed_len_byte_array of length 12.
                This array stores three little-endian unsigned integers that represent durations at different
                granularities of time. The first stores a number in months, the second stores a number in days,
                and the third stores a number in milliseconds. This representation is independent of any particular
                timezone or date.
                        
                Each component in this representation is independent of the others. For example, there is no
                requirement that a large number of days should be expressed as a mix of months and days because there is
                not a constant conversion from days to months.
                        
                The sort order used for INTERVAL is undefined. When writing data, no min/max statistics should be
                 saved for this type and if such non-compliant statistics are found during reading, they must be ignored.
                 */
                Binary intervalVal = g.getBinary(fieldIndex, index);
                IntBuffer intBuf = intervalVal.toByteBuffer().order(ByteOrder.LITTLE_ENDIAN).asIntBuffer();
                int months = intBuf.get(0);
                int days = intBuf.get(1);
                int millis = intBuf.get(2);
                StringBuilder periodBuilder = new StringBuilder("P");
                if (months > 0) {
                    periodBuilder.append(months).append("M");
                }
                if (days > 0) {
                    periodBuilder.append(days).append("D");
                }
                if (periodBuilder.length() > 1) {
                    Period p = Period.parse(periodBuilder.toString());
                    Duration d = p.toStandardDuration().plus(millis);
                    return d;
                } else {
                    return new Duration(millis);
                }
            case INT_8:
            case INT_16:
            case INT_32:
                return g.getInteger(fieldIndex, index);
            case INT_64:
                return g.getLong(fieldIndex, index);
            // todo: unclear what to do about unsigned types
            case UINT_8:
            case UINT_16:
            case UINT_32:
                return g.getInteger(fieldIndex, index);
            case UINT_64:
                return g.getLong(fieldIndex, index);
            case DECIMAL:
                /*
                  DECIMAL can be used to annotate the following types:
                    int32: for 1 <= precision <= 9
                    int64: for 1 <= precision <= 18; precision < 10 will produce a warning
                    fixed_len_byte_array: precision is limited by the array size. Length n can
                      store <= floor(log_10(2^(8*n - 1) - 1)) base-10 digits
                    binary: precision is not limited, but is required. The minimum number of bytes to store
                      the unscaled value should be used.
                 */
                int precision = pt.asPrimitiveType().getDecimalMetadata().getPrecision();
                int scale = pt.asPrimitiveType().getDecimalMetadata().getScale();
                switch (pt.getPrimitiveTypeName()) {
                case INT32:
                    return new BigDecimal(g.getInteger(fieldIndex, index));
                case INT64:
                    return new BigDecimal(g.getLong(fieldIndex, index));
                case FIXED_LEN_BYTE_ARRAY:
                case BINARY:
                    Binary value = g.getBinary(fieldIndex, index);
                    return convertBinaryToDecimal(value, precision, scale);
                default:
                    throw new RE(
                            "Unknown 'DECIMAL' type supplied to primitive conversion: %s (this should never happen)",
                            pt.getPrimitiveTypeName());
                }
            case UTF8:
            case ENUM:
            case JSON:
                return g.getString(fieldIndex, index);
            case LIST:
            case MAP:
            case MAP_KEY_VALUE:
            case BSON:
            default:
                throw new RE("Non-primitive supplied to primitive conversion: %s (this should never happen)",
                        ot.name());
            }
        } else {
            // fallback to handling the raw primitive type if no logical type mapping
            switch (pt.getPrimitiveTypeName()) {
            case BOOLEAN:
                return g.getBoolean(fieldIndex, index);
            case INT32:
                return g.getInteger(fieldIndex, index);
            case INT64:
                return g.getLong(fieldIndex, index);
            case FLOAT:
                return g.getFloat(fieldIndex, index);
            case DOUBLE:
                return g.getDouble(fieldIndex, index);
            case INT96:
                Binary tsBin = g.getInt96(fieldIndex, index);
                return convertInt96BinaryToTimestamp(tsBin);
            case FIXED_LEN_BYTE_ARRAY:
            case BINARY:
                Binary bin = g.getBinary(fieldIndex, index);
                byte[] bytes = bin.getBytes();
                if (binaryAsString) {
                    return StringUtils.fromUtf8(bytes);
                } else {
                    return bytes;
                }
            default:
                throw new RE("Unknown primitive conversion: %s", pt.getPrimitiveTypeName());
            }
        }
    } catch (Exception ex) {
        return null;
    }
}
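
One detail worth making explicit for the INTERVAL branch: Joda's Period.toStandardDuration() rejects periods that contain months or years (they have no fixed millisecond length), so a month-bearing INTERVAL would throw inside the try block above and come back as null. Below is a standalone sketch of the same decoding, using hypothetical byte values (days and millis only):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.IntBuffer;

import org.joda.time.Duration;
import org.joda.time.Period;

public class IntervalDecodeSketch {
    public static void main(String[] args) {
        // Hypothetical fixed_len_byte_array(12): months=0, days=2, millis=500,
        // each stored as a little-endian unsigned 32-bit integer.
        byte[] raw = ByteBuffer.allocate(12).order(ByteOrder.LITTLE_ENDIAN)
                .putInt(0).putInt(2).putInt(500).array();

        IntBuffer intBuf = ByteBuffer.wrap(raw).order(ByteOrder.LITTLE_ENDIAN).asIntBuffer();
        int days = intBuf.get(1);
        int millis = intBuf.get(2);

        // Days are routed through an ISO-8601 period string, flattened to a
        // standard Duration (86,400,000 ms per day), then the millis are added.
        Period p = Period.parse("P" + days + "D");
        Duration d = p.toStandardDuration().plus(millis);
        System.out.println(d); // PT172800.500S
    }
}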

From source file: org.apache.druid.indexing.common.actions.RemoteTaskActionClient.java

License: Apache License

@Override
public <RetType> RetType submit(TaskAction<RetType> taskAction) throws IOException {
    log.info("Performing action for task[%s]: %s", task.getId(), taskAction);

    byte[] dataToSend = jsonMapper.writeValueAsBytes(new TaskActionHolder(task, taskAction));

    final RetryPolicy retryPolicy = retryPolicyFactory.makeRetryPolicy();

    while (true) {
        try {

            final FullResponseHolder fullResponseHolder;

            log.info("Submitting action for task[%s] to overlord: [%s].", task.getId(), taskAction);

            fullResponseHolder = druidLeaderClient
                    .go(druidLeaderClient.makeRequest(HttpMethod.POST, "/druid/indexer/v1/action")
                            .setContent(MediaType.APPLICATION_JSON, dataToSend));

            if (fullResponseHolder.getStatus().getCode() / 100 == 2) {
                final Map<String, Object> responseDict = jsonMapper.readValue(fullResponseHolder.getContent(),
                        JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT);
                return jsonMapper.convertValue(responseDict.get("result"), taskAction.getReturnTypeReference());
            } else {
                // Want to retry, so throw an IOException.
                throw new IOE("Error with status[%s] and message[%s]. Check overlord logs for details.",
                        fullResponseHolder.getStatus(), fullResponseHolder.getContent());
            }
        } catch (IOException | ChannelException e) {
            log.warn(e, "Exception submitting action for task[%s]", task.getId());

            final Duration delay = retryPolicy.getAndIncrementRetryDelay();
            if (delay == null) {
                throw e;
            } else {
                try {
                    final long sleepTime = jitter(delay.getMillis());
                    log.info("Will try again in [%s].", new Duration(sleepTime).toString());
                    Thread.sleep(sleepTime);
                } catch (InterruptedException e2) {
                    throw new RuntimeException(e2);
                }
            }
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
    }
}
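
The jitter(...) helper is elided from this excerpt. A hypothetical reconstruction, for illustration only (the name comes from the call site; the noise model is an assumption), that spreads out the sleep so concurrent clients do not retry in lockstep:

// Hypothetical sketch of the elided jitter helper (uses
// java.util.concurrent.ThreadLocalRandom): Gaussian noise with
// sigma = input / 4, clamped so the sleep is never negative.
private long jitter(long input) {
    final double noise = ThreadLocalRandom.current().nextGaussian() * input / 4.0;
    final long result = input + (long) noise;
    return result < 0 ? 0 : result;
}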

From source file: org.apache.druid.indexing.common.IndexTaskClient.java

License: Apache License

/**
 * Sends an HTTP request to the task of the specified {@code taskId} and returns a response if it succeeded.
 */
private FullResponseHolder submitRequest(String taskId, @Nullable String mediaType, // nullable if content is empty
        HttpMethod method, String encodedPathSuffix, @Nullable String encodedQueryString, byte[] content,
        boolean retry) throws IOException, ChannelException, NoTaskLocationException {
    final RetryPolicy retryPolicy = retryPolicyFactory.makeRetryPolicy();

    while (true) {
        String path = StringUtils.format("%s/%s/%s", BASE_PATH, StringUtils.urlEncode(taskId),
                encodedPathSuffix);

        Optional<TaskStatus> status = taskInfoProvider.getTaskStatus(taskId);
        if (!status.isPresent() || !status.get().isRunnable()) {
            throw new TaskNotRunnableException(
                    StringUtils.format("Aborting request because task [%s] is not runnable", taskId));
        }

        final TaskLocation location = taskInfoProvider.getTaskLocation(taskId);
        if (location.equals(TaskLocation.unknown())) {
            throw new NoTaskLocationException(
                    StringUtils.format("No TaskLocation available for task [%s]", taskId));
        }

        final Request request = createRequest(taskId, location, path, encodedQueryString, method, mediaType,
                content);

        FullResponseHolder response = null;
        try {
            // Netty throws some annoying exceptions if a connection can't be opened, which happens relatively frequently
            // for tasks that happen to still be starting up, so test the connection first to keep the logs clean.
            checkConnection(request.getUrl().getHost(), request.getUrl().getPort());

            response = submitRequest(request);

            int responseCode = response.getStatus().getCode();
            if (responseCode / 100 == 2) {
                return response;
            } else if (responseCode == 400) { // don't bother retrying if it's a bad request
                throw new IAE("Received 400 Bad Request with body: %s", response.getContent());
            } else {
                throw new IOE("Received status [%d] and content [%s]", responseCode, response.getContent());
            }
        } catch (IOException | ChannelException e) {

            // Since workers are free to move tasks around to different ports, there is a chance that a task may have been
            // moved but our view of its location has not been updated yet from ZK. To detect this case, we send a header
            // identifying our expected recipient in the request; if this doesn't correspond to the worker we messaged, the
            // worker will return an HTTP 404 with its ID in the response header. If we get a mismatching task ID, then
            // we will wait for a short period then retry the request indefinitely, expecting the task's location to
            // eventually be updated.

            final Duration delay;
            if (response != null && response.getStatus().equals(HttpResponseStatus.NOT_FOUND)) {
                String headerId = StringUtils
                        .urlDecode(response.getResponse().headers().get(ChatHandlerResource.TASK_ID_HEADER));
                if (headerId != null && !headerId.equals(taskId)) {
                    log.warn("Expected worker to have taskId [%s] but has taskId [%s], will retry in [%d]s",
                            taskId, headerId, TASK_MISMATCH_RETRY_DELAY_SECONDS);
                    delay = Duration.standardSeconds(TASK_MISMATCH_RETRY_DELAY_SECONDS);
                } else {
                    delay = retryPolicy.getAndIncrementRetryDelay();
                }
            } else {
                delay = retryPolicy.getAndIncrementRetryDelay();
            }
            final String urlForLog = request.getUrl().toString();
            if (!retry) {
                // if retry=false, we probably aren't too concerned if the operation doesn't succeed (i.e. the request was
                // for informational purposes only) so don't log a scary stack trace
                log.info("submitRequest failed for [%s], with message [%s]", urlForLog, e.getMessage());
                throw e;
            } else if (delay == null) {
                log.warn(e, "Retries exhausted for [%s], last exception:", urlForLog);
                throw e;
            } else {
                try {
                    final long sleepTime = delay.getMillis();
                    log.debug("Bad response HTTP [%s] from [%s]; will try again in [%s] (body/exception: [%s])",
                            (response != null ? response.getStatus().getCode() : "no response"), urlForLog,
                            new Duration(sleepTime).toString(),
                            (response != null ? response.getContent() : e.getMessage()));
                    Thread.sleep(sleepTime);
                } catch (InterruptedException e2) {
                    Thread.currentThread().interrupt();
                    e.addSuppressed(e2);
                    throw new RuntimeException(e);
                }
            }
        } catch (NoTaskLocationException e) {
            log.info(
                    "No TaskLocation available for task [%s], this task may not have been assigned to a worker yet or "
                            + "may have already completed",
                    taskId);
            throw e;
        } catch (Exception e) {
            log.warn(e, "Exception while sending request");
            throw e;
        }
    }
}
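
The new Duration(sleepTime).toString() idiom in these log messages renders a millisecond count in ISO-8601 seconds form; a quick sketch of what the placeholders receive:

    // Duration.toString() prints the ISO-8601 seconds-only form.
    System.out.println(new Duration(30000L)); // PT30S
    System.out.println(new Duration(1500L));  // PT1.500S
    System.out.println(new Duration(0L));     // PT0S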

From source file: org.apache.druid.indexing.firehose.IngestSegmentFirehoseFactory.java

License: Apache License

private List<TimelineObjectHolder<String, DataSegment>> getTimelineForInterval() {
    Preconditions.checkNotNull(interval);

    // This call used to use the TaskActionClient, so for compatibility we use the same retry configuration
    // as TaskActionClient.
    final RetryPolicy retryPolicy = retryPolicyFactory.makeRetryPolicy();
    List<DataSegment> usedSegments;
    while (true) {
        try {
            usedSegments = coordinatorClient.getDatabaseSegmentDataSourceSegments(dataSource,
                    Collections.singletonList(interval));
            break;
        } catch (Throwable e) {
            log.warn(e, "Exception getting database segments");
            final Duration delay = retryPolicy.getAndIncrementRetryDelay();
            if (delay == null) {
                throw e;
            } else {
                final long sleepTime = jitter(delay.getMillis());
                log.info("Will try again in [%s].", new Duration(sleepTime).toString());
                try {
                    Thread.sleep(sleepTime);
                } catch (InterruptedException e2) {
                    throw new RuntimeException(e2);
                }
            }
        }
    }

    return VersionedIntervalTimeline.forSegments(usedSegments).lookup(interval);
}

From source file: org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskClient.java

License: Apache License

public Map<PartitionIdType, SequenceOffsetType> pause(final String id) {
    log.debug("Pause task[%s]", id);

    try {
        final FullResponseHolder response = submitRequestWithEmptyContent(id, HttpMethod.POST, "pause", null,
                true);

        final HttpResponseStatus responseStatus = response.getStatus();
        final String responseContent = response.getContent();

        if (responseStatus.equals(HttpResponseStatus.OK)) {
            log.info("Task [%s] paused successfully", id);
            return deserializeMap(responseContent, Map.class, getPartitionType(), getSequenceType());
        } else if (responseStatus.equals(HttpResponseStatus.ACCEPTED)) {
            // The task received the pause request, but its status hasn't been changed yet.
            while (true) {
                final SeekableStreamIndexTaskRunner.Status status = getStatus(id);
                if (status == SeekableStreamIndexTaskRunner.Status.PAUSED) {
                    return getCurrentOffsets(id, true);
                }

                final Duration delay = newRetryPolicy().getAndIncrementRetryDelay();
                if (delay == null) {
                    throw new ISE("Task [%s] failed to change its status from [%s] to [%s], aborting", id,
                            status, SeekableStreamIndexTaskRunner.Status.PAUSED);
                } else {
                    final long sleepTime = delay.getMillis();
                    log.info("Still waiting for task [%s] to change its status to [%s]; will try again in [%s]",
                            id, SeekableStreamIndexTaskRunner.Status.PAUSED,
                            new Duration(sleepTime).toString());
                    Thread.sleep(sleepTime);
                }
            }
        } else {
            throw new ISE("Pause request for task [%s] failed with response [%s] : [%s]", id, responseStatus,
                    responseContent);
        }
    } catch (NoTaskLocationException e) {
        log.error("Exception [%s] while pausing Task [%s]", e.getMessage(), id);
        return ImmutableMap.of();
    } catch (IOException | InterruptedException e) {
        throw new RE(e, "Exception [%s] while pausing Task [%s]", e.getMessage(), id);
    }
}

From source file: org.apache.druid.security.basic.authentication.db.updater.CoordinatorBasicAuthenticatorMetadataStorageUpdater.java

License: Apache License

@LifecycleStart
public void start() {
    if (!lifecycleLock.canStart()) {
        throw new ISE("can't start.");
    }

    if (authenticatorMapper == null || authenticatorMapper.getAuthenticatorMap() == null) {
        return;
    }

    try {
        LOG.info("Starting CoordinatorBasicAuthenticatorMetadataStorageUpdater.");
        for (Map.Entry<String, Authenticator> entry : authenticatorMapper.getAuthenticatorMap().entrySet()) {
            Authenticator authenticator = entry.getValue();
            if (authenticator instanceof BasicHTTPAuthenticator) {
                String authenticatorName = entry.getKey();
                authenticatorPrefixes.add(authenticatorName);
                BasicHTTPAuthenticator basicHTTPAuthenticator = (BasicHTTPAuthenticator) authenticator;
                BasicAuthDBConfig dbConfig = basicHTTPAuthenticator.getDbConfig();
                byte[] userMapBytes = getCurrentUserMapBytes(authenticatorName);
                Map<String, BasicAuthenticatorUser> userMap = BasicAuthUtils
                        .deserializeAuthenticatorUserMap(objectMapper, userMapBytes);
                cachedUserMaps.put(authenticatorName,
                        new BasicAuthenticatorUserMapBundle(userMap, userMapBytes));

                if (dbConfig.getInitialAdminPassword() != null
                        && !userMap.containsKey(BasicAuthUtils.ADMIN_NAME)) {
                    createUserInternal(authenticatorName, BasicAuthUtils.ADMIN_NAME);
                    setUserCredentialsInternal(authenticatorName, BasicAuthUtils.ADMIN_NAME,
                            new BasicAuthenticatorCredentialUpdate(
                                    dbConfig.getInitialAdminPassword().getPassword(),
                                    BasicAuthUtils.DEFAULT_KEY_ITERATIONS));
                }

                if (dbConfig.getInitialInternalClientPassword() != null
                        && !userMap.containsKey(BasicAuthUtils.INTERNAL_USER_NAME)) {
                    createUserInternal(authenticatorName, BasicAuthUtils.INTERNAL_USER_NAME);
                    setUserCredentialsInternal(authenticatorName, BasicAuthUtils.INTERNAL_USER_NAME,
                            new BasicAuthenticatorCredentialUpdate(
                                    dbConfig.getInitialInternalClientPassword().getPassword(),
                                    BasicAuthUtils.DEFAULT_KEY_ITERATIONS));
                }
            }
        }

        ScheduledExecutors.scheduleWithFixedDelay(exec, new Duration(commonCacheConfig.getPollingPeriod()),
                new Duration(commonCacheConfig.getPollingPeriod()), new Callable<ScheduledExecutors.Signal>() {
                    @Override
                    public ScheduledExecutors.Signal call() {
                        if (stopped) {
                            return ScheduledExecutors.Signal.STOP;
                        }
                        try {
                            LOG.debug("Scheduled db poll is running");
                            for (String authenticatorPrefix : authenticatorPrefixes) {

                                byte[] userMapBytes = getCurrentUserMapBytes(authenticatorPrefix);
                                Map<String, BasicAuthenticatorUser> userMap = BasicAuthUtils
                                        .deserializeAuthenticatorUserMap(objectMapper, userMapBytes);
                                if (userMapBytes != null) {
                                    cachedUserMaps.put(authenticatorPrefix,
                                            new BasicAuthenticatorUserMapBundle(userMap, userMapBytes));
                                }
                            }
                            LOG.debug("Scheduled db poll is done");
                        } catch (Throwable t) {
                            LOG.makeAlert(t, "Error occured while polling for cachedUserMaps.").emit();
                        }
                        return ScheduledExecutors.Signal.REPEAT;
                    }
                });

        lifecycleLock.started();
    } finally {
        lifecycleLock.exitStart();
    }
}
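
Here new Duration(...) wraps the configured polling period (presumably a millisecond value) as both the initial and repeat delay for Druid's ScheduledExecutors.scheduleWithFixedDelay. A minimal sketch of the same fixed-delay shape using only the JDK scheduler (the period value is illustrative):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.joda.time.Duration;

public class FixedDelayPollSketch {
    public static void main(String[] args) {
        // Illustrative polling period; the Druid code above reads it from
        // commonCacheConfig.getPollingPeriod().
        Duration pollingPeriod = new Duration(60_000L);

        ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();
        // Initial delay and repeat delay are both the polling period,
        // mirroring scheduleWithFixedDelay(exec, period, period, ...).
        exec.scheduleWithFixedDelay(() -> System.out.println("db poll"),
                pollingPeriod.getMillis(), pollingPeriod.getMillis(), TimeUnit.MILLISECONDS);
    }
}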

From source file: org.apache.druid.security.basic.authorization.db.updater.CoordinatorBasicAuthorizerMetadataStorageUpdater.java

License: Apache License

@LifecycleStart
public void start() {
    if (!lifecycleLock.canStart()) {
        throw new ISE("can't start.");
    }

    if (authorizerMapper == null || authorizerMapper.getAuthorizerMap() == null) {
        return;
    }

    try {
        LOG.info("Starting CoordinatorBasicAuthorizerMetadataStorageUpdater");
        for (Map.Entry<String, Authorizer> entry : authorizerMapper.getAuthorizerMap().entrySet()) {
            Authorizer authorizer = entry.getValue();
            if (authorizer instanceof BasicRoleBasedAuthorizer) {
                String authorizerName = entry.getKey();
                authorizerNames.add(authorizerName);

                byte[] userMapBytes = getCurrentUserMapBytes(authorizerName);
                Map<String, BasicAuthorizerUser> userMap = BasicAuthUtils
                        .deserializeAuthorizerUserMap(objectMapper, userMapBytes);
                cachedUserMaps.put(authorizerName, new BasicAuthorizerUserMapBundle(userMap, userMapBytes));

                byte[] roleMapBytes = getCurrentRoleMapBytes(authorizerName);
                Map<String, BasicAuthorizerRole> roleMap = BasicAuthUtils
                        .deserializeAuthorizerRoleMap(objectMapper, roleMapBytes);
                cachedRoleMaps.put(authorizerName, new BasicAuthorizerRoleMapBundle(roleMap, roleMapBytes));

                initSuperusers(authorizerName, userMap, roleMap);
            }
        }

        ScheduledExecutors.scheduleWithFixedDelay(exec, new Duration(commonCacheConfig.getPollingPeriod()),
                new Duration(commonCacheConfig.getPollingPeriod()), new Callable<ScheduledExecutors.Signal>() {
                    @Override
                    public ScheduledExecutors.Signal call() {
                        if (stopped) {
                            return ScheduledExecutors.Signal.STOP;
                        }
                        try {
                            LOG.debug("Scheduled db poll is running");
                            for (String authorizerName : authorizerNames) {

                                byte[] userMapBytes = getCurrentUserMapBytes(authorizerName);
                                Map<String, BasicAuthorizerUser> userMap = BasicAuthUtils
                                        .deserializeAuthorizerUserMap(objectMapper, userMapBytes);
                                if (userMapBytes != null) {
                                    synchronized (cachedUserMaps) {
                                        cachedUserMaps.put(authorizerName,
                                                new BasicAuthorizerUserMapBundle(userMap, userMapBytes));
                                    }
                                }

                                byte[] roleMapBytes = getCurrentRoleMapBytes(authorizerName);
                                Map<String, BasicAuthorizerRole> roleMap = BasicAuthUtils
                                        .deserializeAuthorizerRoleMap(objectMapper, roleMapBytes);
                                if (roleMapBytes != null) {
                                    synchronized (cachedUserMaps) {
                                        cachedRoleMaps.put(authorizerName,
                                                new BasicAuthorizerRoleMapBundle(roleMap, roleMapBytes));
                                    }
                                }
                            }
                            LOG.debug("Scheduled db poll is done");
                        } catch (Throwable t) {
                            LOG.makeAlert(t, "Error occurred while polling for cachedUserMaps.").emit();
                        }
                        return ScheduledExecutors.Signal.REPEAT;
                    }
                });

        lifecycleLock.started();
    } finally {
        lifecycleLock.exitStart();
    }
}

From source file: org.apache.druid.server.coordinator.HttpLoadQueuePeon.java

License: Apache License

private void doSegmentManagement() {
    if (stopped || !mainLoopInProgress.compareAndSet(false, true)) {
        log.debug("[%s]Ignoring tick. Either in-progress already or stopped.", serverId);
        return;
    }

    final int batchSize = config.getHttpLoadQueuePeonBatchSize();

    final List<DataSegmentChangeRequest> newRequests = new ArrayList<>(batchSize);

    synchronized (lock) {
        Iterator<Map.Entry<DataSegment, SegmentHolder>> iter = Iterators
                .concat(segmentsToDrop.entrySet().iterator(), segmentsToLoad.entrySet().iterator());

        while (newRequests.size() < batchSize && iter.hasNext()) {
            Map.Entry<DataSegment, SegmentHolder> entry = iter.next();
            if (entry.getValue().hasTimedOut()) {
                entry.getValue().requestFailed("timed out");
                iter.remove();
            } else {
                newRequests.add(entry.getValue().getChangeRequest());
            }
        }
    }

    if (newRequests.size() == 0) {
        log.debug("[%s]Found no load/drop requests. SegmentsToLoad[%d], SegmentsToDrop[%d], batchSize[%d].",
                serverId, segmentsToLoad.size(), segmentsToDrop.size(), config.getHttpLoadQueuePeonBatchSize());
        mainLoopInProgress.set(false);
        return;
    }

    try {
        log.debug("Sending [%d] load/drop requests to Server[%s].", newRequests.size(), serverId);
        BytesAccumulatingResponseHandler responseHandler = new BytesAccumulatingResponseHandler();
        ListenableFuture<InputStream> future = httpClient.go(
                new Request(HttpMethod.POST, changeRequestURL)
                        .addHeader(HttpHeaders.Names.ACCEPT, MediaType.APPLICATION_JSON)
                        .addHeader(HttpHeaders.Names.CONTENT_TYPE, MediaType.APPLICATION_JSON)
                        .setContent(requestBodyWriter.writeValueAsBytes(newRequests)),
                responseHandler, new Duration(config.getHttpLoadQueuePeonHostTimeout().getMillis() + 5000));

        Futures.addCallback(future, new FutureCallback<InputStream>() {
            @Override
            public void onSuccess(InputStream result) {
                boolean scheduleNextRunImmediately = true;
                try {
                    if (responseHandler.getStatus() == HttpServletResponse.SC_NO_CONTENT) {
                        log.debug("Received NO CONTENT reseponse from [%s]", serverId);
                    } else if (HttpServletResponse.SC_OK == responseHandler.getStatus()) {
                        try {
                            List<SegmentLoadDropHandler.DataSegmentChangeRequestAndStatus> statuses = jsonMapper
                                    .readValue(result, RESPONSE_ENTITY_TYPE_REF);
                            log.debug("Server[%s] returned status response [%s].", serverId, statuses);
                            synchronized (lock) {
                                if (stopped) {
                                    log.debug("Ignoring response from Server[%s]. We are already stopped.",
                                            serverId);
                                    scheduleNextRunImmediately = false;
                                    return;
                                }

                                for (SegmentLoadDropHandler.DataSegmentChangeRequestAndStatus e : statuses) {
                                    switch (e.getStatus().getState()) {
                                    case SUCCESS:
                                    case FAILED:
                                        handleResponseStatus(e.getRequest(), e.getStatus());
                                        break;
                                    case PENDING:
                                        log.debug("Request[%s] is still pending on server[%s].", e.getRequest(),
                                                serverId);
                                        break;
                                    default:
                                        scheduleNextRunImmediately = false;
                                        log.error("WTF! Server[%s] returned unknown state in status[%s].",
                                                serverId, e.getStatus());
                                    }
                                }
                            }
                        } catch (Exception ex) {
                            scheduleNextRunImmediately = false;
                            logRequestFailure(ex);
                        }
                    } else {
                        scheduleNextRunImmediately = false;
                        logRequestFailure(new RE("Unexpected Response Status."));
                    }
                } finally {
                    mainLoopInProgress.set(false);

                    if (scheduleNextRunImmediately) {
                        processingExecutor.execute(HttpLoadQueuePeon.this::doSegmentManagement);
                    }
                }
            }

            @Override
            public void onFailure(Throwable t) {
                try {
                    logRequestFailure(t);
                } finally {
                    mainLoopInProgress.set(false);
                }
            }

            private void logRequestFailure(Throwable t) {
                log.error(t, "Request[%s] Failed with status[%s]. Reason[%s].", changeRequestURL,
                        responseHandler.getStatus(), responseHandler.getDescription());
            }
        }, processingExecutor);
    } catch (Throwable th) {
        log.error(th, "Error sending load/drop request to [%s].", serverId);
        mainLoopInProgress.set(false);
    }
}

From source file: org.apache.druid.server.coordinator.HttpLoadQueuePeon.java

License: Apache License

@Override
public void start() {
    synchronized (lock) {
        if (stopped) {
            throw new ISE("Can't start.");
        }

        ScheduledExecutors.scheduleAtFixedRate(processingExecutor,
                new Duration(config.getHttpLoadQueuePeonRepeatDelay()), () -> {
                    if (!stopped) {
                        doSegmentManagement();
                    }

                    if (stopped) {
                        return ScheduledExecutors.Signal.STOP;
                    } else {
                        return ScheduledExecutors.Signal.REPEAT;
                    }
                });
    }
}
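
If config.getHttpLoadQueuePeonRepeatDelay() returns a Joda Duration (an assumption; Druid's config bindings commonly do), this call goes through the Duration(Object) constructor documented at the top of this page: the argument is copied via the ConverterManager. A tiny sketch of that copy behavior:

    // Passing a ReadableDuration to Duration(Object) copies it through the
    // ConverterManager, yielding an equal but distinct Duration instance.
    Duration configured = Duration.standardMinutes(1);
    Duration copy = new Duration((Object) configured);
    System.out.println(copy.equals(configured)); // true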