Example usage for com.google.common.util.concurrent Futures successfulAsList

Introduction

This page collects example usages of com.google.common.util.concurrent.Futures.successfulAsList.

Prototype

@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> successfulAsList(
        Iterable<? extends ListenableFuture<? extends V>> futures) 

Document

Creates a new ListenableFuture whose value is a list containing the values of all its successful input futures. The results appear in the same order as the input futures; if an input future fails or is cancelled, its corresponding position in the result list holds null.
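
Before the longer project excerpts below, here is a minimal, self-contained sketch (not taken from any of the projects on this page) illustrating that behavior: two inputs succeed, one fails, and the failed input shows up as null in the combined result. The class name and the simulated failure are illustrative assumptions.

import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

import java.util.List;

public class SuccessfulAsListExample {
    public static void main(String[] args) throws Exception {
        // Two inputs that have already succeeded and one that has already failed.
        ListenableFuture<String> ok1 = Futures.immediateFuture("first");
        ListenableFuture<String> ok2 = Futures.immediateFuture("second");
        ListenableFuture<String> bad =
                Futures.immediateFailedFuture(new IllegalStateException("simulated failure"));

        // successfulAsList succeeds even though one input failed; the failed
        // input is represented by null at its position in the result list.
        ListenableFuture<List<String>> combined =
                Futures.successfulAsList(ImmutableList.of(ok1, ok2, bad));

        List<String> results = combined.get(); // ["first", "second", null]
        for (String r : results) {
            System.out.println(r == null ? "<failed input>" : r);
        }
    }
}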

Usage

From source file: org.guldenj.core.Peer.java

protected ListenableFuture<Object> downloadDependenciesInternal(final int maxDepth, final int depth,
        final Transaction tx, final Object marker, final List<Transaction> results) {

    final SettableFuture<Object> resultFuture = SettableFuture.create();
    final Sha256Hash rootTxHash = tx.getHash();
    // We want to recursively grab its dependencies. This is so listeners can learn important information like
    // whether a transaction is dependent on a timelocked transaction or has an unexpectedly deep dependency tree
    // or depends on a no-fee transaction.

    // We may end up requesting transactions that we've already downloaded and thrown away here.
    Set<Sha256Hash> needToRequest = new CopyOnWriteArraySet<Sha256Hash>();
    for (TransactionInput input : tx.getInputs()) {
        // There may be multiple inputs that connect to the same transaction.
        needToRequest.add(input.getOutpoint().getHash());
    }
    lock.lock();
    try {
        // Build the request for the missing dependencies.
        List<ListenableFuture<Transaction>> futures = Lists.newArrayList();
        GetDataMessage getdata = new GetDataMessage(params);
        if (needToRequest.size() > 1)
            log.info("{}: Requesting {} transactions for depth {} dep resolution", getAddress(),
                    needToRequest.size(), depth + 1);
        for (Sha256Hash hash : needToRequest) {
            getdata.addTransaction(hash);
            GetDataRequest req = new GetDataRequest(hash, SettableFuture.create());
            futures.add(req.future);
            getDataFutures.add(req);
        }
        ListenableFuture<List<Transaction>> successful = Futures.successfulAsList(futures);
        Futures.addCallback(successful, new FutureCallback<List<Transaction>>() {
            @Override
            public void onSuccess(List<Transaction> transactions) {
                // Once all transactions either were received, or we know there are no more to come ...
                // Note that transactions will contain "null" for any positions that weren't successful.
                List<ListenableFuture<Object>> childFutures = Lists.newLinkedList();
                for (Transaction tx : transactions) {
                    if (tx == null)
                        continue;
                    log.info("{}: Downloaded dependency of {}: {}", getAddress(), rootTxHash,
                            tx.getHashAsString());
                    results.add(tx);
                    // Now recurse into the dependencies of this transaction too.
                    if (depth + 1 < maxDepth)
                        childFutures
                                .add(downloadDependenciesInternal(maxDepth, depth + 1, tx, marker, results));
                }
                if (childFutures.size() == 0) {
                    // Short-circuit: we're at the bottom of this part of the tree.
                    resultFuture.set(marker);
                } else {
                    // There are some children to download. Wait until it's done (and their children and their
                    // children...) to inform the caller that we're finished.
                    Futures.addCallback(Futures.successfulAsList(childFutures),
                            new FutureCallback<List<Object>>() {
                                @Override
                                public void onSuccess(List<Object> objects) {
                                    resultFuture.set(marker);
                                }

                                @Override
                                public void onFailure(Throwable throwable) {
                                    resultFuture.setException(throwable);
                                }
                            });
                }
            }

            @Override
            public void onFailure(Throwable throwable) {
                resultFuture.setException(throwable);
            }
        });
        // Start the operation.
        sendMessage(getdata);
    } catch (Exception e) {
        log.error("{}: Couldn't send getdata in downloadDependencies({})", this, tx.getHash());
        resultFuture.setException(e);
        return resultFuture;
    } finally {
        lock.unlock();
    }
    return resultFuture;
}

From source file: io.druid.indexing.kafka.supervisor.KafkaSupervisor.java

/**
 * Monitors [pendingCompletionTaskGroups] for tasks that have completed. If any task in a task group has completed, we
 * can safely stop the rest of the tasks in that group. If a task group has exceeded its publishing timeout, then
 * we need to stop all tasks in not only that task group but also 1) any subsequent task group that is also pending
 * completion and 2) the current task group that is running, because the assumption that we have handled up to the
 * starting offset for subsequent task groups is no longer valid, and subsequent tasks would fail as soon as they
 * attempted to publish because of the contiguous range consistency check.
 */
private void checkPendingCompletionTasks() throws ExecutionException, InterruptedException {
    List<ListenableFuture<Void>> futures = Lists.newArrayList();

    for (Map.Entry<Integer, CopyOnWriteArrayList<TaskGroup>> pendingGroupList : pendingCompletionTaskGroups
            .entrySet()) {

        boolean stopTasksInTaskGroup = false;
        Integer groupId = pendingGroupList.getKey();
        CopyOnWriteArrayList<TaskGroup> taskGroupList = pendingGroupList.getValue();
        List<TaskGroup> toRemove = Lists.newArrayList();

        for (TaskGroup group : taskGroupList) {
            boolean foundSuccess = false, entireTaskGroupFailed = false;

            if (stopTasksInTaskGroup) {
                // One of the earlier groups that was handling the same partition set timed out before the segments were
                // published so stop any additional groups handling the same partition set that are pending completion.
                futures.add(stopTasksInGroup(group));
                toRemove.add(group);
                continue;
            }

            Iterator<Map.Entry<String, TaskData>> iTask = group.tasks.entrySet().iterator();
            while (iTask.hasNext()) {
                Map.Entry<String, TaskData> task = iTask.next();

                if (task.getValue().status.isFailure()) {
                    iTask.remove(); // remove failed task
                    if (group.tasks.isEmpty()) {
                        // if all tasks in the group have failed, just nuke all task groups with this partition set and restart
                        entireTaskGroupFailed = true;
                        break;
                    }
                }

                if (task.getValue().status.isSuccess()) {
                    // If one of the pending completion tasks was successful, stop the rest of the tasks in the group as
                    // we no longer need them to publish their segment.
                    log.info("Task [%s] completed successfully, stopping tasks %s", task.getKey(),
                            group.tasks.keySet());
                    futures.add(stopTasksInGroup(group));
                    foundSuccess = true;
                    toRemove.add(group); // remove the TaskGroup from the list of pending completion task groups
                    break; // skip iterating the rest of the tasks in this group as they've all been stopped now
                }
            }

            if ((!foundSuccess && group.completionTimeout.isBeforeNow()) || entireTaskGroupFailed) {
                if (entireTaskGroupFailed) {
                    log.warn(
                            "All tasks in group [%d] failed to publish, killing all tasks for these partitions",
                            groupId);
                } else {
                    log.makeAlert("No task in [%s] succeeded before the completion timeout elapsed [%s]!",
                            group.tasks.keySet(), ioConfig.getCompletionTimeout()).emit();
                }

                // reset partitions offsets for this task group so that they will be re-read from metadata storage
                partitionGroups.remove(groupId);

                // stop all the tasks in this pending completion group
                futures.add(stopTasksInGroup(group));

                // set a flag so the other pending completion groups for this set of partitions will also stop
                stopTasksInTaskGroup = true;

                // stop all the tasks in the currently reading task group and remove the bad task group
                futures.add(stopTasksInGroup(taskGroups.remove(groupId)));

                toRemove.add(group);
            }
        }

        taskGroupList.removeAll(toRemove);
    }

    Futures.successfulAsList(futures).get(); // wait for all task shutdowns to complete before returning
}

From source file: org.apache.druid.indexing.kafka.supervisor.KafkaSupervisor.java

private void discoverTasks() throws ExecutionException, InterruptedException, TimeoutException {
    int taskCount = 0;
    List<String> futureTaskIds = Lists.newArrayList();
    List<ListenableFuture<Boolean>> futures = Lists.newArrayList();
    List<Task> tasks = taskStorage.getActiveTasks();
    final Map<Integer, TaskGroup> taskGroupsToVerify = new HashMap<>();

    for (Task task : tasks) {
        if (!(task instanceof KafkaIndexTask) || !dataSource.equals(task.getDataSource())) {
            continue;
        }

        taskCount++;
        final KafkaIndexTask kafkaTask = (KafkaIndexTask) task;
        final String taskId = task.getId();

        // Determine which task group this task belongs to based on one of the partitions handled by this task. If we
        // later determine that this task is actively reading, we will make sure that it matches our current partition
        // allocation (getTaskGroupIdForPartition(partition) should return the same value for every partition being read
        // by this task) and kill it if it is not compatible. If the task is instead found to be in the publishing
        // state, we will permit it to complete even if it doesn't match our current partition allocation to support
        // seamless schema migration.

        Iterator<Integer> it = kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap().keySet()
                .iterator();
        final Integer taskGroupId = (it.hasNext() ? getTaskGroupIdForPartition(it.next()) : null);

        if (taskGroupId != null) {
            // check to see if we already know about this task, either in [taskGroups] or in [pendingCompletionTaskGroups]
            // and if not add it to taskGroups or pendingCompletionTaskGroups (if status = PUBLISHING)
            TaskGroup taskGroup = taskGroups.get(taskGroupId);
            if (!isTaskInPendingCompletionGroups(taskId)
                    && (taskGroup == null || !taskGroup.tasks.containsKey(taskId))) {

                futureTaskIds.add(taskId);
                futures.add(Futures.transform(taskClient.getStatusAsync(taskId),
                        new Function<KafkaIndexTask.Status, Boolean>() {
                            @Override
                            public Boolean apply(KafkaIndexTask.Status status) {
                                try {
                                    log.debug("Task [%s], status [%s]", taskId, status);
                                    if (status == KafkaIndexTask.Status.PUBLISHING) {
                                        kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap()
                                                .keySet().forEach(
                                                        partition -> addDiscoveredTaskToPendingCompletionTaskGroups(
                                                                getTaskGroupIdForPartition(partition), taskId,
                                                                kafkaTask.getIOConfig().getStartPartitions()
                                                                        .getPartitionOffsetMap()));

                                        // update partitionGroups with the publishing task's offsets (if they are greater than what is
                                        // existing) so that the next tasks will start reading from where this task left off
                                        Map<Integer, Long> publishingTaskEndOffsets = taskClient
                                                .getEndOffsets(taskId);

                                        for (Entry<Integer, Long> entry : publishingTaskEndOffsets.entrySet()) {
                                            Integer partition = entry.getKey();
                                            Long offset = entry.getValue();
                                            ConcurrentHashMap<Integer, Long> partitionOffsets = partitionGroups
                                                    .get(getTaskGroupIdForPartition(partition));

                                            boolean succeeded;
                                            do {
                                                succeeded = true;
                                                Long previousOffset = partitionOffsets.putIfAbsent(partition,
                                                        offset);
                                                if (previousOffset != null && previousOffset < offset) {
                                                    succeeded = partitionOffsets.replace(partition,
                                                            previousOffset, offset);
                                                }
                                            } while (!succeeded);
                                        }
                                    } else {
                                        for (Integer partition : kafkaTask.getIOConfig().getStartPartitions()
                                                .getPartitionOffsetMap().keySet()) {
                                            if (!taskGroupId.equals(getTaskGroupIdForPartition(partition))) {
                                                log.warn(
                                                        "Stopping task [%s] which does not match the expected partition allocation",
                                                        taskId);
                                                try {
                                                    stopTask(taskId, false).get(futureTimeoutInSeconds,
                                                            TimeUnit.SECONDS);
                                                } catch (InterruptedException | ExecutionException
                                                        | TimeoutException e) {
                                                    log.warn(e, "Exception while stopping task");
                                                }
                                                return false;
                                            }
                                        }
                                        // make sure the task's io and tuning configs match with the supervisor config
                                        // if it is current then only create corresponding taskGroup if it does not exist
                                        if (!isTaskCurrent(taskGroupId, taskId)) {
                                            log.info(
                                                    "Stopping task [%s] which does not match the expected parameters and ingestion spec",
                                                    taskId);
                                            try {
                                                stopTask(taskId, false).get(futureTimeoutInSeconds,
                                                        TimeUnit.SECONDS);
                                            } catch (InterruptedException | ExecutionException
                                                    | TimeoutException e) {
                                                log.warn(e, "Exception while stopping task");
                                            }
                                            return false;
                                        } else {
                                            final TaskGroup taskGroup = taskGroups.computeIfAbsent(taskGroupId,
                                                    k -> {
                                                        log.info(
                                                                "Creating a new task group for taskGroupId[%d]",
                                                                taskGroupId);
                                                        return new TaskGroup(taskGroupId,
                                                                ImmutableMap.copyOf(kafkaTask.getIOConfig()
                                                                        .getStartPartitions()
                                                                        .getPartitionOffsetMap()),
                                                                kafkaTask.getIOConfig().getMinimumMessageTime(),
                                                                kafkaTask.getIOConfig()
                                                                        .getMaximumMessageTime());
                                                    });
                                            taskGroupsToVerify.put(taskGroupId, taskGroup);
                                            final TaskData prevTaskData = taskGroup.tasks.putIfAbsent(taskId,
                                                    new TaskData());
                                            if (prevTaskData != null) {
                                                throw new ISE(
                                                        "WTH? a taskData[%s] already exists for new task[%s]",
                                                        prevTaskData, taskId);
                                            }
                                        }
                                    }
                                    return true;
                                } catch (Throwable t) {
                                    log.error(t, "Something bad while discovering task [%s]", taskId);
                                    return null;
                                }
                            }
                        }, workerExec));
            }
        }
    }

    List<Boolean> results = Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
    for (int i = 0; i < results.size(); i++) {
        if (results.get(i) == null) {
            String taskId = futureTaskIds.get(i);
            log.warn("Task [%s] failed to return status, killing task", taskId);
            killTask(taskId);
        }
    }
    log.debug("Found [%d] Kafka indexing tasks for dataSource [%s]", taskCount, dataSource);

    // make sure the checkpoints are consistent with each other and with the metadata store
    verifyAndMergeCheckpoints(taskGroupsToVerify.values());
}

From source file: io.druid.indexing.jdbc.supervisor.JDBCSupervisor.java

/**
 * Monitors [pendingCompletionTaskGroups] for tasks that have completed. If any task in a task group has completed, we
 * can safely stop the rest of the tasks in that group. If a task group has exceeded its publishing timeout, then
 * we need to stop all tasks in not only that task group but also 1) any subsequent task group that is also pending
 * completion and 2) the current task group that is running, because the assumption that we have handled up to the
 * starting offset for subsequent task groups is no longer valid, and subsequent tasks would fail as soon as they
 * attempted to publish because of the contiguous range consistency check.
 */
private void checkPendingCompletionTasks() throws ExecutionException, InterruptedException, TimeoutException {
    List<ListenableFuture<?>> futures = Lists.newArrayList();

    for (Map.Entry<Integer, CopyOnWriteArrayList<TaskGroup>> pendingGroupList : pendingCompletionTaskGroups
            .entrySet()) {

        boolean stopTasksInTaskGroup = false;
        Integer groupId = pendingGroupList.getKey();
        CopyOnWriteArrayList<TaskGroup> taskGroupList = pendingGroupList.getValue();
        List<TaskGroup> toRemove = Lists.newArrayList();

        for (TaskGroup group : taskGroupList) {
            boolean foundSuccess = false, entireTaskGroupFailed = false;

            if (stopTasksInTaskGroup) {
                // One of the earlier groups that was handling the same partition set timed out before the segments were
                // published so stop any additional groups handling the same partition set that are pending completion.
                futures.add(stopTasksInGroup(group));
                toRemove.add(group);
                continue;
            }

            Iterator<Map.Entry<String, TaskData>> iTask = group.tasks.entrySet().iterator();
            while (iTask.hasNext()) {
                Map.Entry<String, TaskData> task = iTask.next();

                if (task.getValue().status.isFailure()) {
                    iTask.remove(); // remove failed task
                    if (group.tasks.isEmpty()) {
                        // if all tasks in the group have failed, just nuke all task groups with this partition set and restart
                        entireTaskGroupFailed = true;
                        break;
                    }
                }

                if (task.getValue().status.isSuccess()) {
                    // If one of the pending completion tasks was successful, stop the rest of the tasks in the group as
                    // we no longer need them to publish their segment.
                    log.info("Task [%s] completed successfully, stopping tasks %s", task.getKey(),
                            group.taskIds());
                    futures.add(stopTasksInGroup(group));
                    foundSuccess = true;
                    toRemove.add(group); // remove the TaskGroup from the list of pending completion task groups
                    break; // skip iterating the rest of the tasks in this group as they've all been stopped now
                }
            }

            if ((!foundSuccess && group.completionTimeout.isBeforeNow()) || entireTaskGroupFailed) {
                if (entireTaskGroupFailed) {
                    log.warn(
                            "All tasks in group [%d] failed to publish, killing all tasks for these partitions",
                            groupId);
                } else {
                    log.makeAlert("No task in [%s] succeeded before the completion timeout elapsed [%s]!",
                            group.taskIds(), ioConfig.getCompletionTimeout()).emit();
                }

                // reset partitions offsets for this task group so that they will be re-read from metadata storage
                groups.remove(groupId);

                // stop all the tasks in this pending completion group
                futures.add(stopTasksInGroup(group));

                // set a flag so the other pending completion groups for this set of partitions will also stop
                stopTasksInTaskGroup = true;

                // stop all the tasks in the currently reading task group and remove the bad task group
                futures.add(stopTasksInGroup(taskGroups.remove(groupId)));

                toRemove.add(group);
            }
        }

        taskGroupList.removeAll(toRemove);
    }

    // wait for all task shutdowns to complete before returning
    Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
}

From source file: io.druid.indexing.kafka.supervisor.KafkaSupervisor.java

private void checkCurrentTaskState() throws ExecutionException, InterruptedException {
    List<ListenableFuture<Void>> futures = Lists.newArrayList();
    Iterator<Map.Entry<Integer, TaskGroup>> iTaskGroups = taskGroups.entrySet().iterator();
    while (iTaskGroups.hasNext()) {
        Map.Entry<Integer, TaskGroup> taskGroupEntry = iTaskGroups.next();
        Integer groupId = taskGroupEntry.getKey();
        TaskGroup taskGroup = taskGroupEntry.getValue();

        // Iterate the list of known tasks in this group and:
        //   1) Kill any tasks which are not "current" (have the partitions, starting offsets, and minimumMessageTime
        //      (if applicable) in [taskGroups])
        //   2) Remove any tasks that have failed from the list
        //   3) If any task completed successfully, stop all the tasks in this group and move to the next group

        log.debug("Task group [%d] pre-pruning: %s", groupId, taskGroup.tasks.keySet());

        Iterator<Map.Entry<String, TaskData>> iTasks = taskGroup.tasks.entrySet().iterator();
        while (iTasks.hasNext()) {
            Map.Entry<String, TaskData> task = iTasks.next();
            String taskId = task.getKey();
            TaskData taskData = task.getValue();

            // stop and remove bad tasks from the task group
            if (!isTaskCurrent(groupId, taskId)) {
                log.info("Stopping task [%s] which does not match the expected offset range and ingestion spec",
                        taskId);
                futures.add(stopTask(taskId, false));
                iTasks.remove();
                continue;
            }

            // remove failed tasks
            if (taskData.status.isFailure()) {
                iTasks.remove();
                continue;
            }

            // check for successful tasks, and if we find one, stop all tasks in the group and remove the group so it can
            // be recreated with the next set of offsets
            if (taskData.status.isSuccess()) {
                futures.add(stopTasksInGroup(taskGroup));
                iTaskGroups.remove();
                break;
            }
        }
        log.debug("Task group [%d] post-pruning: %s", groupId, taskGroup.tasks.keySet());
    }

    Futures.successfulAsList(futures).get(); // wait for all task shutdowns to complete before returning
}

From source file: org.apache.druid.indexing.kafka.supervisor.KafkaSupervisor.java

/**
 * This method does two things:
 * 1. Makes sure the checkpoint information in the taskGroup is consistent with that of the tasks; if not, kills the
 * inconsistent tasks.
 * 2. Truncates the checkpoints in the taskGroup for which segments have already been published, so that any newly
 * created tasks for the taskGroup start indexing from after the latest published offsets.
 */
private void verifyAndMergeCheckpoints(final TaskGroup taskGroup) {
    final int groupId = taskGroup.groupId;
    final List<Pair<String, TreeMap<Integer, Map<Integer, Long>>>> taskSequences = new ArrayList<>();
    final List<ListenableFuture<TreeMap<Integer, Map<Integer, Long>>>> futures = new ArrayList<>();
    final List<String> taskIds = new ArrayList<>();

    for (String taskId : taskGroup.taskIds()) {
        final ListenableFuture<TreeMap<Integer, Map<Integer, Long>>> checkpointsFuture = taskClient
                .getCheckpointsAsync(taskId, true);
        taskIds.add(taskId);
        futures.add(checkpointsFuture);
    }

    try {
        List<TreeMap<Integer, Map<Integer, Long>>> futuresResult = Futures.successfulAsList(futures)
                .get(futureTimeoutInSeconds, TimeUnit.SECONDS);

        for (int i = 0; i < futuresResult.size(); i++) {
            final TreeMap<Integer, Map<Integer, Long>> checkpoints = futuresResult.get(i);
            final String taskId = taskIds.get(i);
            if (checkpoints == null) {
                try {
                    // catch the exception in failed futures
                    futures.get(i).get();
                } catch (Exception e) {
                    log.error(e, "Problem while getting checkpoints for task [%s], killing the task", taskId);
                    killTask(taskId);
                    taskGroup.tasks.remove(taskId);
                }
            } else if (checkpoints.isEmpty()) {
                log.warn("Ignoring task [%s], as probably it is not started running yet", taskId);
            } else {
                taskSequences.add(new Pair<>(taskId, checkpoints));
            }
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    final KafkaDataSourceMetadata latestDataSourceMetadata = (KafkaDataSourceMetadata) indexerMetadataStorageCoordinator
            .getDataSourceMetadata(dataSource);
    final boolean hasValidOffsetsFromDb = latestDataSourceMetadata != null
            && latestDataSourceMetadata.getKafkaPartitions() != null
            && ioConfig.getTopic().equals(latestDataSourceMetadata.getKafkaPartitions().getTopic());
    final Map<Integer, Long> latestOffsetsFromDb;
    if (hasValidOffsetsFromDb) {
        latestOffsetsFromDb = latestDataSourceMetadata.getKafkaPartitions().getPartitionOffsetMap();
    } else {
        latestOffsetsFromDb = null;
    }

    // order tasks of this taskGroup by the latest sequenceId
    taskSequences.sort((o1, o2) -> o2.rhs.firstKey().compareTo(o1.rhs.firstKey()));

    final Set<String> tasksToKill = new HashSet<>();
    final AtomicInteger earliestConsistentSequenceId = new AtomicInteger(-1);
    int taskIndex = 0;

    while (taskIndex < taskSequences.size()) {
        TreeMap<Integer, Map<Integer, Long>> taskCheckpoints = taskSequences.get(taskIndex).rhs;
        String taskId = taskSequences.get(taskIndex).lhs;
        if (earliestConsistentSequenceId.get() == -1) {
            // find the first replica task with earliest sequenceId consistent with datasource metadata in the metadata
            // store
            if (taskCheckpoints.entrySet().stream()
                    .anyMatch(sequenceCheckpoint -> sequenceCheckpoint.getValue().entrySet().stream()
                            .allMatch(partitionOffset -> Longs.compare(partitionOffset.getValue(),
                                    latestOffsetsFromDb == null ? partitionOffset.getValue()
                                            : latestOffsetsFromDb.getOrDefault(partitionOffset.getKey(),
                                                    partitionOffset.getValue())) == 0)
                            && earliestConsistentSequenceId.compareAndSet(-1, sequenceCheckpoint.getKey()))
                    || (pendingCompletionTaskGroups.getOrDefault(groupId, EMPTY_LIST).size() > 0
                            && earliestConsistentSequenceId.compareAndSet(-1, taskCheckpoints.firstKey()))) {
                final SortedMap<Integer, Map<Integer, Long>> latestCheckpoints = new TreeMap<>(
                        taskCheckpoints.tailMap(earliestConsistentSequenceId.get()));
                log.info("Setting taskGroup sequences to [%s] for group [%d]", latestCheckpoints, groupId);
                taskGroup.sequenceOffsets.clear();
                taskGroup.sequenceOffsets.putAll(latestCheckpoints);
            } else {
                log.debug("Adding task [%s] to kill list, checkpoints[%s], latestoffsets from DB [%s]", taskId,
                        taskCheckpoints, latestOffsetsFromDb);
                tasksToKill.add(taskId);
            }
        } else {
            // check consistency with taskGroup sequences
            if (taskCheckpoints.get(taskGroup.sequenceOffsets.firstKey()) == null
                    || !(taskCheckpoints.get(taskGroup.sequenceOffsets.firstKey())
                            .equals(taskGroup.sequenceOffsets.firstEntry().getValue()))
                    || taskCheckpoints.tailMap(taskGroup.sequenceOffsets.firstKey())
                            .size() != taskGroup.sequenceOffsets.size()) {
                log.debug("Adding task [%s] to kill list, checkpoints[%s], taskgroup checkpoints [%s]", taskId,
                        taskCheckpoints, taskGroup.sequenceOffsets);
                tasksToKill.add(taskId);
            }
        }
        taskIndex++;
    }

    if ((tasksToKill.size() > 0 && tasksToKill.size() == taskGroup.tasks.size()) || (taskGroup.tasks.size() == 0
            && pendingCompletionTaskGroups.getOrDefault(groupId, EMPTY_LIST).size() == 0)) {
        // killing all tasks or no task left in the group ?
        // clear state about the taskgroup so that get latest offset information is fetched from metadata store
        log.warn("Clearing task group [%d] information as no valid tasks left the group", groupId);
        taskGroups.remove(groupId);
        partitionGroups.get(groupId).replaceAll((partition, offset) -> NOT_SET);
    }

    taskSequences.stream().filter(taskIdSequences -> tasksToKill.contains(taskIdSequences.lhs))
            .forEach(sequenceCheckpoint -> {
                log.warn(
                        "Killing task [%s], as its checkpoints [%s] are not consistent with group checkpoints[%s] or latest "
                                + "persisted offsets in metadata store [%s]",
                        sequenceCheckpoint.lhs, sequenceCheckpoint.rhs, taskGroup.sequenceOffsets,
                        latestOffsetsFromDb);
                killTask(sequenceCheckpoint.lhs);
                taskGroup.tasks.remove(sequenceCheckpoint.lhs);
            });
}

From source file: io.druid.indexing.jdbc.supervisor.JDBCSupervisor.java

private void checkCurrentTaskState() throws ExecutionException, InterruptedException, TimeoutException {
    List<ListenableFuture<?>> futures = Lists.newArrayList();
    Iterator<Map.Entry<Integer, TaskGroup>> iTaskGroups = taskGroups.entrySet().iterator();
    while (iTaskGroups.hasNext()) {
        Map.Entry<Integer, TaskGroup> taskGroupEntry = iTaskGroups.next();
        Integer groupId = taskGroupEntry.getKey();
        TaskGroup taskGroup = taskGroupEntry.getValue();

        // Iterate the list of known tasks in this group and:
        //   1) Kill any tasks which are not "current" (have the partitions, starting offsets, and minimumMessageTime
        //      (if applicable) in [taskGroups])
        //   2) Remove any tasks that have failed from the list
        //   3) If any task completed successfully, stop all the tasks in this group and move to the next group

        log.debug("Task group [%d] pre-pruning: %s", groupId, taskGroup.taskIds());

        Iterator<Map.Entry<String, TaskData>> iTasks = taskGroup.tasks.entrySet().iterator();
        while (iTasks.hasNext()) {
            Map.Entry<String, TaskData> task = iTasks.next();
            String taskId = task.getKey();
            TaskData taskData = task.getValue();

            // stop and remove bad tasks from the task group
            if (!isTaskCurrent(groupId, taskId)) {
                log.info("Stopping task [%s] which does not match the expected offset range and ingestion spec",
                        taskId);
                futures.add(stopTask(taskId, false));
                iTasks.remove();
                continue;
            }

            // remove failed tasks
            if (taskData.status.isFailure()) {
                iTasks.remove();
                continue;
            }

            // check for successful tasks, and if we find one, stop all tasks in the group and remove the group so it can
            // be recreated with the next set of offsets
            if (taskData.status.isSuccess()) {
                futures.add(stopTasksInGroup(taskGroup));
                iTaskGroups.remove();
                break;
            }
        }
        log.debug("Task group [%d] post-pruning: %s", groupId, taskGroup.taskIds());
    }

    // wait for all task shutdowns to complete before returning
    Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
}

From source file: io.druid.indexing.kafka.supervisor.KafkaSupervisor.java

private ListenableFuture<Void> stopTasksInGroup(TaskGroup taskGroup) {
    if (taskGroup == null) {
        return Futures.immediateFuture(null);
    }

    final List<ListenableFuture<Void>> futures = Lists.newArrayList();
    for (Map.Entry<String, TaskData> entry : taskGroup.tasks.entrySet()) {
        if (!entry.getValue().status.isComplete()) {
            futures.add(stopTask(entry.getKey(), false));
        }
    }

    return Futures.transform(Futures.successfulAsList(futures), new Function<List<Void>, Void>() {
        @Nullable
        @Override
        public Void apply(@Nullable List<Void> input) {
            return null;
        }
    }, workerExec);
}

From source file: io.druid.indexing.jdbc.supervisor.JDBCSupervisor.java

private ListenableFuture<?> stopTasksInGroup(TaskGroup taskGroup) {
    if (taskGroup == null) {
        return Futures.immediateFuture(null);
    }

    final List<ListenableFuture<Void>> futures = Lists.newArrayList();
    for (Map.Entry<String, TaskData> entry : taskGroup.tasks.entrySet()) {
        if (!entry.getValue().status.isComplete()) {
            futures.add(stopTask(entry.getKey(), false));
        }
    }

    return Futures.successfulAsList(futures);
}

From source file: org.apache.druid.indexing.kafka.supervisor.KafkaSupervisor.java

private void updateTaskStatus() throws ExecutionException, InterruptedException, TimeoutException {
    final List<ListenableFuture<Boolean>> futures = Lists.newArrayList();
    final List<String> futureTaskIds = Lists.newArrayList();

    // update status (and startTime if unknown) of current tasks in taskGroups
    for (TaskGroup group : taskGroups.values()) {
        for (Entry<String, TaskData> entry : group.tasks.entrySet()) {
            final String taskId = entry.getKey();
            final TaskData taskData = entry.getValue();

            if (taskData.startTime == null) {
                futureTaskIds.add(taskId);
                futures.add(Futures.transform(taskClient.getStartTimeAsync(taskId),
                        new Function<DateTime, Boolean>() {
                            @Nullable
                            @Override
                            public Boolean apply(@Nullable DateTime startTime) {
                                if (startTime == null) {
                                    return false;
                                }

                                taskData.startTime = startTime;
                                long millisRemaining = ioConfig.getTaskDuration().getMillis()
                                        - (System.currentTimeMillis() - taskData.startTime.getMillis());
                                if (millisRemaining > 0) {
                                    scheduledExec.schedule(buildRunTask(),
                                            millisRemaining + MAX_RUN_FREQUENCY_MILLIS, TimeUnit.MILLISECONDS);
                                }

                                return true;
                            }
                        }, workerExec));
            }

            taskData.status = taskStorage.getStatus(taskId).get();
        }
    }

    // update status of pending completion tasks in pendingCompletionTaskGroups
    for (List<TaskGroup> taskGroups : pendingCompletionTaskGroups.values()) {
        for (TaskGroup group : taskGroups) {
            for (Entry<String, TaskData> entry : group.tasks.entrySet()) {
                entry.getValue().status = taskStorage.getStatus(entry.getKey()).get();
            }
        }
    }

    List<Boolean> results = Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
    for (int i = 0; i < results.size(); i++) {
        // false means the task hasn't started running yet and that's okay; null means it should be running but the HTTP
        // request threw an exception so kill the task
        if (results.get(i) == null) {
            String taskId = futureTaskIds.get(i);
            log.warn("Task [%s] failed to return start time, killing task", taskId);
            killTask(taskId);
        }
    }
}