Example usage for java.util.concurrent ConcurrentHashMap replace

Introduction

This page shows example usage of the three-argument java.util.concurrent.ConcurrentHashMap.replace method, collected from open-source projects.

Prototype

public boolean replace(K key, V oldValue, V newValue) 
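
In plain terms, this overload is a compare-and-swap on a single entry: the mapping for key is changed to newValue only if the key is currently mapped to oldValue, and the boolean return value reports whether the swap took effect. A minimal, self-contained sketch (not taken from the sources below) showing the return value:

import java.util.concurrent.ConcurrentHashMap;

public class ReplaceDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Long> offsets = new ConcurrentHashMap<>();
        offsets.put("partition-0", 10L);

        // Succeeds: the current value matches the expected old value.
        boolean swapped = offsets.replace("partition-0", 10L, 20L); // true, value is now 20

        // Fails: the current value is 20, not 10, so the map is left unchanged.
        boolean stale = offsets.replace("partition-0", 10L, 30L);   // false, value stays 20

        System.out.println(swapped + " " + stale + " " + offsets.get("partition-0"));
    }
}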

Usage

From source file:io.druid.indexing.kafka.supervisor.KafkaSupervisor.java

private void discoverTasks() throws ExecutionException, InterruptedException {
    int taskCount = 0;
    List<String> futureTaskIds = Lists.newArrayList();
    List<ListenableFuture<Boolean>> futures = Lists.newArrayList();
    List<Task> tasks = taskStorage.getActiveTasks();

    for (Task task : tasks) {
        if (!(task instanceof KafkaIndexTask) || !dataSource.equals(task.getDataSource())) {
            continue;
        }

        taskCount++;
        final KafkaIndexTask kafkaTask = (KafkaIndexTask) task;
        final String taskId = task.getId();

        // Determine which task group this task belongs to based on one of the partitions handled by this task. If we
        // later determine that this task is actively reading, we will make sure that it matches our current partition
        // allocation (getTaskGroupIdForPartition(partition) should return the same value for every partition being read
        // by this task) and kill it if it is not compatible. If the task is instead found to be in the publishing
        // state, we will permit it to complete even if it doesn't match our current partition allocation to support
        // seamless schema migration.

        Iterator<Integer> it = kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap().keySet()
                .iterator();
        final Integer taskGroupId = (it.hasNext() ? getTaskGroupIdForPartition(it.next()) : null);

        if (taskGroupId != null) {
            // check to see if we already know about this task, either in [taskGroups] or in [pendingCompletionTaskGroups]
            // and if not add it to taskGroups or pendingCompletionTaskGroups (if status = PUBLISHING)
            TaskGroup taskGroup = taskGroups.get(taskGroupId);
            if (!isTaskInPendingCompletionGroups(taskId)
                    && (taskGroup == null || !taskGroup.tasks.containsKey(taskId))) {

                futureTaskIds.add(taskId);
                futures.add(Futures.transform(taskClient.getStatusAsync(taskId),
                        new Function<KafkaIndexTask.Status, Boolean>() {
                            @Override
                            public Boolean apply(KafkaIndexTask.Status status) {
                                if (status == KafkaIndexTask.Status.PUBLISHING) {
                                    addDiscoveredTaskToPendingCompletionTaskGroups(taskGroupId, taskId,
                                            kafkaTask.getIOConfig().getStartPartitions()
                                                    .getPartitionOffsetMap());

                                    // update partitionGroups with the publishing task's offsets (if they are greater than what is
                                    // existing) so that the next tasks will start reading from where this task left off
                                    Map<Integer, Long> publishingTaskCurrentOffsets = taskClient
                                            .getCurrentOffsets(taskId, true);

                                    for (Map.Entry<Integer, Long> entry : publishingTaskCurrentOffsets
                                            .entrySet()) {
                                        Integer partition = entry.getKey();
                                        Long offset = entry.getValue();
                                        ConcurrentHashMap<Integer, Long> partitionOffsets = partitionGroups
                                                .get(getTaskGroupIdForPartition(partition));

                                        boolean succeeded;
                                        do {
                                            succeeded = true;
                                            Long previousOffset = partitionOffsets.putIfAbsent(partition,
                                                    offset);
                                            if (previousOffset != null && previousOffset < offset) {
                                                succeeded = partitionOffsets.replace(partition, previousOffset,
                                                        offset);
                                            }
                                        } while (!succeeded);
                                    }

                                } else {
                                    for (Integer partition : kafkaTask.getIOConfig().getStartPartitions()
                                            .getPartitionOffsetMap().keySet()) {
                                        if (!taskGroupId.equals(getTaskGroupIdForPartition(partition))) {
                                            log.warn(
                                                    "Stopping task [%s] which does not match the expected partition allocation",
                                                    taskId);
                                            try {
                                                stopTask(taskId, false).get();
                                            } catch (InterruptedException | ExecutionException e) {
                                                log.warn(e, "Exception while stopping task");
                                            }
                                            return false;
                                        }
                                    }

                                    if (taskGroups.putIfAbsent(taskGroupId,
                                            new TaskGroup(
                                                    ImmutableMap.copyOf(kafkaTask.getIOConfig()
                                                            .getStartPartitions().getPartitionOffsetMap()),
                                                    kafkaTask.getIOConfig().getMinimumMessageTime())) == null) {
                                        log.debug("Created new task group [%d]", taskGroupId);
                                    }

                                    if (!isTaskCurrent(taskGroupId, taskId)) {
                                        log.info(
                                                "Stopping task [%s] which does not match the expected parameters and ingestion spec",
                                                taskId);
                                        try {
                                            stopTask(taskId, false).get();
                                        } catch (InterruptedException | ExecutionException e) {
                                            log.warn(e, "Exception while stopping task");
                                        }
                                        return false;
                                    } else {
                                        taskGroups.get(taskGroupId).tasks.putIfAbsent(taskId, new TaskData());
                                    }
                                }
                                return true;
                            }
                        }, workerExec));
            }
        }
    }

    List<Boolean> results = Futures.successfulAsList(futures).get();
    for (int i = 0; i < results.size(); i++) {
        if (results.get(i) == null) {
            String taskId = futureTaskIds.get(i);
            log.warn("Task [%s] failed to return status, killing task", taskId);
            killTask(taskId);
        }
    }
    log.debug("Found [%d] Kafka indexing tasks for dataSource [%s]", taskCount, dataSource);
}
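
Each of the supervisors on this page uses the same optimistic retry pattern around replace: putIfAbsent seeds the map if the partition has no offset yet, and when the existing offset is lower, replace(key, oldValue, newValue) raises it atomically; if a concurrent writer changed the entry after it was read, the swap returns false and the loop re-reads and retries. A stripped-down sketch of that loop, using hypothetical names (OffsetTracker, advanceOffset) rather than anything from the Druid sources:

import java.util.concurrent.ConcurrentHashMap;

public class OffsetTracker {
    private final ConcurrentHashMap<Integer, Long> offsets = new ConcurrentHashMap<>();

    /**
     * Raises the stored offset for a partition to newOffset, but never lowers it.
     * Retries until either the update sticks or another thread has already
     * recorded an equal or higher offset.
     */
    public void advanceOffset(int partition, long newOffset) {
        boolean succeeded;
        do {
            succeeded = true;
            // Seed the entry if the partition has no offset yet; otherwise get the current value.
            Long previous = offsets.putIfAbsent(partition, newOffset);
            if (previous != null && previous < newOffset) {
                // An existing, lower offset: attempt an atomic compare-and-swap.
                // replace() returns false if a concurrent writer modified the entry
                // after we read it, in which case we loop and try again.
                succeeded = offsets.replace(partition, previous, newOffset);
            }
        } while (!succeeded);
    }

    public Long currentOffset(int partition) {
        return offsets.get(partition);
    }
}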

From source file:io.druid.indexing.jdbc.supervisor.JDBCSupervisor.java

private void discoverTasks() throws ExecutionException, InterruptedException, TimeoutException {
    int taskCount = 0;
    List<String> futureTaskIds = Lists.newArrayList();
    List<ListenableFuture<Boolean>> futures = Lists.newArrayList();
    List<Task> tasks = taskStorage.getActiveTasks();

    log.info("TaskStorage ActiveTasks is [%d]", tasks.size());

    for (Task task : tasks) {
        if (!(task instanceof JDBCIndexTask) || !dataSource.equals(task.getDataSource())) {
            continue;
        }

        taskCount++;
        final JDBCIndexTask jdbcTask = (JDBCIndexTask) task;
        final String taskId = task.getId();

        // Determine which task group this task belongs to based on table handled by this task. If we
        // later determine that this task is actively reading, we will make sure that it matches our current partition
        // allocation (getTaskGroup(partition) should return the same value for every partition being read
        // by this task) and kill it if it is not compatible. If the task is instead found to be in the publishing
        // state, we will permit it to complete even if it doesn't match our current partition allocation to support
        // seamless schema migration.

        Iterator<Integer> it = jdbcTask.getIOConfig().getJdbcOffsets().getOffsetMaps().keySet().iterator();
        final Integer taskGroupId = (it.hasNext() ? getTaskGroup(it.next()) : null);

        log.info("taskGroupId is " + taskGroupId);

        if (taskGroupId != null) {
            // check to see if we already know about this task, either in [taskGroups] or in [pendingCompletionTaskGroups]
            // and if not add it to taskGroups or pendingCompletionTaskGroups (if status = PUBLISHING)
            TaskGroup taskGroup = taskGroups.get(taskGroupId);
            if (!isTaskInPendingCompletionGroups(taskId)
                    && (taskGroup == null || !taskGroup.tasks.containsKey(taskId))) {
                log.info("TaskGroup info details taskId [%s] in taskGroupId [%s]", taskId, taskGroupId);
                futureTaskIds.add(taskId);
                futures.add(Futures.transform(taskClient.getStatusAsync(taskId),
                        new Function<JDBCIndexTask.Status, Boolean>() {
                            @Override
                            public Boolean apply(JDBCIndexTask.Status status) {
                                if (status == JDBCIndexTask.Status.PUBLISHING) {
                                    addDiscoveredTaskToPendingCompletionTaskGroups(taskGroupId, taskId,
                                            jdbcTask.getIOConfig().getJdbcOffsets().getOffsetMaps());

                                    // update groups with the publishing task's offsets (if they are greater than what is
                                    // existing) so that the next tasks will start reading from where this task left off
                                    Map<Integer, Long> publishingTaskCurrentOffsets = taskClient
                                            .getCurrentOffsets(taskId, true);
                                    for (Map.Entry<Integer, Long> entry : publishingTaskCurrentOffsets
                                            .entrySet()) {
                                        Integer partition = entry.getKey();
                                        long endOffset = entry.getValue();
                                        log.info("Current offset is [%s]", endOffset);
                                        ConcurrentHashMap<Integer, Long> offsetsMap = (ConcurrentHashMap<Integer, Long>) groups
                                                .get(getTaskGroup(partition));
                                        boolean succeeded;
                                        do {
                                            succeeded = true;
                                            Long previousOffset = offsetsMap.putIfAbsent(partition, endOffset);
                                            if (previousOffset != null && previousOffset < endOffset) {
                                                succeeded = offsetsMap.replace(partition, previousOffset,
                                                        endOffset);
                                            }
                                        } while (!succeeded);
                                    }

                                } else {
                                    for (Integer partition : jdbcTask.getIOConfig().getJdbcOffsets()
                                            .getOffsetMaps().keySet()) {
                                        if (!taskGroupId.equals(getTaskGroup(partition))) {
                                            log.warn(
                                                    "Stopping task [%s] which does not match the expected partition allocation",
                                                    taskId);
                                            try {
                                                stopTask(taskId, false).get(futureTimeoutInSeconds,
                                                        TimeUnit.SECONDS);
                                            } catch (InterruptedException | ExecutionException
                                                    | TimeoutException e) {
                                                log.warn(e, "Exception while stopping task");
                                            }
                                            return false;
                                        }

                                        if (taskGroups.putIfAbsent(taskGroupId, new TaskGroup(
                                                ImmutableMap.copyOf(jdbcTask.getIOConfig().getJdbcOffsets()
                                                        .getOffsetMaps()),
                                                jdbcTask.getIOConfig().getMinimumMessageTime())) == null) {
                                            log.info("Created new task group [%d] from discoverTasks",
                                                    taskGroupId);
                                        }

                                        if (!isTaskCurrent(taskGroupId, taskId)) {
                                            log.info(
                                                    "Stopping task [%s] which does not match the expected parameters and ingestion spec",
                                                    taskId);
                                            try {
                                                stopTask(taskId, false).get(futureTimeoutInSeconds,
                                                        TimeUnit.SECONDS);
                                            } catch (InterruptedException | ExecutionException
                                                    | TimeoutException e) {
                                                log.warn(e, "Exception while stopping task");
                                            }
                                            return false;
                                        } else {
                                            log.info("taskGroup put IfAbsent by [%s]", taskId);
                                            taskGroups.get(taskGroupId).tasks.putIfAbsent(taskId,
                                                    new TaskData());
                                        }
                                    }
                                }
                                return true;
                            }
                        }, workerExec));
            }
        }
    }

    List<Boolean> results = Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
    for (int i = 0; i < results.size(); i++) {
        if (results.get(i) == null) {
            String taskId = futureTaskIds.get(i);
            log.warn("Task [%s] failed to return status, killing task", taskId);
            killTask(taskId);
        }
    }
    log.debug("Found [%d] JDBC indexing tasks for dataSource [%s]", taskCount, dataSource);
}
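
As an aside, on Java 8 and later the same "only move the offset forward" update can also be written without an explicit retry loop by using ConcurrentHashMap.merge, which applies the remapping function atomically per key. This is an alternative formulation, not what the supervisors on this page actually do:

import java.util.concurrent.ConcurrentHashMap;

public class MergeBasedOffsetTracker {
    private final ConcurrentHashMap<Integer, Long> offsets = new ConcurrentHashMap<>();

    // Atomically keeps the larger of the stored and proposed offsets for the partition.
    public void advanceOffset(int partition, long newOffset) {
        offsets.merge(partition, newOffset, Math::max);
    }
}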

From source file:org.apache.druid.indexing.kafka.supervisor.KafkaSupervisor.java

private void discoverTasks() throws ExecutionException, InterruptedException, TimeoutException {
    int taskCount = 0;
    List<String> futureTaskIds = Lists.newArrayList();
    List<ListenableFuture<Boolean>> futures = Lists.newArrayList();
    List<Task> tasks = taskStorage.getActiveTasks();
    final Map<Integer, TaskGroup> taskGroupsToVerify = new HashMap<>();

    for (Task task : tasks) {
        if (!(task instanceof KafkaIndexTask) || !dataSource.equals(task.getDataSource())) {
            continue;
        }

        taskCount++;
        final KafkaIndexTask kafkaTask = (KafkaIndexTask) task;
        final String taskId = task.getId();

        // Determine which task group this task belongs to based on one of the partitions handled by this task. If we
        // later determine that this task is actively reading, we will make sure that it matches our current partition
        // allocation (getTaskGroupIdForPartition(partition) should return the same value for every partition being read
        // by this task) and kill it if it is not compatible. If the task is instead found to be in the publishing
        // state, we will permit it to complete even if it doesn't match our current partition allocation to support
        // seamless schema migration.

        Iterator<Integer> it = kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap().keySet()
                .iterator();
        final Integer taskGroupId = (it.hasNext() ? getTaskGroupIdForPartition(it.next()) : null);

        if (taskGroupId != null) {
            // check to see if we already know about this task, either in [taskGroups] or in [pendingCompletionTaskGroups]
            // and if not add it to taskGroups or pendingCompletionTaskGroups (if status = PUBLISHING)
            TaskGroup taskGroup = taskGroups.get(taskGroupId);
            if (!isTaskInPendingCompletionGroups(taskId)
                    && (taskGroup == null || !taskGroup.tasks.containsKey(taskId))) {

                futureTaskIds.add(taskId);
                futures.add(Futures.transform(taskClient.getStatusAsync(taskId),
                        new Function<KafkaIndexTask.Status, Boolean>() {
                            @Override
                            public Boolean apply(KafkaIndexTask.Status status) {
                                try {
                                    log.debug("Task [%s], status [%s]", taskId, status);
                                    if (status == KafkaIndexTask.Status.PUBLISHING) {
                                        kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap()
                                                .keySet().forEach(
                                                        partition -> addDiscoveredTaskToPendingCompletionTaskGroups(
                                                                getTaskGroupIdForPartition(partition), taskId,
                                                                kafkaTask.getIOConfig().getStartPartitions()
                                                                        .getPartitionOffsetMap()));

                                        // update partitionGroups with the publishing task's offsets (if they are greater than what is
                                        // existing) so that the next tasks will start reading from where this task left off
                                        Map<Integer, Long> publishingTaskEndOffsets = taskClient
                                                .getEndOffsets(taskId);

                                        for (Entry<Integer, Long> entry : publishingTaskEndOffsets.entrySet()) {
                                            Integer partition = entry.getKey();
                                            Long offset = entry.getValue();
                                            ConcurrentHashMap<Integer, Long> partitionOffsets = partitionGroups
                                                    .get(getTaskGroupIdForPartition(partition));

                                            boolean succeeded;
                                            do {
                                                succeeded = true;
                                                Long previousOffset = partitionOffsets.putIfAbsent(partition,
                                                        offset);
                                                if (previousOffset != null && previousOffset < offset) {
                                                    succeeded = partitionOffsets.replace(partition,
                                                            previousOffset, offset);
                                                }
                                            } while (!succeeded);
                                        }
                                    } else {
                                        for (Integer partition : kafkaTask.getIOConfig().getStartPartitions()
                                                .getPartitionOffsetMap().keySet()) {
                                            if (!taskGroupId.equals(getTaskGroupIdForPartition(partition))) {
                                                log.warn(
                                                        "Stopping task [%s] which does not match the expected partition allocation",
                                                        taskId);
                                                try {
                                                    stopTask(taskId, false).get(futureTimeoutInSeconds,
                                                            TimeUnit.SECONDS);
                                                } catch (InterruptedException | ExecutionException
                                                        | TimeoutException e) {
                                                    log.warn(e, "Exception while stopping task");
                                                }
                                                return false;
                                            }
                                        }
                                        // make sure the task's io and tuning configs match with the supervisor config
                                        // if it is current then only create corresponding taskGroup if it does not exist
                                        if (!isTaskCurrent(taskGroupId, taskId)) {
                                            log.info(
                                                    "Stopping task [%s] which does not match the expected parameters and ingestion spec",
                                                    taskId);
                                            try {
                                                stopTask(taskId, false).get(futureTimeoutInSeconds,
                                                        TimeUnit.SECONDS);
                                            } catch (InterruptedException | ExecutionException
                                                    | TimeoutException e) {
                                                log.warn(e, "Exception while stopping task");
                                            }
                                            return false;
                                        } else {
                                            final TaskGroup taskGroup = taskGroups.computeIfAbsent(taskGroupId,
                                                    k -> {
                                                        log.info(
                                                                "Creating a new task group for taskGroupId[%d]",
                                                                taskGroupId);
                                                        return new TaskGroup(taskGroupId,
                                                                ImmutableMap.copyOf(kafkaTask.getIOConfig()
                                                                        .getStartPartitions()
                                                                        .getPartitionOffsetMap()),
                                                                kafkaTask.getIOConfig().getMinimumMessageTime(),
                                                                kafkaTask.getIOConfig()
                                                                        .getMaximumMessageTime());
                                                    });
                                            taskGroupsToVerify.put(taskGroupId, taskGroup);
                                            final TaskData prevTaskData = taskGroup.tasks.putIfAbsent(taskId,
                                                    new TaskData());
                                            if (prevTaskData != null) {
                                                throw new ISE(
                                                        "WTH? a taskData[%s] already exists for new task[%s]",
                                                        prevTaskData, taskId);
                                            }
                                        }
                                    }
                                    return true;
                                } catch (Throwable t) {
                                    log.error(t, "Something bad while discovering task [%s]", taskId);
                                    return null;
                                }
                            }
                        }, workerExec));
            }
        }
    }

    List<Boolean> results = Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
    for (int i = 0; i < results.size(); i++) {
        if (results.get(i) == null) {
            String taskId = futureTaskIds.get(i);
            log.warn("Task [%s] failed to return status, killing task", taskId);
            killTask(taskId);
        }
    }
    log.debug("Found [%d] Kafka indexing tasks for dataSource [%s]", taskCount, dataSource);

    // make sure the checkpoints are consistent with each other and with the metadata store
    verifyAndMergeCheckpoints(taskGroupsToVerify.values());
}