Example usage for java.util.concurrent ConcurrentHashMap putIfAbsent

List of usage examples for java.util.concurrent ConcurrentHashMap putIfAbsent

Introduction

On this page you can find example usages of java.util.concurrent.ConcurrentHashMap.putIfAbsent.

Prototype

public V putIfAbsent(K key, V value) 
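
putIfAbsent atomically adds the mapping only if the key is not already present, and returns the previous value associated with the key, or null if there was no mapping. Because the check and the insert happen as a single atomic step, callers can use the return value to tell whether they won a race to initialize an entry. A minimal, self-contained demonstration:

import java.util.concurrent.ConcurrentHashMap;

public class PutIfAbsentDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<String, Integer>();

        // Key absent: the mapping is added and null is returned.
        System.out.println(map.putIfAbsent("hits", 1));  // null
        System.out.println(map.get("hits"));             // 1

        // Key present: the map is left unchanged and the current value is returned.
        System.out.println(map.putIfAbsent("hits", 99)); // 1
        System.out.println(map.get("hits"));             // 1
    }
}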

Usage

From source file:org.wso2.carbon.event.output.adaptor.mqtt.MQTTEventAdaptorType.java

/**
 * @param outputEventAdaptorMessageConfiguration
 *                 - topic name to publish messages
 * @param message  - an Object[]{Event, EventDefinition}
 * @param outputEventAdaptorConfiguration
 *                 the {@link OutputEventAdaptorConfiguration} object that will be used to
 *                 get configuration information
 * @param tenantId tenant id of the calling thread.
 */
public void publish(OutputEventAdaptorMessageConfiguration outputEventAdaptorMessageConfiguration,
        Object message, OutputEventAdaptorConfiguration outputEventAdaptorConfiguration, int tenantId) {

    ConcurrentHashMap<String, ConcurrentHashMap<String, MQTTAdaptorPublisher>> clientIdSpecificEventSenderMap = publisherMap
            .get(outputEventAdaptorConfiguration.getName());
    if (null == clientIdSpecificEventSenderMap) {
        clientIdSpecificEventSenderMap = new ConcurrentHashMap<String, ConcurrentHashMap<String, MQTTAdaptorPublisher>>();
        if (null != publisherMap.putIfAbsent(outputEventAdaptorConfiguration.getName(),
                clientIdSpecificEventSenderMap)) {
            clientIdSpecificEventSenderMap = publisherMap.get(outputEventAdaptorConfiguration.getName());
        }
    }

    String clientId = outputEventAdaptorMessageConfiguration.getOutputMessageProperties()
            .get(MQTTEventAdaptorConstants.ADAPTOR_MESSAGE_CLIENTID);
    ConcurrentHashMap<String, MQTTAdaptorPublisher> topicSpecificEventPublisherMap = clientIdSpecificEventSenderMap
            .get(clientId);
    if (null == topicSpecificEventPublisherMap) {
        topicSpecificEventPublisherMap = new ConcurrentHashMap<String, MQTTAdaptorPublisher>();
        if (null != clientIdSpecificEventSenderMap.putIfAbsent(clientId, topicSpecificEventPublisherMap)) {
            topicSpecificEventPublisherMap = clientIdSpecificEventSenderMap.get(clientId);
        }
    }

    String topic = outputEventAdaptorMessageConfiguration.getOutputMessageProperties()
            .get(MQTTEventAdaptorConstants.ADAPTOR_MESSAGE_TOPIC);
    MQTTAdaptorPublisher mqttAdaptorPublisher = topicSpecificEventPublisherMap.get(topic);
    if (mqttAdaptorPublisher == null) {
        MQTTBrokerConnectionConfiguration mqttBrokerConnectionConfiguration = new MQTTBrokerConnectionConfiguration(
                outputEventAdaptorConfiguration.getOutputProperties()
                        .get(MQTTEventAdaptorConstants.ADAPTOR_CONF_URL),
                outputEventAdaptorConfiguration.getOutputProperties()
                        .get(MQTTEventAdaptorConstants.ADAPTOR_CONF_USERNAME),
                outputEventAdaptorConfiguration.getOutputProperties()
                        .get(MQTTEventAdaptorConstants.ADAPTOR_CONF_PASSWORD),
                outputEventAdaptorConfiguration.getOutputProperties()
                        .get(MQTTEventAdaptorConstants.ADAPTOR_CONF_CLEAN_SESSION),
                outputEventAdaptorConfiguration.getOutputProperties()
                        .get(MQTTEventAdaptorConstants.ADAPTOR_CONF_KEEP_ALIVE));
        mqttAdaptorPublisher = new MQTTAdaptorPublisher(mqttBrokerConnectionConfiguration,
                outputEventAdaptorMessageConfiguration.getOutputMessageProperties()
                        .get(MQTTEventAdaptorConstants.ADAPTOR_MESSAGE_TOPIC),
                outputEventAdaptorMessageConfiguration.getOutputMessageProperties()
                        .get(MQTTEventAdaptorConstants.ADAPTOR_MESSAGE_CLIENTID));
        topicSpecificEventPublisherMap.put(topic, mqttAdaptorPublisher);
    }
    String qos = outputEventAdaptorMessageConfiguration.getOutputMessageProperties()
            .get(MQTTEventAdaptorConstants.ADAPTOR_MESSAGE_QOS);

    try {
        if (qos == null) {
            mqttAdaptorPublisher.publish(message.toString());
        } else {
            mqttAdaptorPublisher.publish(Integer.parseInt(qos), message.toString());
        }
    } catch (OutputEventAdaptorEventProcessingException ex) {
        log.error(ex);
        topicSpecificEventPublisherMap.remove(topic);
        throw new OutputEventAdaptorEventProcessingException(ex);
    }
}
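
The method above relies on the standard race-safe idiom for lazily creating nested maps: read the entry, and if it is missing, construct a candidate, try putIfAbsent, and adopt the winner's map when the call returns non-null. Note that the final step stores the new publisher with plain put rather than putIfAbsent, so two threads racing past the null check could each construct an MQTTAdaptorPublisher and one of them would be silently overwritten. A generic sketch of the idiom (the helper name and type parameters are illustrative):

import java.util.concurrent.ConcurrentHashMap;

// Returns the inner map for key, creating it at most once across threads.
static <K, IK, IV> ConcurrentHashMap<IK, IV> innerMapFor(
        ConcurrentHashMap<K, ConcurrentHashMap<IK, IV>> outer, K key) {
    ConcurrentHashMap<IK, IV> inner = outer.get(key);
    if (inner == null) {
        inner = new ConcurrentHashMap<IK, IV>();
        ConcurrentHashMap<IK, IV> raced = outer.putIfAbsent(key, inner);
        if (raced != null) {
            inner = raced; // another thread inserted first; use its map
        }
    }
    return inner;
}

On Java 8 and later, outer.computeIfAbsent(key, k -> new ConcurrentHashMap<>()) collapses the whole idiom into one atomic call.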

From source file:uk.ac.ebi.fg.jobs.PubMedDataMinerJob.java

public void doExecute(JobExecutionContext jobExecutionContext)
        throws JobExecutionException, InterruptedException {
    JobDataMap dataMap = jobExecutionContext.getMergedJobDataMap();

    Set<String> pubMedNewIds = (Set<String>) dataMap.get("pubMedNewIds");
    ConcurrentHashMap<String, SortedSet<PubMedId>> pubMedIdRelationMap = (ConcurrentHashMap<String, SortedSet<PubMedId>>) dataMap
            .get("pubMedIdRelationMap");
    Configuration properties = (Configuration) dataMap.get("properties");
    AtomicInteger pubMedCounter = (AtomicInteger) dataMap.get("pubMedCounter");
    PubMedRetriever pubMedRetriever = (PubMedRetriever) dataMap.get("pubMedRetriever");
    String entry = (String) dataMap.get("entry");

    String pubMedURL = properties.getString("pub_med_url");
    int maxPubMedDist = properties.getInt("max_pubmed_distance");
    SortedSet<PubMedId> similarPublications = new TreeSet<PubMedId>();

    // add publication with distance 0
    similarPublications.add(new PubMedId(entry, 0));

    // get similar publications (distance 1)
    if (maxPubMedDist >= 1)
        similarPublications.addAll(getPubMedIdSet(pubMedRetriever.getSimilars(pubMedURL, entry), 1));

    // get publications with distance 2
    if (null != similarPublications && maxPubMedDist == 2) {
        SortedSet<PubMedId> iterationSet = new TreeSet<PubMedId>(similarPublications);

        for (PubMedId publication : iterationSet)
            similarPublications.addAll(
                    getPubMedIdSet(pubMedRetriever.getSimilars(pubMedURL, publication.getPublicationId()), 2));
    }

    if (!similarPublications.isEmpty())
        pubMedIdRelationMap.putIfAbsent(entry, similarPublications);

    // pause for one second between jobs; calling wait() without holding the
    // object's monitor would throw IllegalMonitorStateException, so sleep instead
    Thread.sleep(1000);

    logger.debug("Finished " + pubMedCounter.incrementAndGet() + " of " + pubMedNewIds.size()
            + " PubMedDataMinerJobs");
}

From source file:org.wso2.carbon.event.input.adaptor.jms.JMSEventAdaptorType.java

private void createJMSAdaptorListener(
        InputEventAdaptorMessageConfiguration inputEventAdaptorMessageConfiguration,
        InputEventAdaptorListener inputEventAdaptorListener,
        InputEventAdaptorConfiguration inputEventAdaptorConfiguration, AxisConfiguration axisConfiguration,
        String subscriptionId, int tenantId) {

    ConcurrentHashMap<String, ConcurrentHashMap<String, ConcurrentHashMap<String, SubscriptionDetails>>> adaptorDestinationSubscriptionsMap = tenantAdaptorDestinationSubscriptionsMap
            .get(tenantId);
    if (adaptorDestinationSubscriptionsMap == null) {
        adaptorDestinationSubscriptionsMap = new ConcurrentHashMap<String, ConcurrentHashMap<String, ConcurrentHashMap<String, SubscriptionDetails>>>();
        if (null != tenantAdaptorDestinationSubscriptionsMap.putIfAbsent(tenantId,
                adaptorDestinationSubscriptionsMap)) {
            adaptorDestinationSubscriptionsMap = tenantAdaptorDestinationSubscriptionsMap.get(tenantId);
        }
    }

    ConcurrentHashMap<String, ConcurrentHashMap<String, SubscriptionDetails>> destinationSubscriptionsMap = adaptorDestinationSubscriptionsMap
            .get(inputEventAdaptorConfiguration.getName());
    if (destinationSubscriptionsMap == null) {
        destinationSubscriptionsMap = new ConcurrentHashMap<String, ConcurrentHashMap<String, SubscriptionDetails>>();
        if (null != adaptorDestinationSubscriptionsMap.putIfAbsent(inputEventAdaptorConfiguration.getName(),
                destinationSubscriptionsMap)) {
            destinationSubscriptionsMap = adaptorDestinationSubscriptionsMap
                    .get(inputEventAdaptorConfiguration.getName());
        }
    }

    String destination = inputEventAdaptorMessageConfiguration.getInputMessageProperties()
            .get(JMSEventAdaptorConstants.ADAPTOR_JMS_DESTINATION);

    ConcurrentHashMap<String, SubscriptionDetails> subscriptionsMap = destinationSubscriptionsMap
            .get(destination);
    if (subscriptionsMap == null) {
        subscriptionsMap = new ConcurrentHashMap<String, SubscriptionDetails>();
        if (null != destinationSubscriptionsMap.putIfAbsent(destination, subscriptionsMap)) {
            subscriptionsMap = destinationSubscriptionsMap.get(destination);
        }
    }

    Map<String, String> adaptorProperties = new HashMap<String, String>();
    if (inputEventAdaptorConfiguration.getInputProperties()
            .get(JMSEventAdaptorConstants.ADAPTOR_JMS_DURABLE_SUBSCRIBER_NAME) != null) {
        InternalInputEventAdaptorConfiguration internalInputEventAdaptorConfiguration = inputEventAdaptorConfiguration
                .getInputConfiguration();
        internalInputEventAdaptorConfiguration
                .addEventAdaptorProperty(JMSEventAdaptorConstants.ADAPTOR_JMS_SUBSCRIPTION_DURABLE, "true");
        inputEventAdaptorConfiguration.setInputConfiguration(internalInputEventAdaptorConfiguration);
    } else {
        InternalInputEventAdaptorConfiguration internalInputEventAdaptorConfiguration = inputEventAdaptorConfiguration
                .getInputConfiguration();
        internalInputEventAdaptorConfiguration
                .addEventAdaptorProperty(JMSEventAdaptorConstants.ADAPTOR_JMS_SUBSCRIPTION_DURABLE, "false");
        inputEventAdaptorConfiguration.setInputConfiguration(internalInputEventAdaptorConfiguration);
    }

    adaptorProperties.putAll(inputEventAdaptorConfiguration.getInputProperties());

    JMSConnectionFactory jmsConnectionFactory = new JMSConnectionFactory(
            new Hashtable<String, String>(adaptorProperties), inputEventAdaptorConfiguration.getName());

    Map<String, String> messageConfig = new HashMap<String, String>();
    messageConfig.put(JMSConstants.PARAM_DESTINATION, destination);
    JMSTaskManager jmsTaskManager = JMSTaskManagerFactory
            .createTaskManagerForService(
                    jmsConnectionFactory, inputEventAdaptorConfiguration.getName(), new NativeWorkerPool(4, 100,
                            1000, 1000, "JMS Threads", "JMSThreads" + UUID.randomUUID().toString()),
                    messageConfig);
    jmsTaskManager.setJmsMessageListener(new JMSMessageListener(inputEventAdaptorListener, axisConfiguration));

    JMSListener jmsListener = new JMSListener(inputEventAdaptorConfiguration.getName() + "#" + destination,
            jmsTaskManager);
    jmsListener.startListener();
    SubscriptionDetails subscriptionDetails = new SubscriptionDetails(jmsConnectionFactory, jmsListener);
    subscriptionsMap.put(subscriptionId, subscriptionDetails);

}

From source file:com.taobao.diamond.client.impl.DefaultDiamondSubscriber.java

public void addDataId(String dataId, String group) {
    SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMdd   HH:mm:ss");
    log.info("diamond client start:" + formatter.format(new Date(System.currentTimeMillis())));
    if (null == group) {
        group = Constants.DEFAULT_GROUP;
    }

    ConcurrentHashMap<String, CacheData> cacheDatas = this.cache.get(dataId);
    if (null == cacheDatas) {
        ConcurrentHashMap<String, CacheData> newCacheDatas = new ConcurrentHashMap<String, CacheData>();
        ConcurrentHashMap<String, CacheData> oldCacheDatas = this.cache.putIfAbsent(dataId, newCacheDatas);
        if (null != oldCacheDatas) {
            cacheDatas = oldCacheDatas;
        } else {
            cacheDatas = newCacheDatas;
        }
    }
    CacheData cacheData = cacheDatas.get(group);
    if (null == cacheData) {
        cacheDatas.putIfAbsent(group, new CacheData(dataId, group));
        if (log.isInfoEnabled()) {
            log.info("DataID[" + dataId + "]Group" + group);
        }
        this.start();
    }
}
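
This variant keeps putIfAbsent's return value instead of re-reading the map, which saves a lookup; the two forms are otherwise equivalent (the cost is that a fresh map is allocated even when the entry already exists, which the initial get() mostly avoids). Condensed, using the names from the code above:

ConcurrentHashMap<String, CacheData> fresh = new ConcurrentHashMap<String, CacheData>();
ConcurrentHashMap<String, CacheData> existing = cache.putIfAbsent(dataId, fresh);
// Use whichever map actually ended up in the cache.
ConcurrentHashMap<String, CacheData> cacheDatas = (existing != null) ? existing : fresh;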

From source file:cn.leancloud.diamond.client.impl.DefaultDiamondSubscriber.java

public void addDataId(String dataId, String group) {
    SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMdd   HH:mm:ss");
    log.info("diamond client start:" + formatter.format(new Date(System.currentTimeMillis())));
    if (null == group) {
        group = Constants.DEFAULT_GROUP;
    }

    ConcurrentHashMap<String, CacheData> cacheDatas = this.cache.get(dataId);
    if (null == cacheDatas) {
        ConcurrentHashMap<String, CacheData> newCacheDatas = new ConcurrentHashMap<String, CacheData>();
        ConcurrentHashMap<String, CacheData> oldCacheDatas = this.cache.putIfAbsent(dataId, newCacheDatas);
        if (null != oldCacheDatas) {
            cacheDatas = oldCacheDatas;
        } else {
            cacheDatas = newCacheDatas;
        }
    }
    CacheData cacheData = cacheDatas.get(group);
    if (null == cacheData) {
        cacheDatas.putIfAbsent(group, new CacheData(dataId, group));
        if (log.isInfoEnabled()) {
            log.info("DataID[" + dataId + "]Group" + group);
        }
        this.start();
    }
}

From source file:io.druid.indexing.kafka.supervisor.KafkaSupervisor.java

private void discoverTasks() throws ExecutionException, InterruptedException {
    int taskCount = 0;
    List<String> futureTaskIds = Lists.newArrayList();
    List<ListenableFuture<Boolean>> futures = Lists.newArrayList();
    List<Task> tasks = taskStorage.getActiveTasks();

    for (Task task : tasks) {
        if (!(task instanceof KafkaIndexTask) || !dataSource.equals(task.getDataSource())) {
            continue;
        }

        taskCount++;
        final KafkaIndexTask kafkaTask = (KafkaIndexTask) task;
        final String taskId = task.getId();

        // Determine which task group this task belongs to based on one of the partitions handled by this task. If we
        // later determine that this task is actively reading, we will make sure that it matches our current partition
        // allocation (getTaskGroupIdForPartition(partition) should return the same value for every partition being read
        // by this task) and kill it if it is not compatible. If the task is instead found to be in the publishing
        // state, we will permit it to complete even if it doesn't match our current partition allocation to support
        // seamless schema migration.

        Iterator<Integer> it = kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap().keySet()
                .iterator();
        final Integer taskGroupId = (it.hasNext() ? getTaskGroupIdForPartition(it.next()) : null);

        if (taskGroupId != null) {
            // check to see if we already know about this task, either in [taskGroups] or in [pendingCompletionTaskGroups]
            // and if not add it to taskGroups or pendingCompletionTaskGroups (if status = PUBLISHING)
            TaskGroup taskGroup = taskGroups.get(taskGroupId);
            if (!isTaskInPendingCompletionGroups(taskId)
                    && (taskGroup == null || !taskGroup.tasks.containsKey(taskId))) {

                futureTaskIds.add(taskId);
                futures.add(Futures.transform(taskClient.getStatusAsync(taskId),
                        new Function<KafkaIndexTask.Status, Boolean>() {
                            @Override
                            public Boolean apply(KafkaIndexTask.Status status) {
                                if (status == KafkaIndexTask.Status.PUBLISHING) {
                                    addDiscoveredTaskToPendingCompletionTaskGroups(taskGroupId, taskId,
                                            kafkaTask.getIOConfig().getStartPartitions()
                                                    .getPartitionOffsetMap());

                                    // update partitionGroups with the publishing task's offsets (if they are greater than what is
                                    // existing) so that the next tasks will start reading from where this task left off
                                    Map<Integer, Long> publishingTaskCurrentOffsets = taskClient
                                            .getCurrentOffsets(taskId, true);

                                    for (Map.Entry<Integer, Long> entry : publishingTaskCurrentOffsets
                                            .entrySet()) {
                                        Integer partition = entry.getKey();
                                        Long offset = entry.getValue();
                                        ConcurrentHashMap<Integer, Long> partitionOffsets = partitionGroups
                                                .get(getTaskGroupIdForPartition(partition));

                                        boolean succeeded;
                                        do {
                                            succeeded = true;
                                            Long previousOffset = partitionOffsets.putIfAbsent(partition,
                                                    offset);
                                            if (previousOffset != null && previousOffset < offset) {
                                                succeeded = partitionOffsets.replace(partition, previousOffset,
                                                        offset);
                                            }
                                        } while (!succeeded);
                                    }

                                } else {
                                    for (Integer partition : kafkaTask.getIOConfig().getStartPartitions()
                                            .getPartitionOffsetMap().keySet()) {
                                        if (!taskGroupId.equals(getTaskGroupIdForPartition(partition))) {
                                            log.warn(
                                                    "Stopping task [%s] which does not match the expected partition allocation",
                                                    taskId);
                                            try {
                                                stopTask(taskId, false).get();
                                            } catch (InterruptedException | ExecutionException e) {
                                                log.warn(e, "Exception while stopping task");
                                            }
                                            return false;
                                        }
                                    }

                                    if (taskGroups.putIfAbsent(taskGroupId,
                                            new TaskGroup(
                                                    ImmutableMap.copyOf(kafkaTask.getIOConfig()
                                                            .getStartPartitions().getPartitionOffsetMap()),
                                                    kafkaTask.getIOConfig().getMinimumMessageTime())) == null) {
                                        log.debug("Created new task group [%d]", taskGroupId);
                                    }

                                    if (!isTaskCurrent(taskGroupId, taskId)) {
                                        log.info(
                                                "Stopping task [%s] which does not match the expected parameters and ingestion spec",
                                                taskId);
                                        try {
                                            stopTask(taskId, false).get();
                                        } catch (InterruptedException | ExecutionException e) {
                                            log.warn(e, "Exception while stopping task");
                                        }
                                        return false;
                                    } else {
                                        taskGroups.get(taskGroupId).tasks.putIfAbsent(taskId, new TaskData());
                                    }
                                }
                                return true;
                            }
                        }, workerExec));
            }
        }
    }

    List<Boolean> results = Futures.successfulAsList(futures).get();
    for (int i = 0; i < results.size(); i++) {
        if (results.get(i) == null) {
            String taskId = futureTaskIds.get(i);
            log.warn("Task [%s] failed to return status, killing task", taskId);
            killTask(taskId);
        }
    }
    log.debug("Found [%d] Kafka indexing tasks for dataSource [%s]", taskCount, dataSource);
}
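
The inner do/while above combines putIfAbsent with the three-argument replace to build a lock-free "advance to the maximum" update: insert the offset if the partition is unknown, otherwise bump the stored value only when the new offset is larger, retrying if a concurrent writer changed it in between. Extracted as a standalone helper (the method name is illustrative):

import java.util.concurrent.ConcurrentHashMap;

// Atomically ensures offsets.get(partition) is at least the given offset.
static void advanceOffset(ConcurrentHashMap<Integer, Long> offsets, int partition, long offset) {
    boolean succeeded;
    do {
        succeeded = true;
        Long previous = offsets.putIfAbsent(partition, offset);
        if (previous != null && previous < offset) {
            // replace() succeeds only if the value is still `previous`.
            succeeded = offsets.replace(partition, previous, offset);
        }
    } while (!succeeded);
}

On Java 8 and later, offsets.merge(partition, offset, Math::max) expresses the same monotonic update in a single call.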

From source file:io.druid.indexing.jdbc.supervisor.JDBCSupervisor.java

private void discoverTasks() throws ExecutionException, InterruptedException, TimeoutException {
    int taskCount = 0;
    List<String> futureTaskIds = Lists.newArrayList();
    List<ListenableFuture<Boolean>> futures = Lists.newArrayList();
    List<Task> tasks = taskStorage.getActiveTasks();

    log.info("TaskStorage ActiveTasks is [%d]", tasks.size());

    for (Task task : tasks) {
        if (!(task instanceof JDBCIndexTask) || !dataSource.equals(task.getDataSource())) {
            continue;
        }

        taskCount++;
        final JDBCIndexTask jdbcTask = (JDBCIndexTask) task;
        final String taskId = task.getId();

        // Determine which task group this task belongs to based on the table handled by this task. If we
        // later determine that this task is actively reading, we will make sure that it matches our current partition
        // allocation (getTaskGroup(partition) should return the same value for every partition being read
        // by this task) and kill it if it is not compatible. If the task is instead found to be in the publishing
        // state, we will permit it to complete even if it doesn't match our current partition allocation to support
        // seamless schema migration.

        Iterator<Integer> it = jdbcTask.getIOConfig().getJdbcOffsets().getOffsetMaps().keySet().iterator();
        final Integer taskGroupId = (it.hasNext() ? getTaskGroup(it.next()) : null);

        log.info("taskGroupId is " + taskGroupId);

        if (taskGroupId != null) {
            // check to see if we already know about this task, either in [taskGroups] or in [pendingCompletionTaskGroups]
            // and if not add it to taskGroups or pendingCompletionTaskGroups (if status = PUBLISHING)
            TaskGroup taskGroup = taskGroups.get(taskGroupId);
            if (!isTaskInPendingCompletionGroups(taskId)
                    && (taskGroup == null || !taskGroup.tasks.containsKey(taskId))) {
                log.info("TaskGroup info details taskId [%s] in taskGroupId [%s]", taskId, taskGroupId);
                futureTaskIds.add(taskId);
                futures.add(Futures.transform(taskClient.getStatusAsync(taskId),
                        new Function<JDBCIndexTask.Status, Boolean>() {
                            @Override
                            public Boolean apply(JDBCIndexTask.Status status) {
                                if (status == JDBCIndexTask.Status.PUBLISHING) {
                                    addDiscoveredTaskToPendingCompletionTaskGroups(taskGroupId, taskId,
                                            jdbcTask.getIOConfig().getJdbcOffsets().getOffsetMaps());

                                    // update groups with the publishing task's offsets (if they are greater than what is
                                    // existing) so that the next tasks will start reading from where this task left off
                                    Map<Integer, Long> publishingTaskCurrentOffsets = taskClient
                                            .getCurrentOffsets(taskId, true);
                                    for (Map.Entry<Integer, Long> entry : publishingTaskCurrentOffsets
                                            .entrySet()) {
                                        Integer partition = entry.getKey();
                                        long endOffset = entry.getValue();
                                        log.info("Current offset is [%s]", endOffset);
                                        ConcurrentHashMap<Integer, Long> offsetsMap = (ConcurrentHashMap<Integer, Long>) groups
                                                .get(getTaskGroup(partition));
                                        boolean succeeded;
                                        do {
                                            succeeded = true;
                                            Long previousOffset = offsetsMap.putIfAbsent(partition, endOffset);
                                            if (previousOffset != null && previousOffset < endOffset) {
                                                succeeded = offsetsMap.replace(partition, previousOffset,
                                                        endOffset);
                                            }
                                        } while (!succeeded);
                                    }

                                } else {
                                    for (Integer partition : jdbcTask.getIOConfig().getJdbcOffsets()
                                            .getOffsetMaps().keySet()) {
                                        if (!taskGroupId.equals(getTaskGroup(partition))) {
                                            log.warn(
                                                    "Stopping task [%s] which does not match the expected partition allocation",
                                                    taskId);
                                            try {
                                                stopTask(taskId, false).get(futureTimeoutInSeconds,
                                                        TimeUnit.SECONDS);
                                            } catch (InterruptedException | ExecutionException
                                                    | TimeoutException e) {
                                                log.warn(e, "Exception while stopping task");
                                            }
                                            return false;
                                        }

                                        if (taskGroups.putIfAbsent(taskGroupId, new TaskGroup(
                                                ImmutableMap.copyOf(jdbcTask.getIOConfig().getJdbcOffsets()
                                                        .getOffsetMaps()),
                                                jdbcTask.getIOConfig().getMinimumMessageTime())) == null) {
                                            log.info("Created new task group [%d] from discoverTasks",
                                                    taskGroupId);
                                        }

                                        if (!isTaskCurrent(taskGroupId, taskId)) {
                                            log.info(
                                                    "Stopping task [%s] which does not match the expected parameters and ingestion spec",
                                                    taskId);
                                            try {
                                                stopTask(taskId, false).get(futureTimeoutInSeconds,
                                                        TimeUnit.SECONDS);
                                            } catch (InterruptedException | ExecutionException
                                                    | TimeoutException e) {
                                                log.warn(e, "Exception while stopping task");
                                            }
                                            return false;
                                        } else {
                                            log.info("taskGroup put IfAbsent by [%s]", taskId);
                                            taskGroups.get(taskGroupId).tasks.putIfAbsent(taskId,
                                                    new TaskData());
                                        }
                                    }
                                }
                                return true;
                            }
                        }, workerExec));
            }
        }
    }

    List<Boolean> results = Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
    for (int i = 0; i < results.size(); i++) {
        if (results.get(i) == null) {
            String taskId = futureTaskIds.get(i);
            log.warn("Task [%s] failed to return status, killing task", taskId);
            killTask(taskId);
        }
    }
    log.debug("Found [%d] JDBC indexing tasks for dataSource [%s]", taskCount, dataSource);
}

From source file:com.starit.diamond.client.impl.DefaultDiamondSubscriber.java

public void addDataId(String dataId, String group) {
    SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMdd   HH:mm:ss");
    log.info("diamond client start:" + formatter.format(new Date(System.currentTimeMillis())));
    if (null == group) {
        group = Constants.DEFAULT_GROUP;
    }

    ConcurrentHashMap<String, CacheData> cacheDatas = this.cache.get(dataId);
    if (null == cacheDatas) {
        ConcurrentHashMap<String, CacheData> newCacheDatas = new ConcurrentHashMap<String, CacheData>();
        ConcurrentHashMap<String, CacheData> oldCacheDatas = this.cache.putIfAbsent(dataId, newCacheDatas);
        if (null != oldCacheDatas) {
            cacheDatas = oldCacheDatas;
        } else {
            cacheDatas = newCacheDatas;
        }
    }
    CacheData cacheData = cacheDatas.get(group);
    if (null == cacheData) {
        cacheDatas.putIfAbsent(group, new CacheData(dataId, group));
        if (log.isInfoEnabled()) {
            log.info("DataID[" + dataId + "]Group" + group);
        }
        this.start();
        DiamondClientUtil.addDataId(this.clusterType, dataId + "-" + group);
    }
}

From source file:com.taobao.gecko.service.impl.BaseRemotingController.java

public Map<Connection, ResponseCommand> invokeToGroupAllConnections(final String group,
        final RequestCommand command, final long time, final TimeUnit timeUnit)
        throws InterruptedException, NotifyRemotingException {
    if (group == null) {
        throw new NotifyRemotingException("Null group");
    }
    if (command == null) {
        throw new NotifyRemotingException("Null command");
    }
    final List<Connection> connections = this.remotingContext.getConnectionsByGroup(group);

    if (connections != null && connections.size() > 0) {
        final long now = System.currentTimeMillis();
        final CountDownLatch countDownLatch = new CountDownLatch(connections.size());
        final ConcurrentHashMap<Connection, ResponseCommand> resultMap = new ConcurrentHashMap<Connection, ResponseCommand>();
        final GroupAllConnectionRequestCallBack requestCallBack = new GroupAllConnectionRequestCallBack(null,
                countDownLatch, TimeUnit.MILLISECONDS.convert(time, timeUnit), now, resultMap);

        for (final Connection conn : connections) {
            final DefaultConnection connection = (DefaultConnection) conn;
            if (connection.isConnected()) {
                try {
                    connection.addRequestCallBack(command.getOpaque(), requestCallBack);
                    requestCallBack.addWriteFuture(connection, connection.asyncSend(command));
                } catch (final Throwable e) {
                    requestCallBack.onResponse(group,
                            this.createCommErrorResponseCommand(command.getRequestHeader(), e.getMessage()),
                            connection);
                }
            } else {
                requestCallBack.onResponse(group,
                        this.createCommErrorResponseCommand(command.getRequestHeader(), ""),
                        connection);
            }
        }
        if (!countDownLatch.await(time, timeUnit)) {
            for (final Connection conn : connections) {
                if (!resultMap.containsKey(conn)) {
                    if (resultMap.putIfAbsent(conn, this.createTimeoutCommand(command.getRequestHeader(),
                            conn.getRemoteSocketAddress())) == null) {
                        requestCallBack.cancelWrite(conn);
                        ((DefaultConnection) conn).removeRequestCallBack(command.getOpaque());
                    }
                }
            }
        }
        return resultMap;
    } else {
        return null;
    }
}
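
In the timeout sweep above, putIfAbsent doubles as a one-shot claim against the response path: if a real response was already recorded for a connection, putIfAbsent returns it and the sweep leaves it intact; only when the timeout marker lands first (the call returns null) does the sweep cancel the pending write and remove the callback. The claim pattern in isolation (cleanUpPendingRequest is a hypothetical helper):

// Exactly one writer observes null for a given key and runs the timeout path.
if (results.putIfAbsent(key, timeoutMarker) == null) {
    cleanUpPendingRequest(key); // hypothetical: cancel the write, drop the callback
}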

From source file:com.alibaba.napoli.gecko.service.impl.BaseRemotingController.java

public Map<Connection, ResponseCommand> invokeToGroupAllConnections(final String group,
        final RequestCommand command, final long time, final TimeUnit timeUnit)
        throws InterruptedException, NotifyRemotingException {
    if (group == null) {
        throw new NotifyRemotingException("Null group");
    }
    if (command == null) {
        throw new NotifyRemotingException("Null command");
    }
    final List<Connection> connections = this.remotingContext.getConnectionsByGroup(group);

    if (connections != null && connections.size() > 0) {
        final long now = System.currentTimeMillis();
        final CountDownLatch countDownLatch = new CountDownLatch(connections.size());
        final ConcurrentHashMap<Connection, ResponseCommand> resultMap = new ConcurrentHashMap<Connection, ResponseCommand>();
        final GroupAllConnectionRequestCallBack requestCallBack = new GroupAllConnectionRequestCallBack(null,
                countDownLatch, TimeUnit.MILLISECONDS.convert(time, timeUnit), now, resultMap);

        for (final Connection conn : connections) {
            final DefaultConnection connection = (DefaultConnection) conn;
            if (connection.isConnected()) {
                try {
                    connection.addRequestCallBack(command.getOpaque(), requestCallBack);
                    requestCallBack.addWriteFuture(connection, connection.asyncSend(command));
                } catch (final Throwable e) {
                    requestCallBack.onResponse(group,
                            this.createCommErrorResponseCommand(command.getRequestHeader(), e.getMessage()),
                            connection);
                }
            } else {
                requestCallBack.onResponse(group,
                        this.createCommErrorResponseCommand(command.getRequestHeader(), "?"),
                        connection);
            }
        }
        if (!countDownLatch.await(time, timeUnit)) {
            for (final Connection conn : connections) {
                if (!resultMap.containsKey(conn)) {
                    if (resultMap.putIfAbsent(conn, this.createTimeoutCommand(command.getRequestHeader(),
                            conn.getRemoteSocketAddress())) == null) {
                        requestCallBack.cancelWrite(conn);
                        ((DefaultConnection) conn).removeRequestCallBack(command.getOpaque());
                    }
                }
            }
        }
        return resultMap;
    } else {
        return null;
    }
}