Example usage for com.google.common.util.concurrent MoreExecutors sameThreadExecutor

Introduction

On this page you can find example usages of com.google.common.util.concurrent.MoreExecutors.sameThreadExecutor(), collected from open-source projects.

Prototype

@Deprecated
@GwtIncompatible("TODO")
public static ListeningExecutorService sameThreadExecutor() 

Document

Creates an executor service that runs each task in the thread that invokes execute/submit, as in ThreadPoolExecutor.CallerRunsPolicy.
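
Because the tasks run synchronously, a submitted task has already finished by the time execute/submit returns, and the returned future is already complete. Below is a minimal sketch of that behavior; note that sameThreadExecutor() was deprecated in Guava 18.0 in favor of MoreExecutors.directExecutor() (a plain Executor) and MoreExecutors.newDirectExecutorService() (a ListeningExecutorService), and was later removed.

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class SameThreadExecutorDemo {
    public static void main(String[] args) throws Exception {
        // Tasks run in the thread that calls submit(); nothing is handed off.
        ListeningExecutorService executor = MoreExecutors.sameThreadExecutor();

        ListenableFuture<String> future = executor.submit(
                () -> "ran on " + Thread.currentThread().getName());

        // The task completed during submit(), so get() never blocks.
        System.out.println(future.get()); // e.g. "ran on main"
    }
}

This synchronous behavior is what makes the executor a popular argument for future listeners and callbacks, as the examples below show: the listener runs immediately in whichever thread completes the future, with no thread hand-off.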

Usage

From source file: org.xtreemfs.foundation.flease.comm.tcp.TCPClient.java
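
Here sameThreadExecutor() supplies the executor for a Guava Service listener, so lifecycle callbacks are delivered synchronously in whatever thread drives the service's state transitions.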

public void setLifeCycleListener(Service.Listener l) {
    server.addListener(l, MoreExecutors.sameThreadExecutor());
}

From source file: com.flipkart.phantom.task.impl.TaskHandlerExecutorRepository.java
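
This snippet queues a command, then submits a Runnable to the same-thread executor; the Runnable runs immediately in the calling thread, blocking on future.get() and publishing a completion event once the command finishes or fails.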

private Future<TaskResult> executeAsyncCommand(final long receiveTime, final TaskHandlerExecutor command,
        final String commandName, final TaskRequestWrapper requestWrapper) {
    if (command == null) {
        throw new UnsupportedOperationException("Invoked unsupported command : " + commandName);
    } else {
        final Future<TaskResult> future = command.queue();
        MoreExecutors.sameThreadExecutor().submit(new Runnable() {
            @Override
            public void run() {
                try {
                    future.get();
                } catch (Exception e) {
                    throw new RuntimeException(
                            "Error in processing command " + command.getServiceName() + ": " + e.getMessage(),
                            e);
                } finally {
                    publishEvent(command, receiveTime, requestWrapper);
                }
            }
        });
        return future;
    }
}

From source file: com.google.gerrit.pgm.Reindex.java
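
Gerrit's offline reindexer throttles work with a semaphore; each indexing future gets a completion listener on sameThreadExecutor(), so the permit is released and failures are recorded in whichever thread completes the future.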

private int indexAll() throws Exception {
    ReviewDb db = sysInjector.getInstance(ReviewDb.class);
    ChangeIndexer indexer = sysInjector.getInstance(ChangeIndexer.class);
    Stopwatch sw = new Stopwatch().start();
    int queueLen = 2 * threads;
    final Semaphore sem = new Semaphore(queueLen);
    final AtomicBoolean ok = new AtomicBoolean(true);
    int i = 0;
    for (final Change change : db.changes().all()) {
        sem.acquire();
        final ListenableFuture<?> future = indexer.index(change);
        future.addListener(new Runnable() {
            @Override
            public void run() {
                try {
                    future.get();
                } catch (InterruptedException e) {
                    log.error("Failed to index change " + change.getId(), e);
                    ok.set(false);
                } catch (ExecutionException e) {
                    log.error("Failed to index change " + change.getId(), e);
                    ok.set(false);
                } finally {
                    sem.release();
                }
            }
        }, MoreExecutors.sameThreadExecutor());
        i++;
    }
    sem.acquire(queueLen);
    double elapsed = sw.elapsed(TimeUnit.MILLISECONDS) / 1000d;
    System.out.format("Reindexed %d changes in %.02fs\n", i, elapsed); // elapsed is in seconds

    return ok.get() ? 0 : 1;
}

From source file: io.druid.sql.calcite.DruidSchema.java
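
Druid's SQL schema cache registers segment and server callbacks with sameThreadExecutor(), so the callbacks update the refresh bookkeeping under lock directly in the server view's notification thread.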

@LifecycleStart
public void start() {
    cacheExec.submit(new Runnable() {
        @Override
        public void run() {
            try {
                while (!Thread.currentThread().isInterrupted()) {
                    final Set<String> dataSources = Sets.newHashSet();

                    try {
                        synchronized (lock) {
                            final long nextRefresh = new DateTime(lastRefresh)
                                    .plus(config.getMetadataRefreshPeriod()).getMillis();

                            while (!(isServerViewInitialized && !dataSourcesNeedingRefresh.isEmpty()
                                    && (refreshImmediately || nextRefresh < System.currentTimeMillis()))) {
                                lock.wait(Math.max(1, nextRefresh - System.currentTimeMillis()));
                            }

                            dataSources.addAll(dataSourcesNeedingRefresh);
                            dataSourcesNeedingRefresh.clear();
                            lastRefresh = System.currentTimeMillis();
                            refreshImmediately = false;
                        }

                        // Refresh dataSources.
                        for (final String dataSource : dataSources) {
                            log.debug("Refreshing metadata for dataSource[%s].", dataSource);
                            final long startTime = System.currentTimeMillis();
                            final DruidTable druidTable = computeTable(dataSource);
                            if (druidTable == null) {
                                if (tables.remove(dataSource) != null) {
                                    log.info("Removed dataSource[%s] from the list of active dataSources.",
                                            dataSource);
                                }
                            } else {
                                tables.put(dataSource, druidTable);
                                log.info("Refreshed metadata for dataSource[%s] in %,dms.", dataSource,
                                        System.currentTimeMillis() - startTime);
                            }
                        }

                        initializationLatch.countDown();
                    } catch (InterruptedException e) {
                        // Fall through.
                        throw e;
                    } catch (Exception e) {
                        log.warn(e, "Metadata refresh failed for dataSources[%s], trying again soon.",
                                Joiner.on(", ").join(dataSources));

                        synchronized (lock) {
                            // Add dataSources back to the refresh list.
                            dataSourcesNeedingRefresh.addAll(dataSources);
                            lock.notifyAll();
                        }
                    }
                }
            } catch (InterruptedException e) {
                // Just exit.
            } catch (Throwable e) {
                // Throwables that fall out to here (not caught by an inner try/catch) are potentially gnarly, like
                // OOMEs. Anyway, let's just emit an alert and stop refreshing metadata.
                log.makeAlert(e, "Metadata refresh failed permanently").emit();
                throw e;
            } finally {
                log.info("Metadata refresh stopped.");
            }
        }
    });

    serverView.registerSegmentCallback(MoreExecutors.sameThreadExecutor(), new ServerView.SegmentCallback() {
        @Override
        public ServerView.CallbackAction segmentViewInitialized() {
            synchronized (lock) {
                isServerViewInitialized = true;
                lock.notifyAll();
            }

            return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
            synchronized (lock) {
                dataSourcesNeedingRefresh.add(segment.getDataSource());
                if (!tables.containsKey(segment.getDataSource())) {
                    refreshImmediately = true;
                }

                lock.notifyAll();
            }

            return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment) {
            synchronized (lock) {
                dataSourcesNeedingRefresh.add(segment.getDataSource());
                lock.notifyAll();
            }

            return ServerView.CallbackAction.CONTINUE;
        }
    });

    serverView.registerServerCallback(MoreExecutors.sameThreadExecutor(), new ServerView.ServerCallback() {
        @Override
        public ServerView.CallbackAction serverRemoved(DruidServer server) {
            final List<String> dataSourceNames = Lists.newArrayList();
            for (DruidDataSource druidDataSource : server.getDataSources()) {
                dataSourceNames.add(druidDataSource.getName());
            }

            synchronized (lock) {
                dataSourcesNeedingRefresh.addAll(dataSourceNames);
                lock.notifyAll();
            }

            return ServerView.CallbackAction.CONTINUE;
        }
    });
}

From source file: io.druid.server.namespace.KafkaExtractionManager.java
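
The cleanup callback attached to the Kafka consumer future runs on the same-thread executor, removing the namespace's event counter in the thread where the task completes or is cancelled.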

public void addListener(final KafkaExtractionNamespace kafkaNamespace, final Map<String, String> map) {
    final String topic = kafkaNamespace.getKafkaTopic();
    final String namespace = kafkaNamespace.getNamespace();
    final ListenableFuture<?> future = executorService.submit(new Runnable() {
        @Override
        public void run() {
            final Properties privateProperties = new Properties();
            privateProperties.putAll(kafkaProperties);
            privateProperties.setProperty("group.id", UUID.randomUUID().toString());
            ConsumerConnector consumerConnector = new kafka.javaapi.consumer.ZookeeperConsumerConnector(
                    new ConsumerConfig(privateProperties));
            List<KafkaStream<String, String>> streams = consumerConnector.createMessageStreamsByFilter(
                    new Whitelist(Pattern.quote(topic)), 1, defaultStringDecoder, defaultStringDecoder);

            if (streams == null || streams.isEmpty()) {
                throw new IAE("Topic [%s] had no streams", topic);
            }
            if (streams.size() > 1) {
                throw new ISE("Topic [%s] has %d streams! expected 1", topic, streams.size());
            }
            backgroundTaskCount.incrementAndGet();
            final KafkaStream<String, String> kafkaStream = streams.get(0);
            final ConsumerIterator<String, String> it = kafkaStream.iterator();
            log.info("Listening to topic [%s] for namespace [%s]", topic, namespace);
            AtomicLong eventCounter = topicEvents.get(namespace);
            if (eventCounter == null) {
                topicEvents.putIfAbsent(namespace, new AtomicLong(0L));
                eventCounter = topicEvents.get(namespace);
            }
            while (it.hasNext()) {
                final MessageAndMetadata<String, String> messageAndMetadata = it.next();
                final String key = messageAndMetadata.key();
                final String message = messageAndMetadata.message();
                if (key == null || message == null) {
                    log.error("Bad key/message from topic [%s]: [%s]", topic, messageAndMetadata);
                    continue;
                }
                map.put(key, message);
                namespaceVersionMap.put(namespace, Long.toString(eventCounter.incrementAndGet()));
                log.debug("Placed key[%s] val[%s]", key, message);
            }
        }
    });
    Futures.addCallback(future, new FutureCallback<Object>() {
        @Override
        public void onSuccess(Object result) {
            topicEvents.remove(namespace);
        }

        @Override
        public void onFailure(Throwable t) {
            topicEvents.remove(namespace);
            if (t instanceof java.util.concurrent.CancellationException) {
                log.warn("Cancelled rename task for topic [%s]", topic);
            } else {
                Throwables.propagate(t);
            }
        }
    }, MoreExecutors.sameThreadExecutor());
}

From source file: org.xtreemfs.foundation.flease.comm.tcp.TCPFleaseCommunicator.java
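
As in the TCPClient example above, the listener is registered with a same-thread executor so that lifecycle events are delivered synchronously.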

public void setLifeCycleListener(Service.Listener l) {
    comm.setLifeCycleListener(l);
    stage.addListener(l, MoreExecutors.sameThreadExecutor());
}

From source file: com.netflix.curator.framework.recipes.queue.QueueBuilder.java
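
Curator's QueueBuilder installs a same-thread executor as its default, so queue callbacks run inline unless another executor is configured.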

private QueueBuilder(CuratorFramework client, QueueConsumer<T> consumer, QueueSerializer<T> serializer,
        String queuePath) {
    this.client = client;
    this.consumer = consumer;
    this.serializer = serializer;
    this.queuePath = queuePath;

    factory = defaultThreadFactory;
    executor = MoreExecutors.sameThreadExecutor();
}

From source file: org.elasticsearch.threadpool.ThreadPool.java
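
Elasticsearch backs its built-in "same" thread pool with sameThreadExecutor(), alongside the cached, fixed, and scaling pools it builds from settings.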

@Inject
public ThreadPool(Settings settings, @Nullable NodeSettingsService nodeSettingsService) {
    super(settings);

    Map<String, Settings> groupSettings = settings.getGroups(THREADPOOL_GROUP);

    int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
    int halfProcMaxAt5 = Math.min(((availableProcessors + 1) / 2), 5);
    int halfProcMaxAt10 = Math.min(((availableProcessors + 1) / 2), 10);
    defaultExecutorTypeSettings = ImmutableMap.<String, Settings>builder()
            .put(Names.GENERIC, settingsBuilder().put("type", "cached").put("keep_alive", "30s").build())
            .put(Names.INDEX,
                    settingsBuilder().put("type", "fixed").put("size", availableProcessors)
                            .put("queue_size", 200).build())
            .put(Names.BULK,
                    settingsBuilder().put("type", "fixed").put("size", availableProcessors)
                            .put("queue_size", 50).build())
            .put(Names.GET,
                    settingsBuilder().put("type", "fixed").put("size", availableProcessors)
                            .put("queue_size", 1000).build())
            .put(Names.SEARCH,
                    settingsBuilder().put("type", "fixed").put("size", availableProcessors * 3)
                            .put("queue_size", 1000).build())
            .put(Names.SUGGEST,
                    settingsBuilder().put("type", "fixed").put("size", availableProcessors)
                            .put("queue_size", 1000).build())
            .put(Names.PERCOLATE,
                    settingsBuilder().put("type", "fixed").put("size", availableProcessors)
                            .put("queue_size", 1000).build())
            .put(Names.MANAGEMENT,
                    settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", 5).build())
            .put(Names.FLUSH,
                    settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt5)
                            .build())
            .put(Names.MERGE,
                    settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt5)
                            .build())
            .put(Names.REFRESH,
                    settingsBuilder().put("type", "scaling").put("keep_alive", "5m")
                            .put("size", halfProcMaxAt10).build())
            .put(Names.WARMER,
                    settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt5)
                            .build())
            .put(Names.SNAPSHOT,
                    settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt5)
                            .build())
            .put(Names.SNAPSHOT_DATA,
                    settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", 5).build())
            .put(Names.OPTIMIZE, settingsBuilder().put("type", "fixed").put("size", 1).build())
            .put(Names.BENCH, settingsBuilder().put("type", "scaling").put("keep_alive", "5m")
                    .put("size", halfProcMaxAt5).build())
            .build();

    Map<String, ExecutorHolder> executors = Maps.newHashMap();
    for (Map.Entry<String, Settings> executor : defaultExecutorTypeSettings.entrySet()) {
        executors.put(executor.getKey(),
                build(executor.getKey(), groupSettings.get(executor.getKey()), executor.getValue()));
    }
    executors.put(Names.SAME,
            new ExecutorHolder(MoreExecutors.sameThreadExecutor(), new Info(Names.SAME, "same")));
    if (!executors.get(Names.GENERIC).info.getType().equals("cached")) {
        throw new ElasticsearchIllegalArgumentException("generic thread pool must be of type cached");
    }
    this.executors = ImmutableMap.copyOf(executors);
    this.scheduler = new ScheduledThreadPoolExecutor(1, EsExecutors.daemonThreadFactory(settings, "scheduler"),
            new EsAbortPolicy());
    this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
    this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
    if (nodeSettingsService != null) {
        nodeSettingsService.addListener(new ApplySettings());
    }

    TimeValue estimatedTimeInterval = componentSettings.getAsTime("estimated_time_interval",
            TimeValue.timeValueMillis(200));
    this.estimatedTimeThread = new EstimatedTimeThread(EsExecutors.threadName(settings, "[timer]"),
            estimatedTimeInterval.millis());
    this.estimatedTimeThread.start();
}

From source file: org.grouplens.lenskit.eval.script.EvalScript.java
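
LensKit attaches the task-execution Runnable as a listener on the aggregated dependency future via sameThreadExecutor(), so the task runs as soon as its last dependency completes, in the completing thread.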

public Object methodMissing(String name, Object arg) {
    Object[] args = InvokerHelper.asArray(arg);
    logger.debug("searching for eval command {}", name);
    Object obj = null;
    try {
        obj = helper.callExternalMethod(name, args);
    } catch (NoSuchMethodException e) {
        throw new MissingMethodException(name, getClass(), args, true);
    }
    if (obj instanceof Builder) {
        return helper.finishBuilder((Builder<?>) obj);
    } else if (obj instanceof EvalTask) {
        final EvalTask<?> task = (EvalTask<?>) obj;
        if (currentTarget == null) {
            try {
                ListenableFuture<List<Object>> deps = Futures.allAsList(helper.getDeps(task));
                helper.clearDeps(task);
                Runnable execute = new Runnable() {
                    @Override
                    public void run() {
                        try {
                            task.execute();
                        } catch (TaskExecutionException e) {
                            throw new RuntimeException("task failure", e);
                        }
                    }
                };
                deps.addListener(execute, MoreExecutors.sameThreadExecutor());
                if (task.isDone()) {
                    return Uninterruptibles.getUninterruptibly(task);
                } else {
                    return task;
                }
            } catch (ExecutionException e) {
                throw new RuntimeException("task failure", e);
            }
        } else {
            EvalAntTask aTask = new EvalAntTask(task, helper.getDeps(task));
            aTask.setProject(getAntProject());
            aTask.setOwningTarget(currentTarget);
            aTask.init();
            currentTarget.addTask(aTask);
            return obj;
        }
    } else {
        return obj;
    }
}

From source file: org.apache.druid.indexing.worker.WorkerTaskManager.java
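
The worker registers its TaskRunnerListener with sameThreadExecutor(), so location-change notices are forwarded to the notice executor from the task runner's own notification thread.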

private void registerLocationListener() {
    taskRunner.registerListener(new TaskRunnerListener() {
        @Override
        public String getListenerId() {
            return "WorkerTaskManager";
        }

        @Override
        public void locationChanged(final String taskId, final TaskLocation newLocation) {
            submitNoticeToExec(new LocationNotice(taskId, newLocation));
        }

        @Override
        public void statusChanged(final String taskId, final TaskStatus status) {
            // do nothing
        }
    }, MoreExecutors.sameThreadExecutor());
}