Example usage for com.google.common.util.concurrent MoreExecutors listeningDecorator

Introduction

This page collects example usages of com.google.common.util.concurrent.MoreExecutors.listeningDecorator from open-source projects.

Prototype

@GwtIncompatible("TODO")
public static ListeningScheduledExecutorService listeningDecorator(ScheduledExecutorService delegate) 

Document

Creates a ScheduledExecutorService whose submit and invokeAll methods submit ListenableFutureTask instances to the given delegate executor.
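
Before the project examples below, here is a minimal, self-contained sketch of the pattern they share: wrap a plain executor with listeningDecorator so submitted tasks return ListenableFuture instances, then attach a completion callback. The class name and task here are illustrative only and are not taken from the projects listed under Usage.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningScheduledExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.concurrent.Executors;

public class ListeningDecoratorSketch {
    public static void main(String[] args) {
        // Decorate a plain ScheduledExecutorService; submit() now returns ListenableFuture.
        ListeningScheduledExecutorService executor = MoreExecutors
                .listeningDecorator(Executors.newSingleThreadScheduledExecutor());

        ListenableFuture<String> future = executor.submit(() -> "done");

        // React to completion without blocking the submitting thread.
        Futures.addCallback(future, new FutureCallback<String>() {
            @Override
            public void onSuccess(String result) {
                System.out.println("Task completed: " + result);
            }

            @Override
            public void onFailure(Throwable t) {
                t.printStackTrace();
            }
        }, MoreExecutors.directExecutor());

        executor.shutdown();
    }
}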

Usage

From source file:co.cask.cdap.logging.save.LogMetricsPlugin.java

@Override
public void init(Set<Integer> partitions) {
    super.init(partitions, checkpointManager);

    scheduledExecutor = MoreExecutors.listeningDecorator(Executors
            .newSingleThreadScheduledExecutor(Threads.createDaemonThreadFactory("log-saver-metrics-plugin")));

    partitionCheckpoints.clear();
    try {
        for (Integer partition : partitions) {
            partitionCheckpoints.put(partition, checkpointManager.getCheckpoint(partition));
        }
    } catch (Exception e) {
        LOG.error("Caught exception while reading checkpoint", e);
        throw Throwables.propagate(e);
    }

    checkPointWriter = new CheckPointWriter(checkpointManager, partitionCheckpoints);
    scheduledExecutor.scheduleWithFixedDelay(checkPointWriter, 100, 200, TimeUnit.MILLISECONDS);
}

From source file:com.stratio.deep.cassandra.cql.DeepCqlRecordWriter.java

/**
 * Cassandra record writer constructor.
 *
 * @param writeConfig write configuration
 * @param queryBuilder query builder
 */
public DeepCqlRecordWriter(ICassandraDeepJobConfig writeConfig, CassandraUpdateQueryBuilder queryBuilder) {
    this.taskExecutorService = MoreExecutors.listeningDecorator(
            Utils.newBlockingFixedThreadPoolExecutor(MAX_PARALLEL_QUERIES, WORK_QUEUE_SIZE));
    this.pendingTasks = new ConcurrentHashMap<>();
    this.writeConfig = writeConfig;
    this.queryBuilder = queryBuilder;
    try {
        InetAddress localhost = InetAddress.getLocalHost();
        sessionWithHost = CassandraClientProvider.trySessionForLocation(localhost.getHostAddress(),
                (CassandraDeepJobConfig) writeConfig, false).left;
        sessionWithHost.init();
    } catch (UnknownHostException e) {
        throw new DeepInstantiationException("Cannot resolve local hostname", e);
    }
}

From source file:com.spectralogic.ds3client.helpers.ChunkTransferrer.java

public void transferChunks(final Iterable<JobNode> nodes, final Iterable<Objects> chunks)
        throws IOException, XmlProcessingException {
    LOG.debug("Getting ready to process chunks");
    final ImmutableMap<UUID, JobNode> nodeMap = buildNodeMap(nodes);
    LOG.debug("Starting executor service");
    final ListeningExecutorService executor = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(maxParallelRequests));
    LOG.debug("Executor service started");
    try {
        final List<ListenableFuture<?>> tasks = new ArrayList<>();
        for (final Objects chunk : chunks) {
            LOG.debug("Processing parts for chunk: {}", chunk.getChunkId().toString());
            final Ds3Client client = getClient(nodeMap, chunk.getNodeId(), mainClient);
            for (final BulkObject ds3Object : chunk.getObjects()) {
                final ObjectPart part = new ObjectPart(ds3Object.getOffset(), ds3Object.getLength());
                if (this.partTracker.containsPart(ds3Object.getName(), part)) {
                    LOG.debug("Adding {} to executor for processing", ds3Object.getName());
                    tasks.add(executor.submit(new Callable<Object>() {
                        @Override
                        public Object call() throws Exception {
                            LOG.debug("Processing {}", ds3Object.getName());
                            ChunkTransferrer.this.itemTransferrer.transferItem(client, ds3Object);
                            ChunkTransferrer.this.partTracker.completePart(ds3Object.getName(), part);
                            return null;
                        }
                    }));
                }
            }
        }
        executeWithExceptionHandling(tasks);
    } finally {
        LOG.debug("Shutting down executor");
        executor.shutdown();
    }
}

From source file:org.excalibur.core.executor.task.SingleTaskExecutorService.java

public SingleTaskExecutorService() {
    ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("task-executor-%d")
            .setUncaughtExceptionHandler(new TaskUncaughtExceptionHandler()).build();

    executor = MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor(threadFactory));
}

From source file:com.github.rinde.rinsim.experiment.LocalComputer.java

@Override
public ExperimentResults compute(Builder builder, Set<SimArgs> inputs) {
    final ImmutableList.Builder<ExperimentRunner> runnerBuilder = ImmutableList.builder();
    for (final SimArgs args : inputs) {
        runnerBuilder.add(new ExperimentRunner(args));
    }

    final List<ExperimentRunner> runners = runnerBuilder.build();

    final int threads = Math.min(builder.numThreads, runners.size());
    final ListeningExecutorService executor;
    if (builder.showGui) {
        executor = MoreExecutors.newDirectExecutorService();
    } else {
        executor = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(threads, new LocalThreadFactory()));
    }

    final List<SimulationResult> results = Collections.synchronizedList(new ArrayList<SimulationResult>());
    final ResultCollector resultCollector = new ResultCollector(executor, results, builder.resultListeners);

    try {
        for (final ExperimentRunner r : runners) {
            checkForError(executor, resultCollector);
            final ListenableFuture<SimulationResult> f = executor.submit(r);
            Futures.addCallback(f, resultCollector);
        }
        while (results.size() < inputs.size() && !resultCollector.hasError()) {
            Thread.sleep(THREAD_SLEEP_TIME_MS);
        }
        checkForError(executor, resultCollector);
    } catch (final InterruptedException e) {
        LOGGER.trace("Interrupt, shutting down the executor.");
        executor.shutdownNow();
        LOGGER.trace("Waiting for executor to shutdown.");
        try {
            final boolean executorStopped = executor.awaitTermination(MAX_WAIT_FOR_SHUTDOWN_S,
                    TimeUnit.SECONDS);
            if (executorStopped) {
                LOGGER.trace("Executor is shutdown.");
            } else {
                LOGGER.warn("Executor did not stop, timed out after {} seconds.", MAX_WAIT_FOR_SHUTDOWN_S);
            }
        } catch (final InterruptedException e1) {
            LOGGER.warn("Waiting for executor to shutdown is interrupted.");
        }
        return ExperimentResults.create(builder, ImmutableSet.<SimulationResult>of());
    }

    checkForError(executor, resultCollector);
    executor.shutdown();

    final ExperimentResults er = ExperimentResults.create(builder, ImmutableSet.copyOf(results));
    for (final ResultListener rl : builder.resultListeners) {
        rl.doneComputing(er);
    }
    return er;
}

From source file:io.viewserver.reactor.EventLoopReactor.java

public EventLoopReactor(String name, Network network) {
    this.name = name;
    this.network = network;

    ReactorMonitor.INSTANCE.addReactor(this);

    jobQueue = new PriorityBlockingQueue<>(8, getJobComparator());
    loopTasks = new PriorityBlockingQueue<>(8, getLoopTaskComparator());
    loopTasksCopy = new PriorityBlockingQueue<>(8, getLoopTaskComparator());

    executor = MoreExecutors.listeningDecorator(
            Executors.newSingleThreadScheduledExecutor(r -> runThread = new Thread(r, "reactor-" + name)));

    commandWheel = new SimpleReactorCommandWheel();
    commandWheel.registerReactorCommandListener(this);
    commandWheel.startRotating();

    network.setReactor(this);
    final INetworkAdapter networkAdapter = network.getNetworkAdapter();
    final INetworkMessageWheel networkMessageWheel = networkAdapter.getNetworkMessageWheel();
    networkMessageWheel.registerNetworkMessageListener(this);
    // TODO: get rid of this?
    networkAdapter.registerListener(this);
    networkMessageWheel.startRotating();
}

From source file:org.apache.pig.backend.hadoop.executionengine.physicalLayer.util.MonitoredUDFExecutor.java

@SuppressWarnings("unchecked")
public MonitoredUDFExecutor(EvalFunc udf) {
    // is 10 enough? This is pretty arbitrary.
    exec = MoreExecutors
            .listeningDecorator(MoreExecutors.getExitingExecutorService(new ScheduledThreadPoolExecutor(1)));
    this.evalFunc = udf;
    MonitoredUDF anno = udf.getClass().getAnnotation(MonitoredUDF.class);
    timeUnit = anno.timeUnit();
    duration = anno.duration();
    errorCallback = anno.errorCallback();

    // The exceptions really should not happen since our handlers are defined by the parent class which
    // must be extended by all custom handlers.
    try {
        errorHandler = errorCallback.getMethod("handleError", EvalFunc.class, Exception.class);
        timeoutHandler = errorCallback.getMethod("handleTimeout", EvalFunc.class, Exception.class);
    } catch (SecurityException e1) {
        throw new RuntimeException(
                "Unable to use the monitored callback due to a Security Exception while working with "
                        + evalFunc.getClass().getName());
    } catch (NoSuchMethodException e1) {
        throw new RuntimeException(
                "Unable to use the monitored callback because a required method not found while working with "
                        + evalFunc.getClass().getName());
    }

    Type retType = udf.getReturnType();
    defaultValue = getDefaultValue(anno, retType);
    closure = new Function<Tuple, Object>() {
        @Override
        public Object apply(Tuple input) {
            try {
                return evalFunc.exec(input);
            } catch (IOException e) {
                // I don't see a CheckedFunction in Guava. Resorting to this hackery.
                throw new RuntimeException(e);
            }
        }
    };
}

From source file:com.torodb.mongodb.repl.topology.TopologyExecutor.java

public TopologyExecutor(ConcurrentToolsFactory concurrentToolsFactory, LoggerFactory lf,
        Duration maxSyncSourceLag, Duration slaveDelay) {
    this.logger = lf.apply(this.getClass());
    this.executor = MoreExecutors.listeningDecorator(
            concurrentToolsFactory.createScheduledExecutorServiceWithMaxThreads("topology-executor", 1));
    this.coord = new TopologyCoordinator(maxSyncSourceLag, slaveDelay, lf);
    this.versionChangeListener = this::onVersionChange;
    this.coord.addVersionChangeListener(versionChangeListener);
    this.onAnyVersion = new OnAnyVersion(executor, coord);
    this.onCurrentVersion = new OnCurrentVersion(executor, coord, this::getVersion);
}

From source file:io.ucoin.ucoinj.elasticsearch.service.ExecutorServiceImpl.java

public ExecutorServiceImpl() {
    this.jobsById = Maps.newHashMap();
    this.config = Configuration.instance();
    this.progressionByJobIdCache = initJobByIdCache(config.getTaskExecutorQueueCapacity() * 2,
            config.getTaskExecutorTimeToIdle());
    delegate = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(config.getTaskExecutorQueueCapacity()));
}

From source file:io.druid.client.CachingQueryRunner.java

public CachingQueryRunner(String segmentIdentifier, SegmentDescriptor segmentDescriptor, ObjectMapper mapper,
        Cache cache, QueryToolChest toolchest, QueryRunner<T> base, ExecutorService backgroundExecutorService,
        CacheConfig cacheConfig) {
    this.base = base;
    this.segmentIdentifier = segmentIdentifier;
    this.segmentDescriptor = segmentDescriptor;
    this.toolChest = toolchest;
    this.cache = cache;
    this.mapper = mapper;
    this.backgroundExecutorService = MoreExecutors.listeningDecorator(backgroundExecutorService);
    this.cacheConfig = cacheConfig;
}