Example usage for com.google.common.util.concurrent MoreExecutors newDirectExecutorService

Introduction

On this page you can find example usages of com.google.common.util.concurrent MoreExecutors newDirectExecutorService.

Prototype

@GwtIncompatible("TODO")
public static ListeningExecutorService newDirectExecutorService() 

Document

Creates an executor service that runs each task in the thread that invokes execute/submit, as in CallerRunsPolicy. This applies both to individually submitted tasks and to collections of tasks submitted via invokeAll or invokeAny.
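
A minimal, self-contained sketch (not taken from any of the usages below; the class name is just for illustration) of the behaviour described above: the submitted task runs synchronously on the calling thread, so the returned future is already complete when submit returns.

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.ExecutionException;

public class DirectExecutorServiceDemo {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        ListeningExecutorService executor = MoreExecutors.newDirectExecutorService();

        // submit() executes the callable inline on the current thread, as in CallerRunsPolicy,
        // so the returned future is already done when submit() returns.
        ListenableFuture<String> future = executor.submit(
                () -> "ran on " + Thread.currentThread().getName());

        System.out.println(future.isDone()); // true
        System.out.println(future.get());    // e.g. "ran on main"

        executor.shutdown();
    }
}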

Usage

From source file:com.facebook.buck.parser.PerBuildStateFactoryWithConfigurableAttributes.java

@Override
protected PerBuildStateWithConfigurableAttributes create(ParsingContext parsingContext,
        DaemonicParserState daemonicParserState, ImmutableList<String> targetPlatforms,
        Optional<AtomicLong> parseProcessedBytes) {

    Cell rootCell = parsingContext.getCell();
    ListeningExecutorService executorService = parsingContext.getExecutor();
    SymlinkCache symlinkCache = new SymlinkCache(eventBus, daemonicParserState);
    CellManager cellManager = new CellManager(symlinkCache);

    TargetNodeListener<TargetNode<?>> symlinkCheckers = cellManager::registerInputsUnderSymlinks;
    ParserConfig parserConfig = rootCell.getBuckConfig().getView(ParserConfig.class);
    int numParsingThreads = parserConfig.getNumParsingThreads();
    DefaultProjectBuildFileParserFactory projectBuildFileParserFactory = new DefaultProjectBuildFileParserFactory(
            typeCoercerFactory, parserPythonInterpreterProvider, parsingContext.isProfilingEnabled(),
            parseProcessedBytes, knownRuleTypesProvider, manifestServiceSupplier, fileHashCache);
    ProjectBuildFileParserPool projectBuildFileParserPool = new ProjectBuildFileParserPool(numParsingThreads, // Max parsers to create per cell.
            projectBuildFileParserFactory, parsingContext.isProfilingEnabled());

    TargetNodeFactory targetNodeFactory = new TargetNodeFactory(typeCoercerFactory);

    BuildFileRawNodeParsePipeline buildFileRawNodeParsePipeline = new BuildFileRawNodeParsePipeline(
            new PipelineNodeCache<>(daemonicParserState.getRawNodeCache()), projectBuildFileParserPool,
            executorService, eventBus, watchman);

    BuildTargetRawNodeParsePipeline buildTargetRawNodeParsePipeline = new BuildTargetRawNodeParsePipeline(
            executorService, buildFileRawNodeParsePipeline);

    ListeningExecutorService pipelineExecutorService = parserConfig.getEnableParallelParsing() ? executorService
            : MoreExecutors.newDirectExecutorService();
    boolean enableSpeculativeParsing = parserConfig.getEnableParallelParsing()
            && parsingContext.getSpeculativeParsing() == SpeculativeParsing.ENABLED;
    RawTargetNodePipeline rawTargetNodePipeline = new RawTargetNodePipeline(pipelineExecutorService,
            daemonicParserState.getOrCreateNodeCache(RawTargetNode.class), eventBus,
            buildFileRawNodeParsePipeline, buildTargetRawNodeParsePipeline,
            new DefaultRawTargetNodeFactory(knownRuleTypesProvider, new BuiltTargetVerifier()));

    PackageBoundaryChecker packageBoundaryChecker = new ThrowingPackageBoundaryChecker(
            daemonicParserState.getBuildFileTrees());

    ParserTargetNodeFactory<RawTargetNode> nonResolvingRawTargetNodeToTargetNodeFactory = new NonResolvingRawTargetNodeToTargetNodeFactory(
            DefaultParserTargetNodeFactory.createForParser(knownRuleTypesProvider, marshaller,
                    daemonicParserState.getBuildFileTrees(), symlinkCheckers, targetNodeFactory));

    // This pipeline uses a direct executor instead of pipelineExecutorService to avoid
    // deadlocks that happen when too many nodes are requested from targetNodeParsePipeline.
    // That pipeline makes blocking calls to get nodes from nonResolvingTargetNodeParsePipeline,
    // which can lead to deadlocks.
    ParsePipeline<TargetNode<?>> nonResolvingTargetNodeParsePipeline = new RawTargetNodeToTargetNodeParsePipeline(
            daemonicParserState.getOrCreateNodeCache(TargetNode.class),
            MoreExecutors.newDirectExecutorService(), rawTargetNodePipeline, eventBus,
            "nonresolving_raw_target_node_parse_pipeline", enableSpeculativeParsing,
            nonResolvingRawTargetNodeToTargetNodeFactory);

    ConfigurationRuleResolver configurationRuleResolver = new SameThreadConfigurationRuleResolver(
            cellManager::getCell, nonResolvingTargetNodeParsePipeline::getNode);

    SelectableResolver selectableResolver = new ConfigurationRuleSelectableResolver(configurationRuleResolver);

    SelectorListResolver selectorListResolver = new DefaultSelectorListResolver(selectableResolver);

    ConstraintResolver constraintResolver = new RuleBasedConstraintResolver(configurationRuleResolver);

    Supplier<Platform> targetPlatform = Suppliers.memoize(
            () -> getTargetPlatform(configurationRuleResolver, constraintResolver, rootCell, targetPlatforms));

    RawTargetNodeToTargetNodeFactory rawTargetNodeToTargetNodeFactory = new RawTargetNodeToTargetNodeFactory(
            knownRuleTypesProvider, marshaller, targetNodeFactory, packageBoundaryChecker, symlinkCheckers,
            selectorListResolver, constraintResolver, targetPlatform);

    ListeningExecutorService configuredPipelineExecutor = MoreExecutors
            .listeningDecorator(createExecutorService(rootCell.getBuckConfig(), "configured-pipeline"));

    ParsePipeline<TargetNode<?>> targetNodeParsePipeline = new RawTargetNodeToTargetNodeParsePipeline(
            daemonicParserState.getOrCreateNodeCache(TargetNode.class), configuredPipelineExecutor,
            rawTargetNodePipeline, eventBus, "configured_raw_target_node_parse_pipeline",
            enableSpeculativeParsing, rawTargetNodeToTargetNodeFactory) {
        @Override
        public void close() {
            super.close();
            nonResolvingTargetNodeParsePipeline.close();
            rawTargetNodePipeline.close();
            try {
                MostExecutors.shutdown(configuredPipelineExecutor, 1, TimeUnit.MINUTES);
            } catch (InterruptedException e) {
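                // The interrupt is swallowed here; close() returns without waiting further.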
            }
        }
    };

    cellManager.register(rootCell);

    return new PerBuildStateWithConfigurableAttributes(cellManager, buildFileRawNodeParsePipeline,
            targetNodeParsePipeline, parsingContext, constraintResolver, selectorListResolver, targetPlatform);
}

From source file:ch.cyberduck.core.sds.provider.HttpComponentsConnector.java

@Override
public Future<?> apply(final ClientRequest request, final AsyncConnectorCallback callback) {
    return MoreExecutors.newDirectExecutorService().submit(new Runnable() {
        @Override
        public void run() {
            try {
                callback.response(apply(request));
            } catch (final Throwable t) {
                callback.failure(t);
            }
        }
    });
}

From source file:org.apache.drill.exec.store.TimedCallable.java

/**
 * Execute the list of runnables with the given parallelization.  At end, return values and report completion time
 * stats to provided logger. Each runnable is allowed a certain timeout. If the timeout is exceeded, existing/pending
 * tasks will be cancelled and a {@link UserException} is thrown.
 * @param activity Name of activity for reporting in logger.
 * @param logger The logger to use to report results.
 * @param tasks List of callables that should be executed and timed.  If this list has one item, the task will be
 *                  completed in-thread. Each callable must handle {@link InterruptedException}s.
 * @param parallelism  The number of threads that should be run to complete this task.
 * @return The list of outcome objects.
 * @throws IOException All exceptions are coerced to IOException since this was built for storage system tasks initially.
 */
public static <V> List<V> run(final String activity, final Logger logger, final List<TimedCallable<V>> tasks,
        int parallelism) throws IOException {
    Preconditions.checkArgument(!Preconditions.checkNotNull(tasks).isEmpty(), "list of tasks is empty");
    Preconditions.checkArgument(parallelism > 0);
    parallelism = Math.min(parallelism, tasks.size());
    final ExecutorService threadPool = parallelism == 1 ? MoreExecutors.newDirectExecutorService()
            : Executors.newFixedThreadPool(parallelism,
                    new ThreadFactoryBuilder().setNameFormat(activity + "-%d").build());
    final long timeout = TIMEOUT_PER_RUNNABLE_IN_MSECS * ((tasks.size() - 1) / parallelism + 1);
    final FutureMapper<V> futureMapper = new FutureMapper<>();
    final Statistics<V> statistics = logger.isDebugEnabled() ? new Statistics<>() : null;
    try {
        return Collectors.toList(threadPool.invokeAll(tasks, timeout, TimeUnit.MILLISECONDS), futureMapper);
    } catch (InterruptedException e) {
        final String errMsg = String.format("Interrupted while waiting for activity '%s' tasks to be done.",
                activity);
        logger.error(errMsg, e);
        throw UserException.resourceError(e).message(errMsg).build(logger);
    } catch (RejectedExecutionException e) {
        final String errMsg = String.format("Failure while submitting activity '%s' tasks for execution.",
                activity);
        logger.error(errMsg, e);
        throw UserException.internalError(e).message(errMsg).build(logger);
    } finally {
        List<Runnable> notStartedTasks = threadPool.shutdownNow();
        if (!notStartedTasks.isEmpty()) {
            logger.error("{} activity '{}' tasks never commenced execution.", notStartedTasks.size(), activity);
        }
        try {
            // Wait for 5s for currently running threads to terminate. The above call (threadPool.shutdownNow()) interrupts
            // any running threads. If the tasks are handling the interrupts properly they should be able to
            // wrap up and terminate. If not, waiting for 5s here gives a chance to identify and log any potential
            // thread leaks.
            if (!threadPool.awaitTermination(5000, TimeUnit.MILLISECONDS)) {
                logger.error("Detected run away tasks in activity '{}'.", activity);
            }
        } catch (final InterruptedException e) {
            logger.warn("Interrupted while waiting for pending threads in activity '{}' to terminate.",
                    activity);
        }

        if (statistics != null) {
            statistics.collect(tasks).log(activity, logger, parallelism);
        }
        if (futureMapper.count != tasks.size()) {
            final String errMsg = String.format(
                    "Waited for %d ms, but only %d tasks for '%s' are complete."
                            + " Total number of tasks %d, parallelism %d.",
                    timeout, futureMapper.count, activity, tasks.size(), parallelism);
            logger.error(errMsg, futureMapper.throwable);
            throw UserException.resourceError(futureMapper.throwable).message(errMsg).build(logger);
        }
        if (futureMapper.throwable != null) {
            throw (futureMapper.throwable instanceof IOException) ? (IOException) futureMapper.throwable
                    : new IOException(futureMapper.throwable);
        }
    }
}

From source file:com.google.gerrit.testutil.InMemoryModule.java

@Override
protected void configure() {
    // Do NOT bind @RemotePeer, as it is bound in a child injector of
    // ChangeMergeQueue (bound via GerritGlobalModule below), so there cannot be
    // a binding in the parent injector. If you need @RemotePeer, you must bind
    // it in a child injector of the one containing InMemoryModule. But unless
    // you really need to test something request-scoped, you likely don't
    // actually need it.

    // For simplicity, don't create child injectors, just use this one to get a
    // few required modules.
    Injector cfgInjector = Guice.createInjector(new AbstractModule() {
        @Override
        protected void configure() {
            bind(Config.class).annotatedWith(GerritServerConfig.class).toInstance(cfg);
        }
    });
    bind(MetricMaker.class).to(DisabledMetricMaker.class);
    install(cfgInjector.getInstance(GerritGlobalModule.class));
    install(new SearchingChangeCacheImpl.Module());
    factory(GarbageCollection.Factory.class);

    bindScope(RequestScoped.class, PerThreadRequestScope.REQUEST);

    // TODO(dborowitz): Use jimfs.
    bind(Path.class).annotatedWith(SitePath.class).toInstance(Paths.get("."));
    bind(Config.class).annotatedWith(GerritServerConfig.class).toInstance(cfg);
    bind(GerritOptions.class).toInstance(new GerritOptions(cfg, false, false, false));
    bind(PersonIdent.class).annotatedWith(GerritPersonIdent.class).toProvider(GerritPersonIdentProvider.class);
    bind(String.class).annotatedWith(AnonymousCowardName.class).toProvider(AnonymousCowardNameProvider.class);
    bind(String.class).annotatedWith(GerritServerId.class).toInstance("gerrit");
    bind(AllProjectsName.class).toProvider(AllProjectsNameProvider.class);
    bind(AllUsersName.class).toProvider(AllUsersNameProvider.class);
    bind(GitRepositoryManager.class).to(InMemoryRepositoryManager.class);
    bind(InMemoryRepositoryManager.class).in(SINGLETON);
    bind(TrackingFooters.class).toProvider(TrackingFootersProvider.class).in(SINGLETON);
    bind(NotesMigration.class).toInstance(notesMigration);
    bind(ListeningExecutorService.class).annotatedWith(ChangeUpdateExecutor.class)
            .toInstance(MoreExecutors.newDirectExecutorService());
    bind(DataSourceType.class).to(InMemoryH2Type.class);
    bind(ChangeBundleReader.class).to(GwtormChangeBundleReader.class);
    bind(SecureStore.class).to(DefaultSecureStore.class);

    TypeLiteral<SchemaFactory<ReviewDb>> schemaFactory = new TypeLiteral<SchemaFactory<ReviewDb>>() {
    };
    bind(schemaFactory).to(NotesMigrationSchemaFactory.class);
    bind(Key.get(schemaFactory, ReviewDbFactory.class)).to(InMemoryDatabase.class);

    install(NoSshKeyCache.module());
    install(new CanonicalWebUrlModule() {
        @Override
        protected Class<? extends Provider<String>> provider() {
            return CanonicalWebUrlProvider.class;
        }
    });
    // Replacement of DiffExecutorModule so that the tests do not use a thread pool
    install(new AbstractModule() {
        @Override
        protected void configure() {
        }

        @Provides
        @Singleton
        @DiffExecutor
        public ExecutorService createDiffExecutor() {
            return MoreExecutors.newDirectExecutorService();
        }
    });
    install(new DefaultCacheFactory.Module());
    install(new FakeEmailSender.Module());
    install(new SignedTokenEmailTokenVerifier.Module());
    install(new GpgModule(cfg));
    install(new H2AccountPatchReviewStore.InMemoryModule());

    bind(AllAccountsIndexer.class).toProvider(Providers.of(null));
    bind(AllChangesIndexer.class).toProvider(Providers.of(null));
    bind(AllGroupsIndexer.class).toProvider(Providers.of(null));

    IndexType indexType = null;
    try {
        indexType = cfg.getEnum("index", null, "type", IndexType.LUCENE);
    } catch (IllegalArgumentException e) {
        // Custom index type, caller must provide their own module.
    }
    if (indexType != null) {
        switch (indexType) {
        case LUCENE:
            install(luceneIndexModule());
            break;
        case ELASTICSEARCH:
            install(elasticIndexModule());
            break;
        default:
            throw new ProvisionException("index type unsupported in tests: " + indexType);
        }
    }
}

From source file:com.google.devtools.build.lib.skyframe.RecursivePackageProviderBackedTargetPatternResolver.java

@Override
public <E extends Exception> void findTargetsBeneathDirectory(final RepositoryName repository,
        final String originalPattern, String directory, boolean rulesOnly,
        ImmutableSet<PathFragment> excludedSubdirectories, BatchCallback<Target, E> callback,
        Class<E> exceptionClass) throws TargetParsingException, E, InterruptedException {
    findTargetsBeneathDirectoryParImpl(repository, originalPattern, directory, rulesOnly,
            excludedSubdirectories, new SynchronizedBatchCallback<Target, E>(callback), exceptionClass,
            MoreExecutors.newDirectExecutorService());
}

From source file:com.google.devtools.build.android.dexer.DexBuilder.java

private static void produceDexArchive(ZipFile in, ZipOutputStream out, ExecutorService executor,
        boolean convertOnReaderThread, DexingOptions dexingOptions, @Nullable Cache<DexingKey, byte[]> dexCache)
        throws InterruptedException, ExecutionException, IOException {
    // If we only have one thread in executor, we give a "direct" executor to the stuffer, which
    // will convert .class files to .dex inline on the same thread that reads the input jar.
    // This is an optimization that makes sure we can start writing the output file below while
    // the stuffer is still working its way through the input.
    DexConversionEnqueuer enqueuer = new DexConversionEnqueuer(in,
            convertOnReaderThread ? MoreExecutors.newDirectExecutorService() : executor,
            new DexConverter(new Dexing(dexingOptions)), dexCache);
    Future<?> enqueuerTask = executor.submit(enqueuer);
    while (true) {
        // Wait for next future in the queue *and* for that future to finish.  To guarantee
        // deterministic output we just write out the files in the order they appear, which is
        // the same order as in the input zip.
        ZipEntryContent file = enqueuer.getFiles().take().get();
        if (file == null) {
            // "done" marker indicating no more files coming.
            // Make sure enqueuer terminates normally (any wait should be minimal).  This in
            // particular surfaces any exceptions thrown in the enqueuer.
            enqueuerTask.get();
            break;
        }
        out.putNextEntry(file.getEntry());
        out.write(file.getContent());
        out.closeEntry();
    }
}

From source file:com.google.gerrit.pgm.RebuildNoteDb.java

private ListeningExecutorService newExecutor() {
    if (threads > 0) {
        return MoreExecutors.listeningDecorator(workQueue.createQueue(threads, "RebuildChange"));
    }
    return MoreExecutors.newDirectExecutorService();
}

From source file:com.google.gerrit.testutil.InMemoryModule.java

@Provides
@Singleton
@SendEmailExecutor
public ExecutorService createSendEmailExecutor() {
    return MoreExecutors.newDirectExecutorService();
}

From source file:io.fabric8.maven.docker.StartMojo.java

private ExecutorService getExecutorService() {
    final ExecutorService executorService;
    if (startParallel) {
        executorService = Executors.newCachedThreadPool();
    } else {
        executorService = MoreExecutors.newDirectExecutorService();
    }
    return executorService;
}

From source file:com.google.gerrit.server.update.ReviewDbBatchUpdate.java

private List<ChangeTask> executeChangeOps(boolean parallel, boolean dryrun)
        throws UpdateException, RestApiException {
    List<ChangeTask> tasks;
    boolean success = false;
    Stopwatch sw = Stopwatch.createStarted();
    try {
        logDebug("Executing change ops (parallel? {})", parallel);
        ListeningExecutorService executor = parallel ? changeUpdateExector
                : MoreExecutors.newDirectExecutorService();

        tasks = new ArrayList<>(ops.keySet().size());
        try {
            if (notesMigration.commitChangeWrites() && repoView != null) {
                // A NoteDb change may have been rebuilt since the repo was originally
                // opened, so make sure we see that.
                logDebug("Preemptively scanning for repo changes");
                repoView.getRepository().scanForRepoChanges();
            }
            if (!ops.isEmpty() && notesMigration.failChangeWrites()) {
                // Fail fast before attempting any writes if changes are read-only, as
                // this is a programmer error.
                logDebug("Failing early due to read-only Changes table");
                throw new OrmException(NoteDbUpdateManager.CHANGES_READ_ONLY);
            }
            List<ListenableFuture<?>> futures = new ArrayList<>(ops.keySet().size());
            for (Map.Entry<Change.Id, Collection<BatchUpdateOp>> e : ops.asMap().entrySet()) {
                ChangeTask task = new ChangeTask(e.getKey(), e.getValue(), Thread.currentThread(), dryrun);
                tasks.add(task);
                if (!parallel) {
                    logDebug("Direct execution of task for ops: {}", ops);
                }
                futures.add(executor.submit(task));
            }
            if (parallel) {
                logDebug("Waiting on futures for {} ops spanning {} changes", ops.size(), ops.keySet().size());
            }
            Futures.allAsList(futures).get();

            if (notesMigration.commitChangeWrites()) {
                if (!dryrun) {
                    executeNoteDbUpdates(tasks);
                }
            }
            success = true;
        } catch (ExecutionException | InterruptedException e) {
            Throwables.throwIfInstanceOf(e.getCause(), UpdateException.class);
            Throwables.throwIfInstanceOf(e.getCause(), RestApiException.class);
            throw new UpdateException(e);
        } catch (OrmException | IOException e) {
            throw new UpdateException(e);
        }
    } finally {
        metrics.executeChangeOpsLatency.record(success, sw.elapsed(NANOSECONDS), NANOSECONDS);
    }
    return tasks;
}