Example usage for com.google.common.util.concurrent MoreExecutors directExecutor

Introduction

On this page you can find example usage for com.google.common.util.concurrent MoreExecutors directExecutor.

Prototype

public static Executor directExecutor() 

Document

Returns an Executor that runs each task in the thread that invokes Executor#execute, as in ThreadPoolExecutor.CallerRunsPolicy.
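
Before the real-world examples below, here is a minimal, self-contained sketch of the typical pattern: passing directExecutor() to Futures.addCallback so a lightweight callback runs on whichever thread completes the future. The class and variable names are hypothetical, not taken from the examples on this page.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executors;

public class DirectExecutorExample {
    public static void main(String[] args) {
        ListeningExecutorService pool = MoreExecutors
                .listeningDecorator(Executors.newSingleThreadExecutor());

        ListenableFuture<String> greeting = pool.submit(() -> "hello");

        // The callback is cheap, so running it on the completing thread via
        // directExecutor() avoids handing off to another executor.
        Futures.addCallback(greeting, new FutureCallback<String>() {
            @Override
            public void onSuccess(String result) {
                System.out.println("Got: " + result);
            }

            @Override
            public void onFailure(Throwable t) {
                t.printStackTrace();
            }
        }, MoreExecutors.directExecutor());

        pool.shutdown();
    }
}

Note that directExecutor() is only appropriate when the listener or transformation is fast and non-blocking; heavyweight work should be handed to a real executor instead.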

Usage

From source file:com.facebook.buck.core.build.distributed.synchronization.impl.RemoteBuildRuleSynchronizer.java

@Override
public synchronized ListenableFuture<CacheResult> waitForBuildRuleToAppearInCache(BuildRule buildRule,
        Supplier<ListenableFuture<CacheResult>> cacheCheck) {
    String buildTarget = buildRule.getFullyQualifiedName();
    if (!buildRule.isCacheable()) {
        LOG.info(String.format("Doing only immediate cache check for build target [%s]", buildTarget));
        // Stampede transfers artifacts via the cache. If the build rule isn't cacheable, then
        // immediately do only the requested cache check (may contain logging etc.) without any
        // retries.
        // Will allow proceeding with next local steps immediately (i.e. cache fetches for
        // all dependencies).
        return cacheCheck.get();
    }

    // If the build is already marked as finished, then we cannot expect to get a completion signal
    // for the rule later (the completion event may have been missed or misordered), so do not wait for it.
    ListenableFuture<CacheResult> resultFuture = remoteBuildFinished ? cacheCheck.get()
            : Futures.transformAsync(createCompletionFutureIfNotPresent(buildTarget),
                    (Void v) -> cacheCheck.get(), MoreExecutors.directExecutor());

    // Backoffs are disabled.
    if (cacheSyncMaxTotalBackoffMillis == 0) {
        return resultFuture;
    }

    for (int backOffNumber = 0; backOffNumber < backOffsMillis.length; backOffNumber++) {
        int backOffNumberForLambda = backOffNumber;
        resultFuture = Futures.transformAsync(resultFuture, result -> {
            // If we didn't get a miss (miss -> need more wait time), stop any further retries.
            if (result.getType() != CacheResultType.MISS) {
                return Futures.immediateFuture(result);
            }
            return getNextCacheCheckResult(result, cacheCheck, buildTarget, backOffNumberForLambda);
        }, MoreExecutors.directExecutor());
    }

    LOG.info(String.format("Returning future that waits for build target [%s]", buildTarget));

    return resultFuture;
}

From source file:com.facebook.buck.distributed.build_client.PostBuildPhase.java

@VisibleForTesting
ListenableFuture<BuildSlaveStats> publishBuildSlaveFinishedStatsEvent(BuildJob job,
        ListeningExecutorService executor, ConsoleEventsDispatcher consoleEventsDispatcher) {
    if (!job.isSetBuildSlaves()) {
        return Futures.immediateFuture(null);
    }

    List<ListenableFuture<Pair<BuildSlaveRunId, Optional<BuildSlaveFinishedStats>>>> slaveFinishedStatsFutures = new ArrayList<>(
            job.getBuildSlavesSize());
    for (BuildSlaveInfo info : job.getBuildSlaves()) {
        BuildSlaveRunId runId = info.getBuildSlaveRunId();
        slaveFinishedStatsFutures.add(executor.submit(() -> {
            Optional<BuildSlaveFinishedStats> stats = fetchStatsForIndividualSlave(job, runId);
            return new Pair<BuildSlaveRunId, Optional<BuildSlaveFinishedStats>>(runId, stats);
        }));
    }

    Builder builder = BuildSlaveStats.builder().setStampedeId(job.getStampedeId());
    return Futures.transform(Futures.allAsList(slaveFinishedStatsFutures),
            statsList -> createAndPublishBuildSlaveStats(builder, statsList, consoleEventsDispatcher),
            MoreExecutors.directExecutor());
}

From source file:com.google.caliper.runner.worker.Worker.java

@Override
protected void doStart() {
    try {
        process = device.startVm(spec, output);
    } catch (Exception e) {
        notifyFailed(e);
        return;
    }
    // Failsafe kill the process and the executor service.
    // If the process has already exited cleanly, this will be a no-op.
    addListener(new Listener() {
        @Override
        public void terminated(State from) {
            cleanup();
        }

        @Override
        public void failed(State from, Throwable failure) {
            cleanup();
        }

        void cleanup() {
            streamExecutor.shutdown();
            process.kill();

            boolean interrupt = false;
            try {
                process.awaitExit();
            } catch (InterruptedException e) {
                interrupt = true;
            }
            try {
                streamExecutor.awaitTermination(10, TimeUnit.MILLISECONDS);
            } catch (InterruptedException e) {
                interrupt = true;
            }

            if (interrupt) {
                Thread.currentThread().interrupt();
            }
            streamExecutor.shutdownNow();
        }
    }, MoreExecutors.directExecutor());
    // You may be thinking as you read this "Yo dawg, what if IOExceptions rain from the sky?"
    // If a stream we are reading from throws an IOException then we fail the entire Service. This
    // will cause the worker to be killed (if it's not dead already) and the various StreamReaders to
    // be interrupted (eventually).

    openStreams.incrementAndGet();

    startStreamReader("stderr", process.stderr());
    startStreamReader("stdout", process.stdout());
    socketFuture.addListener(new Runnable() {
        @Override
        public void run() {
            startSocketStream();
        }
    }, MoreExecutors.directExecutor());
    notifyStarted();
}

From source file:io.atomix.core.multimap.AtomicMultimap.java

/**
 * Registers the specified listener to be notified whenever the map is updated.
 *
 * @param listener listener to notify about map events
 */
default void addListener(AtomicMultimapEventListener<K, V> listener) {
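    // By default, deliver map events on the thread that raises them (no separate executor).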
    addListener(listener, MoreExecutors.directExecutor());
}

From source file:com.facebook.buck.distributed.ServerContentsProvider.java

@Override
@SuppressWarnings("CheckReturnValue")
public ListenableFuture<Boolean> materializeFileContentsAsync(BuildJobStateFileHashEntry entry,
        Path targetAbsPath) {

    ListenableFuture<byte[]> fileFuture = scheduleFileToBeFetched(entry);
    // If the buffer is full, make a multi-fetch request using the thread pool.
    // Don't block the calling thread.
    networkThreadPool.submit(this::makeMultiFetchRequestIfBufferIsFull);

    return Futures.transform(fileFuture,
            (byte[] fileContents) -> writeFileContentsToPath(fileContents, targetAbsPath),
            MoreExecutors.directExecutor());
}

From source file:org.opendaylight.controller.cluster.databroker.ConcurrentDOMDataBroker.java

private void doPreCommit(final long startTime, final AsyncNotifyingSettableFuture clientSubmitFuture,
        final DOMDataWriteTransaction transaction, final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {

    final Iterator<DOMStoreThreePhaseCommitCohort> cohortIterator = cohorts.iterator();

    // Not using Futures.allAsList here to avoid its internal overhead.
    FutureCallback<Void> futureCallback = new FutureCallback<Void>() {
        @Override
        public void onSuccess(Void notUsed) {
            if (!cohortIterator.hasNext()) {
                // All cohorts completed successfully - we can move on to the commit phase
                doCommit(startTime, clientSubmitFuture, transaction, cohorts);
            } else {
                ListenableFuture<Void> preCommitFuture = cohortIterator.next().preCommit();
                Futures.addCallback(preCommitFuture, this, MoreExecutors.directExecutor());
            }
        }

        @Override
        public void onFailure(Throwable t) {
            handleException(clientSubmitFuture, transaction, cohorts, PRE_COMMIT,
                    TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER, t);
        }
    };

    ListenableFuture<Void> preCommitFuture = cohortIterator.next().preCommit();
    Futures.addCallback(preCommitFuture, futureCallback, MoreExecutors.directExecutor());
}

From source file:org.opendaylight.infrautils.utils.concurrent.LoggingFutures.java

private static <V, F extends ListenableFuture<V>> F addCallback(F future, FutureCallback<V> callback) {
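    // Invoke the callback directly on the thread that completes the future.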
    Futures.addCallback(future, callback, MoreExecutors.directExecutor());
    return future;
}

From source file:net.oneandone.troilus.ListenableFutures.java

public static <T, E> ListenableFuture<E> transform(ListenableFuture<T> future,
        Function<T, ListenableFuture<E>> mapperFunction) {
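    // Default to applying the mapper on the thread that completes the future.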
    return transform(future, mapperFunction, MoreExecutors.directExecutor());
}

From source file:org.apache.zeppelin.cluster.ClusterManagerServer.java

private void initThread() {
    // RaftServer Thread
    new Thread(new Runnable() {
        @Override
        public void run() {
            LOGGER.info("RaftServer run() >>>");

            Address address = Address.from(zeplServerHost, raftServerPort);
            Member member = Member.builder(MemberId.from(zeplServerHost + ":" + raftServerPort))
                    .withAddress(address).build();
            messagingService = NettyMessagingService.builder().withAddress(address).build().start().join();
            RaftServerProtocol protocol = new RaftServerMessagingProtocol(messagingService,
                    ClusterManager.protocolSerializer, raftAddressMap::get);

            BootstrapService bootstrapService = new BootstrapService() {
                @Override
                public MessagingService getMessagingService() {
                    return messagingService;
                }

                @Override
                public BroadcastService getBroadcastService() {
                    return new BroadcastServiceAdapter();
                }
            };

            ManagedClusterMembershipService clusterService = new DefaultClusterMembershipService(member,
                    new DefaultNodeDiscoveryService(bootstrapService, member,
                            new BootstrapDiscoveryProvider(clusterNodes)),
                    bootstrapService, new MembershipConfig());

            File atomixDateDir = com.google.common.io.Files.createTempDir();
            atomixDateDir.deleteOnExit();

            RaftServer.Builder builder = RaftServer.builder(member.id()).withMembershipService(clusterService)
                    .withProtocol(protocol)
                    .withStorage(RaftStorage.builder().withStorageLevel(StorageLevel.MEMORY)
                            .withDirectory(atomixDateDir).withSerializer(storageSerializer)
                            .withMaxSegmentSize(1024 * 1024).build());

            raftServer = builder.build();
            raftServer.bootstrap(clusterMemberIds);

            messagingService.registerHandler(ZEPL_CLUSTER_EVENT_TOPIC, subscribeClusterEvent,
                    MoreExecutors.directExecutor());

            LOGGER.info("RaftServer run() <<<");
        }
    }).start();
}

From source file:org.opendaylight.unimgr.mef.nrp.ovs.tapi.TopologyDataHandler.java

public void close() {
    ReadWriteTransaction tx = dataBroker.newReadWriteTransaction();

    NrpDao dao = new NrpDao(tx);
    dao.removeNode(OVS_NODE, true);

    Futures.addCallback(tx.commit(), new FutureCallback<CommitInfo>() {
        @Override
        public void onSuccess(@Nullable CommitInfo result) {
            LOG.info("Node {} deleted", OVS_NODE);
        }

        @Override
        public void onFailure(Throwable t) {
            LOG.error("No node deleted due to the error", t);
        }
    }, MoreExecutors.directExecutor());

    if (registration != null) {
        LOG.info("closing netconf tree listener");
        registration.close();
    }
}