Example usage for com.google.common.util.concurrent MoreExecutors sameThreadExecutor

List of usage examples for com.google.common.util.concurrent MoreExecutors sameThreadExecutor

Introduction

In this page you can find the example usage for com.google.common.util.concurrent MoreExecutors sameThreadExecutor.

Prototype

@Deprecated
@GwtIncompatible("TODO")
public static ListeningExecutorService sameThreadExecutor() 

Source Link

Document

Creates an executor service that runs each task in the thread that invokes execute/submit, as in CallerRunsPolicy.

Usage

From source file:com.yahoo.yqlplus.engine.internal.java.sequences.Sequences.java

public static <ROW, SEQUENCE extends Iterable<ROW>, SET> ListenableFuture<List<ROW>> invokeAsyncBatchSet(
        final Executor executor, final AsyncFunction<List<SET>, SEQUENCE> source, List<SET> keys, Tracer tracer,
        Timeout timeout, TimeoutHandler handler) throws Exception {
    // Splits 'keys' into batches at null entries and dispatches each batch to
    // 'source' asynchronously, then gathers all batch results under a timeout.
    List<ListenableFuture<SEQUENCE>> results = Lists.newArrayList();
    final Tracer childTracer = tracer.start(tracer.getGroup(), tracer.getName());
    List<SET> methodArgs = Lists.newArrayList();
    for (int i = 0; i < keys.size(); i++) {
        if (keys.get(i) != null) {
            methodArgs.add(keys.get(i));
        } else { // a null key acts as a batch separator: flush the accumulated batch
            ListenableFuture<SEQUENCE> result = source.apply(methodArgs);
            results.add(result);
            result.addListener(new Runnable() {
                @Override
                public void run() {
                    // NOTE(review): childTracer.end() fires once per batch although the
                    // tracer was started only once -- confirm Tracer.end() is idempotent.
                    childTracer.end();
                }
            }, MoreExecutors.sameThreadExecutor());
            methodArgs = Lists.newArrayList();
        }
    }
    // NOTE(review): keys after the last null separator are never dispatched --
    // presumably callers terminate 'keys' with a null sentinel; verify call sites.
    ListenableFuture<List<SEQUENCE>> gather = Futures.allAsList(results);
    return handler.withTimeout(gatherResults(executor, gather, 1), timeout.verify(), timeout.getTickUnits());
}

From source file:io.druid.segment.realtime.plumber.RealtimePlumber.java

@Override
public <T> QueryRunner<T> getQueryRunner(final Query<T> query) {
    // Builds a merged QueryRunner over every sink whose interval overlaps the
    // query's intervals, wiring per-segment metrics emission along the way.
    final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
    final QueryToolChest<T, Query<T>> toolchest = factory.getToolchest();

    // Metric-builder factory used by MetricsEmittingQueryRunner below.
    final Function<Query<T>, ServiceMetricEvent.Builder> builderFn = new Function<Query<T>, ServiceMetricEvent.Builder>() {

        @Override
        public ServiceMetricEvent.Builder apply(@Nullable Query<T> input) {
            return toolchest.makeMetricBuilder(query);
        }
    };

    // Collect every sink that overlaps any of the query intervals.
    List<TimelineObjectHolder<String, Sink>> querySinks = Lists.newArrayList();
    for (Interval interval : query.getIntervals()) {
        querySinks.addAll(sinkTimeline.lookup(interval));
    }

    return toolchest.mergeResults(factory.mergeRunners(queryExecutorService, FunctionalIterable
            .create(querySinks).transform(new Function<TimelineObjectHolder<String, Sink>, QueryRunner<T>>() {
                @Override
                public QueryRunner<T> apply(TimelineObjectHolder<String, Sink> holder) {
                    if (holder == null) {
                        throw new ISE("No timeline entry at all!");
                    }

                    // The realtime plumber always uses SingleElementPartitionChunk
                    final Sink theSink = holder.getObject().getChunk(0).getObject();

                    if (theSink == null) {
                        throw new ISE("Missing sink for timeline entry[%s]!", holder);
                    }

                    final SegmentDescriptor descriptor = new SegmentDescriptor(holder.getInterval(),
                            theSink.getSegment().getVersion(),
                            theSink.getSegment().getShardSpec().getPartitionNum());

                    // Per-hydrant runners are merged on the calling thread; the outer
                    // mergeRunners above supplies the real query executor.
                    return new SpecificSegmentQueryRunner<T>(new MetricsEmittingQueryRunner<T>(emitter,
                            builderFn, factory.mergeRunners(MoreExecutors.sameThreadExecutor(),
                                    Iterables.transform(theSink, new Function<FireHydrant, QueryRunner<T>>() {
                                        @Override
                                        public QueryRunner<T> apply(FireHydrant input) {
                                            // It is possible that we got a query for a segment, and while that query
                                            // is in the jetty queue, the segment is abandoned. Here, we need to retry
                                            // the query for the segment.
                                            if (input == null || input.getSegment() == null) {
                                                return new ReportTimelineMissingSegmentQueryRunner<T>(
                                                        descriptor);
                                            }

                                            // Prevent the underlying segment from closing when its being iterated
                                            // NOTE(review): the reference is released in the finally block right
                                            // after createRunner returns -- presumably createRunner takes its own
                                            // reference; confirm the segment ref-counting contract.
                                            final Closeable closeable = input.getSegment().increment();
                                            try {
                                                return factory.createRunner(input.getSegment());
                                            } finally {
                                                try {
                                                    if (closeable != null) {
                                                        closeable.close();
                                                    }
                                                } catch (IOException e) {
                                                    throw Throwables.propagate(e);
                                                }
                                            }
                                        }
                                    }))).withWaitMeasuredFromNow(),
                            new SpecificSegmentSpec(descriptor));
                }
            })));
}

From source file:com.cloudera.hadoop.hdfs.nfs.rpc.ClientInputHandler.java

private void execute(final SessionSecurityHandler<? extends Verifier> securityHandler,
        AccessPrivilege accessPrivilege, final RPCRequest request, RPCBuffer requestBuffer)
        throws RPCException {
    // Trace the transaction id before any processing begins.
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug(mSessionID + " starting xid " + request.getXidAsHexString());
    }
    mHandler.beforeProcess(request);

    // Decode the application-level request out of the RPC payload.
    final REQUEST applicationRequest = mHandler.createRequest();
    applicationRequest.read(requestBuffer);

    // Hand the caller's authenticated credentials to requests that need them;
    // the auth check itself happened earlier in the pipeline.
    if (applicationRequest instanceof RequiresCredentials) {
        ((RequiresCredentials) applicationRequest)
                .setCredentials((AuthenticatedCredentials) request.getCredentials());
    }

    final ListenableFuture<RESPONSE> future = mHandler.process(request, applicationRequest, accessPrivilege,
            mClient.getInetAddress(), mSessionID);
    // Write the response from whichever thread completes the future.
    future.addListener(new Runnable() {
        @Override
        public void run() {
            try {
                writeApplicationResponse(securityHandler, request, future.get());
            } catch (Throwable t) {
                LOGGER.error("Unexpected error processing request", t);
            }
        }
    }, MoreExecutors.sameThreadExecutor());
}

From source file:org.stem.client.Session.java

void onDown(Host host) throws InterruptedException, ExecutionException {
    // Forcibly shut down the host's connection pool and block until done,
    // then refresh the set of created pools on the calling thread.
    removePool(host).force().get();
    updateCreatedPools(MoreExecutors.sameThreadExecutor());
}

From source file:com.continuuity.weave.internal.ZKServiceDecorator.java

private <V> boolean handleStopMessage(Message message, final Supplier<OperationFuture<V>> postHandleSupplier) {
    // Only react to the system-level "stop" command; anything else is not ours.
    if (message.getType() != Message.Type.SYSTEM
            || !"stop".equalsIgnoreCase(message.getCommand().getCommand())) {
        return false;
    }
    callbackExecutor.execute(new Runnable() {
        @Override
        public void run() {
            // Once the decorated service has stopped, complete the pending ZK
            // operation and shut down this decorator as well.
            decoratedService.stop().addListener(new Runnable() {
                @Override
                public void run() {
                    stopServiceOnComplete(postHandleSupplier.get(), ZKServiceDecorator.this);
                }
            }, MoreExecutors.sameThreadExecutor());
        }
    });
    return true;
}

From source file:com.google.dogecoin.kits.WalletAppKit.java

@Override
protected void startUp() throws Exception {
    // Runs in a separate thread.
    // Boot sequence: ensure the data directory exists, open/replay the block
    // store, load or create the wallet, then connect the peer group and start
    // (or schedule) the block-chain download.
    if (!directory.exists()) {
        if (!directory.mkdir()) {
            throw new IOException("Could not create named directory.");
        }
    }
    FileInputStream walletStream = null;
    try {
        File chainFile = new File(directory, filePrefix + ".spvchain");
        boolean chainFileExists = chainFile.exists();
        vWalletFile = new File(directory, filePrefix + ".wallet");
        // A wallet without a chain file means the chain must be re-downloaded,
        // so the wallet's transactions are replayed from scratch.
        boolean shouldReplayWallet = vWalletFile.exists() && !chainFileExists;

        vStore = new SPVBlockStore(params, chainFile);
        if (!chainFileExists && checkpoints != null) {
            // Ugly hack! We have to create the wallet once here to learn the earliest key time, and then throw it
            // away. The reason is that wallet extensions might need access to peergroups/chains/etc so we have to
            // create the wallet later, but we need to know the time early here before we create the BlockChain
            // object.
            long time = Long.MAX_VALUE;
            if (vWalletFile.exists()) {
                Wallet wallet = new Wallet(params);
                FileInputStream stream = new FileInputStream(vWalletFile);
                new WalletProtobufSerializer().readWallet(WalletProtobufSerializer.parseToProto(stream),
                        wallet);
                time = wallet.getEarliestKeyCreationTime();
            }
            CheckpointManager.checkpoint(params, checkpoints, vStore, time);
        }
        vChain = new BlockChain(params, vStore);
        vPeerGroup = createPeerGroup();
        if (this.userAgent != null)
            vPeerGroup.setUserAgent(userAgent, version);
        if (vWalletFile.exists()) {
            walletStream = new FileInputStream(vWalletFile);
            vWallet = new Wallet(params);
            addWalletExtensions(); // All extensions must be present before we deserialize
            new WalletProtobufSerializer().readWallet(WalletProtobufSerializer.parseToProto(walletStream),
                    vWallet);
            if (shouldReplayWallet)
                vWallet.clearTransactions(0);
        } else {
            // First run: create a fresh wallet with one initial key.
            vWallet = new Wallet(params);
            vWallet.addKey(new ECKey());
            addWalletExtensions();
        }
        if (useAutoSave)
            vWallet.autosaveToFile(vWalletFile, 1, TimeUnit.SECONDS, null);
        // Set up peer addresses or discovery first, so if wallet extensions try to broadcast a transaction
        // before we're actually connected the broadcast waits for an appropriate number of connections.
        if (peerAddresses != null) {
            for (PeerAddress addr : peerAddresses)
                vPeerGroup.addAddress(addr);
            peerAddresses = null;
        } else {
            vPeerGroup.addPeerDiscovery(new DnsDiscovery(params));
        }
        vChain.addWallet(vWallet);
        vPeerGroup.addWallet(vWallet);
        onSetupCompleted();

        if (blockingStartup) {
            vPeerGroup.startAsync();
            vPeerGroup.awaitRunning();
            // Make sure we shut down cleanly.
            installShutdownHook();

            // TODO: Be able to use the provided download listener when doing a blocking startup.
            final DownloadListener listener = new DownloadListener();
            vPeerGroup.startBlockChainDownload(listener);
            listener.await();
        } else {
            // Non-blocking startup: kick off the chain download once the peer
            // group reports it is running, on the service's notification thread.
            vPeerGroup.startAsync();
            vPeerGroup.addListener(new Service.Listener() {
                @Override
                public void running() {
                    final PeerEventListener l = downloadListener == null ? new DownloadListener()
                            : downloadListener;
                    vPeerGroup.startBlockChainDownload(l);
                }

                @Override
                public void failed(State from, Throwable failure) {
                    throw new RuntimeException(failure);
                }
            }, MoreExecutors.sameThreadExecutor());
        }
    } catch (BlockStoreException e) {
        throw new IOException(e);
    } finally {
        if (walletStream != null)
            walletStream.close();
    }
}

From source file:dk.ilios.spanner.internal.ExperimentingSpannerRun.java

public static <T> ImmutableList<ListenableFuture<T>> inCompletionOrder(
        Iterable<? extends ListenableFuture<? extends T>> futures) {
    // Pairs every input future with a delegate; delegates fire in the order the
    // inputs actually finish, regardless of the order they were supplied.
    final ConcurrentLinkedQueue<SettableFuture<T>> pending = Queues.newConcurrentLinkedQueue();
    ImmutableList.Builder<ListenableFuture<T>> ordered = ImmutableList.builder();
    Executor sameThread = MoreExecutors.sameThreadExecutor();
    for (final ListenableFuture<? extends T> input : futures) {
        SettableFuture<T> slot = SettableFuture.create();
        // Enqueue before registering the listener: if 'input' is already done the
        // listener runs immediately and must find a delegate to complete.
        pending.add(slot);
        input.addListener(new Runnable() {
            @Override
            public void run() {
                SettableFuture<T> next = pending.remove();
                try {
                    next.set(Uninterruptibles.getUninterruptibly(input));
                } catch (ExecutionException e) {
                    next.setException(e.getCause());
                } catch (CancellationException e) {
                    next.cancel(true);
                }
            }
        }, sameThread);
        ordered.add(slot);
    }
    return ordered.build();
}

From source file:co.cask.cdap.internal.app.runtime.distributed.AbstractProgramTwillRunnable.java

@Override
public void run() {
    // Starts supporting services, runs the program, and blocks until the
    // program reaches a terminal state (completed / killed / error).
    LOG.info("Starting metrics service");
    Futures.getUnchecked(Services.chainStart(zkClientService, kafkaClientService, metricsCollectionService,
            streamCoordinatorClient, resourceReporter));

    LOG.info("Starting runnable: {}", name);
    controller = injector.getInstance(getProgramClass()).run(program, programOpts);
    // Completed when the program reaches a terminal state; 'state.get()' below
    // blocks on it.
    final SettableFuture<ProgramController.State> state = SettableFuture.create();
    controller.addListener(new AbstractListener() {

        @Override
        public void alive() {
            // Unblocks handleCommand once the program is confirmed running.
            runlatch.countDown();
        }

        @Override
        public void init(ProgramController.State currentState, @Nullable Throwable cause) {
            // The listener may be registered after the program is already ALIVE;
            // treat that the same as an alive() callback.
            if (currentState == ProgramController.State.ALIVE) {
                alive();
            } else {
                super.init(currentState, cause);
            }
        }

        @Override
        public void completed() {
            state.set(ProgramController.State.COMPLETED);
        }

        @Override
        public void killed() {
            state.set(ProgramController.State.KILLED);
        }

        @Override
        public void error(Throwable cause) {
            LOG.error("Program runner error out.", cause);
            state.setException(cause);
        }
    }, MoreExecutors.sameThreadExecutor());

    try {
        state.get();
        LOG.info("Program stopped.");
    } catch (InterruptedException e) {
        LOG.warn("Program interrupted.", e);
    } catch (ExecutionException e) {
        LOG.error("Program execution failed.", e);
        if (propagateServiceError()) {
            throw Throwables.propagate(Throwables.getRootCause(e));
        }
    } finally {
        // Always unblock the handleCommand method if it is not unblocked before (e.g if program failed to start).
        // The controller state will make sure the corresponding command will be handled correctly in the correct state.
        runlatch.countDown();
    }
}

From source file:org.apache.twill.internal.ZKServiceDecorator.java

private <V> boolean handleStopMessage(Message message, final Supplier<OperationFuture<V>> postHandleSupplier) {
    // A stop request is a SYSTEM message carrying the well-known STOP command.
    boolean isStopRequest = message.getType() == Message.Type.SYSTEM
            && SystemMessages.STOP_COMMAND.equals(message.getCommand());
    if (!isStopRequest) {
        return false;
    }
    callbackExecutor.execute(new Runnable() {
        @Override
        public void run() {
            decoratedService.stop().addListener(new Runnable() {
                @Override
                public void run() {
                    // Complete the pending ZK operation, then stop the decorator itself.
                    stopServiceOnComplete(postHandleSupplier.get(), ZKServiceDecorator.this);
                }
            }, MoreExecutors.sameThreadExecutor());
        }
    });
    return true;
}

From source file:com.datastax.driver.core.HostConnectionPool.java

private List<ShutdownFuture> discardAvailableConnections() {
    // Close every pooled connection; each close decrements the open-connection
    // counter as soon as its shutdown future fires.
    List<ShutdownFuture> closeFutures = new ArrayList<ShutdownFuture>(connections.size());
    for (Connection c : connections) {
        ShutdownFuture closing = c.close();
        closing.addListener(new Runnable() {
            public void run() {
                open.decrementAndGet();
            }
        }, MoreExecutors.sameThreadExecutor());
        closeFutures.add(closing);
    }
    return closeFutures;
}