Example usage for com.google.common.util.concurrent MoreExecutors sameThreadExecutor


Introduction

This page collects example usages of com.google.common.util.concurrent.MoreExecutors.sameThreadExecutor() from the source files listed below.

Prototype

@Deprecated
@GwtIncompatible("TODO")
public static ListeningExecutorService sameThreadExecutor() 

Document

Creates an executor service that runs each task in the thread that invokes execute/submit, as in ThreadPoolExecutor.CallerRunsPolicy. Note that sameThreadExecutor() is deprecated; later Guava releases provide MoreExecutors.directExecutor() and MoreExecutors.newDirectExecutorService() as replacements.
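
A minimal sketch of the call pattern shared by all of the examples below: a listener attached via sameThreadExecutor() runs in whichever thread completes the future, so it should be short and non-blocking. The pool setup and printed messages here are illustrative only and not taken from any of the projects.

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

public class SameThreadExecutorDemo {
    public static void main(String[] args) {
        // Wrap an ordinary pool so submit() returns ListenableFutures.
        ListeningExecutorService pool = MoreExecutors
                .listeningDecorator(Executors.newSingleThreadExecutor());

        ListenableFuture<String> result = pool.submit(new Callable<String>() {
            @Override
            public String call() {
                return "done";
            }
        });

        // The listener executes in the thread that completes the future
        // (the pool thread here), not on a separate executor thread.
        result.addListener(new Runnable() {
            @Override
            public void run() {
                System.out.println("future completed");
            }
        }, MoreExecutors.sameThreadExecutor());

        pool.shutdown();
    }
}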

Usage

From source file:org.guldenj.protocols.channels.PaymentChannelClient.java

/**
 * Increments the total value which we pay the server. Note that the amount of money sent may not be the same as the
 * amount of money actually requested. It can be larger if the amount left over in the channel would be too small to
 * be accepted by the Bitcoin network. ValueOutOfRangeException will be thrown, however, if there's not enough money
 * left in the channel to make the payment at all. Only one payment can be in-flight at once. You have to ensure
 * you wait for the previous increase payment future to complete before incrementing the payment again.
 *
 * @param size How many satoshis to increment the payment by (note: not the new total).
 * @param info Information about this update, used to extend this protocol.
 * @param userKey Key derived from a user password, needed for any signing when the wallet is encrypted.
 *                The wallet KeyCrypter is assumed.
 * @return a future that completes when the server acknowledges receipt and acceptance of the payment.
 * @throws ValueOutOfRangeException If the size is negative or would pay more than this channel's total value
 *                                  ({@link PaymentChannelClientConnection#state()}.getTotalValue())
 * @throws IllegalStateException If the channel has been closed or is not yet open
 *                               (see {@link PaymentChannelClientConnection#getChannelOpenFuture()} for the second)
 * @throws ECKey.KeyIsEncryptedException If the keys are encrypted and no AES key has been provided,
 */
@Override
public ListenableFuture<PaymentIncrementAck> incrementPayment(Coin size, @Nullable ByteString info,
        @Nullable KeyParameter userKey)
        throws ValueOutOfRangeException, IllegalStateException, ECKey.KeyIsEncryptedException {
    lock.lock();
    try {
        if (state() == null || !connectionOpen || step != InitStep.CHANNEL_OPEN)
            throw new IllegalStateException("Channel is not fully initialized/has already been closed");
        if (increasePaymentFuture != null)
            throw new IllegalStateException(
                    "Already incrementing paying, wait for previous payment to complete.");
        if (wallet.isEncrypted() && userKey == null)
            throw new ECKey.KeyIsEncryptedException();

        PaymentChannelV1ClientState.IncrementedPayment payment = state().incrementPaymentBy(size, userKey);
        Protos.UpdatePayment.Builder updatePaymentBuilder = Protos.UpdatePayment.newBuilder()
                .setSignature(ByteString.copyFrom(payment.signature.encodeToBitcoin()))
                .setClientChangeValue(state.getValueRefunded().value);
        if (info != null)
            updatePaymentBuilder.setInfo(info);

        increasePaymentFuture = SettableFuture.create();
        increasePaymentFuture.addListener(new Runnable() {
            @Override
            public void run() {
                lock.lock();
                increasePaymentFuture = null;
                lock.unlock();
            }
        }, MoreExecutors.sameThreadExecutor());

        conn.sendToServer(Protos.TwoWayChannelMessage.newBuilder().setUpdatePayment(updatePaymentBuilder)
                .setType(Protos.TwoWayChannelMessage.MessageType.UPDATE_PAYMENT).build());
        lastPaymentActualAmount = payment.amount;
        return increasePaymentFuture;
    } finally {
        lock.unlock();
    }
}
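
The javadoc above stresses that only one payment may be in flight at a time. A hypothetical caller, assuming an already-open PaymentChannelClient named client, might chain two increments as sketched below; exception handling for the first call is elided and the amounts are arbitrary.

ListenableFuture<PaymentIncrementAck> first = client.incrementPayment(Coin.valueOf(1000), null, null);
Futures.addCallback(first, new FutureCallback<PaymentIncrementAck>() {
    @Override
    public void onSuccess(PaymentIncrementAck ack) {
        try {
            // Safe to start the next payment only after the server has
            // acknowledged the previous one.
            client.incrementPayment(Coin.valueOf(1000), null, null);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    @Override
    public void onFailure(Throwable t) {
        t.printStackTrace();
    }
});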

From source file:com.google.digitalcoin.core.PeerGroup.java

private void setupPingingForNewPeer(final Peer peer) {
    checkState(lock.isLocked());
    if (peer.getPeerVersionMessage().clientVersion < Pong.MIN_PROTOCOL_VERSION)
        return;
    if (getPingIntervalMsec() <= 0)
        return; // Disabled.
    // Start the process of pinging the peer. Do a ping right now and then ensure there's a fixed delay between
    // each ping. If the peer is taken out of the peers list then the cycle will stop.
    final Runnable[] pingRunnable = new Runnable[1];
    pingRunnable[0] = new Runnable() {
        private boolean firstRun = true;

        public void run() {
            // Ensure that the first ping happens immediately and later pings after the requested delay.
            if (firstRun) {
                firstRun = false;
                try {
                    peer.ping().addListener(this, MoreExecutors.sameThreadExecutor());
                } catch (Exception e) {
                    log.warn("{}: Exception whilst trying to ping peer: {}", peer, e.toString());
                    return;
                }
                return;
            }

            final long interval = getPingIntervalMsec();
            if (interval <= 0)
                return; // Disabled.
            pingTimer.schedule(new TimerTask() {
                @Override
                public void run() {
                    try {
                        if (!peers.contains(peer) || !PeerGroup.this.isRunning())
                            return; // Peer was removed/shut down.
                        peer.ping().addListener(pingRunnable[0], MoreExecutors.sameThreadExecutor());
                    } catch (Exception e) {
                        log.warn("{}: Exception whilst trying to ping peer: {}", peer, e.toString());
                    }
                }
            }, interval);
        }
    };
    pingRunnable[0].run();
}
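
The same self-rescheduling idea, stripped of the peer-specific details: each round re-adds itself as a listener on the future it produced, so rounds never overlap. This is a sketch under assumed names (RepeatingProbe, the 5-second interval); the original above additionally runs the first round immediately.

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningScheduledExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class RepeatingProbe {
    private final ListeningScheduledExecutorService scheduler = MoreExecutors
            .listeningDecorator(Executors.newSingleThreadScheduledExecutor());

    public void start() {
        final Runnable[] round = new Runnable[1];
        round[0] = new Runnable() {
            @Override
            public void run() {
                // Schedule the next probe; this sketch has no stop condition.
                ListenableFuture<?> probe = scheduler.schedule(new Runnable() {
                    @Override
                    public void run() {
                        System.out.println("probe sent");
                    }
                }, 5, TimeUnit.SECONDS);
                // When the probe completes, kick off the next round in the
                // completing thread instead of handing off to another executor.
                probe.addListener(round[0], MoreExecutors.sameThreadExecutor());
            }
        };
        round[0].run();
    }

    public static void main(String[] args) throws InterruptedException {
        new RepeatingProbe().start();
        Thread.sleep(12000); // let a couple of probes fire
        System.exit(0);      // the scheduler thread is non-daemon
    }
}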

From source file:net.floodlightcontroller.core.internal.OFSwitch.java

/**
 * Append a listener to receive an OFStatsReply and update the 
 * internal OFSwitch data structures.
 * 
 * This presently taps into the following stats request 
 * messages to listen for the corresponding reply:
 * -- OFTableFeaturesStatsRequest
 * 
 * Extend this to tap into and update other OFStatsType messages.
 * 
 * @param future
 * @param request
 * @return
 */
private <REPLY extends OFStatsReply> ListenableFuture<List<REPLY>> addInternalStatsReplyListener(
        final ListenableFuture<List<REPLY>> future, OFStatsRequest<REPLY> request) {
    switch (request.getStatsType()) {
    case TABLE_FEATURES:
        /* case YOUR_CASE_HERE */
        future.addListener(new Runnable() {
            /*
             * We know the reply will be a list of OFStatsReply.
             */
            @SuppressWarnings("unchecked")
            @Override
            public void run() {
                /*
                 * The OFConnection handles REPLY_MORE for us in the case there
                 * are multiple OFStatsReply messages with the same XID.
                 */
                try {
                    List<? extends OFStatsReply> replies = future.get();
                    if (!replies.isEmpty()) {
                        /*
                         * By checking only the 0th element, we assume all others are the same type.
                         * TODO If not, what then?
                         */
                        switch (replies.get(0).getStatsType()) {
                        case TABLE_FEATURES:
                            processOFTableFeatures((List<OFTableFeaturesStatsReply>) future.get());
                            break;
                        /* case YOUR_CASE_HERE */
                        default:
                            throw new Exception("Received an invalid OFStatsReply of "
                                    + replies.get(0).getStatsType().toString() + ". Expected TABLE_FEATURES.");
                        }
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }, MoreExecutors.sameThreadExecutor()); /* No need for another thread. */
    default:
        break;
    }
    return future; /* either unmodified or with an additional listener */
}

From source file:com.google.digitalcoin.core.PeerGroup.java

/**
 * <p>Given a transaction, sends it un-announced to one peer and then waits for it to be received back from other
 * peers. Once all connected peers have announced the transaction, the future will be completed. If anything goes
 * wrong the exception will be thrown when get() is called, or you can receive it via a callback on the
 * {@link ListenableFuture}. This method returns immediately, so if you want it to block just call get() on the
 * result.</p>
 *
 * <p>Note that if the PeerGroup is limited to only one connection (discovery is not activated) then the future
 * will complete as soon as the transaction was successfully written to that peer.</p>
 *
 * <p>Other than for sending your own transactions, this method is useful if you have received a transaction from
 * someone and want to know that it's valid. It's a bit of a weird hack because the current version of the Digitalcoin
 * protocol does not inform you if you send an invalid transaction. Because sending bad transactions counts towards
 * your DoS limit, be careful with relaying lots of unknown transactions. Otherwise you might get kicked off the
 * network.</p>
 *
 * <p>The transaction won't be sent until there are at least minConnections active connections available.
 * A good choice for proportion would be between 0.5 and 0.8 but if you want faster transmission during initial
 * bringup of the peer group you can lower it.</p>
 */
public ListenableFuture<Transaction> broadcastTransaction(final Transaction tx, final int minConnections) {
    final SettableFuture<Transaction> future = SettableFuture.create();
    log.info("Waiting for {} peers required for broadcast ...", minConnections);
    ListenableFuture<PeerGroup> peerAvailabilityFuture = waitForPeers(minConnections);
    peerAvailabilityFuture.addListener(new Runnable() {
        public void run() {
            // We now have enough connected peers to send the transaction.
            // This can be called immediately if we already have enough. Otherwise it'll be called from a peer
            // thread.

            // Pick a peer to be the lucky recipient of our tx.
            final Peer somePeer = peers.get(0);
            log.info("broadcastTransaction: Enough peers, adding {} to the memory pool and sending to {}",
                    tx.getHashAsString(), somePeer);
            final Transaction pinnedTx = memoryPool.seen(tx, somePeer.getAddress());
            // Prepare to send the transaction by adding a listener that'll be called when confidence changes.
            // Only bother with this if we might actually hear back:
            if (minConnections > 1)
                tx.getConfidence().addEventListener(new TransactionConfidence.Listener() {
                    public void onConfidenceChanged(Transaction tx) {
                        // The number of peers that announced this tx has gone up.
                        // Thread safe - this can run in parallel.
                        final TransactionConfidence conf = tx.getConfidence();
                        int numSeenPeers = conf.numBroadcastPeers();
                        boolean mined = conf
                                .getConfidenceType() != TransactionConfidence.ConfidenceType.NOT_SEEN_IN_CHAIN;
                        log.info("broadcastTransaction: TX {} seen by {} peers{}", new Object[] {
                                pinnedTx.getHashAsString(), numSeenPeers, mined ? " and mined" : "" });
                        if (!(numSeenPeers >= minConnections || mined))
                            return;
                        // We've seen the min required number of peers announce the transaction, or it was included
                        // in a block. Normally we'd expect to see it fully propagate before it gets mined, but
                        // it can be that a block is solved very soon after broadcast, and it's also possible that
                        // due to version skew and changes in the relay rules our transaction is not going to
                        // fully propagate yet can get mined anyway.
                        //
                        // Note that we can't wait for the current number of connected peers right now because we
                        // could have added more peers after the broadcast took place, which means they won't
                        // have seen the transaction. In future when peers sync up their memory pools after they
                        // connect we could come back and change this.
                        //
                        // OK, now tell the wallet about the transaction. If the wallet created the transaction then
                        // it already knows and will ignore this. If it's a transaction we received from
                        // somebody else via a side channel and are now broadcasting, this will put it into the
                        // wallet now we know it's valid.
                        for (Wallet wallet : wallets) {
                            try {
                                // Assumption here is there are no dependencies of the created transaction.
                                //
                                // We may end up with two threads trying to do this in parallel - the wallet will
                                // ignore whichever one loses the race.
                                wallet.receivePending(pinnedTx, null);
                            } catch (Throwable t) {
                                future.setException(t); // RE-ENTRANCY POINT
                                return;
                            }
                        }
                        // We're done! It's important that the PeerGroup lock is not held (by this thread) at this
                        // point to avoid triggering inversions when the Future completes.
                        log.info("broadcastTransaction: {} complete", pinnedTx.getHashAsString());
                        tx.getConfidence().removeEventListener(this);
                        future.set(pinnedTx); // RE-ENTRANCY POINT
                    }
                });

            // Satoshis code sends an inv in this case and then lets the peer request the tx data. We just
            // blast out the TX here for a couple of reasons. Firstly it's simpler: in the case where we have
            // just a single connection we don't have to wait for getdata to be received and handled before
            // completing the future in the code immediately below. Secondly, it's faster. The reason the
            // Satoshi client sends an inv is privacy - it means you can't tell if the peer originated the
            // transaction or not. However, we are not a fully validating node and this is advertised in
            // our version message, as SPV nodes cannot relay it doesn't give away any additional information
            // to skip the inv here - we wouldn't send invs anyway.
            //
            // TODO: The peer we picked might be dead by now. If we can't write the message, pick again and retry.
            ChannelFuture sendComplete = somePeer.sendMessage(pinnedTx);
            // If we've been limited to talk to only one peer, we can't wait to hear back because the
            // remote peer won't tell us about transactions we just announced to it for obvious reasons.
            // So we just have to assume we're done, at that point. This happens when we're not given
            // any peer discovery source and the user just calls connectTo() once.
            if (minConnections == 1) {
                sendComplete.addListener(new ChannelFutureListener() {
                    public void operationComplete(ChannelFuture _) throws Exception {
                        for (Wallet wallet : wallets) {
                            try {
                                // Assumption here is there are no dependencies of the created transaction.
                                wallet.receivePending(pinnedTx, null);
                            } catch (Throwable t) {
                                future.setException(t);
                                return;
                            }
                        }
                        future.set(pinnedTx);
                    }
                });
            }
        }
    }, MoreExecutors.sameThreadExecutor());
    return future;
}
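
The RE-ENTRANCY POINT comments above are a direct consequence of using a same-thread executor: the listener runs synchronously inside future.set(), on the thread that completes the future, so completing it while holding a lock can trigger lock-order inversions. A minimal demonstration, with illustrative class and field names:

import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;

import java.util.concurrent.locks.ReentrantLock;

public class ReentrancyDemo {
    private final ReentrantLock internalLock = new ReentrantLock();
    private final SettableFuture<String> future = SettableFuture.create();

    public ReentrancyDemo() {
        future.addListener(new Runnable() {
            @Override
            public void run() {
                // Runs synchronously inside complete(); internalLock is still held.
                System.out.println("listener ran, lock held by caller: "
                        + internalLock.isHeldByCurrentThread());
            }
        }, MoreExecutors.sameThreadExecutor());
    }

    public void complete() {
        internalLock.lock();
        try {
            future.set("done"); // RE-ENTRANCY POINT: the listener fires right here.
        } finally {
            internalLock.unlock();
        }
    }

    public static void main(String[] args) {
        new ReentrancyDemo().complete();
    }
}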

From source file:com.google.NithPoints.core.PeerGroup.java

/**
 * <p>Given a transaction, sends it un-announced to one peer and then waits for it to be received back from other
 * peers. Once all connected peers have announced the transaction, the future will be completed. If anything goes
 * wrong the exception will be thrown when get() is called, or you can receive it via a callback on the
 * {@link ListenableFuture}. This method returns immediately, so if you want it to block just call get() on the
 * result.</p>
 *
 * <p>Note that if the PeerGroup is limited to only one connection (discovery is not activated) then the future
 * will complete as soon as the transaction was successfully written to that peer.</p>
 *
 * <p>Other than for sending your own transactions, this method is useful if you have received a transaction from
 * someone and want to know that it's valid. It's a bit of a weird hack because the current version of the NithPoints
 * protocol does not inform you if you send an invalid transaction. Because sending bad transactions counts towards
 * your DoS limit, be careful with relaying lots of unknown transactions. Otherwise you might get kicked off the
 * network.</p>
 *
 * <p>The transaction won't be sent until there are at least minConnections active connections available.
 * A good choice for proportion would be between 0.5 and 0.8 but if you want faster transmission during initial
 * bringup of the peer group you can lower it.</p>
 */
public ListenableFuture<Transaction> broadcastTransaction(final Transaction tx, final int minConnections) {
    final SettableFuture<Transaction> future = SettableFuture.create();
    log.info("Waiting for {} peers required for broadcast ...", minConnections);
    ListenableFuture<PeerGroup> peerAvailabilityFuture = waitForPeers(minConnections);
    peerAvailabilityFuture.addListener(new Runnable() {
        public void run() {
            // We now have enough connected peers to send the transaction.
            // This can be called immediately if we already have enough. Otherwise it'll be called from a peer
            // thread.

            // Pick a peer to be the lucky recipient of our tx. This can race if the peer we pick dies immediately.
            final Peer somePeer;
            lock.lock();
            try {
                somePeer = peers.get(0);
            } finally {
                lock.unlock();
            }
            log.info("broadcastTransaction: Enough peers, adding {} to the memory pool and sending to {}",
                    tx.getHashAsString(), somePeer);
            final Transaction pinnedTx = memoryPool.seen(tx, somePeer.getAddress());
            // Prepare to send the transaction by adding a listener that'll be called when confidence changes.
            // Only bother with this if we might actually hear back:
            if (minConnections > 1)
                tx.getConfidence().addEventListener(new TransactionConfidence.Listener() {
                    public void onConfidenceChanged(Transaction tx) {
                        // The number of peers that announced this tx has gone up.
                        // Thread safe - this can run in parallel.
                        final TransactionConfidence conf = tx.getConfidence();
                        int numSeenPeers = conf.numBroadcastPeers();
                        boolean mined = conf
                                .getConfidenceType() != TransactionConfidence.ConfidenceType.NOT_SEEN_IN_CHAIN;
                        log.info("broadcastTransaction: TX {} seen by {} peers{}", new Object[] {
                                pinnedTx.getHashAsString(), numSeenPeers, mined ? " and mined" : "" });
                        if (!(numSeenPeers >= minConnections || mined))
                            return;
                        // We've seen the min required number of peers announce the transaction, or it was included
                        // in a block. Normally we'd expect to see it fully propagate before it gets mined, but
                        // it can be that a block is solved very soon after broadcast, and it's also possible that
                        // due to version skew and changes in the relay rules our transaction is not going to
                        // fully propagate yet can get mined anyway.
                        //
                        // Note that we can't wait for the current number of connected peers right now because we
                        // could have added more peers after the broadcast took place, which means they won't
                        // have seen the transaction. In future when peers sync up their memory pools after they
                        // connect we could come back and change this.
                        //
                        // OK, now tell the wallet about the transaction. If the wallet created the transaction then
                        // it already knows and will ignore this. If it's a transaction we received from
                        // somebody else via a side channel and are now broadcasting, this will put it into the
                        // wallet now we know it's valid.
                        for (Wallet wallet : wallets) {
                            try {
                                // Assumption here is there are no dependencies of the created transaction.
                                //
                                // We may end up with two threads trying to do this in parallel - the wallet will
                                // ignore whichever one loses the race.
                                wallet.receivePending(pinnedTx, null);
                            } catch (Throwable t) {
                                future.setException(t); // RE-ENTRANCY POINT
                                return;
                            }
                        }
                        // We're done! It's important that the PeerGroup lock is not held (by this thread) at this
                        // point to avoid triggering inversions when the Future completes.
                        log.info("broadcastTransaction: {} complete", pinnedTx.getHashAsString());
                        tx.getConfidence().removeEventListener(this);
                        future.set(pinnedTx); // RE-ENTRANCY POINT
                    }
                });

            // Satoshis code sends an inv in this case and then lets the peer request the tx data. We just
            // blast out the TX here for a couple of reasons. Firstly it's simpler: in the case where we have
            // just a single connection we don't have to wait for getdata to be received and handled before
            // completing the future in the code immediately below. Secondly, it's faster. The reason the
            // Satoshi client sends an inv is privacy - it means you can't tell if the peer originated the
            // transaction or not. However, we are not a fully validating node and this is advertised in
            // our version message, as SPV nodes cannot relay it doesn't give away any additional information
            // to skip the inv here - we wouldn't send invs anyway.
            //
            // TODO: The peer we picked might be dead by now. If we can't write the message, pick again and retry.
            ChannelFuture sendComplete = somePeer.sendMessage(pinnedTx);
            // If we've been limited to talk to only one peer, we can't wait to hear back because the
            // remote peer won't tell us about transactions we just announced to it for obvious reasons.
            // So we just have to assume we're done, at that point. This happens when we're not given
            // any peer discovery source and the user just calls connectTo() once.
            if (minConnections == 1) {
                sendComplete.addListener(new ChannelFutureListener() {
                    public void operationComplete(ChannelFuture _) throws Exception {
                        for (Wallet wallet : wallets) {
                            try {
                                // Assumption here is there are no dependencies of the created transaction.
                                wallet.receivePending(pinnedTx, null);
                            } catch (Throwable t) {
                                future.setException(t);
                                return;
                            }
                        }
                        future.set(pinnedTx);
                    }
                });
            }
        }
    }, MoreExecutors.sameThreadExecutor());
    return future;
}

From source file:org.voltdb.ClientInterface.java

public ListenableFutureTask<?> processFinishedCompilerWork(final AsyncCompilerResult result) {
    /*
     * Do the task in the network thread associated with the connection
     * so that access to the CIHM can be lock free for fast path work.
     * Can't access the CIHM from this thread without adding locking.
     */
    final Connection c = (Connection) result.clientData;
    final ListenableFutureTask<?> ft = ListenableFutureTask.create(new Runnable() {
        @Override
        public void run() {
            if (result.errorMsg == null) {
                if (result instanceof AdHocPlannedStmtBatch) {
                    final AdHocPlannedStmtBatch plannedStmtBatch = (AdHocPlannedStmtBatch) result;
                    // assume all stmts have the same catalog version
                    if ((plannedStmtBatch.getPlannedStatementCount() > 0) && (plannedStmtBatch
                            .getPlannedStatement(0).catalogVersion != m_catalogContext.get().catalogVersion)) {

                        /* The adhoc planner learns of catalog updates after the EE and the
                           rest of the system. If the adhoc sql was planned against an
                           obsolete catalog, re-plan. */
                        LocalObjectMessage work = new LocalObjectMessage(new AdHocPlannerWork(m_siteId, false,
                                plannedStmtBatch.clientHandle, plannedStmtBatch.connectionId,
                                plannedStmtBatch.hostname, plannedStmtBatch.adminConnection,
                                plannedStmtBatch.clientData, plannedStmtBatch.sqlBatchText,
                                plannedStmtBatch.getSQLStatements(), plannedStmtBatch.partitionParam, null,
                                false, true, m_adhocCompletionHandler));

                        m_mailbox.send(m_plannerSiteId, work);
                    } else {
                        createAdHocTransaction(plannedStmtBatch);
                    }
                } else if (result instanceof CatalogChangeResult) {
                    final CatalogChangeResult changeResult = (CatalogChangeResult) result;
                    // create the execution site task
                    StoredProcedureInvocation task = new StoredProcedureInvocation();
                    task.procName = "@UpdateApplicationCatalog";
                    task.setParams(changeResult.encodedDiffCommands, changeResult.catalogBytes,
                            changeResult.expectedCatalogVersion, changeResult.deploymentString,
                            changeResult.deploymentCRC);
                    task.clientHandle = changeResult.clientHandle;

                    /*
                     * Round trip the invocation to initialize it for command logging
                     */
                    FastSerializer fs = new FastSerializer();
                    try {
                        fs.writeObject(task);
                        ByteBuffer source = fs.getBuffer();
                        ByteBuffer copy = ByteBuffer.allocate(source.remaining());
                        copy.put(source);
                        copy.flip();
                        FastDeserializer fds = new FastDeserializer(copy);
                        task = new StoredProcedureInvocation();
                        task.readExternal(fds);
                    } catch (Exception e) {
                        hostLog.fatal(e);
                        VoltDB.crashLocalVoltDB(e.getMessage(), true, e);
                    }

                    // initiate the transaction. These hard-coded values from catalog
                    // procedure are horrible, horrible, horrible.
                    createTransaction(changeResult.connectionId, changeResult.hostname,
                            changeResult.adminConnection, task, false, true, true, m_allPartitions,
                            m_allPartitions.length, changeResult.clientData, 0, EstTime.currentTimeMillis(),
                            false);
                } else {
                    throw new RuntimeException(
                            "Should not be able to get here (ClientInterface.checkForFinishedCompilerWork())");
                }
            } else {
                ClientResponseImpl errorResponse = new ClientResponseImpl(ClientResponseImpl.UNEXPECTED_FAILURE,
                        new VoltTable[0], result.errorMsg, result.clientHandle);
                ByteBuffer buf = ByteBuffer.allocate(errorResponse.getSerializedSize() + 4);
                buf.putInt(buf.capacity() - 4);
                errorResponse.flattenToBuffer(buf);
                buf.flip();
                c.writeStream().enqueue(buf);
            }
        }
    }, null);
    if (c != null) {
        c.queueTask(ft);
    }

    /*
     * Add error handling in case of an unexpected exception
     */
    ft.addListener(new Runnable() {
        @Override
        public void run() {
            try {
                ft.get();
            } catch (Exception e) {
                StringWriter sw = new StringWriter();
                PrintWriter pw = new PrintWriter(sw);
                e.printStackTrace(pw);
                pw.flush();
                ClientResponseImpl errorResponse = new ClientResponseImpl(ClientResponseImpl.UNEXPECTED_FAILURE,
                        new VoltTable[0], result.errorMsg, result.clientHandle);
                ByteBuffer buf = ByteBuffer.allocate(errorResponse.getSerializedSize() + 4);
                buf.putInt(buf.capacity() - 4);
                errorResponse.flattenToBuffer(buf);
                buf.flip();
                c.writeStream().enqueue(buf);
            }
        }
    }, MoreExecutors.sameThreadExecutor());

    //Return the future task for test code
    return ft;
}
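
The final listener above exists only to surface exceptions from the task after it has run elsewhere. The same error-surfacing pattern in isolation, with illustrative names and a simulated failure, might look like this:

import com.google.common.util.concurrent.ListenableFutureTask;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;

public class TaskErrorReporter {
    public static void main(String[] args) {
        final ListenableFutureTask<Void> task = ListenableFutureTask.create(new Callable<Void>() {
            @Override
            public Void call() {
                throw new RuntimeException("simulated failure");
            }
        });

        // Same-thread listener that re-inspects the task once it finishes,
        // logging any exception it produced.
        task.addListener(new Runnable() {
            @Override
            public void run() {
                try {
                    task.get(); // rethrows anything the task threw
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                } catch (ExecutionException e) {
                    System.err.println("task failed: " + e.getCause());
                }
            }
        }, MoreExecutors.sameThreadExecutor());

        task.run(); // in real code the task would be queued to another executor
    }
}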