Example usage for com.google.common.util.concurrent SettableFuture setException

Introduction

On this page you can find usage examples for com.google.common.util.concurrent.SettableFuture#setException, collected from open-source projects. Calling setException(Throwable) completes the future with the given failure: registered callbacks receive the throwable, blocked get() calls throw an ExecutionException wrapping it, and the method returns false if the future had already been completed or cancelled.

Prototype

@Override
public boolean setException(Throwable throwable)
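
The examples listed under Usage come from real projects; the short snippet below is not from any of them. It is a minimal, self-contained sketch of the semantics behind the signature above: the first completion attempt wins and returns true, later attempts return false, and get() rethrows the failure wrapped in an ExecutionException.

import java.util.concurrent.ExecutionException;

import com.google.common.util.concurrent.SettableFuture;

public class SetExceptionBasics {
    public static void main(String[] args) throws InterruptedException {
        SettableFuture<String> future = SettableFuture.create();

        // The first completion attempt wins and returns true.
        System.out.println(future.setException(new IllegalStateException("boom"))); // true

        // Later attempts to complete the future are ignored and return false.
        System.out.println(future.set("too late")); // false

        try {
            future.get();
        } catch (ExecutionException e) {
            // get() rethrows the failure wrapped in an ExecutionException.
            System.out.println(e.getCause().getMessage()); // boom
        }
    }
}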

Usage
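
Most of the examples below follow the same pattern: an asynchronous, callback-style API is bridged into a ListenableFuture by creating a SettableFuture, calling set(...) from the success path and setException(...) from the failure path, and handing the SettableFuture back to the caller. The sketch below shows that pattern in isolation; the Callback interface and the adapt(...) helper are hypothetical and exist only for illustration.

import java.util.function.Consumer;

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;

public class CallbackToFutureBridge {

    // Hypothetical callback-style API to be adapted into a ListenableFuture.
    interface Callback<T> {
        void onResult(T value);

        void onError(Throwable error);
    }

    // Bridges a callback-based operation: set() on success, setException() on failure.
    static <T> ListenableFuture<T> adapt(Consumer<Callback<T>> operation) {
        final SettableFuture<T> future = SettableFuture.create();
        operation.accept(new Callback<T>() {
            @Override
            public void onResult(T value) {
                future.set(value);
            }

            @Override
            public void onError(Throwable error) {
                // setException returns false if the future was already completed or cancelled.
                if (!future.setException(error)) {
                    System.err.println("Future already completed, dropping: " + error);
                }
            }
        });
        return future;
    }

    public static void main(String[] args) {
        ListenableFuture<String> result = adapt(cb -> cb.onError(new IllegalStateException("backend unavailable")));

        Futures.addCallback(result, new FutureCallback<String>() {
            @Override
            public void onSuccess(String value) {
                System.out.println("Got: " + value);
            }

            @Override
            public void onFailure(Throwable t) {
                System.out.println("Failed: " + t); // prints the IllegalStateException
            }
        }, MoreExecutors.directExecutor());
    }
}

The project examples that follow are essentially instances of this bridge, each adapting its own callback or listener API (ZooKeeper/Kafka service callbacks, gRPC stream observers, Scala futures, RPC replies) to a SettableFuture.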

From source file:com.continuuity.weave.internal.ZKWeaveController.java

@Override
public ListenableFuture<State> stop() {
    final SettableFuture<State> result = SettableFuture.create();
    final ListenableFuture<State> future = super.stop();
    future.addListener(new Runnable() {
        @Override
        public void run() {
            logPoller.interrupt();
            try {
                logPoller.join();
            } catch (InterruptedException e) {
                LOG.warn("Joining of log poller thread interrupted.", e);
            }
            Futures.addCallback(kafkaClient.stop(), new FutureCallback<Service.State>() {
                @Override
                public void onSuccess(Service.State state) {
                    try {
                        future.get();
                        result.set(State.TERMINATED);
                    } catch (Exception e) {
                        LOG.error("Failed when stopping local services", e);
                        result.setException(e);
                    }
                }

                @Override
                public void onFailure(Throwable t) {
                    result.setException(t);
                }
            });
        }
    }, Threads.SAME_THREAD_EXECUTOR);
    return result;
}

From source file:org.opendaylight.controller.md.sal.dom.broker.impl.legacy.sharded.adapter.ShardedDOMDataBrokerDelegatingReadWriteTransaction.java

@Override
public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(final LogicalDatastoreType store,
        final YangInstanceIdentifier path) {
    checkState(root != null,
            "A modify operation (put, merge or delete) must be performed prior to a read operation");
    final SettableFuture<Optional<NormalizedNode<?, ?>>> readResult = SettableFuture.create();
    final Queue<Modification> currentHistory = Lists.newLinkedList(modificationHistoryMap.get(store));
    Futures.addCallback(initialReadMap.get(store), new FutureCallback<Optional<NormalizedNode<?, ?>>>() {
        @Override
        public void onSuccess(@Nullable final Optional<NormalizedNode<?, ?>> result) {
            final DataTreeModification mod = snapshotMap.get(store).newModification();
            if (result.isPresent()) {
                mod.write(path, result.get());
            }
            applyModificationHistoryToSnapshot(mod, currentHistory);
            readResult.set(mod.readNode(path));
        }

        @Override
        public void onFailure(final Throwable t) {
            readResult.setException(t);
        }
    });

    return Futures.makeChecked(readResult, ReadFailedException.MAPPER);
}

From source file:com.microsoft.tooling.msservices.helpers.auth.AADManagerImpl.java

private <V> void unauthenticatedRequest(@NotNull UserInfo userInfo, @NotNull String resource,
        @NotNull String title, @NotNull RequestCallback<ListenableFuture<V>> requestCallback,
        SettableFuture<V> wrappedFuture) {
    try {
        if (hasRefreshAuthResult(userInfo)) {
            authenticateWithRefreshToken(userInfo, resource, title);
        } else {
            authenticateWithInteractiveToken(userInfo, resource, title);
        }

        requestWithAuthenticationResult(userInfo, resource, requestCallback, wrappedFuture);
    } catch (AzureCmdException e) {
        wrappedFuture.setException(e);
    }
}

From source file:com.microsoft.sharepointservices.DocLibClient.java

/**
 * Creates a file with a given name inside a given library.
 *
 * @param fileName the name of the file to create; must not be null or empty
 * @param library the title of the target library; if null or empty, the site-level files endpoint is used
 * @param overwrite whether an existing file with the same name should be overwritten
 * @param content the raw file content
 * @return ListenableFuture<FileSystemItem>
 */
public ListenableFuture<FileSystemItem> createFile(String fileName, String library, boolean overwrite,
        byte[] content) {

    if (fileName == null || fileName.length() == 0) {
        throw new IllegalArgumentException("fileName cannot be null or empty");
    }

    String urlPart = urlEncode(
            String.format("Add(name='%s', overwrite='%s')", fileName, Boolean.toString(overwrite)));

    String url;
    if (library == null || library.length() == 0) {
        url = getSiteUrl() + "_api/files/" + urlPart;
    } else {
        url = getSiteUrl() + String.format("_api/web/lists/getbytitle('%s')/files/", urlEncode(library))
                + urlPart;
    }
    final SettableFuture<FileSystemItem> result = SettableFuture.create();
    Map<String, String> headers = new HashMap<String, String>();
    headers.put("Content-Type", "application/octet-stream");

    ListenableFuture<JSONObject> request = executeRequestJsonWithDigest(url, "POST", headers, content);

    Futures.addCallback(request, new FutureCallback<JSONObject>() {
        @Override
        public void onFailure(Throwable t) {
            result.setException(t);
        }

        @Override
        public void onSuccess(JSONObject json) {
            FileSystemItem item = new FileSystemItem();
            item.loadFromJson(json, true);
            result.set(item);
        }

    });
    return result;
}

From source file:io.crate.executor.transport.task.UpsertByIdTask.java

private SettableFuture<Long> createIndexAndExecuteUpsertRequest(final UpsertById.Item item) {
    final SettableFuture<Long> future = SettableFuture.create();
    transportCreateIndexAction.execute(new CreateIndexRequest(item.index()).cause("upsert single item"),
            new ActionListener<CreateIndexResponse>() {
                @Override
                public void onResponse(CreateIndexResponse createIndexResponse) {
                    executeUpsertRequest(item, future);
                }

                @Override
                public void onFailure(Throwable e) {
                    e = ExceptionsHelper.unwrapCause(e);
                    if (e instanceof IndexAlreadyExistsException) {
                        executeUpsertRequest(item, future);
                    } else {
                        future.setException(e);
                    }

                }
            });
    return future;
}

From source file:com.google.pubsub.flic.controllers.Client.java

void start(MessageTracker messageTracker) throws Throwable {
    this.messageTracker = messageTracker;
    // Send a gRPC call to start the server
    log.info("Connecting to " + networkAddress + ":" + PORT);
    StartRequest.Builder requestBuilder = StartRequest.newBuilder().setProject(project).setTopic(topic)
            .setMaxOutstandingRequests(maxOutstandingRequests).setMessageSize(messageSize)
            .setRequestRate(requestRate).setStartTime(startTime).setPublishBatchSize(publishBatchSize);
    if (numberOfMessages > 0) {
        requestBuilder.setNumberOfMessages(numberOfMessages);
    } else {
        requestBuilder.setTestDuration(Duration.newBuilder().setSeconds(loadtestLengthSeconds).build());
    }
    switch (clientType) {
    case CPS_GCLOUD_JAVA_SUBSCRIBER:
        requestBuilder.setPubsubOptions(PubsubOptions.newBuilder().setSubscription(subscription)
                .setMaxMessagesPerPull(maxMessagesPerPull));
        break;
    case KAFKA_PUBLISHER:
        requestBuilder.setKafkaOptions(KafkaOptions.newBuilder().setBroker(broker));
        break;
    case KAFKA_SUBSCRIBER:
        requestBuilder.setKafkaOptions(KafkaOptions.newBuilder().setBroker(broker).setPollLength(pollLength));
        break;
    case CPS_GCLOUD_JAVA_PUBLISHER:
    case CPS_GCLOUD_PYTHON_PUBLISHER:
        break;
    }
    StartRequest request = requestBuilder.build();
    SettableFuture<Void> startFuture = SettableFuture.create();
    stub = getStub();
    stub.start(request, new StreamObserver<StartResponse>() {
        private int connectionErrors = 0;

        @Override
        public void onNext(StartResponse response) {
            log.info("Successfully started client [" + networkAddress + "]");
            clientStatus = ClientStatus.RUNNING;
            startFuture.set(null);
        }

        @Override
        public void onError(Throwable throwable) {
            if (connectionErrors > 10) {
                log.error("Client failed to start " + connectionErrors + " times, shutting down.");
                clientStatus = ClientStatus.FAILED;
                startFuture.setException(throwable);
                doneFuture.setException(throwable);
                return;
            }
            connectionErrors++;
            try {
                Thread.sleep(5000);
            } catch (InterruptedException e) {
                log.info("Interrupted during back off, retrying.");
            }
            log.debug("Going to retry client connection, likely due to start up time.");
            stub = getStub();
            stub.start(request, this);
        }

        @Override
        public void onCompleted() {
        }
    });
    try {
        startFuture.get();
        executorService.scheduleAtFixedRate(this::checkClient, 20, 20, TimeUnit.SECONDS);
    } catch (ExecutionException e) {
        throw e.getCause();
    }
}

From source file:org.opendaylight.netconf.topology.util.BaseTopologyManager.java

@Override
public ListenableFuture<Void> onNodeDeleted(final NodeId nodeId) {
    final ArrayList<ListenableFuture<Void>> futures = new ArrayList<>();
    created.remove(nodeId);

    // Master needs to trigger delete on peers and combine results
    if (isMaster) {
        futures.add(delegateTopologyHandler.onNodeDeleted(nodeId));
        for (TopologyManager topologyManager : peers.values()) {
            // add a future into our futures that gets its completion status from the converted scala future
            final SettableFuture<Void> settableFuture = SettableFuture.create();
            futures.add(settableFuture);
            final Future<Void> scalaFuture = topologyManager.onRemoteNodeDeleted(nodeId);
            scalaFuture.onComplete(new OnComplete<Void>() {
                @Override
                public void onComplete(Throwable failure, Void success) throws Throwable {
                    if (failure != null) {
                        settableFuture.setException(failure);
                        return;
                    }

                    settableFuture.set(success);
                }
            }, TypedActor.context().dispatcher());
        }

        final ListenableFuture<Void> aggregatedFuture = aggregator.combineDeleteAttempts(futures);
        Futures.addCallback(aggregatedFuture, new FutureCallback<Void>() {
            @Override
            public void onSuccess(final Void result) {
                naSalNodeWriter.delete(nodeId);
            }

            @Override
            public void onFailure(final Throwable t) {

            }
        });

        return aggregatedFuture;
    }

    // Trigger delete
    return delegateTopologyHandler.onNodeDeleted(nodeId);
}

From source file:ohmdb.flease.FleaseLease.java

/**
 * Only call when executed ON the fiber runnable.  If you are already on the fiber runnable, you may
 * call this, otherwise consider using write().
 * @param future the future to complete when the write succeeds or fails
 * @param k the "k"/ballot number value.
 * @param newLease the lease object
 */
private void writeInternal(final SettableFuture<LeaseValue> future, final BallotNumber k,
        final LeaseValue newLease) {
    try {
        FleaseRequestMessage outMsg = FleaseRequestMessage.newBuilder()
                .setMessageType(FleaseRequestMessage.MessageType.WRITE).setLeaseId(leaseId).setK(k.getMessage())
                .setLease(newLease.getMessage()).build();

        final List<IncomingRpcReply> replies = new ArrayList<>(peers.size());

        final Disposable timeout = fiber.schedule(new Runnable() {
            @Override
            public void run() {
                future.setException(new FleaseWriteTimeoutException(replies, majority));
            }
        }, 20, TimeUnit.SECONDS);

        for (Long peer : peers) {
            OutgoingRpcRequest rpcRequest = new OutgoingRpcRequest(myId, outMsg, peer);

            AsyncRequest.withOneReply(fiber, sendRpcChannel, rpcRequest, new Callback<IncomingRpcReply>() {
                @Override
                public void onMessage(IncomingRpcReply message) {
                    // in theory it's possible for this exception call to fail.
                    if (message.isNackWrite()) {
                        if (!future.setException(new NackWriteException(message))) {
                            LOG.warn("{} write unable to set future exception nackWRITE {}", getId(), message);
                        }
                        // kill the timeout, let's GTFO
                        timeout.dispose();
                        return; // well not much to do anymore.
                    }

                    replies.add(message);

                    // TODO add delay processing so we confirm AFTER we have given all processes a chance to report in
                    if (replies.size() >= majority) {
                        timeout.dispose();

                        // not having received any nackWRITE, we can declare success:
                        if (!future.set(newLease)) {
                            LOG.warn("{} write unable to set future for new lease {}", getId(), newLease);
                        }
                    }
                }
            });
        }
    } catch (Throwable t) {
        future.setException(t);
    }
}

From source file:org.waveprotocol.box.server.waveletstate.block.BlockWaveletStateImpl.java

@Override
public ListenableFuture close() {
    writeLock.lock();
    try {
        checkOpened();
        closing = true;
    } finally {
        writeLock.unlock();
    }
    final SettableFuture future = SettableFuture.create();
    flush().addListener(new Runnable() {

        @Override
        public void run() {
            writeLock.lock();
            try {
                Preconditions.checkNotNull(blockAccess, "Store is not opened.");
                writeWaitingBlocks();
                blockAccess.close();
                LOG.info("Block wavelet state of " + waveletName.toString() + " is closed.");
                blockAccess = null;
                closing = false;
                future.set(null);
            } catch (PersistenceException | IOException ex) {
                future.setException(ex);
            } finally {
                writeLock.unlock();
            }
        }
    }, MoreExecutors.sameThreadExecutor());
    return future;
}

From source file:com.google.devtools.build.lib.remote.ByteStreamUploader.java

private void startAsyncUploadWithRetry(Chunker chunker, Retrier.Backoff backoffTimes,
        SettableFuture<Void> overallUploadResult) {

    AsyncUpload.Listener listener = new AsyncUpload.Listener() {
        @Override
        public void success() {
            overallUploadResult.set(null);
        }

        @Override
        public void failure(Status status) {
            StatusException cause = status.asException();
            long nextDelayMillis = backoffTimes.nextDelayMillis();
            if (nextDelayMillis < 0 || !retrier.isRetriable(status)) {
                // Out of retries or status not retriable.
                RetryException error = new RetryException(cause, backoffTimes.getRetryAttempts());
                overallUploadResult.setException(error);
            } else {
                retryAsyncUpload(nextDelayMillis, chunker, backoffTimes, overallUploadResult);
            }
        }

        private void retryAsyncUpload(long nextDelayMillis, Chunker chunker, Retrier.Backoff backoffTimes,
                SettableFuture<Void> overallUploadResult) {
            try {
                ListenableScheduledFuture<?> schedulingResult = retryService.schedule(
                        Context.current().wrap(
                                () -> startAsyncUploadWithRetry(chunker, backoffTimes, overallUploadResult)),
                        nextDelayMillis, MILLISECONDS);
                // In case the scheduled execution errors, we need to notify the overallUploadResult.
                schedulingResult.addListener(() -> {
                    try {
                        schedulingResult.get();
                    } catch (Exception e) {
                        overallUploadResult
                                .setException(new RetryException(e, backoffTimes.getRetryAttempts()));
                    }
                }, MoreExecutors.directExecutor());
            } catch (RejectedExecutionException e) {
                // May be thrown by .schedule(...) if i.e. the executor is shutdown.
                overallUploadResult.setException(new RetryException(e, backoffTimes.getRetryAttempts()));
            }
        }
    };

    try {
        chunker.reset();
    } catch (IOException e) {
        overallUploadResult.setException(e);
        return;
    }

    AsyncUpload newUpload = new AsyncUpload(channel, callCredentials, callTimeoutSecs, instanceName, chunker,
            listener);
    overallUploadResult.addListener(() -> {
        if (overallUploadResult.isCancelled()) {
            newUpload.cancel();
        }
    }, MoreExecutors.directExecutor());
    newUpload.start();
}