Example usage for java.util.concurrent CompletableFuture thenAccept

Introduction

This page collects real-world usage examples of java.util.concurrent CompletableFuture.thenAccept, drawn from open-source projects.

Prototype

public CompletableFuture<Void> thenAccept(Consumer<? super T> action) 
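
thenAccept consumes the result of a completed future and returns a CompletableFuture<Void>, so it is typically the terminal, side-effecting step of a chain. As a quick orientation before the real-world examples below, here is a minimal self-contained sketch (the values are purely illustrative):

import java.util.concurrent.CompletableFuture;

public class ThenAcceptDemo {
    public static void main(String[] args) {
        CompletableFuture<String> future = CompletableFuture.supplyAsync(() -> "hello");

        // the Consumer receives the result; the returned future is a CompletableFuture<Void>
        CompletableFuture<Void> done = future.thenAccept(result -> System.out.println("Got: " + result));

        done.join(); // block until the side effect has run
    }
}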

Usage

From source file: org.kordamp.javatrove.example06.impl.GithubImplTest.java

@Test
public void happyPath() throws Exception {
    // given:
    Collection<Repository> repositories = createSampleRepositories();
    stubFor(get(urlEqualTo("/orgs/" + ORGANIZATION + "/repos")).willReturn(aResponse().withStatus(200)
            .withHeader("Content-Type", "text/json").withBody(repositoriesAsJSON(repositories, objectMapper))));

    // when:
    CompletableFuture<Collection<Repository>> promise = github.repositories(ORGANIZATION);
    await().timeout(2, SECONDS).until(promise::isDone, equalTo(true));

    // then:
    promise.thenAccept(result -> assertThat(result, equalTo(repositories)));
    verify(getRequestedFor(urlEqualTo("/orgs/" + ORGANIZATION + "/repos")));
}
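
A JDK-semantics caveat about this pattern (an editorial note, not part of the original test): if the assertion inside thenAccept throws, the failure is captured in the returned CompletableFuture<Void> rather than propagated to the JUnit thread, even when the future is already complete. Joining the returned future would surface it, as in this hypothetical variant:

// join the returned future so an assertion failure actually fails the test
promise.thenAccept(result -> assertThat(result, equalTo(repositories))).join();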

From source file: com.ikanow.aleph2.management_db.controllers.actors.BucketDeletionActor.java

@Override
public void onReceive(Object arg0) throws Exception {
    if (!_bucket_crud_proxy.isSet()) { // (for some reason, core_mdb.anything() can fail in the c'tor)
        _bucket_crud_proxy.set(_core_mgmt_db.getDataBucketStore());
    }
    //_logger.info("REAL ACTOR Received message from singleton! " + arg0.getClass().toString());
    if (!BucketDeletionMessage.class.isAssignableFrom(arg0.getClass())) { // not for me
        _logger.debug("Unexpected message: " + arg0.getClass());
        return;
    }
    final ActorRef self_closure = this.self();
    final ActorRef sender_closure = this.sender();
    final BucketDeletionMessage msg = (BucketDeletionMessage) arg0;

    // 1) Before we do anything at all, has this bucket already been deleted somehow?

    if (!DataBucketCrudService.doesBucketPathExist(msg.bucket(), _storage_service, Optional.empty())) {
        sender_closure.tell(msg, self_closure);
        return;
    }

    if (msg.data_only()) { // 2) purge is a bit simpler

        // 2a) DON'T Delete the state stores (this has to be done by hand by the harvester if desired)         

        // 2b) Delete data in all data services
        deleteAllDataStoresForBucket(msg.bucket(), _context, false);

        notifyHarvesterOfPurge(msg.bucket(), _core_mgmt_db.getDataBucketStatusStore(),
                _core_mgmt_db.getRetryStore(BucketActionRetryMessage.class));

        // If we got this far then remove from the queue
        sender_closure.tell(msg, self_closure);
    } else { // 3) OK check for the rare but unpleasant case where the bucket wasn't deleted

        // (don't delete any topics for this bucket, there's a separate CDS worker that is responsible for topic-based "garbage collection")

        final QueryComponent<DataBucketBean> bucket_selector = CrudUtils.allOf(DataBucketBean.class)
                .when(DataBucketBean::full_name, msg.bucket().full_name());
        _bucket_crud_proxy.get().getObjectBySpec(bucket_selector).thenAccept(bucket_opt -> {
            if (bucket_opt.isPresent()) {

                // Hasn't been deleted yet - try to delete async and then just exit out
                CompletableFuture<Boolean> deleted = _bucket_crud_proxy.get()
                        .deleteObjectBySpec(bucket_selector);
                //(see you in an hour!)

                //(some logging)
                deleted.thenAccept(b -> {
                    _logger.warn(ErrorUtils.get(
                            "Problem: deleting bucket {0} not yet removed from bucket store: retrying delete: {1}",
                            msg.bucket().full_name(), b));
                }).exceptionally(t -> {
                    _logger.error(ErrorUtils.get(
                            "Problem: deleting bucket {1} not yet removed from bucket store: retrying delete failed: {0}",
                            t, msg.bucket().full_name()));
                    return null;
                });
            } else {
                // 3a) Delete the state directories               
                deleteAllStateObjectsForBucket(msg.bucket(), _core_mgmt_db, false);

                // 3b) Delete data in all data services
                deleteAllDataStoresForBucket(msg.bucket(), _context, true);

                // 3c) Delete the HDFS data (includes all the archived/stored data)
                try {
                    DataBucketCrudService.removeBucketPath(msg.bucket(), _storage_service, Optional.empty());

                    // If we got this far then delete the bucket forever
                    sender_closure.tell(msg, self_closure);
                } catch (Exception e) {
                    // failed to delete the bucket
                }
            }
        });
    }
}
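
The deleted.thenAccept(...).exceptionally(...) chain above is a common fire-and-forget shape: consume the result to log on success, and log on failure (returning null, since exceptionally must supply a fallback value). A stripped-down, self-contained sketch of the same shape, with placeholder names:

import java.util.concurrent.CompletableFuture;
import java.util.logging.Logger;

public class FireAndForgetDemo {
    private static final Logger LOG = Logger.getLogger(FireAndForgetDemo.class.getName());

    public static void main(String[] args) {
        // stand-in for an asynchronous delete; always succeeds here
        CompletableFuture<Boolean> deleted = CompletableFuture.supplyAsync(() -> true);

        deleted.thenAccept(ok -> LOG.info("delete completed: " + ok))
                .exceptionally(t -> {
                    LOG.severe("delete failed: " + t);
                    return null; // exceptionally must return a fallback (Void here, so null)
                })
                .join(); // the demo waits; the actor above genuinely fires and forgets
    }
}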

From source file: com.devicehive.resource.impl.DeviceCommandResourceImpl.java

/**
 * Implementation of <a href="http://www.devicehive.com/restful#Reference/DeviceCommand/wait">DeviceHive RESTful
 * API: DeviceCommand: wait</a>
 *
 * @param timeout Waiting timeout in seconds (default: 30 seconds, maximum: 60 seconds). Specify 0 to disable
 *                waiting.
 */
@Override
public void wait(final String deviceGuid, final String commandId, final long timeout,
        final AsyncResponse asyncResponse) {

    LOGGER.debug("DeviceCommand wait requested, deviceId = {},  commandId = {}", deviceGuid, commandId);

    asyncResponse.setTimeoutHandler(
            asyncRes -> asyncRes.resume(ResponseFactory.response(Response.Status.NO_CONTENT)));

    if (deviceGuid == null || commandId == null) {
        LOGGER.warn("DeviceCommand wait request failed. BAD REQUEST: deviceGuid and commandId required",
                deviceGuid);
        asyncResponse.resume(ResponseFactory.response(Response.Status.BAD_REQUEST));
        return;
    }

    DeviceVO device = deviceService.getDeviceWithNetworkAndDeviceClass(deviceGuid);

    if (device == null) {
        LOGGER.warn("DeviceCommand wait request failed. NOT FOUND: device {} not found", deviceGuid);
        asyncResponse.resume(ResponseFactory.response(Response.Status.NOT_FOUND));
        return;
    }

    Optional<DeviceCommand> command = commandService.findOne(Long.valueOf(commandId), device.getGuid()).join();

    if (!command.isPresent()) {
        LOGGER.warn(
                "DeviceCommand wait request failed. NOT FOUND: No command found with id = {} for deviceId = {}",
                commandId, deviceGuid);
        asyncResponse.resume(ResponseFactory.response(Response.Status.NO_CONTENT));
        return;
    }

    if (!command.get().getDeviceGuid().equals(device.getGuid())) {
        LOGGER.warn(
                "DeviceCommand wait request failed. BAD REQUEST: Command with id = {} was not sent for device with guid = {}",
                commandId, deviceGuid);
        asyncResponse.resume(ResponseFactory.response(Response.Status.BAD_REQUEST));
        return;
    }

    BiConsumer<DeviceCommand, String> callback = (com, subscriptionId) -> {
        if (!asyncResponse.isDone()) {
            asyncResponse.resume(ResponseFactory.response(Response.Status.OK, com, Policy.COMMAND_TO_DEVICE));
        }
    };

    if (!command.get().getIsUpdated()) {
        CompletableFuture<Pair<String, DeviceCommand>> future = commandService
                .sendSubscribeToUpdateRequest(Long.valueOf(commandId), deviceGuid, callback);
        future.thenAccept(pair -> {
            final DeviceCommand deviceCommand = pair.getRight();
            if (!asyncResponse.isDone() && deviceCommand.getIsUpdated()) {
                asyncResponse.resume(
                        ResponseFactory.response(Response.Status.OK, deviceCommand, Policy.COMMAND_TO_DEVICE));
            }

            if (timeout == 0) {
                asyncResponse.setTimeout(1, TimeUnit.MILLISECONDS); // setting timeout to 0 would cause
                // the thread to suspend indefinitely, see AsyncResponse docs
            } else {
                asyncResponse.setTimeout(timeout, TimeUnit.SECONDS);
            }
        });
        asyncResponse.register(new CompletionCallback() {
            @Override
            public void onComplete(Throwable throwable) {
                try {
                    commandService.sendUnsubscribeRequest(future.get().getLeft(), null);
                } catch (InterruptedException | ExecutionException e) {
                    if (!asyncResponse.isDone()) {
                        asyncResponse.resume(ResponseFactory.response(Response.Status.INTERNAL_SERVER_ERROR));
                    }
                }
            }
        });
    } else {
        if (!asyncResponse.isDone()) {
            asyncResponse.resume(
                    ResponseFactory.response(Response.Status.OK, command.get(), Policy.COMMAND_TO_DEVICE));
        }
    }

}
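
A general JDK point about the future.thenAccept(pair -> ...) call above (not DeviceHive-specific): the consumer runs on whichever thread completes the future, or on the registering thread if the future is already complete. When the callback does nontrivial work, thenAcceptAsync with an explicit executor keeps it off the completing (often I/O) thread. A small sketch:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ThenAcceptAsyncDemo {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        CompletableFuture<Integer> future = CompletableFuture.supplyAsync(() -> 42);

        // run the consumer on 'pool' rather than on the completing thread
        future.thenAcceptAsync(
                n -> System.out.println("handled " + n + " on " + Thread.currentThread().getName()),
                pool).join();

        pool.shutdown();
    }
}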

From source file: com.devicehive.websockets.WebSocketApiInfoHandlerTest.java

@Test
public void shouldReturnApiInfo() throws Exception {
    final String requestId = "62345vxgsa5";

    CompletableFuture<TextMessage> future = new CompletableFuture<>();
    new StandardWebSocketClient().doHandshake(new TextWebSocketHandler() {
        @Override
        protected void handleTextMessage(WebSocketSession session, TextMessage message) throws Exception {
            future.complete(message);
        }
    }, wsBaseUri() + "/websocket/client").addCallback(session -> {
        JsonObject apiInfoRequest = JsonFixture.createWsCommand("server/info", requestId);
        try {
            session.sendMessage(new TextMessage(gson.toJson(apiInfoRequest)));
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }, future::completeExceptionally);

    future.thenAccept(response -> {
        JsonObject jsonResp = gson.fromJson(response.getPayload(), JsonObject.class);

        assertThat(jsonResp.get("action").getAsString(), is("server/info"));
        assertThat(jsonResp.get("requestId").getAsString(), is(requestId));
        assertThat(jsonResp.get("status").getAsString(), is("success"));
    }).exceptionally(e -> {
        fail(e.getMessage());
        return null;
    }).get(5, TimeUnit.SECONDS);
}
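
Note how the tail of the chain does double duty here: exceptionally converts a handshake failure into a test failure via fail(...), while get(5, TimeUnit.SECONDS) blocks the JUnit thread until the thenAccept assertions have run and bounds the wait so a lost websocket reply cannot hang the build. Any assertion error raised inside the chain is rethrown by get as an ExecutionException rather than being silently swallowed.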

From source file: com.yahoo.pulsar.broker.service.BrokerService.java

/**
 * Unload all the topics served by the broker service under the given service unit
 *
 * @param serviceUnit
 * @return a future that completes with the number of topics that were unloaded
 */
public CompletableFuture<Integer> unloadServiceUnit(NamespaceBundle serviceUnit) {
    CompletableFuture<Integer> result = new CompletableFuture<Integer>();
    List<CompletableFuture<Void>> closeFutures = Lists.newArrayList();
    topics.forEach((name, topicFuture) -> {
        DestinationName topicName = DestinationName.get(name);
        if (serviceUnit.includes(topicName)) {
            // Topic needs to be unloaded
            log.info("[{}] Unloading topic", topicName);
            closeFutures.add(topicFuture.thenCompose(Topic::close));
        }
    });
    CompletableFuture<Void> aggregator = FutureUtil.waitForAll(closeFutures);
    aggregator.thenAccept(res -> result.complete(closeFutures.size())).exceptionally(ex -> {
        result.completeExceptionally(ex);
        return null;
    });
    return result;
}
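
FutureUtil.waitForAll here is a Pulsar utility; with only java.util.concurrent, the same aggregate-then-complete shape might look like this sketch (names are illustrative):

import java.util.List;
import java.util.concurrent.CompletableFuture;

public class AggregateDemo {
    public static CompletableFuture<Integer> closeAll(List<CompletableFuture<Void>> closeFutures) {
        CompletableFuture<Integer> result = new CompletableFuture<>();
        CompletableFuture.allOf(closeFutures.toArray(new CompletableFuture[0]))
                .thenAccept(v -> result.complete(closeFutures.size()))
                .exceptionally(ex -> {
                    result.completeExceptionally(ex);
                    return null;
                });
        return result;
    }
}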

From source file: com.ikanow.aleph2.analytics.services.AnalyticsContext.java

/** Gets the secondary buffer (deletes any existing data, and switches to "ping" on an uninitialized index)
 *  NOTE: CAN HAVE SIDE EFFECTS IF UNINITIALIZED
 * @param bucket
 * @param job - if present _and_ points to transient output, then returns the buffers for that transient output, else for the entire bucket
 * @param need_ping_pong_buffer - based on the job.output
 * @param data_service
 * @return
 */
protected Optional<String> getSecondaryBuffer(final DataBucketBean bucket,
        final Optional<AnalyticThreadJobBean> job, final boolean need_ping_pong_buffer,
        final IGenericDataService data_service) {
    if (need_ping_pong_buffer) {
        final Optional<String> job_name = job
                .filter(j -> Optionals.of(() -> j.output().is_transient()).orElse(false)).map(j -> j.name());
        final Optional<String> write_buffer = data_service.getPrimaryBufferName(bucket, job_name)
                .map(Optional::of).orElseGet(() -> { // Two cases:

                    final Set<String> secondaries = data_service.getSecondaryBuffers(bucket, job_name);
                    final int ping_pong_count = (secondaries.contains(IGenericDataService.SECONDARY_PING) ? 1
                            : 0) + (secondaries.contains(IGenericDataService.SECONDARY_PONG) ? 1 : 0);

                    if (1 == ping_pong_count) { // 1) one of ping/pong exists but not the other ... this is the filesystem case, where we can't tell what the primary actually is
                        if (secondaries.contains(IGenericDataService.SECONDARY_PONG)) { //(eg pong is secondary so ping must be primary)
                            return Optional.of(IGenericDataService.SECONDARY_PING);
                        } else
                            return Optional.of(IGenericDataService.SECONDARY_PONG);
                    } else { // 2) all other cases: this is the ES case, where we just use an alias to switch ..
                        // So here there are side effects
                        if (_state_name == State.IN_MODULE) { // this should not happen (unless the data service doesn't support secondary buffers)
                            _logger.warn(ErrorUtils.get(
                                    "Startup case: no primary buffer for bucket:job {0}:{1} service {2}, number of secondary buffers = {3} (ping/pong={4}, secondaries={5})",
                                    bucket.full_name(), job_name.orElse("(none)"),
                                    data_service.getClass().getSimpleName(), ping_pong_count,
                                    need_ping_pong_buffer,
                                    secondaries.stream().collect(Collectors.joining(";"))));
                        } else {
                            _logger.info(ErrorUtils.get(
                                    "Startup case: no primary buffer for bucket:job {0}:{1} service {2}, number of secondary buffers = {3} (ping/pong={4})",
                                    bucket.full_name(), job_name.orElse("(none)"),
                                    data_service.getClass().getSimpleName(), ping_pong_count,
                                    need_ping_pong_buffer));
                        }

                        // ... but we don't currently have a primary so need to build that
                        if (0 == ping_pong_count) { // first time through, create the buffers:
                            data_service.getWritableDataService(JsonNode.class, bucket, Optional.empty(),
                                    Optional.of(IGenericDataService.SECONDARY_PONG));
                            data_service.getWritableDataService(JsonNode.class, bucket, Optional.empty(),
                                    Optional.of(IGenericDataService.SECONDARY_PING));
                        }
                        final Optional<String> curr_primary = Optional.of(IGenericDataService.SECONDARY_PING);
                        final CompletableFuture<BasicMessageBean> future_res = data_service
                                .switchCrudServiceToPrimaryBuffer(bucket, curr_primary, Optional.empty(),
                                        job_name);
                        future_res.thenAccept(res -> {
                            if (!res.success()) {
                                _logger.warn(ErrorUtils.get(
                                        "Error switching between ping/pong buffers (service {0}: ",
                                        data_service.getClass().getSimpleName()) + res.message());
                            }
                        });
                        return curr_primary;
                    }
                }).map(curr_pri -> { // then just pick the buffer that isn't the primary
                    if (IGenericDataService.SECONDARY_PING.equals(curr_pri)) {
                        return IGenericDataService.SECONDARY_PONG;
                    } else
                        return IGenericDataService.SECONDARY_PING;
                });

        return write_buffer;
    } else
        return Optional.empty();
}

From source file: io.pravega.controller.server.SegmentHelper.java

private <ResultT> void sendRequestAsync(final WireCommand request, final ReplyProcessor replyProcessor,
        final CompletableFuture<ResultT> resultFuture, final ConnectionFactory connectionFactory,
        final PravegaNodeUri uri) {
    CompletableFuture<ClientConnection> connectionFuture = connectionFactory.establishConnection(uri,
            replyProcessor);
    connectionFuture.whenComplete((connection, e) -> {
        if (connection == null) {
            resultFuture.completeExceptionally(new WireCommandFailedException(new ConnectionFailedException(e),
                    request.getType(), WireCommandFailedException.Reason.ConnectionFailed));
        } else {
            try {
                connection.send(request);
            } catch (ConnectionFailedException cfe) {
                throw new WireCommandFailedException(cfe, request.getType(),
                        WireCommandFailedException.Reason.ConnectionFailed);
            } catch (Exception e2) {
                throw new RuntimeException(e2);
            }
        }
    }).exceptionally(e -> {
        Throwable cause = ExceptionHelpers.getRealException(e);
        if (cause instanceof WireCommandFailedException) {
            resultFuture.completeExceptionally(cause);
        } else if (cause instanceof ConnectionFailedException) {
            resultFuture.completeExceptionally(new WireCommandFailedException(cause, request.getType(),
                    WireCommandFailedException.Reason.ConnectionFailed));
        } else {
            resultFuture.completeExceptionally(new RuntimeException(cause));
        }
        return null;
    });
    resultFuture.whenComplete((result, e) -> {
        connectionFuture.thenAccept(c -> c.close());
    });
}
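
The closing whenComplete block is a cleanup idiom: once resultFuture settles (normally or exceptionally), thenAccept closes the connection, and because the close is chained on connectionFuture it only fires if a connection was actually established. A generic, self-contained sketch of the same pattern, with a placeholder resource type:

import java.util.concurrent.CompletableFuture;

public class CleanupDemo {
    static class Connection implements AutoCloseable {
        @Override
        public void close() {
            System.out.println("connection closed");
        }
    }

    public static void main(String[] args) {
        CompletableFuture<Connection> connectionFuture = CompletableFuture.supplyAsync(Connection::new);
        CompletableFuture<String> resultFuture = connectionFuture.thenApply(c -> "reply");

        // when the result settles, close the connection (but only if one was established)
        CompletableFuture<String> withCleanup = resultFuture
                .whenComplete((result, e) -> connectionFuture.thenAccept(Connection::close));

        withCleanup.join();
    }
}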

From source file: com.ikanow.aleph2.analytics.services.AnalyticsContext.java

/** Internal utility for completing job output
 *  NOTE IF THE JOB IS NOT TRANSIENT THEN IT SWITCHES THE ENTIRE BUFFER ACROSS
 *  (WHICH WON'T WORK IF THERE ARE >1 JOBS)
 * @param bucket
 * @param job
 */
private void switchJobOutputBuffer(final DataBucketBean bucket, final AnalyticThreadJobBean job) {
    final Optional<String> job_name = Optional.of(job)
            .filter(j -> Optionals.of(() -> j.output().is_transient()).orElse(false)).map(j -> j.name());
    final Consumer<IGenericDataService> switchPrimary = s -> {
        getSecondaryBuffer(bucket, Optional.of(job), true, s).ifPresent(curr_secondary -> {
            final CompletableFuture<BasicMessageBean> result = s.switchCrudServiceToPrimaryBuffer(bucket,
                    Optional.of(curr_secondary),
                    IGenericDataService.SECONDARY_PING.equals(curr_secondary)
                            ? Optional.of(IGenericDataService.SECONDARY_PONG) //(primary must have been ping)
                            : Optional.of(IGenericDataService.SECONDARY_PING),
                    job_name);

            // Log on error:
            result.thenAccept(res -> {
                if (!res.success()) {
                    _logger.warn(ErrorUtils.get("Bucket:job {0}:{1} service {2}: {3}", bucket.full_name(),
                            job.name(), s.getClass().getSimpleName(), res.message()));
                }
            });
        });
    };
    if (!_multi_writer.isSet()) {
        setupOutputs(bucket, job);
    }
    _multi_writer.optional().ifPresent(w -> w.getDataServices()
            .forEach(s -> s.getDataService().ifPresent(ss -> switchPrimary.accept(ss))));
}

From source file: com.ikanow.aleph2.data_import_manager.analytics.actors.DataBucketAnalyticsChangeActor.java

/** Make various requests of the analytics module based on the message type
 * @param bucket
 * @param tech_module
 * @param m
 * @return a future containing the reply or an error (they're the same type at this point, hence the Validation can finally be discarded)
 */
protected static CompletableFuture<BucketActionReplyMessage> talkToAnalytics(final DataBucketBean bucket,
        final BucketActionMessage m, final String source, final AnalyticsContext context,
        final DataImportActorContext dim_context, final Tuple2<ActorRef, ActorSelection> me_sibling,
        final Map<String, Tuple2<SharedLibraryBean, String>> libs, // (if we're here then must be valid)
        final Validation<BasicMessageBean, Tuple2<IAnalyticsTechnologyModule, ClassLoader>> err_or_tech_module, // "pipeline element"
        final ILoggingService _logging_service) {
    final List<AnalyticThreadJobBean> jobs = bucket.analytic_thread().jobs();

    final BiFunction<Stream<AnalyticThreadJobBean>, Tuple2<Boolean, Boolean>, Stream<AnalyticThreadJobBean>> perJobSetup = (
            job_stream, existingbucket_bucketactive) -> {
        return job_stream.filter(
                job -> existingbucket_bucketactive._1() || Optional.ofNullable(job.enabled()).orElse(true))
                .filter(job -> !isBatchJobWithDependencies(bucket, job, existingbucket_bucketactive))
                .peek(job -> setPerJobContextParams(job, context, libs)); //(WARNING: mutates context)
    };

    final ClassLoader saved_current_classloader = Thread.currentThread().getContextClassLoader();
    try {
        return err_or_tech_module.<CompletableFuture<BucketActionReplyMessage>>validation(
                //Error:
                error -> CompletableFuture.completedFuture(new BucketActionHandlerMessage(source, error)),
                // Normal
                techmodule_classloader -> {
                    final IAnalyticsTechnologyModule tech_module = techmodule_classloader._1();

                    if (shouldLog(m))
                        _logging_service.getSystemLogger(bucket).log(Level.INFO, ErrorUtils.lazyBuildMessage(
                                false, () -> DataBucketAnalyticsChangeActor.class.getSimpleName(),
                                () -> "talkToAnalytics", () -> null,
                                () -> "Set active classloader=" + techmodule_classloader._2() + " class="
                                        + tech_module.getClass() + " message=" + m.getClass().getSimpleName()
                                        + " bucket=" + bucket.full_name(),
                                () -> Collections.emptyMap()));
                    Thread.currentThread().setContextClassLoader(techmodule_classloader._2());

                    tech_module.onInit(context);

                    // One final check before we do anything: if we're trying to run multi-node, are we allowed to?
                    // By construction, all the jobs have the same setting, so:
                    final boolean multi_node_enabled = jobs.stream().findFirst()
                            .map(j -> j.multi_node_enabled()).orElse(false);
                    if (multi_node_enabled) {
                        if (!tech_module.supportsMultiNode(bucket, jobs, context)) {
                            return CompletableFuture.completedFuture(new BucketActionHandlerMessage(source,
                                    SharedErrorUtils.buildErrorMessage(source, m, ErrorUtils.get(
                                            AnalyticsErrorUtils.TRIED_TO_RUN_MULTI_NODE_ON_UNSUPPORTED_TECH,
                                            bucket.full_name(), tech_module.getClass().getSimpleName()))));
                        }
                    }

                    return Patterns.match(m).<CompletableFuture<BucketActionReplyMessage>>andReturn()
                            .when(BucketActionMessage.BucketActionOfferMessage.class, msg -> {
                                final boolean accept_or_ignore = NodeRuleUtils.canRunOnThisNode(
                                        jobs.stream().map(j -> Optional.ofNullable(j.node_list_rules())),
                                        dim_context) && tech_module.canRunOnThisNode(bucket, jobs, context);

                                return CompletableFuture.completedFuture(accept_or_ignore
                                        ? new BucketActionReplyMessage.BucketActionWillAcceptMessage(source)
                                        : new BucketActionReplyMessage.BucketActionIgnoredMessage(source));
                            }).when(BucketActionMessage.DeleteBucketActionMessage.class, msg -> {
                                //(note have already told the sibling about this)

                                final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                        .onDeleteThread(bucket, jobs, context);
                                final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = perJobSetup
                                        .apply(jobs.stream(), Tuples._2T(true, false))
                                        .map(job -> Tuples._2T(job,
                                                (CompletableFuture<BasicMessageBean>) tech_module
                                                        .stopAnalyticJob(bucket, jobs, job, context)))
                                        .collect(Collectors.toList());

                                //(no need to call the context.completeJobOutput since we're deleting the bucket)
                                sendOnTriggerEventMessages(job_results, msg.bucket(),
                                        __ -> Optional.of(JobMessageType.stopping), me_sibling,
                                        _logging_service);

                                return combineResults(top_level_result,
                                        job_results.stream().map(jf -> jf._2()).collect(Collectors.toList()),
                                        source);
                            }).when(BucketActionMessage.NewBucketActionMessage.class, msg -> {
                                final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                        .onNewThread(bucket, jobs, context, !msg.is_suspended());

                                return top_level_result.thenCompose(ret_val -> {
                                    if (!ret_val.success()) {
                                        return combineResults(top_level_result, Arrays.asList(), source);
                                    } else { // success, carry on
                                        // Firstly, tell the sibling
                                        if (null != me_sibling)
                                            me_sibling._2().tell(msg, me_sibling._1());

                                        final boolean starting_thread = msg.is_suspended() ? false
                                                : perJobSetup.apply(jobs.stream(), Tuples._2T(false, true))
                                                        .anyMatch(job -> _batch_types
                                                                .contains(job.analytic_type()));

                                        if (starting_thread) {
                                            BasicMessageBean thread_start_result = tech_module.onThreadExecute(
                                                    bucket, jobs, Collections.emptyList(), context).join(); // (wait for completion before doing anything else)
                                            _logging_service.getSystemLogger(bucket).log(
                                                    thread_start_result.success() ? Level.INFO : Level.WARN,
                                                    ErrorUtils.lazyBuildMessage(false,
                                                            () -> DataBucketAnalyticsChangeActor.class
                                                                    .getSimpleName(),
                                                            () -> "talkToAnalytics", () -> null,
                                                            () -> ErrorUtils.get(
                                                                    "Executing thread for bucket {0}, success={1} (error={2})",
                                                                    bucket.full_name(),
                                                                    thread_start_result.success(),
                                                                    thread_start_result.success() ? "none"
                                                                            : thread_start_result.message()),
                                                            () -> Collections.emptyMap()));
                                        }

                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = msg
                                                .is_suspended()
                                                        ? Collections.emptyList()
                                                        : perJobSetup
                                                                .apply(jobs.stream(), Tuples._2T(false, true))
                                                                .map(job -> Tuples._2T(job,
                                                                        (CompletableFuture<BasicMessageBean>) tech_module
                                                                                .startAnalyticJob(bucket, jobs,
                                                                                        job, context)))
                                                                .collect(Collectors.toList());

                                        // Only send on trigger events for messages that started
                                        sendOnTriggerEventMessages(job_results, msg.bucket(), j_r -> {
                                            _logging_service.getSystemLogger(bucket).log(
                                                    j_r._2().success() ? Level.INFO : Level.WARN,
                                                    ErrorUtils.lazyBuildMessage(false,
                                                            () -> DataBucketAnalyticsChangeActor.class
                                                                    .getSimpleName(),
                                                            () -> "talkToAnalytics", () -> null,
                                                            () -> ErrorUtils.get(
                                                                    "Starting bucket:job {0}:{1} success={2}{3}",
                                                                    bucket.full_name(), j_r._1().name(),
                                                                    j_r._2().success(),
                                                                    j_r._2().success() ? ""
                                                                            : (" error = "
                                                                                    + j_r._2().message())),
                                                            () -> Collections.emptyMap()));

                                            return j_r._2().success() ? Optional.of(JobMessageType.starting)
                                                    : Optional.empty();
                                        }, me_sibling, _logging_service);

                                        return combineResults(top_level_result, job_results.stream()
                                                .map(jf -> jf._2()).collect(Collectors.toList()), source);
                                    }
                                });
                            }).when(BucketActionMessage.UpdateBucketActionMessage.class, msg -> {
                                final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                        .onUpdatedThread(msg.old_bucket(), bucket, jobs, msg.is_enabled(),
                                                Optional.empty(), context);

                                return top_level_result.thenCompose(ret_val -> {
                                    if (!ret_val.success()) {
                                        return combineResults(top_level_result, Arrays.asList(), source);
                                    } else { // success, carry on
                                        // Firstly, tell the sibling
                                        if (null != me_sibling)
                                            me_sibling._2().tell(msg, me_sibling._1());

                                        final boolean starting_thread = !msg.is_enabled() ? false
                                                : perJobSetup.apply(jobs.stream(), Tuples._2T(true, true))
                                                        .filter(job -> Optional.ofNullable(job.enabled())
                                                                .orElse(true))
                                                        .anyMatch(job -> _batch_types
                                                                .contains(job.analytic_type()));

                                        if (starting_thread) {
                                            BasicMessageBean thread_start_result = tech_module.onThreadExecute(
                                                    bucket, jobs, Collections.emptyList(), context).join(); // (wait for completion before doing anything else)
                                            _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                    ErrorUtils.lazyBuildMessage(false,
                                                            () -> DataBucketAnalyticsChangeActor.class
                                                                    .getSimpleName(),
                                                            () -> "talkToAnalytics", () -> null,
                                                            () -> ErrorUtils.get(
                                                                    "Executing thread for bucket {0}, success={1} (error={2})",
                                                                    bucket.full_name(),
                                                                    thread_start_result.success(),
                                                                    thread_start_result.success() ? "none"
                                                                            : thread_start_result.message()),
                                                            () -> Collections.emptyMap()));
                                        }
                                        //(don't need the analog for stopping because the trigger will give me the notification once all jobs are completed)

                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = perJobSetup
                                                .apply(jobs.stream(), Tuples._2T(true, msg.is_enabled()))
                                                .map(job -> Tuples._2T(job,
                                                        (CompletableFuture<BasicMessageBean>) ((msg.is_enabled()
                                                                && Optional.ofNullable(job.enabled())
                                                                        .orElse(true))
                                                                                ? tech_module.resumeAnalyticJob(
                                                                                        bucket, jobs, job,
                                                                                        context)
                                                                                : tech_module
                                                                                        .suspendAnalyticJob(
                                                                                                bucket, jobs,
                                                                                                job, context))))
                                                .collect(Collectors.toList());

                                        // Send all stop messages, and start messages for jobs that succeeded
                                        sendOnTriggerEventMessages(job_results, msg.bucket(), j_r -> {
                                            if (msg.is_enabled()
                                                    && Optional.ofNullable(j_r._1().enabled()).orElse(true)) {
                                                _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                        ErrorUtils.lazyBuildMessage(false,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Starting bucket:job {0}:{1} success={2}{3}",
                                                                        bucket.full_name(), j_r._1().name(),
                                                                        j_r._2().success(),
                                                                        j_r._2().success() ? ""
                                                                                : (" error = "
                                                                                        + j_r._2().message())),
                                                                () -> Collections.emptyMap()));
                                                return j_r._2().success() ? Optional.of(JobMessageType.starting)
                                                        : Optional.empty();
                                            } else { // either stopping all, or have disabled certain jobs
                                                _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                        ErrorUtils.lazyBuildMessage(false,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Stopping bucket:job {0}:{1}",
                                                                        bucket.full_name(), j_r._1().name()),
                                                                () -> Collections.emptyMap()));
                                                if (msg.is_enabled()) { //(else stopping the entire bucket)
                                                    context.completeJobOutput(msg.bucket(), j_r._1());
                                                }
                                                return Optional.of(JobMessageType.stopping);
                                            }
                                        }, me_sibling, _logging_service);

                                        return combineResults(top_level_result, job_results.stream()
                                                .map(jf -> jf._2()).collect(Collectors.toList()), source);
                                    }
                                });
                            }).when(BucketActionMessage.PurgeBucketActionMessage.class, msg -> {
                                final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                        .onPurge(bucket, jobs, context);
                                // (don't need to tell the sibling about this)

                                return combineResults(top_level_result, Collections.emptyList(), source);
                            }).when(BucketActionMessage.TestBucketActionMessage.class, msg -> {
                                final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                        .onTestThread(bucket, jobs, msg.test_spec(), context);
                                return top_level_result.thenCompose(ret_val -> {
                                    if (!ret_val.success()) {
                                        return combineResults(top_level_result, Arrays.asList(), source);
                                    } else { // success, carry on
                                        // Firstly, tell the sibling
                                        if (null != me_sibling)
                                            me_sibling._2().tell(msg, me_sibling._1());

                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = perJobSetup
                                                .apply(jobs.stream(), Tuples._2T(false, true))
                                                .map(job -> Tuples._2T(job,
                                                        (CompletableFuture<BasicMessageBean>) tech_module
                                                                .startAnalyticJobTest(bucket, jobs, job,
                                                                        msg.test_spec(), context)))
                                                .collect(Collectors.toList());

                                        // Only send on trigger events for messages that started
                                        sendOnTriggerEventMessages(job_results, msg.bucket(), j_r -> {
                                            _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                    ErrorUtils.lazyBuildMessage(false,
                                                            () -> DataBucketAnalyticsChangeActor.class
                                                                    .getSimpleName(),
                                                            () -> "talkToAnalytics", () -> null,
                                                            () -> ErrorUtils.get(
                                                                    "Starting test bucket:job {0}:{1} success={2}{3}",
                                                                    bucket.full_name(), j_r._1().name(),
                                                                    j_r._2().success(),
                                                                    j_r._2().success() ? ""
                                                                            : (" error = "
                                                                                    + j_r._2().message())),
                                                            () -> Collections.emptyMap()));
                                            return j_r._2().success() ? Optional.of(JobMessageType.starting)
                                                    : Optional.empty();
                                        }, me_sibling, _logging_service);

                                        return combineResults(top_level_result, job_results.stream()
                                                .map(jf -> jf._2()).collect(Collectors.toList()), source);
                                    }
                                });
                            }).when(BucketActionMessage.PollFreqBucketActionMessage.class, msg -> {
                                final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                        .onPeriodicPoll(bucket, jobs, context);

                                //(don't need to tell trigger sibling about this)

                                return combineResults(top_level_result, Collections.emptyList(), source);
                            })
                            // Finally, a bunch of analytic messages (don't tell trigger sibling about any of these)
                            .when(BucketActionMessage.BucketActionAnalyticJobMessage.class,
                                    msg -> (JobMessageType.check_completion == msg.type()), msg -> {
                                        // Check whether these jobs are complete, send message back to sibling asynchronously

                                        //(note: don't use perJobSetup for these explicit analytic event messages)
                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<Boolean>>> job_results = Optionals
                                                .ofNullable(msg.jobs()).stream()
                                                .peek(job -> setPerJobContextParams(job, context, libs)) //(WARNING: mutates context)
                                                .map(job -> Tuples._2T(job,
                                                        (CompletableFuture<Boolean>) tech_module
                                                                .checkAnalyticJobProgress(msg.bucket(),
                                                                        msg.jobs(), job, context)))
                                                .collect(Collectors.toList());

                                        // In addition (for now) just log the management results
                                        job_results.stream().forEach(jr -> {
                                            if (jr._2() instanceof ManagementFuture) {
                                                ManagementFuture<Boolean> jr2 = (ManagementFuture<Boolean>) jr
                                                        ._2();
                                                jr2.thenAccept(result -> {
                                                    if (result) {
                                                        jr2.getManagementResults().thenAccept(mgmt_results -> {
                                                            List<String> errs = mgmt_results.stream()
                                                                    .filter(res -> !res.success())
                                                                    .map(res -> res.message())
                                                                    .collect(Collectors.toList());
                                                            if (!errs.isEmpty()) {
                                                                _logging_service.getSystemLogger(bucket).log(
                                                                        Level.INFO,
                                                                        ErrorUtils.lazyBuildMessage(false,
                                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                                        .getSimpleName(),
                                                                                () -> "talkToAnalytics",
                                                                                () -> null,
                                                                                () -> ErrorUtils.get(
                                                                                        "Completed bucket:job {0}:{1} had errors: {2}",
                                                                                        bucket.full_name(),
                                                                                        jr._1().name(),
                                                                                        errs.stream().collect(
                                                                                                Collectors
                                                                                                        .joining(
                                                                                                                ";"))),
                                                                                () -> Collections.emptyMap()));
                                                            }
                                                        });
                                                    }
                                                });
                                            }
                                            //(it will always be)
                                        });

                                        sendOnTriggerEventMessages(job_results, msg.bucket(), t2 -> {
                                            if (t2._2()) {
                                                _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                        ErrorUtils.lazyBuildMessage(false,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Completed: bucket:job {0}:{1}",
                                                                        bucket.full_name(), t2._1().name()),
                                                                () -> Collections.emptyMap()));
                                                context.completeJobOutput(msg.bucket(), t2._1());
                                            }
                                            return t2._2() ? Optional.of(JobMessageType.stopping)
                                                    : Optional.empty();
                                        }, me_sibling, _logging_service);

                                        // Send a status message (which will be ignored)

                                        return CompletableFuture.completedFuture(
                                                new BucketActionReplyMessage.BucketActionNullReplyMessage());
                                    })
                            .when(BucketActionMessage.BucketActionAnalyticJobMessage.class,
                                    msg -> (JobMessageType.starting == msg.type()) && (null == msg.jobs()),
                                    msg -> {
                                        // Received a start notification for the bucket

                                        //TODO (ALEPH-12): get the matching triggers into the message
                                        final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                                .onThreadExecute(msg.bucket(), jobs, Collections.emptyList(),
                                                        context);

                                        //(ignore the reply apart from logging - failures will be identified by triggers)
                                        top_level_result.thenAccept(reply -> {
                                            if (!reply.success()) {
                                                _logging_service.getSystemLogger(bucket).log(Level.WARN,
                                                        ErrorUtils.lazyBuildMessage(false,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Error starting analytic thread {0}: message={1}",
                                                                        bucket.full_name(), reply.message()),
                                                                () -> Collections.emptyMap()));
                                            } else {
                                                _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                        ErrorUtils.lazyBuildMessage(true,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Started analytic thread {0}",
                                                                        bucket.full_name()),
                                                                () -> Collections.emptyMap()));
                                            }
                                        });

                                        // Now start any enabled jobs that have no dependencies
                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = jobs
                                                .stream()
                                                .filter(job -> Optional.ofNullable(job.enabled()).orElse(true))
                                                .filter(job -> Optionals.ofNullable(job.dependencies())
                                                        .isEmpty())
                                                .peek(job -> setPerJobContextParams(job, context, libs)) //(WARNING: mutates context)
                                                .map(job -> Tuples._2T(job,
                                                        (CompletableFuture<BasicMessageBean>) tech_module
                                                                .startAnalyticJob(msg.bucket(), jobs, job,
                                                                        context)))
                                                .collect(Collectors.toList());

                                        // Only send on trigger events for messages that started
                                        sendOnTriggerEventMessages(job_results, msg.bucket(), j_r -> {
                                            _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                    ErrorUtils.lazyBuildMessage(false,
                                                            () -> DataBucketAnalyticsChangeActor.class
                                                                    .getSimpleName(),
                                                            () -> "talkToAnalytics", () -> null,
                                                            () -> ErrorUtils.get(
                                                                    "Starting bucket:job {0}:{1} success={2}{3}",
                                                                    bucket.full_name(), j_r._1().name(),
                                                                    j_r._2().success(),
                                                                    j_r._2().success() ? ""
                                                                            : (" error = "
                                                                                    + j_r._2().message())),
                                                            () -> Collections.emptyMap()));
                                            return j_r._2().success() ? Optional.of(JobMessageType.starting)
                                                    : Optional.empty();
                                        }, me_sibling, _logging_service);

                                        // Send a status message (which will be ignored)

                                        return CompletableFuture.completedFuture(
                                                new BucketActionReplyMessage.BucketActionNullReplyMessage());
                                    })
                            .when(BucketActionMessage.BucketActionAnalyticJobMessage.class,
                                    msg -> (JobMessageType.starting == msg.type()) && (null != msg.jobs()),
                                    msg -> {
                                        // Received a start notification for 1+ of the jobs

                                        //(note: don't use perJobSetup for these explicit analytic event messages)
                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = msg
                                                .jobs().stream()
                                                .peek(job -> setPerJobContextParams(job, context, libs)) //(WARNING: mutates context)
                                                .map(job -> Tuples._2T(job,
                                                        (CompletableFuture<BasicMessageBean>) tech_module
                                                                .startAnalyticJob(msg.bucket(), jobs, job,
                                                                        context)))
                                                .collect(Collectors.toList());

                                        //(ignore the reply apart from logging - failures will be identified by triggers)
                                        job_results.forEach(job_res -> {
                                            job_res._2().thenAccept(res -> {
                                                if (!res.success()) {
                                                    _logging_service.getSystemLogger(bucket).log(Level.WARN,
                                                            ErrorUtils.lazyBuildMessage(false,
                                                                    () -> DataBucketAnalyticsChangeActor.class
                                                                            .getSimpleName(),
                                                                    () -> "talkToAnalytics", () -> null,
                                                                    () -> ErrorUtils.get(
                                                                            "Error starting analytic job {0}:{1}: message={2}",
                                                                            bucket.full_name(),
                                                                            job_res._1().name(), res.message()),
                                                                    () -> Collections.emptyMap()));
                                                } else {
                                                    _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                            ErrorUtils.lazyBuildMessage(true,
                                                                    () -> DataBucketAnalyticsChangeActor.class
                                                                            .getSimpleName(),
                                                                    () -> "talkToAnalytics", () -> null,
                                                                    () -> ErrorUtils.get(
                                                                            "Started analytic job {0}:{1}",
                                                                            bucket.full_name(),
                                                                            job_res._1().name()),
                                                                    () -> Collections.emptyMap()));
                                                }
                                            });
                                        });

                                        // Send a status message (which will be ignored)

                                        return CompletableFuture.completedFuture(
                                                new BucketActionReplyMessage.BucketActionNullReplyMessage());
                                    })
                            .when(BucketActionMessage.BucketActionAnalyticJobMessage.class,
                                    msg -> (JobMessageType.stopping == msg.type()) && (null == msg.jobs()),
                                    msg -> {
                                        // Received a stop notification for the bucket

                                        // Complete the job output
                                        context.completeBucketOutput(msg.bucket());

                                        final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                                .onThreadComplete(msg.bucket(), jobs, context);

                                        //(ignore the reply apart from logging - failures will be identified by triggers)
                                        top_level_result.thenAccept(reply -> {
                                            if (!reply.success()) {
                                                _logging_service.getSystemLogger(bucket).log(Level.WARN,
                                                        ErrorUtils.lazyBuildMessage(false,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Error stopping analytic thread {0}: message={1}",
                                                                        bucket.full_name(), reply.message()),
                                                                () -> Collections.emptyMap()));
                                            } else {
                                                _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                        ErrorUtils.lazyBuildMessage(true,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Stopping analytic thread {0}",
                                                                        bucket.full_name()),
                                                                () -> Collections.emptyMap()));
                                            }
                                        });

                                        // Send a status message (which will be ignored)

                                        return CompletableFuture.completedFuture(
                                                new BucketActionReplyMessage.BucketActionNullReplyMessage());
                                    })
                            .when(BucketActionMessage.BucketActionAnalyticJobMessage.class,
                                    msg -> (JobMessageType.stopping == msg.type()) && (null != msg.jobs()),
                                    msg -> {
                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = msg
                                                .jobs().stream()
                                                .peek(job -> setPerJobContextParams(job, context, libs)) //(WARNING: mutates context)
                                                .map(job -> Tuples._2T(job,
                                                        (CompletableFuture<BasicMessageBean>) tech_module
                                                                .suspendAnalyticJob(msg.bucket(), jobs, job,
                                                                        context)))
                                                .collect(Collectors.toList());

                                        //(ignore the reply apart from logging - failures will be identified by triggers)
                                        job_results.forEach(job_res -> {
                                            job_res._2().thenAccept(res -> {
                                                if (!res.success()) {
                                                    _logging_service.getSystemLogger(bucket).log(Level.WARN,
                                                            ErrorUtils.lazyBuildMessage(false,
                                                                    () -> DataBucketAnalyticsChangeActor.class
                                                                            .getSimpleName(),
                                                                    () -> "talkToAnalytics", () -> null,
                                                                    () -> ErrorUtils.get(
                                                                            "Error stopping analytic job {0}:{1}: message={2}",
                                                                            bucket.full_name(),
                                                                            job_res._1().name(), res.message()),
                                                                    () -> Collections.emptyMap()));
                                                } else {
                                                    _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                            ErrorUtils.lazyBuildMessage(true,
                                                                    () -> DataBucketAnalyticsChangeActor.class
                                                                            .getSimpleName(),
                                                                    () -> "talkToAnalytics", () -> null,
                                                                    () -> ErrorUtils.get(
                                                                            "Stopping analytic job {0}:{1}",
                                                                            bucket.full_name(),
                                                                            job_res._1().name()),
                                                                    () -> Collections.emptyMap()));
                                                }
                                            });
                                        });

                                        // Send a status message (which will be ignored)

                                        return CompletableFuture.completedFuture(
                                                new BucketActionReplyMessage.BucketActionNullReplyMessage());
                                    })
                            .when(BucketActionMessage.BucketActionAnalyticJobMessage.class,
                                    msg -> (JobMessageType.deleting == msg.type()), msg -> {
                                        // This is different because it happens as part of a user action related to buckets, whereas stopping occurs based on trigger related actions

                                        final CompletableFuture<BasicMessageBean> top_level_result = CompletableFuture
                                                .completedFuture(ErrorUtils.buildSuccessMessage(
                                                        DataBucketAnalyticsChangeActor.class.getSimpleName(),
                                                        "BucketActionAnalyticJobMessage:deleting", ""));

                                        final List<CompletableFuture<BasicMessageBean>> job_results = Optionals
                                                .ofNullable(msg.jobs()).stream().map(job -> tech_module
                                                        .suspendAnalyticJob(bucket, jobs, job, context))
                                                .collect(Collectors.toList());

                                        // Hence do return a legit reply message here

                                        return combineResults(top_level_result, job_results, source);
                                    })
                            .otherwise(msg -> { // return "command not recognized" error
                                return CompletableFuture.completedFuture(new BucketActionHandlerMessage(source,
                                        SharedErrorUtils.buildErrorMessage(source, m,
                                                AnalyticsErrorUtils.MESSAGE_NOT_RECOGNIZED, bucket.full_name(),
                                                m.getClass().getSimpleName())));
                            });
                });
    } catch (Throwable e) { // (trying to use Validation to avoid this, but just in case...)
        return CompletableFuture.completedFuture(new BucketActionHandlerMessage(source,
                SharedErrorUtils.buildErrorMessage(source, m,
                        ErrorUtils.getLongForm(SharedErrorUtils.ERROR_LOADING_CLASS, e,
                                err_or_tech_module.success()._1().getClass()))));
    } finally {
        Thread.currentThread().setContextClassLoader(saved_current_classloader);
    }
}
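
A recurring thenAccept idiom in the example above is fire-and-forget logging: each job-start future gets a thenAccept callback that only logs the outcome, while the actor replies to its caller immediately instead of waiting on those stages. The sketch below distills that shape; the Result record and startJob helper are illustrative stand-ins (requiring Java 16+), not part of the Aleph2 API.

import java.util.List;
import java.util.concurrent.CompletableFuture;

public class FireAndForgetLogging {

    // Illustrative stand-in for a service reply bean such as BasicMessageBean.
    record Result(boolean success, String message) {}

    // Hypothetical async job starter, playing the role of tech_module.startAnalyticJob(..).
    static CompletableFuture<Result> startJob(String name) {
        return CompletableFuture.supplyAsync(() -> new Result(true, "started " + name));
    }

    public static void main(String[] args) {
        List<String> jobs = List.of("enrich", "aggregate");

        // thenAccept consumes each reply purely for its logging side effect;
        // the caller does not block on these stages before replying.
        List<CompletableFuture<Void>> pending = jobs.stream()
                .map(job -> startJob(job).thenAccept(res -> {
                    if (res.success()) {
                        System.out.println("INFO: " + res.message());
                    } else {
                        System.err.println("WARN: job " + job + " failed: " + res.message());
                    }
                }))
                .toList();

        pending.forEach(CompletableFuture::join); // demo only: wait so the JVM doesn't exit before logging
    }
}

Note that nothing downstream depends on these thenAccept stages; if the caller needed the outcome, thenApply or thenCompose with a returned future would be the better tool.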

From source file:org.apache.pulsar.broker.lookup.TopicLookup.java

@GET
@Path("{topic-domain}/{property}/{cluster}/{namespace}/{topic}")
@Produces(MediaType.APPLICATION_JSON)
public void lookupTopicAsync(@PathParam("topic-domain") String topicDomain,
        @PathParam("property") String property, @PathParam("cluster") String cluster,
        @PathParam("namespace") String namespace, @PathParam("topic") @Encoded String encodedTopic,
        @QueryParam("authoritative") @DefaultValue("false") boolean authoritative,
        @Suspended AsyncResponse asyncResponse) {
    String topicName = Codec.decode(encodedTopic);
    TopicDomain domain = null;
    try {
        domain = TopicDomain.getEnum(topicDomain);
    } catch (IllegalArgumentException e) {
        log.error("[{}] Invalid topic-domain {}", clientAppId(), topicDomain, e);
        throw new RestException(Status.METHOD_NOT_ALLOWED, "Unsupported topic domain " + topicDomain);
    }
    TopicName topic = TopicName.get(domain.value(), property, cluster, namespace, topicName);

    if (!pulsar().getBrokerService().getLookupRequestSemaphore().tryAcquire()) {
        log.warn("No broker was found available for topic {}", topic);
        asyncResponse.resume(new WebApplicationException(Response.Status.SERVICE_UNAVAILABLE));
        return;
    }

    try {
        validateClusterOwnership(topic.getCluster());
        checkConnect(topic);
        validateGlobalNamespaceOwnership(topic.getNamespaceObject());
    } catch (WebApplicationException we) {
        // Validation checks failed
        log.error("Validation check failed: {}", we.getMessage());
        completeLookupResponseExceptionally(asyncResponse, we);
        return;
    } catch (Throwable t) {
        // Validation checks failed with unknown error
        log.error("Validation check failed: {}", t.getMessage(), t);
        completeLookupResponseExceptionally(asyncResponse, new RestException(t));
        return;
    }

    CompletableFuture<Optional<LookupResult>> lookupFuture = pulsar().getNamespaceService()
            .getBrokerServiceUrlAsync(topic, authoritative);

    lookupFuture.thenAccept(optionalResult -> {
        if (optionalResult == null || !optionalResult.isPresent()) {
            log.warn("No broker was found available for topic {}", topic);
            completeLookupResponseExceptionally(asyncResponse,
                    new WebApplicationException(Response.Status.SERVICE_UNAVAILABLE));
            return;
        }

        LookupResult result = optionalResult.get();
        // We have found either a broker that owns the topic, or a broker to which we should redirect the client
        if (result.isRedirect()) {
            boolean newAuthoritative = this.isLeaderBroker();
            URI redirect;
            try {
                String redirectUrl = isRequestHttps() ? result.getLookupData().getHttpUrlTls()
                        : result.getLookupData().getHttpUrl();
                checkNotNull(redirectUrl, "Redirected cluster's service url is not configured");
                redirect = new URI(String.format("%s%s%s?authoritative=%s", redirectUrl,
                        "/lookup/v2/destination/", topic.getLookupName(), newAuthoritative));
            } catch (URISyntaxException | NullPointerException e) {
                log.error("Error in preparing redirect url for {}: {}", topic, e.getMessage(), e);
                completeLookupResponseExceptionally(asyncResponse, e);
                return;
            }
            if (log.isDebugEnabled()) {
                log.debug("Redirect lookup for topic {} to {}", topic, redirect);
            }
            completeLookupResponseExceptionally(asyncResponse,
                    new WebApplicationException(Response.temporaryRedirect(redirect).build()));

        } else {
            // Found broker owning the topic
            if (log.isDebugEnabled()) {
                log.debug("Lookup succeeded for topic {} -- broker: {}", topic, result.getLookupData());
            }
            completeLookupResponseSuccessfully(asyncResponse, result.getLookupData());
        }
    }).exceptionally(exception -> {
        log.warn("Failed to lookup broker for topic {}: {}", topic, exception.getMessage(), exception);
        completeLookupResponseExceptionally(asyncResponse, exception);
        return null;
    });

}
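
The Pulsar lookup above pairs thenAccept (the success path, which completes the suspended AsyncResponse) with exceptionally (which routes any lookup failure into the same response). Below is a minimal, self-contained sketch of that shape, with a plain CompletableFuture standing in for the JAX-RS AsyncResponse; lookupBroker and its return values are hypothetical.

import java.util.Optional;
import java.util.concurrent.CompletableFuture;

public class AsyncLookupPattern {

    // Hypothetical async lookup, standing in for NamespaceService.getBrokerServiceUrlAsync(..).
    static CompletableFuture<Optional<String>> lookupBroker(String topic) {
        return CompletableFuture.supplyAsync(
                () -> topic.isEmpty() ? Optional.empty() : Optional.of("pulsar://broker-1:6650"));
    }

    public static void main(String[] args) {
        // Stands in for the @Suspended AsyncResponse of the JAX-RS resource.
        CompletableFuture<String> response = new CompletableFuture<>();

        lookupBroker("persistent://prop/cluster/ns/topic").thenAccept(result -> {
            // Success path: complete the response from inside the callback.
            if (result.isPresent()) {
                response.complete("serve or redirect via " + result.get());
            } else {
                response.completeExceptionally(new IllegalStateException("no broker available"));
            }
        }).exceptionally(ex -> {
            // Failure path: funnel lookup errors into the same response, exactly once.
            response.completeExceptionally(ex);
            return null; // exceptionally must return a (Void) value for the recovered stage
        });

        System.out.println(response.join());
    }
}

The return null in exceptionally is required by its signature: it must supply a value for the recovered Void stage, and null is the only one available.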