Example usage for java.util.concurrent CompletableFuture thenCompose

Introduction

On this page you can find usage examples for java.util.concurrent CompletableFuture.thenCompose.

Prototype

public <U> CompletableFuture<U> thenCompose(Function<? super T, ? extends CompletionStage<U>> fn) 
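
Before the examples, a minimal sketch of what the method does: the supplied function itself returns a CompletionStage, and thenCompose flattens the nested stage, making it the asynchronous analogue of flatMap (where thenApply is the analogue of map). The names lookupUserId and fetchProfile below are hypothetical, purely for illustration.

import java.util.concurrent.CompletableFuture;

public class ThenComposeSketch {
    // Hypothetical async lookups, each returning a future rather than a plain value.
    static CompletableFuture<Long> lookupUserId(String name) {
        return CompletableFuture.supplyAsync(() -> 42L);
    }

    static CompletableFuture<String> fetchProfile(long id) {
        return CompletableFuture.supplyAsync(() -> "profile-" + id);
    }

    public static void main(String[] args) {
        // thenCompose flattens the nested stage: the result is a
        // CompletableFuture<String>, not a CompletableFuture<CompletableFuture<String>>.
        CompletableFuture<String> profile = lookupUserId("alice")
                .thenCompose(id -> fetchProfile(id));
        System.out.println(profile.join()); // prints "profile-42"
    }
}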

Usage

From source file:io.pravega.controller.store.stream.PersistentStreamBase.java

/**
 * Find predecessor candidates and find overlaps with given segment's key range.
 *
 * @param number segment number.
 * @return future of the list of predecessor segment numbers
 */
@Override
public CompletableFuture<List<Integer>> getPredecessors(final int number) {
    val legal = verifyLegalState();
    val segmentFuture = getSegment(number);
    val indexTableFuture = getIndexTable();
    val historyTableFuture = getHistoryTable();
    CompletableFuture<Void> all = CompletableFuture.allOf(legal, segmentFuture, indexTableFuture,
            historyTableFuture);

    return all.thenCompose(x -> {
        final Segment segment = segmentFuture.getNow(null);
        List<Integer> candidates = TableHelper.findSegmentPredecessorCandidates(segment,
                indexTableFuture.getNow(null).getData(), historyTableFuture.getNow(null).getData());
        return findOverlapping(segment, candidates);
    }).thenApply(list -> list.stream().map(e -> e.getNumber()).collect(Collectors.toList()));
}
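
A note on the idiom above, which recurs in the next example: CompletableFuture.allOf only yields a CompletableFuture<Void>, so the individual futures are held in local variables and their results read back with getNow(null) inside the thenCompose callback, where they are guaranteed to be complete. A minimal sketch of that shape, with hypothetical futures a and b:

import java.util.concurrent.CompletableFuture;

public class AllOfComposeSketch {
    public static void main(String[] args) {
        CompletableFuture<Integer> a = CompletableFuture.supplyAsync(() -> 1);
        CompletableFuture<Integer> b = CompletableFuture.supplyAsync(() -> 2);

        // allOf signals completion only; results are read back from the originals.
        CompletableFuture<Integer> sum = CompletableFuture.allOf(a, b)
                .thenCompose(ignored -> {
                    int x = a.getNow(null); // safe: allOf has completed, so a is done
                    int y = b.getNow(null);
                    return CompletableFuture.completedFuture(x + y);
                });

        System.out.println(sum.join()); // prints 3
    }
}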

From source file:io.pravega.controller.store.stream.PersistentStreamBase.java

@Override
public CompletableFuture<Map<Integer, List<Integer>>> getSuccessorsWithPredecessors(final int number) {
    val legal = verifyLegalState();
    val indexTableFuture = getIndexTable();
    val historyTableFuture = getHistoryTable();
    CompletableFuture<List<Segment>> segments = getSuccessorsForSegment(number);

    CompletableFuture<Void> all = CompletableFuture.allOf(legal, segments, indexTableFuture,
            historyTableFuture);

    return all.thenCompose(v -> {
        List<CompletableFuture<Map.Entry<Segment, List<Integer>>>> resultFutures = new ArrayList<>();
        List<Segment> successors = segments.getNow(null);
        for (Segment successor : successors) {
            List<Integer> candidates = TableHelper.findSegmentPredecessorCandidates(successor,
                    indexTableFuture.getNow(null).getData(), historyTableFuture.getNow(null).getData());
            resultFutures.add(findOverlapping(successor, candidates)
                    .thenApply(list -> new SimpleImmutableEntry<>(successor,
                            list.stream().map(Segment::getNumber).collect(Collectors.toList()))));
        }
        return FutureHelpers.allOfWithResults(resultFutures);
    }).thenApply(
            list -> list.stream().collect(Collectors.toMap(e -> e.getKey().getNumber(), e -> e.getValue())));
}
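
FutureHelpers.allOfWithResults above is a Pravega utility that evidently turns a list of futures into a future of a list. For reference, the same step can be sketched with plain JDK calls; this generic helper is an assumption about what the utility does, not its actual implementation:

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

public class SequenceSketch {
    // Collect List<CompletableFuture<T>> into CompletableFuture<List<T>>.
    static <T> CompletableFuture<List<T>> allOfWithResults(List<CompletableFuture<T>> futures) {
        return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
                .thenApply(v -> futures.stream()
                        .map(CompletableFuture::join) // safe: allOf guarantees completion
                        .collect(Collectors.toList()));
    }
}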

From source file:com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_LibraryJars.java

/** Takes a collection of results from the management side-channel, and uses them to update the V1 share's error status
 * @param id - source key / bucket id
 * @param status_messages
 * @param share_db
 * @return true - if share updated with errors, false otherwise
 */
protected static CompletableFuture<Boolean> updateV1ShareErrorStatus(final Date main_date, final String id,
        final Collection<BasicMessageBean> status_messages,
        final IManagementCrudService<SharedLibraryBean> library_mgmt, final ICrudService<JsonNode> share_db,
        final boolean create_not_update) {
    final String message_block = status_messages.stream().map(msg -> {
        return "[" + msg.date() + "] " + msg.source() + " (" + msg.command() + "): "
                + (msg.success() ? "INFO" : "ERROR") + ": " + msg.message();
    }).collect(Collectors.joining("\n"));

    final boolean any_errors = status_messages.stream().anyMatch(msg -> !msg.success());

    // Only going to do something if we have errors:

    if (any_errors) {
        _logger.warn(ErrorUtils.get("Error creating/updating shared library bean: {0} error= {1}", id,
                message_block.replace("\n", "; ")));
        return share_db.getObjectById(new ObjectId(id), Arrays.asList("title", "description"), true)
                .thenCompose(jsonopt -> {
                    if (jsonopt.isPresent()) { // (else share has vanished, nothing to do)

                        final CommonUpdateComponent<JsonNode> v1_update = Optional
                                .of(CrudUtils.update().set("description",
                                        safeJsonGet("description", jsonopt.get()).asText() + "\n\n"
                                                + message_block))
                                // If shared lib already exists then can't update the title (or the existing lib bean will get deleted)
                                .map(c -> create_not_update
                                        ? c.set("title",
                                                "ERROR:" + safeJsonGet("title", jsonopt.get()).asText())
                                        : c)
                                .get();

                        @SuppressWarnings("unchecked")
                        final CompletableFuture<Boolean> v2_res = Lambdas.get(() -> {
                            if (!create_not_update) { // also make a token effort to update the timestamp on the shared lib bean, so the same error doesn't keep getting repeated
                                final CommonUpdateComponent<SharedLibraryBean> v2_update = CrudUtils
                                        .update(SharedLibraryBean.class)
                                        .set(SharedLibraryBean::modified, new Date());

                                //(need to do this because as of Aug 2015, the updateObjectById isn't plumbed in)
                                final ICrudService<SharedLibraryBean> library_service = (ICrudService<SharedLibraryBean>) (ICrudService<?>) library_mgmt
                                        .getUnderlyingPlatformDriver(ICrudService.class, Optional.empty())
                                        .get();

                                return library_service.updateObjectById("v1_" + id, v2_update); // (just fire this off and forget about it)
                            } else
                                return CompletableFuture.completedFuture(true);
                        });
                        final CompletableFuture<Boolean> update_res = v2_res.thenCompose(b -> {
                            if (b) {
                                return share_db.updateObjectById(new ObjectId(id), v1_update);
                            } else {
                                _logger.warn(ErrorUtils
                                        .get("Error creating/updating v2 library bean: {0} unknown error", id));
                                return CompletableFuture.completedFuture(false);
                            }
                        }).exceptionally(t -> {
                            _logger.warn(ErrorUtils.getLongForm(
                                    "Error creating/updating shared library bean: {1} error= {0}", t, id));
                            return false;
                        });
                        return update_res;
                    } else {
                        return CompletableFuture.completedFuture(false);
                    }
                });
    } else {
        return CompletableFuture.completedFuture(false);
    }
}
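
Worth noting in the example above: the nested thenCompose chain ends in exceptionally, so any failure along the way collapses to a logged false instead of an exceptional future. A compact sketch of that fallback shape (updateA and updateB are hypothetical):

import java.util.concurrent.CompletableFuture;

public class ComposeWithFallbackSketch {
    static CompletableFuture<Boolean> updateA() {
        return CompletableFuture.completedFuture(true);
    }

    static CompletableFuture<Boolean> updateB(boolean previousOk) {
        return CompletableFuture.completedFuture(previousOk);
    }

    public static void main(String[] args) {
        CompletableFuture<Boolean> result = updateA()
                .thenCompose(ok -> updateB(ok)) // second async step depends on the first
                .exceptionally(t -> {
                    // Any exception from either stage lands here; swallow it and report failure.
                    System.err.println("update failed: " + t);
                    return false;
                });
        System.out.println(result.join());
    }
}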

From source file:io.pravega.controller.task.Stream.StreamMetadataTasks.java

@VisibleForTesting
CompletableFuture<CreateStreamStatus.Status> createStreamBody(String scope, String stream,
        StreamConfiguration config, long timestamp) {
    return this.streamMetadataStore.createStream(scope, stream, config, timestamp, null, executor)
            .thenComposeAsync(response -> {
                log.info("{}/{} created in metadata store", scope, stream);
                CreateStreamStatus.Status status = translate(response.getStatus());
                // only if it's a new stream or an already existing non-active stream do we create
                // segments and change the state of the stream to active.
                if (response.getStatus().equals(CreateStreamResponse.CreateStatus.NEW)
                        || response.getStatus().equals(CreateStreamResponse.CreateStatus.EXISTS_CREATING)) {
                    List<Integer> newSegments = IntStream
                            .range(0, response.getConfiguration().getScalingPolicy().getMinNumSegments())
                            .boxed().collect(Collectors.toList());
                    return notifyNewSegments(scope, stream, response.getConfiguration(), newSegments)
                            .thenCompose(y -> {
                                final OperationContext context = streamMetadataStore.createContext(scope,
                                        stream);

                                return withRetries(() -> {
                                    CompletableFuture<Void> future;
                                    if (config.getRetentionPolicy() != null) {
                                        future = streamMetadataStore.addUpdateStreamForAutoStreamCut(scope,
                                                stream, config.getRetentionPolicy(), context, executor);
                                    } else {
                                        future = CompletableFuture.completedFuture(null);
                                    }
                                    return future.thenCompose(v -> streamMetadataStore.setState(scope, stream,
                                            State.ACTIVE, context, executor));
                                }, executor).thenApply(z -> status);
                            });
                } else {
                    return CompletableFuture.completedFuture(status);
                }
            }, executor).handle((result, ex) -> {
                if (ex != null) {
                    Throwable cause = Exceptions.unwrap(ex);
                    if (cause instanceof StoreException.DataNotFoundException) {
                        return CreateStreamStatus.Status.SCOPE_NOT_FOUND;
                    } else {
                        log.warn("Create stream failed due to ", ex);
                        return CreateStreamStatus.Status.FAILURE;
                    }
                } else {
                    return result;
                }
            });
}
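
A design note on createStreamBody: the happy path is assembled with thenComposeAsync/thenCompose, and a single trailing handle translates any exception into a status value, so callers always receive a completed status rather than an exceptional future. A minimal sketch of that shape (Status and createRecord are hypothetical):

import java.util.concurrent.CompletableFuture;

public class ComposeThenHandleSketch {
    enum Status { SUCCESS, NOT_FOUND, FAILURE }

    static CompletableFuture<String> createRecord() {
        return CompletableFuture.supplyAsync(() -> "created");
    }

    public static void main(String[] args) {
        CompletableFuture<Status> status = createRecord()
                .thenCompose(r -> CompletableFuture.completedFuture(Status.SUCCESS))
                .handle((result, ex) -> {
                    if (ex != null) {
                        // Unwrap and map specific causes to specific statuses here.
                        return ex.getCause() instanceof IllegalArgumentException
                                ? Status.NOT_FOUND
                                : Status.FAILURE;
                    }
                    return result;
                });
        System.out.println(status.join()); // prints SUCCESS
    }
}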

From source file:com.ikanow.aleph2.management_db.services.DataBucketCrudService.java

/** Internal function to delete the bucket, while notifying active users of the bucket
 * @param to_delete
 * @return a management future containing the result 
 */
private ManagementFuture<Boolean> deleteBucket(final DataBucketBean to_delete) {
    try {
        // Also delete the file paths (currently, just add ".deleted" to top level path) 
        deleteFilePath(to_delete, _storage_service.get());
        //delete the logging path as well if it exists (it's okay if it fails, should mean it doesn't exist)
        try {
            deleteFilePath(BucketUtils.convertDataBucketBeanToLogging(to_delete), _storage_service.get());
        } catch (Exception ex) {
        }

        // Add to the deletion queue (do it before trying to delete the bucket in case this bucket deletion fails - if so then delete queue will retry every hour)
        final Date to_delete_date = Timestamp.from(Instant.now().plus(1L, ChronoUnit.MINUTES));
        final CompletableFuture<Supplier<Object>> enqueue_delete = this._bucket_deletion_queue.get()
                .storeObject(new BucketDeletionMessage(to_delete, to_delete_date, false));

        final CompletableFuture<Boolean> delete_reply = enqueue_delete
                .thenCompose(__ -> _underlying_data_bucket_db.get().deleteObjectById(to_delete._id()));

        return FutureUtils.denestManagementFuture(delete_reply.thenCompose(del_reply -> {
            if (!del_reply) { // Didn't find an object to delete, just return that information to the user
                return CompletableFuture.completedFuture(Optional.empty());
            } else { //Get the status and delete it 

                final CompletableFuture<Optional<DataBucketStatusBean>> future_status_bean = _underlying_data_bucket_status_db
                        .get().updateAndReturnObjectBySpec(
                                CrudUtils.allOf(DataBucketStatusBean.class).when(DataBucketStatusBean::_id,
                                        to_delete._id()),
                                Optional.empty(), CrudUtils.update(DataBucketStatusBean.class).deleteObject(),
                                Optional.of(true), Collections.emptyList(), false);

                return future_status_bean;
            }
        }).thenApply(status_bean -> {
            if (!status_bean.isPresent()) {
                return FutureUtils.createManagementFuture(delete_reply);
            } else {
                final BucketActionMessage.DeleteBucketActionMessage delete_message = new BucketActionMessage.DeleteBucketActionMessage(
                        to_delete,
                        new HashSet<String>(Optional
                                .ofNullable(status_bean.isPresent() ? status_bean.get().node_affinity() : null)
                                .orElse(Collections.emptyList())));

                final CompletableFuture<Collection<BasicMessageBean>> management_results = MgmtCrudUtils
                        .applyRetriableManagementOperation(to_delete, _actor_context,
                                _bucket_action_retry_store.get(), delete_message, source -> {
                                    return new BucketActionMessage.DeleteBucketActionMessage(
                                            delete_message.bucket(),
                                            new HashSet<String>(Arrays.asList(source)));
                                });

                // Convert BucketActionCollectedRepliesMessage into a management side-channel:
                return FutureUtils.createManagementFuture(delete_reply, management_results);
            }
        }));
    } catch (Exception e) {
        // This is a serious enough exception that we'll just leave here
        return FutureUtils.createManagementFuture(FutureUtils.returnError(e));
    }
}

From source file:com.ikanow.aleph2.shared.crud.elasticsearch.services.ElasticsearchCrudService.java

@Override
public CompletableFuture<Boolean> deleteDatastore() {
    try {
        final ReadWriteContext rw_context = getRwContextOrThrow(_state.es_context, "deleteDatastore");

        final String[] index_list = rw_context.indexContext().getReadableIndexArray(Optional.empty());
        final boolean involves_wildcards = Arrays.stream(index_list).anyMatch(s -> s.contains("*"));
        DeleteIndexRequestBuilder dir = _state.client.admin().indices().prepareDelete(index_list);

        // First check if the indexes even exist, so can return false if they don't
        // (can bypass this if there are no wildcards, will get an exception instead)
        final CompletableFuture<Boolean> intermed = Lambdas.get(() -> {
            if (involves_wildcards) {
                final IndicesStatsRequestBuilder irb = _state.client.admin().indices().prepareStats(index_list);
                final CompletableFuture<Boolean> check_indexes = ElasticsearchFutureUtils.wrap(irb.execute(),
                        ir -> {
                            return !ir.getIndices().isEmpty();
                        }, (err, future) -> {
                            future.completeExceptionally(err);
                        });
                return check_indexes;
            } else
                return CompletableFuture.completedFuture(true);
        });
        // Now try deleting the indexes
        return intermed.thenCompose(b -> {
            if (b) {
                return ElasticsearchFutureUtils.wrap(dir.execute(), dr -> {
                    return true;
                }, (err, future) -> {
                    if ((err instanceof IndexMissingException)
                            || (err instanceof SearchPhaseExecutionException)) //(this one can come up as on a read on a newly created index)
                    {
                        future.complete(false);
                    } else {
                        future.completeExceptionally(err);
                    }
                });
            } else
                return CompletableFuture.completedFuture(false);
        });
    } catch (Exception e) {
        return FutureUtils.returnError(e);
    }
}
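
The gating pattern above, i.e. a cheap async existence check composed with the expensive delete, relies on both branches of the conditional returning the same stage type; CompletableFuture.completedFuture supplies the no-op arm. A minimal sketch (existsCheck and doDelete are hypothetical):

import java.util.concurrent.CompletableFuture;

public class ConditionalComposeSketch {
    static CompletableFuture<Boolean> existsCheck() {
        return CompletableFuture.supplyAsync(() -> true);
    }

    static CompletableFuture<Boolean> doDelete() {
        return CompletableFuture.supplyAsync(() -> true);
    }

    public static void main(String[] args) {
        // Only run doDelete when the check passes; otherwise short-circuit with
        // an already-completed future so both arms are CompletableFuture<Boolean>.
        CompletableFuture<Boolean> deleted = existsCheck()
                .thenCompose(exists -> exists ? doDelete()
                                              : CompletableFuture.completedFuture(false));
        System.out.println(deleted.join());
    }
}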

From source file:com.ikanow.aleph2.data_import_manager.analytics.actors.DataBucketAnalyticsChangeActor.java

/** Make various requests of the analytics module based on the message type
 * @param bucket
 * @param tech_module
 * @param m
 * @return - a future containing the reply or an error (they're the same type at this point, so the Validation can finally be discarded)
 */
protected static CompletableFuture<BucketActionReplyMessage> talkToAnalytics(final DataBucketBean bucket,
        final BucketActionMessage m, final String source, final AnalyticsContext context,
        final DataImportActorContext dim_context, final Tuple2<ActorRef, ActorSelection> me_sibling,
        final Map<String, Tuple2<SharedLibraryBean, String>> libs, // (if we're here then must be valid)
        final Validation<BasicMessageBean, Tuple2<IAnalyticsTechnologyModule, ClassLoader>> err_or_tech_module, // "pipeline element"
        final ILoggingService _logging_service) {
    final List<AnalyticThreadJobBean> jobs = bucket.analytic_thread().jobs();

    final BiFunction<Stream<AnalyticThreadJobBean>, Tuple2<Boolean, Boolean>, Stream<AnalyticThreadJobBean>> perJobSetup = (
            job_stream, existingbucket_bucketactive) -> {
        return job_stream.filter(
                job -> existingbucket_bucketactive._1() || Optional.ofNullable(job.enabled()).orElse(true))
                .filter(job -> !isBatchJobWithDependencies(bucket, job, existingbucket_bucketactive))
                .peek(job -> setPerJobContextParams(job, context, libs)); //(WARNING: mutates context)
    };

    final ClassLoader saved_current_classloader = Thread.currentThread().getContextClassLoader();
    try {
        return err_or_tech_module.<CompletableFuture<BucketActionReplyMessage>>validation(
                //Error:
                error -> CompletableFuture.completedFuture(new BucketActionHandlerMessage(source, error)),
                // Normal
                techmodule_classloader -> {
                    final IAnalyticsTechnologyModule tech_module = techmodule_classloader._1();

                    if (shouldLog(m))
                        _logging_service.getSystemLogger(bucket).log(Level.INFO, ErrorUtils.lazyBuildMessage(
                                false, () -> DataBucketAnalyticsChangeActor.class.getSimpleName(),
                                () -> "talkToAnalytics", () -> null,
                                () -> "Set active classloader=" + techmodule_classloader._2() + " class="
                                        + tech_module.getClass() + " message=" + m.getClass().getSimpleName()
                                        + " bucket=" + bucket.full_name(),
                                () -> Collections.emptyMap()));
                    Thread.currentThread().setContextClassLoader(techmodule_classloader._2());

                    tech_module.onInit(context);

                    // One final check before we do anything: if multi-node is requested, are we allowed to run it?
                    // By construction, all the jobs have the same setting, so:
                    final boolean multi_node_enabled = jobs.stream().findFirst()
                            .map(j -> j.multi_node_enabled()).orElse(false);
                    if (multi_node_enabled) {
                        if (!tech_module.supportsMultiNode(bucket, jobs, context)) {
                            return CompletableFuture.completedFuture(new BucketActionHandlerMessage(source,
                                    SharedErrorUtils.buildErrorMessage(source, m, ErrorUtils.get(
                                            AnalyticsErrorUtils.TRIED_TO_RUN_MULTI_NODE_ON_UNSUPPORTED_TECH,
                                            bucket.full_name(), tech_module.getClass().getSimpleName()))));
                        }
                    }

                    return Patterns.match(m).<CompletableFuture<BucketActionReplyMessage>>andReturn()
                            .when(BucketActionMessage.BucketActionOfferMessage.class, msg -> {
                                final boolean accept_or_ignore = NodeRuleUtils.canRunOnThisNode(
                                        jobs.stream().map(j -> Optional.ofNullable(j.node_list_rules())),
                                        dim_context) && tech_module.canRunOnThisNode(bucket, jobs, context);

                                return CompletableFuture.completedFuture(accept_or_ignore
                                        ? new BucketActionReplyMessage.BucketActionWillAcceptMessage(source)
                                        : new BucketActionReplyMessage.BucketActionIgnoredMessage(source));
                            }).when(BucketActionMessage.DeleteBucketActionMessage.class, msg -> {
                                //(note have already told the sibling about this)

                                final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                        .onDeleteThread(bucket, jobs, context);
                                final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = perJobSetup
                                        .apply(jobs.stream(), Tuples._2T(true, false))
                                        .map(job -> Tuples._2T(job,
                                                (CompletableFuture<BasicMessageBean>) tech_module
                                                        .stopAnalyticJob(bucket, jobs, job, context)))
                                        .collect(Collectors.toList());

                                //(no need to call the context.completeJobOutput since we're deleting the bucket)
                                sendOnTriggerEventMessages(job_results, msg.bucket(),
                                        __ -> Optional.of(JobMessageType.stopping), me_sibling,
                                        _logging_service);

                                return combineResults(top_level_result,
                                        job_results.stream().map(jf -> jf._2()).collect(Collectors.toList()),
                                        source);
                            }).when(BucketActionMessage.NewBucketActionMessage.class, msg -> {
                                final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                        .onNewThread(bucket, jobs, context, !msg.is_suspended());

                                return top_level_result.thenCompose(ret_val -> {
                                    if (!ret_val.success()) {
                                        return combineResults(top_level_result, Arrays.asList(), source);
                                    } else { // success, carry on
                                        // Firstly, tell the sibling
                                        if (null != me_sibling)
                                            me_sibling._2().tell(msg, me_sibling._1());

                                        final boolean starting_thread = msg.is_suspended() ? false
                                                : perJobSetup.apply(jobs.stream(), Tuples._2T(false, true))
                                                        .anyMatch(job -> _batch_types
                                                                .contains(job.analytic_type()));

                                        if (starting_thread) {
                                            BasicMessageBean thread_start_result = tech_module.onThreadExecute(
                                                    bucket, jobs, Collections.emptyList(), context).join(); // (wait for completion before doing anything else)
                                            _logging_service.getSystemLogger(bucket).log(
                                                    thread_start_result.success() ? Level.INFO : Level.WARN,
                                                    ErrorUtils.lazyBuildMessage(false,
                                                            () -> DataBucketAnalyticsChangeActor.class
                                                                    .getSimpleName(),
                                                            () -> "talkToAnalytics", () -> null,
                                                            () -> ErrorUtils.get(
                                                                    "Executing thread for bucket {0}, success={1} (error={2})",
                                                                    bucket.full_name(),
                                                                    thread_start_result.success(),
                                                                    thread_start_result.success() ? "none"
                                                                            : thread_start_result.message()),
                                                            () -> Collections.emptyMap()));
                                        }

                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = msg
                                                .is_suspended()
                                                        ? Collections.emptyList()
                                                        : perJobSetup
                                                                .apply(jobs.stream(), Tuples._2T(false, true))
                                                                .map(job -> Tuples._2T(job,
                                                                        (CompletableFuture<BasicMessageBean>) tech_module
                                                                                .startAnalyticJob(bucket, jobs,
                                                                                        job, context)))
                                                                .collect(Collectors.toList());

                                        // Only send on trigger events for messages that started
                                        sendOnTriggerEventMessages(job_results, msg.bucket(), j_r -> {
                                            _logging_service.getSystemLogger(bucket).log(
                                                    j_r._2().success() ? Level.INFO : Level.WARN,
                                                    ErrorUtils.lazyBuildMessage(false,
                                                            () -> DataBucketAnalyticsChangeActor.class
                                                                    .getSimpleName(),
                                                            () -> "talkToAnalytics", () -> null,
                                                            () -> ErrorUtils.get(
                                                                    "Starting bucket:job {0}:{1} success={2}{3}",
                                                                    bucket.full_name(), j_r._1().name(),
                                                                    j_r._2().success(),
                                                                    j_r._2().success() ? ""
                                                                            : (" error = "
                                                                                    + j_r._2().message())),
                                                            () -> Collections.emptyMap()));

                                            return j_r._2().success() ? Optional.of(JobMessageType.starting)
                                                    : Optional.empty();
                                        }, me_sibling, _logging_service);

                                        return combineResults(top_level_result, job_results.stream()
                                                .map(jf -> jf._2()).collect(Collectors.toList()), source);
                                    }
                                });
                            }).when(BucketActionMessage.UpdateBucketActionMessage.class, msg -> {
                                final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                        .onUpdatedThread(msg.old_bucket(), bucket, jobs, msg.is_enabled(),
                                                Optional.empty(), context);

                                return top_level_result.thenCompose(ret_val -> {
                                    if (!ret_val.success()) {
                                        return combineResults(top_level_result, Arrays.asList(), source);
                                    } else { // success, carry on
                                        // Firstly, tell the sibling
                                        if (null != me_sibling)
                                            me_sibling._2().tell(msg, me_sibling._1());

                                        final boolean starting_thread = !msg.is_enabled() ? false
                                                : perJobSetup.apply(jobs.stream(), Tuples._2T(true, true))
                                                        .filter(job -> Optional.ofNullable(job.enabled())
                                                                .orElse(true))
                                                        .anyMatch(job -> _batch_types
                                                                .contains(job.analytic_type()));

                                        if (starting_thread) {
                                            BasicMessageBean thread_start_result = tech_module.onThreadExecute(
                                                    bucket, jobs, Collections.emptyList(), context).join(); // (wait for completion before doing anything else)
                                            _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                    ErrorUtils.lazyBuildMessage(false,
                                                            () -> DataBucketAnalyticsChangeActor.class
                                                                    .getSimpleName(),
                                                            () -> "talkToAnalytics", () -> null,
                                                            () -> ErrorUtils.get(
                                                                    "Executing thread for bucket {0}, success={1} (error={2})",
                                                                    bucket.full_name(),
                                                                    thread_start_result.success(),
                                                                    thread_start_result.success() ? "none"
                                                                            : thread_start_result.message()),
                                                            () -> Collections.emptyMap()));
                                        }
                                        //(don't need the analog for stopping because the trigger will give me the notification once all jobs are completed)

                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = perJobSetup
                                                .apply(jobs.stream(), Tuples._2T(true, msg.is_enabled()))
                                                .map(job -> Tuples._2T(job,
                                                        (CompletableFuture<BasicMessageBean>) ((msg.is_enabled()
                                                                && Optional.ofNullable(job.enabled())
                                                                        .orElse(true))
                                                                                ? tech_module.resumeAnalyticJob(
                                                                                        bucket, jobs, job,
                                                                                        context)
                                                                                : tech_module
                                                                                        .suspendAnalyticJob(
                                                                                                bucket, jobs,
                                                                                                job, context))))
                                                .collect(Collectors.toList());

                                        // Send all stop messages, and start messages for jobs that succeeded
                                        sendOnTriggerEventMessages(job_results, msg.bucket(), j_r -> {
                                            if (msg.is_enabled()
                                                    && Optional.ofNullable(j_r._1().enabled()).orElse(true)) {
                                                _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                        ErrorUtils.lazyBuildMessage(false,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Starting bucket:job {0}:{1} success={2}{3}",
                                                                        bucket.full_name(), j_r._1().name(),
                                                                        j_r._2().success(),
                                                                        j_r._2().success() ? ""
                                                                                : (" error = "
                                                                                        + j_r._2().message())),
                                                                () -> Collections.emptyMap()));
                                                return j_r._2().success() ? Optional.of(JobMessageType.starting)
                                                        : Optional.empty();
                                            } else { // either stopping all, or have disabled certain jobs
                                                _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                        ErrorUtils.lazyBuildMessage(false,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Stopping bucket:job {0}:{1}",
                                                                        bucket.full_name(), j_r._1().name()),
                                                                () -> Collections.emptyMap()));
                                                if (msg.is_enabled()) { //(else stopping the entire bucket)
                                                    context.completeJobOutput(msg.bucket(), j_r._1());
                                                }
                                                return Optional.of(JobMessageType.stopping);
                                            }
                                        }, me_sibling, _logging_service);

                                        return combineResults(top_level_result, job_results.stream()
                                                .map(jf -> jf._2()).collect(Collectors.toList()), source);
                                    }
                                });
                            }).when(BucketActionMessage.PurgeBucketActionMessage.class, msg -> {
                                final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                        .onPurge(bucket, jobs, context);
                                // (don't need to tell the sibling about this)

                                return combineResults(top_level_result, Collections.emptyList(), source);
                            }).when(BucketActionMessage.TestBucketActionMessage.class, msg -> {
                                final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                        .onTestThread(bucket, jobs, msg.test_spec(), context);
                                return top_level_result.thenCompose(ret_val -> {
                                    if (!ret_val.success()) {
                                        return combineResults(top_level_result, Arrays.asList(), source);
                                    } else { // success, carry on
                                        // Firstly, tell the sibling
                                        if (null != me_sibling)
                                            me_sibling._2().tell(msg, me_sibling._1());

                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = perJobSetup
                                                .apply(jobs.stream(), Tuples._2T(false, true))
                                                .map(job -> Tuples._2T(job,
                                                        (CompletableFuture<BasicMessageBean>) tech_module
                                                                .startAnalyticJobTest(bucket, jobs, job,
                                                                        msg.test_spec(), context)))
                                                .collect(Collectors.toList());

                                        // Only send on trigger events for messages that started
                                        sendOnTriggerEventMessages(job_results, msg.bucket(), j_r -> {
                                            _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                    ErrorUtils.lazyBuildMessage(false,
                                                            () -> DataBucketAnalyticsChangeActor.class
                                                                    .getSimpleName(),
                                                            () -> "talkToAnalytics", () -> null,
                                                            () -> ErrorUtils.get(
                                                                    "Starting test bucket:job {0}:{1} success={2}{3}",
                                                                    bucket.full_name(), j_r._1().name(),
                                                                    j_r._2().success(),
                                                                    j_r._2().success() ? ""
                                                                            : (" error = "
                                                                                    + j_r._2().message())),
                                                            () -> Collections.emptyMap()));
                                            return j_r._2().success() ? Optional.of(JobMessageType.starting)
                                                    : Optional.empty();
                                        }, me_sibling, _logging_service);

                                        return combineResults(top_level_result, job_results.stream()
                                                .map(jf -> jf._2()).collect(Collectors.toList()), source);
                                    }
                                });
                            }).when(BucketActionMessage.PollFreqBucketActionMessage.class, msg -> {
                                final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                        .onPeriodicPoll(bucket, jobs, context);

                                //(don't need to tell trigger sibling about this)

                                return combineResults(top_level_result, Collections.emptyList(), source);
                            })
                            // Finally, a bunch of analytic messages (don't tell trigger sibling about any of these)
                            .when(BucketActionMessage.BucketActionAnalyticJobMessage.class,
                                    msg -> (JobMessageType.check_completion == msg.type()), msg -> {
                                        // Check whether these jobs are complete, send message back to sibling asynchronously

                                        //(note: don't use perJobSetup for these explicit analytic event messages)
                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<Boolean>>> job_results = Optionals
                                                .ofNullable(msg.jobs()).stream()
                                                .peek(job -> setPerJobContextParams(job, context, libs)) //(WARNING: mutates context)
                                                .map(job -> Tuples._2T(job,
                                                        (CompletableFuture<Boolean>) tech_module
                                                                .checkAnalyticJobProgress(msg.bucket(),
                                                                        msg.jobs(), job, context)))
                                                .collect(Collectors.toList());

                                        // In addition (for now) just log the management results
                                        job_results.stream().forEach(jr -> {
                                            if (jr._2() instanceof ManagementFuture) {
                                                ManagementFuture<Boolean> jr2 = (ManagementFuture<Boolean>) jr
                                                        ._2();
                                                jr2.thenAccept(result -> {
                                                    if (result) {
                                                        jr2.getManagementResults().thenAccept(mgmt_results -> {
                                                            List<String> errs = mgmt_results.stream()
                                                                    .filter(res -> !res.success())
                                                                    .map(res -> res.message())
                                                                    .collect(Collectors.toList());
                                                            if (!errs.isEmpty()) {
                                                                _logging_service.getSystemLogger(bucket).log(
                                                                        Level.INFO,
                                                                        ErrorUtils.lazyBuildMessage(false,
                                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                                        .getSimpleName(),
                                                                                () -> "talkToAnalytics",
                                                                                () -> null,
                                                                                () -> ErrorUtils.get(
                                                                                        "Completed bucket:job {0}:{1} had errors: {2}",
                                                                                        bucket.full_name(),
                                                                                        jr._1().name(),
                                                                                        errs.stream().collect(
                                                                                                Collectors
                                                                                                        .joining(
                                                                                                                ";"))),
                                                                                () -> Collections.emptyMap()));
                                                            }
                                                        });
                                                    }
                                                });
                                            }
                                            //(it will always be)
                                        });

                                        sendOnTriggerEventMessages(job_results, msg.bucket(), t2 -> {
                                            if (t2._2()) {
                                                _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                        ErrorUtils.lazyBuildMessage(false,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Completed: bucket:job {0}:{1}",
                                                                        bucket.full_name(), t2._1().name()),
                                                                () -> Collections.emptyMap()));
                                                context.completeJobOutput(msg.bucket(), t2._1());
                                            }
                                            return t2._2() ? Optional.of(JobMessageType.stopping)
                                                    : Optional.empty();
                                        }, me_sibling, _logging_service);

                                        // Send a status message (which will be ignored)

                                        return CompletableFuture.completedFuture(
                                                new BucketActionReplyMessage.BucketActionNullReplyMessage());
                                    })
                            .when(BucketActionMessage.BucketActionAnalyticJobMessage.class,
                                    msg -> (JobMessageType.starting == msg.type()) && (null == msg.jobs()),
                                    msg -> {
                                        // Received a start notification for the bucket

                                        //TODO (ALEPH-12): get the matching triggers into the message
                                        final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                                .onThreadExecute(msg.bucket(), jobs, Collections.emptyList(),
                                                        context);

                                        //(ignore the reply apart from logging - failures will be identified by triggers)
                                        top_level_result.thenAccept(reply -> {
                                            if (!reply.success()) {
                                                _logging_service.getSystemLogger(bucket).log(Level.WARN,
                                                        ErrorUtils.lazyBuildMessage(false,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Error starting analytic thread {0}: message={1}",
                                                                        bucket.full_name(), reply.message()),
                                                                () -> Collections.emptyMap()));
                                            } else {
                                                _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                        ErrorUtils.lazyBuildMessage(true,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Started analytic thread {0}",
                                                                        bucket.full_name()),
                                                                () -> Collections.emptyMap()));
                                            }
                                        });

                                        // Now start any enabled jobs that have no dependencies
                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = jobs
                                                .stream()
                                                .filter(job -> Optional.ofNullable(job.enabled()).orElse(true))
                                                .filter(job -> Optionals.ofNullable(job.dependencies())
                                                        .isEmpty())
                                                .peek(job -> setPerJobContextParams(job, context, libs)) //(WARNING: mutates context)
                                                .map(job -> Tuples._2T(job,
                                                        (CompletableFuture<BasicMessageBean>) tech_module
                                                                .startAnalyticJob(msg.bucket(), jobs, job,
                                                                        context)))
                                                .collect(Collectors.toList());

                                        // Only send on trigger events for messages that started
                                        sendOnTriggerEventMessages(job_results, msg.bucket(), j_r -> {
                                            _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                    ErrorUtils.lazyBuildMessage(false,
                                                            () -> DataBucketAnalyticsChangeActor.class
                                                                    .getSimpleName(),
                                                            () -> "talkToAnalytics", () -> null,
                                                            () -> ErrorUtils.get(
                                                                    "Starting bucket:job {0}:{1} success={2}{3}",
                                                                    bucket.full_name(), j_r._1().name(),
                                                                    j_r._2().success(),
                                                                    j_r._2().success() ? ""
                                                                            : (" error = "
                                                                                    + j_r._2().message())),
                                                            () -> Collections.emptyMap()));
                                            return j_r._2().success() ? Optional.of(JobMessageType.starting)
                                                    : Optional.empty();
                                        }, me_sibling, _logging_service);

                                        // Send a status message (which will be ignored)

                                        return CompletableFuture.completedFuture(
                                                new BucketActionReplyMessage.BucketActionNullReplyMessage());
                                    })
                            .when(BucketActionMessage.BucketActionAnalyticJobMessage.class,
                                    msg -> (JobMessageType.starting == msg.type()) && (null != msg.jobs()),
                                    msg -> {
                                        // Received a start notification for 1+ of the jobs

                                        //(note: don't use perJobSetup for these explicit analytic event messages)
                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = msg
                                                .jobs().stream()
                                                .peek(job -> setPerJobContextParams(job, context, libs)) //(WARNING: mutates context)
                                                .map(job -> Tuples._2T(job,
                                                        (CompletableFuture<BasicMessageBean>) tech_module
                                                                .startAnalyticJob(msg.bucket(), jobs, job,
                                                                        context)))
                                                .collect(Collectors.toList());

                                        //(ignore the reply apart from logging - failures will be identified by triggers)
                                        job_results.forEach(job_res -> {
                                            job_res._2().thenAccept(res -> {
                                                if (!res.success()) {
                                                    _logging_service.getSystemLogger(bucket).log(Level.WARN,
                                                            ErrorUtils.lazyBuildMessage(false,
                                                                    () -> DataBucketAnalyticsChangeActor.class
                                                                            .getSimpleName(),
                                                                    () -> "talkToAnalytics", () -> null,
                                                                    () -> ErrorUtils.get(
                                                                            "Error starting analytic job {0}:{1}: message={2}",
                                                                            bucket.full_name(),
                                                                            job_res._1().name(), res.message()),
                                                                    () -> Collections.emptyMap()));
                                                } else {
                                                    _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                            ErrorUtils.lazyBuildMessage(true,
                                                                    () -> DataBucketAnalyticsChangeActor.class
                                                                            .getSimpleName(),
                                                                    () -> "talkToAnalytics", () -> null,
                                                                    () -> ErrorUtils.get(
                                                                            "Started analytic job {0}:{1}",
                                                                            bucket.full_name(),
                                                                            job_res._1().name()),
                                                                    () -> Collections.emptyMap()));
                                                }
                                            });
                                        });

                                        // Send a status message (which will be ignored)

                                        return CompletableFuture.completedFuture(
                                                new BucketActionReplyMessage.BucketActionNullReplyMessage());
                                    })
                            .when(BucketActionMessage.BucketActionAnalyticJobMessage.class,
                                    msg -> (JobMessageType.stopping == msg.type()) && (null == msg.jobs()),
                                    msg -> {
                                        // Received a stop notification for the bucket

                                        // Complete the job output
                                        context.completeBucketOutput(msg.bucket());

                                        final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                                .onThreadComplete(msg.bucket(), jobs, context);

                                        //(ignore the reply apart from logging - failures will be identified by triggers)
                                        top_level_result.thenAccept(reply -> {
                                            if (!reply.success()) {
                                                _logging_service.getSystemLogger(bucket).log(Level.WARN,
                                                        ErrorUtils.lazyBuildMessage(false,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Error stopping analytic thread {0}: message={1}",
                                                                        bucket.full_name(), reply.message()),
                                                                () -> Collections.emptyMap()));
                                            } else {
                                                _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                        ErrorUtils.lazyBuildMessage(true,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Stopping analytic thread {0}",
                                                                        bucket.full_name()),
                                                                () -> Collections.emptyMap()));
                                            }
                                        });

                                        // Send a status message (which will be ignored)

                                        return CompletableFuture.completedFuture(
                                                new BucketActionReplyMessage.BucketActionNullReplyMessage());
                                    })
                            .when(BucketActionMessage.BucketActionAnalyticJobMessage.class,
                                    msg -> (JobMessageType.stopping == msg.type()) && (null != msg.jobs()),
                                    msg -> {
                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = msg
                                                .jobs().stream()
                                                .peek(job -> setPerJobContextParams(job, context, libs)) //(WARNING: mutates context)
                                                .map(job -> Tuples._2T(job,
                                                        (CompletableFuture<BasicMessageBean>) tech_module
                                                                .suspendAnalyticJob(msg.bucket(), jobs, job,
                                                                        context)))
                                                .collect(Collectors.toList());

                                        //(ignore the reply apart from logging - failures will be identified by triggers)
                                        job_results.forEach(job_res -> {
                                            job_res._2().thenAccept(res -> {
                                                if (!res.success()) {
                                                    _logging_service.getSystemLogger(bucket).log(Level.WARN,
                                                            ErrorUtils.lazyBuildMessage(false,
                                                                    () -> DataBucketAnalyticsChangeActor.class
                                                                            .getSimpleName(),
                                                                    () -> "talkToAnalytics", () -> null,
                                                                    () -> ErrorUtils.get(
                                                                            "Error stopping analytic job {0}:{1}: message={2}",
                                                                            bucket.full_name(),
                                                                            job_res._1().name(), res.message()),
                                                                    () -> Collections.emptyMap()));
                                                } else {
                                                    _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                            ErrorUtils.lazyBuildMessage(true,
                                                                    () -> DataBucketAnalyticsChangeActor.class
                                                                            .getSimpleName(),
                                                                    () -> "talkToAnalytics", () -> null,
                                                                    () -> ErrorUtils.get(
                                                                            "Stopping analytic job {0}:{1}",
                                                                            bucket.full_name(),
                                                                            job_res._1().name()),
                                                                    () -> Collections.emptyMap()));
                                                }
                                            });
                                        });

                                        // Send a status message (which will be ignored)

                                        return CompletableFuture.completedFuture(
                                                new BucketActionReplyMessage.BucketActionNullReplyMessage());
                                    })
                            .when(BucketActionMessage.BucketActionAnalyticJobMessage.class,
                                    msg -> (JobMessageType.deleting == msg.type()), msg -> {
                                        // This is different because it happens as part of a user action related to buckets, whereas stopping occurs based on trigger-related actions

                                        final CompletableFuture<BasicMessageBean> top_level_result = CompletableFuture
                                                .completedFuture(ErrorUtils.buildSuccessMessage(
                                                        DataBucketAnalyticsChangeActor.class.getSimpleName(),
                                                        "BucketActionAnalyticJobMessage:deleting", ""));

                                        final List<CompletableFuture<BasicMessageBean>> job_results = Optionals
                                                .ofNullable(msg.jobs()).stream().map(job -> tech_module
                                                        .suspendAnalyticJob(bucket, jobs, job, context))
                                                .collect(Collectors.toList());

                                        // Hence, unlike the cases above, do return a genuine reply message here

                                        return combineResults(top_level_result, job_results, source);
                                    })
                            .otherwise(msg -> { // return "command not recognized" error
                                return CompletableFuture.completedFuture(new BucketActionHandlerMessage(source,
                                        SharedErrorUtils.buildErrorMessage(source, m,
                                                AnalyticsErrorUtils.MESSAGE_NOT_RECOGNIZED, bucket.full_name(),
                                                m.getClass().getSimpleName())));
                            });
                });
    } catch (Throwable e) { // (trying to use Validation to avoid this, but just in case...)
        return CompletableFuture.completedFuture(new BucketActionHandlerMessage(source,
                SharedErrorUtils.buildErrorMessage(source, m,
                        ErrorUtils.getLongForm(SharedErrorUtils.ERROR_LOADING_CLASS, e,
                                err_or_tech_module.success()._1().getClass()))));
    } finally {
        Thread.currentThread().setContextClassLoader(saved_current_classloader);
    }
}
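
The combineResults(...) call above (in the deleting case) merges the top-level BasicMessageBean future with the per-job futures before any reply is sent. A minimal generic sketch of that shape, which assumes nothing about the real Aleph2 helper, is to thenCompose the top-level future with an allOf join of the job results:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;

// Sketch only: compose one "top-level" future with N per-job futures,
// yielding a single future that completes with all of the results.
static <T> CompletableFuture<List<T>> combineResultsSketch(CompletableFuture<T> topLevel,
        List<CompletableFuture<T>> jobResults) {
    return topLevel.thenCompose(top -> CompletableFuture
            .allOf(jobResults.toArray(new CompletableFuture[0]))
            .thenApply(ignored -> {
                List<T> all = new ArrayList<>();
                all.add(top);
                jobResults.forEach(f -> all.add(f.join())); // join() cannot block here: allOf has completed
                return all;
            }));
}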

From source file:org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void testTrimOccursDuringOffload() throws Exception {
    CountDownLatch offloadStarted = new CountDownLatch(1);
    CompletableFuture<Void> blocker = new CompletableFuture<>();
    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                Map<String, String> extraMetadata) {
            offloadStarted.countDown();
            return blocker.thenCompose((f) -> super.offload(ledger, uuid, extraMetadata));
        }
    };

    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setMinimumRolloverTime(0, TimeUnit.SECONDS);
    config.setRetentionTime(0, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);
    ManagedCursor cursor = ledger.openCursor("foobar");

    // Create three ledgers; save the positions at the start of the second and third
    for (int i = 0; i < 21; i++) {
        String content = "entry-" + i;
        ledger.addEntry(content.getBytes());
    }
    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 3);

    PositionImpl startOfSecondLedger = PositionImpl.get(ledger.getLedgersInfoAsList().get(1).getLedgerId(), 0);
    PositionImpl startOfThirdLedger = PositionImpl.get(ledger.getLedgersInfoAsList().get(2).getLedgerId(), 0);

    // trigger an offload which should offload the first two ledgers
    OffloadCallbackPromise cbPromise = new OffloadCallbackPromise();
    ledger.asyncOffloadPrefix(startOfThirdLedger, cbPromise, null);
    offloadStarted.await();

    // trim first ledger
    cursor.markDelete(startOfSecondLedger, new HashMap<>());
    assertEventuallyTrue(() -> ledger.getLedgersInfoAsList().size() == 2);
    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getOffloadContext().getComplete()).count(), 0);

    // complete offloading
    blocker.complete(null);
    cbPromise.get();

    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 2);
    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getOffloadContext().getComplete()).count(), 1);
    Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete());
    Assert.assertEquals(offloader.offloadedLedgers().size(), 1);
    Assert.assertTrue(
            offloader.offloadedLedgers().contains(ledger.getLedgersInfoAsList().get(0).getLedgerId()));
}
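
The test above gates the offload on an incomplete CompletableFuture: the real work is chained behind blocker with thenCompose, so it cannot run until blocker.complete(null) is called, and the trim can be interleaved in the meantime. A minimal standalone sketch of the gate pattern (hypothetical strings, not the Pulsar test code):

import java.util.concurrent.CompletableFuture;

CompletableFuture<Void> blocker = new CompletableFuture<>();
// Nothing downstream of thenCompose runs until the blocker completes.
CompletableFuture<String> gated =
        blocker.thenCompose(v -> CompletableFuture.supplyAsync(() -> "offload done"));
// ... interleave other work here (e.g. trim a ledger) while the offload is held ...
blocker.complete(null);           // release the gate
System.out.println(gated.join()); // prints "offload done"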

From source file:org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void testTrimOccursDuringOffloadLedgerDeletedBeforeOffload() throws Exception {
    CountDownLatch offloadStarted = new CountDownLatch(1);
    CompletableFuture<Long> blocker = new CompletableFuture<>();
    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                Map<String, String> extraMetadata) {
            offloadStarted.countDown();
            return blocker.thenCompose((trimmedLedger) -> {
                if (trimmedLedger == ledger.getId()) {
                    CompletableFuture<Void> future = new CompletableFuture<>();
                    future.completeExceptionally(new BKException.BKNoSuchLedgerExistsException());
                    return future;
                } else {
                    return super.offload(ledger, uuid, extraMetadata);
                }
            });
        }
    };

    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setMinimumRolloverTime(0, TimeUnit.SECONDS);
    config.setRetentionTime(0, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);
    ManagedCursor cursor = ledger.openCursor("foobar");

    for (int i = 0; i < 21; i++) {
        String content = "entry-" + i;
        ledger.addEntry(content.getBytes());
    }
    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 3);

    PositionImpl startOfSecondLedger = PositionImpl.get(ledger.getLedgersInfoAsList().get(1).getLedgerId(), 0);
    PositionImpl startOfThirdLedger = PositionImpl.get(ledger.getLedgersInfoAsList().get(2).getLedgerId(), 0);

    // trigger an offload which should offload the first two ledgers
    OffloadCallbackPromise cbPromise = new OffloadCallbackPromise();
    ledger.asyncOffloadPrefix(startOfThirdLedger, cbPromise, null);
    offloadStarted.await();

    // trim first ledger
    long trimmedLedger = ledger.getLedgersInfoAsList().get(0).getLedgerId();
    cursor.markDelete(startOfSecondLedger, new HashMap<>());
    assertEventuallyTrue(() -> ledger.getLedgersInfoAsList().size() == 2);
    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getLedgerId() == trimmedLedger).count(), 0);
    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getOffloadContext().getComplete()).count(), 0);

    // complete offloading
    blocker.complete(trimmedLedger);
    cbPromise.get();

    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 2);
    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getOffloadContext().getComplete()).count(), 1);
    Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete());
    Assert.assertEquals(offloader.offloadedLedgers().size(), 1);
    Assert.assertTrue(
            offloader.offloadedLedgers().contains(ledger.getLedgersInfoAsList().get(0).getLedgerId()));
}
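
This variant makes the gate carry data: the blocker is completed with the id of the trimmed ledger, and the composed stage inspects that value to decide whether to fail (the ledger is already gone) or to offload normally. A minimal sketch of a value-carrying gate, using a hypothetical id:

import java.util.concurrent.CompletableFuture;

CompletableFuture<Long> blocker = new CompletableFuture<>();
CompletableFuture<String> gated = blocker.thenCompose(trimmedId ->
        trimmedId == 42L
                ? CompletableFuture.completedFuture("skip: ledger 42 was trimmed")
                : CompletableFuture.completedFuture("offload ledger " + trimmedId));
blocker.complete(42L);            // the gate opens and delivers the trimmed id
System.out.println(gated.join()); // prints "skip: ledger 42 was trimmed"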

From source file:org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void testOffloadConflict() throws Exception {
    Set<Pair<Long, UUID>> deleted = ConcurrentHashMap.newKeySet();
    CompletableFuture<Set<Long>> errorLedgers = new CompletableFuture<>();
    Set<Pair<Long, UUID>> failedOffloads = ConcurrentHashMap.newKeySet();

    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                Map<String, String> extraMetadata) {
            return errorLedgers.thenCompose((errors) -> {
                if (errors.remove(ledger.getId())) {
                    failedOffloads.add(Pair.of(ledger.getId(), uuid));
                    CompletableFuture<Void> future = new CompletableFuture<>();
                    future.completeExceptionally(new Exception("Some kind of error"));
                    return future;
                } else {
                    return super.offload(ledger, uuid, extraMetadata);
                }
            });
        }

        @Override
        public CompletableFuture<Void> deleteOffloaded(long ledgerId, UUID uuid,
                Map<String, String> offloadDriverMetadata) {
            deleted.add(Pair.of(ledgerId, uuid));
            return super.deleteOffloaded(ledgerId, uuid, offloadDriverMetadata);
        }
    };
    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setMinimumRolloverTime(0, TimeUnit.SECONDS);
    config.setRetentionTime(10, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);

    for (int i = 0; i < 15; i++) {
        String content = "entry-" + i;
        ledger.addEntry(content.getBytes());
    }

    Set<Long> errorSet = ConcurrentHashMap.newKeySet();
    errorSet.add(ledger.getLedgersInfoAsList().get(0).getLedgerId());
    errorLedgers.complete(errorSet);

    try {
        ledger.offloadPrefix(ledger.getLastConfirmedEntry());
    } catch (ManagedLedgerException e) {
        // expected
    }
    Assert.assertTrue(errorSet.isEmpty());
    Assert.assertEquals(failedOffloads.size(), 1);
    Assert.assertEquals(deleted.size(), 0);

    long expectedFailedLedger = ledger.getLedgersInfoAsList().get(0).getLedgerId();
    UUID expectedFailedUUID = new UUID(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidMsb(),
            ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidLsb());
    Assert.assertEquals(failedOffloads.stream().findFirst().get(),
            Pair.of(expectedFailedLedger, expectedFailedUUID));
    Assert.assertFalse(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete());

    // try offload again
    ledger.offloadPrefix(ledger.getLastConfirmedEntry());

    Assert.assertEquals(failedOffloads.size(), 1);
    Assert.assertEquals(deleted.size(), 1);
    Assert.assertEquals(deleted.stream().findFirst().get(), Pair.of(expectedFailedLedger, expectedFailedUUID));
    UUID successUUID = new UUID(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidMsb(),
            ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidLsb());
    Assert.assertFalse(successUUID.equals(expectedFailedUUID));
    Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete());
}
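
As the failing branch of the mock offloader shows, a thenCompose stage can fail the whole chain by returning an exceptionally completed future; callers of join() or get() then see the failure wrapped in a CompletionException (or ExecutionException). A minimal sketch of that propagation, with a hypothetical id check:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

CompletableFuture<Void> result = CompletableFuture.completedFuture(1L).thenCompose(id -> {
    if (id == 1L) { // hypothetical "bad ledger" check
        CompletableFuture<Void> failed = new CompletableFuture<>();
        failed.completeExceptionally(new Exception("Some kind of error"));
        return failed;
    }
    return CompletableFuture.<Void>completedFuture(null);
});
try {
    result.join();
} catch (CompletionException e) {
    System.out.println("propagated: " + e.getCause().getMessage()); // prints "propagated: Some kind of error"
}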