List of usage examples for java.util.concurrent CompletableFuture completedFuture
public static <U> CompletableFuture<U> completedFuture(U value)
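The method wraps an already-available value in a future that is complete from the moment it is created: no thread is spawned, and join()/get() return immediately. It is typically used to satisfy an asynchronous interface from a synchronous code path, as the examples below show. First, a minimal self-contained sketch (class and variable names are illustrative, not from any of the projects below):

import java.util.concurrent.CompletableFuture;

public class CompletedFutureExample {
    public static void main(String[] args) {
        // Wrap a known value; the future is already complete, so no thread is spawned.
        CompletableFuture<String> future = CompletableFuture.completedFuture("hello");

        // join() returns immediately because there is nothing left to wait for.
        System.out.println(future.join()); // hello

        // Downstream stages still compose as usual.
        System.out.println(future.thenApply(String::length).join()); // 5
    }
}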
From source file:com.ikanow.aleph2.data_model.interfaces.shared_services.MockManagementCrudService.java
@Override
public ManagementFuture<Optional<T>> updateAndReturnObjectBySpec(QueryComponent<T> unique_spec,
        Optional<Boolean> upsert, UpdateComponent<T> update, Optional<Boolean> before_updated,
        List<String> field_list, boolean include) {
    return FutureUtils
            .createManagementFuture(CompletableFuture.completedFuture(_mutable_values.stream().findFirst()));
}
From source file:com.ikanow.aleph2.management_db.controllers.actors.BucketDeletionActor.java
/** Deletes the data in all data services
 * TODO (ALEPH-26): assume default ones for now
 * @param bucket - the bucket to cleanse
 */
public static CompletableFuture<Collection<BasicMessageBean>> deleteAllDataStoresForBucket(
        final DataBucketBean bucket, final IServiceContext service_context, boolean delete_bucket) {
    // Currently the only supported data service is the search index
    try {
        final LinkedList<CompletableFuture<BasicMessageBean>> vals = new LinkedList<>();
        service_context.listServiceProviders().stream().map(t3 -> t3._1().get())
                .filter(s -> IDataServiceProvider.class.isAssignableFrom(s.getClass()))
                .map(s -> (IDataServiceProvider) s).distinct().forEach(service -> {
                    if (!(delete_bucket && IStorageService.class.isAssignableFrom(service.getClass()))) {
                        // if deleting the bucket then don't need to remove the storage path
                        service.getDataService().ifPresent(ds -> vals
                                .add(ds.handleBucketDeletionRequest(bucket, Optional.empty(), delete_bucket)));
                    }
                });
        return CompletableFuture.allOf(vals.toArray(new CompletableFuture[0])).thenApply(__ -> {
            return vals.stream().map(x -> x.join()).collect(Collectors.toList());
        });
    } catch (Throwable t) {
        return CompletableFuture.completedFuture(
                Arrays.asList(ErrorUtils.buildErrorMessage(BucketDeletionActor.class.getSimpleName(),
                        "deleteAllDataStoresForBucket", ErrorUtils.getLongForm("{0}", t))));
    }
}
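The allOf(...).thenApply(... join ...) idiom above is the standard way to turn a list of futures into a future of a list; the inner join() calls cannot block, because allOf guarantees every input has already completed. A minimal sketch of the idiom on its own (names are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

public class AllOfExample {
    public static void main(String[] args) {
        List<CompletableFuture<String>> vals = Arrays.asList(
                CompletableFuture.completedFuture("a"),
                CompletableFuture.completedFuture("b"));

        // allOf completes when every input does; join() inside thenApply is
        // therefore non-blocking here.
        CompletableFuture<List<String>> all = CompletableFuture
                .allOf(vals.toArray(new CompletableFuture[0]))
                .thenApply(__ -> vals.stream().map(CompletableFuture::join).collect(Collectors.toList()));

        System.out.println(all.join()); // [a, b]
    }
}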
From source file:com.ikanow.aleph2.example.flume_harvester.services.FlumeHarvestTechnology.java
/**
 * @param to_suspend
 * @param context
 * @return
 */
protected CompletableFuture<BasicMessageBean> onSuspend(DataBucketBean to_suspend, IHarvestContext context) {
    try {
        int stopped = removeAgentConfigs(to_suspend, 1);
        FlumeLaunchUtils.killProcess(FlumeLaunchUtils.getPid(to_suspend));
        if (BucketUtils.isTestBucket(to_suspend)) {
            // (clean up any generated files now that the test is over)
            FlumeUtils.getAgents(to_suspend).stream()
                    .forEach(agent -> FlumeUtils.deleteGeneratedDirs(to_suspend, agent, true));
        }
        return CompletableFuture.completedFuture(new BasicMessageBean(new Date(), true, "onSuspend",
                "onSuspend", null, "Stopped " + stopped + " agents", null));
    } catch (Exception e) {
        _logger.error(ErrorUtils.getLongForm("onSuspend: Unknown error {0}", e));
        return FutureUtils.returnError(e);
    }
}
From source file:com.ikanow.aleph2.data_model.interfaces.shared_services.MockManagementCrudService.java
@Override
public ManagementFuture<Boolean> deleteObjectById(Object id) {
    if (null == id) {
        return FutureUtils.createManagementFuture(CompletableFuture.completedFuture(false));
    }
    if (!_mutable_values.isEmpty()) {
        // simulate a successful delete by removing the first stored value
        _mutable_values.remove(0);
        return FutureUtils.createManagementFuture(CompletableFuture.completedFuture(true));
    } else {
        return FutureUtils.createManagementFuture(CompletableFuture.completedFuture(false));
    }
}
From source file:com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_LibraryJars.java
/** Top level logic for source synchronization
 * @param library_mgmt
 * @param share_db
 */
protected CompletableFuture<Void> synchronizeLibraryJars(
        final IManagementCrudService<SharedLibraryBean> library_mgmt, final IStorageService aleph2_fs,
        final ICrudService<JsonNode> share_db, final GridFS share_fs) {
    return compareJarsToLibaryBeans_get(library_mgmt, share_db).thenApply(v1_v2 -> {
        return compareJarsToLibraryBeans_categorize(v1_v2);
    }).thenCompose(create_update_delete -> {
        if (create_update_delete._1().isEmpty() && create_update_delete._2().isEmpty()
                && create_update_delete._3().isEmpty()) {
            // (nothing to do)
            return CompletableFuture.completedFuture(null);
        }
        _logger.info(ErrorUtils.get("Found [create={0}, delete={1}, update={2}] sources",
                create_update_delete._1().size(), create_update_delete._2().size(),
                create_update_delete._3().size()));

        final List<CompletableFuture<Boolean>> l1 = create_update_delete._1().stream().parallel()
                .<Tuple2<String, ManagementFuture<?>>>map(id -> Tuples._2T(id,
                        createLibraryBean(id, library_mgmt, aleph2_fs, true, share_db, share_fs, _context)))
                .<CompletableFuture<Boolean>>map(id_fres -> updateV1ShareErrorStatus_top(id_fres._1(),
                        id_fres._2(), library_mgmt, share_db, true))
                .collect(Collectors.toList());

        final List<CompletableFuture<Boolean>> l2 = create_update_delete._2().stream().parallel()
                .<Tuple2<String, ManagementFuture<?>>>map(
                        id -> Tuples._2T(id, deleteLibraryBean(id, library_mgmt, aleph2_fs)))
                .<CompletableFuture<Boolean>>map(id_fres -> CompletableFuture.completedFuture(true))
                .collect(Collectors.toList());

        final List<CompletableFuture<Boolean>> l3 = create_update_delete._3().stream().parallel()
                .<Tuple2<String, ManagementFuture<?>>>map(id -> Tuples._2T(id,
                        createLibraryBean(id, library_mgmt, aleph2_fs, false, share_db, share_fs, _context)))
                .<CompletableFuture<Boolean>>map(id_fres -> updateV1ShareErrorStatus_top(id_fres._1(),
                        id_fres._2(), library_mgmt, share_db, false))
                .collect(Collectors.toList());

        List<CompletableFuture<?>> retval = Arrays.asList(l1, l2, l3).stream().flatMap(l -> l.stream())
                .collect(Collectors.toList());

        return CompletableFuture.allOf(retval.toArray(new CompletableFuture[0]));
    });
}
From source file:com.ikanow.aleph2.harvest.logstash.services.LogstashHarvestService.java
@Override
public CompletableFuture<BasicMessageBean> onPeriodicPoll(DataBucketBean polled_bucket,
        IHarvestContext context) {
    final LogstashBucketConfigBean config = Optionals.ofNullable(polled_bucket.harvest_configs()).stream()
            .findFirst().map(cfg -> BeanTemplateUtils.from(cfg.config(), LogstashBucketConfigBean.class).get())
            .orElse(BeanTemplateUtils.build(LogstashBucketConfigBean.class).done().get());

    // check if the job is still running
    // if yes: report it's running
    // if no: restart job
    if (isConfigRunning(polled_bucket, config, _globals.get())) {
        return CompletableFuture.completedFuture(ErrorUtils.buildMessage(true, this.getClass().getSimpleName(),
                "onPeriodicPoll", "Config is currently running!"));
    } else {
        return CompletableFuture
                .completedFuture(startOrUpdateLogstash(polled_bucket, config, _globals.get(), context));
    }
}
From source file:io.pravega.controller.task.Stream.StreamMetadataTasks.java
private CompletableFuture<Void> truncate(String scope, String stream, RetentionPolicy policy,
        OperationContext context, List<StreamCutRecord> retentionSet, long recordingTime) {
    return findTruncationRecord(policy, retentionSet, recordingTime)
            .map(record -> startTruncation(scope, stream, record.getStreamCut(), context)
                    .thenCompose(started -> {
                        if (started) {
                            return streamMetadataStore.deleteStreamCutBefore(scope, stream, record, context,
                                    executor);
                        } else {
                            throw new RuntimeException("Could not start truncation");
                        }
                    }))
            .orElse(CompletableFuture.completedFuture(null));
}
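The .map(...).orElse(CompletableFuture.completedFuture(null)) shape is worth noting: when the Optional is empty, the method still returns a well-formed, already-completed CompletableFuture<Void> rather than null. A minimal sketch of the same shape (the maybeRun name and its body are illustrative):

import java.util.Optional;
import java.util.concurrent.CompletableFuture;

public class OptionalElseExample {
    static CompletableFuture<Void> maybeRun(Optional<String> maybe) {
        return maybe
                .map(v -> CompletableFuture.runAsync(() -> System.out.println("processing " + v)))
                // empty case: a completed no-op future, never null
                .orElse(CompletableFuture.completedFuture(null));
    }

    public static void main(String[] args) {
        maybeRun(Optional.of("record")).join();
        maybeRun(Optional.empty()).join(); // completes immediately, does nothing
    }
}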
From source file:com.ikanow.aleph2.aleph2_rest_utils.DataStoreCrudService.java
@Override
public CompletableFuture<Long> countObjectsBySpec(QueryComponent<FileDescriptor> spec) {
    try {
        return CompletableFuture.completedFuture(
                new DataStoreCursor(getFolderFilenames(output_directory, fileContext)).count());
    } catch (IllegalArgumentException | IOException e) {
        final CompletableFuture<Long> fut = new CompletableFuture<Long>();
        fut.completeExceptionally(e);
        return fut;
    }
}
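The catch block shows completedFuture's error-path counterpart: Java 8 has no factory for an already-failed future, so one is built by hand with completeExceptionally (Java 9+ adds CompletableFuture.failedFuture(Throwable) for this). A minimal sketch of the success/failure pair (names are illustrative):

import java.util.concurrent.CompletableFuture;

public class FailedFutureExample {
    static CompletableFuture<Long> count(boolean fail) {
        if (!fail) {
            return CompletableFuture.completedFuture(42L);
        }
        // Java 8 idiom for an already-failed future.
        CompletableFuture<Long> fut = new CompletableFuture<>();
        fut.completeExceptionally(new IllegalArgumentException("bad spec"));
        return fut;
    }

    public static void main(String[] args) {
        System.out.println(count(false).join()); // 42
        count(true).whenComplete((v, t) -> System.out.println("failed: " + t));
    }
}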
From source file:io.pravega.controller.store.stream.AbstractStreamMetadataStore.java
@Override
public CompletableFuture<List<Segment>> getActiveSegments(final String scope, final String name,
        final OperationContext context, final Executor executor) {
    final Stream stream = getStream(scope, name, context);
    return withCompletion(stream.getState().thenComposeAsync(state -> {
        if (State.SEALED.equals(state)) {
            return CompletableFuture.completedFuture(Collections.<Integer>emptyList());
        } else {
            return stream.getActiveSegments();
        }
    }, executor).thenComposeAsync(
            currentSegments -> FutureHelpers.allOfWithResults(
                    currentSegments.stream().map(stream::getSegment).collect(Collectors.toList())),
            executor), executor);
}
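The SEALED branch illustrates the most common reason completedFuture appears inside thenCompose chains: every branch must return a future of the same type, so the synchronously known answer is wrapped in an already-completed one. A minimal sketch of the branching idiom (fetchSegments is a hypothetical stand-in for stream.getActiveSegments()):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CompletableFuture;

public class BranchExample {
    static CompletableFuture<List<Integer>> fetchSegments() {
        return CompletableFuture.supplyAsync(() -> Arrays.asList(1, 2, 3));
    }

    static CompletableFuture<List<Integer>> activeSegments(boolean sealed) {
        return CompletableFuture.supplyAsync(() -> sealed).thenCompose(isSealed -> {
            if (isSealed) {
                // synchronous short-circuit, wrapped so both branches line up
                return CompletableFuture.completedFuture(Collections.<Integer>emptyList());
            }
            return fetchSegments();
        });
    }

    public static void main(String[] args) {
        System.out.println(activeSegments(true).join());  // []
        System.out.println(activeSegments(false).join()); // [1, 2, 3]
    }
}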
From source file:io.pravega.segmentstore.server.host.ZKSegmentContainerMonitorTest.java
@Test
public void testClose() throws Exception {
    @Cleanup
    CuratorFramework zkClient = startClient();
    initializeHostContainerMapping(zkClient);

    SegmentContainerRegistry containerRegistry = mock(SegmentContainerRegistry.class);
    ContainerHandle containerHandle1 = mock(ContainerHandle.class);
    when(containerHandle1.getContainerId()).thenReturn(1);
    when(containerRegistry.startContainer(eq(1), any()))
            .thenReturn(CompletableFuture.completedFuture(containerHandle1));
    when(containerRegistry.stopContainer(any(), any())).thenReturn(CompletableFuture.completedFuture(null));

    ZKSegmentContainerMonitor segMonitor = createContainerMonitor(containerRegistry, zkClient);
    segMonitor.initialize(Duration.ofSeconds(1));
    segMonitor.close();
    assertEquals(0, segMonitor.getRegisteredContainers().size());
}
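Stubbing with completedFuture, as above, keeps asynchronous APIs deterministic in tests: the mock hands back a ready result on the caller's thread, so nothing ever waits. A stripped-down sketch of the pattern (assumes mockito-core on the classpath; the AsyncService interface is illustrative):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.concurrent.CompletableFuture;

public class MockAsyncExample {
    interface AsyncService {
        CompletableFuture<String> fetch(String key);
    }

    public static void main(String[] args) {
        AsyncService service = mock(AsyncService.class);
        // The stub returns an already-completed future, so the test never blocks.
        when(service.fetch("k")).thenReturn(CompletableFuture.completedFuture("v"));
        System.out.println(service.fetch("k").join()); // v
    }
}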