Example usage for com.google.common.util.concurrent Futures allAsList

Introduction

On this page you can find usage examples for com.google.common.util.concurrent Futures.allAsList.

Prototype

@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> allAsList(
        Iterable<? extends ListenableFuture<? extends V>> futures) 

Document

Creates a new ListenableFuture whose value is a list containing the values of all its input futures, if all succeed. The list of results is in the same order as the input futures; if any input fails, the returned future fails.

Usage
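
The examples below come from real projects. As a warm-up, here is a minimal standalone sketch of the contract described above (not taken from any of these projects; the class name AllAsListSketch and the sample tasks are made up for illustration):

import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;

public class AllAsListSketch {
    public static void main(String[] args) throws Exception {
        // Decorate a plain executor so that submit() returns ListenableFuture.
        ListeningExecutorService executor = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(2));

        ListenableFuture<Integer> first = executor.submit(() -> 1);
        ListenableFuture<Integer> second = executor.submit(() -> 2);

        // All inputs succeed: the combined future yields the values in input order.
        ListenableFuture<List<Integer>> all = Futures.allAsList(ImmutableList.of(first, second));
        System.out.println(all.get()); // prints [1, 2]

        // One input fails: the combined future fails with that input's exception.
        ListenableFuture<Integer> failing = Futures
                .immediateFailedFuture(new IllegalStateException("boom"));
        try {
            Futures.allAsList(ImmutableList.of(first, failing)).get();
        } catch (ExecutionException e) {
            System.out.println("combined future failed: " + e.getCause()); // boom
        }

        executor.shutdown();
    }
}

Calling get() on the combined future wraps the failed input's exception in an ExecutionException, which is why several examples below unwrap e.getCause(). For callback-style handling without blocking, pass the combined future to Futures.addCallback, as the Druid and Crate examples do.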

From source file:io.druid.client.CachingClusteredClient.java

@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> responseContext) {
    final QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
    final CacheStrategy<T, Object, Query<T>> strategy = toolChest.getCacheStrategy(query);

    final Map<DruidServer, List<SegmentDescriptor>> serverSegments = Maps.newTreeMap();

    final List<Pair<Interval, byte[]>> cachedResults = Lists.newArrayList();
    final Map<String, CachePopulator> cachePopulatorMap = Maps.newHashMap();

    final boolean useCache = query.getContextUseCache(true) && strategy != null && cacheConfig.isUseCache()
            && cacheConfig.isQueryCacheable(query);
    final boolean populateCache = query.getContextPopulateCache(true) && strategy != null
            && cacheConfig.isPopulateCache() && cacheConfig.isQueryCacheable(query);
    final boolean isBySegment = query.getContextBySegment(false);

    final ImmutableMap.Builder<String, Object> contextBuilder = new ImmutableMap.Builder<>();

    final int priority = query.getContextPriority(0);
    contextBuilder.put("priority", priority);

    if (populateCache) {
        // prevent down-stream nodes from caching results as well if we are populating the cache
        contextBuilder.put(CacheConfig.POPULATE_CACHE, false);
        contextBuilder.put("bySegment", true);
    }
    contextBuilder.put("intermediate", true);

    TimelineLookup<String, ServerSelector> timeline = serverView.getTimeline(query.getDataSource());

    if (timeline == null) {
        return Sequences.empty();
    }

    // build set of segments to query
    Set<Pair<ServerSelector, SegmentDescriptor>> segments = Sets.newLinkedHashSet();

    List<TimelineObjectHolder<String, ServerSelector>> serversLookup = Lists.newLinkedList();

    for (Interval interval : query.getIntervals()) {
        Iterables.addAll(serversLookup, timeline.lookup(interval));
    }

    // Let tool chest filter out unneeded segments
    final List<TimelineObjectHolder<String, ServerSelector>> filteredServersLookup = toolChest
            .filterSegments(query, serversLookup);

    for (TimelineObjectHolder<String, ServerSelector> holder : filteredServersLookup) {
        for (PartitionChunk<ServerSelector> chunk : holder.getObject()) {
            ServerSelector selector = chunk.getObject();
            final SegmentDescriptor descriptor = new SegmentDescriptor(holder.getInterval(),
                    holder.getVersion(), chunk.getChunkNumber());

            segments.add(Pair.of(selector, descriptor));
        }
    }

    final byte[] queryCacheKey;

    if ((populateCache || useCache) // implies strategy != null
            && !isBySegment) // explicit bySegment queries are never cached
    {
        queryCacheKey = strategy.computeCacheKey(query);
    } else {
        queryCacheKey = null;
    }

    if (queryCacheKey != null) {
        // cacheKeys map must preserve segment ordering, in order for shards to always be combined in the same order
        Map<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> cacheKeys = Maps.newLinkedHashMap();
        for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
            final Cache.NamedKey segmentCacheKey = CacheUtil.computeSegmentCacheKey(
                    segment.lhs.getSegment().getIdentifier(), segment.rhs, queryCacheKey);
            cacheKeys.put(segment, segmentCacheKey);
        }

        // Pull cached segments from cache and remove from set of segments to query
        final Map<Cache.NamedKey, byte[]> cachedValues;
        if (useCache) {
            cachedValues = cache.getBulk(cacheKeys.values());
        } else {
            cachedValues = ImmutableMap.of();
        }

        for (Map.Entry<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> entry : cacheKeys.entrySet()) {
            Pair<ServerSelector, SegmentDescriptor> segment = entry.getKey();
            Cache.NamedKey segmentCacheKey = entry.getValue();
            final Interval segmentQueryInterval = segment.rhs.getInterval();

            final byte[] cachedValue = cachedValues.get(segmentCacheKey);
            if (cachedValue != null) {
                // remove cached segment from set of segments to query
                segments.remove(segment);
                cachedResults.add(Pair.of(segmentQueryInterval, cachedValue));
            } else if (populateCache) {
                // otherwise, if populating cache, add segment to list of segments to cache
                final String segmentIdentifier = segment.lhs.getSegment().getIdentifier();
                cachePopulatorMap.put(String.format("%s_%s", segmentIdentifier, segmentQueryInterval),
                        new CachePopulator(cache, objectMapper, segmentCacheKey));
            }
        }
    }

    // Compile list of all segments not pulled from cache
    for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
        final QueryableDruidServer queryableDruidServer = segment.lhs.pick();

        if (queryableDruidServer == null) {
            log.makeAlert("No servers found for %s?! How can this be?!", segment.rhs).emit();
        } else {
            final DruidServer server = queryableDruidServer.getServer();
            List<SegmentDescriptor> descriptors = serverSegments.get(server);

            if (descriptors == null) {
                descriptors = Lists.newArrayList();
                serverSegments.put(server, descriptors);
            }

            descriptors.add(segment.rhs);
        }
    }

    return new LazySequence<>(new Supplier<Sequence<T>>() {
        @Override
        public Sequence<T> get() {
            ArrayList<Sequence<T>> sequencesByInterval = Lists.newArrayList();
            addSequencesFromCache(sequencesByInterval);
            addSequencesFromServer(sequencesByInterval);

            return mergeCachedAndUncachedSequences(sequencesByInterval, toolChest);
        }

        private void addSequencesFromCache(ArrayList<Sequence<T>> listOfSequences) {
            if (strategy == null) {
                return;
            }

            final Function<Object, T> pullFromCacheFunction = strategy.pullFromCache();
            final TypeReference<Object> cacheObjectClazz = strategy.getCacheObjectClazz();
            for (Pair<Interval, byte[]> cachedResultPair : cachedResults) {
                final byte[] cachedResult = cachedResultPair.rhs;
                Sequence<Object> cachedSequence = new BaseSequence<>(
                        new BaseSequence.IteratorMaker<Object, Iterator<Object>>() {
                            @Override
                            public Iterator<Object> make() {
                                try {
                                    if (cachedResult.length == 0) {
                                        return Iterators.emptyIterator();
                                    }

                                    return objectMapper.readValues(
                                            objectMapper.getFactory().createParser(cachedResult),
                                            cacheObjectClazz);
                                } catch (IOException e) {
                                    throw Throwables.propagate(e);
                                }
                            }

                            @Override
                            public void cleanup(Iterator<Object> iterFromMake) {
                            }
                        });
                listOfSequences.add(Sequences.map(cachedSequence, pullFromCacheFunction));
            }
        }

        private void addSequencesFromServer(ArrayList<Sequence<T>> listOfSequences) {
            listOfSequences.ensureCapacity(listOfSequences.size() + serverSegments.size());

            final Query<Result<BySegmentResultValueClass<T>>> rewrittenQuery = (Query<Result<BySegmentResultValueClass<T>>>) query
                    .withOverriddenContext(contextBuilder.build());

            // Loop through each server, setting up the query and initiating it.
            // The data gets handled as a Future and parsed in the long Sequence chain in the resultSeqToAdd setter.
            for (Map.Entry<DruidServer, List<SegmentDescriptor>> entry : serverSegments.entrySet()) {
                final DruidServer server = entry.getKey();
                final List<SegmentDescriptor> descriptors = entry.getValue();

                final QueryRunner clientQueryable = serverView.getQueryRunner(server);

                if (clientQueryable == null) {
                    log.error("WTF!? server[%s] doesn't have a client Queryable?", server);
                    continue;
                }

                final MultipleSpecificSegmentSpec segmentSpec = new MultipleSpecificSegmentSpec(descriptors);

                final Sequence<T> resultSeqToAdd;
                if (!server.isAssignable() || !populateCache || isBySegment) { // Direct server queryable
                    if (!isBySegment) {
                        resultSeqToAdd = clientQueryable.run(query.withQuerySegmentSpec(segmentSpec),
                                responseContext);
                    } else {
                        // bySegment queries need to be de-serialized, see DirectDruidClient.run()

                        @SuppressWarnings("unchecked")
                        final Query<Result<BySegmentResultValueClass<T>>> bySegmentQuery = (Query<Result<BySegmentResultValueClass<T>>>) query;

                        @SuppressWarnings("unchecked")
                        final Sequence<Result<BySegmentResultValueClass<T>>> resultSequence = clientQueryable
                                .run(bySegmentQuery.withQuerySegmentSpec(segmentSpec), responseContext);

                        resultSeqToAdd = (Sequence) Sequences.map(resultSequence,
                                new Function<Result<BySegmentResultValueClass<T>>, Result<BySegmentResultValueClass<T>>>() {
                                    @Override
                                    public Result<BySegmentResultValueClass<T>> apply(
                                            Result<BySegmentResultValueClass<T>> input) {
                                        final BySegmentResultValueClass<T> bySegmentValue = input.getValue();
                                        return new Result<>(input.getTimestamp(),
                                                new BySegmentResultValueClass<T>(
                                                        Lists.transform(bySegmentValue.getResults(),
                                                                toolChest.makePreComputeManipulatorFn(query,
                                                                        MetricManipulatorFns.deserializing())),
                                                        bySegmentValue.getSegmentId(),
                                                        bySegmentValue.getInterval()));
                                    }
                                });
                    }
                } else { // Requires some manipulation on broker side
                    @SuppressWarnings("unchecked")
                    final Sequence<Result<BySegmentResultValueClass<T>>> runningSequence = clientQueryable
                            .run(rewrittenQuery.withQuerySegmentSpec(segmentSpec), responseContext);
                    resultSeqToAdd = toolChest.mergeSequencesUnordered(
                            Sequences.<Result<BySegmentResultValueClass<T>>, Sequence<T>>map(runningSequence,
                                    new Function<Result<BySegmentResultValueClass<T>>, Sequence<T>>() {
                                        private final Function<T, Object> cacheFn = strategy.prepareForCache();

                                        // Actually do something with the results
                                        @Override
                                        public Sequence<T> apply(Result<BySegmentResultValueClass<T>> input) {
                                            final BySegmentResultValueClass<T> value = input.getValue();
                                            final CachePopulator cachePopulator = cachePopulatorMap
                                                    .get(String.format("%s_%s", value.getSegmentId(),
                                                            value.getInterval()));

                                            final Queue<ListenableFuture<Object>> cacheFutures = new ConcurrentLinkedQueue<>();

                                            return Sequences.<T>withEffect(Sequences.<T, T>map(
                                                    Sequences.<T, T>map(Sequences.<T>simple(value.getResults()),
                                                            new Function<T, T>() {
                                                                @Override
                                                                public T apply(final T input) {
                                                                    if (cachePopulator != null) {
                                                                        // only compute cache data if populating cache
                                                                        cacheFutures
                                                                                .add(backgroundExecutorService
                                                                                        .submit(new Callable<Object>() {
                                                                                            @Override
                                                                                            public Object call() {
                                                                                                return cacheFn
                                                                                                        .apply(input);
                                                                                            }
                                                                                        }));
                                                                    }
                                                                    return input;
                                                                }
                                                            }),
                                                    toolChest.makePreComputeManipulatorFn(
                                                            // Ick... most makePreComputeManipulatorFn directly cast to their ToolChest query type of choice
                                                            // This casting is sub-optimal, but hasn't caused any major problems yet...
                                                            (Query) rewrittenQuery,
                                                            MetricManipulatorFns.deserializing())),
                                                    new Runnable() {
                                                        @Override
                                                        public void run() {
                                                            if (cachePopulator != null) {
                                                                Futures.addCallback(
                                                                        Futures.allAsList(cacheFutures),
                                                                        new FutureCallback<List<Object>>() {
                                                                            @Override
                                                                            public void onSuccess(
                                                                                    List<Object> cacheData) {
                                                                                cachePopulator
                                                                                        .populate(cacheData);
                                                                                // Help out GC by making sure all references are gone
                                                                                cacheFutures.clear();
                                                                            }

                                                                            @Override
                                                                            public void onFailure(
                                                                                    Throwable throwable) {
                                                                                log.error(throwable,
                                                                                        "Background caching failed");
                                                                            }
                                                                        }, backgroundExecutorService);
                                                            }
                                                        }
                                                    }, MoreExecutors.sameThreadExecutor());// End withEffect
                                        }
                                    }));
                }

                listOfSequences.add(resultSeqToAdd);
            }
        }
    }// End of Supplier
    );
}

From source file:c5db.replication.ReplicatorService.java

private ListenableFuture<Void> getDependedOnModules() {
    SettableFuture<Void> doneFuture = SettableFuture.create();

    List<ListenableFuture<C5Module>> moduleFutures = new ArrayList<>();
    moduleFutures.add(moduleInformationProvider.getModule(ModuleType.Log));
    moduleFutures.add(moduleInformationProvider.getModule(ModuleType.Discovery));

    ListenableFuture<List<C5Module>> compositeModulesFuture = Futures.allAsList(moduleFutures);

    LOG.warn("ReplicatorService now waiting for module dependency on Log & Discovery");

    C5Futures.addCallback(compositeModulesFuture, (List<C5Module> modules) -> {
        this.logModule = (LogModule) modules.get(0);
        this.discoveryModule = (DiscoveryModule) modules.get(1);

        doneFuture.set(null);
    }, this::failModule, fiber);

    return doneFuture;
}

From source file:io.crate.action.sql.DDLAnalysisDispatcher.java

@Override
public ListenableFuture<Long> visitAlterTableAnalysis(final AlterTableAnalysis analysis, Void context) {
    final SettableFuture<Long> result = SettableFuture.create();
    final String[] indices;
    boolean updateTemplate = false;
    if (analysis.table().isPartitioned()) {
        if (analysis.partitionName().isPresent()) {
            indices = new String[] { analysis.partitionName().get().stringValue() };
        } else {
            updateTemplate = true; // only update template when updating whole partitioned table
            indices = analysis.table().concreteIndices();
        }
    } else {
        indices = new String[] { analysis.table().ident().name() };
    }

    if (analysis.table().isAlias()) {
        throw new AlterTableAliasException(analysis.table().ident().name());
    }

    final List<ListenableFuture<?>> results = new ArrayList<>(indices.length + (updateTemplate ? 1 : 0));
    if (updateTemplate) {
        final SettableFuture<?> templateFuture = SettableFuture.create();
        results.add(templateFuture);

        // update template
        final String templateName = PartitionName.templateName(analysis.table().ident().name());
        GetIndexTemplatesRequest getRequest = new GetIndexTemplatesRequest(templateName);

        transportGetIndexTemplatesAction.execute(getRequest, new ActionListener<GetIndexTemplatesResponse>() {
            @Override
            public void onResponse(GetIndexTemplatesResponse response) {
                String mapping;
                try {
                    mapping = response.getIndexTemplates().get(0).getMappings()
                            .get(Constants.DEFAULT_MAPPING_TYPE).string();
                } catch (IOException e) {
                    templateFuture.setException(e);
                    return;
                }
                ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder();
                settingsBuilder.put(response.getIndexTemplates().get(0).settings());
                settingsBuilder.put(analysis.settings());

                PutIndexTemplateRequest request = new PutIndexTemplateRequest(templateName).create(false)
                        .mapping(Constants.DEFAULT_MAPPING_TYPE, mapping).settings(settingsBuilder.build())
                        .template(response.getIndexTemplates().get(0).template());
                for (ObjectObjectCursor<String, AliasMetaData> container : response.getIndexTemplates().get(0)
                        .aliases()) {
                    Alias alias = new Alias(container.key);
                    request.alias(alias);
                }
                transportPutIndexTemplateAction.execute(request,
                        new ActionListener<PutIndexTemplateResponse>() {
                            @Override
                            public void onResponse(PutIndexTemplateResponse putIndexTemplateResponse) {
                                templateFuture.set(null);
                            }

                            @Override
                            public void onFailure(Throwable e) {
                                templateFuture.setException(e);
                            }
                        });

            }

            @Override
            public void onFailure(Throwable e) {
                templateFuture.setException(e);
            }
        });

    }
    // update every concrete index
    for (String index : indices) {
        UpdateSettingsRequest request = new UpdateSettingsRequest(analysis.settings(), index);
        final SettableFuture<?> future = SettableFuture.create();
        results.add(future);
        transportUpdateSettingsAction.execute(request, new ActionListener<UpdateSettingsResponse>() {
            @Override
            public void onResponse(UpdateSettingsResponse updateSettingsResponse) {
                future.set(null);
            }

            @Override
            public void onFailure(Throwable e) {
                future.setException(e);
            }
        });
    }
    Futures.addCallback(Futures.allAsList(results), new FutureCallback<List<?>>() {
        @Override
        public void onSuccess(@Nullable List<?> resultList) {
            result.set(null);
        }

        @Override
        public void onFailure(@Nonnull Throwable t) {
            result.setException(t);
        }
    });

    return result;
}

From source file:org.glowroot.central.repo.GaugeValueDao.java

private ListenableFuture<ResultSet> rollupOneFromChildren(int rollupLevel, String agentRollupId,
        String gaugeName, List<String> childAgentRollups, long captureTime, int adjustedTTL) {
    List<ListenableFuture<ResultSet>> futures = Lists.newArrayList();
    for (String childAgentRollup : childAgentRollups) {
        BoundStatement boundStatement = readValueForRollupFromChildPS.bind();
        int i = 0;
        boundStatement.setString(i++, childAgentRollup);
        boundStatement.setString(i++, gaugeName);
        boundStatement.setTimestamp(i++, new Date(captureTime));
        futures.add(session.executeAsync(boundStatement));
    }
    return Futures.transformAsync(Futures.allAsList(futures), new AsyncFunction<List<ResultSet>, ResultSet>() {
        @Override
        public ListenableFuture<ResultSet> apply(@Nullable List<ResultSet> results) throws Exception {
            checkNotNull(results);
            List<Row> rows = Lists.newArrayList();
            for (int i = 0; i < results.size(); i++) {
                Row row = results.get(i).one();
                if (row == null) {
                    // this is unexpected since TTL for "needs rollup" records is
                    // shorter than TTL for data
                    logger.warn(
                            "no gauge value table records found for agentRollupId={},"
                                    + " gaugeName={}, captureTime={}, level={}",
                            childAgentRollups.get(i), gaugeName, captureTime, rollupLevel);
                } else {
                    rows.add(row);
                }
            }
            if (rows.isEmpty()) {
                // warning(s) already logged above
                return Futures.immediateFuture(DummyResultSet.INSTANCE);
            }
            return rollupOneFromRows(rollupLevel, agentRollupId, gaugeName, captureTime, adjustedTTL, rows);
        }
    });
}

From source file:org.apache.beam.sdk.util.GcsUtil.java

private static void executeBatches(List<BatchRequest> batches) throws IOException {
    ListeningExecutorService executor = MoreExecutors.listeningDecorator(
            MoreExecutors.getExitingExecutorService(new ThreadPoolExecutor(MAX_CONCURRENT_BATCHES,
                    MAX_CONCURRENT_BATCHES, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>())));

    List<ListenableFuture<Void>> futures = new LinkedList<>();
    for (final BatchRequest batch : batches) {
        futures.add(executor.submit(new Callable<Void>() {
            public Void call() throws IOException {
                batch.execute();
                return null;
            }
        }));
    }

    try {
        Futures.allAsList(futures).get();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException("Interrupted while executing batch GCS request", e);
    } catch (ExecutionException e) {
        if (e.getCause() instanceof FileNotFoundException) {
            throw (FileNotFoundException) e.getCause();
        }
        throw new IOException("Error executing batch GCS request", e);
    } finally {
        executor.shutdown();
    }
}

From source file:org.hawkular.alerts.engine.impl.CassAlertsServiceImpl.java

@Override
public void removeEventTags(String tenantId, Collection<String> eventIds, Collection<String> tags)
        throws Exception {
    if (isEmpty(tenantId)) {
        throw new IllegalArgumentException("TenantId must be not null");
    }
    if (isEmpty(eventIds)) {
        throw new IllegalArgumentException("EventIds must be not null");
    }
    if (isEmpty(tags)) {
        throw new IllegalArgumentException("Tags must be not null");
    }

    // Only untag existing events
    EventsCriteria criteria = new EventsCriteria();
    criteria.setEventIds(eventIds);
    Page<Event> existingEvents = getEvents(tenantId, criteria, null);

    PreparedStatement updateEvent = CassStatement.get(session, CassStatement.UPDATE_EVENT);
    PreparedStatement deleteTag = CassStatement.get(session, CassStatement.DELETE_TAG);

    try {
        List<ResultSetFuture> futures = new ArrayList<>();
        BatchStatement batch = new BatchStatement(batchType);
        int i = 0;
        for (Event e : existingEvents) {
            tags.stream().forEach(tag -> {
                if (e.getTags().containsKey(tag)) {
                    batch.add(deleteTag.bind(tenantId, TagType.EVENT.name(), tag, e.getTags().get(tag),
                            e.getId()));
                    e.removeTag(tag);
                }
            });
            batch.add(updateEvent.bind(JsonUtil.toJson(e), tenantId, e.getId()));
            i += batch.size();
            if (i > batchSize) {
                futures.add(session.executeAsync(batch));
                batch.clear();
                i = 0;
            }
        }
        if (batch.size() > 0) {
            futures.add(session.executeAsync(batch));
        }
        Futures.allAsList(futures).get();

    } catch (Exception e) {
        msgLog.errorDatabaseException(e.getMessage());
        throw e;
    }
}

From source file:diskCacheV111.srm.SrmHandler.java

private Object dispatch(SrmGetRequestTokensRequest request, Function<Object, SrmRequest> toMessage)
        throws InterruptedException, ExecutionException {
    List<ListenableFuture<SrmResponse>> futures = backends.getCurrentData().stream().map(this::toCellPath)
            .map(path -> srmManagerStub.send(path, toMessage.apply(request), SrmResponse.class))
            .collect(toList());
    return mapGetRequestTokensResponse(Futures.allAsList(futures).get());
}

From source file:org.opendaylight.openflowplugin.applications.frsync.impl.strategy.SyncPlanPushStrategyIncrementalImpl.java

private ListenableFuture<RpcResult<Void>> flushAddGroupPortionAndBarrier(
        final InstanceIdentifier<FlowCapableNode> nodeIdent, final ItemSyncBox<Group> groupsPortion) {
    final List<ListenableFuture<RpcResult<AddGroupOutput>>> allResults = new ArrayList<>();
    final List<ListenableFuture<RpcResult<UpdateGroupOutput>>> allUpdateResults = new ArrayList<>();

    for (Group group : groupsPortion.getItemsToPush()) {
        final KeyedInstanceIdentifier<Group, GroupKey> groupIdent = nodeIdent.child(Group.class,
                group.getKey());
        allResults.add(JdkFutureAdapters.listenInPoolThread(groupForwarder.add(groupIdent, group, nodeIdent)));

    }

    for (ItemSyncBox.ItemUpdateTuple<Group> groupTuple : groupsPortion.getItemsToUpdate()) {
        final Group existingGroup = groupTuple.getOriginal();
        final Group group = groupTuple.getUpdated();

        final KeyedInstanceIdentifier<Group, GroupKey> groupIdent = nodeIdent.child(Group.class,
                group.getKey());
        allUpdateResults.add(JdkFutureAdapters
                .listenInPoolThread(groupForwarder.update(groupIdent, existingGroup, group, nodeIdent)));
    }

    final ListenableFuture<RpcResult<Void>> singleVoidAddResult = Futures.transform(
            Futures.allAsList(allResults), ReconcileUtil.<AddGroupOutput>createRpcResultCondenser("group add"));

    final ListenableFuture<RpcResult<Void>> singleVoidUpdateResult = Futures.transform(
            Futures.allAsList(allUpdateResults),
            ReconcileUtil.<UpdateGroupOutput>createRpcResultCondenser("group update"));

    final ListenableFuture<RpcResult<Void>> summaryResult = Futures.transform(
            Futures.allAsList(singleVoidAddResult, singleVoidUpdateResult),
            ReconcileUtil.<Void>createRpcResultCondenser("group add/update"));

    return Futures.transform(summaryResult,
            ReconcileUtil.chainBarrierFlush(PathUtil.digNodePath(nodeIdent), transactionService));
}

From source file:com.facebook.buck.features.apple.project.WorkspaceAndProjectGenerator.java

private void generateProject(Map<Path, ProjectGenerator> projectGenerators,
        ListeningExecutorService listeningExecutorService, WorkspaceGenerator workspaceGenerator,
        ImmutableSet<BuildTarget> targetsInRequiredProjects,
        ImmutableSetMultimap.Builder<PBXProject, PBXTarget> generatedProjectToPbxTargetsBuilder,
        ImmutableMap.Builder<BuildTarget, PBXTarget> buildTargetToPbxTargetMapBuilder,
        ImmutableMap.Builder<PBXTarget, Path> targetToProjectPathMapBuilder)
        throws IOException, InterruptedException {
    ImmutableMultimap.Builder<Cell, BuildTarget> projectCellToBuildTargetsBuilder = ImmutableMultimap.builder();
    for (TargetNode<?> targetNode : projectGraph.getNodes()) {
        BuildTarget buildTarget = targetNode.getBuildTarget();
        projectCellToBuildTargetsBuilder.put(rootCell.getCell(buildTarget), buildTarget);
    }
    ImmutableMultimap<Cell, BuildTarget> projectCellToBuildTargets = projectCellToBuildTargetsBuilder.build();
    List<ListenableFuture<GenerationResult>> projectGeneratorFutures = new ArrayList<>();
    for (Cell projectCell : projectCellToBuildTargets.keySet()) {
        ImmutableMultimap.Builder<Path, BuildTarget> projectDirectoryToBuildTargetsBuilder = ImmutableMultimap
                .builder();
        ImmutableSet<BuildTarget> cellRules = ImmutableSet.copyOf(projectCellToBuildTargets.get(projectCell));
        for (BuildTarget buildTarget : cellRules) {
            projectDirectoryToBuildTargetsBuilder.put(buildTarget.getBasePath(), buildTarget);
        }
        ImmutableMultimap<Path, BuildTarget> projectDirectoryToBuildTargets = projectDirectoryToBuildTargetsBuilder
                .build();
        Path relativeTargetCell = rootCell.getRoot().relativize(projectCell.getRoot());
        for (Path projectDirectory : projectDirectoryToBuildTargets.keySet()) {
            ImmutableSet<BuildTarget> rules = filterRulesForProjectDirectory(projectGraph,
                    ImmutableSet.copyOf(projectDirectoryToBuildTargets.get(projectDirectory)));
            if (Sets.intersection(targetsInRequiredProjects, rules).isEmpty()) {
                continue;
            }

            boolean isMainProject = workspaceArguments.getSrcTarget().isPresent()
                    && rules.contains(workspaceArguments.getSrcTarget().get());
            projectGeneratorFutures.add(listeningExecutorService.submit(() -> {
                GenerationResult result = generateProjectForDirectory(projectGenerators, projectCell,
                        projectDirectory, rules, isMainProject, targetsInRequiredProjects);
                // convert the projectPath to relative to the target cell here
                result = GenerationResult.of(relativeTargetCell.resolve(result.getProjectPath()),
                        result.isProjectGenerated(), result.getRequiredBuildTargets(),
                        result.getXcconfigPaths(), result.getFilesToCopyInXcode(),
                        result.getBuildTargetToGeneratedTargetMap(), result.getGeneratedProjectToPbxTargets());
                return result;
            }));
        }
    }

    List<GenerationResult> generationResults;
    try {
        generationResults = Futures.allAsList(projectGeneratorFutures).get();
    } catch (ExecutionException e) {
        Throwables.throwIfInstanceOf(e.getCause(), IOException.class);
        Throwables.throwIfUnchecked(e.getCause());
        throw new IllegalStateException("Unexpected exception: ", e);
    }
    for (GenerationResult result : generationResults) {
        if (!result.isProjectGenerated()) {
            continue;
        }
        workspaceGenerator.addFilePath(result.getProjectPath());
        processGenerationResult(generatedProjectToPbxTargetsBuilder, buildTargetToPbxTargetMapBuilder,
                targetToProjectPathMapBuilder, result);
    }
}

From source file:io.druid.segment.realtime.appenderator.AppenderatorImpl.java

@Override
public void close() {
    log.info("Shutting down...");

    final List<ListenableFuture<?>> futures = Lists.newArrayList();
    for (Map.Entry<SegmentIdentifier, Sink> entry : sinks.entrySet()) {
        futures.add(abandonSegment(entry.getKey(), entry.getValue(), false));
    }

    try {
        Futures.allAsList(futures).get();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        log.warn(e, "Interrupted during close()");
    } catch (ExecutionException e) {
        log.warn(e, "Unable to abandon existing segments during close()");
    }

    try {
        shutdownExecutors();
        Preconditions.checkState(persistExecutor.awaitTermination(365, TimeUnit.DAYS),
                "persistExecutor not terminated");
        Preconditions.checkState(pushExecutor.awaitTermination(365, TimeUnit.DAYS),
                "pushExecutor not terminated");
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new ISE("Failed to shutdown executors during close()");
    }

    // Only unlock if executors actually shut down.
    unlockBasePersistDirectory();
}