Example usage for com.google.common.util.concurrent Futures allAsList

Introduction

On this page you can find example usage for com.google.common.util.concurrent.Futures.allAsList.

Prototype

@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> allAsList(
        Iterable<? extends ListenableFuture<? extends V>> futures) 

Document

Creates a new ListenableFuture whose value is a list containing the values of all its input futures, if all succeed.
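
If any input future fails, the returned future fails immediately; if any input is cancelled, the returned future is cancelled as well. Before the project examples below, here is a minimal self-contained sketch of both behaviors; the class name and computed values are illustrative only:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;

public class AllAsListSketch {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService pool = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(2));
        try {
            ListenableFuture<Integer> a = pool.submit(() -> 1);
            ListenableFuture<Integer> b = pool.submit(() -> 2);

            // Succeeds only when every input succeeds; the result list
            // preserves the order of the input futures.
            ListenableFuture<List<Integer>> all = Futures.allAsList(a, b);
            System.out.println(all.get()); // prints [1, 2]

            // If any input fails, the combined future fails with that input's exception.
            ListenableFuture<Integer> bad = pool.submit(() -> {
                if (true) {
                    throw new IllegalStateException("boom");
                }
                return -1;
            });
            try {
                Futures.allAsList(a, bad).get();
            } catch (ExecutionException expected) {
                System.out.println("failed: " + expected.getCause().getMessage());
            }
        } finally {
            pool.shutdown();
        }
    }
}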

Usage

From source file: org.opendaylight.groupbasedpolicy.renderer.ofoverlay.PolicyManager.java

/**
 * @param  tableIDs - IDs of tables to delete
 * @return ListenableFuture<Void> - which will be filled when clearing is done
 */
private ListenableFuture<Void> removeUnusedTables(final List<Short> tableIDs) {
    List<ListenableFuture<Void>> checkList = new ArrayList<>();
    final ReadWriteTransaction rwTx = dataBroker.newReadWriteTransaction();
    for (Short tableId : tableIDs) {
        for (NodeId nodeId : switchManager.getReadySwitches()) {
            final InstanceIdentifier<Table> tablePath = FlowUtils.createTablePath(nodeId, tableId);
            checkList.add(deleteTableIfExists(rwTx, tablePath));
        }
    }
    ListenableFuture<List<Void>> allAsListFuture = Futures.allAsList(checkList);
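    // Note: in Guava 20+ this AsyncFunction overload of Futures.transform became Futures.transformAsync.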
    return Futures.transform(allAsListFuture, new AsyncFunction<List<Void>, Void>() {

        @Override
        public ListenableFuture<Void> apply(List<Void> readyToSubmit) {
            return rwTx.submit();
        }
    });
}

From source file: com.google.idea.blaze.java.sync.source.SourceDirectoryCalculator.java

/** Adds the java source directories. */
private void calculateJavaSourceDirectories(BlazeContext context, WorkspaceRoot workspaceRoot,
        ArtifactLocationDecoder artifactLocationDecoder, WorkspacePath directoryRoot,
        Collection<SourceArtifact> javaArtifacts, Collection<JavaPackageReader> javaPackageReaders,
        Collection<BlazeSourceDirectory> result) {

    List<SourceRoot> sourceRootsPerFile = Lists.newArrayList();

    // Get java sources
    List<ListenableFuture<SourceRoot>> sourceRootFutures = Lists.newArrayList();
    for (final SourceArtifact sourceArtifact : javaArtifacts) {
        ListenableFuture<SourceRoot> future = executorService.submit(() -> sourceRootForJavaSource(context,
                artifactLocationDecoder, sourceArtifact, javaPackageReaders));
        sourceRootFutures.add(future);
    }
    try {
        for (SourceRoot sourceRoot : Futures.allAsList(sourceRootFutures).get()) {
            if (sourceRoot != null) {
                sourceRootsPerFile.add(sourceRoot);
            }
        }
    } catch (ExecutionException | InterruptedException e) {
        LOG.error(e);
        throw new IllegalStateException("Could not read sources");
    }

    // Sort source roots into their respective directories
    Multimap<WorkspacePath, SourceRoot> sourceDirectoryToSourceRoots = HashMultimap.create();
    for (SourceRoot sourceRoot : sourceRootsPerFile) {
        sourceDirectoryToSourceRoots.put(sourceRoot.workspacePath, sourceRoot);
    }

    // Create a mapping from directory to package prefix
    Map<WorkspacePath, SourceRoot> workspacePathToSourceRoot = Maps.newHashMap();
    for (WorkspacePath workspacePath : sourceDirectoryToSourceRoots.keySet()) {
        Collection<SourceRoot> sources = sourceDirectoryToSourceRoots.get(workspacePath);
        Multiset<String> packages = HashMultiset.create();

        for (SourceRoot source : sources) {
            packages.add(source.packagePrefix);
        }

        final String directoryPackagePrefix;
        // Common case -- all source files agree on a single package
        if (packages.elementSet().size() == 1) {
            directoryPackagePrefix = packages.elementSet().iterator().next();
        } else {
            String preferredPackagePrefix = PackagePrefixCalculator.packagePrefixOf(workspacePath);
            directoryPackagePrefix = pickMostFrequentlyOccurring(packages, preferredPackagePrefix);
        }

        SourceRoot candidateRoot = new SourceRoot(workspacePath, directoryPackagePrefix);
        workspacePathToSourceRoot.put(workspacePath, candidateRoot);
    }

    // Add content entry base if it doesn't exist
    if (!workspacePathToSourceRoot.containsKey(directoryRoot)) {
        SourceRoot candidateRoot = new SourceRoot(directoryRoot,
                PackagePrefixCalculator.packagePrefixOf(directoryRoot));
        workspacePathToSourceRoot.put(directoryRoot, candidateRoot);
    }

    // First, create a graph of the directory structure from root to each source file
    Map<WorkspacePath, SourceRootDirectoryNode> sourceRootDirectoryNodeMap = Maps.newHashMap();
    SourceRootDirectoryNode rootNode = new SourceRootDirectoryNode(directoryRoot, null);
    sourceRootDirectoryNodeMap.put(directoryRoot, rootNode);
    for (SourceRoot sourceRoot : workspacePathToSourceRoot.values()) {
        final String sourcePathRelativeToDirectoryRoot = sourcePathRelativeToDirectoryRoot(directoryRoot,
                sourceRoot.workspacePath);
        List<String> pathComponents = !Strings.isNullOrEmpty(sourcePathRelativeToDirectoryRoot)
                ? PATH_SPLITTER.splitToList(sourcePathRelativeToDirectoryRoot)
                : ImmutableList.of();
        SourceRootDirectoryNode previousNode = rootNode;
        for (int i = 0; i < pathComponents.size(); ++i) {
            final WorkspacePath workspacePath = getWorkspacePathFromPathComponents(directoryRoot,
                    pathComponents, i + 1);
            SourceRootDirectoryNode node = sourceRootDirectoryNodeMap.get(workspacePath);
            if (node == null) {
                node = new SourceRootDirectoryNode(workspacePath, pathComponents.get(i));
                sourceRootDirectoryNodeMap.put(workspacePath, node);
                previousNode.children.add(node);
            }
            previousNode = node;
        }
    }

    // Add package prefix votes at each directory node
    for (SourceRoot sourceRoot : workspacePathToSourceRoot.values()) {
        final String sourcePathRelativeToDirectoryRoot = sourcePathRelativeToDirectoryRoot(directoryRoot,
                sourceRoot.workspacePath);

        List<String> packageComponents = PACKAGE_SPLITTER.splitToList(sourceRoot.packagePrefix);
        List<String> pathComponents = !Strings.isNullOrEmpty(sourcePathRelativeToDirectoryRoot)
                ? PATH_SPLITTER.splitToList(sourcePathRelativeToDirectoryRoot)
                : ImmutableList.of();
        int packageIndex = packageComponents.size();
        int pathIndex = pathComponents.size();
        while (pathIndex >= 0 && packageIndex >= 0) {
            final WorkspacePath workspacePath = getWorkspacePathFromPathComponents(directoryRoot,
                    pathComponents, pathIndex);

            SourceRootDirectoryNode node = sourceRootDirectoryNodeMap.get(workspacePath);

            String packagePrefix = PACKAGE_JOINER.join(packageComponents.subList(0, packageIndex));

            // If this is the source root containing Java files, we *have* to pick its package prefix
            // Otherwise just add a vote
            if (sourceRoot.workspacePath.equals(workspacePath)) {
                node.forcedPackagePrefix = packagePrefix;
            } else {
                node.packagePrefixVotes.add(packagePrefix);
            }

            String pathComponent = pathIndex > 0 ? pathComponents.get(pathIndex - 1) : "";
            String packageComponent = packageIndex > 0 ? packageComponents.get(packageIndex - 1) : "";
            if (!pathComponent.equals(packageComponent)) {
                break;
            }

            --packageIndex;
            --pathIndex;
        }
    }

    Map<WorkspacePath, SourceRoot> sourceRoots = Maps.newHashMap();
    SourceRootDirectoryNode root = sourceRootDirectoryNodeMap.get(directoryRoot);
    visitDirectoryNode(sourceRoots, root, null);

    for (SourceRoot sourceRoot : sourceRoots.values()) {
        result.add(BlazeSourceDirectory.builder(workspaceRoot.fileForPath(sourceRoot.workspacePath))
                .setPackagePrefix(sourceRoot.packagePrefix).setGenerated(false).build());
    }
}

From source file: org.hawkular.alerts.engine.impl.CassAlertsServiceImpl.java

@Override
public void addAlerts(Collection<Alert> alerts) throws Exception {
    if (alerts == null) {
        throw new IllegalArgumentException("Alerts must be not null");
    }
    if (alerts.isEmpty()) {
        return;
    }
    if (log.isDebugEnabled()) {
        log.debug("Adding " + alerts.size() + " alerts");
    }
    PreparedStatement insertAlert = CassStatement.get(session, CassStatement.INSERT_ALERT);
    PreparedStatement insertAlertTrigger = CassStatement.get(session, CassStatement.INSERT_ALERT_TRIGGER);
    PreparedStatement insertAlertCtime = CassStatement.get(session, CassStatement.INSERT_ALERT_CTIME);
    PreparedStatement insertAlertStime = CassStatement.get(session, CassStatement.INSERT_ALERT_STIME);
    PreparedStatement insertTag = CassStatement.get(session, CassStatement.INSERT_TAG);

    try {
        List<ResultSetFuture> futures = new ArrayList<>();
        BatchStatement batch = new BatchStatement(batchType);
        int i = 0;
        for (Alert a : alerts) {
            batch.add(insertAlert.bind(a.getTenantId(), a.getAlertId(), JsonUtil.toJson(a)));
            batch.add(insertAlertTrigger.bind(a.getTenantId(), a.getAlertId(), a.getTriggerId()));
            batch.add(insertAlertCtime.bind(a.getTenantId(), a.getAlertId(), a.getCtime()));
            batch.add(
                    insertAlertStime.bind(a.getTenantId(), a.getAlertId(), a.getCurrentLifecycle().getStime()));

            a.getTags().entrySet().stream().forEach(tag -> {
                batch.add(insertTag.bind(a.getTenantId(), TagType.ALERT.name(), tag.getKey(), tag.getValue(),
                        a.getId()));
            });
            i += batch.size();
            if (i > batchSize) {
                futures.add(session.executeAsync(batch));
                batch.clear();
                i = 0;
            }
        }
        if (batch.size() > 0) {
            futures.add(session.executeAsync(batch));
        }
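        // Block until every async batch insert completes; get() rethrows the first failure as ExecutionException.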
        Futures.allAsList(futures).get();

    } catch (Exception e) {
        msgLog.errorDatabaseException(e.getMessage());
        throw e;
    }

    // Every Alert has a corresponding Event
    List<Event> events = alerts.stream().map(Event::new).collect(Collectors.toList());
    persistEvents(events);
}

From source file: org.apache.druid.query.groupby.epinephelinae.GroupByMergingQueryRunnerV2.java

@Override
public Sequence<Row> run(final QueryPlus<Row> queryPlus, final Map<String, Object> responseContext) {
    final GroupByQuery query = (GroupByQuery) queryPlus.getQuery();
    final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);

    // CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION is here because realtime servers use nested mergeRunners calls
    // (one for the entire query and one for each sink). We only want the outer call to actually do merging with a
    // merge buffer, otherwise the query will allocate too many merge buffers. This is potentially sub-optimal as it
    // will involve materializing the results for each sink before starting to feed them into the outer merge buffer.
    // I'm not sure of a better way to do this without tweaking how realtime servers do queries.
    final boolean forceChainedExecution = query.getContextBoolean(CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION,
            false);
    final QueryPlus<Row> queryPlusForRunners = queryPlus
            .withQuery(query.withOverriddenContext(
                    ImmutableMap.of(CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION, true)))
            .withoutThreadUnsafeState();

    if (QueryContexts.isBySegment(query) || forceChainedExecution) {
        ChainedExecutionQueryRunner<Row> runner = new ChainedExecutionQueryRunner<>(exec, queryWatcher,
                queryables);
        return runner.run(queryPlusForRunners, responseContext);
    }

    final boolean isSingleThreaded = querySpecificConfig.isSingleThreaded();

    final AggregatorFactory[] combiningAggregatorFactories = new AggregatorFactory[query.getAggregatorSpecs()
            .size()];
    for (int i = 0; i < query.getAggregatorSpecs().size(); i++) {
        combiningAggregatorFactories[i] = query.getAggregatorSpecs().get(i).getCombiningFactory();
    }

    final File temporaryStorageDirectory = new File(processingTmpDir,
            StringUtils.format("druid-groupBy-%s_%s", UUID.randomUUID(), query.getId()));

    final int priority = QueryContexts.getPriority(query);

    // Figure out timeoutAt time now, so we can apply the timeout to both the mergeBufferPool.take and the actual
    // query processing together.
    final long queryTimeout = QueryContexts.getTimeout(query);
    final boolean hasTimeout = QueryContexts.hasTimeout(query);
    final long timeoutAt = System.currentTimeMillis() + queryTimeout;

    return new BaseSequence<>(
            new BaseSequence.IteratorMaker<Row, CloseableGrouperIterator<RowBasedKey, Row>>() {
                @Override
                public CloseableGrouperIterator<RowBasedKey, Row> make() {
                    final List<ReferenceCountingResourceHolder> resources = Lists.newArrayList();

                    try {
                        final LimitedTemporaryStorage temporaryStorage = new LimitedTemporaryStorage(
                                temporaryStorageDirectory, querySpecificConfig.getMaxOnDiskStorage());
                        final ReferenceCountingResourceHolder<LimitedTemporaryStorage> temporaryStorageHolder = ReferenceCountingResourceHolder
                                .fromCloseable(temporaryStorage);
                        resources.add(temporaryStorageHolder);

                        // If parallelCombine is enabled, we need two merge buffers for parallel aggregating and parallel combining
                        final int numMergeBuffers = querySpecificConfig.getNumParallelCombineThreads() > 1 ? 2
                                : 1;

                        final List<ReferenceCountingResourceHolder<ByteBuffer>> mergeBufferHolders = getMergeBuffersHolder(
                                numMergeBuffers, hasTimeout, timeoutAt);
                        resources.addAll(mergeBufferHolders);

                        final ReferenceCountingResourceHolder<ByteBuffer> mergeBufferHolder = mergeBufferHolders
                                .get(0);
                        final ReferenceCountingResourceHolder<ByteBuffer> combineBufferHolder = numMergeBuffers == 2
                                ? mergeBufferHolders.get(1)
                                : null;

                        Pair<Grouper<RowBasedKey>, Accumulator<AggregateResult, Row>> pair = RowBasedGrouperHelper
                                .createGrouperAccumulatorPair(query, false, null, config,
                                        Suppliers.ofInstance(mergeBufferHolder.get()), combineBufferHolder,
                                        concurrencyHint, temporaryStorage, spillMapper,
                                        combiningAggregatorFactories, exec, priority, hasTimeout, timeoutAt,
                                        mergeBufferSize);
                        final Grouper<RowBasedKey> grouper = pair.lhs;
                        final Accumulator<AggregateResult, Row> accumulator = pair.rhs;
                        grouper.init();

                        final ReferenceCountingResourceHolder<Grouper<RowBasedKey>> grouperHolder = ReferenceCountingResourceHolder
                                .fromCloseable(grouper);
                        resources.add(grouperHolder);

                        ListenableFuture<List<AggregateResult>> futures = Futures
                                .allAsList(Lists.newArrayList(Iterables.transform(queryables,
                                        new Function<QueryRunner<Row>, ListenableFuture<AggregateResult>>() {
                                            @Override
                                            public ListenableFuture<AggregateResult> apply(
                                                    final QueryRunner<Row> input) {
                                                if (input == null) {
                                                    throw new ISE(
                                                            "Null queryRunner! Looks to be some segment unmapping action happening");
                                                }

                                                ListenableFuture<AggregateResult> future = exec.submit(
                                                        new AbstractPrioritizedCallable<AggregateResult>(
                                                                priority) {
                                                            @Override
                                                            public AggregateResult call() {
                                                                try (
                                                                        // These variables are used to close releasers automatically.
                                                                        @SuppressWarnings("unused")
                                                                Releaser bufferReleaser = mergeBufferHolder
                                                                        .increment();
                                                                        @SuppressWarnings("unused")
                                                                Releaser grouperReleaser = grouperHolder
                                                                        .increment()) {
                                                                    final AggregateResult retVal = input
                                                                            .run(queryPlusForRunners,
                                                                                    responseContext)
                                                                            .accumulate(AggregateResult.ok(),
                                                                                    accumulator);

                                                                    // Return true if OK, false if resources were exhausted.
                                                                    return retVal;
                                                                } catch (QueryInterruptedException e) {
                                                                    throw e;
                                                                } catch (Exception e) {
                                                                    log.error(e,
                                                                            "Exception with one of the sequences!");
                                                                    throw Throwables.propagate(e);
                                                                }
                                                            }
                                                        });

                                                if (isSingleThreaded) {
                                                    waitForFutureCompletion(query,
                                                            Futures.allAsList(ImmutableList.of(future)),
                                                            hasTimeout, timeoutAt - System.currentTimeMillis());
                                                }

                                                return future;
                                            }
                                        })));

                        if (!isSingleThreaded) {
                            waitForFutureCompletion(query, futures, hasTimeout,
                                    timeoutAt - System.currentTimeMillis());
                        }

                        return RowBasedGrouperHelper.makeGrouperIterator(grouper, query, new Closeable() {
                            @Override
                            public void close() {
                                for (Closeable closeable : Lists.reverse(resources)) {
                                    CloseQuietly.close(closeable);
                                }
                            }
                        });
                    } catch (Throwable e) {
                        // Exception caught while setting up the iterator; release resources.
                        for (Closeable closeable : Lists.reverse(resources)) {
                            CloseQuietly.close(closeable);
                        }
                        throw e;
                    }
                }

                @Override
                public void cleanup(CloseableGrouperIterator<RowBasedKey, Row> iterFromMake) {
                    iterFromMake.close();
                }
            });
}

From source file: org.thingsboard.server.dao.relation.BaseRelationService.java

@Override
public ListenableFuture<Void> deleteEntityRelationsAsync(TenantId tenantId, EntityId entityId) {
    Cache cache = cacheManager.getCache(RELATIONS_CACHE);
    log.trace("Executing deleteEntityRelationsAsync [{}]", entityId);
    validate(entityId);
    List<ListenableFuture<List<EntityRelation>>> inboundRelationsList = new ArrayList<>();
    for (RelationTypeGroup typeGroup : RelationTypeGroup.values()) {
        inboundRelationsList.add(relationDao.findAllByTo(tenantId, entityId, typeGroup));
    }

    ListenableFuture<List<List<EntityRelation>>> inboundRelations = Futures.allAsList(inboundRelationsList);

    List<ListenableFuture<List<EntityRelation>>> outboundRelationsList = new ArrayList<>();
    for (RelationTypeGroup typeGroup : RelationTypeGroup.values()) {
        outboundRelationsList.add(relationDao.findAllByFrom(tenantId, entityId, typeGroup));
    }

    ListenableFuture<List<List<EntityRelation>>> outboundRelations = Futures.allAsList(outboundRelationsList);

    ListenableFuture<List<Boolean>> inboundDeletions = Futures.transformAsync(inboundRelations, relations -> {
        List<ListenableFuture<Boolean>> results = deleteRelationGroupsAsync(tenantId, relations, cache, true);
        return Futures.allAsList(results);
    });

    ListenableFuture<List<Boolean>> outboundDeletions = Futures.transformAsync(outboundRelations, relations -> {
        List<ListenableFuture<Boolean>> results = deleteRelationGroupsAsync(tenantId, relations, cache, false);
        return Futures.allAsList(results);
    });

    // Wait for both deletion passes to complete before the final outbound cleanup below.
    ListenableFuture<List<List<Boolean>>> deletionsFuture = Futures.allAsList(inboundDeletions,
            outboundDeletions);

    return Futures.transform(
            Futures.transformAsync(deletionsFuture,
                    (deletions) -> relationDao.deleteOutboundRelationsAsync(tenantId, entityId)),
            result -> null);
}

From source file: org.apache.cassandra.repair.RepairJob.java

/**
 * Creates {@link ValidationTask} and submit them to task executor so that tasks run sequentially.
 */
private ListenableFuture<List<TreeResponse>> sendSequentialValidationRequest(
        Collection<InetAddress> endpoints) {
    int gcBefore = Keyspace.open(desc.keyspace).getColumnFamilyStore(desc.columnFamily)
            .gcBefore(System.currentTimeMillis());
    List<ListenableFuture<TreeResponse>> tasks = new ArrayList<>(endpoints.size());

    Queue<InetAddress> requests = new LinkedList<>(endpoints);
    InetAddress address = requests.poll();
    ValidationTask firstTask = new ValidationTask(desc, address, gcBefore);
    logger.info("Validating {}", address);
    session.waitForValidation(Pair.create(desc, address), firstTask);
    tasks.add(firstTask);
    ValidationTask currentTask = firstTask;
    while (requests.size() > 0) {
        final InetAddress nextAddress = requests.poll();
        final ValidationTask nextTask = new ValidationTask(desc, nextAddress, gcBefore);
        tasks.add(nextTask);
        Futures.addCallback(currentTask, new FutureCallback<TreeResponse>() {
            public void onSuccess(TreeResponse result) {
                logger.info("Validating {}", nextAddress);
                session.waitForValidation(Pair.create(desc, nextAddress), nextTask);
                taskExecutor.execute(nextTask);
            }

            // failure is handled at root of job chain
            public void onFailure(Throwable t) {
            }
        });
        currentTask = nextTask;
    }
    // start running tasks
    taskExecutor.execute(firstTask);
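    // The returned list preserves the order of the tasks, even though they run sequentially via the callbacks above.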
    return Futures.allAsList(tasks);
}

From source file: com.facebook.buck.distributed.DistBuildArtifactCacheImpl.java

@Override
@SuppressWarnings("CheckReturnValue")
public void close() {
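    // transform is used only for its logging side effect once all contains-checks complete;
    // the returned future is intentionally dropped (hence @SuppressWarnings("CheckReturnValue")).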
    Futures.transform(Futures.allAsList(remoteCacheContainsFutures.values()), remoteContainsResults -> {
        LOG.info(
                "Hit [%d out of %d] targets checked in the remote cache. "
                        + "[%d] targets were uploaded from the local cache.",
                remoteContainsResults.stream().filter(r -> r).count(), remoteContainsResults.size(),
                localUploadFutures.size());
        return null;
    }, MoreExecutors.directExecutor());
}

From source file: com.microsoft.intellij.forms.CreateWebSiteForm.java

private void fillWebHostingPlans() {
    if (fillWebHostinPlansTaskHandle != null && !fillWebHostinPlansTaskHandle.isFinished()) {
        fillWebHostinPlansTaskHandle.cancel();
    }

    webHostingPlan = null;
    webHostingPlanComboBox.setModel(new DefaultComboBoxModel(new String[] { "<Loading...>" }));

    IDEHelper.ProjectDescriptor projectDescriptor = new IDEHelper.ProjectDescriptor(project.getName(),
            project.getBasePath() == null ? "" : project.getBasePath());

    try {
        fillWebHostinPlansTaskHandle = DefaultLoader.getIdeHelper().runInBackground(projectDescriptor,
                "Loading app service plans...", null, new CancellableTask() {
                    final AzureManager manager = AzureManagerImpl.getManager();
                    final Object lock = new Object();

                    CancellationHandle cancellationHandle;
                    List<WebHostingPlan> webHostingPlans;

                    @Override
                    public synchronized void run(final CancellationHandle cancellationHandle) throws Throwable {
                        this.cancellationHandle = cancellationHandle;
                        webHostingPlans = new ArrayList<WebHostingPlan>();

                        List<ListenableFuture<Void>> webSpaceFutures = new ArrayList<ListenableFuture<Void>>();

                        for (final String webSpaceName : manager.getWebSpaces(subscription.getId())) {
                            if (cancellationHandle.isCancelled()) {
                                return;
                            }

                            final SettableFuture<Void> webSpaceFuture = SettableFuture.create();

                            DefaultLoader.getIdeHelper().executeOnPooledThread(new Runnable() {
                                @Override
                                public void run() {
                                    loadWebHostingPlans(subscription, webSpaceName, webSpaceFuture);
                                }
                            });

                            webSpaceFutures.add(webSpaceFuture);
                        }

                        try {
                            Futures.allAsList(webSpaceFutures).get();
                        } catch (InterruptedException e) {
                            throw new AzureCmdException(e.getMessage(), e);
                        } catch (ExecutionException e) {
                            throw new AzureCmdException(e.getCause().getMessage(), e.getCause());
                        }
                    }

                    @Override
                    public void onCancel() {
                    }

                    @Override
                    public void onSuccess() {
                        DefaultLoader.getIdeHelper().invokeLater(new Runnable() {
                            @Override
                            public void run() {
                                DefaultComboBoxModel appServicePlanComboModel = new DefaultComboBoxModel(
                                        webHostingPlans.toArray());
                                appServicePlanComboModel.insertElementAt(createWebHostingPlanLabel, 0);
                                appServicePlanComboModel.setSelectedItem(null);
                                webHostingPlanComboBox.setModel(appServicePlanComboModel);

                                if (!webHostingPlans.isEmpty()) {
                                    webHostingPlanComboBox.setSelectedIndex(1);
                                }
                            }
                        });
                    }

                    @Override
                    public void onError(Throwable throwable) {
                        DefaultLoader.getUIHelper().showException(
                                "An error occurred while trying to load the app service plans", throwable,
                                "Azure Services Explorer - Error Loading App Service Plans", false, true);
                    }

                    private void loadWebHostingPlans(final Subscription subscription, final String webSpace,
                            final SettableFuture<Void> webSpaceFuture) {
                        try {
                            List<WebHostingPlan> webHostingPlans = manager
                                    .getWebHostingPlans(subscription.getId(), webSpace);

                            synchronized (lock) {
                                this.webHostingPlans.addAll(webHostingPlans);
                            }

                            webSpaceFuture.set(null);
                        } catch (AzureCmdException e) {
                            webSpaceFuture.setException(e);
                        }
                    }
                });
    } catch (AzureCmdException e) {
        DefaultLoader.getUIHelper().showException(
                "An error occurred while trying to load the app service plans", e,
                "Azure Services Explorer - Error Loading App Service Plans", false, true);
    }
}

From source file: com.facebook.presto.hive.HiveSplitIterable.java

private void loadPartitionSplits(final HiveSplitQueue hiveSplitQueue, SuspendingExecutor suspendingExecutor)
        throws InterruptedException {
    final Semaphore semaphore = new Semaphore(maxPartitionBatchSize);
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        ImmutableList.Builder<ListenableFuture<Void>> futureBuilder = ImmutableList.builder();

        Iterator<String> nameIterator = partitionNames.iterator();
        for (Partition partition : partitions) {
            checkState(nameIterator.hasNext(), "different number of partitions and partition names!");
            semaphore.acquire();
            final String partitionName = nameIterator.next();
            final Properties schema = getPartitionSchema(table, partition);
            final List<HivePartitionKey> partitionKeys = getPartitionKeys(table, partition);

            Path path = new Path(getPartitionLocation(table, partition));
            final Configuration configuration = hdfsEnvironment.getConfiguration(path);
            final InputFormat<?, ?> inputFormat = getInputFormat(configuration, schema, false);
            Path partitionPath = hdfsEnvironment.getFileSystemWrapper().wrap(path);

            FileSystem fs = partitionPath.getFileSystem(configuration);
            final LastSplitMarkingQueue markerQueue = new LastSplitMarkingQueue(hiveSplitQueue);

            if (inputFormat instanceof SymlinkTextInputFormat) {
                JobConf jobConf = new JobConf(configuration);
                FileInputFormat.setInputPaths(jobConf, partitionPath);
                InputSplit[] splits = inputFormat.getSplits(jobConf, 0);
                for (InputSplit rawSplit : splits) {
                    FileSplit split = ((SymlinkTextInputFormat.SymlinkTextInputSplit) rawSplit)
                            .getTargetSplit();

                    // get the filesystem for the target path -- it may be a different hdfs instance
                    FileSystem targetFilesystem = split.getPath().getFileSystem(configuration);
                    FileStatus fileStatus = targetFilesystem.getFileStatus(split.getPath());
                    markerQueue.addToQueue(createHiveSplits(partitionName, fileStatus,
                            targetFilesystem.getFileBlockLocations(fileStatus, split.getStart(),
                                    split.getLength()),
                            split.getStart(), split.getLength(), schema, partitionKeys, false));
                }
                markerQueue.finish();
                continue;
            }

            ListenableFuture<Void> partitionFuture = new AsyncRecursiveWalker(fs, suspendingExecutor)
                    .beginWalk(partitionPath, new FileStatusCallback() {
                        @Override
                        public void process(FileStatus file, BlockLocation[] blockLocations) {
                            if (bucket.isPresent()
                                    && !fileMatchesBucket(file.getPath().getName(), bucket.get())) {
                                return;
                            }

                            try {
                                boolean splittable = isSplittable(inputFormat,
                                        file.getPath().getFileSystem(configuration), file.getPath());

                                markerQueue.addToQueue(createHiveSplits(partitionName, file, blockLocations, 0,
                                        file.getLen(), schema, partitionKeys, splittable));
                            } catch (IOException e) {
                                hiveSplitQueue.fail(e);
                            }
                        }
                    });

            // release the semaphore when the partition finishes
            Futures.addCallback(partitionFuture, new FutureCallback<Void>() {
                @Override
                public void onSuccess(Void result) {
                    markerQueue.finish();
                    semaphore.release();
                }

                @Override
                public void onFailure(Throwable t) {
                    markerQueue.finish();
                    semaphore.release();
                }
            });
            futureBuilder.add(partitionFuture);
        }

        // when all partitions finish, mark the queue as finished
        Futures.addCallback(Futures.allAsList(futureBuilder.build()), new FutureCallback<List<Void>>() {
            @Override
            public void onSuccess(List<Void> result) {
                hiveSplitQueue.finished();
            }

            @Override
            public void onFailure(Throwable t) {
                hiveSplitQueue.fail(t);
            }
        });
    } catch (Throwable e) {
        hiveSplitQueue.fail(e);
        Throwables.propagateIfInstanceOf(e, Error.class);
    }
}

From source file: com.yahoo.yqlplus.engine.internal.java.sequences.Sequences.java

public static <ROW, SEQUENCE extends Iterable<ROW>, SET> ListenableFuture<List<ROW>> invokeAsyncBatchSet(
        final Executor executor, final AsyncFunction<List<SET>, SEQUENCE> source, List<SET> keys, Tracer tracer,
        Timeout timeout, TimeoutHandler handler) throws Exception {
    List<ListenableFuture<SEQUENCE>> results = Lists.newArrayList();
    final Tracer childTracer = tracer.start(tracer.getGroup(), tracer.getName());
    List<SET> methodArgs = Lists.newArrayList();
    for (int i = 0; i < keys.size(); i++) {
        if (keys.get(i) != null) {
            methodArgs.add(keys.get(i));
        } else {
            ListenableFuture<SEQUENCE> result = source.apply(methodArgs);
            results.add(result);
            result.addListener(new Runnable() {
                @Override
                public void run() {
                    childTracer.end();
                }
            }, MoreExecutors.sameThreadExecutor());
            methodArgs = Lists.newArrayList();
        }
    }
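    // Gather every batch's sequence; the combined future fails if any batch fails.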
    ListenableFuture<List<SEQUENCE>> gather = Futures.allAsList(results);
    return handler.withTimeout(gatherResults(executor, gather, 1), timeout.verify(), timeout.getTickUnits());
}