Example usage for com.google.common.util.concurrent Futures getUnchecked

Introduction

This page collects example usages of com.google.common.util.concurrent.Futures#getUnchecked, drawn from several open-source projects.

Prototype

@GwtIncompatible("TODO")
public static <V> V getUnchecked(Future<V> future) 

Document

Returns the result of calling Future#get() uninterruptibly on a task known not to throw a checked exception.
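
As a quick orientation before the real-world examples below, here is a minimal standalone sketch (not taken from any of the listed projects; the class name is invented) showing the call on an already-completed future:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

public class GetUncheckedSketch {
    public static void main(String[] args) {
        // An already-completed future: getUnchecked returns its value without
        // forcing the caller to handle InterruptedException or ExecutionException.
        ListenableFuture<String> future = Futures.immediateFuture("hello");
        String value = Futures.getUnchecked(future);
        System.out.println(value); // prints "hello"
    }
}

If the task fails, getUnchecked rethrows the failure unchecked rather than as a checked ExecutionException, which is why it should only be used on tasks known not to throw checked exceptions.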

Usage

From source file:com.continuuity.weave.discovery.ZKDiscoveryService.java

private void updateService(NodeChildren children, final String service) {
    final String sb = "/" + service;
    final Multimap<String, Discoverable> newServices = HashMultimap.create(services.get());
    newServices.removeAll(service);

    // Fetch data of all children nodes in parallel.
    List<OperationFuture<NodeData>> dataFutures = Lists.newArrayListWithCapacity(children.getChildren().size());
    for (String child : children.getChildren()) {
        String path = sb + "/" + child;
        dataFutures.add(zkClient.getData(path));
    }

    // Update the service map when all fetching are done.
    final ListenableFuture<List<NodeData>> fetchFuture = Futures.successfulAsList(dataFutures);
    fetchFuture.addListener(new Runnable() {
        @Override
        public void run() {
            for (NodeData nodeData : Futures.getUnchecked(fetchFuture)) {
                // For successful fetch, decode the content.
                if (nodeData != null) {
                    Discoverable discoverable = decode(nodeData.getData());
                    if (discoverable != null) {
                        newServices.put(service, discoverable);
                    }
                }
            }
            // Replace the local service register with changes.
            services.set(newServices);
        }
    }, Threads.SAME_THREAD_EXECUTOR);
}
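
The pattern above works because Futures.successfulAsList produces an aggregate future that does not fail when its inputs fail; unsuccessful fetches simply show up as null entries, so reading it with getUnchecked inside the listener is safe. A self-contained sketch of the same shape (class name and values are invented; MoreExecutors.directExecutor() stands in for Threads.SAME_THREAD_EXECUTOR):

import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;

public class SuccessfulAsListSketch {
    public static void main(String[] args) {
        // One fetch succeeds, one fails; successfulAsList maps the failure to null.
        ListenableFuture<String> ok = Futures.immediateFuture("node-data");
        ListenableFuture<String> failed = Futures.immediateFailedFuture(new RuntimeException("fetch failed"));

        final ListenableFuture<List<String>> all = Futures.successfulAsList(ImmutableList.of(ok, failed));
        all.addListener(new Runnable() {
            @Override
            public void run() {
                // Safe: the aggregate future itself does not fail, so getUnchecked cannot throw here.
                for (String result : Futures.getUnchecked(all)) {
                    if (result != null) {
                        System.out.println("fetched: " + result);
                    }
                }
            }
        }, MoreExecutors.directExecutor());
    }
}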

From source file:com.dogecoin.dogecoinj.examples.ExamplePaymentChannelClient.java

private void waitForSufficientBalance(Coin amount) {
    // Not enough money in the wallet.
    Coin amountPlusFee = amount.add(Wallet.SendRequest.DEFAULT_FEE_PER_KB);
    // ESTIMATED because we don't really need to wait for confirmation.
    ListenableFuture<Coin> balanceFuture = appKit.wallet().getBalanceFuture(amountPlusFee,
            Wallet.BalanceType.ESTIMATED);
    if (!balanceFuture.isDone()) {
        System.out
                .println("Please send " + amountPlusFee.toFriendlyString() + " to " + myKey.toAddress(params));
        Futures.getUnchecked(balanceFuture);
    }
}
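
Here getUnchecked simply serves as a blocking wait: the thread parks uninterruptibly until the balance future completes, with no checked exceptions to catch. A self-contained sketch of that blocking-wait usage (class, helper thread, and amount are invented for illustration):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.SettableFuture;

public class BlockingWaitSketch {
    public static void main(String[] args) {
        // A SettableFuture stands in for the wallet's balance future.
        final SettableFuture<Long> balanceFuture = SettableFuture.create();

        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    Thread.sleep(500); // simulate the payment arriving later
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
                balanceFuture.set(100000L);
            }
        }).start();

        if (!balanceFuture.isDone()) {
            System.out.println("Waiting for sufficient balance...");
        }
        // Blocks uninterruptibly until the future completes; a failure would surface unchecked.
        System.out.println("Balance now: " + Futures.getUnchecked(balanceFuture));
    }
}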

From source file:org.onosproject.store.primitives.impl.DatabaseManager.java

@Activate
public void activate() {
    localNodeId = clusterService.getLocalNode().id();

    Map<PartitionId, Set<NodeId>> partitionMap = Maps.newHashMap();
    clusterMetadataService.getClusterMetadata().getPartitions().forEach(p -> {
        partitionMap.put(p.getId(), Sets.newHashSet(p.getMembers()));
    });

    String[] activeNodeUris = partitionMap.values().stream().reduce((s1, s2) -> Sets.union(s1, s2)).get()
            .stream().map(this::nodeIdToUri).toArray(String[]::new);

    String localNodeUri = nodeIdToUri(clusterMetadataService.getLocalNode().id());
    Protocol protocol = new CopycatCommunicationProtocol(clusterService, clusterCommunicator);

    ClusterConfig clusterConfig = new ClusterConfig().withProtocol(protocol)
            .withElectionTimeout(electionTimeoutMillis(activeNodeUris))
            .withHeartbeatInterval(heartbeatTimeoutMillis(activeNodeUris)).withMembers(activeNodeUris)
            .withLocalMember(localNodeUri);

    CopycatConfig copycatConfig = new CopycatConfig().withName("onos").withClusterConfig(clusterConfig)
            .withDefaultSerializer(new DatabaseSerializer()).withDefaultExecutor(
                    Executors.newSingleThreadExecutor(new NamedThreadFactory("copycat-coordinator-%d")));

    coordinator = new DefaultClusterCoordinator(copycatConfig.resolve());

    Function<PartitionId, Log> logFunction = id -> id.asInt() == 0 ? newInMemoryLog() : newPersistentLog();

    Map<PartitionId, Database> databases = Maps.transformEntries(partitionMap, (k, v) -> {
        String[] replicas = v.stream().map(this::nodeIdToUri).toArray(String[]::new);
        DatabaseConfig config = newDatabaseConfig(String.format("p%s", k), logFunction.apply(k), replicas);
        return coordinator.<Database>getResource(config.getName(),
                config.resolve(clusterConfig).withSerializer(copycatConfig.getDefaultSerializer())
                        .withDefaultExecutor(copycatConfig.getDefaultExecutor()));
    });

    inMemoryDatabase = databases.remove(PartitionId.from(0));

    partitionedDatabase = new PartitionedDatabase("onos-store", databases.values());

    CompletableFuture<Void> status = coordinator.open().thenCompose(v -> CompletableFuture
            .allOf(inMemoryDatabase.open(), partitionedDatabase.open()).whenComplete((db, error) -> {
                if (error != null) {
                    log.error("Failed to initialize database.", error);
                } else {
                    log.info("Successfully initialized database.");
                }
            }));

    Futures.getUnchecked(status);

    AsyncConsistentMap<TransactionId, Transaction> transactions = this
            .<TransactionId, Transaction>consistentMapBuilder().withName("onos-transactions")
            .withSerializer(Serializer.using(KryoNamespaces.API, MapUpdate.class, MapUpdate.Type.class,
                    Transaction.class, Transaction.State.class))
            .buildAsyncMap();

    transactionManager = new TransactionManager(partitionedDatabase, transactions);
    partitionedDatabase.setTransactionManager(transactionManager);

    log.info("Started");
}
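
Note that the status future here is a java.util.concurrent.CompletableFuture rather than a Guava ListenableFuture; getUnchecked accepts any Future, so it works as a startup barrier all the same. A reduced sketch of that usage (class name and the simulated work are invented):

import com.google.common.util.concurrent.Futures;
import java.util.concurrent.CompletableFuture;

public class CompletableFutureSketch {
    public static void main(String[] args) {
        // CompletableFuture implements java.util.concurrent.Future, so getUnchecked can block on it too.
        CompletableFuture<Void> status = CompletableFuture.runAsync(new Runnable() {
            @Override
            public void run() {
                System.out.println("opening databases (simulated)");
            }
        });
        Futures.getUnchecked(status); // wait for startup to finish; failures propagate unchecked
        System.out.println("Started");
    }
}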

From source file:co.cask.tigon.internal.app.runtime.distributed.AbstractProgramTwillRunnable.java

@Override
public void destroy() {
    LOG.info("Releasing resources: {}", name);
    Futures.getUnchecked(Services.chainStop(resourceReporter, metricsCollectionService, zkClientService));
    LOG.info("Runnable stopped: {}", name);
}

From source file:com.palantir.atlasdb.keyvalue.impl.TieredKeyValueService.java

@Override
public void truncateTable(final String tableName) {
    if (isNotTiered(tableName)) {
        primary.truncateTable(tableName);
        return;
    }
    Future<?> primaryFuture = executor.submit(new Runnable() {
        @Override
        public void run() {
            primary.truncateTable(tableName);
        }
    });
    secondary.truncateTable(tableName);
    Futures.getUnchecked(primaryFuture);
}
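
The idea here is to truncate the two tiers in parallel: the primary truncation is submitted to an executor while the secondary one runs on the calling thread, and getUnchecked then joins the background task, rethrowing any failure unchecked. A self-contained sketch of that submit-then-join pattern (class name and the printed work are invented):

import com.google.common.util.concurrent.Futures;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitThenJoinSketch {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        try {
            // Run the "primary" work on the executor while the "secondary" work runs inline.
            Future<?> primaryFuture = executor.submit(new Runnable() {
                @Override
                public void run() {
                    System.out.println("truncating primary tier");
                }
            });
            System.out.println("truncating secondary tier");
            // Join the background task; a failure would surface as an unchecked exception.
            Futures.getUnchecked(primaryFuture);
        } finally {
            executor.shutdown();
        }
    }
}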

From source file:org.onosproject.store.consistent.impl.DatabaseManager.java

@Activate
public void activate() {
    localNodeId = clusterService.getLocalNode().id();
    // load database configuration
    File databaseDefFile = new File(PARTITION_DEFINITION_FILE);
    log.info("Loading database definition: {}", databaseDefFile.getAbsolutePath());

    Map<String, Set<NodeInfo>> partitionMap;
    try {
        DatabaseDefinitionStore databaseDefStore = new DatabaseDefinitionStore(databaseDefFile);
        if (!databaseDefFile.exists()) {
            createDefaultDatabaseDefinition(databaseDefStore);
        }
        partitionMap = databaseDefStore.read().getPartitions();
    } catch (IOException e) {
        throw new IllegalStateException("Failed to load database config", e);
    }

    String[] activeNodeUris = partitionMap.values().stream().reduce((s1, s2) -> Sets.union(s1, s2)).get()
            .stream().map(this::nodeToUri).toArray(String[]::new);

    String localNodeUri = nodeToUri(NodeInfo.of(clusterService.getLocalNode()));
    Protocol protocol = new CopycatCommunicationProtocol(clusterService, clusterCommunicator);

    ClusterConfig clusterConfig = new ClusterConfig().withProtocol(protocol)
            .withElectionTimeout(electionTimeoutMillis(activeNodeUris))
            .withHeartbeatInterval(heartbeatTimeoutMillis(activeNodeUris)).withMembers(activeNodeUris)
            .withLocalMember(localNodeUri);

    CopycatConfig copycatConfig = new CopycatConfig().withName("onos").withClusterConfig(clusterConfig)
            .withDefaultSerializer(new DatabaseSerializer()).withDefaultExecutor(
                    Executors.newSingleThreadExecutor(new NamedThreadFactory("copycat-coordinator-%d")));

    coordinator = new DefaultClusterCoordinator(copycatConfig.resolve());

    DatabaseConfig inMemoryDatabaseConfig = newDatabaseConfig(BASE_PARTITION_NAME, newInMemoryLog(),
            activeNodeUris);
    inMemoryDatabase = coordinator.getResource(inMemoryDatabaseConfig.getName(),
            inMemoryDatabaseConfig.resolve(clusterConfig).withSerializer(copycatConfig.getDefaultSerializer())
                    .withDefaultExecutor(copycatConfig.getDefaultExecutor()));

    List<Database> partitions = partitionMap.entrySet().stream().map(entry -> {
        String[] replicas = entry.getValue().stream().map(this::nodeToUri).toArray(String[]::new);
        return newDatabaseConfig(entry.getKey(), newPersistentLog(), replicas);
    }).map(config -> {
        Database db = coordinator.getResource(config.getName(),
                config.resolve(clusterConfig).withSerializer(copycatConfig.getDefaultSerializer())
                        .withDefaultExecutor(copycatConfig.getDefaultExecutor()));
        return db;
    }).collect(Collectors.toList());

    partitionedDatabase = new PartitionedDatabase("onos-store", partitions);

    CompletableFuture<Void> status = coordinator.open().thenCompose(v -> CompletableFuture
            .allOf(inMemoryDatabase.open(), partitionedDatabase.open()).whenComplete((db, error) -> {
                if (error != null) {
                    log.error("Failed to initialize database.", error);
                } else {
                    log.info("Successfully initialized database.");
                }
            }));

    Futures.getUnchecked(status);

    transactionManager = new TransactionManager(partitionedDatabase, consistentMapBuilder());
    partitionedDatabase.setTransactionManager(transactionManager);

    log.info("Started");
}

From source file:org.opendaylight.groupbasedpolicy.renderer.ofoverlay.node.FlowCapableNodeConnectorListener.java

private Map<Name, Endpoint> readEpsWithOfOverlayAugByPortName(ReadTransaction rTx) {
    Optional<Endpoints> potentialEps = Futures
            .getUnchecked(rTx.read(LogicalDatastoreType.OPERATIONAL, endpointsIid));
    if (!potentialEps.isPresent() || potentialEps.get().getEndpoint() == null) {
        return Collections.emptyMap();
    }
    Map<Name, Endpoint> epsByPortName = new HashMap<>();
    for (Endpoint ep : potentialEps.get().getEndpoint()) {
        OfOverlayContext ofOverlayEp = ep.getAugmentation(OfOverlayContext.class);
        if (ofOverlayEp != null && ofOverlayEp.getPortName() != null) {
            epsByPortName.put(ofOverlayEp.getPortName(), ep);
        }
    }
    return epsByPortName;
}

From source file:org.onosproject.store.primitives.impl.StorageManager.java

@Override
public Collection<TransactionId> getPendingTransactions() {
    return Futures.getUnchecked(transactions.keySet());
}

From source file:com.facebook.buck.distributed.DistBuildArtifactCacheImpl.java

@Override
public synchronized void prewarmRemoteContains(ImmutableSet<BuildRule> rulesToBeChecked) {
    @SuppressWarnings("PMD.PrematureDeclaration")
    Stopwatch stopwatch = Stopwatch.createStarted();
    Set<BuildRule> unseenRules = rulesToBeChecked.stream()
            .filter(rule -> !remoteCacheContainsFutures.containsKey(rule)).collect(Collectors.toSet());

    if (unseenRules.isEmpty()) {
        return;
    }

    LOG.info("Checking remote cache for [%d] new rules.", unseenRules.size());
    Map<BuildRule, ListenableFuture<RuleKey>> rulesToKeys = Maps.asMap(unseenRules,
            rule -> ruleKeyCalculator.calculate(eventBus, rule));

    ListenableFuture<Map<RuleKey, CacheResult>> keysToCacheResultFuture = Futures
            .transformAsync(Futures.allAsList(rulesToKeys.values()), ruleKeys -> {
                LOG.info("Computing RuleKeys for %d new rules took %dms.", unseenRules.size(),
                        stopwatch.elapsed(TimeUnit.MILLISECONDS));
                stopwatch.reset();
                stopwatch.start();
                return multiContainsAsync(ruleKeys);
            }, executorService);

    Map<BuildRule, ListenableFuture<Boolean>> containsResultsForUnseenRules = Maps
            .asMap(unseenRules,
                    rule -> Futures.transform(keysToCacheResultFuture, keysToCacheResult -> Objects
                            .requireNonNull(keysToCacheResult.get(Futures.getUnchecked(rulesToKeys.get(rule))))
                            .getType().isSuccess(), MoreExecutors.directExecutor()));

    remoteCacheContainsFutures.putAll(containsResultsForUnseenRules);
    Futures.allAsList(containsResultsForUnseenRules.values())
            .addListener(() -> LOG.info("Checking the remote cache for %d rules took %dms.", unseenRules.size(),
                    stopwatch.elapsed(TimeUnit.MILLISECONDS)), MoreExecutors.directExecutor());
}
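
In this example getUnchecked is called inside a transform callback on a per-rule key future that is already complete by then, because the callback only runs after Futures.allAsList over those same futures has succeeded; the call therefore never blocks. A reduced sketch of that shape (class name and values are invented):

import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;

public class TransformThenGetUncheckedSketch {
    public static void main(String[] args) {
        final ListenableFuture<Integer> keyFuture = Futures.immediateFuture(42);
        ListenableFuture<List<Integer>> allKeys = Futures.allAsList(ImmutableList.of(keyFuture));

        // By the time this transform runs, keyFuture is already done,
        // so getUnchecked inside the callback cannot block.
        ListenableFuture<String> result = Futures.transform(allKeys,
                keys -> "first key: " + Futures.getUnchecked(keyFuture),
                MoreExecutors.directExecutor());

        System.out.println(Futures.getUnchecked(result)); // prints "first key: 42"
    }
}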

From source file:com.continuuity.weave.internal.appmaster.RunningContainers.java

/**
 * Stops all running services. Only called when the AppMaster stops.
 */
void stopAll() {
    containerLock.lock();
    try {
        // Stop it one by one in reverse order of start sequence
        Iterator<String> itor = startSequence.descendingIterator();
        List<ListenableFuture<ServiceController.State>> futures = Lists.newLinkedList();
        while (itor.hasNext()) {
            String runnableName = itor.next();
            LOG.info("Stopping all instances of " + runnableName);

            futures.clear();
            // Parallel stops all running containers of the current runnable.
            for (WeaveContainerController controller : containers.row(runnableName).values()) {
                futures.add(controller.stop());
            }
            // Wait for containers to stop. Assumes the future returned by Futures.successfulAsList won't throw exception.
            Futures.getUnchecked(Futures.successfulAsList(futures));

            LOG.info("Terminated all instances of " + runnableName);
        }
        containers.clear();
        runnableInstances.clear();
    } finally {
        containerLock.unlock();
    }
}