Example usage for com.google.common.util.concurrent ListenableFuture get

List of usage examples for com.google.common.util.concurrent ListenableFuture get

Introduction

On this page you can find example usage of com.google.common.util.concurrent ListenableFuture's get method.

Prototype

V get() throws InterruptedException, ExecutionException;

Source Link

Document

Waits if necessary for the computation to complete, and then retrieves its result.

Usage

From source file:org.thingsboard.server.controller.DeviceController.java

@PreAuthorize("hasAnyAuthority('TENANT_ADMIN', 'CUSTOMER_USER')")
@RequestMapping(value = "/devices", params = { "deviceIds" }, method = RequestMethod.GET)
@ResponseBody
public List<Device> getDevicesByIds(@RequestParam("deviceIds") String[] strDeviceIds)
        throws ThingsboardException {
    checkArrayParameter("deviceIds", strDeviceIds);
    try {
        // Resolve the caller's tenant/customer scope from the security context.
        SecurityUser currentUser = getCurrentUser();
        TenantId tenantId = currentUser.getTenantId();
        CustomerId customerId = currentUser.getCustomerId();

        // Parse the raw id strings into typed DeviceId values.
        List<DeviceId> deviceIds = new ArrayList<>(strDeviceIds.length);
        for (String strDeviceId : strDeviceIds) {
            deviceIds.add(new DeviceId(toUUID(strDeviceId)));
        }

        // A null/nil customer id means tenant scope; otherwise restrict to the customer.
        boolean tenantScope = customerId == null || customerId.isNullUid();
        ListenableFuture<List<Device>> devicesFuture = tenantScope
                ? deviceService.findDevicesByTenantIdAndIdsAsync(tenantId, deviceIds)
                : deviceService.findDevicesByTenantIdCustomerIdAndIdsAsync(tenantId, customerId, deviceIds);

        // Block until the async lookup completes; reject a null result.
        return checkNotNull(devicesFuture.get());
    } catch (Exception e) {
        throw handleException(e);
    }
}

From source file:org.opendaylight.alto.services.provider.simple.SimpleAltoService.java

@Override
public PropertyResponse getEndpointProperty(PropertyRequest request) {
    // Look up the endpoint-property map in the data store and project the
    // requested properties for each requested endpoint address.
    InstanceIdentifier<EndpointPropertyMap> eiid = getEndpointPropertyMapIID();
    m_logger.info("EndpointPropertyMap IID: {}", eiid);
    updatePrivateNetworkMap();

    try {
        ReadOnlyTransaction tx = m_db.newReadOnlyTransaction();
        ListenableFuture<Optional<EndpointPropertyMap>> result = tx.read(LogicalDatastoreType.CONFIGURATION,
                eiid);
        // Block once for the read result instead of calling get() twice.
        Optional<EndpointPropertyMap> optional = result.get();
        if (optional.isPresent()) {
            EndpointPropertyMap epm = optional.get();
            ObjectNode node = m_mapper.valueToTree(epm);
            m_logger.info(m_mapper.writeValueAsString(epm));

            RFC7285EndpointPropertyMap endpointPropMap = m_epmconverter.convert(node);
            RFC7285EndpointPropertyMap ret = new RFC7285EndpointPropertyMap();
            ret.meta = endpointPropMap.meta;
            ret.meta.netmap_tags = getDependentTags(endpointPropMap.meta, request.properties);
            for (String addr : request.endpoints) {
                Map<String, String> newProps = new LinkedHashMap<String, String>();
                // BUG FIX: the membership test used addr.toLowerCase() while the
                // lookup used the original-case addr, so a mixed-case request hit
                // the containsKey branch but map.get(addr) returned null (NPE on
                // props.containsKey below). Use the same lower-cased key for both;
                // the map is presumably keyed by lower-cased addresses — confirm.
                String key = addr.toLowerCase();
                if (endpointPropMap.map.containsKey(key)) {
                    Map<String, String> props = endpointPropMap.map.get(key);
                    for (String type : request.properties) {
                        if (props.containsKey(type)) {
                            newProps.put(type, props.get(type));
                        }
                    }
                } else if (request.properties.contains(PRIV_ENDPOINT_PROPERTY_NAME)) {
                    newProps = getPrivateEndpointProperty(addr);
                }
                if (!newProps.isEmpty())
                    ret.map.put(addr, newProps);
            }
            return j_mapper.asPropertyResponse(j_mapper.asJSON(ret));
        } else {
            m_logger.info("Failed to read with eiid: {}", eiid);
        }
    } catch (Exception e) {
        // NOTE(review): prefer m_logger.error over printStackTrace; kept the
        // stderr trace in addition so existing diagnostics are not lost.
        m_logger.error("Failed to build endpoint property response", e);
        e.printStackTrace();
    }
    // Null signals failure to the caller, matching the original contract.
    return null;
}

From source file:org.waveprotocol.box.server.waveserver.WaveletContainerImpl.java

/**
 * Persists {@code version} via {@link #waveletState} and, when the write
 * completes, flushes the state and notifies listeners of the commit.
 * Must be called while holding {@code writeLock}; the continuation runs on
 * {@code storageContinuationExecutor} and re-acquires the lock itself.
 *
 * @param version         hashed version to persist and then flush/announce
 * @param domainsToNotify domains passed through to {@code notifyOfCommit}
 */
protected void persist(final HashedVersion version, final ImmutableSet<String> domainsToNotify) {
    Preconditions.checkState(writeLock.isHeldByCurrentThread(), "must hold write lock");
    final ListenableFuture<Void> result = waveletState.persist(version);
    result.addListener(new Runnable() {
        @Override
        public void run() {
            try {
                // Surface the persist outcome; the listener only runs after completion.
                result.get();
            } catch (InterruptedException e) {
                // Restore the interrupt flag rather than swallowing it.
                Thread.currentThread().interrupt();
            } catch (ExecutionException e) {
                LOG.severe("Version " + version, e);
            }
            // NOTE(review): flush/notify run even when the persist above failed —
            // presumably intentional best-effort continuation; confirm.
            acquireWriteLock();
            try {
                waveletState.flush(version);
                notifyOfCommit(version, domainsToNotify);
            } finally {
                releaseWriteLock();
            }
        }
    }, storageContinuationExecutor);
}

From source file:org.jclouds.s3.blobstore.strategy.internal.ParallelMultipartUploadStrategy.java

/**
 * Asynchronously uploads one part of a multipart S3 upload and wires a
 * completion listener that records the ETag on success or schedules a retry
 * on failure. Always removes the part from {@code activeParts} and counts
 * down {@code latch} when the attempt finishes (or when retries are
 * exhausted up front).
 *
 * @param errors   shared failure counter; once it exceeds {@code maxRetries}
 *                 no further attempts are made for any part
 * @param toRetry  queue of parts to re-submit after a recoverable failure
 */
protected void prepareUploadPart(final String container, final String key, final String uploadId,
        final Integer part, final Payload payload, final long offset, final long size,
        final SortedMap<Integer, String> etags, final BlockingQueue<Integer> activeParts,
        final Map<Integer, ListenableFuture<String>> futureParts, final AtomicInteger errors,
        final int maxRetries, final Map<Integer, Exception> errorMap, final Queue<Part> toRetry,
        final CountDownLatch latch) {
    if (errors.get() > maxRetries) {
        activeParts.remove(part); // remove part from the bounded-queue without blocking
        latch.countDown();
        return;
    }
    final S3Client client = blobstore.getContext().unwrapApi(S3Client.class);
    final Payload chunkedPart = slicer.slice(payload, offset, size);
    logger.debug(String.format("async uploading part %s of %s to container %s with uploadId %s", part, key,
            container, uploadId));
    final long start = System.currentTimeMillis();
    final ListenableFuture<String> futureETag = executor.submit(new Callable<String>() {
        @Override
        public String call() throws Exception {
            return client.uploadPart(container, key, part, uploadId, chunkedPart);
        }
    });
    futureETag.addListener(new Runnable() {
        @Override
        public void run() {
            try {
                etags.put(part, futureETag.get());
                logger.debug(
                        String.format("async uploaded part %s of %s to container %s in %sms with uploadId %s",
                                part, key, container, System.currentTimeMillis() - start, uploadId));
            } catch (CancellationException e) {
                // Cancellation is not counted as an error and is not retried.
                errorMap.put(part, e);
                logger.debug(partFailureMessage(e, part, offset, size, container, uploadId, start));
            } catch (Exception e) {
                errorMap.put(part, e);
                logger.error(partFailureMessage(e, part, offset, size, container, uploadId, start), e);
                // Schedule a retry only while the shared error budget holds.
                if (errors.incrementAndGet() <= maxRetries)
                    toRetry.add(new Part(part, offset, size));
            } finally {
                activeParts.remove(part); // remove part from the bounded-queue without blocking
                futureParts.remove(part);
                latch.countDown();
            }
        }
    }, executor);
    futureParts.put(part, futureETag);
}

/**
 * Formats the diagnostic message used when an async part upload fails;
 * previously duplicated verbatim in both catch arms above.
 */
private String partFailureMessage(Exception e, Integer part, long offset, long size, String container,
        String uploadId, long start) {
    return String.format(
            "%s while uploading part %s - [%s,%s] to container %s with uploadId: %s running since %dms",
            e.getMessage(), part, offset, size, container, uploadId,
            System.currentTimeMillis() - start);
}

From source file:org.apache.hadoop.fs.s3a.S3AFastOutputStream.java

/**
 * Uploads the buffered bytes as a single (non-multipart) S3 PUT and blocks
 * until the upload completes, translating execution failures to IOException.
 */
private void putObject() throws IOException {
    LOG.debug("Executing regular upload for bucket '{}' key '{}'", bucket, key);

    // Describe the buffered payload to S3.
    final ObjectMetadata metadata = createDefaultMetadata();
    final int contentLength = buffer.size();
    metadata.setContentLength(contentLength);

    final PutObjectRequest request = fs.newPutObjectRequest(key, metadata,
            new ByteArrayInputStream(buffer.toByteArray()));
    request.setGeneralProgressListener(progressListener);

    // Run the PUT on the shared executor.
    ListenableFuture<PutObjectResult> uploadFuture = executorService.submit(new Callable<PutObjectResult>() {
        @Override
        public PutObjectResult call() throws Exception {
            fs.incrementPutStartStatistics(contentLength);
            return client.putObject(request);
        }
    });

    // Block for completion; preserve the interrupt flag, unwrap upload errors.
    try {
        uploadFuture.get();
    } catch (InterruptedException ie) {
        LOG.warn("Interrupted object upload: {}", ie, ie);
        Thread.currentThread().interrupt();
    } catch (ExecutionException ee) {
        throw extractException("regular upload", key, ee);
    }
}

From source file:io.datakernel.service.ServiceGraph.java

/**
 * Recursively starts (or stops) {@code node} after all of its dependency
 * nodes have completed, memoizing one SettableFuture per node in
 * {@code futures}. The returned future completes with the longest
 * start/stop path ending at this node (used for timing diagnostics).
 *
 * @param node     graph node whose service should be started/stopped
 * @param start    true to start services, false to stop them
 * @param futures  per-node memoization map; also serves as the result cache
 * @param executor executor on which listeners and service callbacks run
 * @return future completing with the longest dependency path for this node
 */
private ListenableFuture<LongestPath> processNode(final Object node, final boolean start,
        Map<Object, ListenableFuture<LongestPath>> futures, final Executor executor) {
    // Recurse into dependencies first: forwards edges when starting,
    // backwards edges when stopping.
    List<ListenableFuture<LongestPath>> dependencyFutures = new ArrayList<>();
    for (Object dependencyNode : (start ? forwards : backwards).get(node)) {
        ListenableFuture<LongestPath> dependencyFuture = processNode(dependencyNode, start, futures, executor);
        dependencyFutures.add(dependencyFuture);
    }

    // Memoization check happens after the recursion above; repeated visits
    // re-walk dependencies but get cached futures back immediately.
    if (futures.containsKey(node)) {
        return futures.get(node);
    }

    final SettableFuture<LongestPath> future = SettableFuture.create();
    futures.put(node, future);

    // Completes once every dependency future has completed.
    final ListenableFuture<LongestPath> dependenciesFuture = combineDependenciesFutures(dependencyFutures,
            executor);

    dependenciesFuture.addListener(new Runnable() {
        @Override
        public void run() {
            try {
                final LongestPath longestPath = dependenciesFuture.get();

                // Nodes without a registered service just propagate the path.
                Service service = services.get(node);
                if (service == null) {
                    logger.debug("...skipping no-service node: " + nodeToString(node));
                    future.set(longestPath);
                    return;
                }

                // Never stop a node that was not successfully started.
                if (!start && !runningNodes.contains(node)) {
                    logger.debug("...skipping not running node: " + nodeToString(node));
                    future.set(longestPath);
                    return;
                }

                final Stopwatch sw = Stopwatch.createStarted();
                final ListenableFuture<?> serviceFuture = (start ? service.start() : service.stop());
                logger.info((start ? "Starting" : "Stopping") + " node: " + nodeToString(node));
                serviceFuture.addListener(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            serviceFuture.get();

                            // Track running state so a later stop pass knows what to skip.
                            if (start) {
                                runningNodes.add(node);
                            } else {
                                runningNodes.remove(node);
                            }

                            long elapsed = sw.elapsed(MILLISECONDS);
                            logger.info((start ? "Started" : "Stopped") + " " + nodeToString(node)
                                    + (elapsed >= 1L ? (" in " + sw) : ""));
                            // Extend the longest path with this node's own elapsed time.
                            future.set(
                                    new LongestPath(elapsed + (longestPath != null ? longestPath.totalTime : 0),
                                            elapsed, node, longestPath));
                        } catch (InterruptedException | ExecutionException e) {
                            logger.error("error: " + nodeToString(node), e);
                            future.setException(getRootCause(e));
                        }
                    }
                }, executor);
            } catch (InterruptedException | ExecutionException e) {
                // A failed dependency fails this node's future too.
                future.setException(getRootCause(e));
            }
        }
    }, executor);

    return future;
}

From source file:org.opendaylight.faas.fabric.general.FabricManagementAPIProvider.java

/**
 * Returns the next fabric number and persists an incremented counter back
 * to the configuration datastore. Falls back to 1 when no setting exists
 * or the datastore read fails.
 */
private long genNextFabricNum() {
    final InstanceIdentifier<FabricsSetting> settingPath = InstanceIdentifier.create(FabricsSetting.class);
    ReadWriteTransaction tx = dataBroker.newReadWriteTransaction();

    // Default to 1 when nothing has been persisted yet.
    long current = 1;
    ListenableFuture<Optional<FabricsSetting>> readFuture = tx.read(LogicalDatastoreType.CONFIGURATION,
            settingPath);
    try {
        Optional<FabricsSetting> setting = readFuture.get();
        if (setting.isPresent()) {
            current = setting.get().getNextFabricNum();
        }
    } catch (InterruptedException | ExecutionException e) {
        LOG.error("can not read fabric setting", e);
    }

    // Write back current + 1 for the next caller and submit asynchronously.
    FabricsSettingBuilder settingBuilder = new FabricsSettingBuilder();
    settingBuilder.setNextFabricNum(current + 1);
    tx.put(LogicalDatastoreType.CONFIGURATION, settingPath, settingBuilder.build());
    MdSalUtils.wrapperSubmit(tx, executor);

    return current;
}

From source file:c5db.tablet.TabletService.java

/**
 * Starts the tablet service: resolves the Discovery and ControlRpc modules
 * synchronously on the fiber, then waits for the Replication module via a
 * callback before building the tablet registry and bootstrapping.
 * Any failure is reported through notifyFailed.
 */
@Override
protected void doStart() {

    fiber.start();
    fiber.execute(() -> {
        ListenableFuture<C5Module> discoveryService = server.getModule(ModuleType.Discovery);
        ListenableFuture<C5Module> controlService = server.getModule(ModuleType.ControlRpc);

        // Block the fiber task until both required modules are available.
        try {
            discoveryModule = (DiscoveryModule) discoveryService.get();
            controlModule = (ControlModule) controlService.get();
        } catch (InterruptedException | ExecutionException e) {
            notifyFailed(e);
            return;
        }

        // Replication is awaited asynchronously; registry setup runs in its callback.
        ListenableFuture<C5Module> replicatorService = server.getModule(ModuleType.Replication);
        Futures.addCallback(replicatorService, new FutureCallback<C5Module>() {
            @Override
            public void onSuccess(@NotNull C5Module result) {
                replicationModule = (ReplicationModule) result;
                fiber.execute(() -> {
                    // The last lambda argument is the region-opening factory used
                    // by the registry to materialize HRegion-backed tablets.
                    tabletRegistry = new TabletRegistry(server, server.getConfigDirectory(), conf,
                            getTabletStateChanges(), replicationModule, ReplicatedTablet::new,
                            (basePath, regionInfo, tableDescriptor, log, conf) -> {
                                HRegionServicesBridge hRegionBridge = new HRegionServicesBridge(conf);
                                return new HRegionBridge(
                                        HRegion.openHRegion(new org.apache.hadoop.fs.Path(basePath.toString()),
                                                regionInfo, tableDescriptor, log, conf, hRegionBridge, null));
                            });
                    try {
                        startBootstrap();
                        notifyStarted();
                    } catch (Exception e) {
                        notifyFailed(e);
                    }
                });

            }

            @Override
            public void onFailure(@NotNull Throwable t) {
                notifyFailed(t);
            }
        }, fiber);
    });

}

From source file:co.cask.cdap.app.runtime.spark.SparkRuntimeService.java

/**
 * Waits for the Spark job submission to finish, invokes the onFinish hook
 * with the success flag, and always runs cleanup. Job failure is recorded
 * but not rethrown; cleanup/logging happen regardless.
 */
@Override
protected void shutDown() throws Exception {
    // Try to get from the submission future to see if the job completed successfully.
    ListenableFuture<RunId> jobCompletion = completion.get();
    boolean succeeded = true;
    try {
        jobCompletion.get();
    } catch (InterruptedException e) {
        // FIX: previously swallowed by the broad catch below — restore the
        // thread's interrupt status so callers can observe the interruption.
        succeeded = false;
        Thread.currentThread().interrupt();
    } catch (Exception e) {
        succeeded = false;
    }

    try {
        onFinish(succeeded, new BasicSparkClientContext(runtimeContext));
    } finally {
        cleanupTask.run();
        LOG.debug("Spark program completed: {}", runtimeContext);
    }
}

From source file:de.xaniox.heavyspleef.core.HeavySpleef.java

/**
 * Shuts the plugin down: persists all games synchronously (Bukkit kills
 * async threads on shutdown, so we must block here), then unregisters
 * listeners and disables modules. Save failures are logged, not rethrown.
 */
public void disable() {
    gameManager.shutdown();
    ListenableFuture<?> future = databaseHandler.saveGames(gameManager.getGames(), null);

    try {
        //Wait for the task to be completed, as Bukkit
        //does not mind async thread and just kills them
        future.get();
    } catch (InterruptedException e) {
        logger.log(Level.SEVERE, "Server-Thread interrupted while saving games to database", e);
        // FIX: restore the interrupt status instead of swallowing it, so the
        // server thread's interruption remains visible to callers.
        Thread.currentThread().interrupt();
    } catch (ExecutionException e) {
        logger.log(Level.SEVERE, "Could not save games to database", e);
    }

    HandlerList.unregisterAll(plugin);
    moduleManager.disableModules();
}