Example usage for com.google.common.util.concurrent ListenableFuture get

Introduction

This page lists example usages of com.google.common.util.concurrent.ListenableFuture.get, collected from open-source projects.

Prototype

V get() throws InterruptedException, ExecutionException;

Document

Waits if necessary for the computation to complete, and then retrieves its result.
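
A minimal, self-contained sketch of the call is shown below. The executor setup and the task are illustrative placeholders, not taken from the project examples that follow.

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;

public class ListenableFutureGetExample {
    public static void main(String[] args) throws InterruptedException {
        ListeningExecutorService executor = MoreExecutors
                .listeningDecorator(Executors.newSingleThreadExecutor());
        // submit() on a ListeningExecutorService returns a ListenableFuture.
        ListenableFuture<String> future = executor.submit(() -> "result");
        try {
            // get() blocks the calling thread until the computation completes.
            System.out.println(future.get());
        } catch (ExecutionException e) {
            // The computation itself failed; the original failure is e.getCause().
            e.getCause().printStackTrace();
        } finally {
            executor.shutdown();
        }
    }
}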

Usage

From source file:dmg.cells.services.CoreRoutingManager.java
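Here get() blocks until all peers have acknowledged the shutdown notification; an ExecutionException is merely logged, and an InterruptedException is silently ignored.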

private void notifyDownstreamOfDomainDeath() {
    canary = null;

    ListenableFuture<List<CellMessage>> future;
    synchronized (this) {
        future = sendToPeers(new PeerShutdownNotification(getCellDomainName()), satelliteTunnels.values(),
                7000);
    }
    try {
        future.get();
    } catch (ExecutionException e) {
        LOG.info("Failed to notify downstream of shutdown: {}", e.toString());
    } catch (InterruptedException ignored) {
    }
}

From source file:org.sosy_lab.cpachecker.core.algorithm.ParallelAlgorithm.java
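This example calls get() on each future in completion order, unwraps ExecutionException via getCause() to recover the underlying CPAException, and treats CancellationException as normal once one analysis has succeeded and the others are cancelled.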

private void handleFutureResults(List<ListenableFuture<ParallelAnalysisResult>> futures)
        throws InterruptedException, Error, CPAException {

    List<CPAException> exceptions = new ArrayList<>();
    for (ListenableFuture<ParallelAnalysisResult> f : Futures.inCompletionOrder(futures)) {
        try {
            ParallelAnalysisResult result = f.get();
            if (result.hasValidReachedSet() && finalResult == null) {
                finalResult = result;
                stats.successfulAnalysisName = result.getAnalysisName();

                // cancel other computations
                futures.forEach(future -> future.cancel(true));
                logger.log(Level.INFO, result.getAnalysisName() + " finished successfully.");
                shutdownManager.requestShutdown(SUCCESS_MESSAGE);
            } else if (!result.hasValidReachedSet()) {
                logger.log(Level.INFO, result.getAnalysisName() + " finished without usable result.");
            }
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (cause instanceof CPAException) {
                if (cause.getMessage().contains("recursion")) {
                    logger.logUserException(Level.WARNING, cause, "Analysis not completed due to recursion");
                }
                if (cause.getMessage().contains("pthread_create")) {
                    logger.logUserException(Level.WARNING, cause, "Analysis not completed due to concurrency");
                }
                exceptions.add((CPAException) cause);

            } else {
                // cancel other computations
                futures.forEach(future -> future.cancel(true));
                shutdownManager.requestShutdown("cancelling all remaining analyses");
                throw new CPAException("An unexpected exception occurred", cause);
            }
        } catch (CancellationException e) {
            // do nothing, this is normal if we cancel other analyses
        }
    }

    // we do not have any result, so we propagate the found CPAExceptions upwards
    if (finalResult == null && !exceptions.isEmpty()) {
        if (exceptions.size() == 1) {
            throw Iterables.getOnlyElement(exceptions);
        } else {
            throw new CompoundException("Several exceptions occurred during the analysis", exceptions);
        }
    }
}

From source file:org.apache.hadoop.fs.s3r.S3RFastOutputStream.java
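The upload runs on an executor and get() is used only to block until it finishes; on interruption the interrupt flag is restored, and the cause of an ExecutionException is rethrown wrapped in an IOException.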

private void putObject() throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Executing regular upload for bucket '{}' key '{}'", bucket, key);
    }
    final ObjectMetadata om = createDefaultMetadata();
    om.setContentLength(buffer.size());
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key,
            new ByteArrayInputStream(buffer.toByteArray()), om);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setGeneralProgressListener(progressListener);
    ListenableFuture<PutObjectResult> putObjectResult = executorService.submit(new Callable<PutObjectResult>() {
        @Override
        public PutObjectResult call() throws Exception {
            return client.putObject(putObjectRequest);
        }
    });
    //wait for completion
    try {
        putObjectResult.get();
    } catch (InterruptedException ie) {
        LOG.warn("Interrupted object upload:" + ie, ie);
        Thread.currentThread().interrupt();
    } catch (ExecutionException ee) {
        throw new IOException("Regular upload failed", ee.getCause());
    }
}

From source file:io.datakernel.service.ServiceGraph.java
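Here get() is called inside a completion listener registered with addListener(), where it cannot block because the listener only runs after the future has completed.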

private ListenableFuture<?> actionInThread(final boolean start, final Collection<Object> rootNodes) {
    final SettableFuture<?> resultFuture = SettableFuture.create();
    final ExecutorService executor = newSingleThreadExecutor();
    executor.execute(new Runnable() {
        @Override
        public void run() {
            Map<Object, ListenableFuture<LongestPath>> futures = new HashMap<>();
            List<ListenableFuture<LongestPath>> rootFutures = new ArrayList<>();
            for (Object rootNode : rootNodes) {
                rootFutures.add(processNode(rootNode, start, futures, executor));
            }
            final ListenableFuture<LongestPath> rootFuture = combineDependenciesFutures(rootFutures, executor);
            rootFuture.addListener(new Runnable() {
                @Override
                public void run() {
                    try {
                        LongestPath longestPath = rootFuture.get();
                        StringBuilder sb = new StringBuilder();
                        printLongestPath(sb, longestPath);
                        if (sb.length() != 0)
                            sb.deleteCharAt(sb.length() - 1);
                        logger.info("Longest path:\n" + sb);
                        resultFuture.set(null);
                        executor.shutdown();
                    } catch (InterruptedException | ExecutionException e) {
                        resultFuture.setException(getRootCause(e));
                        executor.shutdown();
                    }
                }
            }, executor);
        }
    });
    return resultFuture;
}

From source file:org.opendaylight.atrium.routingservice.config.RoutingConfigServiceImpl.java
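Each datastore read returns a ListenableFuture of an Optional, and get() blocks until the read transaction delivers its result.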

@Override
public Set<AtriumInterface> getInterfaces() {

    Set<AtriumInterface> interfaceSet = new HashSet<>();
    ReadOnlyTransaction readOnlyTransaction = dataBroker.newReadOnlyTransaction();
    InstanceIdentifier<Addresses> addressBuilder = InstanceIdentifier.builder(Addresses.class).build();
    Addresses addresses = null;

    // Cautious wait to ensure data is filled..
    try {
        Thread.sleep(250);
    } catch (InterruptedException e1) {
        e1.printStackTrace();
    }

    try {
        ListenableFuture<Optional<Addresses>> lfONT;
        lfONT = readOnlyTransaction.read(LogicalDatastoreType.CONFIGURATION, addressBuilder);
        Optional<Addresses> oNT = lfONT.get();
        if (oNT.isPresent()) {
            addresses = oNT.get();
        } else {
            log.warn("Coudn't get addresses in data store..");
        }
    } catch (InterruptedException e) {
        e.printStackTrace();
    } catch (ExecutionException e) {
        e.printStackTrace();
    } finally {
        readOnlyTransaction.close();
    }

    // Note: if the read above failed, addresses is still null here and this loop throws a NullPointerException.
    for (Address address : addresses.getAddress()) {

        NodeId nodeId = new NodeId(address.getDpid());
        NodeConnectorId connectorId = new NodeConnectorId(
                address.getDpid() + ":" + address.getOfPortId().getValue());

        InstanceIdentifier<NodeConnector> instanceIdentifier = InstanceIdentifier.builder(Nodes.class)
                .child(Node.class, new NodeKey(nodeId))
                .child(NodeConnector.class, new NodeConnectorKey(connectorId)).build();

        NodeConnector nodeConnector = null;
        try {
            ListenableFuture<Optional<NodeConnector>> lfONT;
            lfONT = readOnlyTransaction.read(LogicalDatastoreType.OPERATIONAL, instanceIdentifier);
            Optional<NodeConnector> oNT = lfONT.get();
            if (oNT.isPresent()) {
                nodeConnector = oNT.get();
            } else {
                log.warn("Coudn't get Node Connector {} in data store", connectorId);
            }
        } catch (InterruptedException e) {
            e.printStackTrace();
        } catch (ExecutionException e) {
            e.printStackTrace();
        } finally {
            readOnlyTransaction.close();
        }

        MacAddress mac = address.getMac();
        AtriumVlanId vlanId = AtriumVlanId.vlanId(address.getVlan().shortValue());

        IpAddress ipAddress = address.getIpAddress();
        AtriumIp4Address ip4Address = AtriumIp4Address.valueOf(ipAddress.getIpv4Address().getValue());

        // TODO
        // Include subnet in yang
        AtriumIp4Prefix ip4Prefix = AtriumIp4Prefix.valueOf(ip4Address.getIp4Address().toString() + "/24");
        AtriumInterfaceIpAddress interfaceIpAddress = new AtriumInterfaceIpAddress(ip4Address, ip4Prefix);
        Set<AtriumInterfaceIpAddress> interfaceIpAddressSet = new HashSet<>();
        interfaceIpAddressSet.add(interfaceIpAddress);

        AtriumInterface matchingInterface = new AtriumInterface(nodeConnector, interfaceIpAddressSet, mac,
                vlanId);
        interfaceSet.add(matchingInterface);
    }
    return interfaceSet;
}

From source file:eu.esdihumboldt.hale.app.transform.ExecTransformation.java
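The transformation result is awaited with a plain get(), and both InterruptedException and ExecutionException are propagated to the caller via the method's throws clause.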

private int transform() throws InterruptedException, ExecutionException {
    status("Running hale transformation...");

    TransformationSettings settings = new DefaultTransformationSettings();
    // TODO make settings configurable?

    // run transformation
    ListenableFuture<Boolean> res = Transformation.transform(sources, target, env, reportHandler, id,
            validators, context.getFilters(), settings);

    boolean orgSuccess = res.get();

    // Job threads might still be active, wait a moment to allow
    // them to complete and file their report (otherwise error may get lost)
    try {
        Thread.sleep(3000);
    } catch (Throwable e) {
        // ignore
    }

    boolean success;
    try {
        success = evaluateSuccess(orgSuccess);
    } catch (Throwable e) {
        error("Success evaluation resulted in an error:\n" + e.getMessage());
        if (context.isLogException()) {
            e.printStackTrace();
        }
        return 2;
    }

    if (success) {
        info("Transformation completed. Please check the reports for more details.");
    } else {
        if (orgSuccess) {
            error("Transformation failed according to the success evaluation script.");
            return 2;
        } else {
            error("Transformation failed, please check the reports for details.");
            return 1;
        }
    }

    // exit OK
    return 0;
}

From source file:org.opendaylight.atrium.routingservice.config.RoutingConfigServiceImpl.java
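As in getInterfaces above, get() blocks on each datastore read; note that the resulting Optionals are unwrapped here without a presence check.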

@Override
public AtriumInterface getMatchingInterface(IpAddress ipAddress) {

    AtriumInterface matchingInterface = null;
    ReadOnlyTransaction readOnlyTransaction = dataBroker.newReadOnlyTransaction();

    BgpPeerKey bgpPeerKey = new BgpPeerKey(ipAddress);
    InstanceIdentifier<BgpPeer> bgpPeersBuilder = InstanceIdentifier.builder(BgpPeers.class)
            .child(BgpPeer.class, bgpPeerKey).build();

    String peerDpId = null;
    String peerPort = null;
    try {
        ListenableFuture<Optional<BgpPeer>> lfONT;
        lfONT = readOnlyTransaction.read(LogicalDatastoreType.CONFIGURATION, bgpPeersBuilder);
        Optional<BgpPeer> oNT = lfONT.get();
        // The Optional is unwrapped without an isPresent() check; this throws if no peer was found.
        BgpPeer bgpPeer = oNT.get();
        peerDpId = bgpPeer.getPeerDpId().getValue();
        peerPort = bgpPeer.getPeerPort().toString();
    } catch (InterruptedException e) {
        e.printStackTrace();
    } catch (ExecutionException e) {
        e.printStackTrace();
    } finally {
        readOnlyTransaction.close();
    }

    Addresses addresses = null;
    InstanceIdentifier<Addresses> addressBuilder = InstanceIdentifier.builder(Addresses.class).build();
    try {
        ListenableFuture<Optional<Addresses>> lfONT;
        lfONT = readOnlyTransaction.read(LogicalDatastoreType.CONFIGURATION, addressBuilder);
        Optional<Addresses> oNT = lfONT.get();
        addresses = oNT.get();
    } catch (InterruptedException e) {
        e.printStackTrace();
    } catch (ExecutionException e) {
        e.printStackTrace();
    } finally {
        readOnlyTransaction.close();
    }

    for (Address address : addresses.getAddress()) {

        String addressDpId = address.getDpid();
        String addressPort = address.getOfPortId().getValue();

        if (peerDpId.equals(addressDpId) && peerPort.equals(addressPort)) {

            MacAddress mac = address.getMac();
            AtriumVlanId vlanId = AtriumVlanId.vlanId(address.getVlan().shortValue());

            AtriumIp4Address ip4Address = AtriumIp4Address
                    .valueOf(address.getIpAddress().getIpv4Address().getValue());

            // TODO
            // Include subnet in yang
            AtriumIp4Prefix ip4Prefix = AtriumIp4Prefix.valueOf(ip4Address.getIp4Address().toString() + "/24");
            AtriumInterfaceIpAddress interfaceIpAddress = new AtriumInterfaceIpAddress(ip4Address, ip4Prefix);
            Set<AtriumInterfaceIpAddress> interfaceIpAddressSet = new HashSet<>();
            interfaceIpAddressSet.add(interfaceIpAddress);

            NodeId nodeId = new NodeId(address.getDpid());
            NodeConnectorId connectorId = new NodeConnectorId(
                    address.getDpid() + ":" + address.getOfPortId().getValue());

            InstanceIdentifier<NodeConnector> instanceIdentifier = InstanceIdentifier.builder(Nodes.class)
                    .child(Node.class, new NodeKey(nodeId))
                    .child(NodeConnector.class, new NodeConnectorKey(connectorId)).build();

            NodeConnector nodeConnector = null;
            try {
                ListenableFuture<Optional<NodeConnector>> lfONT;
                lfONT = readOnlyTransaction.read(LogicalDatastoreType.OPERATIONAL, instanceIdentifier);
                Optional<NodeConnector> oNT = lfONT.get();
                nodeConnector = oNT.get();
            } catch (InterruptedException e) {
                e.printStackTrace();
            } catch (ExecutionException e) {
                e.printStackTrace();
            } finally {
                readOnlyTransaction.close();
            }

            matchingInterface = new AtriumInterface(nodeConnector, interfaceIpAddressSet, mac, vlanId);
            return matchingInterface;
        }
    }
    return null;
}

From source file:com.continuuity.loom.scheduler.SolverScheduler.java
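future.get() is invoked inside the completion listener, so it returns the solver's result immediately rather than blocking.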

@Override
public void run() {
    try {
        while (true) {
            final GroupElement gElement = solverQueues.take(id);
            if (gElement == null) {
                return;
            }
            final Element solveElement = gElement.getElement();

            final ListenableFuture<String> future = executorService.submit(new SolverRunner(gElement));
            future.addListener(new Runnable() {
                @Override
                public void run() {
                    try {
                        solverQueues.recordProgress(id, gElement.getQueueName(), solveElement.getId(),
                                TrackingQueue.ConsumingStatus.FINISHED_SUCCESSFULLY, future.get());
                    } catch (Exception e) {
                        LOG.error("Unable to record progress for cluster {}", solveElement.getId());
                    }
                }
            }, executorService);
        }
    } catch (Exception e) {
        LOG.error("Got exception:", e);
    }
}

From source file:org.apache.tez.runtime.task.TezChild.java
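The container blocks on get() while waiting for its next task, mapping an ExecutionException to an EXECUTION_FAILURE exit status and an InterruptedException to INTERRUPTED.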

public ContainerExecutionResult run() throws IOException, InterruptedException, TezException {

    ContainerContext containerContext = new ContainerContext(containerIdString);
    ContainerReporter containerReporter = new ContainerReporter(umbilical, containerContext,
            getTaskMaxSleepTime);

    taskReporter = new TaskReporter(umbilical, amHeartbeatInterval, sendCounterInterval, maxEventsToGet,
            heartbeatCounter, containerIdString);

    UserGroupInformation childUGI = null;

    while (!executor.isTerminated() && !isShutdown.get()) {
        if (taskCount > 0) {
            TezUtilsInternal.updateLoggers("");
        }
        ListenableFuture<ContainerTask> getTaskFuture = executor.submit(containerReporter);
        boolean error = false;
        ContainerTask containerTask = null;
        try {
            containerTask = getTaskFuture.get();
        } catch (ExecutionException e) {
            error = true;
            Throwable cause = e.getCause();
            LOG.error("Error fetching new work for container {}", containerIdString, cause);
            return new ContainerExecutionResult(ContainerExecutionResult.ExitStatus.EXECUTION_FAILURE, cause,
                    "Execution Exception while fetching new work: " + e.getMessage());
        } catch (InterruptedException e) {
            error = true;
            LOG.info("Interrupted while waiting for new work for container {}", containerIdString);
            return new ContainerExecutionResult(ContainerExecutionResult.ExitStatus.INTERRUPTED, e,
                    "Interrupted while waiting for new work");
        } finally {
            if (error) {
                shutdown();
            }
        }
        if (containerTask.shouldDie()) {
            LOG.info("ContainerTask returned shouldDie=true for container {}, Exiting", containerIdString);
            shutdown();
            return new ContainerExecutionResult(ContainerExecutionResult.ExitStatus.SUCCESS, null,
                    "Asked to die by the AM");
        } else {
            String loggerAddend = containerTask.getTaskSpec().getTaskAttemptID().toString();
            taskCount++;
            String timeStamp = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
                    .format(Calendar.getInstance().getTime());
            System.err.println(timeStamp + " Starting to run new task attempt: "
                    + containerTask.getTaskSpec().getTaskAttemptID().toString());
            System.out.println(timeStamp + " Starting to run new task attempt: "
                    + containerTask.getTaskSpec().getTaskAttemptID().toString());
            TezUtilsInternal.setHadoopCallerContext(hadoopShim, containerTask.getTaskSpec().getTaskAttemptID());
            TezUtilsInternal.updateLoggers(loggerAddend);
            FileSystem.clearStatistics();

            childUGI = handleNewTaskCredentials(containerTask, childUGI);
            handleNewTaskLocalResources(containerTask, childUGI);
            cleanupOnTaskChanged(containerTask);

            // Execute the Actual Task
            TezTaskRunner2 taskRunner = new TezTaskRunner2(defaultConf, childUGI, localDirs,
                    containerTask.getTaskSpec(), appAttemptNumber, serviceConsumerMetadata,
                    serviceProviderEnvMap, startedInputsMap, taskReporter, executor, objectRegistry, pid,
                    executionContext, memAvailable, updateSysCounters, hadoopShim, sharedExecutor);
            boolean shouldDie;
            try {
                TaskRunner2Result result = taskRunner.run();
                LOG.info("TaskRunner2Result: {}", result);
                shouldDie = result.isContainerShutdownRequested();
                if (shouldDie) {
                    LOG.info("Got a shouldDie notification via heartbeats for container {}. Shutting down",
                            containerIdString);
                    shutdown();
                    return new ContainerExecutionResult(ContainerExecutionResult.ExitStatus.SUCCESS, null,
                            "Asked to die by the AM");
                }
                if (result.getError() != null) {
                    Throwable e = result.getError();
                    handleError(result.getError());
                    return new ContainerExecutionResult(ContainerExecutionResult.ExitStatus.EXECUTION_FAILURE,
                            e, "TaskExecutionFailure: " + e.getMessage());
                }
            } finally {
                FileSystem.closeAllForUGI(childUGI);
            }
        }
    }
    return new ContainerExecutionResult(ContainerExecutionResult.ExitStatus.SUCCESS, null, null);
}

From source file:org.opendaylight.netconf.sal.restconf.impl.BrokerFacade.java
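Both checked exceptions thrown by get() are handled in a single multi-catch and rethrown as a RestconfDocumentedException carrying the original cause.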

private NormalizedNode<?, ?> readDataViaTransaction(final DOMDataReadTransaction transaction,
        final LogicalDatastoreType datastore, final YangInstanceIdentifier path) {
    LOG.trace("Read {} via Restconf: {}", datastore.name(), path);
    final ListenableFuture<Optional<NormalizedNode<?, ?>>> listenableFuture = transaction.read(datastore, path);
    if (listenableFuture != null) {
        Optional<NormalizedNode<?, ?>> optional;
        try {
            LOG.debug("Reading result data from transaction.");
            optional = listenableFuture.get();
        } catch (InterruptedException | ExecutionException e) {
            LOG.warn("Exception by reading {} via Restconf: {}", datastore.name(), path, e);
            throw new RestconfDocumentedException("Problem to get data from transaction.", e.getCause());

        }
        if (optional != null) {
            if (optional.isPresent()) {
                return optional.get();
            }
        }
    }
    return null;
}