Example usage for com.google.common.util.concurrent ListenableFuture get

Introduction

This page collects example usages of com.google.common.util.concurrent.ListenableFuture.get(long, TimeUnit).

Prototype

V get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException;

Document

Waits if necessary for at most the given time for the computation to complete, and then retrieves its result, if available.
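
Before the project examples below, here is a minimal, self-contained sketch of the pattern they all share (not taken from any of the projects; the TimedGetExample class and the trivial task are stand-ins): a task is submitted to a ListeningExecutorService, and get(long, TimeUnit) bounds the wait, with each of the three declared exceptions handled separately.

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class TimedGetExample {
    public static void main(String[] args) {
        ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
        // submit() on a ListeningExecutorService returns a ListenableFuture.
        ListenableFuture<String> future = executor.submit(() -> "result");
        try {
            // Wait at most 5 seconds for the computation to complete.
            String result = future.get(5, TimeUnit.SECONDS);
            System.out.println("Got: " + result);
        } catch (TimeoutException e) {
            // The computation did not finish in time; cancel it.
            future.cancel(true);
        } catch (ExecutionException e) {
            // The computation threw; the original failure is the cause.
            e.getCause().printStackTrace();
        } catch (InterruptedException e) {
            // Restore the interrupt status for callers further up the stack.
            Thread.currentThread().interrupt();
        } finally {
            executor.shutdown();
        }
    }
}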

Usage

From source file: org.opendaylight.openflowplugin.applications.frsync.impl.AbstractFrmSyncListener.java

@Override
public void onDataTreeChanged(@Nonnull final Collection<DataTreeModification<T>> modifications) {
    for (DataTreeModification<T> modification : modifications) {
        final NodeId nodeId = PathUtil.digNodeId(modification.getRootPath().getRootIdentifier());

        try {
            final Optional<ListenableFuture<Boolean>> optFuture = processNodeModification(modification);
            if (optFuture.isPresent()) {
                final ListenableFuture<Boolean> future = optFuture.get();
                final Boolean ret = future.get(15000, TimeUnit.MILLISECONDS);
                LOG.debug("syncup return in {} listener for: {} [{}] thread:{}", dsType(), nodeId.getValue(),
                        ret, threadName());
            }
        } catch (InterruptedException e) {
            LOG.warn("permit for forwarding rules sync not acquired: {}", nodeId.getValue());
        } catch (Exception e) {
            LOG.error("error processing inventory node modification: {}", nodeId.getValue(), e);
        }
    }
}

From source file: co.cask.cdap.internal.app.deploy.pipeline.LocalArtifactLoaderStage.java

/**
 * Instantiates the Application class and calls configure() on it to generate the {@link ApplicationSpecification}.
 *
 * @param deploymentInfo information needed to deploy the application, such as the artifact to create it from
 *                       and the application config to use.
 */
@Override
public void process(AppDeploymentInfo deploymentInfo)
        throws InterruptedException, ExecutionException, TimeoutException, IOException {

    Id.Artifact artifactId = deploymentInfo.getArtifactId();
    Location artifactLocation = deploymentInfo.getArtifactLocation();
    String appClassName = deploymentInfo.getAppClassName();
    String configString = deploymentInfo.getConfigString();

    InMemoryConfigurator inMemoryConfigurator = new InMemoryConfigurator(cConf, namespace, artifactId,
            appClassName, artifactLocation, configString, artifactRepository);

    ListenableFuture<ConfigResponse> result = inMemoryConfigurator.config();
    ConfigResponse response = result.get(120, TimeUnit.SECONDS);
    if (response.getExitCode() != 0) {
        throw new IllegalArgumentException("Failed to configure application: " + deploymentInfo);
    }
    ApplicationSpecification specification = adapter.fromJson(response.get());
    if (appName != null) {
        specification = new ForwardingApplicationSpecification(specification) {
            @Override
            public String getName() {
                return appName;
            }
        };
    }

    Id.Application application = Id.Application.from(namespace, specification.getName());
    emit(new ApplicationDeployable(application, specification, store.getApplication(application),
            ApplicationDeployScope.USER, artifactLocation));
}

From source file: com.github.nethad.clustermeister.integration.sc04.Scenario04.java

@Override
public void runScenario() throws Exception {

    new Thread(new Runnable() {

        @Override
        public void run() {
            logger.info("Scenario 04 started...");

            Clustermeister clustermeister = ClustermeisterFactory.create();
            try {
                logger.info("Clustermeister started.");
                logger.info("Getting nodes...");
                Collection<ExecutorNode> allNodes = clustermeister.getAllNodes();
                addToReport("node size", allNodes.size());
                logger.info(allNodes.size() + " nodes found.");

                logger.info("Execute Pi()");
                ListenableFuture<String> result = allNodes.iterator().next().execute(new Pi());

                logger.info("Waiting for result.");
                String resultString = result.get(100, TimeUnit.SECONDS);
                addToReport("Result: ", resultString);
            } catch (Exception ex) {
                logger.warn("Exception on result", ex);
                addToReport("Exception on result", ex);
            } finally {
                clustermeister.shutdown();
            }

        }
    }).start();
}

From source file: org.opendaylight.netconf.test.tool.client.stress.SyncExecutionStrategy.java

private void waitForResponse(AtomicInteger responseCounter,
        final ListenableFuture<RpcResult<NetconfMessage>> netconfMessageFuture) {
    try {
        final RpcResult<NetconfMessage> netconfMessageRpcResult = netconfMessageFuture
                .get(getParams().msgTimeout, TimeUnit.SECONDS);
        if (netconfMessageRpcResult.isSuccessful()) {
            responseCounter.incrementAndGet();
            LOG.debug("Received response {}", responseCounter.get());
        } else {
            LOG.warn("Request failed {}", netconfMessageRpcResult);
        }

    } catch (final InterruptedException e) {
        throw new RuntimeException(e);
    } catch (final ExecutionException | TimeoutException e) {
        throw new RuntimeException("Request not finished", e);
    }
}

From source file: org.opendaylight.controller.netconf.cli.commands.remote.RemoteCommand.java

@Override
public Output invoke(final Input inputArgs) throws CommandInvocationException {
    final ListenableFuture<RpcResult<CompositeNode>> invokeRpc = rpc.invokeRpc(getCommandId(),
            inputArgs.wrap(getCommandId()));
    try {
        return new Output(invokeRpc.get(DEFAULT_TIMEOUT, DEFAULT_TIMEOUT_UNIT).getResult());
    } catch (final ExecutionException e) {
        throw new CommandInvocationException(getCommandId(), e);
    } catch (final TimeoutException e) {
        // Request timed out, cancel request
        invokeRpc.cancel(true);
        throw new CommandInvocationException.CommandTimeoutException(getCommandId(), e);
    } catch (final InterruptedException e) {
        throw new RuntimeException(e);
    }
}

From source file: co.cask.cdap.explore.client.DatasetExploreFacade.java

/**
 * Enables ad-hoc exploration of the given {@link co.cask.cdap.api.data.batch.RecordScannable}.
 * @param datasetInstance dataset instance name.
 */
public void enableExplore(String datasetInstance) throws ExploreException, SQLException {
    if (!exploreEnabled) {
        return;
    }

    ListenableFuture<Void> futureSuccess = exploreClient.enableExplore(datasetInstance);
    try {
        futureSuccess.get(20, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        LOG.error("Caught exception", e);
        Thread.currentThread().interrupt();
    } catch (ExecutionException e) {
        Throwable t = Throwables.getRootCause(e);
        if (t instanceof ExploreException) {
            LOG.error("Enable explore did not finish successfully for dataset instance {}.", datasetInstance);
            throw (ExploreException) t;
        } else if (t instanceof SQLException) {
            throw (SQLException) t;
        } else if (t instanceof HandleNotFoundException) {
            // Cannot happen unless explore server restarted, or someone calls close in between.
            LOG.error("Error running enable explore", e);
            throw Throwables.propagate(e);
        } else if (t instanceof UnexpectedQueryStatusException) {
            UnexpectedQueryStatusException sE = (UnexpectedQueryStatusException) t;
            LOG.error("Enable explore operation ended in an unexpected state - {}", sE.getStatus().name(), e);
            throw Throwables.propagate(e);
        }
    } catch (TimeoutException e) {
        LOG.error("Error running enable explore - operation timed out", e);
        throw Throwables.propagate(e);
    }
}

From source file: co.cask.cdap.explore.client.DatasetExploreFacade.java

/**
 * Disables ad-hoc exploration of the given {@link co.cask.cdap.api.data.batch.RecordScannable}.
 * @param datasetInstance dataset instance name.
 */
public void disableExplore(String datasetInstance) throws ExploreException, SQLException {
    if (!exploreEnabled) {
        return;
    }

    ListenableFuture<Void> futureSuccess = exploreClient.disableExplore(datasetInstance);
    try {
        futureSuccess.get(20, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        LOG.error("Caught exception", e);
        Thread.currentThread().interrupt();
    } catch (ExecutionException e) {
        Throwable t = Throwables.getRootCause(e);
        if (t instanceof ExploreException) {
            LOG.error("Disable explore did not finish successfully for dataset instance {}.", datasetInstance);
            throw (ExploreException) t;
        } else if (t instanceof SQLException) {
            throw (SQLException) t;
        } else if (t instanceof HandleNotFoundException) {
            // Cannot happen unless explore server restarted, or someone calls close in between.
            LOG.error("Error running disable explore", e);
            throw Throwables.propagate(e);
        } else if (t instanceof UnexpectedQueryStatusException) {
            UnexpectedQueryStatusException sE = (UnexpectedQueryStatusException) t;
            LOG.error("Disable explore operation ended in an unexpected state - {}", sE.getStatus().name(), e);
            throw Throwables.propagate(e);
        }
    } catch (TimeoutException e) {
        LOG.error("Error running disable explore - operation timed out", e);
        throw Throwables.propagate(e);
    }
}

From source file: co.cask.cdap.internal.app.deploy.pipeline.LocalArchiveLoaderStage.java

/**
 * Creates a {@link co.cask.cdap.internal.app.deploy.InMemoryConfigurator} to run through
 * the process of generating the {@link ApplicationSpecification}.
 *
 * @param deploymentInfo the input and output locations for the deployment
 */
@Override
public void process(DeploymentInfo deploymentInfo) throws Exception {

    Location outputLocation = deploymentInfo.getDestination();
    Location parent = Locations.getParent(outputLocation);
    Locations.mkdirsIfNotExists(parent);

    File input = deploymentInfo.getAppJarFile();
    Location tmpLocation = parent.getTempFile(".tmp");
    LOG.debug("Copy from {} to {}", input.getName(), tmpLocation.toURI());
    Files.copy(input, Locations.newOutputSupplier(tmpLocation));

    // Finally, move archive to final location
    try {
        if (tmpLocation.renameTo(outputLocation) == null) {
            throw new IOException(String.format("Could not move archive from location: %s, to location: %s",
                    tmpLocation.toURI(), outputLocation.toURI()));
        }
    } catch (IOException e) {
        // In case copy to temporary file failed, or rename failed
        tmpLocation.delete();
        throw e;
    }

    InMemoryConfigurator inMemoryConfigurator = new InMemoryConfigurator(
            new LocalLocationFactory().create(input.toURI()), deploymentInfo.getConfigString());
    ListenableFuture<ConfigResponse> result = inMemoryConfigurator.config();
    ConfigResponse response = result.get(120, TimeUnit.SECONDS);
    if (response.getExitCode() != 0) {
        throw new IllegalArgumentException("Failed to configure application: " + deploymentInfo);
    }
    ApplicationSpecification specification = adapter.fromJson(response.get());
    if (appId != null) {
        specification = new ForwardingApplicationSpecification(specification) {
            @Override
            public String getName() {
                return appId;
            }
        };
    }

    Id.Application application = Id.Application.from(id, specification.getName());
    emit(new ApplicationDeployable(application, specification, store.getApplication(application),
            deploymentInfo.getApplicationDeployScope(), outputLocation));
}

From source file: org.opendaylight.controller.netconf.test.tool.client.stress.SyncExecutionStrategy.java

private void waitForResponse(AtomicInteger responseCounter,
        final ListenableFuture<RpcResult<NetconfMessage>> netconfMessageFuture) {
    try {
        final RpcResult<NetconfMessage> netconfMessageRpcResult = netconfMessageFuture.get(params.msgTimeout,
                TimeUnit.SECONDS);
        if (netconfMessageRpcResult.isSuccessful()) {
            responseCounter.incrementAndGet();
            LOG.debug("Received response {}", responseCounter.get());
        } else {
            LOG.warn("Request failed {}", netconfMessageRpcResult);
        }

    } catch (final InterruptedException e) {
        throw new RuntimeException(e);
    } catch (final ExecutionException | TimeoutException e) {
        throw new RuntimeException("Request not finished", e);
    }
}

From source file: com.rackspacecloud.blueflood.inputs.handlers.HttpStatsDIngestionHandler.java

@Override
public void handle(ChannelHandlerContext ctx, HttpRequest request) {

    final Timer.Context timerContext = handlerTimer.time();

    // this is all JSON.
    final String body = request.getContent().toString(Constants.DEFAULT_CHARSET);
    try {
        // block until things get ingested.
        requestCount.inc();
        MetricsCollection collection = new MetricsCollection();
        collection.add(PreaggregateConversions.buildMetricsCollection(createBundle(body)));
        ListenableFuture<List<Boolean>> futures = processor.apply(collection);
        List<Boolean> persisteds = futures.get(timeout.getValue(), timeout.getUnit());
        for (Boolean persisted : persisteds) {
            if (!persisted) {
                HttpMetricsIngestionHandler.sendResponse(ctx, request, null,
                        HttpResponseStatus.INTERNAL_SERVER_ERROR);
                return;
            }
        }
        HttpMetricsIngestionHandler.sendResponse(ctx, request, null, HttpResponseStatus.OK);

    } catch (JsonParseException ex) {
        log.error("BAD JSON: %s", body);
        log.error(ex.getMessage(), ex);
        HttpMetricsIngestionHandler.sendResponse(ctx, request, ex.getMessage(), HttpResponseStatus.BAD_REQUEST);
    } catch (ConnectionException ex) {
        log.error(ex.getMessage(), ex);
        HttpMetricsIngestionHandler.sendResponse(ctx, request, "Internal error saving data",
                HttpResponseStatus.INTERNAL_SERVER_ERROR);
    } catch (TimeoutException ex) {
        HttpMetricsIngestionHandler.sendResponse(ctx, request, "Timed out persisting metrics",
                HttpResponseStatus.ACCEPTED);
    } catch (Exception ex) {
        log.warn("Other exception while trying to parse content", ex);
        HttpMetricsIngestionHandler.sendResponse(ctx, request, "Failed parsing content",
                HttpResponseStatus.INTERNAL_SERVER_ERROR);
    } finally {
        requestCount.dec();
        timerContext.stop();
    }
}