Example usage for com.google.common.util.concurrent ListeningExecutorService submit


Introduction

On this page you can find usage examples for com.google.common.util.concurrent ListeningExecutorService submit, collected from open source projects.

Prototype

@Override
ListenableFuture<?> submit(Runnable task);
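
The project examples below are fairly involved, so here is a minimal, self-contained sketch of the basic pattern first. This is an illustration written for this page, not code from any of the projects below, and it assumes a reasonably recent Guava version: a plain ExecutorService is wrapped with MoreExecutors.listeningDecorator so that submit returns a ListenableFuture, and a callback is attached to react to completion.

import java.util.concurrent.Executors;

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class SubmitExample {
    public static void main(String[] args) {
        // decorate a standard executor so that submit() returns ListenableFuture
        ListeningExecutorService service = MoreExecutors
                .listeningDecorator(Executors.newSingleThreadExecutor());

        // submit(Runnable) returns a ListenableFuture<?> that completes when the task finishes
        ListenableFuture<?> future = service.submit(new Runnable() {
            public void run() {
                System.out.println("task running");
            }
        });

        // react to completion or failure without blocking the submitting thread
        Futures.addCallback(future, new FutureCallback<Object>() {
            public void onSuccess(Object result) {
                System.out.println("task finished");
            }

            public void onFailure(Throwable t) {
                t.printStackTrace();
            }
        }, MoreExecutors.directExecutor());

        service.shutdown();
    }
}

Many of the examples on this page use the submit(Callable<T>) overload instead, which works the same way but yields a typed ListenableFuture<T> carrying the Callable's result.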


Usage

From source file: us.physion.ovation.ui.editor.ResourceInfoPanel.java

private void initUi() {
    DefaultComboBoxModel<String> model = new DefaultComboBoxModel<>(
            getAvailableContentTypes().toArray(new String[0]));
    contentTypeComboBox.setModel(model);
    contentTypeComboBox.setSelectedItem(getContentType());

    contentTypeComboBox.addItemListener((ItemEvent e) -> {
        if (e.getStateChange() == ItemEvent.SELECTED) {
            String selection = (String) e.getItem();
            setContentType(selection);
        }
    });

    final DataContext ctx = Lookup.getDefault().lookup(ConnectionProvider.class).getDefaultContext();

    addSourcesTextField.addActionListener((ActionEvent e) -> {
        addSourceFromText(ctx, addSourcesTextField.getText());
    });

    addSourcesTextField.setEnabled(getMeasurements().size() > 0 || getResources().stream().anyMatch((r) -> {
        return !(r instanceof Measurement);
    }) || getRevisions().stream().anyMatch((r) -> {
        return !(r.getResource() instanceof Measurement);
    }));

    OvationEntity e = Iterables.getFirst(getEntities(OvationEntity.class), null);
    if (e != null) {
        ListeningExecutorService svc = e.getDataContext().getCoordinator().getExecutorService();
        ListenableFuture<List<String>> sourceIds = svc.submit(() -> {
            try {
                //TODO make this async
                List<String> sourceIds1 = getSourceIds(ctx.getTopLevelSources());
                List<String> sortedIds = Lists.newArrayList(sourceIds1);
                Collections.sort(sortedIds);
                AutoCompleteDecorator.decorate(addSourcesTextField, sortedIds, false);
                return sortedIds;
            } catch (Throwable ex) {
                logger.error("Unable to retrieve Sources. Autocomplete for Source IDs disabled.");
                return Lists.newArrayList();
            }
        });
    }

    revisionFileWell
            .setDelegate(new FileWell.AbstractDelegate(Bundle.ResourceInfoPanel_Drop_Files_For_New_Revision()) {

                @Override
                public void filesDropped(File[] files) {
                    if (files.length == 0 || getResources().size() > 1) {
                        return;
                    }

                    for (Resource r : getResources()) {
                        try {
                            File main = null;
                            List<URL> supporting = Lists.newLinkedList();
                            for (File f : files) {
                                if (f.getName().equals(r.getFilename())) {
                                    main = f;
                                } else {
                                    supporting.add(f.toURI().toURL());
                                }

                            }

                            if (main == null) {
                                main = files[0];
                                supporting.remove(0);
                            }

                            r.addRevision(main.toURI().toURL(), ContentTypes.getContentType(main),
                                    main.getName(), supporting);

                        } catch (MalformedURLException ex) {
                            throw new OvationException("Unable to create new revision", ex);
                        } catch (IOException ex) {
                            throw new OvationException("Unable to create new revision", ex);
                        }
                    }
                }
            });

    updateInputs();
}

From source file: org.sosy_lab.cpachecker.core.algorithm.ParallelAlgorithm.java

@Override
public AlgorithmStatus run(ReachedSet pReachedSet) throws CPAException, InterruptedException {
    mainEntryNode = AbstractStates.extractLocation(pReachedSet.getFirstState());
    ForwardingReachedSet forwardingReachedSet = (ForwardingReachedSet) pReachedSet;

    ListeningExecutorService exec = listeningDecorator(newFixedThreadPool(configFiles.size()));
    List<ListenableFuture<ParallelAnalysisResult>> futures = new ArrayList<>();

    for (AnnotatedValue<Path> p : configFiles) {
        futures.add(exec.submit(createParallelAnalysis(p, ++stats.noOfAlgorithmsUsed)));
    }

    // shut down the executor service so that no new tasks are accepted
    exec.shutdown();

    handleFutureResults(futures);

    // wait some time so that all threads are shut down and have (hopefully) finished their logging
    if (!exec.awaitTermination(10, TimeUnit.SECONDS)) {
        logger.log(Level.WARNING, "Not all threads are terminated although we have a result.");
    }

    exec.shutdownNow();

    if (finalResult != null) {
        forwardingReachedSet.setDelegate(finalResult.getReached());
        return finalResult.getStatus();
    }

    return AlgorithmStatus.UNSOUND_AND_PRECISE;
}

From source file: org.apache.abdera2.common.protocol.Session.java

/**
 * Processes requests asynchronously. The listener will
 * be invoked once the call completes.
 */
 */
public <X extends ClientResponse> void process(ExecutorService executor, Callable<X> resp,
        final Listener<X> listener) {
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(executor);
    final ListenableFuture<X> lf = exec.submit(resp);
    lf.addListener(new Runnable() {
        public void run() {
            X resp = null;
            try {
                resp = lf.get();
                listener.onResponse(resp);
            } catch (Throwable t) {
                throw ExceptionHelper.propogate(t);
            } finally { // auto release since by this point we know we're done with it
                if (resp != null)
                    resp.release();
            }
        }
    }, executor);
}
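
Two details of this pattern are worth noting: the listener Runnable is registered with the same executor that was handed to the method, so the callback also runs on that executor once the future completes, and the response is released in the finally block, which means the Listener must finish working with it inside onResponse rather than hold on to it.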

From source file: com.appdynamics.monitors.azure.AzureServiceBusMonitor.java

private void collectAndPrintMetrics(final Configuration config) throws TaskExecutionException {
    final Azure azure = config.getAzure();
    final String metricPrefix = config.getMetricPrefix();
    List<Namespace> namespaces = config.getNamespaces();
    if (namespaces == null || namespaces.isEmpty()) {
        logger.info("No namespaces configured. Please configure namespaces in config.yml file to get stats");
        return;
    }

    ListeningExecutorService namespaceService = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(config.getNamespaceThreads()));
    try {
        for (final Namespace namespace : namespaces) {

            final String namespaceName = namespace.getNamespace();
            if (Strings.isNullOrEmpty(namespaceName)) {
                logger.info("No value for namespaces in configuration. Ignoring the entry");
                continue;
            }

            //Queue Stats
            ListenableFuture<Set<String>> getQueueNames = namespaceService.submit(new Callable<Set<String>>() {
                public Set<String> call() {
                    return getQueueNames(config, namespaceName, namespace.getExcludeQueues());
                }
            });

            Futures.addCallback(getQueueNames, new FutureCallback<Set<String>>() {
                public void onSuccess(Set<String> queueNames) {
                    if (queueNames != null && !queueNames.isEmpty()) {
                        try {
                            Map<String, String> queueStats = azureServiceBusStatsCollector.collectQueueStats(
                                    azure, namespaceName, queueNames, namespace.getQueueStats(),
                                    config.getQueueThreads());
                            printMetrics(queueStats, metricPrefix);
                        } catch (TaskExecutionException e) {
                            logger.error("Unable to get queue stats for namespace [" + namespaceName, e);
                        }
                    }
                }

                public void onFailure(Throwable thrown) {
                    logger.error("Unable to get queues for namespace [" + namespaceName, thrown);
                }
            });

            //Topic stats
            ListenableFuture<Set<String>> getTopicNames = namespaceService.submit(new Callable<Set<String>>() {
                public Set<String> call() {
                    return getTopicNames(config, namespaceName, namespace.getExcludeTopics());
                }
            });

            Futures.addCallback(getTopicNames, new FutureCallback<Set<String>>() {
                public void onSuccess(Set<String> topicNames) {
                    if (topicNames != null && !topicNames.isEmpty()) {
                        try {
                            Map<String, String> topicStats = azureServiceBusStatsCollector.collectTopicStats(
                                    azure, namespaceName, topicNames, namespace.getTopicStats(),
                                    config.getTopicThreads());
                            printMetrics(topicStats, metricPrefix);
                        } catch (TaskExecutionException e) {
                            logger.error("Unable to get topic stats for namespace [" + namespaceName, e);
                        }
                    }
                }

                public void onFailure(Throwable thrown) {
                    logger.error("Unable to get topics for namespace [" + namespaceName, thrown);
                }
            });
        }
    } finally {
        namespaceService.shutdown();
    }
}

From source file: com.ngdata.hbaseindexer.master.IndexerMaster.java

private void startFullIndexBuild(final String indexerName) {
    try {
        String lock = indexerModel.lockIndexer(indexerName);
        try {
            // Read the current state of the indexer definition and make sure it is still up to date
            final IndexerDefinition indexer = indexerModel.getFreshIndexer(indexerName);
            IndexerDefinitionBuilder updatedIndexer = new IndexerDefinitionBuilder().startFrom(indexer);
            final String[] batchArguments = createBatchArguments(indexer);
            if (needsBatchBuildStart(indexer)) {
                final ListeningExecutorService executor = MoreExecutors
                        .listeningDecorator(Executors.newSingleThreadExecutor());
                ListenableFuture<Integer> future = executor.submit(new Callable<Integer>() {
                    @Override
                    public Integer call() throws Exception {
                        HBaseMapReduceIndexerTool tool = new HBaseMapReduceIndexerTool();
                        tool.setConf(hbaseConf);
                        return tool.run(batchArguments,
                                new IndexerDefinitionUpdaterJobProgressCallback(indexerName));
                    }
                });

                Futures.addCallback(future, new FutureCallback<Integer>() {
                    @Override
                    public void onSuccess(Integer exitCode) {
                        markBatchBuildCompleted(indexerName, exitCode == 0);
                        executor.shutdownNow();
                    }

                    @Override
                    public void onFailure(Throwable throwable) {
                        log.error("batch index build failed", throwable);
                        markBatchBuildCompleted(indexerName, false);
                        executor.shutdownNow();
                    }
                });

                BatchBuildInfo jobInfo = new BatchBuildInfo(System.currentTimeMillis(), null, null,
                        batchArguments);
                updatedIndexer.activeBatchBuildInfo(jobInfo).batchIndexingState(BatchIndexingState.BUILDING)
                        .batchIndexCliArguments(null).build();

                indexerModel.updateIndexerInternal(updatedIndexer.build());

                log.info("Started batch index build for index " + indexerName);

            }
        } finally {
            indexerModel.unlockIndexer(lock);
        }
    } catch (Throwable t) {
        log.error("Error trying to start index build job for index " + indexerName, t);
    }
}

From source file: org.grycap.gpf4med.cloud.CloudService.java

/**
 * Starts new Gpf4Med study servers in the specified group, creating the group if necessary. The servers 
 * will be created with the specified configuration.
 * @param group the group to which the servers will be created.
 * @param count number of servers to be created in the group.
 * @param config optional configuration. If undefined, the default configuration will be applied as defined
 *        in {@link DefaultNodeConfiguration}.
 */
public void addServers(final String group, final int count, final @Nullable NodeConfiguration config) {
    checkArgument(StringUtils.isNotBlank(group), "Uninitialized or invalid group");
    checkArgument(count > 0, "Invalid number of servers");
    final NodeConfiguration config2 = (config != null ? config : fromDefaults());
    final Gpf4MedServiceController controller = controller();
    // quietly bypass node creation on providers that don't support it
    if (!controller.nodesCanBeAcquired()) {
        LOGGER.info("Bypassing unsupported node creation");
        return;
    }
    // create nodes asynchronously
    final ListeningExecutorService executorService = listeningDecorator(newCachedThreadPool());
    final List<ListenableFuture<HostAndPort>> futures = synchronizedList(
            new ArrayList<ListenableFuture<HostAndPort>>());
    for (final UnmodifiableIterator<Integer> it = create(closed(1, count), integers()).iterator(); it
            .hasNext(); it.next()) {
        final ListenableFuture<HostAndPort> future = executorService.submit(new Callable<HostAndPort>() {
            @Override
            public HostAndPort call() throws Exception {
                return controller.add(group, config2);
            }
        });
        futures.add(future);
    }
    final ListenableFuture<List<HostAndPort>> nodesFuture = successfulAsList(futures);
    List<HostAndPort> nodes = null;
    try {
        nodes = nodesFuture.get(MILLISECONDS.convert(ADD_SERVERS_TIMEOUT_MINUTES, MINUTES), MILLISECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        LOGGER.warn("Failed to acquire servers in group: " + group, e);
    }
    // create a mutable copy to remove null entries (failed nodes)
    if (nodes != null && !nodes.isEmpty()) {
        nodes = new ArrayList<HostAndPort>(nodes);
        nodes.removeAll(singleton(null));
    }
    // register group
    if (nodes != null && !nodes.isEmpty()) {
        groups.add(group);
    }
    LOGGER.info(String.format("Server(s) acquired in the group %s (%s)", group, nodes));
}
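
A call site for the method above might look like the following sketch (the group name here is purely hypothetical; passing null for the configuration falls back to the defaults described in the javadoc):

void acquireStudyServers(CloudService cloudService) {
    // hypothetical group name; a null configuration means DefaultNodeConfiguration is used
    cloudService.addServers("gpf4med-workers", 3, null);
}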

From source file: org.trinity.foundation.api.render.binding.BinderImpl.java

@Override
public ListenableFuture<Void> bind(final ListeningExecutorService modelExecutor, final Object model,
        final Object view) {
    checkNotNull(modelExecutor);
    checkNotNull(model);
    checkNotNull(view);

    return modelExecutor.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            bindImpl(modelExecutor, model, view);
            return null;
        }
    });

}
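
Because bind returns a ListenableFuture<Void>, a caller can be notified once the binding work has actually finished on the model executor. A small sketch, assuming binder, modelExecutor, model and view already exist:

ListenableFuture<Void> bound = binder.bind(modelExecutor, model, view);
bound.addListener(new Runnable() {
    public void run() {
        // the view is now bound to the model
    }
}, modelExecutor);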

From source file: org.jenkinsci.plugins.mesos.MesosCleanupThread.java

@Override
protected void execute(TaskListener listener) {
    final ImmutableList.Builder<ListenableFuture<?>> deletedNodesBuilder = ImmutableList
            .<ListenableFuture<?>>builder();
    ListeningExecutorService executor = MoreExecutors.listeningDecorator(Computer.threadPoolForRemoting);
    final ImmutableList.Builder<MesosComputer> computersToDeleteBuilder = ImmutableList
            .<MesosComputer>builder();

    for (final Computer c : Jenkins.getInstance().getComputers()) {
        if (MesosComputer.class.isInstance(c)) {
            MesosSlave mesosSlave = (MesosSlave) c.getNode();

            if (mesosSlave != null && mesosSlave.isPendingDelete()) {
                final MesosComputer comp = (MesosComputer) c;
                computersToDeleteBuilder.add(comp);
                logger.log(Level.INFO, "Marked " + comp.getName() + " for deletion");
                ListenableFuture<?> f = executor.submit(new Runnable() {
                    public void run() {
                        logger.log(Level.INFO, "Deleting pending node " + comp.getName());
                        try {
                            comp.getNode().terminate();
                        } catch (RuntimeException e) {
                            logger.log(Level.WARNING, "Failed to disconnect and delete " + comp.getName() + ": "
                                    + e.getMessage());
                            throw e;
                        }
                    }
                });
                deletedNodesBuilder.add(f);
            } else {
                logger.log(Level.FINE, c.getName() + " with slave " + mesosSlave
                        + " is not pending deletion or the slave is null");
            }
        } else {
            logger.log(Level.FINER,
                    c.getName() + " is not a mesos computer, it is a " + c.getClass().getName());
        }
    }

    Futures.getUnchecked(Futures.successfulAsList(deletedNodesBuilder.build()));

    for (MesosComputer c : computersToDeleteBuilder.build()) {
        try {
            c.deleteSlave();
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to disconnect and delete " + c.getName() + ": " + e.getMessage());
        } catch (InterruptedException e) {
            logger.log(Level.WARNING, "Failed to disconnect and delete " + c.getName() + ": " + e.getMessage());
        }

    }
}

From source file: org.trinity.foundation.api.render.binding.BinderImpl.java

@Override
public ListenableFuture<Void> updateBinding(final ListeningExecutorService modelExecutor, final Object model,
        final String propertyName) {
    checkNotNull(modelExecutor);
    checkNotNull(model);
    checkNotNull(propertyName);

    return modelExecutor.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            updateBindingImpl(modelExecutor, model, propertyName);
            return null;
        }
    });
}

From source file: org.jclouds.chef.strategy.internal.BaseListCookbookVersionsImpl.java

protected Iterable<? extends CookbookVersion> executeConcurrently(final ListeningExecutorService executor,
        Iterable<String> cookbookNames) {
    return concat(transform(cookbookNames, new Function<String, Iterable<? extends CookbookVersion>>() {

        @Override
        public Iterable<? extends CookbookVersion> apply(final String cookbook) {
            // TODO getting each version could also go parallel
            Set<String> cookbookVersions = api.listVersionsOfCookbook(cookbook);
            ListenableFuture<List<CookbookVersion>> futures = allAsList(
                    transform(cookbookVersions, new Function<String, ListenableFuture<CookbookVersion>>() {
                        @Override
                        public ListenableFuture<CookbookVersion> apply(final String version) {
                            return executor.submit(new Callable<CookbookVersion>() {
                                @Override
                                public CookbookVersion call() throws Exception {
                                    return api.getCookbook(cookbook, version);
                                }
                            });
                        }
                    }));

            logger.trace(String.format("getting versions of cookbook: %s", cookbook));
            return getUnchecked(futures);
        }
    }));
}