Usage examples for com.google.common.util.concurrent.Futures#successfulAsList
@Beta @CheckReturnValue public static <V> ListenableFuture<List<V>> successfulAsList( Iterable<? extends ListenableFuture<? extends V>> futures)
From source file:co.cask.cdap.internal.app.AbstractInMemoryProgramRunner.java
protected Table<String, Integer, ProgramController> startPrograms(Program program, RunId runId, ProgramOptions options, ProgramRunnerFactory.Type type, int numInstances) { Table<String, Integer, ProgramController> components = HashBasedTable.create(); try {/* ww w.java 2s. c om*/ startComponent(program, program.getName(), numInstances, runId, options, components, type); } catch (Throwable t) { LOG.error("Failed to start all program instances", t); try { // Need to stop all started components Futures.successfulAsList(Iterables.transform(components.values(), new Function<ProgramController, ListenableFuture<?>>() { @Override public ListenableFuture<?> apply(ProgramController controller) { return controller.stop(); } })).get(); throw Throwables.propagate(t); } catch (Exception e) { LOG.error("Failed to stop all program instances upon startup failure.", e); throw Throwables.propagate(e); } } return components; }
From source file:org.dcache.poolmanager.RendezvousPoolManagerHandler.java
/**
 * Broadcasts {@code msg} to all backends and returns a future over the list of replies.
 * Backends that fail to answer contribute {@code null} entries rather than failing the
 * aggregate future.
 */
private <T extends Serializable> ListenableFuture<List<T>> allSuccessful(CellEndpoint endpoint,
        Serializable msg, Class<T> reply, long timeout) {
    return Futures.successfulAsList(
            backends.stream()
                    .map(backend -> submit(endpoint, new CellPath(backend), msg, reply, timeout))
                    .collect(toList()));
}
From source file:com.github.nethad.clustermeister.provisioning.ec2.commands.AbstractAmazonExecutableCommand.java
/** * Wait for a list of futures to complete. * /*from ww w.j a v a 2s . c o m*/ * <p> * The futures are considered as failed when they return null or fail to return. * </p> * @param futures the futures to wait for. * @param interruptedMessage * Log this message when the thread waiting for the futures to return * is interrupted. The exception's stack trace is appended to this message. * @param executionExceptionMessage * Log this message when the thread waiting for the futures throws an * exception while waiting. The exception's stack trace is appended to * this message. * @param unsuccessfulFuturesMessage * Log this message when at least one future failed (or returned null). * Can be a formatted string where '{}' is replaced with the number of * failed futures. * */ protected void waitForFuturesToComplete(List<ListenableFuture<? extends Object>> futures, String interruptedMessage, String executionExceptionMessage, String unsuccessfulFuturesMessage) { try { List<Object> startedNodes = Futures.successfulAsList(futures).get(); int failedNodes = Iterables.frequency(startedNodes, null); if (failedNodes > 0) { logger.warn(unsuccessfulFuturesMessage, failedNodes); } } catch (InterruptedException ex) { logger.warn(interruptedMessage, ex); } catch (ExecutionException ex) { logger.warn(executionExceptionMessage, ex); } }
From source file:co.cask.cdap.watchdog.election.MultiLeaderElection.java
@Override protected void shutDown() throws Exception { LOG.info("Stopping leader election."); List<ListenableFuture<?>> futures = Lists.newArrayList(); for (LeaderElection election : electionCancels) { futures.add(election.stop());//from w w w .jav a 2 s.c o m } try { Futures.successfulAsList(futures).get(10, TimeUnit.SECONDS); } finally { executor.shutdown(); executor.awaitTermination(10, TimeUnit.SECONDS); LOG.info("Leader election stopped."); } }
From source file:org.thingsboard.server.dao.sql.alarm.JpaAlarmDao.java
@Override public ListenableFuture<List<AlarmInfo>> findAlarms(TenantId tenantId, AlarmQuery query) { log.trace("Try to find alarms by entity [{}], status [{}] and pageLink [{}]", query.getAffectedEntityId(), query.getStatus(), query.getPageLink()); EntityId affectedEntity = query.getAffectedEntityId(); String searchStatusName;//from ww w.j a v a2s. co m if (query.getSearchStatus() == null && query.getStatus() == null) { searchStatusName = AlarmSearchStatus.ANY.name(); } else if (query.getSearchStatus() != null) { searchStatusName = query.getSearchStatus().name(); } else { searchStatusName = query.getStatus().name(); } String relationType = BaseAlarmService.ALARM_RELATION_PREFIX + searchStatusName; ListenableFuture<List<EntityRelation>> relations = relationDao.findRelations(tenantId, affectedEntity, relationType, RelationTypeGroup.ALARM, EntityType.ALARM, query.getPageLink()); return Futures.transformAsync(relations, input -> { List<ListenableFuture<AlarmInfo>> alarmFutures = new ArrayList<>(input.size()); for (EntityRelation relation : input) { alarmFutures.add( Futures.transform(findAlarmByIdAsync(tenantId, relation.getTo().getId()), AlarmInfo::new)); } return Futures.successfulAsList(alarmFutures); }); }
From source file:co.cask.hydrator.plugin.RunExternalProgramExecutor.java
/**
 * Sends one line of input to the executable thread and emits the output as structured records.
 *
 * @param line space-separated sequence of inputs passed as STDIN to the executable binary.
 * @param emitter sink for the structured records built from the executable's output.
 * @param structuredRecord input record whose matching fields are copied through to the output.
 * @param outputSchema schema of the emitted records.
 */
void submit(String line, Emitter<StructuredRecord> emitter, StructuredRecord structuredRecord, Schema outputSchema) {
    // Completed (or failed) by the consumer of eventQueue once this line has been processed.
    SettableFuture<String> completion = SettableFuture.create();
    try {
        eventQueue.put(new Event(line, completion));
        // Block until the line has been handled; successfulAsList never throws on a failed
        // future, so this returns even if 'completion' was completed exceptionally.
        Futures.successfulAsList(completion).get();
        // Read the output and emit the structured record.
        for (String output : outputList) {
            StructuredRecord.Builder builder = StructuredRecord.builder(outputSchema);
            for (Schema.Field field : outputSchema.getFields()) {
                if (structuredRecord.getSchema().getField(field.getName()) != null) {
                    // Field exists on the input record: carry its value through unchanged.
                    builder.set(field.getName(), structuredRecord.get(field.getName()));
                } else {
                    // Field comes from the executable: use the raw string or convert it.
                    if (field.getSchema().getType().equals(Schema.Type.STRING)) {
                        builder.set(field.getName(), output);
                    } else {
                        builder.convertAndSet(field.getName(), output);
                    }
                }
            }
            emitter.emit(builder.build());
            // NOTE(review): clearing outputList while iterating it will throw
            // ConcurrentModificationException for most List implementations when it holds
            // more than one element — confirm outputList's type and size contract.
            outputList.clear();
        }
    } catch (Exception e) {
        // NOTE(review): the exception is only recorded on the future and otherwise swallowed;
        // callers never observe it unless something inspects 'completion' — confirm intended.
        completion.setException(e);
    }
}
From source file:org.jenkinsci.plugins.mesos.MesosCleanupThread.java
/**
 * Periodic cleanup: finds Mesos computers whose slaves are pending deletion, terminates the
 * underlying nodes in parallel, then removes the corresponding slaves from Jenkins.
 */
@Override
protected void execute(TaskListener listener) {
    // Futures for the asynchronous node terminations.
    final ImmutableList.Builder<ListenableFuture<?>> deletedNodesBuilder = ImmutableList
            .<ListenableFuture<?>>builder();
    ListeningExecutorService executor = MoreExecutors.listeningDecorator(Computer.threadPoolForRemoting);
    // Computers whose slaves must be removed after their nodes have terminated.
    final ImmutableList.Builder<MesosComputer> computersToDeleteBuilder = ImmutableList
            .<MesosComputer>builder();
    for (final Computer c : Jenkins.getInstance().getComputers()) {
        if (MesosComputer.class.isInstance(c)) {
            MesosSlave mesosSlave = (MesosSlave) c.getNode();
            if (mesosSlave != null && mesosSlave.isPendingDelete()) {
                final MesosComputer comp = (MesosComputer) c;
                computersToDeleteBuilder.add(comp);
                logger.log(Level.INFO, "Marked " + comp.getName() + " for deletion");
                // Terminate the node off-thread; the future is collected so that all
                // terminations can be awaited together below.
                ListenableFuture<?> f = executor.submit(new Runnable() {
                    public void run() {
                        logger.log(Level.INFO, "Deleting pending node " + comp.getName());
                        try {
                            // NOTE(review): comp.getNode() is re-read here and could be null
                            // by the time the task runs — confirm against MesosComputer.
                            comp.getNode().terminate();
                        } catch (RuntimeException e) {
                            logger.log(Level.WARNING, "Failed to disconnect and delete " + comp.getName()
                                    + ": " + e.getMessage());
                            throw e;
                        }
                    }
                });
                deletedNodesBuilder.add(f);
            } else {
                logger.log(Level.FINE, c.getName() + " with slave " + mesosSlave
                        + " is not pending deletion or the slave is null");
            }
        } else {
            logger.log(Level.FINER,
                    c.getName() + " is not a mesos computer, it is a " + c.getClass().getName());
        }
    }
    // Wait for every termination; successfulAsList tolerates individual failures, and
    // getUnchecked avoids a checked-exception signature here.
    Futures.getUnchecked(Futures.successfulAsList(deletedNodesBuilder.build()));
    // Remove the slaves from Jenkins regardless of whether their nodes terminated cleanly.
    for (MesosComputer c : computersToDeleteBuilder.build()) {
        try {
            c.deleteSlave();
        } catch (IOException e) {
            logger.log(Level.WARNING,
                    "Failed to disconnect and delete " + c.getName() + ": " + e.getMessage());
        } catch (InterruptedException e) {
            logger.log(Level.WARNING,
                    "Failed to disconnect and delete " + c.getName() + ": " + e.getMessage());
        }
    }
}
From source file:co.cask.cdap.internal.app.runtime.service.InMemoryServiceRunner.java
private Table<String, Integer, ProgramController> createRunnables(Program program, RunId runId, ServiceSpecification serviceSpec) { Table<String, Integer, ProgramController> runnables = HashBasedTable.create(); try {// w w w . ja v a 2s. c o m for (Map.Entry<String, RuntimeSpecification> entry : serviceSpec.getRunnables().entrySet()) { int instanceCount = entry.getValue().getResourceSpecification().getInstances(); for (int instanceId = 0; instanceId < instanceCount; instanceId++) { runnables.put(entry.getKey(), instanceId, startRunnable(program, createRunnableOptions(entry.getKey(), instanceId, instanceCount, runId))); } } } catch (Throwable t) { // Need to stop all started runnable here. try { Futures.successfulAsList(Iterables.transform(runnables.values(), new Function<ProgramController, ListenableFuture<ProgramController>>() { @Override public ListenableFuture<ProgramController> apply(ProgramController input) { return input.stop(); } })).get(); } catch (Exception e) { LOG.error("Failed to stop all the runnables"); } throw Throwables.propagate(t); } return runnables; }
From source file:google.registry.tools.LoadSnapshotCommand.java
/** * Block on the completion of the load jobs in the provided map, printing out information on * each job's success or failure.//from www. ja va 2s . c om */ private void waitForLoadJobs(Map<String, ListenableFuture<?>> loadJobs) throws Exception { final long startTime = System.currentTimeMillis(); System.err.println("Waiting for load jobs..."); // Add callbacks to each load job that print information on successful completion or failure. for (final String jobId : loadJobs.keySet()) { final String jobName = "load-" + jobId; Futures.addCallback(loadJobs.get(jobId), new FutureCallback<Object>() { private double elapsedSeconds() { return (System.currentTimeMillis() - startTime) / 1000.0; } @Override public void onSuccess(Object unused) { System.err.printf("Job %s succeeded (%.3fs)\n", jobName, elapsedSeconds()); } @Override public void onFailure(Throwable error) { System.err.printf("Job %s failed (%.3fs): %s\n", jobName, elapsedSeconds(), error.getMessage()); } }); } // Block on the completion of all the load jobs. List<?> results = Futures.successfulAsList(loadJobs.values()).get(); int numSucceeded = FluentIterable.from(results).filter(notNull()).size(); System.err.printf("All load jobs have terminated: %d/%d successful.\n", numSucceeded, loadJobs.size()); }
From source file:com.joyveb.dbpimpl.cass.prepare.schema.AbstractQueryOperation.java
/**
 * Executes every statement from the iterator asynchronously and returns a future over the
 * list of result sets. Statements that fail yield {@code null} entries instead of failing
 * the aggregate future.
 */
protected CassandraFuture<List<ResultSet>> doExecuteAsync(Iterator<Statement> queryIterator) {
    if (!queryIterator.hasNext()) {
        // Nothing to run: short-circuit with an already-completed empty result.
        return new CassandraFuture<List<ResultSet>>(
                Futures.immediateFuture(Collections.<ResultSet>emptyList()),
                CassandraUtils.EXCEPTION_TRANSLATOR);
    }
    // Lazily map each statement to its asynchronous execution.
    final Iterator<ListenableFuture<ResultSet>> executions = Iterators.transform(queryIterator,
            new Function<Statement, ListenableFuture<ResultSet>>() {
                @Override
                public ListenableFuture<ResultSet> apply(Statement statement) {
                    return doExecuteAsync(statement);
                }
            });
    // successfulAsList consumes the (one-shot) iterable immediately; failed statements
    // surface as null entries rather than failing the aggregate.
    ListenableFuture<List<ResultSet>> aggregate = Futures
            .successfulAsList(new Iterable<ListenableFuture<ResultSet>>() {
                @Override
                public Iterator<ListenableFuture<ResultSet>> iterator() {
                    return executions;
                }
            });
    return new CassandraFuture<List<ResultSet>>(aggregate, CassandraUtils.EXCEPTION_TRANSLATOR);
}