List of usage examples for com.google.common.util.concurrent.ListeningExecutorService.shutdown()
void shutdown();
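shutdown() initiates an orderly shutdown: previously submitted tasks are executed, but no new tasks are accepted. It does not block waiting for running tasks to finish; use awaitTermination for that. The examples below all follow the same basic pattern: decorate a plain ExecutorService with MoreExecutors.listeningDecorator, submit work, then call shutdown() in a finally block. Here is a minimal, self-contained sketch of that pattern; the class name, pool size, and task body are illustrative and not taken from any of the examples below.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownExample {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        // Decorate a plain ExecutorService so submit() returns ListenableFutures.
        ListeningExecutorService executor = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(4));
        try {
            List<ListenableFuture<Integer>> futures = new ArrayList<>();
            for (int i = 0; i < 10; i++) {
                final int n = i;
                futures.add(executor.submit(new Callable<Integer>() {
                    @Override
                    public Integer call() {
                        return n * n; // illustrative work
                    }
                }));
            }
            // Block until all tasks finish (or the first one fails).
            System.out.println(Futures.allAsList(futures).get());
        } finally {
            // Stop accepting new tasks; already-submitted tasks still run to completion.
            executor.shutdown();
            // Optionally wait for in-flight tasks to drain.
            executor.awaitTermination(1, TimeUnit.MINUTES);
        }
    }
}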
From source file: co.cask.cdap.internal.app.deploy.pipeline.ProgramGenerationStage.java

@Override
public void process(final ApplicationDeployable input) throws Exception {
    ImmutableList.Builder<Program> programs = ImmutableList.builder();
    final ApplicationSpecification appSpec = input.getSpecification();
    final String applicationName = appSpec.getName();
    final ArchiveBundler bundler = new ArchiveBundler(input.getLocation());

    // Make sure the namespace directory exists
    Id.Namespace namespaceId = input.getId().getNamespace();
    Location namespacedLocation = namespacedLocationFactory.get(namespaceId);
    // Note: deployApplication/deployAdapters have already checked for namespaceDir existence, so not checking again

    // Make sure we have a directory to store the original artifact.
    final Location appFabricDir = namespacedLocation.append(configuration.get(Constants.AppFabric.OUTPUT_DIR));

    // Check exists, create, check exists again to avoid failure due to race condition.
    if (!appFabricDir.exists() && !appFabricDir.mkdirs() && !appFabricDir.exists()) {
        throw new IOException(String.format("Failed to create directory %s", appFabricDir.toURI().getPath()));
    }

    // Now, we iterate through all ProgramSpecification and generate programs
    Iterable<ProgramSpecification> specifications = Iterables.concat(appSpec.getMapReduce().values(),
            appSpec.getFlows().values(), appSpec.getWorkflows().values(), appSpec.getServices().values(),
            appSpec.getSpark().values(), appSpec.getWorkers().values());

    // Generate webapp program if required
    Set<String> servingHostNames = WebappProgramRunner
            .getServingHostNames(Locations.newInputSupplier(input.getLocation()));
    if (!servingHostNames.isEmpty()) {
        specifications = Iterables.concat(specifications,
                ImmutableList.of(createWebappSpec(ProgramType.WEBAPP.toString().toLowerCase())));
    }

    ListeningExecutorService executorService = MoreExecutors.listeningDecorator(
            Executors.newFixedThreadPool(10, Threads.createDaemonThreadFactory("program-gen-%d")));
    try {
        List<ListenableFuture<Location>> futures = Lists.newArrayList();
        for (final ProgramSpecification spec : specifications) {
            ListenableFuture<Location> future = executorService.submit(new Callable<Location>() {
                @Override
                public Location call() throws Exception {
                    ProgramType type = ProgramTypes.fromSpecification(spec);
                    String name = String.format(Locale.ENGLISH, "%s/%s", applicationName, type);
                    Location programDir = appFabricDir.append(name);
                    if (!programDir.exists()) {
                        programDir.mkdirs();
                    }
                    Location output = programDir.append(String.format("%s.jar", spec.getName()));
                    Id.Program programId = Id.Program.from(input.getId(), type, spec.getName());
                    return ProgramBundle.create(programId, bundler, output, spec.getClassName(), appSpec);
                }
            });
            futures.add(future);
        }
        for (Location jarLocation : Futures.allAsList(futures).get()) {
            programs.add(Programs.create(jarLocation, null));
        }
    } finally {
        executorService.shutdown();
    }

    // moves the <appfabricdir>/archive/<app-name>.jar to <appfabricdir>/<app-name>/archive/<app-name>.jar
    // Cannot do this before starting the deploy pipeline because appId could be null at that time.
    // However, it is guaranteed to be non-null from VerificationsStage onwards
    Location newArchiveLocation = appFabricDir.append(applicationName).append(Constants.ARCHIVE_DIR);
    moveAppArchiveUnderAppDirectory(input.getLocation(), newArchiveLocation);
    Location programLocation = newArchiveLocation.append(input.getLocation().getName());
    ApplicationDeployable updatedAppDeployable = new ApplicationDeployable(input.getId(),
            input.getSpecification(), input.getExistingAppSpec(), input.getApplicationDeployScope(),
            programLocation);

    // Emits the received specification with programs.
    emit(new ApplicationWithPrograms(updatedAppDeployable, programs.build()));
}
From source file: com.spectralogic.ds3client.helpers.ChunkTransferrer.java

public void transferChunks(final Iterable<JobNode> nodes, final Iterable<Objects> chunks)
        throws IOException, XmlProcessingException {
    LOG.debug("Getting ready to process chunks");
    final ImmutableMap<UUID, JobNode> nodeMap = buildNodeMap(nodes);
    LOG.debug("Starting executor service");
    final ListeningExecutorService executor = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(maxParallelRequests));
    LOG.debug("Executor service started");
    try {
        final List<ListenableFuture<?>> tasks = new ArrayList<>();
        for (final Objects chunk : chunks) {
            LOG.debug("Processing parts for chunk: {}", chunk.getChunkId().toString());
            final Ds3Client client = getClient(nodeMap, chunk.getNodeId(), mainClient);
            for (final BulkObject ds3Object : chunk.getObjects()) {
                final ObjectPart part = new ObjectPart(ds3Object.getOffset(), ds3Object.getLength());
                if (this.partTracker.containsPart(ds3Object.getName(), part)) {
                    LOG.debug("Adding {} to executor for processing", ds3Object.getName());
                    tasks.add(executor.submit(new Callable<Object>() {
                        @Override
                        public Object call() throws Exception {
                            LOG.debug("Processing {}", ds3Object.getName());
                            ChunkTransferrer.this.itemTransferrer.transferItem(client, ds3Object);
                            ChunkTransferrer.this.partTracker.completePart(ds3Object.getName(), part);
                            return null;
                        }
                    }));
                }
            }
        }
        executeWithExceptionHandling(tasks);
    } finally {
        LOG.debug("Shutting down executor");
        executor.shutdown();
    }
}
From source file: org.n52.lod.csw.CSWLoDEnabler.java

protected Map<String, GetRecordByIdResponseDocument> retrieveRecordsThreaded(int startPos, int maxRecords,
        long recordsInTotal) {
    log.info("Retrieve {} records, starting from {} of {}", maxRecords, startPos, recordsInTotal);

    // one thread for getting ids
    List<String> recordIdList = getRecordIds(startPos, maxRecords);

    // many threads getting records descriptions
    final Map<String, GetRecordByIdResponseDocument> recordDescriptions = Maps.newConcurrentMap();
    ListeningExecutorService executorService = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(maxRecords));

    for (String id : recordIdList) {
        final String recordId = id;
        log.debug("Adding {} to the model", recordId);

        CallableRecordDescription c = new CallableRecordDescription(id, csw);
        ListenableFuture<GetRecordByIdResponseDocument> responseFuture = executorService.submit(c);

        Futures.addCallback(responseFuture, new FutureCallback<GetRecordByIdResponseDocument>() {
            private final Logger logger = LoggerFactory.getLogger("Record Downloader");

            @Override
            public void onFailure(Throwable t) {
                // pass recordId so the {} placeholder is filled; the trailing Throwable is logged as the cause
                logger.error("Error retrieving and parsing record {}", recordId, t);
                report.retrievalIssues.put(recordId, t);
            }

            @Override
            public void onSuccess(GetRecordByIdResponseDocument result) {
                logger.trace("SUCCESS with {}", result);
                recordDescriptions.put(recordId, result);
                report.added++;
                report.addedIds.add(recordId);
            }
        });
    }

    executorService.shutdown();
    while (!executorService.isTerminated()) {
        try {
            executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            log.error("Could not await termination", e);
        }
    }

    log.info("Done with requests and parsing, have {} GetRecordById documents.", recordDescriptions.size());
    return recordDescriptions;
}
From source file: edu.dirla.app.ws.rest.services.LogsRestService.java

@ResponseBody
@RequestMapping(method = RequestMethod.POST)
public DataTrafficResult addLogs(@RequestBody CheckTrafficRep checkTrafficRep) {
    List<LogData> results = null;
    long t1 = Calendar.getInstance().getTimeInMillis();

    final List<String> filesToUpload = checkTrafficRep.getFilesToUpload();
    ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(3));

    for (final String fileName : filesToUpload) {
        Callable<Integer> job = new Callable<Integer>() {
            @Override
            public Integer call() throws Exception {
                List<LogData> lines = new ArrayList<LogData>();
                try {
                    lines.addAll(readFile(fileName));
                } catch (IOException e) {
                    e.printStackTrace();
                }
                Map<String, Long> data = new HashMap<String, Long>();
                for (LogData res : lines) {
                    String key = res.getDomain();
                    long value = res.getSize();
                    Long oldValue = data.get(key);
                    data.put(key, value + (oldValue != null ? oldValue : 0));
                }
                logsService.pushLogs("accessLogs." + fileName, data);
                return 0;
            }
        }; // create the job here

        ListenableFuture<Integer> completion = executor.submit(job);
        Futures.addCallback(completion, new FutureCallback<Integer>() {
            @Override
            public void onFailure(Throwable t) {
                // log error
            }

            @Override
            public void onSuccess(Integer result) {
                // do something with the result
            }
        });
    }

    executor.shutdown();
    while (!executor.isTerminated()) {
        try {
            executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }

    UserDataTrafficResult userTrafficData = logsService.checkDataTraffic(checkTrafficRep.getClientAddress());
    long t2 = Calendar.getInstance().getTimeInMillis();

    DataTrafficResult dtr = new DataTrafficResult();
    dtr.setCheckTrafficRequest(checkTrafficRep);
    dtr.setTrafficValue(userTrafficData);
    dtr.setTime(t2 - t1);
    return dtr;
}
From source file: org.excalibur.driver.google.compute.GoogleCompute.java

@Override
public Instances createInstances(InstanceTemplate template) {
    Instances instances = new Instances();
    CountDownLatch latch = new CountDownLatch(template.getMaxCount());

    ListeningExecutorService listeningExecutorService = MoreExecutors.listeningDecorator(this.executorService);
    List<DeployInstanceTask> tasks = Lists.newArrayList();

    final Instance model = createInstanceFromTemplate(template);

    for (int i = 0; i < template.getMaxCount(); i++) {
        Instance instance = model.clone();
        if (template.getMaxCount() > 1) {
            instance.setName(String.format("%s-%s", model.getName(), i + 1));
            instance.getDisks().get(0).getInitializeParams().setDiskName(instance.getName());
            instance.getDisks().get(0).setDeviceName(instance.getName());
        }
        tasks.add(new DeployInstanceTask(instance, latch));
    }

    try {
        List<Future<VirtualMachine>> submittedTasks = listeningExecutorService.invokeAll(tasks);
        latch.await(5, TimeUnit.MINUTES);

        for (Future<VirtualMachine> f : submittedTasks) {
            instances.addInstance(f.get());
        }
    } catch (InterruptedException e) {
        LOGGER.error(e.getMessage(), e);
    } catch (ExecutionException e) {
        LOGGER.error(e.getMessage(), e);
    }

    listeningExecutorService.shutdown();

    LOGGER.debug("Waiting the instances' ready state....");
    ThreadUtils.sleep(30 * 1000);

    LOGGER.debug("Created [{}] instance(s) of [{}/{}]", instances.size(), template.getMinCount(),
            template.getMaxCount());

    return instances;
}
From source file: com.google.caliper.runner.ExperimentingCaliperRun.java

@Override
public void run() throws InvalidBenchmarkException {
    ImmutableSet<Experiment> allExperiments = selector.selectExperiments();
    // TODO(lukes): move this standard-out handling into the ConsoleOutput class?
    stdout.println("Experiment selection: ");
    stdout.println(" Benchmark Methods: "
            + FluentIterable.from(allExperiments).transform(new Function<Experiment, String>() {
                @Override
                public String apply(Experiment experiment) {
                    return experiment.instrumentation().benchmarkMethod().getName();
                }
            }).toSet());
    stdout.println(" Instruments: "
            + FluentIterable.from(selector.instruments()).transform(new Function<Instrument, String>() {
                @Override
                public String apply(Instrument instrument) {
                    return instrument.name();
                }
            }));
    stdout.println(" User parameters: " + selector.userParameters());
    stdout.println(" Virtual machines: "
            + FluentIterable.from(selector.vms()).transform(new Function<VirtualMachine, String>() {
                @Override
                public String apply(VirtualMachine vm) {
                    return vm.name;
                }
            }));
    stdout.println(" Selection type: " + selector.selectionType());
    stdout.println();

    if (allExperiments.isEmpty()) {
        throw new InvalidBenchmarkException(
                "There were no experiments to be performed for the class %s using the instruments %s",
                benchmarkClass.benchmarkClass().getSimpleName(), instruments);
    }

    stdout.format("This selection yields %s experiments.%n", allExperiments.size());
    stdout.flush();

    // always dry run first.
    ImmutableSet<Experiment> experimentsToRun = dryRun(allExperiments);
    if (experimentsToRun.size() != allExperiments.size()) {
        stdout.format("%d experiments were skipped.%n", allExperiments.size() - experimentsToRun.size());
    }

    if (experimentsToRun.isEmpty()) {
        throw new InvalidBenchmarkException("All experiments were skipped.");
    }

    if (options.dryRun()) {
        return;
    }

    stdout.flush();

    int totalTrials = experimentsToRun.size() * options.trialsPerScenario();
    Stopwatch stopwatch = Stopwatch.createStarted();
    List<ScheduledTrial> trials = createScheduledTrials(experimentsToRun, totalTrials);

    final ListeningExecutorService executor = executorProvider.get();
    List<ListenableFuture<TrialResult>> pendingTrials = scheduleTrials(trials, executor);
    ConsoleOutput output = new ConsoleOutput(stdout, totalTrials, stopwatch);
    try {
        // Process results as they complete.
        for (ListenableFuture<TrialResult> trialFuture : inCompletionOrder(pendingTrials)) {
            try {
                TrialResult result = trialFuture.get();
                output.processTrial(result);
                for (ResultProcessor resultProcessor : resultProcessors) {
                    resultProcessor.processTrial(result.getTrial());
                }
            } catch (ExecutionException e) {
                if (e.getCause() instanceof TrialFailureException) {
                    output.processFailedTrial((TrialFailureException) e.getCause());
                } else {
                    for (ListenableFuture<?> toCancel : pendingTrials) {
                        toCancel.cancel(true);
                    }
                    throw Throwables.propagate(e.getCause());
                }
            } catch (InterruptedException e) {
                // be responsive to interruption, cancel outstanding work and exit
                for (ListenableFuture<?> toCancel : pendingTrials) {
                    // N.B. TrialRunLoop is responsive to interruption.
                    toCancel.cancel(true);
                }
                throw new RuntimeException(e);
            }
        }
    } finally {
        executor.shutdown();
        output.close();
    }

    for (ResultProcessor resultProcessor : resultProcessors) {
        try {
            resultProcessor.close();
        } catch (IOException e) {
            logger.log(WARNING, "Could not close a result processor: " + resultProcessor, e);
        }
    }
}
From source file: com.github.rinde.rinsim.experiment.LocalComputer.java

@Override
public ExperimentResults compute(Builder builder, Set<SimArgs> inputs) {
    final ImmutableList.Builder<ExperimentRunner> runnerBuilder = ImmutableList.builder();
    for (final SimArgs args : inputs) {
        runnerBuilder.add(new ExperimentRunner(args));
    }
    final List<ExperimentRunner> runners = runnerBuilder.build();

    final int threads = Math.min(builder.numThreads, runners.size());
    final ListeningExecutorService executor;
    if (builder.showGui) {
        executor = MoreExecutors.newDirectExecutorService();
    } else {
        executor = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(threads, new LocalThreadFactory()));
    }

    final List<SimulationResult> results = Collections.synchronizedList(new ArrayList<SimulationResult>());
    final ResultCollector resultCollector = new ResultCollector(executor, results, builder.resultListeners);

    try {
        for (final ExperimentRunner r : runners) {
            checkForError(executor, resultCollector);
            final ListenableFuture<SimulationResult> f = executor.submit(r);
            Futures.addCallback(f, resultCollector);
        }
        while (results.size() < inputs.size() && !resultCollector.hasError()) {
            Thread.sleep(THREAD_SLEEP_TIME_MS);
        }
        checkForError(executor, resultCollector);
    } catch (final InterruptedException e) {
        LOGGER.trace("Interrupt, shutting down the executor.");
        executor.shutdownNow();
        LOGGER.trace("Waiting for executor to shutdown.");
        try {
            final boolean executorStopped = executor.awaitTermination(MAX_WAIT_FOR_SHUTDOWN_S,
                    TimeUnit.SECONDS);
            if (executorStopped) {
                LOGGER.trace("Executor is shutdown.");
            } else {
                LOGGER.warn("Executor did not stop, timed out after {} seconds.", MAX_WAIT_FOR_SHUTDOWN_S);
            }
        } catch (final InterruptedException e1) {
            LOGGER.warn("Waiting for executor to shutdown is interrupted.");
        }
        return ExperimentResults.create(builder, ImmutableSet.<SimulationResult>of());
    }
    checkForError(executor, resultCollector);
    executor.shutdown();

    final ExperimentResults er = ExperimentResults.create(builder, ImmutableSet.copyOf(results));
    for (final ResultListener rl : builder.resultListeners) {
        rl.doneComputing(er);
    }
    return er;
}
From source file: com.appdynamics.monitors.azure.statsCollector.AzureServiceBusStatsCollector.java

public Map<String, String> collectQueueStats(final Azure azure, final String namespaceName,
        Set<String> queueNames, Set<String> queueStats, int queueThreads) throws TaskExecutionException {
    final Map<String, String> valueMap = createValueMap(azure, namespaceName, QUEUES, queueStats);
    ListeningExecutorService queueService = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(queueThreads));
    // Callbacks run on pool threads, so use a thread-safe map (as the topic variant below does).
    final Map<String, String> queueMetricMap = new ConcurrentHashMap<String, String>();
    final CountDownLatch countDownLatch = new CountDownLatch(queueNames.size());
    try {
        for (final String queueName : queueNames) {
            valueMap.put("ResourceName", queueName);
            try {
                ListenableFuture<Map<String, String>> getQueueNames = queueService
                        .submit(new Callable<Map<String, String>>() {
                            public Map<String, String> call() throws IOException {
                                return getStatsFromAzure(azure, namespaceName, valueMap, queueName, QUEUES);
                            }
                        });
                Futures.addCallback(getQueueNames, new FutureCallback<Map<String, String>>() {
                    public void onSuccess(Map<String, String> queueStats) {
                        countDownLatch.countDown();
                        queueMetricMap.putAll(queueStats);
                    }

                    public void onFailure(Throwable thrown) {
                        countDownLatch.countDown();
                        logger.error("Unable to get stats for queue [" + queueName + "] in namespace ["
                                + namespaceName + "]", thrown);
                    }
                });
            } catch (Exception e) {
                logger.error("Error getting stats for queue [" + namespaceName + "/" + queueName + "]", e);
                throw new TaskExecutionException(
                        "Error getting stats for queue [" + namespaceName + "/" + queueName + "]", e);
            }
        }
    } finally {
        queueService.shutdown();
    }

    try {
        countDownLatch.await();
    } catch (InterruptedException e) {
        logger.error("Unable to wait till getting the queue stats", e);
    }
    return queueMetricMap;
}
From source file: com.appdynamics.monitors.azure.statsCollector.AzureServiceBusStatsCollector.java

public Map<String, String> collectTopicStats(final Azure azure, final String namespaceName,
        Set<String> topicNames, Set<String> topicStats, int topicThreads) throws TaskExecutionException {
    final Map<String, String> valueMap = createValueMap(azure, namespaceName, TOPICS, topicStats);
    ListeningExecutorService topicService = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(topicThreads));
    final Map<String, String> topicMetricMap = new ConcurrentHashMap<String, String>();
    final CountDownLatch countDownLatch = new CountDownLatch(topicNames.size());
    try {
        for (final String topicName : topicNames) {
            valueMap.put("ResourceName", topicName);
            try {
                ListenableFuture<Map<String, String>> getQueueNames = topicService
                        .submit(new Callable<Map<String, String>>() {
                            public Map<String, String> call() throws IOException {
                                return getStatsFromAzure(azure, namespaceName, valueMap, topicName, TOPICS);
                            }
                        });
                Futures.addCallback(getQueueNames, new FutureCallback<Map<String, String>>() {
                    public void onSuccess(Map<String, String> queueStats) {
                        countDownLatch.countDown();
                        topicMetricMap.putAll(queueStats);
                    }

                    public void onFailure(Throwable thrown) {
                        countDownLatch.countDown();
                        logger.error("Unable to get stats for topic [" + topicName + "] in namespace ["
                                + namespaceName + "]", thrown);
                    }
                });
            } catch (Exception e) {
                logger.error("Error getting stats for topic [" + namespaceName + "/" + topicName + "]", e);
                throw new TaskExecutionException(
                        "Error getting stats for topic [" + namespaceName + "/" + topicName + "]", e);
            }
        }
    } finally {
        topicService.shutdown();
    }

    try {
        countDownLatch.await();
    } catch (InterruptedException e) {
        logger.error("Unable to wait till getting the topic stats", e);
    }
    return topicMetricMap;
}
From source file: org.apache.qpid.server.security.SiteSpecificTrustStoreImpl.java

private ListenableFuture<X509Certificate> downloadCertificate(final String url) {
    final ListeningExecutorService workerService = MoreExecutors.listeningDecorator(
            Executors.newSingleThreadExecutor(getThreadFactory("download-certificate-worker-" + getName())));
    try {
        return workerService.submit(new Callable<X509Certificate>() {
            @Override
            public X509Certificate call() {
                try {
                    final URL siteUrl = new URL(url);
                    final int port = siteUrl.getPort() == -1 ? siteUrl.getDefaultPort() : siteUrl.getPort();
                    SSLContext sslContext = SSLUtil.tryGetSSLContext();
                    sslContext.init(new KeyManager[0], new TrustManager[] { new AlwaysTrustManager() }, null);
                    try (SSLSocket socket = (SSLSocket) sslContext.getSocketFactory().createSocket()) {
                        socket.setSoTimeout(_readTimeout);
                        socket.connect(new InetSocketAddress(siteUrl.getHost(), port), _connectTimeout);
                        socket.startHandshake();
                        final Certificate[] certificateChain = socket.getSession().getPeerCertificates();
                        if (certificateChain != null && certificateChain.length != 0
                                && certificateChain[0] instanceof X509Certificate) {
                            final X509Certificate x509Certificate = (X509Certificate) certificateChain[0];
                            LOGGER.debug(
                                    "Successfully downloaded X509Certificate with DN {} certificate from {}",
                                    x509Certificate.getSubjectDN(), url);
                            return x509Certificate;
                        } else {
                            throw new IllegalConfigurationException(String.format(
                                    "TLS handshake for '%s' from '%s' did not provide a X509Certificate",
                                    getName(), url));
                        }
                    }
                } catch (IOException | GeneralSecurityException e) {
                    throw new IllegalConfigurationException(
                            String.format("Unable to get certificate for '%s' from '%s'", getName(), url), e);
                }
            }
        });
    } finally {
        // Single-use executor: shutdown() stops further submissions, but the
        // already-submitted download task still runs to completion.
        workerService.shutdown();
    }
}