List of usage examples for java.util.concurrent.Future.isDone()
boolean isDone();
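Before the project examples below, here is a minimal, self-contained sketch of the usual pattern (the class and variable names are illustrative, not taken from any of the projects listed). isDone() returns immediately and reports whether the task has completed, whether it finished normally, threw an exception, or was cancelled, so callers typically poll it or pair it with isCancelled() before calling get().

import java.util.concurrent.*;

public class IsDoneSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Future<Integer> future = pool.submit(() -> {
            Thread.sleep(200);          // simulate some work
            return 42;
        });

        // isDone() never blocks: it returns true once the task has completed,
        // whether it finished normally, threw an exception, or was cancelled.
        while (!future.isDone()) {
            Thread.sleep(50);           // do other work or back off instead of busy-waiting
        }

        if (!future.isCancelled()) {
            Integer result = future.get();   // will not block now, the task is done
            System.out.println("result = " + result);
        }
        pool.shutdown();
    }
}

Because isDone() is also true for cancelled and failed tasks, several of the real-world examples below check isCancelled() or catch ExecutionException before using the result.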
From source file:com.jillesvangurp.httpclientfuture.HttpClientWithFutureTest.java
@Test(invocationCount = 3)
public void shouldInvokeMultiple() throws ExecutionException, TimeoutException {
    long before = SimpleServlet.counter.get();
    HttpGet req1 = new HttpGet(UrlBuilder.url("localhost", port).append("ping").queryParam("sleep", "10")
            .queryParam("req", "shouldInvokeMultiple_1").build());
    HttpGet req2 = new HttpGet(UrlBuilder.url("localhost", port).append("ping").queryParam("sleep", "10")
            .queryParam("req", "shouldInvokeMultiple_2").build());
    HttpGet req3 = new HttpGet(UrlBuilder.url("localhost", port).append("ping").queryParam("sleep", "3000")
            .queryParam("req", "shouldInvokeMultiple_3").build());
    try {
        List<Future<Boolean>> futures = client.executeMultiple(null, 100, TimeUnit.MILLISECONDS, req1, req2, req3);
        int cancelled = 0;
        for (Future<Boolean> future : futures) {
            if (future.isCancelled()) {
                cancelled++;
            } else {
                boolean done = future.isDone();
                if (done) {
                    future.get(1, TimeUnit.MILLISECONDS);
                }
            }
        }
        assertThat(cancelled, is(1));
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
    long after = SimpleServlet.counter.get();
    assertThat(after, is(before + 2));
}
From source file:org.geowebcache.diskquota.CacheCleanerTask.java
private void innerRun() throws InterruptedException {
    // first, save the config to account for changes in used quotas
    final DiskQuotaConfig quotaConfig = monitor.getConfig();
    if (!quotaConfig.isEnabled()) {
        log.trace("DiskQuota disabled, ignoring run...");
        return;
    }
    quotaConfig.setLastCleanUpTime(new Date());

    final Set<String> allLayerNames = monitor.getLayerNames();
    final Set<String> configuredLayerNames = quotaConfig.layerNames();
    final Set<String> globallyManagedLayerNames = new HashSet<String>(allLayerNames);
    globallyManagedLayerNames.removeAll(configuredLayerNames);

    for (String layerName : configuredLayerNames) {
        if (monitor.isCacheInfoBuilderRunning(layerName)) {
            if (log.isInfoEnabled()) {
                log.info("Cache information is still being gathered for layer '" + layerName
                        + "'. Skipping quota enforcement task for this layer.");
            }
            continue;
        }

        Future<?> runningCleanup = perLayerRunningCleanUps.get(layerName);
        if (runningCleanup != null && !runningCleanup.isDone()) {
            if (log.isDebugEnabled()) {
                log.debug("Cache clean up task still running for layer '" + layerName
                        + "'. Ignoring it for this run.");
            }
            continue;
        }

        final LayerQuota definedQuotaForLayer = quotaConfig.layerQuota(layerName);
        final ExpirationPolicy policy = definedQuotaForLayer.getExpirationPolicyName();
        final Quota quota = definedQuotaForLayer.getQuota();
        final Quota usedQuota = monitor.getUsedQuotaByLayerName(layerName);

        Quota excedent = usedQuota.difference(quota);
        if (excedent.getBytes().compareTo(BigInteger.ZERO) > 0) {
            if (log.isInfoEnabled()) {
                log.info("Layer '" + layerName + "' exceeds its quota of " + quota.toNiceString() + " by "
                        + excedent.toNiceString() + ". Currently used: " + usedQuota.toNiceString()
                        + ". Clean up task will be performed using expiration policy " + policy);
            }

            Set<String> layerNames = Collections.singleton(layerName);
            QuotaResolver quotaResolver;
            quotaResolver = monitor.newLayerQuotaResolver(layerName);

            LayerQuotaEnforcementTask task;
            task = new LayerQuotaEnforcementTask(layerNames, quotaResolver, monitor);
            Future<Object> future = this.cleanUpExecutorService.submit(task);
            perLayerRunningCleanUps.put(layerName, future);
        }
    }

    if (globallyManagedLayerNames.size() > 0) {
        ExpirationPolicy globalExpirationPolicy = quotaConfig.getGlobalExpirationPolicyName();
        if (globalExpirationPolicy == null) {
            return;
        }
        final Quota globalQuota = quotaConfig.getGlobalQuota();
        if (globalQuota == null) {
            log.info("There's not a global disk quota configured. The following layers "
                    + "will not be checked for excess of disk usage: " + globallyManagedLayerNames);
            return;
        }

        if (globalCleanUpTask != null && !globalCleanUpTask.isDone()) {
            log.debug("Global cache quota enforcement task still running, avoiding issueing a new one...");
            return;
        }

        Quota globalUsedQuota = monitor.getGloballyUsedQuota();
        Quota excedent = globalUsedQuota.difference(globalQuota);
        if (excedent.getBytes().compareTo(BigInteger.ZERO) > 0) {
            log.debug("Submitting global cache quota enforcement task");
            LayerQuotaEnforcementTask task;
            QuotaResolver quotaResolver = monitor.newGlobalQuotaResolver();
            task = new LayerQuotaEnforcementTask(globallyManagedLayerNames, quotaResolver, monitor);
            this.globalCleanUpTask = this.cleanUpExecutorService.submit(task);
        } else {
            if (log.isTraceEnabled()) {
                log.trace("Won't launch global quota enforcement task, " + globalUsedQuota.toNiceString()
                        + " used out of " + globalQuota.toNiceString() + " configured for the whole cache size.");
            }
        }
    }
}
From source file:br.unb.cic.bionimbuz.plugin.AbstractPlugin.java
private void checkFinishedTasks() {
    Future<PluginTask> futureTask;
    PluginTask task;
    for (final Pair<PluginTask, Future<PluginTask>> pair : this.executingTasks.values()) {
        futureTask = pair.second;
        if (!futureTask.isDone()) {
            continue;
        }
        try {
            task = futureTask.get();
        } catch (InterruptedException | ExecutionException e) {
            task = pair.first;
            continue;
        }
        this.executingTasks.remove(task.getId());
        if (task.getJobInfo().getOutputs().size() > 0) {
            int count = 0;
            for (final String output : task.getJobInfo().getOutputs()) {
                final File file = new File(output);
                final FileInfo info = new FileInfo();
                info.setName(output);
                info.setSize(file.length());
                count++;
            }
            this.endingTasks.put(task.getId(), new Pair<>(task, count));
        }
    }
}
From source file:com.mozilla.bagheera.consumer.KafkaConsumer.java
@Override
public void poll() {
    final CountDownLatch latch = new CountDownLatch(streams.size());
    for (final KafkaStream<byte[], byte[]> stream : streams) {
        workers.add(executor.submit(new Callable<Void>() {
            @Override
            public Void call() {
                try {
                    for (MessageAndMetadata<byte[], byte[]> mam : stream) {
                        BagheeraMessage bmsg = BagheeraMessage.parseFrom(mam.message());
                        // get the sink for this message's namespace
                        // (typically only one sink unless a regex pattern was used to listen to multiple topics)
                        KeyValueSink sink = sinkFactory.getSink(bmsg.getNamespace());
                        if (sink == null) {
                            LOG.error("Could not obtain sink for namespace: " + bmsg.getNamespace());
                            break;
                        }
                        if (bmsg.getOperation() == Operation.CREATE_UPDATE && bmsg.hasId() && bmsg.hasPayload()) {
                            if (validationPipeline == null
                                    || validationPipeline.isValid(bmsg.getPayload().toByteArray())) {
                                if (bmsg.hasTimestamp()) {
                                    sink.store(bmsg.getId(), bmsg.getPayload().toByteArray(), bmsg.getTimestamp());
                                } else {
                                    sink.store(bmsg.getId(), bmsg.getPayload().toByteArray());
                                }
                            } else {
                                invalidMessageMeter.mark();
                                // TODO: sample out an example payload
                                LOG.warn("Invalid payload for namespace: " + bmsg.getNamespace());
                            }
                        } else if (bmsg.getOperation() == Operation.DELETE && bmsg.hasId()) {
                            sink.delete(bmsg.getId());
                        }
                        consumed.mark();
                    }
                } catch (InvalidProtocolBufferException e) {
                    LOG.error("Invalid protocol buffer in data stream", e);
                } catch (UnsupportedEncodingException e) {
                    LOG.error("Message ID was not in UTF-8 encoding", e);
                } catch (IOException e) {
                    LOG.error("IO error while storing to data sink", e);
                } finally {
                    latch.countDown();
                }
                return null;
            }
        }));
    }

    // Wait for all tasks to complete which in the normal case they will
    // run indefinitely unless we detect that a thread exited
    try {
        while (true) {
            latch.await(10, TimeUnit.SECONDS);
            if (latch.getCount() != streams.size()) {
                // we have a dead thread and should exit
                break;
            }
        }
    } catch (InterruptedException e) {
        LOG.info("Interrupted during polling", e);
    }

    // Spit out errors if there were any
    for (Future<Void> worker : workers) {
        try {
            if (worker.isDone() && !worker.isCancelled()) {
                worker.get(1, TimeUnit.SECONDS);
            }
        } catch (InterruptedException e) {
            LOG.error("Thread was interrupted:", e);
        } catch (ExecutionException e) {
            LOG.error("Exception occured in thread:", e);
        } catch (TimeoutException e) {
            LOG.error("Timed out waiting for thread result:", e);
        } catch (CancellationException e) {
            LOG.error("Thread has been canceled: ", e);
        }
    }
}
From source file:org.omnaest.utils.operation.foreach.ForEach.java
/**
 * @see #map(Operation)
 * @param operation
 * @param executorService
 * @return
 * @throws ExecutionException
 */
public <R> IterationResult<R> map(final Operation<R, E> operation, ExecutorService executorService)
        throws ExecutionException {
    IterationResult<R> retval = null;
    List<Callable<R>> callableList = new ArrayList<Callable<R>>();
    for (final E element : this.iterable) {
        callableList.add(new Callable<R>() {
            @Override
            public R call() throws Exception {
                return operation.execute(element);
            }
        });
    }
    List<R> resultList = new ArrayList<R>();
    {
        try {
            List<Future<R>> futureList = executorService.invokeAll(callableList);
            for (Future<R> future : futureList) {
                do {
                    try {
                        resultList.add(future.get());
                    } catch (InterruptedException e) {
                    }
                } while (!future.isDone());
            }
        } catch (InterruptedException e) {
        }
    }
    retval = new IterationResult<R>(resultList);
    return retval;
}
From source file:eu.cloud4soa.tests.TestReqSec_Deploy.java
public void deploy() {
    Collection<CallableNode> children = new ArrayList<CallableNode>();
    for (String applicationInstanceName : applicationInstances.keySet()) {
        String applicationInstanceUriId = applicationInstances.get(applicationInstanceName);
        // Thread creation...
        CallableNode callableNode = new CallableNode(BASE_URI, applicationInstanceUriId,
                getPaaSInstanceUriId(selectedPaaS), getPublicKey(), getSecretKey());
        children.add(callableNode);
    }
    ExecutorService executor = Executors.newFixedThreadPool(numberTests);
    try {
        List<Future<Boolean>> invokeAll = executor.invokeAll(children);
        for (Future<Boolean> future : invokeAll) {
            while (!future.isDone())
                ;
            Boolean get = future.get();
        }
        System.out.print("All " + numberTests + " deploy requests are completed!");
        executor.shutdownNow();
    } catch (InterruptedException ex) {
        Logger.getLogger(TestReqSec_Deploy.class.getName()).log(Level.SEVERE, null, ex);
    } catch (ExecutionException ex) {
        Logger.getLogger(TestReqSec_Deploy.class.getName()).log(Level.SEVERE, null, ex);
    }

    //    //Applications
    //    try {
    //        deployApplication(applicationInstanceUriId);
    //    } catch (FileNotFoundException ex) {
    //        Logger.getLogger(TestReqSec_Deploy.class.getName()).log(Level.SEVERE, null, ex);
    //    }
}
From source file:com.imaginary.home.controller.Command.java
private @Nonnull boolean[] waitFor(@Nonnull ArrayList<Future<Boolean>> threads)
        throws ControllerException, CommunicationException {
    boolean[] results = new boolean[threads.size()];
    long t = System.currentTimeMillis() + timeout;
    boolean done = true;
    int i = 0;

    for (Future<Boolean> f : threads) {
        done = false;
        while (t > System.currentTimeMillis()) {
            try {
                if (f.isDone()) {
                    results[i++] = f.get();
                    done = true;
                    break;
                }
                try {
                    Thread.sleep(500L);
                } catch (InterruptedException ignore) {
                }
            } catch (ExecutionException e) {
                if (e.getCause() instanceof ControllerException) {
                    throw (ControllerException) e.getCause();
                } else if (e.getCause() instanceof CommunicationException) {
                    throw (CommunicationException) e.getCause();
                }
                throw new ControllerException(e);
            } catch (InterruptedException e) {
                throw new ControllerException(e);
            }
        }
        if (!done && !f.isDone()) {
            f.cancel(true);
        }
    }
    if (!done) {
        throw new ControllerException("Action timed out");
    }
    return results;
}
From source file:org.opencb.cellbase.client.rest.ParentRestClient.java
protected <U> QueryResponse<U> execute(List<String> idList, String resource, QueryOptions options,
        Class<U> clazz, boolean post) throws IOException {
    if (idList == null || idList.isEmpty()) {
        return new QueryResponse<>();
    }

    // If the list contains fewer than REST_CALL_BATCH_SIZE variants then we can make a normal REST call.
    if (idList.size() <= REST_CALL_BATCH_SIZE) {
        return fetchData(idList, resource, options, clazz, post);
    }

    // But if there are more than REST_CALL_BATCH_SIZE variants then we launch several threads to increase performance.
    int numThreads = (options != null) ? options.getInt("numThreads", DEFAULT_NUM_THREADS) : DEFAULT_NUM_THREADS;
    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    List<Future<QueryResponse<U>>> futureList = new ArrayList<>((idList.size() / REST_CALL_BATCH_SIZE) + 1);
    for (int i = 0; i < idList.size(); i += REST_CALL_BATCH_SIZE) {
        final int from = i;
        final int to = (from + REST_CALL_BATCH_SIZE > idList.size()) ? idList.size() : from + REST_CALL_BATCH_SIZE;
        futureList.add(executorService
                .submit(() -> fetchData(idList.subList(from, to), resource, options, clazz, post)));
    }

    List<QueryResult<U>> queryResults = new ArrayList<>(idList.size());
    for (Future<QueryResponse<U>> responseFuture : futureList) {
        try {
            while (!responseFuture.isDone()) {
                Thread.sleep(5);
            }
            queryResults.addAll(responseFuture.get().getResponse());
        } catch (InterruptedException | ExecutionException e) {
            e.printStackTrace();
        }
    }

    QueryResponse<U> finalResponse = new QueryResponse<>();
    finalResponse.setResponse(queryResults);
    executorService.shutdown();

    return finalResponse;
}
From source file:org.codice.pubsub.server.SubscriptionServer.java
private void processSubscriptions() {
    while (!Thread.currentThread().isInterrupted()) {
        // Fetch Subscriptions
        Dictionary subMap = getSubscriptionMap();
        if (subMap != null) {
            Enumeration e = subMap.keys();
            while (e.hasMoreElements()) {
                String subscriptionId = (String) e.nextElement();
                if (!subscriptionId.equals("service.pid")) {
                    String subscriptionMsg = (String) subMap.get(subscriptionId);
                    int status = checkProcessingStatus(subscriptionId);
                    if (status == PROCESS_STATUS_COMPLETE) {
                        Future<QueryControlInfo> future = processMap.get(subscriptionId);
                        if (future != null) {
                            boolean done = future.isDone();
                            if (done) {
                                try {
                                    QueryControlInfo ctrlInfo = future.get();
                                    processMap.remove(subscriptionId);
                                    runQuery(subscriptionMsg, ctrlInfo.getQueryEndDateTime());
                                } catch (InterruptedException ie) {
                                    LOGGER.error(ie.getMessage());
                                } catch (ExecutionException ee) {
                                    LOGGER.error(ee.getMessage());
                                }
                            }
                        }
                    } else if (status == PROCESS_STATUS_NOT_EXIST) {
                        runQuery(subscriptionMsg, new DateTime().minusSeconds(1));
                    } else if (status == PROCESS_STATUS_NOT_EXIST) {
                        // Do Nothing For Now
                    }
                }
            }
        } else {
            try {
                Thread.sleep(5000);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    }
}
From source file:org.apache.hive.hcatalog.templeton.JobRequestExecutor.java
private void cancelExecutePoolThread(Future<T> future) {
    int retryCount = 0;
    while (retryCount < this.maxTaskCancelRetryCount && !future.isDone()) {
        LOG.info("Task is still executing the job request. Cancelling it with retry count: " + retryCount);
        if (future.cancel(true)) {
            /*
             * Cancelled the job request and return to client.
             */
            LOG.info("Cancel job request issued successfully.");
            return;
        }

        retryCount++;
        try {
            Thread.sleep(this.maxTaskCancelRetryWaitTimeInMs);
        } catch (InterruptedException e) {
            /*
             * Nothing to do. Just retry.
             */
        }
    }

    LOG.warn("Failed to cancel the job. isCancelled: " + future.isCancelled() + " Retry count: " + retryCount);
}