List of usage examples for com.google.common.util.concurrent ListenableFuture get
V get() throws InterruptedException, ExecutionException;
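Before the examples, a minimal sketch of the two standard ways to consume a ListenableFuture: blocking with get(), which rethrows a task failure wrapped in ExecutionException, or attaching a listener that runs on completion. The executor and the trivial task here are illustrative placeholders, not taken from any of the source files below.

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;

public class ListenableFutureGetExample {
    public static void main(String[] args) {
        // Wrap a plain ExecutorService so that submit() returns a ListenableFuture.
        ListeningExecutorService service =
                MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
        final ListenableFuture<String> future = service.submit(new Callable<String>() {
            @Override
            public String call() {
                return "done";
            }
        });

        // Non-blocking: the listener runs once the future completes, so a
        // get() inside it no longer blocks. Several examples below use the
        // older MoreExecutors.sameThreadExecutor(); newer Guava replaces it
        // with MoreExecutors.directExecutor().
        future.addListener(new Runnable() {
            @Override
            public void run() {
                System.out.println("future completed");
            }
        }, MoreExecutors.sameThreadExecutor());

        // Blocking: get() waits for the result and declares both checked exceptions.
        try {
            System.out.println(future.get());
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
        } catch (ExecutionException e) {
            e.getCause().printStackTrace(); // the exception thrown by the task
        }

        service.shutdown();
    }
}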
From source file:org.voltdb.SnapshotSiteProcessor.java
public Future<?> doSnapshotWork(ExecutionEngine ee, boolean ignoreQuietPeriod) {
    ListenableFuture<?> retval = null;

    /*
     * This thread will null out the reference to m_snapshotTableTasks when
     * a snapshot is finished. If the snapshot buffer is loaned out that means
     * it is pending I/O somewhere so there is no work to do until it comes back.
     */
    if (m_snapshotTableTasks == null || m_availableSnapshotBuffers.isEmpty()
            || (!ignoreQuietPeriod && inQuietPeriod())) {
        return retval;
    }

    /*
     * There definitely is snapshot work to do. There should be a task
     * here. If there isn't something is wrong because when the last task
     * is polled cleanup and nulling should occur.
     */
    while (!m_snapshotTableTasks.isEmpty()) {
        final SnapshotTableTask currentTask = m_snapshotTableTasks.peek();
        assert (currentTask != null);
        final int headerSize = currentTask.m_target.getHeaderSize();
        final BBContainer snapshotBuffer = m_availableSnapshotBuffers.poll();
        assert (snapshotBuffer != null);
        snapshotBuffer.b.clear();
        snapshotBuffer.b.position(headerSize);
        final int serialized = ee.tableStreamSerializeMore(snapshotBuffer, currentTask.m_tableId,
                TableStreamType.SNAPSHOT);
        if (serialized < 0) {
            VoltDB.crashLocalVoltDB("Failure while serialize data from a table for COW snapshot", false, null);
        }

        /**
         * The EE will return 0 when there is no more data left to pull from that table.
         * The enclosing loop ensures that the next table is then addressed.
         */
        if (serialized == 0) {
            final SnapshotTableTask t = m_snapshotTableTasks.poll();
            /**
             * Replicated tables are assigned to a single ES on each site and that ES
             * is responsible for closing the data target. Done in a separate
             * thread so the EE can continue working.
             */
            if (t.m_isReplicated && t.m_target.getFormat().isTableBased()) {
                final Thread terminatorThread = new Thread("Replicated SnapshotDataTarget terminator ") {
                    @Override
                    public void run() {
                        try {
                            t.m_target.close();
                        } catch (IOException e) {
                            throw new RuntimeException(e);
                        } catch (InterruptedException e) {
                            throw new RuntimeException(e);
                        }
                    }
                };
                m_snapshotTargetTerminators.add(terminatorThread);
                terminatorThread.start();
            }
            m_availableSnapshotBuffers.offer(snapshotBuffer);
            continue;
        }

        /**
         * The block from the EE will contain raw tuple data with no length prefix etc.
         */
        snapshotBuffer.b.limit(headerSize + serialized);
        snapshotBuffer.b.position(0);
        Callable<BBContainer> valueForTarget = Callables.returning(snapshotBuffer);
        for (SnapshotDataFilter filter : currentTask.m_filters) {
            valueForTarget = filter.filter(valueForTarget);
        }
        retval = currentTask.m_target.write(valueForTarget, currentTask);
        if (retval != null) {
            final ListenableFuture<?> retvalFinal = retval;
            retvalFinal.addListener(new Runnable() {
                @Override
                public void run() {
                    try {
                        retvalFinal.get();
                    } catch (Throwable t) {
                        if (m_lastSnapshotSucceded) {
                            hostLog.error("Error while attempting to write snapshot data to file "
                                    + currentTask.m_target, t);
                            m_lastSnapshotSucceded = false;
                        }
                    }
                }
            }, MoreExecutors.sameThreadExecutor());
        }
        quietPeriodSet(ignoreQuietPeriod);
        break;
    }

    /**
     * If there are no more tasks then this particular EE is finished doing snapshot work.
     * Check the AtomicInteger to find out if this is the last one.
     */
    if (m_snapshotTableTasks.isEmpty()) {
        final ArrayList<SnapshotDataTarget> snapshotTargets = m_snapshotTargets;
        m_snapshotTargets = null;
        m_snapshotTableTasks = null;
        final int result = ExecutionSitesCurrentlySnapshotting.decrementAndGet();

        /**
         * If this is the last one then this EE must close all the SnapshotDataTargets.
         * Done in a separate thread so the EE can go and do other work. It will
         * sync every file descriptor and that may block for a while.
         */
        if (result == 0) {
            final long txnId = m_lastSnapshotTxnId;
            final int numHosts = m_lastSnapshotNumHosts;
            final Thread terminatorThread = new Thread("Snapshot terminator") {
                @Override
                public void run() {
                    try {
                        /*
                         * Be absolutely sure the snapshot is finished
                         * and synced to disk before another is started
                         */
                        for (Thread t : m_snapshotTargetTerminators) {
                            if (t == this) {
                                continue;
                            }
                            try {
                                t.join();
                            } catch (InterruptedException e) {
                                return;
                            }
                        }
                        for (final SnapshotDataTarget t : snapshotTargets) {
                            try {
                                t.close();
                            } catch (IOException e) {
                                m_lastSnapshotSucceded = false;
                                throw new RuntimeException(e);
                            } catch (InterruptedException e) {
                                m_lastSnapshotSucceded = false;
                                throw new RuntimeException(e);
                            }
                        }
                        Runnable r = null;
                        while ((r = m_tasksOnSnapshotCompletion.poll()) != null) {
                            try {
                                r.run();
                            } catch (Exception e) {
                                hostLog.error("Error running snapshot completion task", e);
                            }
                        }
                    } finally {
                        try {
                            logSnapshotCompleteToZK(txnId, numHosts, m_lastSnapshotSucceded);
                        } finally {
                            /**
                             * Set it to -1 indicating the system is ready to perform another snapshot.
                             * Changed to wait until all the previous snapshot work has finished so
                             * that snapshot initiation doesn't wait on the file system.
                             */
                            ExecutionSitesCurrentlySnapshotting.decrementAndGet();
                        }
                    }
                }
            };
            m_snapshotTargetTerminators.add(terminatorThread);
            terminatorThread.start();
        }
    }
    return retval;
}
From source file:org.jclouds.aws.ec2.compute.suppliers.AWSEC2ImageSupplier.java
@SuppressWarnings("unchecked") @Override/* ww w .ja v a 2 s . co m*/ public Set<? extends Image> get() { String amiQuery = queries.get(PROPERTY_EC2_AMI_QUERY); String ccAmiQuery = queries.get(PROPERTY_EC2_CC_AMI_QUERY); ListenableFuture<Iterable<Image>> normalImages = images(regions.get(), amiQuery, PROPERTY_EC2_AMI_QUERY); ImmutableSet<Image> clusterImages; try { clusterImages = ImmutableSet .copyOf(images(clusterRegions, ccAmiQuery, PROPERTY_EC2_CC_AMI_QUERY).get()); } catch (Exception e) { logger.warn(e, "Error parsing images in query %s", ccAmiQuery); throw Throwables.propagate(e); } Iterables.addAll(clusterComputeIds, transform(clusterImages, new Function<Image, String>() { @Override public String apply(Image arg0) { return arg0.getId(); } })); Iterable<? extends Image> parsedImages; try { parsedImages = ImmutableSet.copyOf(concat(clusterImages, normalImages.get())); } catch (Exception e) { logger.warn(e, "Error parsing images in query %s", amiQuery); throw Throwables.propagate(e); } final Map<RegionAndName, ? extends Image> imageMap = ImagesToRegionAndIdMap.imagesToMap(parsedImages); cache.get().invalidateAll(); cache.get().asMap().putAll(Map.class.cast(imageMap)); logger.debug("<< images(%d)", imageMap.size()); // TODO Used to be mutable; was this assumed anywhere? return new ForwardingSet<Image>() { protected Set<Image> delegate() { return ImmutableSet.copyOf(cache.get().asMap().values()); } }; }
From source file:net.rauros.jzwave.ZWaveManager.java
protected void prepareJobAndWait(Message message) {
    ListenableFuture<Boolean> future = prepareJob(message, true, false);
    try {
        future.get();
    } catch (InterruptedException e) {
        e.printStackTrace();
    } catch (ExecutionException e) {
        e.printStackTrace();
    }
}
From source file:com.android.camera.processing.imagebackend.TaskCompressImageToJpeg.java
@Override
public void run() {
    ImageToProcess img = mImage;
    mSession.getCollector().markProcessingTimeStart();
    final Rect safeCrop;

    // For JPEG, it is the capture device's responsibility to get proper
    // orientation.
    TaskImage inputImage, resultImage;
    byte[] writeOut;
    int numBytes;
    ByteBuffer compressedData;
    ExifInterface exifData = null;
    Resource<ByteBuffer> byteBufferResource = null;

    switch (img.proxy.getFormat()) {
        case ImageFormat.JPEG:
            try {
                // In these cases, we will request a zero-oriented JPEG from
                // the HAL; the HAL may deliver its orientation in the JPEG
                // encoding __OR__ EXIF -- we don't know. We need to read
                // the EXIF setting from the byte payload and the EXIF reader
                // doesn't work on direct buffers. So, we make a local
                // copy in a non-direct buffer.
                ByteBuffer origBuffer = img.proxy.getPlanes().get(0).getBuffer();
                compressedData = ByteBuffer.allocate(origBuffer.limit());

                // On memory allocation failure, fail gracefully.
                if (compressedData == null) {
                    // TODO: Put memory allocation failure code here.
                    mSession.finishWithFailure(-1, true);
                    return;
                }

                origBuffer.rewind();
                compressedData.put(origBuffer);
                origBuffer.rewind();
                compressedData.rewind();

                // For JPEG, always use the EXIF orientation as ground
                // truth on orientation, width and height.
                Integer exifOrientation = null;
                Integer exifPixelXDimension = null;
                Integer exifPixelYDimension = null;

                if (compressedData.array() != null) {
                    exifData = Exif.getExif(compressedData.array());
                    Map<Integer, Integer> minimalExifTags = exifGetMinimalTags(exifData);
                    exifOrientation = minimalExifTags.get(ExifInterface.TAG_ORIENTATION);
                    exifPixelXDimension = minimalExifTags.get(ExifInterface.TAG_PIXEL_X_DIMENSION);
                    exifPixelYDimension = minimalExifTags.get(ExifInterface.TAG_PIXEL_Y_DIMENSION);
                }

                final DeviceOrientation exifDerivedRotation;
                if (exifOrientation == null) {
                    // A missing rotation value is assumed to mean 0 rotation.
                    exifDerivedRotation = DeviceOrientation.CLOCKWISE_0;
                } else {
                    exifDerivedRotation = DeviceOrientation.from(exifOrientation);
                }

                final int imageWidth;
                final int imageHeight;
                // Crop coordinate space is in original sensor coordinates. We need
                // to calculate the proper rotation of the crop to be applied to the
                // final JPEG artifact.
                final DeviceOrientation combinedRotationFromSensorToJpeg =
                        addOrientation(img.rotation, exifDerivedRotation);

                if (exifPixelXDimension == null || exifPixelYDimension == null) {
                    Log.w(TAG, "Cannot parse EXIF for image dimensions, passing 0x0 dimensions");
                    imageHeight = 0;
                    imageWidth = 0;
                    // Calculate crop from EXIF info with image proxy width/height.
                    safeCrop = guaranteedSafeCrop(img.proxy,
                            rotateBoundingBox(img.crop, combinedRotationFromSensorToJpeg));
                } else {
                    imageWidth = exifPixelXDimension;
                    imageHeight = exifPixelYDimension;
                    // Calculate crop from EXIF info with combined rotation.
                    safeCrop = guaranteedSafeCrop(imageWidth, imageHeight,
                            rotateBoundingBox(img.crop, combinedRotationFromSensorToJpeg));
                }

                // Ignore the device rotation on ImageToProcess and use the EXIF from
                // the byte[] payload.
                inputImage = new TaskImage(exifDerivedRotation, imageWidth, imageHeight,
                        img.proxy.getFormat(), safeCrop);

                if (requiresCropOperation(img.proxy, safeCrop)) {
                    // Crop the image.
                    resultImage = new TaskImage(exifDerivedRotation, safeCrop.width(), safeCrop.height(),
                            img.proxy.getFormat(), null);
                    byte[] croppedResult = decompressCropAndRecompressJpegData(compressedData.array(),
                            safeCrop, getJpegCompressionQuality());
                    compressedData = ByteBuffer.allocate(croppedResult.length);
                    compressedData.put(ByteBuffer.wrap(croppedResult));
                    compressedData.rewind();
                } else {
                    // Pass through the JPEG data.
                    resultImage = inputImage;
                }
            } finally {
                // Release the image now that you have a usable copy in
                // local memory, or you failed to process.
                mImageTaskManager.releaseSemaphoreReference(img, mExecutor);
            }

            onStart(mId, inputImage, resultImage, TaskInfo.Destination.FINAL_IMAGE);
            numBytes = compressedData.limit();
            break;

        case ImageFormat.YUV_420_888:
            safeCrop = guaranteedSafeCrop(img.proxy, img.crop);
            try {
                inputImage = new TaskImage(img.rotation, img.proxy.getWidth(), img.proxy.getHeight(),
                        img.proxy.getFormat(), safeCrop);
                Size resultSize = getImageSizeForOrientation(img.crop.width(), img.crop.height(),
                        img.rotation);

                // The resulting image will be rotated so that viewers won't
                // have to rotate. That's why the resulting image will have 0
                // rotation.
                resultImage = new TaskImage(DeviceOrientation.CLOCKWISE_0, resultSize.getWidth(),
                        resultSize.getHeight(), ImageFormat.JPEG, null);
                // Image rotation is already encoded into the bytes.

                onStart(mId, inputImage, resultImage, TaskInfo.Destination.FINAL_IMAGE);

                // WARNING:
                // This reduces the size of the buffer that is created
                // to hold the final jpg. It is reduced by the "minimum expected
                // jpg compression factor" to reduce memory allocation consumption.
                // If the final jpg is more than this size the image will be
                // corrupted. The maximum size of an image is width * height *
                // number_of_channels. We artificially reduce this number based on
                // what we expect the compression ratio to be to reduce the
                // amount of memory we are required to allocate.
                int maxPossibleJpgSize = 3 * resultImage.width * resultImage.height;
                int jpgBufferSize = maxPossibleJpgSize / MINIMUM_EXPECTED_JPG_COMPRESSION_FACTOR;

                byteBufferResource = mByteBufferDirectPool.acquire(jpgBufferSize);
                compressedData = byteBufferResource.get();

                // On memory allocation failure, fail gracefully.
                if (compressedData == null) {
                    // TODO: Put memory allocation failure code here.
                    mSession.finishWithFailure(-1, true);
                    byteBufferResource.close();
                    return;
                }

                // Do the actual compression here.
                numBytes = compressJpegFromYUV420Image(img.proxy, compressedData,
                        getJpegCompressionQuality(), img.crop, inputImage.orientation.getDegrees());

                // If the compression overflows the size of the buffer, the
                // actual number of bytes needed is returned: retry once with
                // a buffer sized for the worst case.
                if (numBytes > jpgBufferSize) {
                    byteBufferResource.close();
                    byteBufferResource = mByteBufferDirectPool.acquire(maxPossibleJpgSize);
                    compressedData = byteBufferResource.get();

                    // On memory allocation failure, fail gracefully.
                    if (compressedData == null) {
                        // TODO: Put memory allocation failure code here.
                        mSession.finishWithFailure(-1, true);
                        byteBufferResource.close();
                        return;
                    }

                    numBytes = compressJpegFromYUV420Image(img.proxy, compressedData,
                            getJpegCompressionQuality(), img.crop, inputImage.orientation.getDegrees());
                }

                if (numBytes < 0) {
                    byteBufferResource.close();
                    throw new RuntimeException("Error compressing jpeg.");
                }
                compressedData.limit(numBytes);
            } finally {
                // Release the image now that you have a usable copy in local
                // memory, or you failed to process.
                mImageTaskManager.releaseSemaphoreReference(img, mExecutor);
            }
            break;

        default:
            mImageTaskManager.releaseSemaphoreReference(img, mExecutor);
            throw new IllegalArgumentException("Unsupported input image format for TaskCompressImageToJpeg");
    }

    writeOut = new byte[numBytes];
    compressedData.get(writeOut);
    compressedData.rewind();

    if (byteBufferResource != null) {
        byteBufferResource.close();
    }

    onJpegEncodeDone(mId, inputImage, resultImage, writeOut, TaskInfo.Destination.FINAL_IMAGE);

    // In rare cases, TaskCompressImageToJpeg might complete before
    // TaskConvertImageToRGBPreview. However, the session should take care
    // of out-of-order completion.

    // EXIF tags are rewritten so that output from this task is normalized.
    final TaskImage finalInput = inputImage;
    final TaskImage finalResult = resultImage;
    final ExifInterface exif = createExif(Optional.fromNullable(exifData), resultImage, img.metadata);
    mSession.getCollector().decorateAtTimeWriteToDisk(exif);
    ListenableFuture<Optional<Uri>> futureUri = mSession.saveAndFinish(writeOut, resultImage.width,
            resultImage.height, resultImage.orientation.getDegrees(), exif);
    Futures.addCallback(futureUri, new FutureCallback<Optional<Uri>>() {
        @Override
        public void onSuccess(Optional<Uri> uriOptional) {
            if (uriOptional.isPresent()) {
                onUriResolved(mId, finalInput, finalResult, uriOptional.get(),
                        TaskInfo.Destination.FINAL_IMAGE);
            }
        }

        @Override
        public void onFailure(Throwable throwable) {
        }
    });

    final ListenableFuture<TotalCaptureResultProxy> requestMetadata = img.metadata;
    // If TotalCaptureResults are available, add them to the capture event.
    // Otherwise, do NOT wait for them, since we'd be stalling the ImageBackend.
    if (requestMetadata.isDone()) {
        try {
            mSession.getCollector().decorateAtTimeOfCaptureRequestAvailable(requestMetadata.get());
        } catch (InterruptedException e) {
            Log.e(TAG, "CaptureResults not added to photoCaptureDoneEvent event due to Interrupted Exception.");
        } catch (ExecutionException e) {
            Log.w(TAG, "CaptureResults not added to photoCaptureDoneEvent event due to Execution Exception.");
        } finally {
            mSession.getCollector().photoCaptureDoneEvent();
        }
    } else {
        Log.w(TAG, "CaptureResults unavailable to photoCaptureDoneEvent event.");
        mSession.getCollector().photoCaptureDoneEvent();
    }
}
From source file:org.opendaylight.protocol.bgp.openconfig.impl.moduleconfig.BGPPeerProvider.java
public void onNeighborModified(final Neighbor modifiedNeighbor) {
    final ModuleKey moduleKey = neighborState.getModuleKey(modifiedNeighbor.getKey());
    final ReadOnlyTransaction rTx = dataBroker.newReadOnlyTransaction();
    final ListenableFuture<List<AdvertizedTable>> advertizedTablesFuture =
            new TableTypesFunction<AdvertizedTable>(rTx, configModuleOp, ADVERTIZED_TABLE_FUNCTION)
                    .apply(modifiedNeighbor.getAfiSafis().getAfiSafi());
    if (moduleKey != null) {
        // Update an existing peer configuration.
        try {
            if (neighborState.addOrUpdate(moduleKey, modifiedNeighbor.getKey(), modifiedNeighbor)) {
                final Optional<Module> maybeModule =
                        configModuleOp.readModuleConfiguration(moduleKey, rTx).get();
                if (maybeModule.isPresent()) {
                    final Module peerConfigModule = toPeerConfigModule(modifiedNeighbor, maybeModule.get(),
                            advertizedTablesFuture.get());
                    configModuleOp.putModuleConfiguration(peerConfigModule,
                            dataBroker.newWriteOnlyTransaction());
                }
            }
        } catch (final Exception e) {
            LOG.error("Failed to update a configuration module: {}", moduleKey, e);
            throw new IllegalStateException(e);
        }
    } else {
        // Create a new peer configuration.
        final ModuleKey ribImplKey = globalState.getModuleKey(GlobalIdentifier.GLOBAL_IDENTIFIER);
        if (ribImplKey != null) {
            try {
                final ListenableFuture<Rib> ribFuture =
                        new RibInstanceFunction<>(rTx, configModuleOp, TO_RIB_FUNCTION)
                                .apply(ribImplKey.getName());
                final Module peerConfigModule = toPeerConfigModule(modifiedNeighbor,
                        advertizedTablesFuture.get(), ribFuture.get());
                configModuleOp.putModuleConfiguration(peerConfigModule, dataBroker.newWriteOnlyTransaction());
                neighborState.addOrUpdate(peerConfigModule.getKey(), modifiedNeighbor.getKey(),
                        modifiedNeighbor);
            } catch (final Exception e) {
                LOG.error("Failed to create a configuration module: {}", moduleKey, e);
                throw new IllegalStateException(e);
            }
        }
    }
}
From source file:com.navercorp.redis.cluster.connection.RedisConnectionAsync.java
private byte[] getResponse() {
    ListenableFuture<byte[]> f = pipelinedFutures.poll();
    try {
        return f.get();
    } catch (InterruptedException e) {
        throw new JedisConnectionException(e);
    } catch (ExecutionException e) {
        throw new JedisConnectionException(e);
    }
}
From source file:org.opendaylight.atrium.hostservice.impl.HostMonitor.java
public void packetReceived(ConnectorAddress addrs, InstanceIdentifier<?> ii) {
    InstanceIdentifier<NodeConnector> iinc = ii.firstIdentifierOf(NodeConnector.class);
    InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node> iin =
            ii.firstIdentifierOf(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class);

    ListenableFuture<Optional<NodeConnector>> futureNodeConnector;
    ListenableFuture<Optional<org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node>> futureNode;
    try (ReadOnlyTransaction readTx = dataService.newReadOnlyTransaction()) {
        futureNodeConnector = readTx.read(LogicalDatastoreType.OPERATIONAL, iinc);
        futureNode = readTx.read(LogicalDatastoreType.OPERATIONAL, iin);
        readTx.close();
    }

    Optional<NodeConnector> opNodeConnector = null;
    Optional<org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node> opNode = null;
    try {
        opNodeConnector = futureNodeConnector.get();
        opNode = futureNode.get();
    } catch (ExecutionException | InterruptedException ex) {
        LOG.warn(ex.getLocalizedMessage());
    }

    if (opNode != null && opNode.isPresent() && opNodeConnector != null && opNodeConnector.isPresent()) {
        processHost(opNode.get(), opNodeConnector.get(), addrs);
    }
}
From source file:org.opendaylight.nemo.user.tenantmanager.TenantManage.java
/**
 * @return null if an error was encountered, or an empty map if there was no
 *         error but no data was retrieved.
 */
public Map<UserId, User> getUsers() {
    InstanceIdentifier<Users> usersInsId = InstanceIdentifier.builder(Users.class).build();
    ListenableFuture<Optional<Users>> usersFuture = dataBroker.newReadOnlyTransaction()
            .read(LogicalDatastoreType.CONFIGURATION, usersInsId);
    final Optional<Users> usersOpt;
    try {
        // TODO: consider a timeout here?
        usersOpt = usersFuture.get();
    } catch (InterruptedException e) {
        LOG.error("Cannot read user information.", e);
        return null;
    } catch (ExecutionException e) {
        LOG.error("Cannot read user information.", e);
        return null;
    }

    // TODO: change to Java 8 lambda expressions
    return usersOpt.transform(new Function<Users, Map<UserId, User>>() {
        @Override
        public Map<UserId, User> apply(Users input) {
            return Maps.uniqueIndex(input.getUser(), new Function<User, UserId>() {
                @Override
                public UserId apply(User user) {
                    return user.getUserId();
                }
            });
        }
    }).or(new HashMap<UserId, User>());
}
From source file:android.support.test.espresso.web.action.AtomAction.java
@Override
public void perform(UiController controller, View view) {
    WebView webView = (WebView) view;
    List<? extends Object> arguments = checkNotNull(atom.getArguments(element));
    String script = checkNotNull(atom.getScript());
    final ListenableFuture<Evaluation> localEval =
            JavascriptEvaluation.evaluate(webView, script, arguments, window);

    if (null != window && Build.VERSION.SDK_INT == 19) {
        Log.w(TAG, "WARNING: KitKat does not report when an iframe is loading new content. "
                + "If you are interacting with content within an iframe and that content is changing ("
                + "eg: you have just pressed a submit button). Espresso will not be able to block you "
                + "until the new content has loaded (which it can do on all other API levels). You will "
                + "need to have some custom polling / synchronization with the iframe in that case.");
    }

    localEval.addListener(new Runnable() {
        @Override
        public void run() {
            try {
                futureEval.set(localEval.get());
            } catch (ExecutionException ee) {
                futureEval.setException(ee.getCause());
            } catch (InterruptedException ie) {
                futureEval.setException(ie);
            }
        }
    }, MoreExecutors.sameThreadExecutor());
}
From source file:com.facebook.buck.distributed.build_client.BuildPhase.java
private BuildJob fetchBuildInformationFromServerAndPublishPendingEvents(BuildJob job,
        ListeningExecutorService networkExecutorService) throws InterruptedException {
    StampedeId stampedeId = job.getStampedeId();
    try {
        job = distBuildService.getCurrentBuildJobState(stampedeId);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    LOG.info("Got build status: " + job.getStatus());

    if (!job.isSetBuildSlaves()) {
        consoleEventsDispatcher.postDistBuildStatusEvent(job, ImmutableList.of());
        checkTerminateScheduledUpdates(job, Optional.empty());
        return job;
    }

    for (BuildSlaveInfo slave : job.getBuildSlaves()) {
        String runIdString = slave.getBuildSlaveRunId().getId();
        if (!seenSlaveRunIds.contains(slave.getBuildSlaveRunId().getId())) {
            seenSlaveRunIds.add(runIdString);
            LOG.info("New slave server attached to build. (RunId: [%s], Hostname: [%s])", runIdString,
                    slave.getHostname());
        }
    }

    // TODO(alisdair,shivanker): if job just completed (checkTerminateScheduledUpdates),
    // we could have missed the final few events.
    ListenableFuture<?> slaveEventsFuture = fetchAndPostBuildSlaveEventsAsync(job, networkExecutorService);
    ListenableFuture<List<BuildSlaveStatus>> slaveStatusesFuture =
            fetchBuildSlaveStatusesAsync(job, networkExecutorService);
    ListenableFuture<?> logStreamingFuture =
            fetchAndProcessRealTimeSlaveLogsAsync(job, networkExecutorService);

    List<BuildSlaveStatus> slaveStatuses = ImmutableList.of();
    try {
        slaveStatuses = slaveStatusesFuture.get();
        consoleEventsDispatcher.postDistBuildStatusEvent(job, slaveStatuses);
        slaveEventsFuture.get();
        logStreamingFuture.get();
    } catch (InterruptedException ex) {
        // Ensure all async work is interrupted too.
        slaveStatusesFuture.cancel(true);
        slaveEventsFuture.cancel(true);
        logStreamingFuture.cancel(true);
        Thread.currentThread().interrupt();
        throw ex;
    } catch (ExecutionException e) {
        LOG.error(e, "Failed to get slave statuses, events or logs.");
    }

    buildRuleEventManager.publishCacheSynchronizedBuildRuleFinishedEvents();
    checkTerminateScheduledUpdates(job, Optional.of(slaveStatuses));
    return job;
}