List of usage examples for com.google.common.util.concurrent.ListenableFuture.isCancelled()
boolean isCancelled();
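Before the full examples, a minimal, self-contained sketch of the method's contract: isCancelled() returns true once the future has been cancelled, whether or not the underlying computation ever ran. The sketch exercises it through Guava's SettableFuture; the demo class name is made up for illustration.

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;

public class IsCancelledDemo {
  public static void main(String[] args) {
    // SettableFuture is a ListenableFuture whose result is supplied manually,
    // so it stays pending until we resolve or cancel it.
    ListenableFuture<String> future = SettableFuture.create();
    System.out.println(future.isCancelled()); // false: still pending

    // cancel(true) additionally requests interruption of a running task;
    // for a pending SettableFuture the flag makes no practical difference.
    future.cancel(true);
    System.out.println(future.isCancelled()); // true: cancelled before completion
  }
}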
From source file:io.druid.client.DirectDruidClient.java
@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> context)
{
  QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
  boolean isBySegment = query.getContextBySegment(false);

  Pair<JavaType, JavaType> types = typesMap.get(query.getClass());
  if (types == null) {
    final TypeFactory typeFactory = objectMapper.getTypeFactory();
    JavaType baseType = typeFactory.constructType(toolChest.getResultTypeReference());
    JavaType bySegmentType = typeFactory.constructParametricType(
        Result.class,
        typeFactory.constructParametricType(BySegmentResultValueClass.class, baseType)
    );
    types = Pair.of(baseType, bySegmentType);
    typesMap.put(query.getClass(), types);
  }

  final JavaType typeRef;
  if (isBySegment) {
    typeRef = types.rhs;
  } else {
    typeRef = types.lhs;
  }

  final ListenableFuture<InputStream> future;
  final String url = String.format("http://%s/druid/v2/", host);
  final String cancelUrl = String.format("http://%s/druid/v2/%s", host, query.getId());

  try {
    log.debug("Querying url[%s]", url);

    final long requestStartTime = System.currentTimeMillis();

    final ServiceMetricEvent.Builder builder = toolChest.makeMetricBuilder(query);
    builder.setDimension("server", host);
    builder.setDimension(DruidMetrics.ID, Strings.nullToEmpty(query.getId()));

    final HttpResponseHandler<InputStream, InputStream> responseHandler =
        new HttpResponseHandler<InputStream, InputStream>()
        {
          private long responseStartTime;
          private final AtomicLong byteCount = new AtomicLong(0);
          private final BlockingQueue<InputStream> queue = new LinkedBlockingQueue<>();
          private final AtomicBoolean done = new AtomicBoolean(false);

          @Override
          public ClientResponse<InputStream> handleResponse(HttpResponse response)
          {
            log.debug("Initial response from url[%s]", url);
            responseStartTime = System.currentTimeMillis();
            emitter.emit(builder.build("query/node/ttfb", responseStartTime - requestStartTime));

            try {
              final String responseContext = response.headers().get("X-Druid-Response-Context");
              // context may be null in case of error or query timeout
              if (responseContext != null) {
                context.putAll(
                    objectMapper.<Map<String, Object>>readValue(
                        responseContext,
                        new TypeReference<Map<String, Object>>()
                        {
                        }
                    )
                );
              }
              queue.put(new ChannelBufferInputStream(response.getContent()));
            }
            catch (final IOException e) {
              log.error(e, "Error parsing response context from url [%s]", url);
              return ClientResponse.<InputStream>finished(
                  new InputStream()
                  {
                    @Override
                    public int read() throws IOException
                    {
                      throw e;
                    }
                  }
              );
            }
            catch (InterruptedException e) {
              log.error(e, "Queue appending interrupted");
              Thread.currentThread().interrupt();
              throw Throwables.propagate(e);
            }
            byteCount.addAndGet(response.getContent().readableBytes());
            return ClientResponse.<InputStream>finished(
                new SequenceInputStream(
                    new Enumeration<InputStream>()
                    {
                      @Override
                      public boolean hasMoreElements()
                      {
                        // Done is always true until the last stream has been put in the queue.
                        // Then the stream should be spouting good InputStreams.
                        synchronized (done) {
                          return !done.get() || !queue.isEmpty();
                        }
                      }

                      @Override
                      public InputStream nextElement()
                      {
                        try {
                          return queue.take();
                        }
                        catch (InterruptedException e) {
                          Thread.currentThread().interrupt();
                          throw Throwables.propagate(e);
                        }
                      }
                    }
                )
            );
          }

          @Override
          public ClientResponse<InputStream> handleChunk(ClientResponse<InputStream> clientResponse, HttpChunk chunk)
          {
            final ChannelBuffer channelBuffer = chunk.getContent();
            final int bytes = channelBuffer.readableBytes();
            if (bytes > 0) {
              try {
                queue.put(new ChannelBufferInputStream(channelBuffer));
              }
              catch (InterruptedException e) {
                log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
                Thread.currentThread().interrupt();
                throw Throwables.propagate(e);
              }
              byteCount.addAndGet(bytes);
            }
            return clientResponse;
          }

          @Override
          public ClientResponse<InputStream> done(ClientResponse<InputStream> clientResponse)
          {
            long stopTime = System.currentTimeMillis();
            log.debug(
                "Completed request to url[%s] with %,d bytes returned in %,d millis [%,f b/s].",
                url,
                byteCount.get(),
                stopTime - responseStartTime,
                byteCount.get() / (0.0001 * (stopTime - responseStartTime))
            );
            emitter.emit(builder.build("query/node/time", stopTime - requestStartTime));
            synchronized (done) {
              try {
                // An empty byte array is put at the end to give SequenceInputStream.close() something to close out
                // after done is set to true, regardless of the rest of the stream's state.
                queue.put(ByteSource.empty().openStream());
              }
              catch (InterruptedException e) {
                log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
                Thread.currentThread().interrupt();
                throw Throwables.propagate(e);
              }
              catch (IOException e) {
                // This should never happen
                throw Throwables.propagate(e);
              }
              finally {
                done.set(true);
              }
            }
            return ClientResponse.<InputStream>finished(clientResponse.getObj());
          }

          @Override
          public void exceptionCaught(final ClientResponse<InputStream> clientResponse, final Throwable e)
          {
            // Don't wait for lock in case the lock had something to do with the error
            synchronized (done) {
              done.set(true);
              // Make a best effort to put a zero length buffer into the queue in case something is waiting on the take()
              // If nothing is waiting on take(), this will be closed out anyways.
              queue.offer(
                  new InputStream()
                  {
                    @Override
                    public int read() throws IOException
                    {
                      throw new IOException(e);
                    }
                  }
              );
            }
          }
        };

    future = httpClient.go(
        new Request(HttpMethod.POST, new URL(url))
            .setContent(objectMapper.writeValueAsBytes(query))
            .setHeader(
                HttpHeaders.Names.CONTENT_TYPE,
                isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON
            ),
        responseHandler
    );

    queryWatcher.registerQuery(query, future);

    openConnections.getAndIncrement();
    Futures.addCallback(
        future,
        new FutureCallback<InputStream>()
        {
          @Override
          public void onSuccess(InputStream result)
          {
            openConnections.getAndDecrement();
          }

          @Override
          public void onFailure(Throwable t)
          {
            openConnections.getAndDecrement();
            if (future.isCancelled()) {
              // forward the cancellation to underlying queriable node
              try {
                StatusResponseHolder res = httpClient.go(
                    new Request(HttpMethod.DELETE, new URL(cancelUrl))
                        .setContent(objectMapper.writeValueAsBytes(query))
                        .setHeader(
                            HttpHeaders.Names.CONTENT_TYPE,
                            isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON
                        ),
                    new StatusResponseHandler(Charsets.UTF_8)
                ).get();
                if (res.getStatus().getCode() >= 500) {
                  throw new RE(
                      "Error cancelling query[%s]: queriable node returned status[%d] [%s].",
                      query.getId(),
                      res.getStatus().getCode(),
                      res.getStatus().getReasonPhrase()
                  );
                }
              }
              catch (IOException | ExecutionException | InterruptedException e) {
                Throwables.propagate(e);
              }
            }
          }
        }
    );
  }
  catch (IOException e) {
    throw Throwables.propagate(e);
  }

  Sequence<T> retVal = new BaseSequence<>(
      new BaseSequence.IteratorMaker<T, JsonParserIterator<T>>()
      {
        @Override
        public JsonParserIterator<T> make()
        {
          return new JsonParserIterator<T>(typeRef, future, url);
        }

        @Override
        public void cleanup(JsonParserIterator<T> iterFromMake)
        {
          CloseQuietly.close(iterFromMake);
        }
      }
  );

  // bySegment queries are de-serialized after caching results in order to
  // avoid the cost of de-serializing and then re-serializing again when adding to cache
  if (!isBySegment) {
    retVal = Sequences.map(
        retVal,
        toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing())
    );
  }

  return retVal;
}
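The second example below is the later org.apache.druid relocation of the same class. It adds query timeouts, a scatter-gather byte limit, and chunk-level backpressure, but the isCancelled() check inside onFailure, which forwards a local cancel() to the remote node as an HTTP DELETE, is essentially unchanged.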
From source file:org.apache.druid.client.DirectDruidClient.java
@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final Map<String, Object> context)
{
  final Query<T> query = queryPlus.getQuery();
  QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
  boolean isBySegment = QueryContexts.isBySegment(query);

  Pair<JavaType, JavaType> types = typesMap.get(query.getClass());
  if (types == null) {
    final TypeFactory typeFactory = objectMapper.getTypeFactory();
    JavaType baseType = typeFactory.constructType(toolChest.getResultTypeReference());
    JavaType bySegmentType = typeFactory.constructParametricType(
        Result.class,
        typeFactory.constructParametricType(BySegmentResultValueClass.class, baseType)
    );
    types = Pair.of(baseType, bySegmentType);
    typesMap.put(query.getClass(), types);
  }

  final JavaType typeRef;
  if (isBySegment) {
    typeRef = types.rhs;
  } else {
    typeRef = types.lhs;
  }

  final ListenableFuture<InputStream> future;
  final String url = StringUtils.format("%s://%s/druid/v2/", scheme, host);
  final String cancelUrl = StringUtils.format("%s://%s/druid/v2/%s", scheme, host, query.getId());

  try {
    log.debug("Querying queryId[%s] url[%s]", query.getId(), url);

    final long requestStartTimeNs = System.nanoTime();
    final long timeoutAt = query.getContextValue(QUERY_FAIL_TIME);
    final long maxScatterGatherBytes = QueryContexts.getMaxScatterGatherBytes(query);
    final AtomicLong totalBytesGathered = (AtomicLong) context.get(QUERY_TOTAL_BYTES_GATHERED);
    final long maxQueuedBytes = QueryContexts.getMaxQueuedBytes(query, 0);
    final boolean usingBackpressure = maxQueuedBytes > 0;

    final HttpResponseHandler<InputStream, InputStream> responseHandler =
        new HttpResponseHandler<InputStream, InputStream>()
        {
          private final AtomicLong totalByteCount = new AtomicLong(0);
          private final AtomicLong queuedByteCount = new AtomicLong(0);
          private final AtomicLong channelSuspendedTime = new AtomicLong(0);
          private final BlockingQueue<InputStreamHolder> queue = new LinkedBlockingQueue<>();
          private final AtomicBoolean done = new AtomicBoolean(false);
          private final AtomicReference<String> fail = new AtomicReference<>();
          private final AtomicReference<TrafficCop> trafficCopRef = new AtomicReference<>();

          private QueryMetrics<? super Query<T>> queryMetrics;
          private long responseStartTimeNs;

          private QueryMetrics<? super Query<T>> acquireResponseMetrics()
          {
            if (queryMetrics == null) {
              queryMetrics = toolChest.makeMetrics(query);
              queryMetrics.server(host);
            }
            return queryMetrics;
          }

          /**
           * Queue a buffer. Returns true if we should keep reading, false otherwise.
           */
          private boolean enqueue(ChannelBuffer buffer, long chunkNum) throws InterruptedException
          {
            // Increment queuedByteCount before queueing the object, so queuedByteCount is at least as high as
            // the actual number of queued bytes at any particular time.
            final InputStreamHolder holder = InputStreamHolder.fromChannelBuffer(buffer, chunkNum);
            final long currentQueuedByteCount = queuedByteCount.addAndGet(holder.getLength());
            queue.put(holder);

            // True if we should keep reading.
            return !usingBackpressure || currentQueuedByteCount < maxQueuedBytes;
          }

          private InputStream dequeue() throws InterruptedException
          {
            final InputStreamHolder holder = queue.poll(checkQueryTimeout(), TimeUnit.MILLISECONDS);
            if (holder == null) {
              throw new RE("Query[%s] url[%s] timed out.", query.getId(), url);
            }

            final long currentQueuedByteCount = queuedByteCount.addAndGet(-holder.getLength());
            if (usingBackpressure && currentQueuedByteCount < maxQueuedBytes) {
              long backPressureTime = Preconditions
                  .checkNotNull(trafficCopRef.get(), "No TrafficCop, how can this be?")
                  .resume(holder.getChunkNum());
              channelSuspendedTime.addAndGet(backPressureTime);
            }

            return holder.getStream();
          }

          @Override
          public ClientResponse<InputStream> handleResponse(HttpResponse response, TrafficCop trafficCop)
          {
            trafficCopRef.set(trafficCop);
            checkQueryTimeout();
            checkTotalBytesLimit(response.getContent().readableBytes());

            log.debug("Initial response from url[%s] for queryId[%s]", url, query.getId());
            responseStartTimeNs = System.nanoTime();
            acquireResponseMetrics().reportNodeTimeToFirstByte(responseStartTimeNs - requestStartTimeNs).emit(emitter);

            final boolean continueReading;
            try {
              final String responseContext = response.headers().get("X-Druid-Response-Context");
              // context may be null in case of error or query timeout
              if (responseContext != null) {
                context.putAll(
                    objectMapper.<Map<String, Object>>readValue(
                        responseContext,
                        JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT
                    )
                );
              }
              continueReading = enqueue(response.getContent(), 0L);
            }
            catch (final IOException e) {
              log.error(e, "Error parsing response context from url [%s]", url);
              return ClientResponse.finished(
                  new InputStream()
                  {
                    @Override
                    public int read() throws IOException
                    {
                      throw e;
                    }
                  }
              );
            }
            catch (InterruptedException e) {
              log.error(e, "Queue appending interrupted");
              Thread.currentThread().interrupt();
              throw Throwables.propagate(e);
            }
            totalByteCount.addAndGet(response.getContent().readableBytes());
            return ClientResponse.finished(
                new SequenceInputStream(
                    new Enumeration<InputStream>()
                    {
                      @Override
                      public boolean hasMoreElements()
                      {
                        if (fail.get() != null) {
                          throw new RE(fail.get());
                        }
                        checkQueryTimeout();

                        // Done is always true until the last stream has been put in the queue.
                        // Then the stream should be spouting good InputStreams.
                        synchronized (done) {
                          return !done.get() || !queue.isEmpty();
                        }
                      }

                      @Override
                      public InputStream nextElement()
                      {
                        if (fail.get() != null) {
                          throw new RE(fail.get());
                        }

                        try {
                          return dequeue();
                        }
                        catch (InterruptedException e) {
                          Thread.currentThread().interrupt();
                          throw Throwables.propagate(e);
                        }
                      }
                    }
                ),
                continueReading
            );
          }

          @Override
          public ClientResponse<InputStream> handleChunk(
              ClientResponse<InputStream> clientResponse,
              HttpChunk chunk,
              long chunkNum
          )
          {
            checkQueryTimeout();

            final ChannelBuffer channelBuffer = chunk.getContent();
            final int bytes = channelBuffer.readableBytes();

            checkTotalBytesLimit(bytes);

            boolean continueReading = true;
            if (bytes > 0) {
              try {
                continueReading = enqueue(channelBuffer, chunkNum);
              }
              catch (InterruptedException e) {
                log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
                Thread.currentThread().interrupt();
                throw Throwables.propagate(e);
              }
              totalByteCount.addAndGet(bytes);
            }

            return ClientResponse.finished(clientResponse.getObj(), continueReading);
          }

          @Override
          public ClientResponse<InputStream> done(ClientResponse<InputStream> clientResponse)
          {
            long stopTimeNs = System.nanoTime();
            long nodeTimeNs = stopTimeNs - requestStartTimeNs;
            final long nodeTimeMs = TimeUnit.NANOSECONDS.toMillis(nodeTimeNs);
            log.debug(
                "Completed queryId[%s] request to url[%s] with %,d bytes returned in %,d millis [%,f b/s].",
                query.getId(),
                url,
                totalByteCount.get(),
                nodeTimeMs,
                // Floating math; division by zero will yield Inf, not exception
                totalByteCount.get() / (0.001 * nodeTimeMs)
            );
            QueryMetrics<? super Query<T>> responseMetrics = acquireResponseMetrics();
            responseMetrics.reportNodeTime(nodeTimeNs);
            responseMetrics.reportNodeBytes(totalByteCount.get());
            if (usingBackpressure) {
              responseMetrics.reportBackPressureTime(channelSuspendedTime.get());
            }
            responseMetrics.emit(emitter);
            synchronized (done) {
              try {
                // An empty byte array is put at the end to give SequenceInputStream.close() something to close out
                // after done is set to true, regardless of the rest of the stream's state.
                queue.put(InputStreamHolder.fromChannelBuffer(ChannelBuffers.EMPTY_BUFFER, Long.MAX_VALUE));
              }
              catch (InterruptedException e) {
                log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
                Thread.currentThread().interrupt();
                throw Throwables.propagate(e);
              }
              finally {
                done.set(true);
              }
            }
            return ClientResponse.finished(clientResponse.getObj());
          }

          @Override
          public void exceptionCaught(final ClientResponse<InputStream> clientResponse, final Throwable e)
          {
            String msg = StringUtils.format(
                "Query[%s] url[%s] failed with exception msg [%s]",
                query.getId(),
                url,
                e.getMessage()
            );
            setupResponseReadFailure(msg, e);
          }

          private void setupResponseReadFailure(String msg, Throwable th)
          {
            fail.set(msg);
            queue.clear();
            queue.offer(
                InputStreamHolder.fromStream(
                    new InputStream()
                    {
                      @Override
                      public int read() throws IOException
                      {
                        if (th != null) {
                          throw new IOException(msg, th);
                        } else {
                          throw new IOException(msg);
                        }
                      }
                    },
                    -1,
                    0
                )
            );
          }

          // Returns remaining timeout or throws exception if timeout already elapsed.
          private long checkQueryTimeout()
          {
            long timeLeft = timeoutAt - System.currentTimeMillis();
            if (timeLeft <= 0) {
              String msg = StringUtils.format("Query[%s] url[%s] timed out.", query.getId(), url);
              setupResponseReadFailure(msg, null);
              throw new RE(msg);
            } else {
              return timeLeft;
            }
          }

          private void checkTotalBytesLimit(long bytes)
          {
            if (maxScatterGatherBytes < Long.MAX_VALUE && totalBytesGathered.addAndGet(bytes) > maxScatterGatherBytes) {
              String msg = StringUtils.format(
                  "Query[%s] url[%s] max scatter-gather bytes limit reached.",
                  query.getId(),
                  url
              );
              setupResponseReadFailure(msg, null);
              throw new RE(msg);
            }
          }
        };

    long timeLeft = timeoutAt - System.currentTimeMillis();

    if (timeLeft <= 0) {
      throw new RE("Query[%s] url[%s] timed out.", query.getId(), url);
    }

    future = httpClient.go(
        new Request(HttpMethod.POST, new URL(url))
            .setContent(objectMapper.writeValueAsBytes(QueryContexts.withTimeout(query, timeLeft)))
            .setHeader(
                HttpHeaders.Names.CONTENT_TYPE,
                isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON
            ),
        responseHandler,
        Duration.millis(timeLeft)
    );

    queryWatcher.registerQuery(query, future);

    openConnections.getAndIncrement();
    Futures.addCallback(
        future,
        new FutureCallback<InputStream>()
        {
          @Override
          public void onSuccess(InputStream result)
          {
            openConnections.getAndDecrement();
          }

          @Override
          public void onFailure(Throwable t)
          {
            openConnections.getAndDecrement();
            if (future.isCancelled()) {
              // forward the cancellation to underlying queriable node
              try {
                StatusResponseHolder res = httpClient.go(
                    new Request(HttpMethod.DELETE, new URL(cancelUrl))
                        .setContent(objectMapper.writeValueAsBytes(query))
                        .setHeader(
                            HttpHeaders.Names.CONTENT_TYPE,
                            isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON
                        ),
                    new StatusResponseHandler(StandardCharsets.UTF_8),
                    Duration.standardSeconds(1)
                ).get(1, TimeUnit.SECONDS);
                if (res.getStatus().getCode() >= 500) {
                  throw new RE(
                      "Error cancelling query[%s]: queriable node returned status[%d] [%s].",
                      query.getId(),
                      res.getStatus().getCode(),
                      res.getStatus().getReasonPhrase()
                  );
                }
              }
              catch (IOException | ExecutionException | InterruptedException | TimeoutException e) {
                Throwables.propagate(e);
              }
            }
          }
        }
    );
  }
  catch (IOException e) {
    throw Throwables.propagate(e);
  }

  Sequence<T> retVal = new BaseSequence<>(
      new BaseSequence.IteratorMaker<T, JsonParserIterator<T>>()
      {
        @Override
        public JsonParserIterator<T> make()
        {
          return new JsonParserIterator<T>(typeRef, future, url, query);
        }

        @Override
        public void cleanup(JsonParserIterator<T> iterFromMake)
        {
          CloseQuietly.close(iterFromMake);
        }
      }
  );

  // bySegment queries are de-serialized after caching results in order to
  // avoid the cost of de-serializing and then re-serializing again when adding to cache
  if (!isBySegment) {
    retVal = Sequences.map(
        retVal,
        toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing())
    );
  }

  return retVal;
}
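Stripped of the Druid-specific plumbing, both source files use isCancelled() the same way: inside FutureCallback.onFailure(), it distinguishes a caller-initiated cancel() (which Guava delivers to the callback as a CancellationException) from a genuine error, so cancellation can be propagated to the remote node. Below is a reduced sketch of that pattern, assuming a current Guava version with the executor-taking addCallback overload; the class and method names are illustrative, not from either source file.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;

class CancellationAwareCallback {
  static <T> void watch(final ListenableFuture<T> future) {
    Futures.addCallback(future, new FutureCallback<T>() {
      @Override
      public void onSuccess(T result) {
        // normal completion: release per-request resources here
      }

      @Override
      public void onFailure(Throwable t) {
        if (future.isCancelled()) {
          // cancel() was called locally; t is a CancellationException.
          // Forward the cancellation (e.g. an HTTP DELETE to the remote node)
          // rather than treating it as a server-side failure.
        } else {
          // genuine failure: t describes what actually went wrong
        }
      }
    }, MoreExecutors.directExecutor());
  }
}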