List of usage examples for org.joda.time Duration millis
public static Duration millis(long millis)
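Duration.millis wraps a raw millisecond count in an immutable Joda-Time Duration. Before the scraped examples, a minimal self-contained sketch (class name and values are illustrative, not taken from the examples below):

import org.joda.time.Duration;
import org.joda.time.Period;

public class DurationMillisDemo {
    public static void main(String[] args) {
        // Wrap a raw millisecond count in an immutable Duration.
        Duration d = Duration.millis(90_000);
        System.out.println(d.getMillis());          // 90000
        System.out.println(d.getStandardSeconds()); // 90

        // A Duration can be widened into a Period of standard time fields.
        Period p = d.toPeriod();
        System.out.println(p); // PT1M30S
    }
}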
From source file:org.apache.beam.sdk.util.GroupAlsoByWindowsProperties.java
License:Apache License
/**
 * Tests that for a simple sequence of elements on the same key, the given GABW implementation
 * correctly groups them according to fixed windows and also sets the output timestamp
 * according to the policy {@link OutputTimeFns#outputAtEndOfWindow()}.
 */
public static void groupsElementsIntoFixedWindowsWithEndOfWindowTimestamp(
    GroupAlsoByWindowsDoFnFactory<String, String, Iterable<String>> gabwFactory) throws Exception {

  WindowingStrategy<?, IntervalWindow> windowingStrategy =
      WindowingStrategy.of(FixedWindows.of(Duration.millis(10)))
          .withOutputTimeFn(OutputTimeFns.outputAtEndOfWindow());

  DoFnTester<KV<String, Iterable<WindowedValue<String>>>, KV<String, Iterable<String>>> result =
      runGABW(
          gabwFactory,
          windowingStrategy,
          "key",
          WindowedValue.of("v1", new Instant(1), Arrays.asList(window(0, 10)), PaneInfo.NO_FIRING),
          WindowedValue.of("v2", new Instant(2), Arrays.asList(window(0, 10)), PaneInfo.NO_FIRING),
          WindowedValue.of("v3", new Instant(13), Arrays.asList(window(10, 20)), PaneInfo.NO_FIRING));

  assertThat(result.peekOutputElements(), hasSize(2));

  TimestampedValue<KV<String, Iterable<String>>> item0 =
      Iterables.getOnlyElement(result.peekOutputElementsInWindow(window(0, 10)));
  assertThat(item0.getValue().getValue(), containsInAnyOrder("v1", "v2"));
  assertThat(item0.getTimestamp(), equalTo(window(0, 10).maxTimestamp()));

  TimestampedValue<KV<String, Iterable<String>>> item1 =
      Iterables.getOnlyElement(result.peekOutputElementsInWindow(window(10, 20)));
  assertThat(item1.getValue().getValue(), contains("v3"));
  assertThat(item1.getTimestamp(), equalTo(window(10, 20).maxTimestamp()));
}
From source file:org.apache.beam.sdk.util.GroupAlsoByWindowsProperties.java
License:Apache License
/**
 * Tests that for a simple sequence of elements on the same key, the given GABW implementation
 * correctly groups them according to fixed windows and also sets the output timestamp
 * according to the policy {@link OutputTimeFns#outputAtLatestInputTimestamp()}.
 */
public static void groupsElementsIntoFixedWindowsWithLatestTimestamp(
    GroupAlsoByWindowsDoFnFactory<String, String, Iterable<String>> gabwFactory) throws Exception {

  WindowingStrategy<?, IntervalWindow> windowingStrategy =
      WindowingStrategy.of(FixedWindows.of(Duration.millis(10)))
          .withOutputTimeFn(OutputTimeFns.outputAtLatestInputTimestamp());

  DoFnTester<KV<String, Iterable<WindowedValue<String>>>, KV<String, Iterable<String>>> result =
      runGABW(
          gabwFactory,
          windowingStrategy,
          "k",
          WindowedValue.of("v1", new Instant(1), Arrays.asList(window(0, 10)), PaneInfo.NO_FIRING),
          WindowedValue.of("v2", new Instant(2), Arrays.asList(window(0, 10)), PaneInfo.NO_FIRING),
          WindowedValue.of("v3", new Instant(13), Arrays.asList(window(10, 20)), PaneInfo.NO_FIRING));

  assertThat(result.peekOutputElements(), hasSize(2));

  TimestampedValue<KV<String, Iterable<String>>> item0 =
      Iterables.getOnlyElement(result.peekOutputElementsInWindow(window(0, 10)));
  assertThat(item0.getValue().getValue(), containsInAnyOrder("v1", "v2"));
  assertThat(item0.getTimestamp(), equalTo(new Instant(2)));

  TimestampedValue<KV<String, Iterable<String>>> item1 =
      Iterables.getOnlyElement(result.peekOutputElementsInWindow(window(10, 20)));
  assertThat(item1.getValue().getValue(), contains("v3"));
  assertThat(item1.getTimestamp(), equalTo(new Instant(13)));
}
From source file:org.apache.beam.sdk.util.GroupAlsoByWindowsProperties.java
License:Apache License
/**
 * Tests that the given GABW implementation correctly groups elements into merged sessions
 * with output timestamps at the end of the merged window.
 */
public static void groupsElementsInMergedSessionsWithEndOfWindowTimestamp(
    GroupAlsoByWindowsDoFnFactory<String, String, Iterable<String>> gabwFactory) throws Exception {

  WindowingStrategy<?, IntervalWindow> windowingStrategy =
      WindowingStrategy.of(Sessions.withGapDuration(Duration.millis(10)))
          .withOutputTimeFn(OutputTimeFns.outputAtEndOfWindow());

  DoFnTester<KV<String, Iterable<WindowedValue<String>>>, KV<String, Iterable<String>>> result =
      runGABW(
          gabwFactory,
          windowingStrategy,
          "k",
          WindowedValue.of("v1", new Instant(0), Arrays.asList(window(0, 10)), PaneInfo.NO_FIRING),
          WindowedValue.of("v2", new Instant(5), Arrays.asList(window(5, 15)), PaneInfo.NO_FIRING),
          WindowedValue.of("v3", new Instant(15), Arrays.asList(window(15, 25)), PaneInfo.NO_FIRING));

  assertThat(result.peekOutputElements(), hasSize(2));

  TimestampedValue<KV<String, Iterable<String>>> item0 =
      Iterables.getOnlyElement(result.peekOutputElementsInWindow(window(0, 15)));
  assertThat(item0.getValue().getValue(), containsInAnyOrder("v1", "v2"));
  assertThat(item0.getTimestamp(), equalTo(window(0, 15).maxTimestamp()));

  TimestampedValue<KV<String, Iterable<String>>> item1 =
      Iterables.getOnlyElement(result.peekOutputElementsInWindow(window(15, 25)));
  assertThat(item1.getValue().getValue(), contains("v3"));
  assertThat(item1.getTimestamp(), equalTo(window(15, 25).maxTimestamp()));
}
From source file:org.apache.beam.sdk.util.GroupAlsoByWindowsProperties.java
License:Apache License
/**
 * Tests that the given GABW implementation correctly groups elements into merged sessions
 * with output timestamps at the latest input timestamp.
 */
public static void groupsElementsInMergedSessionsWithLatestTimestamp(
    GroupAlsoByWindowsDoFnFactory<String, String, Iterable<String>> gabwFactory) throws Exception {

  WindowingStrategy<?, IntervalWindow> windowingStrategy =
      WindowingStrategy.of(Sessions.withGapDuration(Duration.millis(10)))
          .withOutputTimeFn(OutputTimeFns.outputAtLatestInputTimestamp());

  BoundedWindow unmergedWindow = window(15, 25);
  DoFnTester<KV<String, Iterable<WindowedValue<String>>>, KV<String, Iterable<String>>> result =
      runGABW(
          gabwFactory,
          windowingStrategy,
          "k",
          WindowedValue.of("v1", new Instant(0), Arrays.asList(window(0, 10)), PaneInfo.NO_FIRING),
          WindowedValue.of("v2", new Instant(5), Arrays.asList(window(5, 15)), PaneInfo.NO_FIRING),
          WindowedValue.of("v3", new Instant(15), Arrays.asList(unmergedWindow), PaneInfo.NO_FIRING));

  assertThat(result.peekOutputElements(), hasSize(2));

  BoundedWindow mergedWindow = window(0, 15);
  TimestampedValue<KV<String, Iterable<String>>> item0 =
      Iterables.getOnlyElement(result.peekOutputElementsInWindow(mergedWindow));
  assertThat(item0.getValue().getValue(), containsInAnyOrder("v1", "v2"));
  assertThat(item0.getTimestamp(), equalTo(new Instant(5)));

  TimestampedValue<KV<String, Iterable<String>>> item1 =
      Iterables.getOnlyElement(result.peekOutputElementsInWindow(unmergedWindow));
  assertThat(item1.getValue().getValue(), contains("v3"));
  assertThat(item1.getTimestamp(), equalTo(new Instant(15)));
}
From source file:org.apache.beam.sdk.util.GroupAlsoByWindowsProperties.java
License:Apache License
/**
 * Tests that the given {@link GroupAlsoByWindowsDoFn} implementation combines elements per
 * session window correctly according to the provided {@link CombineFn}.
 */
public static void combinesElementsPerSessionWithEndOfWindowTimestamp(
    GroupAlsoByWindowsDoFnFactory<String, Long, Long> gabwFactory,
    CombineFn<Long, ?, Long> combineFn) throws Exception {

  WindowingStrategy<?, IntervalWindow> windowingStrategy =
      WindowingStrategy.of(Sessions.withGapDuration(Duration.millis(10)))
          .withOutputTimeFn(OutputTimeFns.outputAtEndOfWindow());

  BoundedWindow secondWindow = window(15, 25);
  DoFnTester<?, KV<String, Long>> result =
      runGABW(
          gabwFactory,
          windowingStrategy,
          "k",
          WindowedValue.of(1L, new Instant(0), Arrays.asList(window(0, 10)), PaneInfo.NO_FIRING),
          WindowedValue.of(2L, new Instant(5), Arrays.asList(window(5, 15)), PaneInfo.NO_FIRING),
          WindowedValue.of(4L, new Instant(15), Arrays.asList(secondWindow), PaneInfo.NO_FIRING));

  assertThat(result.peekOutputElements(), hasSize(2));

  BoundedWindow firstResultWindow = window(0, 15);
  TimestampedValue<KV<String, Long>> item0 =
      Iterables.getOnlyElement(result.peekOutputElementsInWindow(firstResultWindow));
  assertThat(item0.getValue().getValue(), equalTo(combineFn.apply(ImmutableList.of(1L, 2L))));
  assertThat(item0.getTimestamp(), equalTo(firstResultWindow.maxTimestamp()));

  TimestampedValue<KV<String, Long>> item1 =
      Iterables.getOnlyElement(result.peekOutputElementsInWindow(secondWindow));
  assertThat(item1.getValue().getValue(), equalTo(combineFn.apply(ImmutableList.of(4L))));
  assertThat(item1.getTimestamp(), equalTo(secondWindow.maxTimestamp()));
}
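All five Beam tests above build their WindowFns from Duration.millis. The same construction appears in ordinary pipeline code; a minimal sketch (the helper class and its input PCollection are hypothetical, not part of the tests above):

import org.apache.beam.sdk.transforms.windowing.FixedWindows;
import org.apache.beam.sdk.transforms.windowing.Sessions;
import org.apache.beam.sdk.transforms.windowing.Window;
import org.apache.beam.sdk.values.PCollection;
import org.joda.time.Duration;

class WindowingSketch {
    // Ten-millisecond fixed windows, as in the tests above.
    static PCollection<String> intoFixedWindows(PCollection<String> events) {
        return events.apply(Window.<String>into(FixedWindows.of(Duration.millis(10))));
    }

    // Session windows that close after a ten-millisecond gap in activity.
    static PCollection<String> intoSessions(PCollection<String> events) {
        return events.apply(Window.<String>into(Sessions.withGapDuration(Duration.millis(10))));
    }
}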
From source file:org.apache.beam.sdk.util.Reshuffle.java
License:Apache License
@Override
public PCollection<KV<K, V>> expand(PCollection<KV<K, V>> input) {
  WindowingStrategy<?, ?> originalStrategy = input.getWindowingStrategy();
  // If the input has already had its windows merged, then the GBK that performed the merge
  // will have set originalStrategy.getWindowFn() to InvalidWindows, causing the GBK contained
  // here to fail. Instead, we install a valid WindowFn that leaves all windows unchanged.
  // The OutputTimeFn is set to ensure the GroupByKey does not shift elements forwards in time.
  // Because this outputs as fast as possible, this should not hold the watermark.
  Window<KV<K, V>> rewindow =
      Window.<KV<K, V>>into(new IdentityWindowFn<>(originalStrategy.getWindowFn().windowCoder()))
          .triggering(new ReshuffleTrigger<>())
          .discardingFiredPanes()
          .withOutputTimeFn(OutputTimeFns.outputAtEarliestInputTimestamp())
          .withAllowedLateness(Duration.millis(BoundedWindow.TIMESTAMP_MAX_VALUE.getMillis()));

  return input
      .apply(rewindow)
      .apply("ReifyOriginalTimestamps", ReifyTimestamps.<K, V>inValues())
      .apply(GroupByKey.<K, TimestampedValue<V>>create())
      // Set the windowing strategy directly, so that it doesn't get counted as the user having
      // set allowed lateness.
      .setWindowingStrategyInternal(originalStrategy)
      .apply(
          "ExpandIterable",
          ParDo.of(
              new DoFn<KV<K, Iterable<TimestampedValue<V>>>, KV<K, TimestampedValue<V>>>() {
                @ProcessElement
                public void processElement(ProcessContext c) {
                  K key = c.element().getKey();
                  for (TimestampedValue<V> value : c.element().getValue()) {
                    c.output(KV.of(key, value));
                  }
                }
              }))
      .apply("RestoreOriginalTimestamps", ReifyTimestamps.<K, V>extractFromValues());
}
From source file:org.apache.beam.sdk.util.WindowingStrategies.java
License:Apache License
/**
 * Converts from {@link RunnerApi.WindowingStrategy} to the SDK's {@link WindowingStrategy} using
 * the provided components to dereference identifiers found in the proto.
 */
public static WindowingStrategy<?, ?> fromProto(
    RunnerApi.WindowingStrategy proto, RunnerApi.Components components)
    throws InvalidProtocolBufferException {
  SdkFunctionSpec windowFnSpec = proto.getWindowFn();

  checkArgument(
      windowFnSpec.getSpec().getUrn().equals(CUSTOM_WINDOWFN_URN),
      "Only Java-serialized %s instances are supported, with URN %s. But found URN %s",
      WindowFn.class.getSimpleName(),
      CUSTOM_WINDOWFN_URN,
      windowFnSpec.getSpec().getUrn());

  Object deserializedWindowFn =
      SerializableUtils.deserializeFromByteArray(
          windowFnSpec.getSpec().getParameter().unpack(BytesValue.class).getValue().toByteArray(),
          "WindowFn");

  WindowFn<?, ?> windowFn = (WindowFn<?, ?>) deserializedWindowFn;
  OutputTimeFn<?> outputTimeFn = OutputTimeFns.fromProto(proto.getOutputTime());
  AccumulationMode accumulationMode = fromProto(proto.getAccumulationMode());
  Trigger trigger = Triggers.fromProto(proto.getTrigger());
  ClosingBehavior closingBehavior = fromProto(proto.getClosingBehavior());
  Duration allowedLateness = Duration.millis(proto.getAllowedLateness());

  return WindowingStrategy.of(windowFn)
      .withAllowedLateness(allowedLateness)
      .withMode(accumulationMode)
      .withTrigger(trigger)
      .withOutputTimeFn(outputTimeFn)
      .withClosingBehavior(closingBehavior);
}
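Note that allowed lateness crosses the proto boundary as a plain long of milliseconds, so Duration.millis makes the conversion a lossless round trip. A small sketch of that property (values are illustrative):

import org.joda.time.Duration;

// What fromProto reads back is just the millisecond count, so the
// reconstituted Duration equals the original exactly.
long allowedLatenessMillis = Duration.standardMinutes(5).getMillis(); // 300000
Duration restored = Duration.millis(allowedLatenessMillis);
assert restored.equals(Duration.standardMinutes(5));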
From source file:org.apache.drill.exec.vector.accessor.AccessorUtilities.java
License:Apache License
public static void setFromInt(ColumnWriter writer, int value) {
  switch (writer.valueType()) {
  case BYTES:
    writer.setBytes(Integer.toHexString(value).getBytes());
    break;
  case DOUBLE:
    writer.setDouble(value);
    break;
  case INTEGER:
    writer.setInt(value);
    break;
  case LONG:
    writer.setLong(value);
    break;
  case STRING:
    writer.setString(Integer.toString(value));
    break;
  case DECIMAL:
    writer.setDecimal(BigDecimal.valueOf(value));
    break;
  case PERIOD:
    writer.setPeriod(Duration.millis(value).toPeriod());
    break;
  default:
    throw new IllegalStateException("Unknown writer type: " + writer.valueType());
  }
}
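The PERIOD branch relies on Duration.millis(value).toPeriod() splitting a millisecond count into standard time fields. A quick illustration (the class name and value are arbitrary):

import org.joda.time.Duration;
import org.joda.time.Period;

public class PeriodBranchDemo {
    public static void main(String[] args) {
        // 3,661,000 ms splits into 1 hour, 1 minute, 1 second.
        Period p = Duration.millis(3_661_000).toPeriod();
        System.out.println(p); // PT1H1M1S
    }
}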
From source file:org.apache.drill.test.rowSet.RowSetUtilities.java
License:Apache License
/**
 * Ad-hoc, test-only method to create a Period from an integer. Periods are made up of
 * months and milliseconds. There is no mapping from one to the other, so a period
 * requires at least two numbers. Still, we are given just one (typically from a test
 * data generator). Use that int value to "spread" some value across the two kinds
 * of fields. The result has no meaning, but has the same comparison order as the
 * original ints.
 *
 * @param minorType the Drill data type
 * @param value the integer value to apply
 */
public static Period periodFromInt(MinorType minorType, int value) {
  switch (minorType) {
  case INTERVAL:
    return Duration.millis(value).toPeriod();
  case INTERVALYEAR:
    return Period.years(value / 12).withMonths(value % 12);
  case INTERVALDAY:
    int sec = value % 60;
    value = value / 60;
    int min = value % 60;
    value = value / 60;
    return Period.days(value).withMinutes(min).withSeconds(sec);
  default:
    throw new IllegalArgumentException("Writer is not an interval: " + minorType);
  }
}
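For the INTERVALDAY branch, the int is peeled apart base-60 from the low end. A hypothetical call showing the spread (assuming the helper above is on the classpath):

// 3725 -> 5 seconds (3725 % 60), 2 minutes (62 % 60), 1 day (62 / 60).
Period p = RowSetUtilities.periodFromInt(MinorType.INTERVALDAY, 3725);
System.out.println(p); // P1DT2M5S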
From source file:org.apache.druid.client.DirectDruidClient.java
License:Apache License
@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final ResponseContext context) {
  final Query<T> query = queryPlus.getQuery();
  QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
  boolean isBySegment = QueryContexts.isBySegment(query);
  final JavaType queryResultType = isBySegment
      ? toolChest.getBySegmentResultType()
      : toolChest.getBaseResultType();

  final ListenableFuture<InputStream> future;
  final String url = StringUtils.format("%s://%s/druid/v2/", scheme, host);
  final String cancelUrl = StringUtils.format("%s://%s/druid/v2/%s", scheme, host, query.getId());

  try {
    log.debug("Querying queryId[%s] url[%s]", query.getId(), url);

    final long requestStartTimeNs = System.nanoTime();
    final long timeoutAt = query.getContextValue(QUERY_FAIL_TIME);
    final long maxScatterGatherBytes = QueryContexts.getMaxScatterGatherBytes(query);
    final AtomicLong totalBytesGathered =
        (AtomicLong) context.get(ResponseContext.Key.QUERY_TOTAL_BYTES_GATHERED);
    final long maxQueuedBytes = QueryContexts.getMaxQueuedBytes(query, 0);
    final boolean usingBackpressure = maxQueuedBytes > 0;

    final HttpResponseHandler<InputStream, InputStream> responseHandler =
        new HttpResponseHandler<InputStream, InputStream>() {
          private final AtomicLong totalByteCount = new AtomicLong(0);
          private final AtomicLong queuedByteCount = new AtomicLong(0);
          private final AtomicLong channelSuspendedTime = new AtomicLong(0);
          private final BlockingQueue<InputStreamHolder> queue = new LinkedBlockingQueue<>();
          private final AtomicBoolean done = new AtomicBoolean(false);
          private final AtomicReference<String> fail = new AtomicReference<>();
          private final AtomicReference<TrafficCop> trafficCopRef = new AtomicReference<>();

          private QueryMetrics<? super Query<T>> queryMetrics;
          private long responseStartTimeNs;

          private QueryMetrics<? super Query<T>> acquireResponseMetrics() {
            if (queryMetrics == null) {
              queryMetrics = toolChest.makeMetrics(query);
              queryMetrics.server(host);
            }
            return queryMetrics;
          }

          /**
           * Queue a buffer. Returns true if we should keep reading, false otherwise.
           */
          private boolean enqueue(ChannelBuffer buffer, long chunkNum) throws InterruptedException {
            // Increment queuedByteCount before queueing the object, so queuedByteCount is at
            // least as high as the actual number of queued bytes at any particular time.
            final InputStreamHolder holder = InputStreamHolder.fromChannelBuffer(buffer, chunkNum);
            final long currentQueuedByteCount = queuedByteCount.addAndGet(holder.getLength());
            queue.put(holder);

            // True if we should keep reading.
            return !usingBackpressure || currentQueuedByteCount < maxQueuedBytes;
          }

          private InputStream dequeue() throws InterruptedException {
            final InputStreamHolder holder = queue.poll(checkQueryTimeout(), TimeUnit.MILLISECONDS);
            if (holder == null) {
              throw new RE("Query[%s] url[%s] timed out.", query.getId(), url);
            }

            final long currentQueuedByteCount = queuedByteCount.addAndGet(-holder.getLength());
            if (usingBackpressure && currentQueuedByteCount < maxQueuedBytes) {
              long backPressureTime = Preconditions
                  .checkNotNull(trafficCopRef.get(), "No TrafficCop, how can this be?")
                  .resume(holder.getChunkNum());
              channelSuspendedTime.addAndGet(backPressureTime);
            }

            return holder.getStream();
          }

          @Override
          public ClientResponse<InputStream> handleResponse(HttpResponse response, TrafficCop trafficCop) {
            trafficCopRef.set(trafficCop);
            checkQueryTimeout();
            checkTotalBytesLimit(response.getContent().readableBytes());

            log.debug("Initial response from url[%s] for queryId[%s]", url, query.getId());
            responseStartTimeNs = System.nanoTime();
            acquireResponseMetrics()
                .reportNodeTimeToFirstByte(responseStartTimeNs - requestStartTimeNs)
                .emit(emitter);

            final boolean continueReading;
            try {
              final String responseContext = response.headers().get(QueryResource.HEADER_RESPONSE_CONTEXT);
              // context may be null in case of error or query timeout
              if (responseContext != null) {
                context.merge(ResponseContext.deserialize(responseContext, objectMapper));
              }
              continueReading = enqueue(response.getContent(), 0L);
            } catch (final IOException e) {
              log.error(e, "Error parsing response context from url [%s]", url);
              return ClientResponse.finished(new InputStream() {
                @Override
                public int read() throws IOException {
                  throw e;
                }
              });
            } catch (InterruptedException e) {
              log.error(e, "Queue appending interrupted");
              Thread.currentThread().interrupt();
              throw new RuntimeException(e);
            }

            totalByteCount.addAndGet(response.getContent().readableBytes());

            return ClientResponse.finished(
                new SequenceInputStream(new Enumeration<InputStream>() {
                  @Override
                  public boolean hasMoreElements() {
                    if (fail.get() != null) {
                      throw new RE(fail.get());
                    }
                    checkQueryTimeout();

                    // Done is always true until the last stream has been put in the queue.
                    // Then the stream should be spouting good InputStreams.
                    synchronized (done) {
                      return !done.get() || !queue.isEmpty();
                    }
                  }

                  @Override
                  public InputStream nextElement() {
                    if (fail.get() != null) {
                      throw new RE(fail.get());
                    }

                    try {
                      return dequeue();
                    } catch (InterruptedException e) {
                      Thread.currentThread().interrupt();
                      throw new RuntimeException(e);
                    }
                  }
                }),
                continueReading);
          }

          @Override
          public ClientResponse<InputStream> handleChunk(
              ClientResponse<InputStream> clientResponse, HttpChunk chunk, long chunkNum) {
            checkQueryTimeout();

            final ChannelBuffer channelBuffer = chunk.getContent();
            final int bytes = channelBuffer.readableBytes();

            checkTotalBytesLimit(bytes);

            boolean continueReading = true;
            if (bytes > 0) {
              try {
                continueReading = enqueue(channelBuffer, chunkNum);
              } catch (InterruptedException e) {
                log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
              }
              totalByteCount.addAndGet(bytes);
            }

            return ClientResponse.finished(clientResponse.getObj(), continueReading);
          }

          @Override
          public ClientResponse<InputStream> done(ClientResponse<InputStream> clientResponse) {
            long stopTimeNs = System.nanoTime();
            long nodeTimeNs = stopTimeNs - requestStartTimeNs;
            final long nodeTimeMs = TimeUnit.NANOSECONDS.toMillis(nodeTimeNs);
            log.debug(
                "Completed queryId[%s] request to url[%s] with %,d bytes returned in %,d millis [%,f b/s].",
                query.getId(),
                url,
                totalByteCount.get(),
                nodeTimeMs,
                // Floating math; division by zero will yield Inf, not exception
                totalByteCount.get() / (0.001 * nodeTimeMs));

            QueryMetrics<? super Query<T>> responseMetrics = acquireResponseMetrics();
            responseMetrics.reportNodeTime(nodeTimeNs);
            responseMetrics.reportNodeBytes(totalByteCount.get());
            if (usingBackpressure) {
              responseMetrics.reportBackPressureTime(channelSuspendedTime.get());
            }
            responseMetrics.emit(emitter);

            synchronized (done) {
              try {
                // An empty byte array is put at the end to give SequenceInputStream.close()
                // something to close out after done is set to true, regardless of the rest of
                // the stream's state.
                queue.put(InputStreamHolder.fromChannelBuffer(ChannelBuffers.EMPTY_BUFFER, Long.MAX_VALUE));
              } catch (InterruptedException e) {
                log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]", url);
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
              } finally {
                done.set(true);
              }
            }

            return ClientResponse.finished(clientResponse.getObj());
          }

          @Override
          public void exceptionCaught(final ClientResponse<InputStream> clientResponse, final Throwable e) {
            String msg = StringUtils.format(
                "Query[%s] url[%s] failed with exception msg [%s]", query.getId(), url, e.getMessage());
            setupResponseReadFailure(msg, e);
          }

          private void setupResponseReadFailure(String msg, Throwable th) {
            fail.set(msg);
            queue.clear();
            queue.offer(InputStreamHolder.fromStream(
                new InputStream() {
                  @Override
                  public int read() throws IOException {
                    if (th != null) {
                      throw new IOException(msg, th);
                    } else {
                      throw new IOException(msg);
                    }
                  }
                },
                -1,
                0));
          }

          // Returns remaining timeout or throws exception if timeout already elapsed.
          private long checkQueryTimeout() {
            long timeLeft = timeoutAt - System.currentTimeMillis();
            if (timeLeft <= 0) {
              String msg = StringUtils.format("Query[%s] url[%s] timed out.", query.getId(), url);
              setupResponseReadFailure(msg, null);
              throw new RE(msg);
            } else {
              return timeLeft;
            }
          }

          private void checkTotalBytesLimit(long bytes) {
            if (maxScatterGatherBytes < Long.MAX_VALUE
                && totalBytesGathered.addAndGet(bytes) > maxScatterGatherBytes) {
              String msg = StringUtils.format(
                  "Query[%s] url[%s] max scatter-gather bytes limit reached.", query.getId(), url);
              setupResponseReadFailure(msg, null);
              throw new RE(msg);
            }
          }
        };

    long timeLeft = timeoutAt - System.currentTimeMillis();
    if (timeLeft <= 0) {
      throw new RE("Query[%s] url[%s] timed out.", query.getId(), url);
    }

    future = httpClient.go(
        new Request(HttpMethod.POST, new URL(url))
            .setContent(objectMapper.writeValueAsBytes(QueryContexts.withTimeout(query, timeLeft)))
            .setHeader(
                HttpHeaders.Names.CONTENT_TYPE,
                isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON),
        responseHandler,
        Duration.millis(timeLeft));

    queryWatcher.registerQuery(query, future);
    openConnections.getAndIncrement();

    Futures.addCallback(future, new FutureCallback<InputStream>() {
      @Override
      public void onSuccess(InputStream result) {
        openConnections.getAndDecrement();
      }

      @Override
      public void onFailure(Throwable t) {
        openConnections.getAndDecrement();
        if (future.isCancelled()) {
          // forward the cancellation to underlying queriable node
          try {
            StatusResponseHolder res = httpClient.go(
                new Request(HttpMethod.DELETE, new URL(cancelUrl))
                    .setContent(objectMapper.writeValueAsBytes(query))
                    .setHeader(
                        HttpHeaders.Names.CONTENT_TYPE,
                        isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON),
                StatusResponseHandler.getInstance(),
                Duration.standardSeconds(1))
                .get(1, TimeUnit.SECONDS);
            if (res.getStatus().getCode() >= 500) {
              throw new RE(
                  "Error cancelling query[%s]: queriable node returned status[%d] [%s].",
                  query.getId(),
                  res.getStatus().getCode(),
                  res.getStatus().getReasonPhrase());
            }
          } catch (IOException | ExecutionException | InterruptedException | TimeoutException e) {
            throw new RuntimeException(e);
          }
        }
      }
    });
  } catch (IOException e) {
    throw new RuntimeException(e);
  }

  Sequence<T> retVal = new BaseSequence<>(new BaseSequence.IteratorMaker<T, JsonParserIterator<T>>() {
    @Override
    public JsonParserIterator<T> make() {
      return new JsonParserIterator<T>(
          queryResultType, future, url, query, host,
          toolChest.decorateObjectMapper(objectMapper, query), null);
    }

    @Override
    public void cleanup(JsonParserIterator<T> iterFromMake) {
      CloseQuietly.close(iterFromMake);
    }
  });

  // bySegment queries are de-serialized after caching results in order to
  // avoid the cost of de-serializing and then re-serializing again when adding to cache
  if (!isBySegment) {
    retVal = Sequences.map(
        retVal,
        toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing()));
  }

  return retVal;
}
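Duration.millis appears twice in this client: once to hand the HTTP client the remaining per-query budget, and once (alongside Duration.standardSeconds(1)) for the cancel request. The budget computation in isolation, as a hedged sketch (the method, deadline value, and commented-out client call are illustrative, not the Druid API):

import org.joda.time.Duration;

public class RemainingBudgetSketch {
    public static Duration remainingBudget(long failTimeMillis) {
        // Convert an absolute deadline into the remaining relative timeout.
        long timeLeft = failTimeMillis - System.currentTimeMillis();
        if (timeLeft <= 0) {
            throw new IllegalStateException("query timed out before dispatch");
        }
        return Duration.millis(timeLeft);
        // A caller would pass this as the request timeout, e.g.
        // httpClient.go(request, responseHandler, remainingBudget(timeoutAt)).
    }
}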