Example usage for org.joda.time Duration standardSeconds

List of usage examples for org.joda.time Duration standardSeconds

Introduction

On this page you can find example usage for org.joda.time Duration standardSeconds.

Prototype

public static Duration standardSeconds(long seconds) 

Source Link

Document

Create a duration with the specified number of seconds assuming that there are the standard number of milliseconds in a second.
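
For orientation, here is a minimal standalone sketch (not taken from the examples below) showing the contract: one standard second is exactly 1000 milliseconds, so standardSeconds(n) is equivalent to millis(n * 1000).

import org.joda.time.Duration;

public class StandardSecondsDemo {
    public static void main(String[] args) {
        // One standard second is exactly 1000 milliseconds.
        Duration thirty = Duration.standardSeconds(30);
        System.out.println(thirty.getMillis());          // 30000
        System.out.println(thirty.getStandardSeconds()); // 30

        // Standard durations compose exactly: 60 standard seconds equal 1 standard minute.
        System.out.println(Duration.standardSeconds(60).equals(Duration.standardMinutes(1))); // true
    }
}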

Usage

From source file:org.apache.beam.sdk.nexmark.queries.Query12.java

License:Apache License

@Override
public PCollection<BidsPerSession> expand(PCollection<Event> events) {
    return events.apply(NexmarkQueryUtil.JUST_BIDS).apply(ParDo.of(new DoFn<Bid, Long>() {
        @ProcessElement
        public void processElement(ProcessContext c) {
            c.output(c.element().bidder);
        }
    })).apply(Window.<Long>into(new GlobalWindows())
            .triggering(Repeatedly.forever(AfterProcessingTime.pastFirstElementInPane()
                    .plusDelayOf(Duration.standardSeconds(configuration.windowSizeSec))))
            .discardingFiredPanes().withAllowedLateness(Duration.ZERO)).apply(Count.perElement())
            .apply(name + ".ToResult", ParDo.of(new DoFn<KV<Long, Long>, BidsPerSession>() {
                @ProcessElement
                public void processElement(ProcessContext c) {
                    c.output(new BidsPerSession(c.element().getKey(), c.element().getValue()));
                }
            }));
}

From source file:org.apache.beam.sdk.nexmark.queries.Query4.java

License:Apache License

@Override
public PCollection<CategoryPrice> expand(PCollection<Event> events) {
    PCollection<AuctionBid> winningBids = events.apply(Filter.by(new AuctionOrBid()))
            // Find the winning bid for each closed auction.
            .apply(new WinningBids(name + ".WinningBids", configuration));

    // Monitor winning bids
    winningBids = winningBids.apply(name + ".WinningBidsMonitor", winningBidsMonitor.getTransform());

    return winningBids
            // Key the winning bid price by the auction category.
            .apply(name + ".Rekey", ParDo.of(new DoFn<AuctionBid, KV<Long, Long>>() {
                @ProcessElement
                public void processElement(ProcessContext c) {
                    Auction auction = c.element().auction;
                    Bid bid = c.element().bid;
                    c.output(KV.of(auction.category, bid.price));
                }
            }))

            // Re-window so we can calculate a sliding average
            .apply(Window.into(SlidingWindows.of(Duration.standardSeconds(configuration.windowSizeSec))
                    .every(Duration.standardSeconds(configuration.windowPeriodSec))))

            // Find the average of the winning bids for each category.
            // Make sure we share the work for each category between workers.
            .apply(Mean.<Long, Long>perKey().withHotKeyFanout(configuration.fanout))

            // For testing against Query4Model, capture which results are 'final'.
            .apply(name + ".Project", ParDo.of(new DoFn<KV<Long, Double>, CategoryPrice>() {
                @ProcessElement
                public void processElement(ProcessContext c) {
                    c.output(new CategoryPrice(c.element().getKey(), Math.round(c.element().getValue()),
                            c.pane().isLast()));
                }
            }));
}

From source file:org.apache.beam.sdk.nexmark.queries.Query5.java

License:Apache License

@Override
public PCollection<AuctionCount> expand(PCollection<Event> events) {
    return events
            // Only want the bid events.
            .apply(NexmarkQueryUtil.JUST_BIDS)
            // Window the bids into sliding windows.
            .apply(Window.into(SlidingWindows.of(Duration.standardSeconds(configuration.windowSizeSec))
                    .every(Duration.standardSeconds(configuration.windowPeriodSec))))
            // Project just the auction id.
            .apply("BidToAuction", NexmarkQueryUtil.BID_TO_AUCTION)

            // Count the number of bids per auction id.
            .apply(Count.perElement())

            // We'll want to keep all auctions with the maximal number of bids.
            // Start by lifting each into a singleton list.
            // We need to do this because the combine below returns a list of auctions in the key
            // in case of an equal number of bids; Combine needs to have the same input and return type.
            .apply(name + ".ToSingletons", ParDo.of(new DoFn<KV<Long, Long>, KV<List<Long>, Long>>() {
                @ProcessElement
                public void processElement(ProcessContext c) {
                    c.output(KV.of(Collections.singletonList(c.element().getKey()), c.element().getValue()));
                }
            }))

            // Keep only the auction ids with the most bids.
            .apply(Combine.globally(new Combine.BinaryCombineFn<KV<List<Long>, Long>>() {
                @Override
                public KV<List<Long>, Long> apply(KV<List<Long>, Long> left, KV<List<Long>, Long> right) {
                    List<Long> leftBestAuctions = left.getKey();
                    long leftCount = left.getValue();
                    List<Long> rightBestAuctions = right.getKey();
                    long rightCount = right.getValue();
                    if (leftCount > rightCount) {
                        return left;
                    } else if (leftCount < rightCount) {
                        return right;
                    } else {
                        List<Long> newBestAuctions = new ArrayList<>();
                        newBestAuctions.addAll(leftBestAuctions);
                        newBestAuctions.addAll(rightBestAuctions);
                        return KV.of(newBestAuctions, leftCount);
                    }
                }
            }).withoutDefaults().withFanout(configuration.fanout))

            // Project into result.
            .apply(name + ".Select", ParDo.of(new DoFn<KV<List<Long>, Long>, AuctionCount>() {
                @ProcessElement
                public void processElement(ProcessContext c) {
                    long count = c.element().getValue();
                    for (long auction : c.element().getKey()) {
                        c.output(new AuctionCount(auction, count));
                    }
                }
            }));
}
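
Query4 and Query5 above share the same sliding-window idiom: the window size and period come from the configuration in seconds and are converted with standardSeconds. A minimal sketch of the idiom in isolation (assuming a hypothetical PCollection<Bid> named bids; the literal values stand in for configuration.windowSizeSec and configuration.windowPeriodSec):

import org.apache.beam.sdk.transforms.windowing.SlidingWindows;
import org.apache.beam.sdk.transforms.windowing.Window;
import org.joda.time.Duration;

// 10-second windows starting every 2 seconds; each element therefore
// falls into 10 / 2 = 5 overlapping windows.
bids.apply(Window.into(
        SlidingWindows.of(Duration.standardSeconds(10))
                .every(Duration.standardSeconds(2))));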

From source file:org.apache.beam.sdk.nexmark.queries.Query7.java

License:Apache License

@Override
public PCollection<Bid> expand(PCollection<Event> events) {
    // Window the bids.
    PCollection<Bid> slidingBids = events.apply(NexmarkQueryUtil.JUST_BIDS)
            .apply(Window.into(FixedWindows.of(Duration.standardSeconds(configuration.windowSizeSec))));

    // Find the largest price in all bids.
    // NOTE: It would be more efficient to write this query much as we did for Query5, using
    // a binary combiner to accumulate the bids with maximal price. As written this query
    // requires an additional scan per window, with the associated cost of snapshotted state and
    // its I/O. We'll keep this implementation since it illustrates the use of side inputs.
    final PCollectionView<Long> maxPriceView = slidingBids.apply("BidToPrice", NexmarkQueryUtil.BID_TO_PRICE)
            .apply(Max.longsGlobally().withFanout(configuration.fanout).asSingletonView());

    return slidingBids
            // Select all bids which have that maximum price (there may be more than one).
            .apply(name + ".Select", ParDo.of(new DoFn<Bid, Bid>() {
                @ProcessElement
                public void processElement(ProcessContext c) {
                    long maxPrice = c.sideInput(maxPriceView);
                    Bid bid = c.element();
                    if (bid.price == maxPrice) {
                        c.output(bid);
                    }
                }
            }).withSideInputs(maxPriceView));
}

From source file:org.apache.beam.sdk.nexmark.queries.Query8.java

License:Apache License

@Override
public PCollection<IdNameReserve> expand(PCollection<Event> events) {
    // Window and key new people by their id.
    PCollection<KV<Long, Person>> personsById = events.apply(NexmarkQueryUtil.JUST_NEW_PERSONS)
            .apply("Query8.WindowPersons",
                    Window.into(FixedWindows.of(Duration.standardSeconds(configuration.windowSizeSec))))
            .apply("PersonById", NexmarkQueryUtil.PERSON_BY_ID);

    // Window and key new auctions by their id.
    PCollection<KV<Long, Auction>> auctionsBySeller = events.apply(NexmarkQueryUtil.JUST_NEW_AUCTIONS)
            .apply("Query8.WindowAuctions",
                    Window.into(FixedWindows.of(Duration.standardSeconds(configuration.windowSizeSec))))
            .apply("AuctionBySeller", NexmarkQueryUtil.AUCTION_BY_SELLER);

    // Join people and auctions and project the person id, name and auction reserve price.
    return KeyedPCollectionTuple.of(NexmarkQueryUtil.PERSON_TAG, personsById)
            .and(NexmarkQueryUtil.AUCTION_TAG, auctionsBySeller).apply(CoGroupByKey.create())
            .apply(name + ".Select", ParDo.of(new DoFn<KV<Long, CoGbkResult>, IdNameReserve>() {
                @ProcessElement
                public void processElement(ProcessContext c) {
                    @Nullable
                    Person person = c.element().getValue().getOnly(NexmarkQueryUtil.PERSON_TAG, null);
                    if (person == null) {
                        // Person was not created in last window period.
                        return;
                    }
                    for (Auction auction : c.element().getValue().getAll(NexmarkQueryUtil.AUCTION_TAG)) {
                        c.output(new IdNameReserve(person.id, person.name, auction.reserve));
                    }
                }
            }));
}

From source file:org.apache.beam.sdk.nexmark.queries.sql.SqlQuery3.java

License:Apache License

private PCollection<Event> fixedWindows(PCollection<Event> events) {
    return events.apply(Window.into(FixedWindows.of(Duration.standardSeconds(configuration.windowSizeSec))));
}

From source file:org.apache.cloudstack.storage.configdrive.ConfigDriveBuilder.java

License:Apache License

/**
 *  Generates the ISO file that has the tempDir content.
 *
 *  Max allowed file size of config drive is 64MB [1]. Therefore, if the ISO is bigger than that, we throw a {@link CloudRuntimeException}.
 *  [1] https://docs.openstack.org/project-install-guide/baremetal/draft/configdrive.html
 */
static String generateAndRetrieveIsoAsBase64Iso(String isoFileName, String driveLabel, String tempDirName)
        throws IOException {
    File tmpIsoStore = new File(tempDirName, isoFileName);
    Script command = new Script(getProgramToGenerateIso(), Duration.standardSeconds(300), LOG);
    command.add("-o", tmpIsoStore.getAbsolutePath());
    command.add("-ldots");
    command.add("-allow-lowercase");
    command.add("-allow-multidot");
    command.add("-cache-inodes"); // Enable caching inode and device numbers to find hard links to files.
    command.add("-l");
    command.add("-quiet");
    command.add("-J");
    command.add("-r");
    command.add("-V", driveLabel);
    command.add(tempDirName);
    LOG.debug("Executing config drive creation command: " + command.toString());
    String result = command.execute();
    if (StringUtils.isNotBlank(result)) {
        String errMsg = "Unable to create iso file: " + isoFileName + " due to ge" + result;
        LOG.warn(errMsg);
        throw new CloudRuntimeException(errMsg);
    }
    File tmpIsoFile = new File(tmpIsoStore.getAbsolutePath());
    if (tmpIsoFile.length() > (64L * 1024L * 1024L)) {
        throw new CloudRuntimeException("Config drive file exceeds maximum allowed size of 64MB");
    }
    return fileToBase64String(tmpIsoFile);
}

From source file:org.apache.cloudstack.storage.configdrive.ConfigDriveBuilder.java

License:Apache License

/**
 * Hard link the user_data.txt file with the user_data file in the OpenStack directory.
 */
static void linkUserData(String tempDirName) {
    String userDataFilePath = tempDirName + ConfigDrive.cloudStackConfigDriveName + "userdata/user_data.txt";
    File file = new File(userDataFilePath);
    if (file.exists()) {
        Script hardLink = new Script("ln", Duration.standardSeconds(300), LOG);
        hardLink.add(userDataFilePath);
        hardLink.add(tempDirName + ConfigDrive.openStackConfigDriveName + "user_data");
        LOG.debug("execute command: " + hardLink.toString());

        String executionResult = hardLink.execute();
        if (StringUtils.isNotBlank(executionResult)) {
            throw new CloudRuntimeException("Unable to create user_data link due to " + executionResult);
        }
    }
}

From source file:org.apache.druid.client.DirectDruidClient.java

License:Apache License

@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final ResponseContext context) {
    final Query<T> query = queryPlus.getQuery();
    QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
    boolean isBySegment = QueryContexts.isBySegment(query);
    final JavaType queryResultType = isBySegment ? toolChest.getBySegmentResultType()
            : toolChest.getBaseResultType();

    final ListenableFuture<InputStream> future;
    final String url = StringUtils.format("%s://%s/druid/v2/", scheme, host);
    final String cancelUrl = StringUtils.format("%s://%s/druid/v2/%s", scheme, host, query.getId());

    try {
        log.debug("Querying queryId[%s] url[%s]", query.getId(), url);

        final long requestStartTimeNs = System.nanoTime();
        final long timeoutAt = query.getContextValue(QUERY_FAIL_TIME);
        final long maxScatterGatherBytes = QueryContexts.getMaxScatterGatherBytes(query);
        final AtomicLong totalBytesGathered = (AtomicLong) context
                .get(ResponseContext.Key.QUERY_TOTAL_BYTES_GATHERED);
        final long maxQueuedBytes = QueryContexts.getMaxQueuedBytes(query, 0);
        final boolean usingBackpressure = maxQueuedBytes > 0;

        final HttpResponseHandler<InputStream, InputStream> responseHandler = new HttpResponseHandler<InputStream, InputStream>() {
            private final AtomicLong totalByteCount = new AtomicLong(0);
            private final AtomicLong queuedByteCount = new AtomicLong(0);
            private final AtomicLong channelSuspendedTime = new AtomicLong(0);
            private final BlockingQueue<InputStreamHolder> queue = new LinkedBlockingQueue<>();
            private final AtomicBoolean done = new AtomicBoolean(false);
            private final AtomicReference<String> fail = new AtomicReference<>();
            private final AtomicReference<TrafficCop> trafficCopRef = new AtomicReference<>();

            private QueryMetrics<? super Query<T>> queryMetrics;
            private long responseStartTimeNs;

            private QueryMetrics<? super Query<T>> acquireResponseMetrics() {
                if (queryMetrics == null) {
                    queryMetrics = toolChest.makeMetrics(query);
                    queryMetrics.server(host);
                }
                return queryMetrics;
            }

            /**
             * Queue a buffer. Returns true if we should keep reading, false otherwise.
             */
            private boolean enqueue(ChannelBuffer buffer, long chunkNum) throws InterruptedException {
                // Increment queuedByteCount before queueing the object, so queuedByteCount is at least as high as
                // the actual number of queued bytes at any particular time.
                final InputStreamHolder holder = InputStreamHolder.fromChannelBuffer(buffer, chunkNum);
                final long currentQueuedByteCount = queuedByteCount.addAndGet(holder.getLength());
                queue.put(holder);

                // True if we should keep reading.
                return !usingBackpressure || currentQueuedByteCount < maxQueuedBytes;
            }

            private InputStream dequeue() throws InterruptedException {
                final InputStreamHolder holder = queue.poll(checkQueryTimeout(), TimeUnit.MILLISECONDS);
                if (holder == null) {
                    throw new RE("Query[%s] url[%s] timed out.", query.getId(), url);
                }

                final long currentQueuedByteCount = queuedByteCount.addAndGet(-holder.getLength());
                if (usingBackpressure && currentQueuedByteCount < maxQueuedBytes) {
                    long backPressureTime = Preconditions
                            .checkNotNull(trafficCopRef.get(), "No TrafficCop, how can this be?")
                            .resume(holder.getChunkNum());
                    channelSuspendedTime.addAndGet(backPressureTime);
                }

                return holder.getStream();
            }

            @Override
            public ClientResponse<InputStream> handleResponse(HttpResponse response, TrafficCop trafficCop) {
                trafficCopRef.set(trafficCop);
                checkQueryTimeout();
                checkTotalBytesLimit(response.getContent().readableBytes());

                log.debug("Initial response from url[%s] for queryId[%s]", url, query.getId());
                responseStartTimeNs = System.nanoTime();
                acquireResponseMetrics().reportNodeTimeToFirstByte(responseStartTimeNs - requestStartTimeNs)
                        .emit(emitter);

                final boolean continueReading;
                try {
                    final String responseContext = response.headers()
                            .get(QueryResource.HEADER_RESPONSE_CONTEXT);
                    // context may be null in case of error or query timeout
                    if (responseContext != null) {
                        context.merge(ResponseContext.deserialize(responseContext, objectMapper));
                    }
                    continueReading = enqueue(response.getContent(), 0L);
                } catch (final IOException e) {
                    log.error(e, "Error parsing response context from url [%s]", url);
                    return ClientResponse.finished(new InputStream() {
                        @Override
                        public int read() throws IOException {
                            throw e;
                        }
                    });
                } catch (InterruptedException e) {
                    log.error(e, "Queue appending interrupted");
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e);
                }
                totalByteCount.addAndGet(response.getContent().readableBytes());
                return ClientResponse.finished(new SequenceInputStream(new Enumeration<InputStream>() {
                    @Override
                    public boolean hasMoreElements() {
                        if (fail.get() != null) {
                            throw new RE(fail.get());
                        }
                        checkQueryTimeout();

                        // Done is always true until the last stream has been put in the queue.
                        // Then the stream should be spouting good InputStreams.
                        synchronized (done) {
                            return !done.get() || !queue.isEmpty();
                        }
                    }

                    @Override
                    public InputStream nextElement() {
                        if (fail.get() != null) {
                            throw new RE(fail.get());
                        }

                        try {
                            return dequeue();
                        } catch (InterruptedException e) {
                            Thread.currentThread().interrupt();
                            throw new RuntimeException(e);
                        }
                    }
                }), continueReading);
            }

            @Override
            public ClientResponse<InputStream> handleChunk(ClientResponse<InputStream> clientResponse,
                    HttpChunk chunk, long chunkNum) {
                checkQueryTimeout();

                final ChannelBuffer channelBuffer = chunk.getContent();
                final int bytes = channelBuffer.readableBytes();

                checkTotalBytesLimit(bytes);

                boolean continueReading = true;
                if (bytes > 0) {
                    try {
                        continueReading = enqueue(channelBuffer, chunkNum);
                    } catch (InterruptedException e) {
                        log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]",
                                url);
                        Thread.currentThread().interrupt();
                        throw new RuntimeException(e);
                    }
                    totalByteCount.addAndGet(bytes);
                }

                return ClientResponse.finished(clientResponse.getObj(), continueReading);
            }

            @Override
            public ClientResponse<InputStream> done(ClientResponse<InputStream> clientResponse) {
                long stopTimeNs = System.nanoTime();
                long nodeTimeNs = stopTimeNs - requestStartTimeNs;
                final long nodeTimeMs = TimeUnit.NANOSECONDS.toMillis(nodeTimeNs);
                log.debug(
                        "Completed queryId[%s] request to url[%s] with %,d bytes returned in %,d millis [%,f b/s].",
                        query.getId(), url, totalByteCount.get(), nodeTimeMs,
                        // Floating math; division by zero will yield Inf, not exception
                        totalByteCount.get() / (0.001 * nodeTimeMs));
                QueryMetrics<? super Query<T>> responseMetrics = acquireResponseMetrics();
                responseMetrics.reportNodeTime(nodeTimeNs);
                responseMetrics.reportNodeBytes(totalByteCount.get());

                if (usingBackpressure) {
                    responseMetrics.reportBackPressureTime(channelSuspendedTime.get());
                }

                responseMetrics.emit(emitter);
                synchronized (done) {
                    try {
                        // An empty buffer is put at the end to give SequenceInputStream.close() something
                        // to close out after done is set to true, regardless of the rest of the stream's state.
                        queue.put(InputStreamHolder.fromChannelBuffer(ChannelBuffers.EMPTY_BUFFER,
                                Long.MAX_VALUE));
                    } catch (InterruptedException e) {
                        log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]",
                                url);
                        Thread.currentThread().interrupt();
                        throw new RuntimeException(e);
                    } finally {
                        done.set(true);
                    }
                }
                return ClientResponse.finished(clientResponse.getObj());
            }

            @Override
            public void exceptionCaught(final ClientResponse<InputStream> clientResponse, final Throwable e) {
                String msg = StringUtils.format("Query[%s] url[%s] failed with exception msg [%s]",
                        query.getId(), url, e.getMessage());
                setupResponseReadFailure(msg, e);
            }

            private void setupResponseReadFailure(String msg, Throwable th) {
                fail.set(msg);
                queue.clear();
                queue.offer(InputStreamHolder.fromStream(new InputStream() {
                    @Override
                    public int read() throws IOException {
                        if (th != null) {
                            throw new IOException(msg, th);
                        } else {
                            throw new IOException(msg);
                        }
                    }
                }, -1, 0));
            }

            // Returns remaining timeout or throws exception if timeout already elapsed.
            private long checkQueryTimeout() {
                long timeLeft = timeoutAt - System.currentTimeMillis();
                if (timeLeft <= 0) {
                    String msg = StringUtils.format("Query[%s] url[%s] timed out.", query.getId(), url);
                    setupResponseReadFailure(msg, null);
                    throw new RE(msg);
                } else {
                    return timeLeft;
                }
            }

            private void checkTotalBytesLimit(long bytes) {
                if (maxScatterGatherBytes < Long.MAX_VALUE
                        && totalBytesGathered.addAndGet(bytes) > maxScatterGatherBytes) {
                    String msg = StringUtils.format("Query[%s] url[%s] max scatter-gather bytes limit reached.",
                            query.getId(), url);
                    setupResponseReadFailure(msg, null);
                    throw new RE(msg);
                }
            }
        };

        long timeLeft = timeoutAt - System.currentTimeMillis();

        if (timeLeft <= 0) {
            throw new RE("Query[%s] url[%s] timed out.", query.getId(), url);
        }

        future = httpClient.go(
                new Request(HttpMethod.POST, new URL(url))
                        .setContent(objectMapper.writeValueAsBytes(QueryContexts.withTimeout(query, timeLeft)))
                        .setHeader(HttpHeaders.Names.CONTENT_TYPE,
                                isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE
                                        : MediaType.APPLICATION_JSON),
                responseHandler, Duration.millis(timeLeft));

        queryWatcher.registerQuery(query, future);

        openConnections.getAndIncrement();
        Futures.addCallback(future, new FutureCallback<InputStream>() {
            @Override
            public void onSuccess(InputStream result) {
                openConnections.getAndDecrement();
            }

            @Override
            public void onFailure(Throwable t) {
                openConnections.getAndDecrement();
                if (future.isCancelled()) {
                    // forward the cancellation to underlying queriable node
                    try {
                        StatusResponseHolder res = httpClient
                                .go(new Request(HttpMethod.DELETE, new URL(cancelUrl))
                                        .setContent(objectMapper.writeValueAsBytes(query))
                                        .setHeader(HttpHeaders.Names.CONTENT_TYPE,
                                                isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE
                                                        : MediaType.APPLICATION_JSON),
                                        StatusResponseHandler.getInstance(), Duration.standardSeconds(1))
                                .get(1, TimeUnit.SECONDS);

                        if (res.getStatus().getCode() >= 500) {
                            throw new RE("Error cancelling query[%s]: queriable node returned status[%d] [%s].",
                                    res.getStatus().getCode(), res.getStatus().getReasonPhrase());
                        }
                    } catch (IOException | ExecutionException | InterruptedException | TimeoutException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        });
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    Sequence<T> retVal = new BaseSequence<>(new BaseSequence.IteratorMaker<T, JsonParserIterator<T>>() {
        @Override
        public JsonParserIterator<T> make() {
            return new JsonParserIterator<T>(queryResultType, future, url, query, host,
                    toolChest.decorateObjectMapper(objectMapper, query), null);
        }

        @Override
        public void cleanup(JsonParserIterator<T> iterFromMake) {
            CloseQuietly.close(iterFromMake);
        }
    });

    // bySegment queries are de-serialized after caching results in order to
    // avoid the cost of de-serializing and then re-serializing again when adding to cache
    if (!isBySegment) {
        retVal = Sequences.map(retVal,
                toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing()));
    }

    return retVal;
}
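
Note that this client mixes two Joda-Time factories: Duration.millis(timeLeft) for the remaining query timeout and Duration.standardSeconds(1) for the cancel request. The two are interchangeable up to the fixed 1000 ms/s conversion, as this one-line check (runnable anywhere a Joda Duration is in scope) illustrates:

import org.joda.time.Duration;

// standardSeconds(n) and millis(n * 1000) produce equal Durations.
System.out.println(Duration.standardSeconds(1).equals(Duration.millis(1000))); // true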

From source file:org.apache.druid.indexing.common.IndexTaskClient.java

License:Apache License

/**
 * Sends an HTTP request to the task of the specified {@code taskId} and returns a response if it succeeded.
 */
private FullResponseHolder submitRequest(String taskId, @Nullable String mediaType, // nullable if content is empty
        HttpMethod method, String encodedPathSuffix, @Nullable String encodedQueryString, byte[] content,
        boolean retry) throws IOException, ChannelException, NoTaskLocationException {
    final RetryPolicy retryPolicy = retryPolicyFactory.makeRetryPolicy();

    while (true) {
        String path = StringUtils.format("%s/%s/%s", BASE_PATH, StringUtils.urlEncode(taskId),
                encodedPathSuffix);

        Optional<TaskStatus> status = taskInfoProvider.getTaskStatus(taskId);
        if (!status.isPresent() || !status.get().isRunnable()) {
            throw new TaskNotRunnableException(
                    StringUtils.format("Aborting request because task [%s] is not runnable", taskId));
        }

        final TaskLocation location = taskInfoProvider.getTaskLocation(taskId);
        if (location.equals(TaskLocation.unknown())) {
            throw new NoTaskLocationException(
                    StringUtils.format("No TaskLocation available for task [%s]", taskId));
        }

        final Request request = createRequest(taskId, location, path, encodedQueryString, method, mediaType,
                content);

        FullResponseHolder response = null;
        try {
            // Netty throws some annoying exceptions if a connection can't be opened, which happens relatively frequently
            // for tasks that happen to still be starting up, so test the connection first to keep the logs clean.
            checkConnection(request.getUrl().getHost(), request.getUrl().getPort());

            response = submitRequest(request);

            int responseCode = response.getStatus().getCode();
            if (responseCode / 100 == 2) {
                return response;
            } else if (responseCode == 400) { // don't bother retrying if it's a bad request
                throw new IAE("Received 400 Bad Request with body: %s", response.getContent());
            } else {
                throw new IOE("Received status [%d] and content [%s]", responseCode, response.getContent());
            }
        } catch (IOException | ChannelException e) {

            // Since workers are free to move tasks around to different ports, there is a chance that a task may have been
            // moved but our view of its location has not been updated yet from ZK. To detect this case, we send a header
            // identifying our expected recipient in the request; if this doesn't correspond to the worker we messaged, the
            // worker will return an HTTP 404 with its ID in the response header. If we get a mismatching task ID, then
            // we will wait for a short period then retry the request indefinitely, expecting the task's location to
            // eventually be updated.

            final Duration delay;
            if (response != null && response.getStatus().equals(HttpResponseStatus.NOT_FOUND)) {
                String headerId = StringUtils
                        .urlDecode(response.getResponse().headers().get(ChatHandlerResource.TASK_ID_HEADER));
                if (headerId != null && !headerId.equals(taskId)) {
                    log.warn("Expected worker to have taskId [%s] but has taskId [%s], will retry in [%d]s",
                            taskId, headerId, TASK_MISMATCH_RETRY_DELAY_SECONDS);
                    delay = Duration.standardSeconds(TASK_MISMATCH_RETRY_DELAY_SECONDS);
                } else {
                    delay = retryPolicy.getAndIncrementRetryDelay();
                }
            } else {
                delay = retryPolicy.getAndIncrementRetryDelay();
            }
            final String urlForLog = request.getUrl().toString();
            if (!retry) {
                // if retry=false, we probably aren't too concerned if the operation doesn't succeed (i.e. the request was
                // for informational purposes only) so don't log a scary stack trace
                log.info("submitRequest failed for [%s], with message [%s]", urlForLog, e.getMessage());
                throw e;
            } else if (delay == null) {
                log.warn(e, "Retries exhausted for [%s], last exception:", urlForLog);
                throw e;
            } else {
                try {
                    final long sleepTime = delay.getMillis();
                    log.debug("Bad response HTTP [%s] from [%s]; will try again in [%s] (body/exception: [%s])",
                            (response != null ? response.getStatus().getCode() : "no response"), urlForLog,
                            new Duration(sleepTime).toString(),
                            (response != null ? response.getContent() : e.getMessage()));
                    Thread.sleep(sleepTime);
                } catch (InterruptedException e2) {
                    Thread.currentThread().interrupt();
                    e.addSuppressed(e2);
                    throw new RuntimeException(e);
                }
            }
        } catch (NoTaskLocationException e) {
            log.info(
                    "No TaskLocation available for task [%s], this task may not have been assigned to a worker yet or "
                            + "may have already completed",
                    taskId);
            throw e;
        } catch (Exception e) {
            log.warn(e, "Exception while sending request");
            throw e;
        }
    }
}