Example usage for java.util.concurrent CompletionService poll

Introduction

This page collects usage examples for the java.util.concurrent CompletionService.poll(long, TimeUnit) method.

Prototype

Future<V> poll(long timeout, TimeUnit unit) throws InterruptedException;

Document

Retrieves and removes the Future representing the next completed task, waiting if necessary up to the specified wait time if none are yet present.
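
Before the real-world usages below, here is a minimal, self-contained sketch of the method (all class and variable names in this sketch are illustrative): three tasks are submitted, and the loop drains results in completion order, treating a null return from poll as a timeout.

import java.util.concurrent.*;

public class PollDemo {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ExecutorService executor = Executors.newFixedThreadPool(3);
        CompletionService<Integer> completionService = new ExecutorCompletionService<>(executor);

        // Submit three tasks with different runtimes; results arrive in completion order.
        for (int i = 1; i <= 3; i++) {
            final int taskId = i;
            completionService.submit(() -> {
                Thread.sleep(taskId * 100L);
                return taskId;
            });
        }

        for (int i = 0; i < 3; i++) {
            // Wait up to one second for the next completed task; null means timeout.
            Future<Integer> future = completionService.poll(1, TimeUnit.SECONDS);
            if (future == null) {
                System.out.println("no task completed within the timeout");
                break;
            }
            System.out.println("completed: " + future.get());
        }
        executor.shutdown();
    }
}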

Usage

From source file:com.mellanox.r4h.DFSInputStream.java

/**
 * Like {@link #fetchBlockByteRange(LocatedBlock, long, long, byte[], int, Map)} except we start up a second, parallel, 'hedged' read
 * if the first read is taking longer than configured amount of
 * time. We then wait on whichever read returns first.
 */
private void hedgedFetchBlockByteRange(LocatedBlock block, long start, long end, byte[] buf, int offset,
        Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap) throws IOException {
    ArrayList<Future<ByteBuffer>> futures = new ArrayList<Future<ByteBuffer>>();
    CompletionService<ByteBuffer> hedgedService = new ExecutorCompletionService<ByteBuffer>(
            dfsClient.getHedgedReadsThreadPool());
    ArrayList<DatanodeInfo> ignored = new ArrayList<DatanodeInfo>();
    ByteBuffer bb = null;
    int len = (int) (end - start + 1);
    int hedgedReadId = 0;
    block = getBlockAt(block.getStartOffset());
    while (true) {
        // see HDFS-6591, this metric is used to verify/catch unnecessary loops
        hedgedReadOpsLoopNumForTesting++;
        DNAddrPair chosenNode = null;
        // there is no request already executing.
        if (futures.isEmpty()) {
            // chooseDataNode is a commitment. If no node, we go to
            // the NN to reget block locations. Only go here on first read.
            chosenNode = chooseDataNode(block, ignored);
            bb = ByteBuffer.wrap(buf, offset, len);
            Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(chosenNode, block, start, end, bb,
                    corruptedBlockMap, hedgedReadId++);
            Future<ByteBuffer> firstRequest = hedgedService.submit(getFromDataNodeCallable);
            futures.add(firstRequest);
            try {
                Future<ByteBuffer> future = hedgedService.poll(dfsClient.getHedgedReadTimeout(),
                        TimeUnit.MILLISECONDS);
                if (future != null) {
                    future.get();
                    return;
                }
                if (DFSClient.LOG.isDebugEnabled()) {
                    DFSClient.LOG.debug("Waited " + dfsClient.getHedgedReadTimeout() + "ms to read from "
                            + chosenNode.info + "; spawning hedged read");
                }
                // Ignore this node on next go around.
                ignored.add(chosenNode.info);
                dfsClient.getHedgedReadMetrics().incHedgedReadOps();
                continue; // no need to refresh block locations
            } catch (InterruptedException e) {
                // Ignore
            } catch (ExecutionException e) {
                // Ignore already logged in the call.
            }
        } else {
            // We are starting up a 'hedged' read. We have a read already
            // ongoing. Call getBestNodeDNAddrPair instead of chooseDataNode.
            // If no nodes to do hedged reads against, pass.
            try {
                try {
                    chosenNode = getBestNodeDNAddrPair(block, ignored);
                } catch (IOException ioe) {
                    chosenNode = chooseDataNode(block, ignored);
                }
                bb = ByteBuffer.allocate(len);
                Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(chosenNode, block, start, end,
                        bb, corruptedBlockMap, hedgedReadId++);
                Future<ByteBuffer> oneMoreRequest = hedgedService.submit(getFromDataNodeCallable);
                futures.add(oneMoreRequest);
            } catch (IOException ioe) {
                if (DFSClient.LOG.isDebugEnabled()) {
                    DFSClient.LOG.debug("Failed getting node for hedged read: " + ioe.getMessage());
                }
            }
            // if not succeeded. Submit callables for each datanode in a loop, wait
            // for a fixed interval and get the result from the fastest one.
            try {
                ByteBuffer result = getFirstToComplete(hedgedService, futures);
                // cancel the rest.
                cancelAll(futures);
                if (result.array() != buf) { // compare the array pointers
                    dfsClient.getHedgedReadMetrics().incHedgedReadWins();
                    System.arraycopy(result.array(), result.position(), buf, offset, len);
                } else {
                    dfsClient.getHedgedReadMetrics().incHedgedReadOps();
                }
                return;
            } catch (InterruptedException ie) {
                // Ignore and retry
            }
            // We got here if exception. Ignore this node on next go around IFF
            // we found a chosenNode to hedge read against.
            if (chosenNode != null && chosenNode.info != null) {
                ignored.add(chosenNode.info);
            }
        }
    }
}
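
The hedged-read logic above boils down to a small, reusable pattern: submit a primary request, poll with a delay, and only launch a duplicate ("hedged") request if the primary has not finished in time. A minimal sketch of that pattern, assuming nothing beyond java.util.concurrent (the names are illustrative, not part of the HDFS API):

import java.util.concurrent.*;

class HedgedRequest {
    static <T> T fetchHedged(ExecutorService executor, Callable<T> request, long hedgeDelayMillis)
            throws InterruptedException, ExecutionException {
        CompletionService<T> service = new ExecutorCompletionService<>(executor);
        Future<T> primary = service.submit(request);
        // Give the primary request a head start.
        Future<T> winner = service.poll(hedgeDelayMillis, TimeUnit.MILLISECONDS);
        if (winner == null) {
            // Primary is slow: launch a duplicate and take whichever finishes first.
            Future<T> backup = service.submit(request);
            winner = service.take();
            // Cancel the loser; a fuller implementation would also fall back to it
            // if the winner completed exceptionally.
            (winner == primary ? backup : primary).cancel(true);
        }
        return winner.get();
    }
}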

From source file:nl.b3p.viewer.image.ImageManager.java

public void process() throws Exception {
    ExecutorService threadPool = Executors.newFixedThreadPool(MAX_TREADS);
    CompletionService<ImageCollector> pool = new ExecutorCompletionService<ImageCollector>(threadPool);
    for (ImageCollector ic : ics) {
        if (ic.getStatus() == ImageCollector.NEW) {
            pool.submit(ic);
        }
    }
    // wait for all to complete; each poll waits at most 5 minutes
    for (int i = 0; i < ics.size(); i++) {
        pool.poll(5, TimeUnit.MINUTES).get();
    }
}
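
Note that the loop above assumes every collector finishes within its poll window: poll(5, TimeUnit.MINUTES) returns null on timeout, so poll(...).get() can throw a NullPointerException if an ImageCollector overruns. A defensive variant of that loop (same names as above; TimeoutException is java.util.concurrent.TimeoutException):

    for (int i = 0; i < ics.size(); i++) {
        Future<ImageCollector> f = pool.poll(5, TimeUnit.MINUTES);
        if (f == null) {
            throw new TimeoutException("image collector did not finish within 5 minutes");
        }
        f.get();
    }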

From source file:org.apache.hadoop.hdfs.DFSInputStream.java

/**
 * Like {@link #fetchBlockByteRange(LocatedBlock, long, long, byte[],
 * int, Map)} except we start up a second, parallel, 'hedged' read
 * if the first read is taking longer than configured amount of
 * time.  We then wait on whichever read returns first.
 *
 * @param block
 * @param start
 * @param end
 * @param buf
 * @param offset
 * @param corruptedBlockMap
 * @throws IOException
 */
private void hedgedFetchBlockByteRange(long blockStartOffset, long start, long end, byte[] buf, int offset,
        Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap) throws IOException {
    ArrayList<Future<ByteBuffer>> futures = new ArrayList<Future<ByteBuffer>>();
    CompletionService<ByteBuffer> hedgedService = new ExecutorCompletionService<ByteBuffer>(
            dfsClient.getHedgedReadsThreadPool());
    ArrayList<DatanodeInfo> ignored = new ArrayList<DatanodeInfo>();
    ByteBuffer bb = null;
    int len = (int) (end - start + 1);
    int hedgedReadId = 0;
    LocatedBlock block = getBlockAt(blockStartOffset);
    while (true) {
        // see HDFS-6591, this metric is used to verify/catch unnecessary loops
        hedgedReadOpsLoopNumForTesting++;
        DNAddrPair chosenNode = null;
        // there is no request already executing.
        if (futures.isEmpty()) {
            // chooseDataNode is a commitment. If no node, we go to
            // the NN to reget block locations. Only go here on first read.
            chosenNode = chooseDataNode(block, ignored);
            bb = ByteBuffer.wrap(buf, offset, len);
            Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(chosenNode,
                    block.getStartOffset(), start, end, bb, corruptedBlockMap, hedgedReadId++);
            Future<ByteBuffer> firstRequest = hedgedService.submit(getFromDataNodeCallable);
            futures.add(firstRequest);
            try {
                Future<ByteBuffer> future = hedgedService.poll(dfsClient.getHedgedReadTimeout(),
                        TimeUnit.MILLISECONDS);
                if (future != null) {
                    future.get();
                    return;
                }
                if (DFSClient.LOG.isDebugEnabled()) {
                    DFSClient.LOG.debug("Waited " + dfsClient.getHedgedReadTimeout() + "ms to read from "
                            + chosenNode.info + "; spawning hedged read");
                }
                // Ignore this node on next go around.
                ignored.add(chosenNode.info);
                dfsClient.getHedgedReadMetrics().incHedgedReadOps();
                continue; // no need to refresh block locations
            } catch (InterruptedException e) {
                // Ignore
            } catch (ExecutionException e) {
                // Ignore already logged in the call.
            }
        } else {
            // We are starting up a 'hedged' read. We have a read already
            // ongoing. Call getBestNodeDNAddrPair instead of chooseDataNode.
            // If no nodes to do hedged reads against, pass.
            try {
                try {
                    chosenNode = getBestNodeDNAddrPair(block.getLocations(), ignored);
                } catch (IOException ioe) {
                    chosenNode = chooseDataNode(block, ignored);
                }
                bb = ByteBuffer.allocate(len);
                Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(chosenNode,
                        block.getStartOffset(), start, end, bb, corruptedBlockMap, hedgedReadId++);
                Future<ByteBuffer> oneMoreRequest = hedgedService.submit(getFromDataNodeCallable);
                futures.add(oneMoreRequest);
            } catch (IOException ioe) {
                if (DFSClient.LOG.isDebugEnabled()) {
                    DFSClient.LOG.debug("Failed getting node for hedged read: " + ioe.getMessage());
                }
            }
            // if not succeeded. Submit callables for each datanode in a loop, wait
            // for a fixed interval and get the result from the fastest one.
            try {
                ByteBuffer result = getFirstToComplete(hedgedService, futures);
                // cancel the rest.
                cancelAll(futures);
                if (result.array() != buf) { // compare the array pointers
                    dfsClient.getHedgedReadMetrics().incHedgedReadWins();
                    System.arraycopy(result.array(), result.position(), buf, offset, len);
                } else {
                    dfsClient.getHedgedReadMetrics().incHedgedReadOps();
                }
                return;
            } catch (InterruptedException ie) {
                // Ignore and retry
            }
            // We got here if exception. Ignore this node on next go around IFF
            // we found a chosenNode to hedge read against.
            if (chosenNode != null && chosenNode.info != null) {
                ignored.add(chosenNode.info);
            }
        }
    }
}

From source file:org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer.java

protected void localizeFiles(LocalizationProtocol nodemanager, CompletionService<Path> cs,
        UserGroupInformation ugi) throws IOException, YarnException {
    while (true) {
        try {
            LocalizerStatus status = createStatus();
            LocalizerHeartbeatResponse response = nodemanager.heartbeat(status);
            switch (response.getLocalizerAction()) {
            case LIVE:
                List<ResourceLocalizationSpec> newRsrcs = response.getResourceSpecs();
                for (ResourceLocalizationSpec newRsrc : newRsrcs) {
                    if (!pendingResources.containsKey(newRsrc.getResource())) {
                        pendingResources.put(newRsrc.getResource(),
                                cs.submit(download(new Path(newRsrc.getDestinationDirectory().getFile()),
                                        newRsrc.getResource(), ugi)));
                    }
                }
                break;
            case DIE:
                // killall running localizations
                for (Future<Path> pending : pendingResources.values()) {
                    pending.cancel(true);
                }
                status = createStatus();
                // ignore response while dying.
                try {
                    nodemanager.heartbeat(status);
                } catch (YarnException e) {
                    // Cannot do anything about this during death stage, let's just log
                    // it.
                    e.printStackTrace(System.out);
                    LOG.error("Heartbeat failed while dying: ", e);
                }
                return;
            }
            cs.poll(1000, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            return;
        } catch (YarnException e) {
            // TODO cleanup
            throw e;
        }
    }
}
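
Here the result of cs.poll(1000, TimeUnit.MILLISECONDS) is deliberately discarded: the call serves as a bounded pause between heartbeats, while completion is tracked through the pendingResources map instead. When the completed Future itself matters, the same pacing call is typically followed by a non-blocking drain, sketched below (handleCompletedResource is a hypothetical handler, not part of the YARN code):

    // Wait up to one second for the first completion, then drain whatever is ready.
    Future<Path> done = cs.poll(1000, TimeUnit.MILLISECONDS);
    while (done != null) {
        handleCompletedResource(done); // hypothetical handler
        done = cs.poll();              // non-blocking overload; null when nothing is ready
    }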

From source file:org.codice.ddf.admin.common.PrioritizedBatchExecutor.java

private Optional<R> getResult(long totalWaitTime, TimeUnit timeUnit,
        List<CompletionService<T>> prioritizedCompletionServices, long endTime, int index) {
    LOGGER.debug("Executing batch {}.", index + 1);

    CompletionService<T> completionService = prioritizedCompletionServices.get(index);
    int currentBatchSize = tasks.get(index).size();

    long lastBatchPollTime = System.currentTimeMillis();
    for (int j = 0; j < currentBatchSize; j++) {

        Future<T> taskFuture;

        if (lastBatchPollTime >= endTime) {
            Optional<R> result = pollRemainingBatches(totalWaitTime, timeUnit, completionService,
                    currentBatchSize, j);
            if (result.isPresent())
                return result;
        }

        if (lastBatchPollTime < endTime) {
            long pollTime = endTime - lastBatchPollTime;

            try {
                LOGGER.debug("\tPolling completion service for batch {} for {} milliseconds.", index + 1,
                        pollTime);

                taskFuture = completionService.poll(pollTime, TimeUnit.MILLISECONDS);
                lastBatchPollTime = System.currentTimeMillis();
            } catch (InterruptedException e) {
                LOGGER.debug("\tThread interrupted while polling completionService. Interrupting thread.", e);

                Thread.currentThread().interrupt();
                continue;
            }

            Optional<R> result = handleTaskResult(taskFuture);
            if (result.isPresent()) {
                LOGGER.debug("\tReturning valid task result {} of {} tasks.", j + 1, currentBatchSize);

                return result;
            }
        }
    }
    return Optional.empty();
}

From source file:org.codice.ddf.catalog.sourcepoller.Poller.java

/**
 * @throws IllegalStateException if unable to wait for polls
 * @throws InterruptedException if the current thread was interrupted
 * @throws CancellationException if the task to wait for the loader {@link Callable<V>} to be
 *     complete was cancelled
 * @throws ExecutionException if the task to wait for the loader {@link Callable<V>} threw an
 *     exception
 * @throws PollerException if unable to commit the value for any of the {@code itemsToPoll}
 */
private void doPollItems(long timeout, TimeUnit timeoutTimeUnit, ImmutableMap<K, Callable<V>> itemsToPoll)
        throws InterruptedException, ExecutionException, PollerException {
    removeNoncurrentKeysFromTheCache(itemsToPoll.keySet());

    if (itemsToPoll.isEmpty()) {
        LOGGER.debug("itemsToPoll is empty. Nothing to poll");
        return;
    }

    // Gather any exceptions while loading or committing new values
    final Map<K, Throwable> exceptions = new HashMap<>();
    final CompletionService<Pair<K, Commitable>> completionService = new ExecutorCompletionService<>(
            pollTimeoutWatcherThreadPool);
    final int startedLoadsCount = startLoads(timeout, timeoutTimeUnit, itemsToPoll, completionService,
            exceptions);

    boolean interrupted = false;
    try {
        for (int i = 0; i < startedLoadsCount; i++) {
            // Use CompletionService#poll(long, TimeUnit) instead of CompletionService#take() even
            // though the timeout has already been accounted for in #load(K, Callable<V>, long,
            // TimeUnit) to prevent blocking forever
            // @throws InterruptedException if interrupted while waiting
            final Future<Pair<K, Commitable>> nextCompletedLoadFuture = completionService.poll(timeout,
                    timeoutTimeUnit);
            if (nextCompletedLoadFuture == null) {
                final String message = String.format("Unable to wait for polls to finish within %d %s", timeout,
                        timeoutTimeUnit);
                LOGGER.debug(message);
                throw new IllegalStateException(message);
            }

            // @throws CancellationException if the computation was cancelled
            // @throws ExecutionException if the computation threw an exception
            // @throws InterruptedException if the current thread was interrupted
            final Pair<K, Commitable> nextCompletedLoad = nextCompletedLoadFuture.get();

            try {
                attemptToCommitLoadedValue(nextCompletedLoad.getKey(), nextCompletedLoad.getValue(),
                        exceptions);
            } catch (InterruptedException e) {
                interrupted = true;
            }
        }
    } finally {
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
    }

    if (!exceptions.isEmpty()) {
        throw new PollerException(exceptions);
    }
}

From source file:org.paxle.se.search.impl.SearchProviderManager.java

private void search(ISearchRequest request, ISearchResultCollector results)
        throws InterruptedException, ExecutionException, SearchException {
    if (request == null)
        throw new NullPointerException("The search-request object must not be null");

    final CompletionService<ISearchResult> execCompletionService = new ExecutorCompletionService<ISearchResult>(
            this.execService);

    // determining all search-providers that should be used for the query
    HashSet<String> allowedProviderPIDs = new HashSet<String>(request.getProviderIDs());

    // loop through all providers and pass the request to each one
    List<String> usedProviderPIDs = new ArrayList<String>();
    for (Entry<String, ServiceReference> providerEntry : this.providersRefs.entrySet()) {
        final String providerPID = providerEntry.getKey();
        final ServiceReference providerRef = providerEntry.getValue();

        if (allowedProviderPIDs.size() > 0 && !allowedProviderPIDs.contains(providerPID)) {
            this.logger.debug(String.format("SEProvider '%s' is skipped for search request '%d'.", providerPID,
                    Integer.valueOf(request.getRequestID())));
            continue;
        }

        usedProviderPIDs.add(providerPID);
        execCompletionService.submit(new SearchProviderCallable(this.ctx, providerRef, request));
    }

    if (allowedProviderPIDs.size() == 0) {
        // store the providers we have used to process the search-request
        request.setProviderIDs(usedProviderPIDs);
    }

    // loop through all providers and collect the results
    long searchTimeout = request.getTimeout();
    for (int i = 0; i < usedProviderPIDs.size(); ++i) {
        final long start = System.currentTimeMillis();

        // waiting for the next search result
        final Future<ISearchResult> future = execCompletionService.poll(searchTimeout, TimeUnit.MILLISECONDS);
        if (future != null) {
            final ISearchResult r = future.get();

            if (r != null) {
                final String providerPID = r.getProviderID();
                final int size = r.getSize();
                this.logger
                        .debug(String.format("SEProvider '%s' returned '%d' results for search-request '%d'.",
                                providerPID, Integer.valueOf(size), Integer.valueOf(request.getRequestID())));

                results.collect(r);
            }
        }

        final long diff = System.currentTimeMillis() - start;
        if ((searchTimeout -= diff) <= 0)
            break;
    }
}
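
The budget handling here is the decrementing form of the deadline pattern shown earlier: searchTimeout shrinks by the elapsed time of each poll, so the whole collection loop shares one overall timeout instead of granting each provider a fresh one. A provider that answers after the budget is spent is simply never collected; its Future is left to complete (and be discarded) in the executor.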