Example usage for java.util.concurrent ExecutorCompletionService ExecutorCompletionService(Executor, BlockingQueue)

List of usage examples for the java.util.concurrent ExecutorCompletionService ExecutorCompletionService(Executor, BlockingQueue) constructor

Introduction

On this page you can find example usage for the java.util.concurrent ExecutorCompletionService ExecutorCompletionService(Executor, BlockingQueue) constructor.

Prototype

public ExecutorCompletionService(Executor executor, BlockingQueue<Future<V>> completionQueue) 

Document

Creates an ExecutorCompletionService using the supplied executor for base task execution and the supplied queue as its completion queue.
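
Before the project examples below, here is a minimal, self-contained sketch of this two-argument constructor (all names are illustrative and not taken from the examples): the caller supplies both the Executor and the BlockingQueue, so completed Futures can be read from, or even added to, that queue directly.

import java.util.concurrent.*;

public class CompletionQueueDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        // The completion queue is supplied (and owned) by the caller, not the service.
        BlockingQueue<Future<String>> completionQueue = new LinkedBlockingQueue<Future<String>>();
        ExecutorCompletionService<String> service =
                new ExecutorCompletionService<String>(executor, completionQueue);

        service.submit(() -> "first result");
        service.submit(() -> "second result");

        // take() hands back futures in completion order, not submission order.
        for (int i = 0; i < 2; i++) {
            System.out.println(service.take().get());
        }
        executor.shutdownNow();
    }
}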

Usage

From source file:org.apache.solr.client.solrj.impl.BackupRequestLBHttpSolrServer.java

/**
 * Tries to query a live server from the list provided in Req. Servers in the
 * dead pool are skipped. If a request fails due to an IOException, the server
 * is moved to the dead pool for a certain period of time, or until a test
 * request on that server succeeds.
 *
 * If a request takes longer than backUpRequestDelay, the request will also be
 * sent to the next server in the list; this continues until there is a
 * response, the server list is exhausted, or the number of requests in flight
 * equals maximumConcurrentRequests.
 *
 * Servers are queried in the exact order given (except servers currently in
 * the dead pool are skipped). If no live servers from the provided list
 * remain to be tried, a number of previously skipped dead servers will be
 * tried. Req.getNumDeadServersToTry() controls how many dead servers will be
 * tried.
 *
 * If no live servers are found, a SolrServerException is thrown.
 *
 * @param req
 *          contains both the request as well as the list of servers to query
 *
 * @return the result of the request
 */
@Override
public Rsp request(Req req) throws SolrServerException, IOException {
    ArrayBlockingQueue<Future<RequestTaskState>> queue = new ArrayBlockingQueue<Future<RequestTaskState>>(
            maximumConcurrentRequests + 1);
    ExecutorCompletionService<RequestTaskState> executer = new ExecutorCompletionService<RequestTaskState>(
            threadPoolExecuter, queue);
    List<ServerWrapper> skipped = new ArrayList<ServerWrapper>(req.getNumDeadServersToTry());
    int inFlight = 0;
    RequestTaskState returnedRsp = null;
    Exception ex = null;

    for (String serverStr : req.getServers()) {
        serverStr = normalize(serverStr);
        // if the server is currently a zombie, just skip to the next one
        ServerWrapper wrapper = zombieServers.get(serverStr);
        if (wrapper != null) {
            if (tryDeadServers && skipped.size() < req.getNumDeadServersToTry()) {
                skipped.add(wrapper);
            }
            continue;
        }
        HttpSolrServer server = makeServer(serverStr);
        Callable<RequestTaskState> task = createRequestTask(server, req, false);
        executer.submit(task);
        inFlight++;
        returnedRsp = getResponseIfReady(executer, inFlight >= maximumConcurrentRequests);
        if (returnedRsp == null) {
            // null response signifies that the response took too long.
            log.info("Server :{} did not respond before the backUpRequestDelay time of {} elapsed",
                    server.getBaseURL(), backUpRequestDelay);
            continue;
        }
        inFlight--;
        if (returnedRsp.stateDescription == TaskState.ResponseReceived) {
            return returnedRsp.response;
        } else if (returnedRsp.stateDescription == TaskState.ServerException) {
            ex = returnedRsp.exception;
        } else if (returnedRsp.stateDescription == TaskState.RequestException) {
            throw new SolrServerException(returnedRsp.exception);
        }
    }

    // no response so try the zombie servers
    if (tryDeadServers) {
        if (returnedRsp == null || returnedRsp.stateDescription == TaskState.ServerException) {
            // try the servers we previously skipped
            for (ServerWrapper wrapper : skipped) {
                Callable<RequestTaskState> task = createRequestTask(wrapper.solrServer, req, true);
                executer.submit(task);
                inFlight++;
                returnedRsp = getResponseIfReady(executer, inFlight >= maximumConcurrentRequests);
                if (returnedRsp == null) {
                    log.info("Server :{} did not respond before the backUpRequestDelay time of {} elapsed",
                            wrapper.getKey(), backUpRequestDelay);
                    continue;
                }
                inFlight--;
                if (returnedRsp.stateDescription == TaskState.ResponseReceived) {
                    return returnedRsp.response;
                } else if (returnedRsp.stateDescription == TaskState.ServerException) {
                    ex = returnedRsp.exception;
                } else if (returnedRsp.stateDescription == TaskState.RequestException) {
                    throw new SolrServerException(returnedRsp.exception);
                }
            }
        }
    }

    // All current attempts could be slower than backUpRequestDelay, or the
    // returned response could be from a struggling server, so wait until we
    // get a good response or all tasks are exhausted.
    if (returnedRsp == null || returnedRsp.stateDescription == TaskState.ServerException) {
        while (inFlight > 0) {
            returnedRsp = getResponseIfReady(executer, true);
            inFlight--;
            if (returnedRsp.stateDescription == TaskState.ResponseReceived) {
                return returnedRsp.response;
            } else if (returnedRsp.stateDescription == TaskState.ServerException) {
                ex = returnedRsp.exception;
            } else if (returnedRsp.stateDescription == TaskState.RequestException) {
                throw new SolrServerException(returnedRsp.exception);
            }
        }
    }

    if (ex == null) {
        throw new SolrServerException("No live SolrServers available to handle this request");
    } else {
        throw new SolrServerException(
                "No live SolrServers available to handle this request:" + zombieServers.keySet(), ex);
    }
}
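
The helper getResponseIfReady used above is not part of this listing. Judging from its call sites, it presumably wraps CompletionService.poll with backUpRequestDelay as the timeout, falling back to a blocking take() once the concurrency limit forces the caller to wait; a null return is the signal to start a backup request. A hedged sketch of that idea, with the signature and the explicit delay parameter as assumptions rather than the actual Solr code:

import java.util.concurrent.*;

// Sketch only: an interpretation of the unlisted helper, with the backup delay passed in explicitly.
final class ResponsePolling {
    static <T> T getResponseIfReady(CompletionService<T> completionService, boolean waitUntilDone,
            long backUpRequestDelayMs) throws Exception {
        Future<T> future = waitUntilDone
                ? completionService.take()                                             // block until any task completes
                : completionService.poll(backUpRequestDelayMs, TimeUnit.MILLISECONDS); // or give up after the delay
        return future == null ? null : future.get();                                   // null => launch a backup request
    }
}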

From source file:com.alibaba.otter.node.etl.common.pipe.impl.http.archive.ArchiveBean.java

/**
 * Packs the given file data entries into the target archive file, retrieving
 * each entry's content through the supplied callback. Returns true if at
 * least one entry was actually written to the archive.
 */
@SuppressWarnings("resource")
private boolean doPack(final File targetArchiveFile, List<FileData> fileDatas,
        final ArchiveRetriverCallback<FileData> callback) {
    // delete any stale target archive before packing; fail if it cannot be removed
    if (true == targetArchiveFile.exists() && false == NioUtils.delete(targetArchiveFile, 3)) {
        throw new ArchiveException(
                String.format("[%s] exist and delete failed", targetArchiveFile.getAbsolutePath()));
    }

    boolean exist = false;
    ZipOutputStream zipOut = null;
    Set<String> entryNames = new HashSet<String>();
    BlockingQueue<Future<ArchiveEntry>> queue = new LinkedBlockingQueue<Future<ArchiveEntry>>(); // completion queue shared with the service below
    ExecutorCompletionService<ArchiveEntry> completionService = new ExecutorCompletionService<ArchiveEntry>(executor, queue);

    final File targetDir = new File(targetArchiveFile.getParentFile(),
            FilenameUtils.getBaseName(targetArchiveFile.getPath()));
    try {
        // working directory for the retrieved entry files
        FileUtils.forceMkdir(targetDir);

        zipOut = new ZipOutputStream(new BufferedOutputStream(new FileOutputStream(targetArchiveFile)));
        zipOut.setLevel(Deflater.BEST_SPEED);
        // retrieve each entry's content, inline for local files or via the completion service otherwise
        for (final FileData fileData : fileDatas) {
            if (fileData.getEventType().isDelete()) {
                continue; // delete events carry no file content to archive
            }

            String namespace = fileData.getNameSpace();
            String path = fileData.getPath();
            boolean isLocal = StringUtils.isBlank(namespace);
            String entryName = null;
            if (true == isLocal) {
                entryName = FilenameUtils.getPath(path) + FilenameUtils.getName(path);
            } else {
                entryName = namespace + File.separator + path;
            }

            // skip entries whose name has already been seen
            if (entryNames.contains(entryName) == false) {
                entryNames.add(entryName);
            } else {
                continue;
            }

            final String name = entryName;
            if (true == isLocal && !useLocalFileMutliThread) {
                // local file: wrap the retrieved content as an already-completed future on the shared queue
                queue.add(new DummyFuture(new ArchiveEntry(name, callback.retrive(fileData))));
            } else {
                completionService.submit(new Callable<ArchiveEntry>() {

                    public ArchiveEntry call() throws Exception {
                        // retrieve the content and copy it into the working directory
                        InputStream input = null;
                        OutputStream output = null;
                        try {
                            input = callback.retrive(fileData);

                            if (input instanceof LazyFileInputStream) {
                                input = ((LazyFileInputStream) input).getInputSteam(); // unwrap the underlying stream
                            }

                            if (input != null) {
                                File tmp = new File(targetDir, name);
                                NioUtils.create(tmp.getParentFile(), false, 3); // ensure the parent directory exists
                                output = new FileOutputStream(tmp);
                                NioUtils.copy(input, output); // copy the content into the temp file
                                return new ArchiveEntry(name, new File(targetDir, name));
                            } else {
                                return new ArchiveEntry(name);
                            }
                        } finally {
                            IOUtils.closeQuietly(input);
                            IOUtils.closeQuietly(output);
                        }
                    }
                });
            }
        }

        for (int i = 0; i < entryNames.size(); i++) {
            // drain results in completion order and write them into the zip
            ArchiveEntry input = null;
            InputStream stream = null;
            try {
                input = queue.take().get();
                if (input == null) {
                    continue;
                }

                stream = input.getStream();
                if (stream == null) {
                    continue;
                }

                if (stream instanceof LazyFileInputStream) {
                    stream = ((LazyFileInputStream) stream).getInputSteam(); // unwrap the underlying stream
                }

                exist = true;
                zipOut.putNextEntry(new ZipEntry(input.getName()));
                NioUtils.copy(stream, zipOut); // write the entry content into the zip
                zipOut.closeEntry();
            } finally {
                IOUtils.closeQuietly(stream);
            }
        }

        if (exist) {
            zipOut.finish();
        }
    } catch (Exception e) {
        throw new ArchiveException(e);
    } finally {
        IOUtils.closeQuietly(zipOut);
        try {
            FileUtils.deleteDirectory(targetDir); // clean up the working directory
        } catch (IOException e) {
            // ignore
        }
    }

    return exist;
}
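
What makes this example relevant to the two-argument constructor is that the caller keeps a reference to the queue it passed in: local files are wrapped in a DummyFuture and added to the queue directly, remote retrievals go through completionService.submit, and both kinds of result are later drained with queue.take(). A minimal sketch of that mixing pattern, using CompletableFuture.completedFuture in place of otter's DummyFuture (an assumption, purely for illustration):

import java.util.concurrent.*;

public class MixedCompletionDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        BlockingQueue<Future<String>> queue = new LinkedBlockingQueue<Future<String>>();
        ExecutorCompletionService<String> completionService =
                new ExecutorCompletionService<String>(executor, queue);

        // Results that are already available go straight onto the shared queue...
        queue.add(CompletableFuture.completedFuture("local entry"));
        // ...while expensive work is submitted and lands in the same queue when it finishes.
        completionService.submit(() -> "remote entry");

        // Drain both kinds of result from the one queue, in completion order.
        for (int i = 0; i < 2; i++) {
            System.out.println(queue.take().get());
        }
        executor.shutdownNow();
    }
}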

From source file:org.apache.solr.client.solrj.impl.BackupRequestLBHttpSolrClient.java

/**
 * Tries to query a live server from the list provided in Req. Servers in the
 * dead pool are skipped. If a request fails due to an IOException, the server
 * is moved to the dead pool for a certain period of time, or until a test
 * request on that server succeeds.
 *
 * If a request takes longer than defaultBackUpRequestDelay, the request will
 * also be sent to the next server in the list; this continues until there is
 * a response, the server list is exhausted, or the number of requests in
 * flight equals defaultMaximumConcurrentRequests.
 *
 * Servers are queried in the exact order given (except servers currently in
 * the dead pool are skipped). If no live servers from the provided list
 * remain to be tried, a number of previously skipped dead servers will be
 * tried. Req.getNumDeadServersToTry() controls how many dead servers will be
 * tried.
 *
 * If no live servers are found, a SolrServerException is thrown.
 *
 * @param req
 *          contains both the request as well as the list of servers to query
 *
 * @return the result of the request
 */
@Override
public Rsp request(Req req) throws SolrServerException, IOException {
    SolrParams reqParams = req.getRequest().getParams();

    int maximumConcurrentRequests = reqParams == null ? defaultMaximumConcurrentRequests
            : reqParams.getInt(HttpBackupRequestShardHandlerFactory.MAX_CONCURRENT_REQUESTS,
                    defaultMaximumConcurrentRequests);

    // If we can't do anything useful, fall back to the stock solr code
    if (maximumConcurrentRequests < 0) {
        return super.request(req);
    }

    // if there's an explicit backupDelay in the request, use that
    int backupDelay = reqParams == null ? -1
            : reqParams.getInt(HttpBackupRequestShardHandlerFactory.BACKUP_REQUEST_DELAY, -1);

    BackupPercentile backupPercentile = defaultBackupPercentile;
    String backupPercentileParam = reqParams == null ? null
            : reqParams.get(HttpBackupRequestShardHandlerFactory.BACKUP_PERCENTILE);
    if (backupPercentileParam != null) {
        backupPercentile = getPercentile(backupPercentileParam);
    }

    String performanceClass = reqParams == null ? req.getRequest().getPath() // getPath is typically the request handler name
            : reqParams.get(HttpBackupRequestShardHandlerFactory.PERFORMANCE_CLASS,
                    reqParams.get(CommonParams.QT, req.getRequest().getPath())); // TODO: Is QT getting filtered out of the distrib requests?

    if (backupDelay < 0 && backupPercentile != BackupPercentile.NONE) {
        // no explicit backup delay, consider a backup percentile for the delay.
        double rate = getCachedRate(performanceClass);
        if (rate > 0.1) { // 1 request per 10 seconds minimum.
            backupDelay = getCachedPercentile(performanceClass, backupPercentile);
            log.debug("Using delay of {}ms for percentile {} for performanceClass {}", backupDelay,
                    backupPercentile.name(), performanceClass);
        } else {
            log.info(
                    "Insufficient query rate ({} per sec) to rely on latency percentiles for performanceClass {}",
                    rate, performanceClass);
        }
    } else {
        // not using a percentile to track backupDelay
        performanceClass = null;
    }

    if (backupDelay < 0) {
        backupDelay = defaultBackUpRequestDelay;
    }

    // If we are using a backupPercentile, we need to proceed regardless of backupDelay so we can record and build the percentile info.
    // If not, and we still don't have a backupDelay, fall back to stock solr code.
    if (backupPercentile == BackupPercentile.NONE && backupDelay < 0) {
        return super.request(req);
    }

    // Reaching this point with a backupDelay < 0 means backup requests are effectively disabled, but we're executing
    // this codepath anyway. Presumably in order to build latency percentile data for future requests.

    ArrayBlockingQueue<Future<RequestTaskState>> queue = new ArrayBlockingQueue<Future<RequestTaskState>>(
            maximumConcurrentRequests + 1);
    ExecutorCompletionService<RequestTaskState> executer = new ExecutorCompletionService<RequestTaskState>(
            threadPoolExecuter, queue);

    final int numDeadServersToTry = req.getNumDeadServersToTry();
    final boolean isUpdate = req.getRequest() instanceof IsUpdateRequest;
    List<ServerWrapper> skipped = null;
    int inFlight = 0;
    RequestTaskState returnedRsp = null;
    Exception ex = null;

    long timeAllowedNano = getTimeAllowedInNanos(req.getRequest());
    long timeOutTime = System.nanoTime() + timeAllowedNano;

    for (String serverStr : req.getServers()) {
        if (isTimeExceeded(timeAllowedNano, timeOutTime)) {
            break;
        }

        serverStr = normalize(serverStr);
        // if the server is currently a zombie, just skip to the next one
        ServerWrapper wrapper = zombieServers.get(serverStr);
        if (wrapper != null) {
            if (tryDeadServers && numDeadServersToTry > 0) {
                if (skipped == null) {
                    skipped = new ArrayList<>(numDeadServersToTry);
                    skipped.add(wrapper);
                } else if (skipped.size() < numDeadServersToTry) {
                    skipped.add(wrapper);
                }
            }

            continue;
        }
        HttpSolrClient client = makeSolrClient(serverStr);
        Callable<RequestTaskState> task = createRequestTask(client, req, isUpdate, false, null,
                performanceClass, inFlight > 0);
        executer.submit(task);
        inFlight++;

        returnedRsp = getResponseIfReady(executer, patience(inFlight, maximumConcurrentRequests, backupDelay));
        if (returnedRsp == null) {
            // null response signifies that the response took too long.
            log.debug("Server :{} did not respond before the backupRequestDelay time of {} elapsed",
                    client.baseUrl, backupDelay);
            continue;
        }
        inFlight--;
        if (returnedRsp.stateDescription == TaskState.ResponseReceived) {
            return returnedRsp.response;
        } else if (returnedRsp.stateDescription == TaskState.ServerException) {
            ex = returnedRsp.exception;
        } else if (returnedRsp.stateDescription == TaskState.RequestException) {
            throw new SolrServerException(returnedRsp.exception);
        }
    }

    // no response so try the zombie servers
    if (tryDeadServers && skipped != null) {
        if (returnedRsp == null || returnedRsp.stateDescription == TaskState.ServerException) {
            // try the servers we previously skipped
            for (ServerWrapper wrapper : skipped) {
                if (isTimeExceeded(timeAllowedNano, timeOutTime)) {
                    break;
                }
                Callable<RequestTaskState> task = createRequestTask(wrapper.client, req, isUpdate, true,
                        wrapper.getKey(), performanceClass, inFlight > 0);
                executer.submit(task);
                inFlight++;
                returnedRsp = getResponseIfReady(executer,
                        patience(inFlight, maximumConcurrentRequests, backupDelay));
                if (returnedRsp == null) {
                    log.debug("Server :{} did not respond before the backupRequestDelay time of {} elapsed",
                            wrapper.getKey(), backupDelay);
                    continue;
                }
                inFlight--;
                if (returnedRsp.stateDescription == TaskState.ResponseReceived) {
                    return returnedRsp.response;
                } else if (returnedRsp.stateDescription == TaskState.ServerException) {
                    ex = returnedRsp.exception;
                } else if (returnedRsp.stateDescription == TaskState.RequestException) {
                    throw new SolrServerException(returnedRsp.exception);
                }
            }
        }
    }

    // All current attempts could be slower than backupDelay, or the returned
    // response could be from a struggling server, so wait until we get a good
    // response or all tasks are exhausted.
    if (returnedRsp == null || returnedRsp.stateDescription == TaskState.ServerException) {
        while (inFlight > 0) {
            returnedRsp = getResponseIfReady(executer, -1);
            inFlight--;
            if (returnedRsp.stateDescription == TaskState.ResponseReceived) {
                return returnedRsp.response;
            } else if (returnedRsp.stateDescription == TaskState.ServerException) {
                ex = returnedRsp.exception;
            } else if (returnedRsp.stateDescription == TaskState.RequestException) {
                throw new SolrServerException(returnedRsp.exception);
            }
        }
    }

    if (ex == null) {
        throw new SolrServerException("No live SolrServers available to handle this request");
    } else {
        throw new SolrServerException(
                "No live SolrServers available to handle this request:" + zombieServers.keySet(), ex);
    }
}
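
The helpers patience(...) and the int-taking getResponseIfReady are likewise not part of this listing. From the call sites, patience presumably returns the backup delay while more concurrent requests are still allowed, and a negative value (meaning wait indefinitely) once the in-flight count reaches the limit; getResponseIfReady then maps that value onto poll versus take. A hedged sketch, with all names and signatures assumed:

import java.util.concurrent.*;

// Sketch only: an interpretation of the unlisted helpers, not the actual Solr code.
final class BackupRequestHelpers {
    // A negative patience value means "block until something completes".
    static int patience(int inFlight, int maximumConcurrentRequests, int backupDelayMs) {
        return inFlight >= maximumConcurrentRequests ? -1 : backupDelayMs;
    }

    static <T> T getResponseIfReady(CompletionService<T> completionService, int patienceMs) throws Exception {
        Future<T> future = patienceMs < 0
                ? completionService.take()                                   // wait for any in-flight task
                : completionService.poll(patienceMs, TimeUnit.MILLISECONDS); // or give up after the backup delay
        return future == null ? null : future.get();                        // null => send a backup request
    }
}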