Example usage for java.util.concurrent ExecutorCompletionService submit

List of usage examples for java.util.concurrent ExecutorCompletionService submit

Introduction

On this page you can find example usage for java.util.concurrent ExecutorCompletionService.submit.

Prototype

public Future<V> submit(Runnable task, V result) 
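
Before the real-world examples below, here is a minimal, self-contained sketch of this overload. The class name, pool size, task bodies, and result strings are illustrative assumptions, not code from any of the quoted sources.

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitRunnableExample {
    public static void main(String[] args) throws Exception {
        // A hypothetical fixed-size pool; any ExecutorService can back the completion service
        ExecutorService pool = Executors.newFixedThreadPool(2);
        CompletionService<String> completionService = new ExecutorCompletionService<>(pool);

        // submit(Runnable, V): the Runnable performs the work, V is what Future.get() returns on success
        for (int i = 0; i < 3; i++) {
            final int id = i;
            completionService.submit(() -> System.out.println("task " + id + " finished"), "result-" + id);
        }

        // take() hands back futures in completion order, not submission order
        for (int i = 0; i < 3; i++) {
            Future<String> completed = completionService.take();
            System.out.println("completed with value: " + completed.get());
        }
        pool.shutdown();
    }
}

The examples that follow use the same overload with a null result, relying on the completion service only to learn when each worker has finished.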

Usage

From source file:com.streamsets.pipeline.stage.origin.jdbc.AbstractTableJdbcSource.java

@Override
public void produce(Map<String, String> lastOffsets, int maxBatchSize) throws StageException {
    int batchSize = Math.min(maxBatchSize, commonSourceConfigBean.maxBatchSize);
    handleLastOffset(new HashMap<>(lastOffsets));
    try {
        executorService = new SafeScheduledExecutorService(numberOfThreads,
                TableJdbcRunnable.TABLE_JDBC_THREAD_PREFIX);

        ExecutorCompletionService<Future> completionService = new ExecutorCompletionService<>(executorService);

        final RateLimiter queryRateLimiter = commonSourceConfigBean.creatQueryRateLimiter();

        List<Future> allFutures = new LinkedList<>();
        IntStream.range(0, numberOfThreads).forEach(threadNumber -> {
            JdbcBaseRunnable runnable = new JdbcRunnableBuilder().context(getContext())
                    .threadNumber(threadNumber).batchSize(batchSize).connectionManager(connectionManager)
                    .offsets(offsets).tableProvider(tableOrderProvider)
                    .tableReadContextCache(getTableReadContextCache(connectionManager, offsets))
                    .commonSourceConfigBean(commonSourceConfigBean).tableJdbcConfigBean(tableJdbcConfigBean)
                    .queryRateLimiter(commonSourceConfigBean.creatQueryRateLimiter()).isReconnect(isReconnect)
                    .build();

            toBeInvalidatedThreadCaches.add(runnable.getTableReadContextCache());
            allFutures.add(completionService.submit(runnable, null));
        });

        if (commonSourceConfigBean.allowLateTable) {
            TableSpooler tableSpooler = new TableSpooler();
            executorServiceForTableSpooler = new SafeScheduledExecutorService(1,
                    JdbcBaseRunnable.TABLE_JDBC_THREAD_PREFIX);
            executorServiceForTableSpooler.scheduleWithFixedDelay(tableSpooler, 0,
                    commonSourceConfigBean.newTableQueryInterval, TimeUnit.SECONDS);
        }

        while (!getContext().isStopped()) {
            checkWorkerStatus(completionService);
            final boolean shouldGenerate = tableOrderProvider.shouldGenerateNoMoreDataEvent();
            if (shouldGenerate) {
                final int delay = commonSourceConfigBean.noMoreDataEventDelay;
                if (delay > 0) {
                    Executors.newSingleThreadScheduledExecutor().schedule(new Runnable() {
                        @Override
                        public void run() {
                            jdbcUtil.generateNoMoreDataEvent(getContext());
                        }
                    }, delay, TimeUnit.SECONDS);
                } else {
                    jdbcUtil.generateNoMoreDataEvent(getContext());
                }
            }

            // This loop is only a checker for isStopped() -> hence running it as fast as possible leads to high CPU
            // usage even for the no-data pass-through use case. We're currently hard-coding the sleep to a few milliseconds.
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                LOG.debug("Interrupted wait");
            }
        }

        for (Future future : allFutures) {
            try {
                future.get();
            } catch (ExecutionException e) {
                LOG.error(
                        "ExecutionException when attempting to wait for all table JDBC runnables to complete, after context was"
                                + " stopped: {}",
                        e.getMessage(), e);
            } catch (InterruptedException e) {
                LOG.error(
                        "InterruptedException when attempting to wait for all table JDBC runnables to complete, after context "
                                + "was stopped: {}",
                        e.getMessage(), e);
                Thread.currentThread().interrupt();
            }
        }
    } finally {
        if (shutdownExecutorIfNeeded()) {
            Thread.currentThread().interrupt();
        }
    }
}

From source file:com.alibaba.otter.node.etl.extract.extractor.DatabaseExtractor.java

@Override
public void extract(DbBatch dbBatch) throws ExtractException {
    Assert.notNull(dbBatch);
    Assert.notNull(dbBatch.getRowBatch());
    // Look up the pipeline that this batch belongs to
    Pipeline pipeline = getPipeline(dbBatch.getRowBatch().getIdentity().getPipelineId());
    boolean mustDb = pipeline.getParameters().getSyncConsistency().isMedia();
    boolean isRow = pipeline.getParameters().getSyncMode().isRow(); // row sync mode
    // Resize the extractor thread pool to the configured size
    adjustPoolSize(pipeline.getParameters().getExtractPoolSize());
    ExecutorCompletionService completionService = new ExecutorCompletionService(executor);

    // First failure seen while waiting for the extract workers
    ExtractException exception = null;
    // Work items in this batch and the futures of their extract tasks
    List<DataItem> items = new ArrayList<DataItem>();
    List<Future> futures = new ArrayList<Future>();
    List<EventData> eventDatas = dbBatch.getRowBatch().getDatas();
    for (EventData eventData : eventDatas) {
        if (eventData.getEventType().isDdl()) {
            continue;
        }

        DataItem item = new DataItem(eventData);
        // Decide whether this event must be re-queried from the source database
        boolean flag = mustDb
                || (eventData.getSyncConsistency() != null && eventData.getSyncConsistency().isMedia());

        // Special case: events from an Oracle source with no updated columns must be re-queried
        if (!flag && CollectionUtils.isEmpty(eventData.getUpdatedColumns())) {
            DataMedia dataMedia = ConfigHelper.findDataMedia(pipeline, eventData.getTableId());
            if (dataMedia.getSource().getType().isOracle()) {
                flag |= true;
                eventData.setRemedy(true); // mark the record for remediation
            }
        }

        if (isRow && !flag) {
            // In row mode, check whether a database lookup is still needed
            // (for example, when the target is mapped through a view)
            flag = checkNeedDbForRowMode(pipeline, eventData);
        }

        if (flag && (eventData.getEventType().isInsert() || eventData.getEventType().isUpdate())) { // only inserts and updates are re-extracted
            Future future = completionService.submit(new DatabaseExtractWorker(pipeline, item), null); // submit the extract task
            if (future.isDone()) {
                // If the task ran inline (CallerRuns rejection policy), surface any failure immediately
                try {
                    future.get();
                } catch (InterruptedException e) {
                    cancel(futures); // cancel the outstanding tasks
                    throw new ExtractException(e);
                } catch (ExecutionException e) {
                    cancel(futures); // cancel the outstanding tasks
                    throw new ExtractException(e);
                }
            }

            futures.add(future);
        }

        items.add(item);
    }

    // Wait for every submitted worker to finish
    int index = 0;
    while (index < futures.size()) {
        try {
            Future future = completionService.take(); // blocks until the next task completes
            future.get();
        } catch (InterruptedException e) {
            exception = new ExtractException(e);
            break; // stop waiting; the remaining futures are cancelled below
        } catch (ExecutionException e) {
            exception = new ExtractException(e);
            break; // stop waiting; the remaining futures are cancelled below
        }

        index++;
    }

    if (index < futures.size()) {
        // A worker failed or was interrupted: cancel the remaining tasks and propagate the failure
        cancel(futures);
        throw exception;
    } else {
        // All workers finished; drop the records that were filtered out during extraction
        for (int i = 0; i < items.size(); i++) {
            DataItem item = items.get(i);
            if (item.filter) { // the worker marked this record as filtered
                eventDatas.remove(item.getEventData());
            }
        }
    }

}