Example usage for java.util.concurrent ExecutorCompletionService: the ExecutorCompletionService(Executor) constructor

Introduction

On this page you can find example usage for the java.util.concurrent ExecutorCompletionService(Executor) constructor.

Prototype

public ExecutorCompletionService(Executor executor) 

Document

Creates an ExecutorCompletionService using the supplied executor for base task execution and a LinkedBlockingQueue as a completion queue.
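
Before the project examples below, here is a minimal, self-contained sketch of the basic pattern: submit a batch of tasks through the completion service, then call take() once per submission to consume results in completion order. The pool size and the task bodies are illustrative assumptions, not taken from any of the projects listed.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CompletionServiceSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        // Wraps the executor and uses an unbounded LinkedBlockingQueue
        // as the completion queue, as described above.
        ExecutorCompletionService<Integer> completionService =
                new ExecutorCompletionService<Integer>(executor);

        final int tasks = 10;
        for (int i = 0; i < tasks; i++) {
            final int n = i;
            completionService.submit(new Callable<Integer>() {
                @Override
                public Integer call() {
                    return n * n; // placeholder work
                }
            });
        }

        try {
            // take() blocks until some task finishes; results arrive in
            // completion order, not submission order.
            for (int i = 0; i < tasks; i++) {
                System.out.println("completed: " + completionService.take().get());
            }
        } finally {
            executor.shutdown();
        }
    }
}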

Usage

From source file: org.apache.hadoop.hbase.snapshot.SnapshotManifestV1.java

static List<SnapshotRegionManifest> loadRegionManifests(final Configuration conf, final Executor executor,
        final FileSystem fs, final Path snapshotDir, final SnapshotDescription desc) throws IOException {
    FileStatus[] regions = FSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs));
    if (regions == null) {
        LOG.info("No regions under directory:" + snapshotDir);
        return null;
    }

    final ExecutorCompletionService<SnapshotRegionManifest> completionService = new ExecutorCompletionService<SnapshotRegionManifest>(
            executor);
    for (final FileStatus region : regions) {
        completionService.submit(new Callable<SnapshotRegionManifest>() {
            @Override
            public SnapshotRegionManifest call() throws IOException {
                HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, region.getPath());
                return buildManifestFromDisk(conf, fs, snapshotDir, hri);
            }
        });
    }

    ArrayList<SnapshotRegionManifest> regionsManifest = new ArrayList<SnapshotRegionManifest>(regions.length);
    try {
        for (int i = 0; i < regions.length; ++i) {
            regionsManifest.add(completionService.take().get());
        }
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (ExecutionException e) {
        IOException ex = new IOException();
        ex.initCause(e.getCause());
        throw ex;
    }
    return regionsManifest;
}
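
The loop above calls take().get() exactly once per submitted region, which blocks indefinitely if a task never completes. A bounded variant is sketched below; it is not from the HBase source and reuses the imports and types of the example above, swapping take() for poll() with a timeout:

static List<SnapshotRegionManifest> drainWithTimeout(
        CompletionService<SnapshotRegionManifest> completionService, int expected,
        long timeoutSeconds) throws IOException {
    List<SnapshotRegionManifest> results = new ArrayList<SnapshotRegionManifest>(expected);
    try {
        for (int i = 0; i < expected; ++i) {
            // poll() returns null on timeout instead of blocking forever
            Future<SnapshotRegionManifest> f =
                    completionService.poll(timeoutSeconds, TimeUnit.SECONDS);
            if (f == null) {
                throw new IOException("Timed out waiting for region manifest " + i);
            }
            results.add(f.get());
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new InterruptedIOException(e.getMessage());
    } catch (ExecutionException e) {
        throw new IOException(e.getCause());
    }
    return results;
}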

From source file: knowledgeMiner.InformationDripBootstrapping.java

/**
 * Run the experiment by starting with a seed concept/article and rippling
 * outwards to other linked concepts/articles. When max ripple is reached,
 * repeat for as many repeats as defined.
 */
private void run() {
    ResourceAccess.newInstance();
    IOManager.newInstance();
    KnowledgeMiner.readInOntologyMappings(initialRunNumber_);
    Executor executor = Executors.newFixedThreadPool(KnowledgeMiner.getNumThreads());
    pool_ = new ExecutorCompletionService<Collection<ConceptModule>>(executor);
    for (int i = 0; i < repeats_; i++) {
        KnowledgeMiner.runID_ = initialRunNumber_ + i;

        // Set up completed collections
        Set<OntologyConcept> completedConcepts = Collections
                .newSetFromMap(new ConcurrentHashMap<OntologyConcept, Boolean>());
        Set<Integer> completedArticles = Collections.newSetFromMap(new ConcurrentHashMap<Integer, Boolean>());

        // Add the initial
        Collection<ConceptModule> rippleLayer = new HashSet<>();
        rippleLayer.add(initial_);

        int maxRipples = (maxRipple_ == -1) ? Integer.MAX_VALUE : maxRipple_;
        for (int r = 0; r <= maxRipples; r++) {
            System.out.println("\nRipple " + r + ": " + rippleLayer.size() + " tasks to process.\n");
            int count = 0;

            // Simultaneously process every concept in the ripple layer
            System.out.print(count++ + ": ");
            for (ConceptModule cm : rippleLayer) {
                pool_.submit(new RippleTask(cm, r != maxRipples, completedArticles, completedConcepts));
            }

            // Wait for the tasks to finish and store results
            Collection<ConceptModule> nextLayer = new HashSet<>();
            for (int j = 0; j < rippleLayer.size(); j++) {
                try {
                    // Get the results and process them.
                    Collection<ConceptModule> result = pool_.take().get();
                    if (count <= rippleLayer.size())
                        System.out.print(count++ + ": ");
                    if (r == maxRipples)
                        continue;

                    // Add the articles/concepts to the next ripple layer
                    for (ConceptModule cm : result) {
                        if (cm.getConcept() != null && !completedConcepts.contains(cm.getConcept()))
                            nextLayer.add(cm);
                        else if (cm.getArticle() != -1 && !completedArticles.contains(cm.getArticle()))
                            nextLayer.add(cm);
                    }
                } catch (InterruptedException e) {
                    e.printStackTrace();
                } catch (ExecutionException e) {
                    e.printStackTrace();
                }
            }
            rippleLayer = nextLayer;

            // TODO Record details of this run

            // Clear preprocessed data
            KnowledgeMinerPreprocessor.getInstance().writeHeuristics();

            if (rippleLayer.isEmpty())
                break;
        }
    }
}
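
Note that the catch blocks above print and swallow InterruptedException, which leaves the loop running with the thread's interrupt status cleared. A common alternative for those two blocks, sketched below and not part of the knowledgeMiner source, restores the flag and stops draining:

} catch (InterruptedException e) {
    Thread.currentThread().interrupt(); // restore the interrupt status
    break;                              // stop waiting for further results
} catch (ExecutionException e) {
    e.printStackTrace();                // or log and keep draining
}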

From source file: com.netflix.curator.framework.recipes.barriers.TestDistributedDoubleBarrier.java

@Test
public void testOverSubscribed() throws Exception {
    final Timing timing = new Timing();
    final CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
            timing.session(), timing.connection(), new RetryOneTime(1));
    ExecutorService service = Executors.newCachedThreadPool();
    ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(service);
    try {
        client.start();

        final Semaphore semaphore = new Semaphore(0);
        final CountDownLatch latch = new CountDownLatch(1);
        for (int i = 0; i < (QTY + 1); ++i) {
            completionService.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    DistributedDoubleBarrier barrier = new DistributedDoubleBarrier(client, "/barrier", QTY) {
                        @Override
                        protected List<String> getChildrenForEntering() throws Exception {
                            semaphore.release();
                            Assert.assertTrue(timing.awaitLatch(latch));
                            return super.getChildrenForEntering();
                        }
                    };
                    Assert.assertTrue(barrier.enter(timing.seconds(), TimeUnit.SECONDS));
                    Assert.assertTrue(barrier.leave(timing.seconds(), TimeUnit.SECONDS));
                    return null;
                }
            });
        }

        Assert.assertTrue(semaphore.tryAcquire(QTY + 1, timing.seconds(), TimeUnit.SECONDS)); // wait until all QTY+1 barriers are trying to enter
        latch.countDown();

        for (int i = 0; i < (QTY + 1); ++i) {
            completionService.take().get(); // to check for assertions
        }
    } finally {
        service.shutdown();
        IOUtils.closeQuietly(client);
    }
}

From source file: org.apache.flume.channel.kafka.TestKafkaChannel.java

@Test
public void testNoParsingAsFlumeAgent() throws Exception {
    final KafkaChannel channel = startChannel(false);
    Producer<String, byte[]> producer = new Producer<String, byte[]>(
            new ProducerConfig(channel.getKafkaConf()));
    List<KeyedMessage<String, byte[]>> original = Lists.newArrayList();
    for (int i = 0; i < 50; i++) {
        KeyedMessage<String, byte[]> data = new KeyedMessage<String, byte[]>(topic, null,
                RandomStringUtils.randomAlphabetic(6), String.valueOf(i).getBytes());
        original.add(data);
    }
    producer.send(original);
    ExecutorCompletionService<Void> submitterSvc = new ExecutorCompletionService<Void>(
            Executors.newCachedThreadPool());
    List<Event> events = pullEvents(channel, submitterSvc, 50, false, false);
    wait(submitterSvc, 5);
    Set<Integer> finals = Sets.newHashSet();
    for (int i = 0; i < 50; i++) {
        finals.add(Integer.parseInt(new String(events.get(i).getBody())));
    }
    for (int i = 0; i < 50; i++) {
        Assert.assertTrue(finals.contains(i));
        finals.remove(i);
    }
    Assert.assertTrue(finals.isEmpty());
    channel.stop();
}

From source file: org.springframework.integration.jdbc.store.channel.AbstractTxTimeoutMessageStoreTests.java

public void testInt2993IdCacheConcurrency() throws InterruptedException, ExecutionException {
    final String groupId = "testInt2993Group";
    for (int i = 0; i < 100; i++) {
        this.jdbcChannelMessageStore.addMessageToGroup(groupId,
                new GenericMessage<String>("testInt2993Message"));
    }

    ExecutorService executorService = Executors.newCachedThreadPool();
    CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(executorService);

    final int concurrency = 5;

    final TransactionTemplate transactionTemplate = new TransactionTemplate(transactionManager);

    for (int i = 0; i < concurrency; i++) {
        completionService.submit(new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
                for (int i = 0; i < 100; i++) {
                    boolean result = transactionTemplate.execute(new TransactionCallback<Boolean>() {
                        @Override
                        public Boolean doInTransaction(TransactionStatus status) {
                            Message<?> message = null;
                            try {
                                message = jdbcChannelMessageStore.pollMessageFromGroup(groupId);
                            } catch (Exception e) {
                                log.error("IdCache race condition.", e);
                                return false;
                            }
                            try {
                                Thread.sleep(10);
                            } catch (InterruptedException e) {
                                log.error(e);
                            }
                            if (message != null) {
                                jdbcChannelMessageStore
                                        .removeFromIdCache(message.getHeaders().getId().toString());
                            }
                            return true;
                        }
                    });
                    if (!result)
                        return false;
                }

                return true;
            }
        });
    }

    for (int j = 0; j < concurrency; j++) {
        assertTrue(completionService.take().get());
    }

    executorService.shutdown();
    assertTrue(executorService.awaitTermination(5, TimeUnit.SECONDS));
}

From source file: com.baidu.rigel.biplatform.tesseract.isservice.search.service.impl.SearchIndexServiceImpl.java

@Override
public SearchIndexResultSet query(QueryRequest query) throws IndexAndSearchException {
    ExecutorCompletionService<SearchIndexResultSet> completionService = new ExecutorCompletionService<>(
            taskExecutor);
    LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_BEGIN, "query", "[query:" + query + "]"));
    // 1. Does all the existed index cover this query
    // 2. get index meta and index shard
    // 3. trans query to Query that can used for searching
    // 4. dispatch search query
    // 5. do search
    // 6. merge result
    // 7. return

    if (query == null || StringUtils.isEmpty(query.getCubeId())) {
        LOGGER.error(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_EXCEPTION, "query",
                "[query:" + query + "]"));
        throw new IndexAndSearchException(
                TesseractExceptionUtils.getExceptionMessage(IndexAndSearchException.QUERYEXCEPTION_MESSAGE,
                        IndexAndSearchExceptionType.ILLEGALARGUMENT_EXCEPTION),
                IndexAndSearchExceptionType.ILLEGALARGUMENT_EXCEPTION);
    }
    IndexMeta idxMeta = this.idxMetaService.getIndexMetaByCubeId(query.getCubeId(),
            query.getDataSourceInfo().getDataSourceKey());

    SearchIndexResultSet result = null;
    long current = System.currentTimeMillis();
    if (idxMeta == null || idxMeta.getIdxState().equals(IndexState.INDEX_UNAVAILABLE)
            || idxMeta.getIdxState().equals(IndexState.INDEX_UNINIT) || !query.isUseIndex()
            || (query.getFrom() != null && query.getFrom().getFrom() != null
                    && !idxMeta.getDataDescInfo().getTableNameList().contains(query.getFrom().getFrom()))
            || !indexMetaContains(idxMeta, query)) {
        LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_PROCESS_NO_PARAM, "query",
                "use database"));
        // index does not exist or is unavailable; use db query
        SqlQuery sqlQuery = QueryRequestUtil.transQueryRequest2SqlQuery(query);
        SqlDataSourceWrap dataSourceWrape = null;
        try {
            dataSourceWrape = (SqlDataSourceWrap) this.dataSourcePoolService
                    .getDataSourceByKey(query.getDataSourceInfo());
        } catch (DataSourceException e) {
            LOGGER.error(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_EXCEPTION, "query",
                    "[query:" + query + "]", e));
            throw new IndexAndSearchException(
                    TesseractExceptionUtils.getExceptionMessage(IndexAndSearchException.QUERYEXCEPTION_MESSAGE,
                            IndexAndSearchExceptionType.SQL_EXCEPTION),
                    e, IndexAndSearchExceptionType.SQL_EXCEPTION);
        }
        if (dataSourceWrape == null) {
            throw new IllegalArgumentException();
        }

        long limitStart = 0;
        long limitSize = 0;
        if (query.getLimit() != null) {
            limitStart = query.getLimit().getStart();
            if (query.getLimit().getSize() > 0) {
                limitSize = query.getLimit().getSize();
            }

        }
        SearchIndexResultSet currResult = this.dataQueryService.queryForListWithSQLQueryAndGroupBy(sqlQuery,
                dataSourceWrape, limitStart, limitSize, query);
        LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_PROCESS_NO_PARAM, "query",
                "db return " + currResult.size() + " records"));
        result = currResult;
    } else {
        LOGGER.info(
                String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_PROCESS_NO_PARAM, "query", "use index"));

        LOGGER.info("cost :" + (System.currentTimeMillis() - current) + " before prepare get record.");
        current = System.currentTimeMillis();

        List<SearchIndexResultSet> idxShardResultSetList = new ArrayList<SearchIndexResultSet>();
        for (IndexShard idxShard : idxMeta.getIdxShardList()) {

            if (idxShard.getIdxState().equals(IndexState.INDEX_UNINIT)) {
                continue;
            }

            completionService.submit(new Callable<SearchIndexResultSet>() {

                @Override
                public SearchIndexResultSet call() throws Exception {
                    try {
                        long current = System.currentTimeMillis();
                        Node searchNode = isNodeService.getFreeSearchNodeByIndexShard(idxShard,
                                idxMeta.getClusterName());
                        searchNode.searchRequestCountAdd();
                        isNodeService.saveOrUpdateNodeInfo(searchNode);
                        LOGGER.info("begin search in shard:{}", idxShard);
                        SearchIndexResultSet result = (SearchIndexResultSet) isClient
                                .search(query, idxShard, searchNode).getMessageBody();
                        searchNode.searchrequestCountSub();
                        isNodeService.saveOrUpdateNodeInfo(searchNode);
                        LOGGER.info("compelete search in shard:{},take:{} ms", idxShard,
                                System.currentTimeMillis() - current);
                        return result;
                    } catch (Exception e) {
                        throw new IndexAndSearchException(
                                TesseractExceptionUtils.getExceptionMessage(
                                        IndexAndSearchException.QUERYEXCEPTION_MESSAGE,
                                        IndexAndSearchExceptionType.NETWORK_EXCEPTION),
                                e, IndexAndSearchExceptionType.NETWORK_EXCEPTION);
                    }

                }
            });
        }
        for (int i = 0; i < idxMeta.getIdxShardList().size(); i++) {
            try {
                idxShardResultSetList.add(completionService.take().get());
            } catch (InterruptedException | ExecutionException e) {
                throw new IndexAndSearchException(
                        TesseractExceptionUtils.getExceptionMessage(
                                IndexAndSearchException.QUERYEXCEPTION_MESSAGE,
                                IndexAndSearchExceptionType.NETWORK_EXCEPTION),
                        e, IndexAndSearchExceptionType.NETWORK_EXCEPTION);
            }
        }
        LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_PROCESS_NO_PARAM, "query",
                "merging result from multiple index"));
        result = mergeResultSet(idxShardResultSetList, query);
        StringBuilder sb = new StringBuilder();
        sb.append("cost :").append(System.currentTimeMillis() - current)
                .append(" in get result record,result size:").append(result.size()).append(" shard size:")
                .append(idxShardResultSetList.size());

        LOGGER.info(sb.toString());
        current = System.currentTimeMillis();
    }

    LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_PROCESS_NO_PARAM, "query",
            "merging final result"));

    LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_END, "query", "[query:" + query + "]"));
    return result;
}

From source file: org.apache.hadoop.hbase.snapshot.SnapshotManifestV2.java

static List<SnapshotRegionManifest> loadRegionManifests(final Configuration conf, final Executor executor,
        final FileSystem fs, final Path snapshotDir, final SnapshotDescription desc) throws IOException {
    FileStatus[] manifestFiles = FSUtils.listStatus(fs, snapshotDir, new PathFilter() {
        @Override
        public boolean accept(Path path) {
            return path.getName().startsWith(SNAPSHOT_MANIFEST_PREFIX);
        }
    });

    if (manifestFiles == null || manifestFiles.length == 0)
        return null;

    final ExecutorCompletionService<SnapshotRegionManifest> completionService = new ExecutorCompletionService<SnapshotRegionManifest>(
            executor);
    for (final FileStatus st : manifestFiles) {
        completionService.submit(new Callable<SnapshotRegionManifest>() {
            @Override
            public SnapshotRegionManifest call() throws IOException {
                FSDataInputStream stream = fs.open(st.getPath());
                try {
                    return SnapshotRegionManifest.parseFrom(stream);
                } finally {
                    stream.close();
                }
            }
        });
    }

    ArrayList<SnapshotRegionManifest> regionsManifest = new ArrayList<SnapshotRegionManifest>(
            manifestFiles.length);
    try {
        for (int i = 0; i < manifestFiles.length; ++i) {
            regionsManifest.add(completionService.take().get());
        }
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (ExecutionException e) {
        IOException ex = new IOException();
        ex.initCause(e.getCause());
        throw ex;
    }
    return regionsManifest;
}

From source file: org.apache.drill.optiq.EnumerableDrill.java

@Override
public Enumerator<E> enumerator() {
    // TODO: use a completion service from the container
    final ExecutorCompletionService<Collection<RunOutcome>> service = new ExecutorCompletionService<Collection<RunOutcome>>(
            new ThreadPoolExecutor(1, 1, 1, TimeUnit.SECONDS, new LinkedBlockingDeque<Runnable>(10)));

    // Run the plan using an executor. It runs in a different thread, writing
    // results to our queue.
    //
    // TODO: use the result of task, and check for exceptions
    final Future<Collection<RunOutcome>> task = runPlan(service);

    return new Enumerator<E>() {
        private E current;

        @Override
        public E current() {
            return current;
        }

        @Override
        public boolean moveNext() {
            try {
                Object o = queue.take();
                if (o instanceof RunOutcome.OutcomeType) {
                    switch ((RunOutcome.OutcomeType) o) {
                    case SUCCESS:
                        return false; // end of data
                    case CANCELED:
                        throw new RuntimeException("canceled");
                    case FAILED:
                    default:
                        throw new RuntimeException("failed");
                    }
                } else {
                    current = (E) parseJson((byte[]) o);
                    return true;
                }
            } catch (InterruptedException e) {
                Thread.interrupted();
                throw new RuntimeException(e);
            }
        }

        @Override
        public void reset() {
            throw new UnsupportedOperationException();
        }
    };
}
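
The TODO above notes that the task future is never consulted, so failures in the plan-runner thread surface only through the queue. One possible way to check it once the queue reports completion, sketched here rather than taken from the Drill source:

// Sketch: inside the SUCCESS branch above, before "return false":
try {
    task.get(); // rethrows anything the plan-runner thread threw
} catch (ExecutionException e) {
    throw new RuntimeException(e.getCause());
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw new RuntimeException(e);
}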

From source file: org.jtheque.modules.impl.ModuleLoader.java

/**
 * Load all the modules from the given files in parallel (using two threads per processor).
 *
 * @param files The files to load the modules from.
 *
 * @return A Collection containing all the loaded modules.
 */
@SuppressWarnings({ "ForLoopReplaceableByForEach" })
private Collection<Module> loadInParallel(File[] files) {
    ExecutorService loadersPool = Executors.newFixedThreadPool(2 * ThreadUtils.processors());

    CompletionService<Module> completionService = new ExecutorCompletionService<Module>(loadersPool);

    for (File file : files) {
        completionService.submit(new ModuleLoaderTask(file));
    }

    List<Module> modules = CollectionUtils.newList(files.length);

    try {
        for (int i = 0; i < files.length; i++) {
            modules.add(completionService.take().get());
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    }

    loadersPool.shutdown();

    return modules;
}
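
One caveat: loadersPool.shutdown() is reached only on the success path, so a failed loader task leaks the pool's threads. A hedged rework of the tail of the method, sketched below and not from the JTheque source, moves shutdown into a finally block:

List<Module> modules = CollectionUtils.newList(files.length);
try {
    for (int i = 0; i < files.length; i++) {
        modules.add(completionService.take().get());
    }
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw new RuntimeException(e);
} catch (ExecutionException e) {
    throw new RuntimeException(e);
} finally {
    loadersPool.shutdown(); // runs even when a loader task failed
}

return modules;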

From source file: com.netflix.curator.framework.recipes.locks.TestInterProcessMutexBase.java

@Test
public void testKilledSession() throws Exception {
    final Timing timing = new Timing();

    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
            timing.connection(), new RetryOneTime(1));
    client.start();
    try {
        final InterProcessLock mutex1 = makeLock(client);
        final InterProcessLock mutex2 = makeLock(client);

        final Semaphore semaphore = new Semaphore(0);
        ExecutorCompletionService<Object> service = new ExecutorCompletionService<Object>(
                Executors.newFixedThreadPool(2));
        service.submit(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                mutex1.acquire();
                semaphore.release();
                Thread.sleep(1000000);
                return null;
            }
        });

        service.submit(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                mutex2.acquire();
                semaphore.release();
                Thread.sleep(1000000);
                return null;
            }
        });

        Assert.assertTrue(timing.acquireSemaphore(semaphore, 1));
        KillSession.kill(client.getZookeeperClient().getZooKeeper(), server.getConnectString());
        Assert.assertTrue(timing.acquireSemaphore(semaphore, 1));
    } finally {
        client.close();
    }
}