Example usage for java.util.concurrent ExecutorCompletionService take

Introduction

This page collects example usages of java.util.concurrent.ExecutorCompletionService.take(), which retrieves and removes the Future representing the next completed task, waiting if none are yet present.

Prototype

public Future<V> take() throws InterruptedException 
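
A minimal, self-contained sketch of the basic pattern (the class name and task bodies are illustrative): submit N tasks, then call take() N times. take() blocks until some task has completed, so results arrive in completion order rather than submission order.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class TakeExample {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ExecutorService exec = Executors.newFixedThreadPool(4);
        try {
            ExecutorCompletionService<Integer> ecs = new ExecutorCompletionService<>(exec);
            for (int i = 0; i < 4; i++) {
                final int n = i;
                // Later submissions sleep less, so they tend to finish first.
                ecs.submit(() -> {
                    Thread.sleep(100L * (4 - n));
                    return n;
                });
            }
            for (int i = 0; i < 4; i++) {
                // Blocks until the next task completes; get() then returns immediately.
                Future<Integer> done = ecs.take();
                System.out.println("completed: " + done.get());
            }
        } finally {
            exec.shutdown();
        }
    }
}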

Usage

From source file:org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil.java

public static void concurrentVisitReferencedFiles(final Configuration conf, final FileSystem fs,
        final SnapshotManifest manifest, final StoreFileVisitor visitor) throws IOException {
    final SnapshotDescription snapshotDesc = manifest.getSnapshotDescription();
    final Path snapshotDir = manifest.getSnapshotDir();

    List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
    if (regionManifests == null || regionManifests.size() == 0) {
        LOG.debug("No manifest files present: " + snapshotDir);
        return;
    }

    ExecutorService exec = SnapshotManifest.createExecutor(conf, "VerifySnapshot");
    final ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(exec);
    try {
        for (final SnapshotRegionManifest regionManifest : regionManifests) {
            completionService.submit(new Callable<Void>() {
                @Override
                public Void call() throws IOException {
                    visitRegionStoreFiles(regionManifest, visitor);
                    return null;
                }
            });
        }
        try {
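            // Wait for every region visit to finish; take() blocks until the
            // next task completes, and get() rethrows its failure.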
            for (int i = 0; i < regionManifests.size(); ++i) {
                completionService.take().get();
            }
        } catch (InterruptedException e) {
            throw new InterruptedIOException(e.getMessage());
        } catch (ExecutionException e) {
            if (e.getCause() instanceof CorruptedSnapshotException) {
                throw new CorruptedSnapshotException(e.getCause().getMessage(), snapshotDesc);
            } else {
                IOException ex = new IOException();
                ex.initCause(e.getCause());
                throw ex;
            }
        }
    } finally {
        exec.shutdown();
    }
}

From source file:org.apache.hadoop.hbase.util.ModifyRegionUtils.java

/**
 * Execute the task on the specified set of regions.
 *
 * @param exec Thread Pool Executor
 * @param regions {@link HRegionInfo} that describes the regions to edit
 * @param task {@link RegionEditTask} custom code to edit the region
 * @throws IOException if any region edit fails
 */
public static void editRegions(final ThreadPoolExecutor exec, final Collection<HRegionInfo> regions,
        final RegionEditTask task) throws IOException {
    final ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(exec);
    for (final HRegionInfo hri : regions) {
        completionService.submit(new Callable<Void>() {
            @Override
            public Void call() throws IOException {
                task.editRegion(hri);
                return null;
            }
        });
    }

    try {
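        // Take one completed future per region; get() propagates any failure.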
        for (HRegionInfo hri : regions) {
            completionService.take().get();
        }
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (ExecutionException e) {
        IOException ex = new IOException();
        ex.initCause(e.getCause());
        throw ex;
    }
}
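
A hedged usage sketch of editRegions. It assumes RegionEditTask is the single-method callback implied by task.editRegion(hri) above and that it is declared inside ModifyRegionUtils; the empty region list and pool size are placeholders.

import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;

public class EditRegionsSketch {
    public static void main(String[] args) throws IOException {
        Collection<HRegionInfo> regions = Collections.emptyList(); // supply real regions here
        // Executors.newFixedThreadPool returns a ThreadPoolExecutor, so the cast is safe.
        ThreadPoolExecutor exec = (ThreadPoolExecutor) Executors.newFixedThreadPool(8);
        try {
            // Blocks until every submitted edit completes, rethrowing the
            // first failure as IOException.
            ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
                @Override
                public void editRegion(HRegionInfo hri) throws IOException {
                    // custom per-region edit goes here
                }
            });
        } finally {
            exec.shutdown();
        }
    }
}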

From source file:org.apache.hadoop.hbase.util.TestIdLock.java

@Test
public void testMultipleClients() throws Exception {
    ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);
    try {
        ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<Boolean>(exec);
        for (int i = 0; i < NUM_THREADS; ++i)
            ecs.submit(new IdLockTestThread("client_" + i));
        for (int i = 0; i < NUM_THREADS; ++i) {
            Future<Boolean> result = ecs.take();
            assertTrue(result.get());
        }
        idLock.assertMapEmpty();
    } finally {
        exec.shutdown();
        exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
    }
}

From source file:org.apache.hadoop.hbase.util.TestIdReadWriteLock.java

@Test(timeout = 60000)
public void testMultipleClients() throws Exception {
    ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);
    try {
        ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<Boolean>(exec);
        for (int i = 0; i < NUM_THREADS; ++i)
            ecs.submit(new IdLockTestThread("client_" + i));
        for (int i = 0; i < NUM_THREADS; ++i) {
            Future<Boolean> result = ecs.take();
            assertTrue(result.get());
        }
        // make sure the entry pool will be cleared after GC and purge call
        int entryPoolSize = idLock.purgeAndGetEntryPoolSize();
        LOG.debug("Size of entry pool after gc and purge: " + entryPoolSize);
        assertEquals(0, entryPoolSize);
    } finally {
        exec.shutdown();
        exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
    }
}

From source file:org.apache.hadoop.hdfs.server.datanode.TestBatchIbr.java

static ExecutorService createExecutor() throws Exception {
    final ExecutorService executor = Executors.newFixedThreadPool(NUM_THREADS);
    final ExecutorCompletionService<Path> completion = new ExecutorCompletionService<>(executor);

    // initialize all threads and buffers
    for (int i = 0; i < NUM_THREADS; i++) {
        completion.submit(new Callable<Path>() {
            @Override
            public Path call() throws Exception {
                IO_BUF.get();
                VERIFY_BUF.get();
                return null;
            }
        });
    }
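    // Wait for all warm-up tasks to finish before handing out the executor.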
    for (int i = 0; i < NUM_THREADS; i++) {
        completion.take().get();
    }
    return executor;
}

From source file:org.apache.hadoop.hdfs.server.datanode.TestBatchIbr.java

static void runIbrTest(final long ibrInterval) throws Exception {
    final ExecutorService executor = createExecutor();
    final Random ran = new Random();

    final Configuration conf = newConf(ibrInterval);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
    final DistributedFileSystem dfs = cluster.getFileSystem();

    try {
        final String dirPathString = "/dir";
        final Path dir = new Path(dirPathString);
        dfs.mkdirs(dir);

        // start testing
        final long testStartTime = Time.monotonicNow();
        final ExecutorCompletionService<Path> createService = new ExecutorCompletionService<>(executor);
        final AtomicLong createFileTime = new AtomicLong();
        final AtomicInteger numBlockCreated = new AtomicInteger();

        // create files
        for (int i = 0; i < NUM_FILES; i++) {
            createService.submit(new Callable<Path>() {
                @Override
                public Path call() throws Exception {
                    final long start = Time.monotonicNow();
                    try {
                        final long seed = ran.nextLong();
                        final int numBlocks = ran.nextInt(MAX_BLOCK_NUM) + 1;
                        numBlockCreated.addAndGet(numBlocks);
                        return createFile(dir, numBlocks, seed, dfs);
                    } finally {
                        createFileTime.addAndGet(Time.monotonicNow() - start);
                    }
                }
            });
        }

        // verify files
        final ExecutorCompletionService<Boolean> verifyService = new ExecutorCompletionService<>(executor);
        final AtomicLong verifyFileTime = new AtomicLong();
        for (int i = 0; i < NUM_FILES; i++) {
            final Path file = createService.take().get();
            verifyService.submit(new Callable<Boolean>() {
                @Override
                public Boolean call() throws Exception {
                    final long start = Time.monotonicNow();
                    try {
                        return verifyFile(file, dfs);
                    } finally {
                        verifyFileTime.addAndGet(Time.monotonicNow() - start);
                    }
                }
            });
        }
        for (int i = 0; i < NUM_FILES; i++) {
            Assert.assertTrue(verifyService.take().get());
        }
        final long testEndTime = Time.monotonicNow();

        LOG.info("ibrInterval=" + ibrInterval + " ("
                + toConfString(DFS_BLOCKREPORT_INCREMENTAL_INTERVAL_MSEC_KEY, conf) + "), numBlockCreated="
                + numBlockCreated);
        LOG.info("duration=" + toSecondString(testEndTime - testStartTime) + ", createFileTime="
                + toSecondString(createFileTime.get()) + ", verifyFileTime="
                + toSecondString(verifyFileTime.get()));
        LOG.info("NUM_FILES=" + NUM_FILES + ", MAX_BLOCK_NUM=" + MAX_BLOCK_NUM + ", BLOCK_SIZE=" + BLOCK_SIZE
                + ", NUM_THREADS=" + NUM_THREADS + ", NUM_DATANODES=" + NUM_DATANODES);
        logIbrCounts(cluster.getDataNodes());
    } finally {
        executor.shutdown();
        cluster.shutdown();
    }
}

From source file:org.apache.hadoop.yarn.server.nodemanager.amrmproxy.BaseAMRMProxyTest.java

/**
 * This helper method invokes the specified function in parallel for each
 * test context in the specified list, using a thread pool, and returns the
 * responses received. It implements the logic required for dispatching
 * requests in parallel and waiting for the responses. If any of the
 * function calls fails or times out, it is ignored and the rest proceed,
 * so the returned list can be shorter than the number of test contexts
 * specified.
 *
 * @param testContexts the contexts to dispatch requests for
 * @param func the function to invoke for each test context
 * @return the responses received, in completion order
 */
protected <T, R> List<R> runInParallel(List<T> testContexts, final Function<T, R> func) {
    ExecutorCompletionService<R> completionService = new ExecutorCompletionService<R>(this.getThreadPool());
    LOG.info("Sending requests to endpoints asynchronously. Number of test contexts=" + testContexts.size());
    for (int index = 0; index < testContexts.size(); index++) {
        final T testContext = testContexts.get(index);

        LOG.info("Adding request to threadpool for test context: " + testContext.toString());

        completionService.submit(new Callable<R>() {
            @Override
            public R call() throws Exception {
                LOG.info("Sending request. Test context:" + testContext.toString());

                R response = null;
                try {
                    response = func.invoke(testContext);
                    LOG.info("Successfully sent request for context: " + testContext.toString());
                } catch (Throwable ex) {
                    LOG.error("Failed to process request for context: " + testContext);
                    response = null;
                }

                return response;
            }
        });
    }

    ArrayList<R> responseList = new ArrayList<R>();
    LOG.info("Waiting for responses from endpoints. Number of contexts=" + testContexts.size());
    for (int i = 0; i < testContexts.size(); ++i) {
        try {
            final Future<R> future = completionService.take();
            final R response = future.get(3000, TimeUnit.MILLISECONDS);
            responseList.add(response);
        } catch (Throwable e) {
            LOG.error("Failed to process request " + e.getMessage());
        }
    }

    return responseList;
}
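
A related sketch: CompletionService.poll(timeout, unit) bounds the wait for the next completion itself, instead of pairing take() with a bounded get() as above. A minimal helper under the same tolerate-failures contract (names are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class DrainQuietly {
    // Collects up to `expected` results, waiting at most timeoutMs for each
    // completion. Failed or missing results are skipped, so the returned list
    // can be shorter than expected, mirroring runInParallel above.
    static <R> List<R> drainQuietly(CompletionService<R> cs, int expected, long timeoutMs) {
        List<R> results = new ArrayList<>();
        for (int i = 0; i < expected; i++) {
            try {
                Future<R> f = cs.poll(timeoutMs, TimeUnit.MILLISECONDS);
                if (f == null) {
                    break; // nothing completed within the window
                }
                results.add(f.get()); // the future is already done, so get() does not block
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                break;
            } catch (ExecutionException e) {
                // skip the failed task and keep collecting
            }
        }
        return results;
    }
}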

From source file:org.apache.tika.batch.fs.strawman.StrawManTikaAppDriver.java

public static void main(String[] args) {
    long start = new Date().getTime();
    if (args.length < 6) {
        System.err.println(StrawManTikaAppDriver.usage());
        return; // exit early rather than fall through with too few arguments
    }
    Path inputDir = Paths.get(args[0]);
    Path outputDir = Paths.get(args[1]);
    int totalThreads = Integer.parseInt(args[2]);

    List<String> commandLine = new ArrayList<>();
    commandLine.addAll(Arrays.asList(args).subList(3, args.length));
    totalThreads = (totalThreads < 1) ? 1 : totalThreads;
    ExecutorService ex = Executors.newFixedThreadPool(totalThreads);
    ExecutorCompletionService<Integer> completionService = new ExecutorCompletionService<>(ex);

    for (int i = 0; i < totalThreads; i++) {
        StrawManTikaAppDriver driver = new StrawManTikaAppDriver(inputDir, outputDir, totalThreads,
                commandLine.toArray(new String[commandLine.size()]));
        completionService.submit(driver);
    }

    int totalFilesProcessed = 0;
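    // Collect each driver's file count as it completes.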
    for (int i = 0; i < totalThreads; i++) {
        try {
            Future<Integer> future = completionService.take();
            if (future != null) {
                totalFilesProcessed += future.get();
            }
        } catch (InterruptedException | ExecutionException e) {
            LOG.error(e.getMessage(), e);
        }
    }
    double elapsedSeconds = (double) (new Date().getTime() - start) / (double) 1000;
    LOG.info("Processed {} in {} seconds", totalFilesProcessed, elapsedSeconds);
}

From source file:org.commonjava.util.partyline.ManyReadersWithPreExistingWriterTest.java

private void executeTestIteration() throws Exception {
    ThreadContext.getContext(true);

    ExecutorCompletionService<String> completionService = new ExecutorCompletionService<String>(executor);

    final AtomicBoolean readFlag = new AtomicBoolean(false);
    final AtomicBoolean writeFlag = new AtomicBoolean(false);

    completionService.submit(writer(writeFlag, readFlag));
    for (int i = 0; i < THREADS; i++) {
        completionService.submit(reader(readFlag));
    }

    writeFlag.set(true);

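    // Each task returns an error message on failure, or null on success.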
    for (int i = 0; i < (THREADS + 1); i++) {
        String error = completionService.take().get();
        if (error != null) {
            logger.info(error);
            fail("thread failed.");
        }
        assertThat(error, nullValue());
    }

    ThreadContext.clearContext();
}

From source file:org.geoserver.bkprst.BackupTask.java

@Override
public void run() {

    // Sets up the filter to exclude some directories according to the previous backup info
    IOFileFilter excludeFilter = this.getExcludeFilter(this.includeData, this.includeGwc, this.includeLog);

    // Sets up source and destination
    File srcMount = this.dataRoot.root();
    File trgMount = new File(this.path);

    // Sets transaction
    this.trans = new BackupTransaction(this, srcMount, trgMount, excludeFilter);

    try {
        // Deletes dest directory if existing
        if (trgMount.exists()) {
            Remove.deleteDirectory(trgMount,
                    FileFilterUtils.or(FileFilterUtils.directoryFileFilter(), FileFilterUtils.fileFileFilter()),
                    true, true);
        }

        // Starts transaction
        this.trans.start();
        if (checkForHalt()) {
            LOGGER.fine("run:Halt requested " + this.id);
            return;
        }

        // Sets up the copy task
        ExecutorService ex = Executors.newFixedThreadPool(2);
        if (ex == null || ex.isTerminated()) {
            throw new IllegalArgumentException(
                    "Unable to run asynchronously using a terminated or null ThreadPoolExecutor");
        }
        ExecutorCompletionService<File> cs = new ExecutorCompletionService<File>(ex);

        this.act = new CopyTree(excludeFilter, cs, srcMount, trgMount);
        this.act.addCopyListener(new DefaultProgress(this.id.toString()) {
            public void onUpdateProgress(float percent) {
                super.onUpdateProgress(percent);
                progress = percent;
            }
        });

        // Starts backup
        int workSize = this.act.copy();

        // This is to keep track of backup progress
        while (workSize-- > 0) {
            Future<File> future;
            try {
                future = cs.take();
                LOGGER.info("copied file: " + future.get());
            } catch (Exception e) {
                LOGGER.log(Level.INFO, e.getLocalizedMessage(), e);
            }

            if (checkForHalt()) {
                LOGGER.fine("run:Halt requested, shutting down threads " + this.id);
                ex.shutdown();
                if (!ex.awaitTermination(5, TimeUnit.SECONDS)) {
                    throw new RuntimeException("Unable to stop backup task");
                }
                return;
            }
        }

        // Writes info about backup
        if (!this.writeBackupInfo(this.path)) {
            LOGGER.severe(
                    "Backup data info was not written properly; a restore operation will fail on this data");
            this.state = BrTaskState.FAILED;
        }

        if (checkForHalt()) {
            LOGGER.fine("run:Halt requested " + this.id);
            return;
        }
        // Backup completed
        this.trans.commit();

    } catch (Exception e) {
        LOGGER.log(Level.SEVERE, e.getLocalizedMessage(), e);
        // In case of errors, rollbacks
        this.trans.rollback();
    } finally {
        haltSemaphore.release();
    }
}