Example usage for java.util.concurrent ExecutorService awaitTermination

List of usage examples for java.util.concurrent ExecutorService awaitTermination

Introduction

On this page you can find usage examples for java.util.concurrent ExecutorService awaitTermination.

Prototype

boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;

Document

Blocks until all tasks have completed execution after a shutdown request, or the timeout occurs, or the current thread is interrupted, whichever happens first.
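
Note that awaitTermination does not itself stop the executor; it only blocks until tasks finish after shutdown() or shutdownNow() has been called, and it returns false if the timeout elapses first. Before the examples, here is a minimal sketch of the common graceful-shutdown idiom (the class name and tasks are illustrative, not taken from the examples below):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationSketch {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 8; i++) {
            final int taskId = i;
            pool.submit(() -> System.out.println("task " + taskId));
        }

        pool.shutdown(); // stop accepting new tasks; already-submitted tasks keep running
        try {
            // Block up to 30 seconds for the submitted tasks to complete.
            if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
                // Timed out: interrupt whatever is still running.
                pool.shutdownNow();
            }
        } catch (InterruptedException e) {
            // Interrupted while waiting: force shutdown and preserve the interrupt status.
            pool.shutdownNow();
            Thread.currentThread().interrupt();
        }
    }
}

Several of the examples below assert on the boolean returned by awaitTermination, or loop on it, rather than discarding it; checking the return value is what distinguishes "all tasks finished" from "the timeout expired".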

Usage

From source file:org.apache.tinkerpop.gremlin.groovy.jsr223.GremlinGroovyScriptEngineTest.java

@Test
public void shouldAllowVariableReuseAcrossThreads() throws Exception {
    final BasicThreadFactory testingThreadFactory = new BasicThreadFactory.Builder()
            .namingPattern("test-gremlin-scriptengine-%d").build();
    final ExecutorService service = Executors.newFixedThreadPool(8, testingThreadFactory);
    final GremlinGroovyScriptEngine scriptEngine = new GremlinGroovyScriptEngine();

    final AtomicBoolean failed = new AtomicBoolean(false);
    final int max = 512;
    final List<Pair<Integer, List<Integer>>> futures = Collections.synchronizedList(new ArrayList<>(max));
    IntStream.range(0, max).forEach(i -> {
        final int yValue = i * 2;
        final int zValue = i * -1;
        final Bindings b = new SimpleBindings();
        b.put("x", i);
        b.put("y", yValue);

        final String script = "z=" + zValue + ";[x,y,z]";
        try {
            service.submit(() -> {
                try {
                    final List<Integer> result = (List<Integer>) scriptEngine.eval(script, b);
                    futures.add(Pair.with(i, result));
                } catch (Exception ex) {
                    failed.set(true);
                }
            });
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    });

    service.shutdown();
    assertThat(service.awaitTermination(120000, TimeUnit.MILLISECONDS), is(true));

    // likely a concurrency exception if it occurs - and if it does then we've messed up, because that's what this
    // test is partially designed to protect against.
    assertThat(failed.get(), is(false));
    assertEquals(max, futures.size());
    futures.forEach(t -> {
        assertEquals(t.getValue0(), t.getValue1().get(0));
        assertEquals(t.getValue0() * 2, t.getValue1().get(1).intValue());
        assertEquals(t.getValue0() * -1, t.getValue1().get(2).intValue());
    });
}

From source file:org.apache.hadoop.mapreduce.lib.output.TestFileOutputCommitter.java

private void testConcurrentCommitTaskWithSubDir(int version) throws Exception {
    final Job job = Job.getInstance();
    FileOutputFormat.setOutputPath(job, outDir);
    final Configuration conf = job.getConfiguration();
    conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
    conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);

    conf.setClass("fs.file.impl", RLFS.class, FileSystem.class);
    FileSystem.closeAll();

    final JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
    final FileOutputCommitter amCommitter = new FileOutputCommitter(outDir, jContext);
    amCommitter.setupJob(jContext);

    final TaskAttemptContext[] taCtx = new TaskAttemptContextImpl[2];
    taCtx[0] = new TaskAttemptContextImpl(conf, taskID);
    taCtx[1] = new TaskAttemptContextImpl(conf, taskID1);

    final TextOutputFormat[] tof = new TextOutputFormat[2];
    for (int i = 0; i < tof.length; i++) {
        tof[i] = new TextOutputFormat() {
            @Override
            public Path getDefaultWorkFile(TaskAttemptContext context, String extension) throws IOException {
                final FileOutputCommitter foc = (FileOutputCommitter) getOutputCommitter(context);
                return new Path(new Path(foc.getWorkPath(), SUB_DIR),
                        getUniqueFile(context, getOutputName(context), extension));
            }
        };
    }

    final ExecutorService executor = Executors.newFixedThreadPool(2);
    try {
        for (int i = 0; i < taCtx.length; i++) {
            final int taskIdx = i;
            executor.submit(new Callable<Void>() {
                @Override
                public Void call() throws IOException, InterruptedException {
                    final OutputCommitter outputCommitter = tof[taskIdx].getOutputCommitter(taCtx[taskIdx]);
                    outputCommitter.setupTask(taCtx[taskIdx]);
                    final RecordWriter rw = tof[taskIdx].getRecordWriter(taCtx[taskIdx]);
                    writeOutput(rw, taCtx[taskIdx]);
                    outputCommitter.commitTask(taCtx[taskIdx]);
                    return null;
                }
            });
        }
    } finally {
        executor.shutdown();
        while (!executor.awaitTermination(1, TimeUnit.SECONDS)) {
            LOG.info("Awaiting thread termination!");
        }
    }

    amCommitter.commitJob(jContext);
    final RawLocalFileSystem lfs = new RawLocalFileSystem();
    lfs.setConf(conf);
    assertFalse("Must not end up with sub_dir/sub_dir", lfs.exists(new Path(OUT_SUB_DIR, SUB_DIR)));

    // validate output
    validateContent(OUT_SUB_DIR);
    FileUtil.fullyDelete(new File(outDir.toString()));
}

From source file:org.openrdf.http.server.ProtocolTest.java

/**
 * Test for SES-1861
 * 
 * @throws Exception
 */
@Test
public void testConcurrentNamespaceUpdates() throws Exception {
    int limitCount = 1000;
    int limitPrefix = 50;

    Random prng = new Random();

    // String repositoryLocation =
    // Protocol.getRepositoryLocation("http://localhost:8080/openrdf-sesame",
    // "Test-NativeStore");
    String repositoryLocation = TestServer.REPOSITORY_URL;

    ExecutorService threadPool = Executors.newFixedThreadPool(20);

    for (int count = 0; count < limitCount; count++) {
        final int number = count;
        final int i = prng.nextInt(limitPrefix);
        final String prefix = "prefix" + i;
        final String ns = "http://example.org/namespace" + i;

        final String location = Protocol.getNamespacePrefixLocation(repositoryLocation, prefix);

        Runnable runner = new Runnable() {

            public void run() {
                try {
                    if (number % 2 == 0) {
                        putNamespace(location, ns);
                    } else {
                        deleteNamespace(location);
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                    fail("Failed in test: " + number);
                }
            }
        };
        threadPool.execute(runner);
    }
    threadPool.shutdown();
    threadPool.awaitTermination(30000, TimeUnit.MILLISECONDS);
    threadPool.shutdownNow();
}

From source file:org.apache.hadoop.yarn.util.TestFSDownload.java

@Test(timeout = 10000)
public void testDownload() throws IOException, URISyntaxException, InterruptedException {
    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
    FileContext files = FileContext.getLocalFSFileContext(conf);
    final Path basedir = files.makeQualified(new Path("target", TestFSDownload.class.getSimpleName()));
    files.mkdir(basedir, null, true);
    conf.setStrings(TestFSDownload.class.getName(), basedir.toString());

    Map<LocalResource, LocalResourceVisibility> rsrcVis = new HashMap<LocalResource, LocalResourceVisibility>();

    Random rand = new Random();
    long sharedSeed = rand.nextLong();
    rand.setSeed(sharedSeed);
    System.out.println("SEED: " + sharedSeed);

    Map<LocalResource, Future<Path>> pending = new HashMap<LocalResource, Future<Path>>();
    ExecutorService exec = Executors.newSingleThreadExecutor();
    LocalDirAllocator dirs = new LocalDirAllocator(TestFSDownload.class.getName());
    int[] sizes = new int[10];
    for (int i = 0; i < 10; ++i) {
        sizes[i] = rand.nextInt(512) + 512;
        LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
        if (i % 2 == 1) {
            vis = LocalResourceVisibility.APPLICATION;
        }
        Path p = new Path(basedir, "" + i);
        LocalResource rsrc = createFile(files, p, sizes[i], rand, vis);
        rsrcVis.put(rsrc, vis);
        Path destPath = dirs.getLocalPathForWrite(basedir.toString(), sizes[i], conf);
        destPath = new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet()));
        FSDownload fsd = new FSDownload(files, UserGroupInformation.getCurrentUser(), conf, destPath, rsrc);
        pending.put(rsrc, exec.submit(fsd));
    }

    exec.shutdown();
    while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS)) {
        // keep waiting until all submitted downloads have finished
    }
    for (Future<Path> path : pending.values()) {
        Assert.assertTrue(path.isDone());
    }

    try {
        for (Map.Entry<LocalResource, Future<Path>> p : pending.entrySet()) {
            Path localized = p.getValue().get();
            assertEquals(sizes[Integer.parseInt(localized.getName())], p.getKey().getSize());

            FileStatus status = files.getFileStatus(localized.getParent());
            FsPermission perm = status.getPermission();
            assertEquals("Cache directory permissions are incorrect", new FsPermission((short) 0755), perm);

            status = files.getFileStatus(localized);
            perm = status.getPermission();
            System.out
                    .println("File permission " + perm + " for rsrc vis " + p.getKey().getVisibility().name());
            assert (rsrcVis.containsKey(p.getKey()));
            Assert.assertTrue("Private file should be 500",
                    perm.toShort() == FSDownload.PRIVATE_FILE_PERMS.toShort());
        }
    } catch (ExecutionException e) {
        throw new IOException("Failed exec", e);
    }
}

From source file:io.hops.security.TestUsersGroups.java

public void testConcurrentSetSameOwner(int cacheTime, int cacheSize) throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set(CommonConfigurationKeys.HOPS_UG_CACHE_SECS, Integer.toString(cacheTime));
    conf.set(CommonConfigurationKeys.HOPS_UG_CACHE_SIZE, Integer.toString(cacheSize));
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();

    DistributedFileSystem dfs = cluster.getFileSystem();
    Path base = new Path("/base");
    dfs.mkdirs(base);

    final String userName = "user";
    final String groupName = "group";
    final int CONCURRENT_USERS = 100;
    ExecutorService executorService = Executors.newFixedThreadPool(CONCURRENT_USERS);
    List<Callable<Boolean>> callables = new ArrayList<>();

    for (int i = 0; i < CONCURRENT_USERS; i++) {
        Path file = new Path(base, "file" + i);
        dfs.create(file).close();
        callables.add(new SetOwner(dfs, file, userName, groupName));
    }

    List<Future<Boolean>> futures = executorService.invokeAll(callables);
    executorService.shutdown();
    executorService.awaitTermination(1, TimeUnit.SECONDS);

    for (Future<Boolean> f : futures) {
        assertTrue(f.get());
    }
    cluster.shutdown();
}

From source file:com.serphacker.serposcope.task.proxy.ProxyChecker.java

@Override
public void run() {

    LOG.info("starting proxy checking task, threads = {}, timeout in MS = {}", nThread, timeoutMS);

    long start = System.currentTimeMillis();

    List<Proxy> proxies = db.proxy.list();
    if (proxies == null || proxies.isEmpty()) {
        LOG.debug("no proxy to check");
        return;
    }

    totalProxies = proxies.size();

    ExecutorService executor = Executors.newFixedThreadPool(nThread);
    db.proxy.updateStatus(Proxy.Status.UNCHECKED,
            proxies.stream().map((t) -> t.getId()).collect(Collectors.toList()));

    for (Proxy proxy : proxies) {
        executor.submit(new Runnable() {
            @Override
            public void run() {
                ScrapClient cli = new ScrapClient();

                cli.setTimeout(timeoutMS);
                ScrapProxy scrapProxy = proxy.toScrapProxy();
                cli.setProxy(scrapProxy);

                LOG.info("checking {}", scrapProxy);

                Proxy.Status proxyStatus = Proxy.Status.ERROR;

                int httpStatus = cli.get(judgeUrl);
                if (httpStatus == 200 && cli.getContentAsString() != null) {
                    Matcher matcher = PATTERN_IP.matcher(cli.getContentAsString());
                    if (matcher.find()) {
                        proxy.setRemoteip(matcher.group(1));
                        proxyStatus = Proxy.Status.OK;
                    }
                }

                proxy.setStatus(proxyStatus);
                proxy.setLastCheck(LocalDateTime.now());
                db.proxy.update(proxy);

                checked.incrementAndGet();
            }
        });
    }

    executor.shutdown();
    try {
        executor.awaitTermination(1, TimeUnit.HOURS);
    } catch (InterruptedException ex) {
        executor.shutdownNow();
    }
    LOG.info("proxy checking finished in {}",
            DurationFormatUtils.formatDurationHMS(System.currentTimeMillis() - start));
}

From source file:io.fabric8.kubernetes.pipeline.BuildImageStepExecution.java

@Override
protected ImageInspect run() throws Exception {
    return workspace.getChannel().call(new MasterToSlaveCallable<ImageInspect, Exception>() {
        @Override
        public ImageInspect call() throws Exception {
            ExecutorService executorService = Executors.newFixedThreadPool(2);
            try {
                Future<Boolean> createTarFuture;
                Future<ImageInspect> buildImageFuture;
                try (PipedInputStream pin = new PipedInputStream();
                        PipedOutputStream pout = new PipedOutputStream(pin)) {

                    createTarFuture = executorService.submit(new CreateTarTask(pout));
                    buildImageFuture = executorService.submit(new BuildImageTask(pin));
                }

                //Wait for the two tasks to complete.
                if (!createTarFuture.get(step.getTimeout(), TimeUnit.MILLISECONDS)) {
                    listener.getLogger().println("Failed to create docker image tarball.");
                }

                ImageInspect imageInspect = buildImageFuture.get(step.getTimeout(), TimeUnit.MILLISECONDS);
                if (imageInspect == null) {
                    throw new RuntimeException("Failed to build docker image.");
                } else {
                    return imageInspect;
                }
            } finally {
                executorService.shutdown();
                // Force shutdown only if the tasks did not finish within the timeout.
                if (!executorService.awaitTermination(30, TimeUnit.SECONDS)) {
                    executorService.shutdownNow();
                }
            }
        }
    });
}

From source file:com.opengamma.bbg.replay.BloombergTickWriterTest.java

@Test(invocationCount = 5, successPercentage = 19)
public void performance() throws Exception {
    ExecutorService writerExecutor = Executors.newSingleThreadExecutor();
    Future<?> writerFuture = writerExecutor.submit(_writer);

    double nStartTime = System.currentTimeMillis();

    //create ticks generators
    List<RandomTicksGeneratorJob> ticksGeneratorsList = new ArrayList<RandomTicksGeneratorJob>();
    List<Thread> ticksGeneratorThreads = new ArrayList<Thread>();
    for (int i = 0; i < TICKS_GENERATOR_THREAD_SIZE; i++) {
        RandomTicksGeneratorJob ticksGeneratorJob = new RandomTicksGeneratorJob(
                new ArrayList<String>(_ticker2buid.keySet()), _allTicksQueue);
        ticksGeneratorsList.add(ticksGeneratorJob);
        Thread thread = new Thread(ticksGeneratorJob, "TicksGenerator" + i);
        thread.start();
        ticksGeneratorThreads.add(thread);
    }

    s_logger.info("Test running for 1min to gather stats");
    Thread.sleep(RUN_DURATION);

    for (RandomTicksGeneratorJob ticksGeneratorJob : ticksGeneratorsList) {
        ticksGeneratorJob.terminate();
    }

    //wait for all ticksGenerator threads to finish
    for (Thread thread : ticksGeneratorThreads) {
        thread.join();
    }

    //send terminate message for tickWriter to terminate
    sendTerminateMessage();

    //test should fail if writer throws an exception
    writerFuture.get();
    writerExecutor.shutdown();
    writerExecutor.awaitTermination(1, TimeUnit.SECONDS);

    double nRunDuration = System.currentTimeMillis() - nStartTime;

    double nTicks = ((double) _writer.getNTicks() / nRunDuration) * 1000;
    s_logger.info("ticks {}/s", nTicks);
    double nWrites = ((double) _writer.getNWrites() / nRunDuration) * 1000;
    s_logger.info("fileOperations {}/s", nWrites);
    double nBlocks = (double) _writer.getNBlocks() / (double) _writer.getNWrites();
    s_logger.info("average blocks {}bytes", nBlocks);

    assertTrue("reportInterval > testRunTime", REPORT_INTERVAL > nRunDuration);
    if ((nWrites * nBlocks) < WRITER_SPEED_THRESHOLD) {
        s_logger.warn("BloombergTickWriter looks like running really slower than {}b/s",
                WRITER_SPEED_THRESHOLD);
    }
}

From source file:com.gs.collections.impl.parallel.SerialParallelLazyPerformanceTest.java

private void detect(FastList<Integer> collection, int index, boolean expectedResult) {
    MutableList<Runnable> runnables = FastList.newList();
    runnables.add(() -> this.basicSerialDetectPerformance(collection, PREDICATES_LAMBDA.get(index),
            expectedResult, SERIAL_RUN_COUNT));
    int cores = Runtime.getRuntime().availableProcessors();
    ExecutorService service = Executors.newFixedThreadPool(cores);
    runnables.add(() -> this.basicParallelLazyDetectPerformance(collection, PREDICATES_LAMBDA.get(index),
            "Lambda", expectedResult, PARALLEL_RUN_COUNT, cores, service));
    runnables.add(() -> this.basicParallelLazyDetectPerformance(collection, PREDICATES.get(index), "Predicate",
            expectedResult, PARALLEL_RUN_COUNT, cores, service));
    runnables.add(() -> this.basicParallelLazyDetectPerformance(collection, PREDICATES_METHOD_REF.get(index),
            "MethodRef", expectedResult, PARALLEL_RUN_COUNT, cores, service));
    this.shuffleAndRun(runnables);
    service.shutdown();
    try {
        service.awaitTermination(1, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}

From source file:org.carbondata.processing.globalsurrogategenerator.LevelGlobalSurrogateGeneratorThread.java

@Override
public Void call() throws Exception {

    long currentTimeMillis = System.currentTimeMillis();
    long currentTimeMillis1 = System.currentTimeMillis();

    Hierarchy[] extractHierarchies = CarbonSchemaParser.extractHierarchies(schema, dimension);
    Level cubeLevel = extractHierarchies[0].levels[0];
    boolean isPartitionColumn = partitionColumn.equals(cubeLevel.name);

    RelationOrJoin relation = extractHierarchies[0].relation;
    String hierarchyTable = relation == null ? tableName : ((Table) extractHierarchies[0].relation).name;
    String levelFileName = hierarchyTable + '_' + cubeLevel.name;

    List<PartitionMemberVo> partitionMemberVoList = new ArrayList<PartitionMemberVo>(
            CarbonCommonConstants.CONSTANT_SIZE_TEN);

    ExecutorService ex = Executors.newFixedThreadPool(10);

    PartitionMemberVo memberVo = null;

    List<Future<Map<String, Integer>>> submitList = new ArrayList<Future<Map<String, Integer>>>(
            CarbonCommonConstants.CONSTANT_SIZE_TEN);

    for (int i = 0; i < partitionLocation.length; i++) {

        int partitionLength = partitionLocation[i].length;
        if (partitionLength == 0) {
            LOGGER.info("partition length is 0");
            continue;
        }
        String path = partitionLocation[i][partitionLength - 1] + '/' + levelFileName + ".level";

        FileType fileType = FileFactory.getFileType(path);
        if (!FileFactory.isFileExist(path, fileType)) {
            LOGGER.info("File does not exist at path :: " + path);
            continue;
        }
        CarbonFile carbonFile = FileFactory.getCarbonFile(path, fileType);

        memberVo = new PartitionMemberVo();
        memberVo.setPath(partitionLocation[i][partitionLength - 1]);
        partitionMemberVoList.add(memberVo);
        Future<Map<String, Integer>> submit = ex.submit(new ReaderThread(carbonFile));
        submitList.add(submit);
    }

    ex.shutdown();
    ex.awaitTermination(1, TimeUnit.DAYS);
    if (partitionMemberVoList.size() < 1) {
        return null;
    }
    int maxSequenceKey = getMaxSequenceKeyAssigned(levelFileName + ".globallevel");
    int index = 0;
    for (Future<Map<String, Integer>> future : submitList) {
        partitionMemberVoList.get(index).setMembersMap(future.get());
        index++;
    }

    LOGGER.info("Time Taken to read surrogate for Level: " + levelFileName + " : "
            + (System.currentTimeMillis() - currentTimeMillis));

    currentTimeMillis = System.currentTimeMillis();

    ex = Executors.newFixedThreadPool(5);

    createGlobalSurrogateKey(currentTimeMillis, currentTimeMillis1, isPartitionColumn, levelFileName,
            partitionMemberVoList, ex, maxSequenceKey);

    return null;
}