Example usage for java.util.concurrent ThreadPoolExecutor isTerminated

List of usage examples for java.util.concurrent ThreadPoolExecutor isTerminated

Introduction

On this page you can find example usage for java.util.concurrent ThreadPoolExecutor.isTerminated().

Prototype

public boolean isTerminated() 
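
isTerminated() returns true only after shutdown() or shutdownNow() has been called and every submitted task has since completed; it is never true for a still-running pool. The examples below poll it in a loop after shutdown(), often together with awaitTermination. A minimal sketch of that pattern (the pool size and task body are illustrative, not taken from any example below):

import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class IsTerminatedExample {
    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor pool = (ThreadPoolExecutor) Executors.newFixedThreadPool(2);
        pool.execute(() -> System.out.println("task done"));

        pool.shutdown(); // no new tasks accepted; termination can now complete
        while (!pool.isTerminated()) {
            // Block up to one second per check instead of spinning.
            pool.awaitTermination(1, TimeUnit.SECONDS);
        }
        System.out.println("isTerminated: " + pool.isTerminated()); // prints true
    }
}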

Usage

From source file:httpscheduler.HttpScheduler.java

/**
 * @param args the command line arguments
 * @throws java.lang.Exception
 */

public static void main(String[] args) throws Exception {

    if (args.length != 2) {
        System.err.println("Invalid command line parameters for worker");
        System.exit(-1);
    }

    int fixedExecutorSize = 4;

    // Create a fixed-size executor
    ThreadPoolExecutor taskCommExecutor = new ThreadPoolExecutor(fixedExecutorSize, fixedExecutorSize, 0L,
            TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
    // Used for late binding
    JobMap jobMap = new JobMap();

    // Set port number
    int port = Integer.parseInt(args[0]);

    // Set worker mode
    String mode = args[1].substring(2);

    // Set up the HTTP protocol processor
    HttpProcessor httpproc = HttpProcessorBuilder.create().add(new ResponseDate())
            .add(new ResponseServer("Test/1.1")).add(new ResponseContent()).add(new ResponseConnControl())
            .build();

    // Set up request handlers
    UriHttpRequestHandlerMapper reqistry = new UriHttpRequestHandlerMapper();
    // Different handlers for late binding and generic cases
    if (mode.equals("late"))
        reqistry.register("*", new LateBindingRequestHandler(taskCommExecutor, jobMap));
    else
        reqistry.register("*", new GenericRequestHandler(taskCommExecutor, mode));

    // Set up the HTTP service
    HttpService httpService = new HttpService(httpproc, reqistry);

    SSLServerSocketFactory sf = null;

    // create a thread to listen for possible client available connections
    Thread t;
    if (mode.equals("late"))
        t = new LateBindingRequestListenerThread(port, httpService, sf);
    else
        t = new GenericRequestListenerThread(port, httpService, sf);
    System.out.println("Request Listener Thread created");
    t.setDaemon(false);
    t.start();

    // main thread should wait for the listener to exit before shutting down
    // the task executor pool
    t.join();

    // Shut down the task executor pool and busy-wait for any taskCommExecutor
    // thread still running. isTerminated() only reports true once shutdown()
    // has been called and all tasks have completed.
    taskCommExecutor.shutdown();
    while (!taskCommExecutor.isTerminated()) {
    }

    System.out.println("Finished all task communication executor threads");
    System.out.println("Finished all tasks");

}

From source file:com.guns.media.tools.yuv.MediaTool.java

/**
 * @param args the command line arguments
 */
public static void main(String[] args) throws IOException {

    try {
        Options options = getOptions();
        CommandLine cmd = null;
        int offset = 0;
        CommandLineParser parser = new GnuParser();
        cmd = parser.parse(options, args);

        if (cmd.hasOption("help")) {
            printHelp(options);
            exit(1);
        }

        if (cmd.hasOption("offset")) {
            offset = Integer.parseInt(cmd.getOptionValue("offset"));
        }
        int frame = Integer.parseInt(cmd.getOptionValue("f"));

        //  int scale = new Integer(args[2]);
        BufferedInputStream if1 = new BufferedInputStream(new FileInputStream(cmd.getOptionValue("if1")));
        BufferedInputStream if2 = new BufferedInputStream(new FileInputStream(cmd.getOptionValue("if2")));

        DataStorage.create(Integer.parseInt(cmd.getOptionValue("f")));

        int width = Integer.parseInt(cmd.getOptionValue("w"));
        int height = Integer.parseInt(cmd.getOptionValue("h"));
        LookUp.initSSYUV(width, height);
        //  int[][] frame1 = new int[width][height];
        //  int[][] frame2 = new int[width][height];
        int nRead;
        int fRead;
        byte[] data = new byte[width * height + ((width * height) / 2)];
        byte[] data1 = new byte[width * height + ((width * height) / 2)];
        int frames = 0;
        long start_ms = System.currentTimeMillis() / 1000L;
        long end_ms = start_ms;

        if (offset > 0) {
            if1.skip(((width * height + ((width * height) / 2)) * offset));
        } else if (offset < 0) {
            if2.skip(((width * height + ((width * height) / 2)) * (-1 * offset)));
        }

        if (cmd.hasOption("psnr")) {
            ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
            while ((nRead = if1.read(data)) != -1 && ((fRead = if2.read(data1)) != -1) && frames < frame) {
                byte[] data_out = data.clone();
                byte[] data1_out = data1.clone();

                PSNRCalculatorThread wt = new PSNRCalculatorThread(data_out, data1_out, frames, width, height);
                executor.execute(wt);
                frames++;

            }
            executor.shutdown();
            // Busy-wait so every PSNR task has stored its result before it is
            // read back below; isTerminated() is true once all tasks finish.
            while (!executor.isTerminated()) {
            }
            end_ms = System.currentTimeMillis() / 1000L;

            System.out.println("Frame Rate :" + frames / ((end_ms - start_ms)));
            for (int i = 0; i < frames; i++) {
                System.out.println(
                        i + "," + 10 * Math.log10((255 * 255) / (DataStorage.getFrame(i) / (width * height))));

            }
        }
        if (cmd.hasOption("sub")) {

            RandomAccessFile raf = new RandomAccessFile(cmd.getOptionValue("o"), "rw");

            ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
            while ((nRead = if1.read(data)) != -1 && ((fRead = if2.read(data1)) != -1)) {
                byte[] data_out = data.clone();
                byte[] data1_out = data1.clone();

                ImageSubstractThread wt = new ImageSubstractThread(data_out, data1_out, frames, width, height,
                        raf);
                //wt.run();
                executor.execute(wt);

                frames++;

            }
            executor.shutdown();
            // Busy-wait so every subtraction thread has finished writing
            // before raf is closed below.
            while (!executor.isTerminated()) {
            }
            end_ms = System.currentTimeMillis() / 1000L;
            System.out.println("Frame Rate :" + frames / ((end_ms - start_ms)));

            raf.close();
        }
        if (cmd.hasOption("ss") && !cmd.getOptionValue("o").matches("-")) {

            RandomAccessFile raf = new RandomAccessFile(cmd.getOptionValue("o"), "rw");

            // RandomAccessFile ra =  new RandomAccessFile(cmd.getOptionValue("o"), "rw");
            // MappedByteBuffer raf = new RandomAccessFile(cmd.getOptionValue("o"), "rw").getChannel().map(FileChannel.MapMode.READ_WRITE, 0, ((width*height)+(width*height/2))*frame);
            ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
            while ((nRead = if1.read(data)) != -1 && ((fRead = if2.read(data1)) != -1) && frames < frame) {
                byte[] data_out = data.clone();
                byte[] data1_out = data1.clone();

                SidebySideImageThread wt = new SidebySideImageThread(data_out, data1_out, frames, width, height,
                        raf);
                // MPSidebySideImageThread wt = new MPSidebySideImageThread(data_out, data1_out, frames, width, height, ra);
                frames++;
                // wt.run();

                executor.execute(wt);
            }
            executor.shutdown();
            end_ms = System.currentTimeMillis() / 1000L;

            // Busy-wait until every side-by-side frame has been written
            // before raf is closed below.
            while (!executor.isTerminated()) {
            }

            raf.close();
        }
        if (cmd.hasOption("ss") && cmd.getOptionValue("o").matches("-")) {

            PrintStream stdout = new PrintStream(System.out);

            // RandomAccessFile ra =  new RandomAccessFile(cmd.getOptionValue("o"), "rw");
            // MappedByteBuffer raf = new RandomAccessFile(cmd.getOptionValue("o"), "rw").getChannel().map(FileChannel.MapMode.READ_WRITE, 0, ((width*height)+(width*height/2))*frame);
            // ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
            while ((nRead = if1.read(data)) != -1 && ((fRead = if2.read(data1)) != -1) && frames < frame) {
                byte[] data_out = data.clone();
                byte[] data1_out = data1.clone();

                SidebySideImageThread wt = new SidebySideImageThread(data_out, data1_out, frames, width, height,
                        stdout);
                // MPSidebySideImageThread wt = new MPSidebySideImageThread(data_out, data1_out, frames, width, height, ra);
                frames++;
                // wt.run();

                wt.run();
            }

            end_ms = System.currentTimeMillis() / 1000L;
            System.out.println("Frame Rate :" + frames / ((end_ms - start_ms)));

            stdout.close();
        }
        if (cmd.hasOption("image")) {

            System.setProperty("java.awt.headless", "true");
            //ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
            ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
            while ((nRead = if1.read(data)) != -1 && ((fRead = if2.read(data1)) != -1)) {

                if (frames == frame) {
                    byte[] data_out = data.clone();
                    byte[] data1_out = data1.clone();

                    Frame f1 = new Frame(data_out, width, height, Frame.YUV420);
                    Frame f2 = new Frame(data1_out, width, height, Frame.YUV420);
                    //       System.out.println(cmd.getOptionValue("o"));
                    ExtractImageThread wt = new ExtractImageThread(f1, frames,
                            cmd.getOptionValue("if1") + "frame1-" + cmd.getOptionValue("o"));
                    ExtractImageThread wt1 = new ExtractImageThread(f2, frames,
                            cmd.getOptionValue("if2") + "frame2-" + cmd.getOptionValue("o"));
                    //   executor.execute(wt);
                    executor.execute(wt);
                    executor.execute(wt1);
                }
                frames++;

            }
            executor.shutdown();
            //  executor.shutdown();
            end_ms = System.currentTimeMillis() / 1000L;
            System.out.println("Frame Rate :" + frames / ((end_ms - start_ms)));
        }

    } catch (ParseException ex) {
        Logger.getLogger(MediaTool.class.getName()).log(Level.SEVERE, null, ex);
    }
}

From source file:eu.cassandra.sim.utilities.Utils.java

public static void printExecutorSummary(ThreadPoolExecutor executor) {
    System.out.println(String.format(
            "[monitor] [%d/%d] Active: %d, Completed: %d, Task: %d, isShutdown: %s, isTerminated: %s",
            executor.getPoolSize(), executor.getCorePoolSize(), executor.getActiveCount(),
            executor.getCompletedTaskCount(), executor.getTaskCount(), executor.isShutdown(),
            executor.isTerminated()));
}
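
A hypothetical driver for the helper above (the pool, the sleep tasks, and the class name ExecutorSummaryDemo are assumptions for illustration; only printExecutorSummary comes from the example). It prints a summary line until isTerminated() flips to true:

import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import eu.cassandra.sim.utilities.Utils;

public class ExecutorSummaryDemo {
    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(4);
        for (int i = 0; i < 16; i++) {
            executor.execute(() -> {
                try {
                    Thread.sleep(500); // stand-in for real work
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
        }
        executor.shutdown();
        // isTerminated() only becomes true after shutdown() has been called
        // and every queued task has completed.
        while (!executor.isTerminated()) {
            Utils.printExecutorSummary(executor);
            executor.awaitTermination(1, TimeUnit.SECONDS);
        }
        Utils.printExecutorSummary(executor); // isTerminated: true
    }
}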

From source file:org.apache.hadoop.hbase.master.procedure.SplitTableRegionProcedure.java

/**
 * Create Split directory
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private Pair<Integer, Integer> splitStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs)
        throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final Configuration conf = env.getMasterConfiguration();

    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion and finally checks for any exception
    //
    // Note: splitStoreFiles creates daughter region dirs under the parent splits dir
    // Nothing to unroll here on failure -- re-running createSplitsDir will
    // clean this up.
    int nbFiles = 0;
    for (String family : regionFs.getFamilies()) {
        Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
        if (storeFiles != null) {
            nbFiles += storeFiles.size();
        }
    }
    if (nbFiles == 0) {
        // no file needs to be split.
        return new Pair<Integer, Integer>(0, 0);
    }
    // Default max #threads to use is the smaller of table's configured number of blocking store
    // files or the available number of logical cores.
    int defMaxThreads = Math.min(
            conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT),
            Runtime.getRuntime().availableProcessors());
    // Max #threads is the smaller of the number of storefiles or the default max determined above.
    int maxThreads = Math.min(conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX, defMaxThreads), nbFiles);
    LOG.info("Preparing to split " + nbFiles + " storefiles for region " + parentHRI + " using " + maxThreads
            + " threads");
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(maxThreads,
            Threads.getNamedThreadFactory("StoreFileSplitter-%1$d"));
    List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);

    // Split each store file.
    final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
    for (String family : regionFs.getFamilies()) {
        final HColumnDescriptor hcd = htd.getFamily(family.getBytes());
        final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
        if (storeFiles != null && storeFiles.size() > 0) {
            final CacheConfig cacheConf = new CacheConfig(conf, hcd);
            for (StoreFileInfo storeFileInfo : storeFiles) {
                StoreFileSplitter sfs = new StoreFileSplitter(regionFs, family.getBytes(), new StoreFile(
                        mfs.getFileSystem(), storeFileInfo, conf, cacheConf, hcd.getBloomFilterType()));
                futures.add(threadPool.submit(sfs));
            }
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    long fileSplitTimeout = conf.getLong("hbase.master.fileSplitTimeout", 30000);
    try {
        boolean stillRunning = !threadPool.awaitTermination(fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread to shutdown completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    int daughterA = 0;
    int daughterB = 0;
    // Look for any exception
    for (Future<Pair<Path, Path>> future : futures) {
        try {
            Pair<Path, Path> p = future.get();
            daughterA += p.getFirst() != null ? 1 : 0;
            daughterB += p.getSecond() != null ? 1 : 0;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Split storefiles for region " + parentHRI + " Daughter A: " + daughterA
                + " storefiles, Daughter B: " + daughterB + " storefiles.");
    }
    return new Pair<Integer, Integer>(daughterA, daughterB);
}

From source file:org.apache.hadoop.hbase.regionserver.IndexSplitTransaction.java

private void splitStoreFiles(final Map<byte[], List<StoreFile>> hstoreFilesToSplit) throws IOException {
    if (hstoreFilesToSplit == null) {
        // Could be null because close didn't succeed -- for now consider it fatal
        throw new IOException("Close returned empty list of StoreFiles");
    }
    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion and finally checks for any exception
    int nbFiles = hstoreFilesToSplit.size();
    if (nbFiles == 0) {
        // no file needs to be split.
        return;
    }
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("StoreFileSplitter-%1$d");
    ThreadFactory factory = builder.build();
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(nbFiles, factory);
    List<Future<Void>> futures = new ArrayList<Future<Void>>(nbFiles);

    // Split each store file.
    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
        for (StoreFile sf : entry.getValue()) {
            StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
            futures.add(threadPool.submit(sfs));
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    try {
        boolean stillRunning = !threadPool.awaitTermination(this.fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread to shutdown completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    // Look for any exception
    for (Future<Void> future : futures) {
        try {
            future.get();
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }
}

From source file:org.apache.hadoop.hbase.regionserver.SplitTransactionImpl.java

/**
 * Creates reference files for the top and bottom halves of the split.
 * @param hstoreFilesToSplit map of store files to create half file references for.
 * @return the number of reference files that were created.
 * @throws IOException
 */
private Pair<Integer, Integer> splitStoreFiles(final Map<byte[], List<StoreFile>> hstoreFilesToSplit)
        throws IOException {
    if (hstoreFilesToSplit == null) {
        // Could be null because close didn't succeed -- for now consider it fatal
        throw new IOException("Close returned empty list of StoreFiles");
    }
    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion and finally checks for any exception
    int nbFiles = 0;
    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
        nbFiles += entry.getValue().size();
    }
    if (nbFiles == 0) {
        // no file needs to be split.
        return new Pair<Integer, Integer>(0, 0);
    }
    // Default max #threads to use is the smaller of table's configured number of blocking store
    // files or the available number of logical cores.
    int defMaxThreads = Math.min(
            parent.conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT),
            Runtime.getRuntime().availableProcessors());
    // Max #threads is the smaller of the number of storefiles or the default max determined above.
    int maxThreads = Math.min(parent.conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX, defMaxThreads), nbFiles);
    LOG.info("Preparing to split " + nbFiles + " storefiles for region " + this.parent + " using " + maxThreads
            + " threads");
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("StoreFileSplitter-%1$d");
    ThreadFactory factory = builder.build();
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(maxThreads, factory);
    List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);

    // Split each store file.
    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
        for (StoreFile sf : entry.getValue()) {
            StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
            futures.add(threadPool.submit(sfs));
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    try {
        boolean stillRunning = !threadPool.awaitTermination(this.fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread to shutdown completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    int created_a = 0;
    int created_b = 0;
    // Look for any exception
    for (Future<Pair<Path, Path>> future : futures) {
        try {
            Pair<Path, Path> p = future.get();
            created_a += p.getFirst() != null ? 1 : 0;
            created_b += p.getSecond() != null ? 1 : 0;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Split storefiles for region " + this.parent + " Daughter A: " + created_a
                + " storefiles, Daughter B: " + created_b + " storefiles.");
    }
    return new Pair<Integer, Integer>(created_a, created_b);
}

From source file:org.trnltk.apps.experiments.AmbiguityMatrixApp.java

private void waitUntilThreadPoolToTerminate(ThreadPoolExecutor pool) throws InterruptedException {
    pool.shutdown();
    while (!pool.isTerminated()) {
        System.out.println("Waiting pool to be terminated!");
        pool.awaitTermination(1000, TimeUnit.MILLISECONDS);
    }
}
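
Note that, unlike the empty busy-wait loops in the earlier examples, this helper blocks for up to a second per check via awaitTermination, so the waiting thread reports progress without spinning at full CPU.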

From source file:org.trnltk.apps.morphology.contextless.parser.CachingMorphologicParserApp.java

@App("Parse all sample corpus. Does not do an offline analysis to add most frequent words to cache in advance.")
public void parse8MWords() throws Exception {
    /*
     Total time :0:07:29.799
     Nr of tokens : 18362187
     Avg time : 0.024495938310616267 ms
    */
    final Set<File> files = SampleFiles.oneMillionSentencesTokenizedFiles();

    final LinkedList<String> words = new LinkedList<String>();
    final HashSet<String> uniqueWords = new HashSet<String>();

    for (File tokenizedFile : files) {
        final List<String> lines = Files.readLines(tokenizedFile, Charsets.UTF_8);
        for (String line : lines) {
            final ArrayList<String> strings = Lists
                    .newArrayList(Splitter.on(" ").trimResults().omitEmptyStrings().split(line));
            words.addAll(strings);
            uniqueWords.addAll(strings);
        }
    }

    System.out.println("Number of words : " + words.size());
    System.out.println("Number of unique words : " + uniqueWords.size());
    System.out.println("======================");

    final MorphologicParserCache l1Cache = new LRUMorphologicParserCache(NUMBER_OF_THREADS,
            INITIAL_L1_CACHE_SIZE, MAX_L1_CACHE_SIZE);

    final ThreadPoolExecutor pool = (ThreadPoolExecutor) Executors.newFixedThreadPool(NUMBER_OF_THREADS);

    final MorphologicParser[] parsers = new MorphologicParser[NUMBER_OF_THREADS];
    for (int i = 0; i < parsers.length; i++) {
        parsers[i] = new CachingMorphologicParser(new TwoLevelMorphologicParserCache(BULK_SIZE, l1Cache),
                contextlessMorphologicParser, true);
    }

    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    for (int i = 0; i < words.size(); i = i + BULK_SIZE) {
        final MorphologicParser parser = parsers[(i / BULK_SIZE) % NUMBER_OF_THREADS];
        int start = i;
        int end = i + BULK_SIZE < words.size() ? i + BULK_SIZE : words.size();
        final List<String> subWordList = words.subList(start, end);
        final int wordIndex = i;
        pool.execute(new BulkParseCommand(parser, subWordList, wordIndex, false));
    }

    pool.shutdown();
    while (!pool.isTerminated()) {
        System.out.println("Waiting pool to be terminated!");
        pool.awaitTermination(1000, TimeUnit.MILLISECONDS);
    }

    stopWatch.stop();

    System.out.println("Total time :" + stopWatch.toString());
    System.out.println("Nr of tokens : " + words.size());
    System.out.println("Avg time : " + (stopWatch.getTime() * 1.0d) / (words.size() * 1.0d) + " ms");
}

From source file:org.trnltk.apps.morphology.contextless.parser.CachingMorphologicParserApp.java

@App("Parse all sample corpus. Does an offline analysis to add most frequent words to cache in advance.")
public void parseWordsOfOneMillionSentences_withOfflineAnalysis() throws Exception {
    /*
    Total time :0:05:27.806
    Nr of tokens : 18362187
    Avg time : 0.01785223078274935 ms
    */
    LoggingSettings.turnOnLogger(LoggingSettings.Piece.FrequentWordAnalysis);

    final Set<File> files = SampleFiles.oneMillionSentencesTokenizedFiles();

    final List<String> words = new ArrayList<String>();
    final HashSet<String> uniqueWords = new HashSet<String>();

    for (File tokenizedFile : files) {
        final List<String> lines = Files.readLines(tokenizedFile, Charsets.UTF_8);
        for (String line : lines) {
            final ArrayList<String> strings = Lists
                    .newArrayList(Splitter.on(" ").trimResults().omitEmptyStrings().split(line));
            words.addAll(strings);
            uniqueWords.addAll(strings);
        }
    }

    System.out.println("Number of words : " + words.size());
    System.out.println("Number of unique words : " + uniqueWords.size());
    System.out.println("======================");

    final MorphologicParserCache staticCache = new MorphologicParserCache() {

        private ImmutableMap<String, List<MorphemeContainer>> cacheMap;
        private boolean built;

        @Override
        public List<MorphemeContainer> get(String input) {
            return this.cacheMap.get(input);
        }

        @Override
        public void put(String input, List<MorphemeContainer> morphemeContainers) {
            // do nothing
        }

        @Override
        public void putAll(Map<String, List<MorphemeContainer>> map) {
            // do nothing
        }

        @Override
        public void build(MorphologicParser parser) {
            final ImmutableMap.Builder<String, List<MorphemeContainer>> builder = new ImmutableMap.Builder<String, List<MorphemeContainer>>();
            final FrequentWordAnalysis.FrequentWordAnalysisResult result = new FrequentWordAnalysis().run(words,
                    0.75);

            final List<String> wordsToUseInCache = result.getWordsWithEnoughOccurrences();
            for (String word : wordsToUseInCache) {
                builder.put(word, contextlessMorphologicParser.parseStr(word));
            }
            this.cacheMap = builder.build();
            this.built = true;
        }

        @Override
        public boolean isNotBuilt() {
            return !this.built;
        }
    };

    final ThreadPoolExecutor pool = (ThreadPoolExecutor) Executors.newFixedThreadPool(NUMBER_OF_THREADS);

    final MorphologicParser[] parsers = new MorphologicParser[NUMBER_OF_THREADS];
    for (int i = 0; i < parsers.length; i++) {
        parsers[i] = new CachingMorphologicParser(staticCache, contextlessMorphologicParser, true);
    }

    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    for (int i = 0; i < words.size(); i = i + BULK_SIZE) {
        final MorphologicParser parser = parsers[(i / BULK_SIZE) % NUMBER_OF_THREADS];
        int start = i;
        int end = i + BULK_SIZE < words.size() ? i + BULK_SIZE : words.size();
        final List<String> subWordList = words.subList(start, end);
        final int wordIndex = i;
        pool.execute(new BulkParseCommand(parser, subWordList, wordIndex, false));
    }

    pool.shutdown();
    while (!pool.isTerminated()) {
        System.out.println("Waiting pool to be terminated!");
        pool.awaitTermination(1000, TimeUnit.MILLISECONDS);
    }

    stopWatch.stop();

    System.out.println("Total time :" + stopWatch.toString());
    System.out.println("Nr of tokens : " + words.size());
    System.out.println("Avg time : " + (stopWatch.getTime() * 1.0d) / (words.size() * 1.0d) + " ms");
}

From source file:org.trnltk.apps.morphology.contextless.parser.FolderContextlessMorphologicParsingApp.java

@App
public void parse8MWords_withOfflineAnalysis() throws Exception {
    final File folder = new File("D:\\devl\\data\\1MSentences");

    final List<File> files = new ArrayList<File>();

    for (File file : folder.listFiles()) {
        if (file.getName().endsWith("_tokenized.txt"))
            files.add(file);
    }

    final ThreadPoolExecutor pool = (ThreadPoolExecutor) Executors.newFixedThreadPool(NUMBER_OF_THREADS);

    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    for (File file : files) {
        final File targetFile = new File(file.getParent(),
                file.getName().substring(0, file.getName().length() - "_tokenized.txt".length())
                        + "_parsed.txt");
        final FileParseCommand fileParseCommand = new FileParseCommand(contextlessMorphologicParser, file,
                targetFile, false);
        pool.execute(fileParseCommand);
    }

    pool.shutdown();
    while (!pool.isTerminated()) {
        System.out.println("Waiting pool to be terminated!");
        pool.awaitTermination(1000, TimeUnit.MILLISECONDS);
    }

    stopWatch.stop();

    System.out.println("Total time :" + stopWatch.toString());
}