Example usage for com.google.common.collect Lists newArrayList

List of usage examples for com.google.common.collect Lists newArrayList

Introduction

On this page you can find example usages of com.google.common.collect.Lists.newArrayList, drawn from real projects.

Prototype

@GwtCompatible(serializable = true)
public static <E> ArrayList<E> newArrayList() 

Document

Creates a mutable, empty ArrayList instance (for Java 6 and earlier).
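
As a quick orientation before the project examples, here is a minimal, self-contained sketch of the method and its common sibling overloads (varargs and Iterable, both also in Guava's Lists); the class name and sample values are illustrative only.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import com.google.common.collect.Lists;

public class NewArrayListSketch {
    public static void main(String[] args) {
        // Empty, mutable list; equivalent to new ArrayList<String>().
        List<String> empty = Lists.newArrayList();
        empty.add("alpha");

        // Varargs overload: initializes the list with the given elements.
        List<String> fromVarargs = Lists.newArrayList("a", "b", "c");

        // Iterable overload: copies the elements of any Iterable.
        ArrayList<String> fromIterable = Lists.newArrayList(Arrays.asList("x", "y"));

        System.out.println(empty); // [alpha]
        System.out.println(fromVarargs); // [a, b, c]
        System.out.println(fromIterable); // [x, y]
    }
}

Since Java 7, the no-argument form is equivalent to new ArrayList<>() with the diamond operator, so the varargs and Iterable overloads are the main remaining convenience.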

Usage

From source file:edu.byu.nlp.data.app.AnnotationStream2Csv.java
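
This command-line tool flattens a JSON annotation stream into CSV, one row per annotation, instance, or annotator. Lists.newArrayList() creates the list that collects every raw annotation so the ANNOTATION branch can sort them by end timestamp.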

public static void main(String[] args) throws IOException {
    // parse CLI arguments
    new ArgumentParser(AnnotationStream2Csv.class).parseArgs(args);
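    // Note: jsonStream (and row/out used below) are static fields of this class,
    // bound from command-line flags by ArgumentParser; they are not shown in this excerpt.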
    Preconditions.checkNotNull(jsonStream, "You must provide a valid --json-stream!");

    Dataset data = readData(jsonStream);

    // optionally aggregate by instance
    String header = "annotator,start,end,annotation,label,source,num_correct_annotations,num_annotations,cum_num_annotations,num_annotators,cum_num_annotators\n";

    // iterate over instances and (optionally) annotations
    final StringBuilder bld = new StringBuilder();

    switch (row) {
    case ANNOTATION:

        // sort all annotations by end time
        Map<FlatInstance<SparseFeatureVector, Integer>, DatasetInstance> ann2InstMap = Maps
                .newIdentityHashMap();
        List<FlatInstance<SparseFeatureVector, Integer>> annotationList = Lists.newArrayList();
        for (DatasetInstance inst : data) {
            for (FlatInstance<SparseFeatureVector, Integer> ann : inst.getAnnotations().getRawAnnotations()) {
                ann2InstMap.put(ann, inst); // record the instance of each annotation
                annotationList.add(ann);
            }
        }
        Collections.sort(annotationList, new Comparator<FlatInstance<SparseFeatureVector, Integer>>() {
            @Override
            public int compare(FlatInstance<SparseFeatureVector, Integer> o1,
                    FlatInstance<SparseFeatureVector, Integer> o2) {
                // no null checking since we want to fail if annotation time is not set. 
                return Long.compare(o1.getEndTimestamp(), o2.getEndTimestamp());
            }
        });

        Set<Integer> annotators = Sets.newHashSet();
        for (Enumeration<FlatInstance<SparseFeatureVector, Integer>> item : Iterables2
                .enumerate(annotationList)) {
            FlatInstance<SparseFeatureVector, Integer> ann = item.getElement();
            DatasetInstance inst = ann2InstMap.get(ann);
            annotators.add(ann.getAnnotator());

            bld.append(ann.getAnnotator() + ",");
            bld.append(ann.getStartTimestamp() + ",");
            bld.append(ann.getEndTimestamp() + ",");
            bld.append(ann.getAnnotation() + ",");
            bld.append(inst.getLabel() + ",");
            bld.append(
                    data.getInfo().getIndexers().getInstanceIdIndexer().get(inst.getInfo().getSource()) + ",");
            bld.append((!inst.hasLabel() ? "NA" : ann.getAnnotation().equals(inst.getLabel()) ? 1 : 0) + ","); // num correct (equals, not ==, for boxed Integers)
            bld.append(1 + ","); // num annotations
            bld.append((item.getIndex() + 1) + ","); // cumulative num annotations
            bld.append(1 + ","); // num annotators
            bld.append(annotators.size() + ""); // cumulative num annotators
            bld.append("\n");
        }
        break;
    case INSTANCE:
        int cumNumAnnotations = 0;
        for (DatasetInstance inst : data) {
            cumNumAnnotations += inst.getInfo().getNumAnnotations();

            int numCorrectAnnotations = 0;
            // sum over all the annotators who put the correct answer (if available)
            if (inst.hasLabel()) {
                Integer correctLabel = inst.getLabel();
                for (int j = 0; j < data.getInfo().getNumAnnotators(); j++) {
                    numCorrectAnnotations += inst.getAnnotations().getLabelAnnotations()
                            .getRow(j)[correctLabel];
                }
            }

            bld.append("NA,");
            bld.append("NA,");
            bld.append("NA,");
            bld.append("NA,");
            bld.append(inst.getLabel() + ",");
            bld.append(inst.getInfo().getSource() + ",");
            bld.append(numCorrectAnnotations + ",");
            bld.append(inst.getInfo().getNumAnnotations() + ",");
            bld.append(cumNumAnnotations + ",");
            bld.append(inst.getInfo().getNumAnnotators() + ",");
            bld.append("NA"); // cumulative num annotators
            bld.append("\n");
        }
        break;

    case ANNOTATOR:
        Multiset<Integer> perAnnotatorAnnotationCounts = HashMultiset.create();
        Multiset<Integer> perAnnotatorCorrectAnnotationCounts = HashMultiset.create();
        for (DatasetInstance inst : data) {
            for (FlatInstance<SparseFeatureVector, Integer> ann : inst.getAnnotations().getRawAnnotations()) {
                int annotatorId = ann.getAnnotator();

                perAnnotatorAnnotationCounts.add(annotatorId);

                if (ann.getAnnotation().equals(inst.getLabel())) { // equals, not ==: labels are boxed Integers
                    perAnnotatorCorrectAnnotationCounts.add(annotatorId);
                }

            }
        }

        for (String annotatorId : data.getInfo().getAnnotatorIdIndexer()) {

            bld.append(annotatorId + ",");
            bld.append("NA,");
            bld.append("NA,");
            bld.append("NA,");
            bld.append("NA,");
            bld.append("NA,");
            bld.append(perAnnotatorCorrectAnnotationCounts.count(annotatorId) + ",");
            bld.append(perAnnotatorAnnotationCounts.count(annotatorId) + ",");
            bld.append("NA,");
            bld.append("1,"); // num annotators
            bld.append("NA"); // cumulative num annotators
            bld.append("\n");
        }

        break;

    default:
        Preconditions.checkArgument(false, "unknown row type: " + row);
        break;
    }

    // output to console
    if (out == null) {
        System.out.println(header);
        System.out.println(bld.toString());
    } else {
        File outfile = new File(out);
        Files.write(header, outfile, Charsets.UTF_8);
        Files.append(bld, outfile, Charsets.UTF_8);
    }

}

From source file:com.sina.dip.twill.HelloWorldMultipleRunnablesAnyOrder.java
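
This Apache Twill example launches an application on YARN. Lists.newArrayList() creates the list of application classpath entries, which is then filled by splitting the comma-separated YARN classpath configuration value.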

public static void main(String[] args) {
    String zkStr = "localhost:2181";

    YarnConfiguration yarnConfiguration = new YarnConfiguration();

    final TwillRunnerService twillRunner = new YarnTwillRunnerService(yarnConfiguration, zkStr);

    twillRunner.start();

    String yarnClasspath = yarnConfiguration.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            "/usr/lib/hadoop/*,/usr/lib/hadoop-0.20-mapreduce/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-yarn/*");

    List<String> applicationClassPaths = Lists.newArrayList();

    Iterables.addAll(applicationClassPaths, Splitter.on(",").split(yarnClasspath));

    final TwillController controller = twillRunner.prepare(new HelloWorldApplication())
            .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
            .withApplicationClassPaths(applicationClassPaths)
            .withBundlerClassAcceptor(new HadoopClassExcluder()).start();

    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            try {
                Futures.getUnchecked(controller.terminate());
            } finally {
                twillRunner.stop();
            }
        }
    });

    try {
        controller.awaitTerminated();
    } catch (ExecutionException e) {
        e.printStackTrace();
    }
}

From source file:mvm.rya.indexing.external.ExternalIndexMain.java
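
This Apache Rya utility scans Accumulo for precomputed SPARQL index tables. Lists.newArrayList() creates the list of ExternalTupleSet indexes that, when non-empty, is handed to the ExternalProcessor before the query is evaluated.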

public static void main(String[] args) throws Exception {
    Preconditions.checkArgument(args.length == 6, "java " + ExternalIndexMain.class.getCanonicalName()
            + " sparqlFile cbinstance cbzk cbuser cbpassword rdfTablePrefix.");

    final String sparqlFile = args[0];

    instStr = args[1];
    zooStr = args[2];
    userStr = args[3];
    passStr = args[4];
    tablePrefix = args[5];

    String queryString = FileUtils.readFileToString(new File(sparqlFile));

    // Look for Extra Indexes
    Instance inst = new ZooKeeperInstance(instStr, zooStr);
    Connector c = inst.getConnector(userStr, passStr.getBytes());

    System.out.println("Searching for Indexes");
    Map<String, String> indexTables = Maps.newLinkedHashMap();
    for (String table : c.tableOperations().list()) {
        if (table.startsWith(tablePrefix + "INDEX_")) {
            Scanner s = c.createScanner(table, new Authorizations());
            s.setRange(Range.exact(new Text("~SPARQL")));
            for (Entry<Key, Value> e : s) {
                indexTables.put(table, e.getValue().toString());
            }
        }
    }

    List<ExternalTupleSet> index = Lists.newArrayList();

    if (indexTables.isEmpty()) {
        System.out.println("No Index found");
    } else {
        for (String table : indexTables.keySet()) {
            String indexSparqlString = indexTables.get(table);
            System.out.println("====================== INDEX FOUND ======================");
            System.out.println(" table : " + table);
            System.out.println(" sparql : ");
            System.out.println(indexSparqlString);

            index.add(new AccumuloIndexSet(indexSparqlString, c, table));
        }
    }

    // Connect to Rya
    Sail s = getRyaSail();
    SailRepository repo = new SailRepository(s);
    repo.initialize();

    // Perform Query

    CountingTupleQueryResultHandler count = new CountingTupleQueryResultHandler();

    SailRepositoryConnection conn;
    if (index.isEmpty()) {
        conn = repo.getConnection();

    } else {
        ExternalProcessor processor = new ExternalProcessor(index);

        Sail processingSail = new ExternalSail(s, processor);
        SailRepository smartSailRepo = new SailRepository(processingSail);
        smartSailRepo.initialize();

        conn = smartSailRepo.getConnection();
    }

    startTime = System.currentTimeMillis();
    lastTime = startTime;
    System.out.println("Query Started");
    conn.prepareTupleQuery(QueryLanguage.SPARQL, queryString).evaluate(count);

    System.out.println("Count of Results found : " + count.i);
    System.out.println("Total query time (s) : " + (System.currentTimeMillis() - startTime) / 1000.);
}

From source file:org.carrot2.examples.clustering.ClusteringNonEnglishContent.java
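
This Carrot2 example clusters content in several languages. Lists.newArrayList() creates the list of Document objects, each constructed with an explicit LanguageCode, before the Lingo clustering algorithm runs.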

@SuppressWarnings("unchecked")
public static void main(String[] args) {
    // [[[start:clustering-non-english-content]]]
    /*
     * We use a Controller that reuses instances of Carrot2 processing components
     * and caches results produced by document sources.
     */
    final Controller controller = ControllerFactory.createCachingPooling(IDocumentSource.class);

    /*
     * In the first call, we'll cluster a document list, setting the language for each
     * document separately.
     */
    final List<Document> documents = Lists.newArrayList();
    for (Document document : SampleDocumentData.DOCUMENTS_DATA_MINING) {
        documents.add(new Document(document.getTitle(), document.getSummary(), document.getContentUrl(),
                LanguageCode.ENGLISH));
    }

    final Map<String, Object> attributes = Maps.newHashMap();
    CommonAttributesDescriptor.attributeBuilder(attributes).documents(documents);
    final ProcessingResult englishResult = controller.process(attributes, LingoClusteringAlgorithm.class);
    ConsoleFormatter.displayResults(englishResult);

    /*
     * In the second call, we will fetch results for a Chinese query from Bing,
     * setting explicitly the Bing's specific language attribute. Based on that
     * attribute, the document source will set the appropriate language for each
     * document.
     */
    attributes.clear();

    CommonAttributesDescriptor.attributeBuilder(attributes).query("聚类" /* clustering */).results(100);

    Bing3WebDocumentSourceDescriptor.attributeBuilder(attributes).market(MarketOption.CHINESE_CHINA);
    Bing3WebDocumentSourceDescriptor.attributeBuilder(attributes).appid(BingKeyAccess.getKey()); // use your own ID here!

    final ProcessingResult chineseResult = controller.process(attributes, Bing3WebDocumentSource.class,
            LingoClusteringAlgorithm.class);
    ConsoleFormatter.displayResults(chineseResult);

    /*
     * In the third call, we will fetch results for the same Chinese query from
     * Google. As Google document source does not have its specific attribute for
     * setting the language, it will not set the documents' language for us. To make
     * sure the right lexical resources are used, we will need to set the
     * MultilingualClustering.defaultLanguage attribute to Chinese on our own.
     */
    attributes.clear();

    CommonAttributesDescriptor.attributeBuilder(attributes).query("聚类" /* clustering */).results(100);

    MultilingualClusteringDescriptor.attributeBuilder(attributes)
            .defaultLanguage(LanguageCode.CHINESE_SIMPLIFIED);

    final ProcessingResult chineseResult2 = controller.process(attributes, GoogleDocumentSource.class,
            LingoClusteringAlgorithm.class);
    ConsoleFormatter.displayResults(chineseResult2);
    // [[[end:clustering-non-english-content]]]
}

From source file:org.polimi.zarathustra.experiment.DOMsDumpCompareExperiment.java
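
In this DOM-comparison experiment, Lists.newArrayList() collects the optional verification directories passed as trailing command-line arguments.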

/**
 * Compares the DOMs found in dumps in two directories.
 */
public static void main(String[] args) throws Exception {
    if (args.length < 3) {
        System.err.println("Invoke this experiment with at least 3 parameters "
                + "and optionally one or more verification dir(s): "
                + "sourceDir targetDir outputDir verificationDir(s)");
        return; // missing required positional arguments; bail out before using them
    }

    File sourceDir1 = verifyReadableDir(new File(args[0]));
    File targetDir = verifyReadableDir(new File(args[1]));
    File outputDir = verifyWritableDir(new File(args[2]));
    List<File> verificationDirs = Lists.newArrayList();

    for (int i = 3; i < args.length; i++) {
        verificationDirs.add(verifyReadableDir(new File(args[i])));
    }

    for (String filename : sourceDir1.list()) {
        if (filename.endsWith(DOMHelper.DOM_DUMP_SUFFIX)) {
            compareAndStoreDom(filename, sourceDir1, targetDir, outputDir, verificationDirs);
        }
    }
}

From source file:com.github.fge.jsonschema.main.cli.Main.java
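
In this json-schema-validator command line, Lists.newArrayList() creates the list of canonical File objects built from the parser's non-option arguments.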

public static void main(final String... args) throws IOException, ProcessingException {
    final OptionParser parser = new OptionParser();
    parser.accepts("help", "show this help").forHelp();
    parser.acceptsAll(Arrays.asList("s", "brief"), "only show validation status (OK/NOT OK)");
    parser.acceptsAll(Arrays.asList("q", "quiet"), "no output; exit with the relevant return code (see below)");
    parser.accepts("syntax", "check the syntax of schema(s) given as argument(s)");
    parser.accepts("fakeroot", "pretend that the current directory is absolute URI \"uri\"").withRequiredArg();
    parser.formatHelpWith(HELP);

    final OptionSet optionSet;
    final boolean isSyntax;
    final int requiredArgs;

    Reporter reporter = Reporters.DEFAULT;
    String fakeRoot = null;

    try {
        optionSet = parser.parse(args);
    } catch (OptionException e) {
        System.err.println("unrecognized option(s): " + CustomHelpFormatter.OPTIONS_JOINER.join(e.options()));
        parser.printHelpOn(System.err);
        System.exit(CMD_ERROR.get());
        throw new IllegalStateException("WTF??");
    }

    if (optionSet.has("help")) {
        parser.printHelpOn(System.out);
        System.exit(ALL_OK.get());
    }

    if (optionSet.has("s") && optionSet.has("q")) {
        System.err.println("cannot specify both \"--brief\" and " + "\"--quiet\"");
        parser.printHelpOn(System.err);
        System.exit(CMD_ERROR.get());
    }

    if (optionSet.has("fakeroot"))
        fakeRoot = (String) optionSet.valueOf("fakeroot");

    isSyntax = optionSet.has("syntax");
    requiredArgs = isSyntax ? 1 : 2;

    @SuppressWarnings("unchecked")
    final List<String> arguments = (List<String>) optionSet.nonOptionArguments();

    if (arguments.size() < requiredArgs) {
        System.err.println("missing arguments");
        parser.printHelpOn(System.err);
        System.exit(CMD_ERROR.get());
    }

    final List<File> files = Lists.newArrayList();
    for (final String target : arguments)
        files.add(new File(target).getCanonicalFile());

    if (optionSet.has("brief"))
        reporter = Reporters.BRIEF;
    else if (optionSet.has("quiet")) {
        System.out.close();
        System.err.close();
        reporter = Reporters.QUIET;
    }

    new Main(fakeRoot).proceed(reporter, files, isSyntax);
}

From source file:cluster.ClusterComput.java
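
This adaptation of the Carrot2 example clusters Chinese texts loaded through Hibernate. Lists.newArrayList() creates the Document list that is filled from OnlineText records and then clustered with Lingo.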

@SuppressWarnings("unchecked")
public static void main(String[] args) {
    List<OnlineText> onlineTexts = PMHibernateImpl.getInstance().retrieveOnlineText();
    // [[[start:clustering-non-english-content]]]
    /*
     * We use a Controller that reuses instances of Carrot2 processing
     * components and caches results produced by document sources.
     */
    final Controller controller = ControllerFactory.createCachingPooling(IDocumentSource.class);

    /*
     * In the first call, we'll cluster a document list, setting the
     * language for each document separately.
     */
    final List<Document> documents = Lists.newArrayList();
    // for (Document document : SampleDocumentData.DOCUMENTS_DATA_MINING)
    // {
    // documents.add(new Document(document.getTitle(),
    // document.getSummary(),
    // document.getContentUrl(), LanguageCode.ENGLISH));
    // }

    for (OnlineText onlineText : onlineTexts) {
        documents.add(new Document(onlineText.getTitle(), onlineText.getContext(), "",
                LanguageCode.CHINESE_SIMPLIFIED));
    }
    final Map<String, Object> attributes = Maps.newHashMap();
    CommonAttributesDescriptor.attributeBuilder(attributes).documents(documents);
    final ProcessingResult englishResult = controller.process(attributes, LingoClusteringAlgorithm.class);
    ConsoleFormatter.displayResults(englishResult);
    final List<Cluster> clustersByTopic = englishResult.getClusters();
    for (Cluster cluster : clustersByTopic) {
        // System.out.println(clustersByTopic.indexOf(cluster) +
        // "? "
        // + cluster.getLabel());
        System.out.println(cluster.getScore());
        List<Document> cDocLst = cluster.getAllDocuments();
        for (Document doc : cDocLst) {
            // System.out.println(documents.indexOf(doc) + "--------"
            // + doc.getTitle());
            int index = documents.indexOf(doc);
            String file_title = documents.get(index).getTitle();
            String url = onlineTexts.get(index).getOnlinetext_id();
            ClusterRelation clusterRelation = new ClusterRelation();
            clusterRelation.setMetaFile(url);
            clusterRelation.setScore(cluster.getScore());
            clusterRelation.setName(cluster.getLabel());
            clusterRelation.setTitle(file_title);
            PMHibernateImpl.getInstance().save(clusterRelation);
        }
    }

    /*
     * In the second call, we will fetch results for a Chinese query from
     * Bing, setting explicitly the Bing's specific language attribute.
     * Based on that attribute, the document source will set the appropriate
     * language for each document.
     */
    attributes.clear();

    CommonAttributesDescriptor.attributeBuilder(attributes).query("聚类" /* clustering */).results(100);

    Bing3WebDocumentSourceDescriptor.attributeBuilder(attributes).market(MarketOption.CHINESE_CHINA);
    Bing3WebDocumentSourceDescriptor.attributeBuilder(attributes).appid(BingKeyAccess.getKey()); // use your own ID here!

    final ProcessingResult chineseResult = controller.process(attributes, Bing3WebDocumentSource.class,
            LingoClusteringAlgorithm.class);
    ConsoleFormatter.displayResults(chineseResult);

    /*
     * In the third call, we will fetch results for the same Chinese query
     * from Google. As Google document source does not have its specific
     * attribute for setting the language, it will not set the documents'
     * language for us. To make sure the right lexical resources are used,
     * we will need to set the MultilingualClustering.defaultLanguage
     * attribute to Chinese on our own.
     */
    attributes.clear();

    CommonAttributesDescriptor.attributeBuilder(attributes).query("聚类" /* clustering */).results(100);

    MultilingualClusteringDescriptor.attributeBuilder(attributes)
            .defaultLanguage(LanguageCode.CHINESE_SIMPLIFIED);

    final ProcessingResult chineseResult2 = controller.process(attributes, GoogleDocumentSource.class,
            LingoClusteringAlgorithm.class);
    ConsoleFormatter.displayResults(chineseResult2);

    // [[[end:clustering-non-english-content]]]
}

From source file:com.sina.dip.twill.HelloWorldClassDependent.java
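
The same Twill classpath pattern again: Lists.newArrayList() starts the application classpath list that Splitter fills from the YARN configuration.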

public static void main(String[] args) {
    String zkStr = "localhost:2181";

    YarnConfiguration yarnConfiguration = new YarnConfiguration();

    final TwillRunnerService twillRunner = new YarnTwillRunnerService(yarnConfiguration, zkStr);

    twillRunner.start();

    String yarnClasspath = yarnConfiguration.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            "/usr/lib/hadoop/*,/usr/lib/hadoop-0.20-mapreduce/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-yarn/*");

    List<String> applicationClassPaths = Lists.newArrayList();

    Iterables.addAll(applicationClassPaths, Splitter.on(",").split(yarnClasspath));

    final TwillController controller = twillRunner.prepare(new HelloWorldApplication())
            .withApplicationClassPaths(applicationClassPaths)
            .withBundlerClassAcceptor(new HadoopClassExcluder()).start();

    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            try {
                Futures.getUnchecked(controller.terminate());
            } finally {
                twillRunner.stop();
            }
        }
    });

    try {
        controller.awaitTerminated();
    } catch (ExecutionException e) {
        e.printStackTrace();
    }
}

From source file:com.mapr.synth.Synth.java
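
This schema-driven data generator splits the requested row count across worker threads. Lists.newArrayList() creates the list of ReportingWorker tasks that is submitted to the thread pool with invokeAll.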

public static void main(String[] args)
        throws IOException, CmdLineException, InterruptedException, ExecutionException {
    final Options opts = new Options();
    CmdLineParser parser = new CmdLineParser(opts);
    try {
        parser.parseArgument(args);
    } catch (CmdLineException e) {
        System.err.println("Usage: " + "[ -count <number>G|M|K ] " + "-schema schema-file "
                + "[-quote DOUBLE_QUOTE|BACK_SLASH|OPTIMISTIC] " + "[-format JSON|TSV|CSV|XML ] "
                + "[-threads n] " + "[-output output-directory-name] ");
        throw e;
    }
    Preconditions.checkArgument(opts.threads > 0 && opts.threads <= 2000,
            "Must have at least one thread and no more than 2000");

    if (opts.threads > 1) {
        Preconditions.checkArgument(!"-".equals(opts.output),
                "If more than on thread is used, you have to use -output to set the output directory");
    }

    File outputDir = new File(opts.output);
    if (!"-".equals(opts.output)) {
        if (!outputDir.exists()) {
            Preconditions.checkState(outputDir.mkdirs(),
                    String.format("Couldn't create output directory %s", opts.output));
        }
        Preconditions.checkArgument(outputDir.exists() && outputDir.isDirectory(),
                String.format("Couldn't create directory %s", opts.output));
    }

    if (opts.schema == null) {
        throw new IllegalArgumentException("Must specify schema file using [-schema filename] option");
    }
    final SchemaSampler sampler = new SchemaSampler(opts.schema);
    final AtomicLong rowCount = new AtomicLong();

    final List<ReportingWorker> tasks = Lists.newArrayList();
    int limit = (opts.count + opts.threads - 1) / opts.threads;
    int remaining = opts.count;
    for (int i = 0; i < opts.threads; i++) {

        final int count = Math.min(limit, remaining);
        remaining -= count;

        tasks.add(new ReportingWorker(opts, sampler, rowCount, count, i));
    }

    final double t0 = System.nanoTime() * 1e-9;
    ExecutorService pool = Executors.newFixedThreadPool(opts.threads);
    ScheduledExecutorService blinker = Executors.newScheduledThreadPool(1);
    final AtomicBoolean finalRun = new AtomicBoolean(false);

    final PrintStream sideLog = new PrintStream(new FileOutputStream("side-log"));
    Runnable blink = new Runnable() {
        public double oldT;
        private long oldN;

        @Override
        public void run() {
            double t = System.nanoTime() * 1e-9;
            long n = rowCount.get();
            System.err.printf("%s\t%d\t%.1f\t%d\t%.1f\t%.3f\n", finalRun.get() ? "F" : "R", opts.threads,
                    t - t0, n, n / (t - t0), (n - oldN) / (t - oldT));
            for (ReportingWorker task : tasks) {
                ReportingWorker.ThreadReport r = task.report();
                sideLog.printf("\t%d\t%.2f\t%.2f\t%.2f\t%.1f\t%.1f\n", r.fileNumber, r.threadTime, r.userTime,
                        r.wallTime, r.rows / r.threadTime, r.rows / r.wallTime);
            }
            oldN = n;
            oldT = t;
        }
    };
    if (!"-".equals(opts.output)) {
        blinker.scheduleAtFixedRate(blink, 0, 10, TimeUnit.SECONDS);
    }
    List<Future<Integer>> results = pool.invokeAll(tasks);

    int total = 0;
    for (Future<Integer> result : results) {
        total += result.get();
    }
    Preconditions.checkState(total == opts.count, String
            .format("Expected to generate %d lines of output, but actually generated %d", opts.count, total));
    pool.shutdownNow();
    blinker.shutdownNow();
    finalRun.set(true);
    sideLog.close();
    blink.run();
}

From source file:com.sina.dip.twill.HelloWorldArguments.java
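
This Twill example shows application-wide and per-runnable arguments. As in the other Twill samples, Lists.newArrayList() builds the application classpath list.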

public static void main(String[] args) {
    String zkStr = "localhost:2181";

    YarnConfiguration yarnConfiguration = new YarnConfiguration();

    final TwillRunnerService twillRunner = new YarnTwillRunnerService(yarnConfiguration, zkStr);

    twillRunner.start();

    String yarnClasspath = yarnConfiguration.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            "/usr/lib/hadoop/*,/usr/lib/hadoop-0.20-mapreduce/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-yarn/*");

    List<String> applicationClassPaths = Lists.newArrayList();

    Iterables.addAll(applicationClassPaths, Splitter.on(",").split(yarnClasspath));

    final TwillController controller = twillRunner.prepare(new HelloWorldApplication())
            // Application arguments will be visible to all runnables
            .withApplicationArguments("--arg", "arg-app")
            // Arguments only visible to instance of hello1.
            .withArguments("hello1", "--arg1", "arg-hello1").withArguments("hello1", "--arg2", "arg-hello2")
            // Arguments only visible to instance of hello2.
            .withArguments("hello2", "--arg3", "arg-hello3").withArguments("hello2", "--arg4", "arg-hello4")
            .withArguments("hello2", "--arg5", "arg-hello5")
            .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
            .withApplicationClassPaths(applicationClassPaths)
            .withBundlerClassAcceptor(new HadoopClassExcluder()).start();

    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            try {
                Futures.getUnchecked(controller.terminate());
            } finally {
                twillRunner.stop();
            }
        }
    });

    try {
        controller.awaitTerminated();
    } catch (ExecutionException e) {
        e.printStackTrace();
    }
}