Usage examples for org.apache.commons.cli.OptionBuilder.withLongOpt
public static OptionBuilder withLongOpt(String newLongopt)
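A minimal, self-contained sketch of how withLongOpt is typically used (the option name --file and its description are made up for illustration; any commons-cli 1.x release should behave the same way):

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class WithLongOptExample {
    public static void main(String[] args) throws ParseException {
        // Build an option that can be passed as -f <path> or --file <path>.
        Option file = OptionBuilder.withLongOpt("file")
                .hasArg()
                .withDescription("path to the input file")
                .create('f');

        Options options = new Options();
        options.addOption(file);

        // Parse the command line and look the value up by its long name.
        CommandLine cli = new GnuParser().parse(options, args);
        if (cli.hasOption("file")) {
            System.out.println("file = " + cli.getOptionValue("file"));
        }
    }
}

Note that OptionBuilder's methods are static and accumulate state on the class itself, which is why some of the examples below call them as separate statements before create().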
From source file: org.apache.hadoop.hdfs.tools.DiskBalancer.java

/**
 * Adds execute command options.
 *
 * @param opt Options
 */
private void addExecuteCommands(Options opt) {
    Option execute = OptionBuilder.withLongOpt(EXECUTE).hasArg()
            .withDescription("Takes a plan file and "
                    + "submits it for execution by the datanode.")
            .create();
    getExecuteOptions().addOption(execute);
    opt.addOption(execute);
}
From source file: org.apache.hadoop.hdfs.tools.DiskBalancer.java

/**
 * Adds query command options.
 *
 * @param opt Options
 */
private void addQueryCommands(Options opt) {
    Option query = OptionBuilder.withLongOpt(QUERY).hasArg()
            .withDescription("Queries the disk balancer "
                    + "status of a given datanode.")
            .create();
    getQueryOptions().addOption(query);
    opt.addOption(query);

    // Please note: Adding this only to Query options since -v is already
    // added to global table.
    Option verbose = OptionBuilder.withLongOpt(VERBOSE)
            .withDescription("Prints details of the plan that is being executed "
                    + "on the node.")
            .create();
    getQueryOptions().addOption(verbose);
}
From source file: org.apache.hadoop.hdfs.tools.DiskBalancer.java

/**
 * Adds cancel command options.
 *
 * @param opt Options
 */
private void addCancelCommands(Options opt) {
    Option cancel = OptionBuilder.withLongOpt(CANCEL).hasArg()
            .withDescription("Cancels a running plan using a plan file.")
            .create();
    getCancelOptions().addOption(cancel);
    opt.addOption(cancel);

    Option node = OptionBuilder.withLongOpt(NODE).hasArg()
            .withDescription("Cancels a running plan using a plan ID and hostName")
            .create();
    getCancelOptions().addOption(node);
    opt.addOption(node);
}
From source file: org.apache.hadoop.hdfs.tools.DiskBalancer.java

/**
 * Adds report command options.
 *
 * @param opt Options
 */
private void addReportCommands(Options opt) {
    Option report = OptionBuilder.withLongOpt(REPORT)
            .withDescription("List nodes that will benefit from running "
                    + "DiskBalancer.")
            .create();
    getReportOptions().addOption(report);
    opt.addOption(report);

    Option top = OptionBuilder.withLongOpt(TOP).hasArg()
            .withDescription("specify the number of nodes to be listed which has"
                    + " data imbalance.")
            .create();
    getReportOptions().addOption(top);
    opt.addOption(top);

    Option node = OptionBuilder.withLongOpt(NODE).hasArg()
            .withDescription("Datanode address, "
                    + "it can be DataNodeID, IP or hostname.")
            .create();
    getReportOptions().addOption(node);
    opt.addOption(node);
}
From source file: org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer.java

/**
 * Build command-line options and descriptions
 *
 * @return command line options
 */
public static Options buildOptions() {
    Options options = new Options();

    // Build in/output file arguments, which are required, but there is no
    // addOption method that can specify this
    OptionBuilder.isRequired();
    OptionBuilder.hasArgs();
    OptionBuilder.withLongOpt("outputFilename");
    options.addOption(OptionBuilder.create("o"));

    OptionBuilder.isRequired();
    OptionBuilder.hasArgs();
    OptionBuilder.withLongOpt("inputFilename");
    options.addOption(OptionBuilder.create("i"));

    options.addOption("p", "processor", true, "");
    options.addOption("v", "verbose", false, "");
    options.addOption("f", "fix-txids", false, "");
    options.addOption("r", "recover", false, "");
    options.addOption("h", "help", false, "");

    return options;
}
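The OfflineEditsViewer example above relies on OptionBuilder's static state: each call records a setting on the class, and create("o") consumes everything accumulated so far to produce the Option. If commons-cli 1.3 or later is available, the same required -o/--outputFilename option could be expressed with the non-deprecated, instance-based builder; a rough equivalent (an illustrative sketch, not part of the Hadoop source) would be:

Option output = Option.builder("o")
        .longOpt("outputFilename")
        .hasArgs()
        .required()
        .build();
options.addOption(output);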
From source file: org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageDecompressor.java

/**
 * Build command-line options and descriptions
 */
public static Options buildOptions() {
    Options options = new Options();

    // Build in/output file arguments, which are required, but there is no
    // addOption method that can specify this
    OptionBuilder.isRequired();
    OptionBuilder.hasArgs();
    OptionBuilder.withLongOpt("outputFile");
    options.addOption(OptionBuilder.create("o"));

    OptionBuilder.isRequired();
    OptionBuilder.hasArgs();
    OptionBuilder.withLongOpt("inputFile");
    options.addOption(OptionBuilder.create("i"));

    options.addOption("h", "help", false, "");

    return options;
}
From source file: org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer.java

/**
 * Build command-line options and descriptions
 */
public static Options buildOptions() {
    Options options = new Options();

    // Build in/output file arguments, which are required, but there is no
    // addOption method that can specify this
    OptionBuilder.isRequired();
    OptionBuilder.hasArgs();
    OptionBuilder.withLongOpt("outputFile");
    options.addOption(OptionBuilder.create("o"));

    OptionBuilder.isRequired();
    OptionBuilder.hasArgs();
    OptionBuilder.withLongOpt("inputFile");
    options.addOption(OptionBuilder.create("i"));

    options.addOption("p", "processor", true, "");
    options.addOption("h", "help", false, "");
    options.addOption("skipBlocks", false, "");
    options.addOption("printToScreen", false, "");
    options.addOption("delimiter", true, "");

    return options;
}
From source file: org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB.java

/**
 * Build command-line options and descriptions
 */
private static Options buildOptions() {
    Options options = new Options();

    // Build in/output file arguments, which are required, but there is no
    // addOption method that can specify this
    OptionBuilder.isRequired();
    OptionBuilder.hasArgs();
    OptionBuilder.withLongOpt("inputFile");
    options.addOption(OptionBuilder.create("i"));

    options.addOption("o", "outputFile", true, "");
    options.addOption("p", "processor", true, "");
    options.addOption("h", "help", false, "");
    options.addOption("maxSize", true, "");
    options.addOption("step", true, "");
    options.addOption("addr", true, "");
    options.addOption("delimiter", true, "");
    options.addOption("t", "temp", true, "");

    return options;
}
From source file: org.apache.hadoop.hive.llap.LlapDump.java

static Options createOptions() {
    Options result = new Options();

    result.addOption(OptionBuilder.withLongOpt("location")
            .withDescription("HS2 url").hasArg().create('l'));
    result.addOption(OptionBuilder.withLongOpt("user")
            .withDescription("user name").hasArg().create('u'));
    result.addOption(OptionBuilder.withLongOpt("pwd")
            .withDescription("password").hasArg().create('p'));
    result.addOption(OptionBuilder.withLongOpt("num")
            .withDescription("number of splits").hasArg().create('n'));
    result.addOption(OptionBuilder.withValueSeparator().hasArgs(2).withArgName("property=value")
            .withLongOpt("hiveconf").withDescription("Use value for given property").create());
    result.addOption(OptionBuilder.withLongOpt("help")
            .withDescription("help").hasArg(false).create('h'));

    return result;
}
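The "hiveconf" option above combines withValueSeparator() and hasArgs(2) so that an argument such as --hiveconf key=value is split into a property name and a value. Assuming the options built by createOptions(), the pairs could be read back roughly like this (a sketch; the parser choice and the printing are illustrative, not taken from LlapDump):

CommandLine cli = new GnuParser().parse(createOptions(), args);
// Each --hiveconf name=value pair ends up as an entry in this Properties object.
java.util.Properties hiveConf = cli.getOptionProperties("hiveconf");
for (String name : hiveConf.stringPropertyNames()) {
    System.out.println(name + " = " + hiveConf.getProperty(name));
}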
From source file: org.apache.hadoop.hive.metastore.hbase.HBaseImport.java

private int init(String... args) throws ParseException {
    Options options = new Options();

    doAll = doKerberos = false;
    parallel = 1;
    batchSize = 1000;

    options.addOption(OptionBuilder.withLongOpt("all")
            .withDescription("Import the full metastore").create('a'));
    options.addOption(OptionBuilder.withLongOpt("batchsize")
            .withDescription("Number of partitions to read and write in a batch, defaults to 1000")
            .hasArg().create('b'));
    options.addOption(OptionBuilder.withLongOpt("database")
            .withDescription("Import a single database").hasArgs().create('d'));
    options.addOption(OptionBuilder.withLongOpt("help")
            .withDescription("You're looking at it").create('h'));
    options.addOption(OptionBuilder.withLongOpt("function")
            .withDescription("Import a single function").hasArgs().create('f'));
    options.addOption(OptionBuilder.withLongOpt("kerberos")
            .withDescription("Import all kerberos related objects (master key, tokens)").create('k'));
    options.addOption(OptionBuilder.withLongOpt("parallel")
            .withDescription("Parallel factor for loading (only applied to tables and partitions), "
                    + "defaults to 1")
            .hasArg().create('p'));
    options.addOption(OptionBuilder.withLongOpt("role")
            .withDescription("Import a single role").hasArgs().create('r'));
    options.addOption(OptionBuilder.withLongOpt("tables")
            .withDescription("Import a single tables").hasArgs().create('t'));

    CommandLine cli = new GnuParser().parse(options, args);

    // Process help, if it was asked for, this must be done first
    if (cli.hasOption('h')) {
        printHelp(options);
        return 1;
    }

    boolean hasCmd = false;
    // Now process the other command line args
    if (cli.hasOption('a')) {
        hasCmd = true;
        doAll = true;
    }
    if (cli.hasOption('b')) {
        batchSize = Integer.parseInt(cli.getOptionValue('b'));
    }
    if (cli.hasOption('d')) {
        hasCmd = true;
        dbsToImport = Arrays.asList(cli.getOptionValues('d'));
    }
    if (cli.hasOption('f')) {
        hasCmd = true;
        functionsToImport = Arrays.asList(cli.getOptionValues('f'));
    }
    if (cli.hasOption('p')) {
        parallel = Integer.parseInt(cli.getOptionValue('p'));
    }
    if (cli.hasOption('r')) {
        hasCmd = true;
        rolesToImport = Arrays.asList(cli.getOptionValues('r'));
    }
    if (cli.hasOption('k')) {
        doKerberos = true;
    }
    if (cli.hasOption('t')) {
        hasCmd = true;
        tablesToImport = Arrays.asList(cli.getOptionValues('t'));
    }
    if (!hasCmd) {
        printHelp(options);
        return 1;
    }

    dbs = new ArrayList<>();
    // We don't want to bound the size of the table queue because we keep it all in memory
    partitionedTables = new LinkedBlockingQueue<>();
    tableNameQueue = new LinkedBlockingQueue<>();
    indexNameQueue = new LinkedBlockingQueue<>();
    // Bound the size of this queue so we don't get too much in memory.
    partQueue = new ArrayBlockingQueue<>(parallel * 2);

    return 0;
}