Example usage for org.apache.commons.cli OptionBuilder hasArg

Introduction

On this page you can find example usage for org.apache.commons.cli OptionBuilder.hasArg().

Prototype

public static OptionBuilder hasArg() 

Document

The next Option created will require an argument value.
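
A minimal, self-contained sketch (not taken from the sources below) showing the pattern: mark the next option as requiring a value with hasArg(), parse the command line, and read the value back. The class and option names here are illustrative.

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class HasArgDemo {
    @SuppressWarnings("static-access")
    public static void main(String[] args) throws ParseException {
        Options options = new Options();
        // hasArg() applies to the next Option created by this static builder
        options.addOption(OptionBuilder.hasArg().withArgName("file").withLongOpt("input")
                .withDescription("input file to read").create('i'));

        CommandLine cmd = new GnuParser().parse(options, new String[] { "-i", "data.txt" });
        System.out.println(cmd.getOptionValue('i')); // prints "data.txt"
    }
}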

Usage

From source file:org.apache.geronimo.cli.deployer.InstallLibraryCommandArgsImpl.java

protected void addGroupId() {
    OptionBuilder optionBuilder = OptionBuilder.hasArg().withArgName(ARGUMENT_GROUP_ID);
    optionBuilder = optionBuilder.withLongOpt(ARGUMENT_GROUP_ID);
    optionBuilder = optionBuilder
            .withDescription("If a groupId is provided, the library file will be installed under that groupId. "
                    + "Otherwise, default will be used.");
    Option option = optionBuilder.create(ARGUMENT_GROUP_ID_SHORTFORM);
    options.addOption(option);
}

From source file:org.apache.hadoop.hdfs.tools.JMXGet.java

/**
 * parse args
 */
private static CommandLine parseArgs(Options opts, String... args) throws IllegalArgumentException {

    OptionBuilder.withArgName("NameNode|DataNode");
    OptionBuilder.hasArg();
    OptionBuilder.withDescription("specify jmx service (NameNode by default)");
    Option jmx_service = OptionBuilder.create("service");

    OptionBuilder.withArgName("mbean server");
    OptionBuilder.hasArg();
    OptionBuilder.withDescription("specify mbean server (localhost by default)");
    Option jmx_server = OptionBuilder.create("server");

    OptionBuilder.withDescription("print help");
    Option jmx_help = OptionBuilder.create("help");

    OptionBuilder.withArgName("mbean server port");
    OptionBuilder.hasArg();
    OptionBuilder.withDescription("specify mbean server port, "
            + "if missing - it will try to connect to MBean Server in the same VM");
    Option jmx_port = OptionBuilder.create("port");

    OptionBuilder.withArgName("VM's connector url");
    OptionBuilder.hasArg();
    OptionBuilder.withDescription("connect to the VM on the same machine;"
            + "\n use:\n jstat -J-Djstat.showUnsupported=true -snap <vmpid> | "
            + "grep sun.management.JMXConnectorServer.address\n " + "to find the url");
    Option jmx_localVM = OptionBuilder.create("localVM");

    opts.addOption(jmx_server);
    opts.addOption(jmx_help);
    opts.addOption(jmx_service);
    opts.addOption(jmx_port);
    opts.addOption(jmx_localVM);

    CommandLine commandLine = null;
    CommandLineParser parser = new GnuParser();
    try {
        commandLine = parser.parse(opts, args, true);
    } catch (ParseException e) {
        printUsage(opts);
        throw new IllegalArgumentException("invalid args: " + e.getMessage());
    }
    return commandLine;
}
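
A hedged follow-up (not part of JMXGet itself, and assuming the parseArgs method above is in scope): once parsing succeeds, the values are typically read back with the documented defaults applied.

CommandLine cmd = parseArgs(new Options(), "-service", "DataNode");
String service = cmd.getOptionValue("service", "NameNode"); // "DataNode" here; "NameNode" when the flag is omitted
String port = cmd.getOptionValue("port");                   // null when -port was not given
boolean wantsHelp = cmd.hasOption("help");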

From source file:org.apache.hadoop.hive.cli.OptionsProcessor.java

@SuppressWarnings("static-access")
public OptionsProcessor() {

    // -database database
    options.addOption(OptionBuilder.hasArg().withArgName("databasename").withLongOpt("database")
            .withDescription("Specify the database to use").create());

    // -e 'quoted-query-string'
    options.addOption(OptionBuilder.hasArg().withArgName("quoted-query-string")
            .withDescription("SQL from command line").create('e'));

    // -f <query-file>
    options.addOption(
            OptionBuilder.hasArg().withArgName("filename").withDescription("SQL from files").create('f'));

    // -i <init-query-file>
    options.addOption(OptionBuilder.hasArg().withArgName("filename").withDescription("Initialization SQL file")
            .create('i'));

    // -hiveconf x=y
    options.addOption(OptionBuilder.withValueSeparator().hasArgs(2).withArgName("property=value")
            .withLongOpt("hiveconf").withDescription("Use value for given property").create());

    // Substitution option -d, --define
    options.addOption(OptionBuilder.withValueSeparator().hasArgs(2).withArgName("key=value")
            .withLongOpt("define")
            .withDescription("Variable substitution to apply to Hive commands. e.g. -d A=B or --define A=B")
            .create('d'));

    // Substitution option --hivevar
    options.addOption(OptionBuilder.withValueSeparator().hasArgs(2).withArgName("key=value")
            .withLongOpt("hivevar")
            .withDescription("Variable substitution to apply to Hive commands. e.g. --hivevar A=B").create());

    // [-S|--silent]
    options.addOption(new Option("S", "silent", false, "Silent mode in interactive shell"));

    // [-v|--verbose]
    options.addOption(new Option("v", "verbose", false, "Verbose mode (echo executed SQL to the console)"));

    // [-H|--help]
    options.addOption(new Option("H", "help", false, "Print help information"));

}
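
A hedged sketch (not from OptionsProcessor itself, and assuming the options field built in this constructor) of how the withValueSeparator().hasArgs(2) options behave: each -hiveconf key=value token is split on '=' and can be read back as java.util.Properties.

CommandLine cmd = new GnuParser().parse(options,
        new String[] { "-hiveconf", "hive.root.logger=INFO,console" });
java.util.Properties hiveconf = cmd.getOptionProperties("hiveconf");
System.out.println(hiveconf.getProperty("hive.root.logger")); // INFO,console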

From source file:org.apache.hadoop.hive.llap.cli.LlapOptionsProcessor.java

@SuppressWarnings("static-access")
public LlapOptionsProcessor() {

    // set the number of instances on which llap should run
    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_INSTANCES).withLongOpt(OPTION_INSTANCES)
            .withDescription("Specify the number of instances to run this on").create('i'));

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_NAME).withLongOpt(OPTION_NAME)
            .withDescription("Cluster name for YARN registry").create('n'));

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_DIRECTORY).withLongOpt(OPTION_DIRECTORY)
            .withDescription("Temp directory for jars etc.").create('d'));

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_ARGS).withLongOpt(OPTION_ARGS)
            .withDescription("java arguments to the llap instance").create('a'));

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_LOGLEVEL).withLongOpt(OPTION_LOGLEVEL)
            .withDescription("log levels for the llap instance").create('l'));

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_LOGGER).withLongOpt(OPTION_LOGGER)
            .withDescription("logger for llap instance ([" + LogHelpers.LLAP_LOGGER_NAME_RFA + "], "
                    + LogHelpers.LLAP_LOGGER_NAME_QUERY_ROUTING + ", " + LogHelpers.LLAP_LOGGER_NAME_CONSOLE)
            .create());

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_CHAOS_MONKEY).withLongOpt(OPTION_CHAOS_MONKEY)
            .withDescription("chaosmonkey interval").create('m'));

    options.addOption(OptionBuilder.hasArg(false).withArgName(OPTION_SLIDER_DEFAULT_KEYTAB)
            .withLongOpt(OPTION_SLIDER_DEFAULT_KEYTAB)
            .withDescription("try to set default settings for Slider AM keytab; mostly for dev testing")
            .create());

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_SLIDER_KEYTAB_DIR)
            .withLongOpt(OPTION_SLIDER_KEYTAB_DIR)
            .withDescription(
                    "Slider AM keytab directory on HDFS (where the headless user keytab is stored by Slider keytab installation, e.g. .slider/keytabs/llap)")
            .create());

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_SLIDER_KEYTAB).withLongOpt(OPTION_SLIDER_KEYTAB)
            .withDescription("Slider AM keytab file name inside " + OPTION_SLIDER_KEYTAB_DIR).create());

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_SLIDER_PRINCIPAL)
            .withLongOpt(OPTION_SLIDER_PRINCIPAL)
            .withDescription(
                    "Slider AM principal; should be the user running the cluster, e.g. hive@EXAMPLE.COM")
            .create());

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_SLIDER_PLACEMENT)
            .withLongOpt(OPTION_SLIDER_PLACEMENT)
            .withDescription(
                    "Slider placement policy; see slider documentation at https://slider.incubator.apache.org/docs/placement.html."
                            + " 4 means anti-affinity (the default; unnecessary if LLAP is going to take more than half of the YARN capacity of a node), 0 is normal.")
            .create());

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_EXECUTORS).withLongOpt(OPTION_EXECUTORS)
            .withDescription("executor per instance").create('e'));

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_CACHE).withLongOpt(OPTION_CACHE)
            .withDescription("cache size per instance").create('c'));

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_SIZE).withLongOpt(OPTION_SIZE)
            .withDescription("container size per instance").create('s'));

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_XMX).withLongOpt(OPTION_XMX)
            .withDescription("working memory size").create('w'));

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_LLAP_QUEUE).withLongOpt(OPTION_LLAP_QUEUE)
            .withDescription("The queue within which LLAP will be started").create('q'));

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_OUTPUT_DIR).withLongOpt(OPTION_OUTPUT_DIR)
            .withDescription("Output directory for the generated scripts").create());

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_AUXJARS).withLongOpt(OPTION_AUXJARS)
            .withDescription(
                    "additional jars to package (by default, JSON SerDe jar is packaged" + " if available)")
            .create('j'));

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_AUXHBASE).withLongOpt(OPTION_AUXHBASE)
            .withDescription("whether to package the HBase jars (true by default)").create('h'));

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_AUXHIVE).withLongOpt(OPTION_AUXHIVE)
            .withDescription("whether to package the Hive aux jars (true by default)").create(OPTION_AUXHIVE));

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_JAVA_HOME).withLongOpt(OPTION_JAVA_HOME)
            .withDescription(
                    "Path to the JRE/JDK. This should be installed at the same location on all cluster nodes ($JAVA_HOME, java.home by default)")
            .create());

    // -hiveconf x=y
    options.addOption(OptionBuilder.withValueSeparator().hasArgs(2).withArgName("property=value")
            .withLongOpt(OPTION_HIVECONF)
            .withDescription("Use value for given property. Overridden by explicit parameters").create());

    options.addOption(OptionBuilder.hasArg().withArgName("b").withLongOpt(OPTION_SLIDER_AM_CONTAINER_MB)
            .withDescription("The size of the slider AppMaster container in MB").create('b'));

    options.addOption(OptionBuilder.withValueSeparator().hasArgs(2).withArgName("property=value")
            .withLongOpt(OPTION_SLIDER_APPCONFIG_GLOBAL)
            .withDescription("Property (key=value) to be set in the global section of the Slider appConfig")
            .create());

    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_IO_THREADS).withLongOpt(OPTION_IO_THREADS)
            .withDescription("executor per instance").create('t'));

    options.addOption(OptionBuilder.hasArg(false).withArgName(OPTION_START).withLongOpt(OPTION_START)
            .withDescription("immediately start the cluster").create('z'));

    // [-H|--help]
    options.addOption(new Option("H", "help", false, "Print help information"));
}
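
A hedged usage note (a sketch, assuming the options field from this constructor): options built with hasArg(false) are plain flags, so callers test for presence rather than fetch a value.

CommandLine cmd = new GnuParser().parse(options, new String[] { "-z", "-i", "2" });
boolean startNow = cmd.hasOption('z');      // true: the start flag takes no value
String instances = cmd.getOptionValue('i'); // "2"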

From source file:org.apache.hadoop.hive.metastore.tools.SchemaToolCommandLine.java

@SuppressWarnings("static-access")
private Options createOptions(OptionGroup additionalOptions) {
    Option help = new Option("help", "print this message");
    Option infoOpt = new Option("info", "Show config and schema details");
    Option upgradeOpt = new Option("upgradeSchema", "Schema upgrade");
    Option upgradeFromOpt = OptionBuilder.withArgName("upgradeFrom").hasArg()
            .withDescription("Schema upgrade from a version").create("upgradeSchemaFrom");
    Option initOpt = new Option("initSchema", "Schema initialization");
    Option initToOpt = OptionBuilder.withArgName("initTo").hasArg()
            .withDescription("Schema initialization to a version").create("initSchemaTo");
    Option initOrUpgradeSchemaOpt = new Option("initOrUpgradeSchema",
            "Initialize or upgrade schema to latest version");
    Option validateOpt = new Option("validate", "Validate the database");
    Option createCatalog = OptionBuilder.hasArg()
            .withDescription("Create a catalog, requires --catalogLocation parameter as well")
            .create("createCatalog");
    Option alterCatalog = OptionBuilder.hasArg()
            .withDescription(
                    "Alter a catalog, requires --catalogLocation and/or --catalogDescription parameter as well")
            .create("alterCatalog");
    Option moveDatabase = OptionBuilder.hasArg()
            .withDescription("Move a database between catalogs.  Argument is the database name. "
                    + "Requires --fromCatalog and --toCatalog parameters as well")
            .create("moveDatabase");
    Option moveTable = OptionBuilder.hasArg()
            .withDescription("Move a table to a different database.  Argument is the table name. "
                    + "Requires --fromCatalog, --toCatalog, --fromDatabase, and --toDatabase "
                    + " parameters as well.")
            .create("moveTable");
    Option createUserOpt = new Option("createUser", "Create the Hive user, set hiveUser to the db"
            + " admin user and the hive password to the db admin password with this");

    OptionGroup optGroup = new OptionGroup();
    optGroup.addOption(help).addOption(infoOpt).addOption(upgradeOpt).addOption(upgradeFromOpt)
            .addOption(initOpt).addOption(initToOpt).addOption(initOrUpgradeSchemaOpt).addOption(validateOpt)
            .addOption(createCatalog).addOption(alterCatalog).addOption(moveDatabase).addOption(moveTable)
            .addOption(createUserOpt);
    optGroup.setRequired(true);

    Option userNameOpt = OptionBuilder.withArgName("user").hasArgs()
            .withDescription("Override config file user name").create("userName");
    Option passwdOpt = OptionBuilder.withArgName("password").hasArgs()
            .withDescription("Override config file password").create("passWord");
    Option dbTypeOpt = OptionBuilder.withArgName("databaseType").hasArgs()
            .withDescription("Metastore database type").isRequired().create("dbType");
    Option hiveUserOpt = OptionBuilder.hasArg().withDescription("Hive user (for use with createUser)")
            .create("hiveUser");
    Option hivePasswdOpt = OptionBuilder.hasArg().withDescription("Hive password (for use with createUser)")
            .create("hivePassword");
    Option hiveDbOpt = OptionBuilder.hasArg().withDescription("Hive database (for use with createUser)")
            .create("hiveDb");
    /*
    Option metaDbTypeOpt = OptionBuilder.withArgName("metaDatabaseType")
        .hasArgs().withDescription("Used only if upgrading the system catalog for hive")
        .create("metaDbType");
        */
    Option urlOpt = OptionBuilder.withArgName("url").hasArgs().withDescription("connection url to the database")
            .create("url");
    Option driverOpt = OptionBuilder.withArgName("driver").hasArgs()
            .withDescription("driver name for connection").create("driver");
    Option dbOpts = OptionBuilder.withArgName("databaseOpts").hasArgs()
            .withDescription("Backend DB specific options").create("dbOpts");
    Option dryRunOpt = new Option("dryRun", "list SQL scripts (no execute)");
    Option verboseOpt = new Option("verbose", "only print SQL statements");
    Option serversOpt = OptionBuilder.withArgName("serverList").hasArgs()
            .withDescription("a comma-separated list of servers used in location validation in the format of "
                    + "scheme://authority (e.g. hdfs://localhost:8000)")
            .create("servers");
    Option catalogLocation = OptionBuilder.hasArg()
            .withDescription("Location of new catalog, required when adding a catalog")
            .create("catalogLocation");
    Option catalogDescription = OptionBuilder.hasArg().withDescription("Description of new catalog")
            .create("catalogDescription");
    Option ifNotExists = OptionBuilder
            .withDescription("If passed then it is not an error to create an existing catalog")
            .create("ifNotExists");
    Option fromCatalog = OptionBuilder.hasArg()
            .withDescription("Catalog a moving database or table is coming from.  This is "
                    + "required if you are moving a database or table.")
            .create("fromCatalog");
    Option toCatalog = OptionBuilder.hasArg()
            .withDescription("Catalog a moving database or table is going to.  This is "
                    + "required if you are moving a database or table.")
            .create("toCatalog");
    Option fromDatabase = OptionBuilder.hasArg()
            .withDescription(
                    "Database a moving table is coming from.  This is " + "required if you are moving a table.")
            .create("fromDatabase");
    Option toDatabase = OptionBuilder.hasArg()
            .withDescription(
                    "Database a moving table is going to.  This is " + "required if you are moving a table.")
            .create("toDatabase");

    Options options = new Options();
    options.addOption(help);
    options.addOptionGroup(optGroup);
    options.addOption(dbTypeOpt);
    //options.addOption(metaDbTypeOpt);
    options.addOption(userNameOpt);
    options.addOption(passwdOpt);
    options.addOption(urlOpt);
    options.addOption(driverOpt);
    options.addOption(dbOpts);
    options.addOption(dryRunOpt);
    options.addOption(verboseOpt);
    options.addOption(serversOpt);
    options.addOption(catalogLocation);
    options.addOption(catalogDescription);
    options.addOption(ifNotExists);
    options.addOption(fromCatalog);
    options.addOption(toCatalog);
    options.addOption(fromDatabase);
    options.addOption(toDatabase);
    options.addOption(hiveUserOpt);
    options.addOption(hivePasswdOpt);
    options.addOption(hiveDbOpt);
    if (additionalOptions != null)
        options.addOptionGroup(additionalOptions);

    return options;
}
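
A hedged sketch of what the required OptionGroup buys: its members are mutually exclusive, so selecting two of the schema-tool actions fails at parse time. Illustrative only; createOptions is private in the real class.

Options opts = createOptions(null);
try {
    new GnuParser().parse(opts, new String[] { "-initSchema", "-upgradeSchema", "-dbType", "mysql" });
} catch (ParseException e) {
    // AlreadySelectedException: only one action from the required group may be selected
}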

From source file:org.apache.hadoop.hive.ql.processors.CryptoProcessor.java

public CryptoProcessor(HadoopShims.HdfsEncryptionShim encryptionShim, HiveConf conf) {
    this.encryptionShim = encryptionShim;
    this.conf = conf;

    CREATE_KEY_OPTIONS = new Options();
    CREATE_KEY_OPTIONS.addOption(OptionBuilder.hasArg().withLongOpt("keyName").isRequired().create());
    CREATE_KEY_OPTIONS.addOption(OptionBuilder.hasArg().withLongOpt("bitLength").create()); // optional

    DELETE_KEY_OPTIONS = new Options();
    DELETE_KEY_OPTIONS.addOption(OptionBuilder.hasArg().withLongOpt("keyName").isRequired().create());

    CREATE_ZONE_OPTIONS = new Options();
    CREATE_ZONE_OPTIONS.addOption(OptionBuilder.hasArg().withLongOpt("keyName").isRequired().create());
    CREATE_ZONE_OPTIONS.addOption(OptionBuilder.hasArg().withLongOpt("path").isRequired().create());
}
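
A hedged sketch of consuming these long-opt-only Options; the parser choice here is an assumption, not CryptoProcessor's actual code.

CommandLine args = new GnuParser().parse(CREATE_KEY_OPTIONS,
        new String[] { "--keyName", "k1", "--bitLength", "128" });
String keyName = args.getOptionValue("keyName");     // "k1"
String bitLength = args.getOptionValue("bitLength"); // "128", or null if omitted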

From source file:org.apache.hadoop.hive.ql.processors.ErasureProcessor.java

/**
 * Get an erasure coding policy for a Path.
 * @param params Parameters passed to the command.
 * @throws Exception if command failed.
 */
private void getPolicy(String[] params) throws Exception {
    String command = "getPolicy";
    try {
        // getPolicy -path <path>
        Options getPolicyOptions = new Options();

        String pathOptionName = "path";
        Option policyOption = OptionBuilder.hasArg().isRequired().withLongOpt(pathOptionName)
                .withDescription("Path for which Policy should be fetched").create();
        getPolicyOptions.addOption(policyOption);

        CommandLine args = parseCommandArgs(getPolicyOptions, params);
        String path = args.getOptionValue(pathOptionName);

        HdfsFileErasureCodingPolicy policy = erasureCodingShim.getErasureCodingPolicy(new Path(path));
        writeTestOutput("EC policy is '" + (policy != null ? policy.getName() : "REPLICATED") + "'");

    } catch (ParseException pe) {
        writeTestOutput("Error parsing options for " + command + " " + pe.getMessage());
    } catch (Exception e) {
        writeTestOutput("Caught exception running " + command + ": " + e.getMessage());
        throw new Exception("Cannot run " + command + ": " + e.getMessage(), e);
    }
}
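
parseCommandArgs is a helper that is not shown on this page; a minimal sketch, assuming it simply delegates to a commons-cli parser (hypothetical, not Hive's actual implementation):

private CommandLine parseCommandArgs(Options opts, String[] args) throws ParseException {
    return new GnuParser().parse(opts, args);
}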

From source file:org.apache.hadoop.hive.ql.processors.ErasureProcessor.java

/**
 * Enable an erasure coding policy.
 * @param params Parameters passed to the command.
 * @throws Exception If command failed.
 */
private void enablePolicy(String[] params) throws Exception {
    String command = "enablePolicy";
    try {
        // enablePolicy -policy <policyName>
        Options enablePolicyOptions = new Options();

        String policyOptionName = "policy";
        Option policyOption = OptionBuilder.hasArg().isRequired().withLongOpt(policyOptionName)
                .withDescription("Policy to enable").create();
        enablePolicyOptions.addOption(policyOption);

        CommandLine args = parseCommandArgs(enablePolicyOptions, params);
        String policyName = args.getOptionValue(policyOptionName);

        erasureCodingShim.enableErasureCodingPolicy(policyName);
        writeTestOutput("Enabled EC policy '" + policyName + "'");
    } catch (ParseException pe) {
        writeTestOutput("Error parsing options for " + command + " " + pe.getMessage());
    } catch (Exception e) {
        writeTestOutput("Caught exception running " + command + ": " + e.getMessage());
        throw new Exception("Cannot run " + command + ": " + e.getMessage());
    }
}

From source file:org.apache.hadoop.hive.ql.processors.ErasureProcessor.java

/**
 * Remove an erasure coding policy.
 * @param params Parameters passed to the command.
 * @throws Exception if command failed.
 */
private void removePolicy(String[] params) throws Exception {
    String command = "removePolicy";
    try {
        // removePolicy -policy <policyName>
        Options removePolicyOptions = new Options();

        String policyOptionName = "policy";
        Option policyOption = OptionBuilder.hasArg().isRequired().withLongOpt(policyOptionName)
                .withDescription("Policy to remove").create();
        removePolicyOptions.addOption(policyOption);

        CommandLine args = parseCommandArgs(removePolicyOptions, params);
        String policyName = args.getOptionValue(policyOptionName);

        erasureCodingShim.removeErasureCodingPolicy(policyName);
        writeTestOutput("Removed EC policy '" + policyName + "'");
    } catch (ParseException pe) {
        writeTestOutput("Error parsing options for " + command + " " + pe.getMessage());
    } catch (Exception e) {
        writeTestOutput("Caught exception running " + command + ": " + e.getMessage());
        throw new Exception("Cannot run " + command + ": " + e.getMessage());
    }
}

From source file:org.apache.hadoop.hive.ql.processors.ErasureProcessor.java

/**
 * Disable an erasure coding policy.
 * @param params Parameters passed to the command.
 * @throws Exception If command failed.
 */
private void disablePolicy(String[] params) throws Exception {
    String command = "disablePolicy";
    try {
        // disablePolicy -policy <policyName>
        Options disablePolicyOptions = new Options();

        String policyOptionName = "policy";
        Option policyOption = OptionBuilder.hasArg().isRequired().withLongOpt(policyOptionName)
                .withDescription("Policy to disable").create();
        disablePolicyOptions.addOption(policyOption);

        CommandLine args = parseCommandArgs(disablePolicyOptions, params);
        String policyName = args.getOptionValue(policyOptionName);

        erasureCodingShim.disableErasureCodingPolicy(policyName);
        writeTestOutput("Disabled EC policy '" + policyName + "'");
    } catch (ParseException pe) {
        writeTestOutput("Error parsing options for " + command + " " + pe.getMessage());
    } catch (Exception e) {
        writeTestOutput("Caught exception running " + command + ": " + e.getMessage());
        throw new Exception("Cannot run " + command + ": " + e.getMessage());
    }
}